#![allow(clippy::return_self_not_must_use)]

use crate::{log::LogMeta, stream::EventStream, ContractError, EthLogDecode};
use ethers_core::{
    abi::{Address, Detokenize, RawLog},
    types::{BlockNumber, Filter, Log, Topic, ValueOrArray, H256},
};
use ethers_providers::{FilterWatcher, Middleware, PubsubClient, SubscriptionStream};
use std::{borrow::Cow, marker::PhantomData};

/// A trait for implementing event bindings
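///
/// # Example
///
/// A minimal, illustrative sketch: derive the trait for a struct that mirrors the Solidity
/// event (the `ValueChanged` name and fields here are made up for the example):
///
/// ```
/// # #[cfg(feature = "abigen")]
/// # fn test() {
/// use ethers_contract::EthEvent;
/// use ethers_core::types::Address;
///
/// // The derive macro generates `name`, `signature`, `abi_signature` and `decode_log`.
/// #[derive(Clone, Debug, EthEvent)]
/// pub struct ValueChanged {
///     #[ethevent(indexed)]
///     pub old_author: Address,
///     pub new_value: String,
/// }
///
/// assert_eq!(ValueChanged::name(), "ValueChanged");
/// let _signature = ValueChanged::signature(); // Keccak-256 hash of the ABI signature
/// # }
/// ```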
2021-07-13 11:13:12 +00:00
pub trait EthEvent : Detokenize + Send + Sync {
2021-03-15 11:59:52 +00:00
/// The name of the event this type represents
2021-03-16 19:37:19 +00:00
fn name ( ) -> Cow < 'static , str > ;
2021-03-15 11:59:52 +00:00
/// Retrieves the signature for the event this data corresponds to.
/// This signature is the Keccak-256 hash of the ABI signature of
/// this event.
fn signature ( ) -> H256 ;
/// Retrieves the ABI signature for the event this data corresponds
/// to.
fn abi_signature ( ) -> Cow < 'static , str > ;
2021-03-19 15:44:59 +00:00
/// Decodes an Ethereum `RawLog` into an instance of the type.
fn decode_log ( log : & RawLog ) -> Result < Self , ethers_core ::abi ::Error >
where
Self : Sized ;
/// Returns true if this is an anonymous event
fn is_anonymous ( ) -> bool ;
/// Returns an Event builder for the ethereum event represented by this types ABI signature.
fn new < M : Middleware > ( filter : Filter , provider : & M ) -> Event < M , Self >
where
Self : Sized ,
{
let filter = filter . event ( & Self ::abi_signature ( ) ) ;
2021-10-29 12:29:35 +00:00
Event { filter , provider , datatype : PhantomData }
2021-03-19 15:44:59 +00:00
}
}
// Convenience implementation
impl<T: EthEvent> EthLogDecode for T {
    fn decode_log(log: &RawLog) -> Result<Self, ethers_core::abi::Error>
    where
        Self: Sized,
    {
        T::decode_log(log)
    }
}

/// Helper for managing the event filter before querying or streaming its logs
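///
/// # Example
///
/// A minimal, illustrative sketch of building and running a filter; the `Approval` type
/// mirrors the one in [`Self::stream`]'s example and the `contract` handle is assumed to
/// already exist:
///
/// ```
/// # #[cfg(feature = "abigen")]
/// # async fn test<M: ethers_providers::Middleware>(contract: ethers_contract::Contract<M>) {
/// # use ethers_core::types::*;
/// # use ethers_contract::EthEvent;
/// # #[derive(Clone, Debug, EthEvent)]
/// # pub struct Approval {
/// #     #[ethevent(indexed)]
/// #     pub token_owner: Address,
/// #     #[ethevent(indexed)]
/// #     pub spender: Address,
/// #     pub tokens: U256,
/// # }
/// // Narrow the filter to a block range, then fetch and decode all matching logs.
/// let approvals: Vec<Approval> =
///     contract.event::<Approval>().from_block(1337).to_block(2000).query().await.unwrap();
/// # }
/// ```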
#[derive(Debug)]
#[must_use = "event filters do nothing unless you `query` or `stream` them"]
pub struct Event<'a, M, D> {
    /// The event filter's state
    pub filter: Filter,
    pub(crate) provider: &'a M,
    /// Stores the event datatype
    pub(crate) datatype: PhantomData<D>,
}

// TODO: Improve these functions
impl<M, D: EthLogDecode> Event<'_, M, D> {
    /// Sets the filter's `from` block
    #[allow(clippy::wrong_self_convention)]
    pub fn from_block<T: Into<BlockNumber>>(mut self, block: T) -> Self {
        self.filter = self.filter.from_block(block);
        self
    }

    /// Sets the filter's `to` block
    #[allow(clippy::wrong_self_convention)]
    pub fn to_block<T: Into<BlockNumber>>(mut self, block: T) -> Self {
        self.filter = self.filter.to_block(block);
        self
    }

    /// Sets the filter's `blockHash`. Setting this will override previously
    /// set `from_block` and `to_block` fields.
    #[allow(clippy::wrong_self_convention)]
    pub fn at_block_hash<T: Into<H256>>(mut self, hash: T) -> Self {
        self.filter = self.filter.at_block_hash(hash);
        self
    }

    /// Sets the filter's 0th topic (typically the event name for non-anonymous events)
    pub fn topic0<T: Into<Topic>>(mut self, topic: T) -> Self {
        self.filter.topics[0] = Some(topic.into());
        self
    }

    /// Sets the filter's 1st topic
    pub fn topic1<T: Into<Topic>>(mut self, topic: T) -> Self {
        self.filter.topics[1] = Some(topic.into());
        self
    }

    /// Sets the filter's 2nd topic
    pub fn topic2<T: Into<Topic>>(mut self, topic: T) -> Self {
        self.filter.topics[2] = Some(topic.into());
        self
    }

    /// Sets the filter's 3rd topic
    pub fn topic3<T: Into<Topic>>(mut self, topic: T) -> Self {
        self.filter.topics[3] = Some(topic.into());
        self
    }

    /// Sets the filter's address.
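    ///
    /// # Example
    ///
    /// A brief, illustrative sketch: `ValueOrArray` lets the filter match logs from a single
    /// address or from any address in a list (the `Approval` type mirrors the one in
    /// [`Self::stream`]'s example):
    ///
    /// ```
    /// # #[cfg(feature = "abigen")]
    /// # async fn test<M: ethers_providers::Middleware>(contract: ethers_contract::Contract<M>) {
    /// # use ethers_core::types::*;
    /// # use ethers_contract::EthEvent;
    /// # #[derive(Clone, Debug, EthEvent)]
    /// # pub struct Approval {
    /// #     #[ethevent(indexed)]
    /// #     pub token_owner: Address,
    /// #     #[ethevent(indexed)]
    /// #     pub spender: Address,
    /// #     pub tokens: U256,
    /// # }
    /// // Only logs emitted by one address...
    /// let filtered_single = contract
    ///     .event::<Approval>()
    ///     .address(ValueOrArray::Value(contract.address()));
    /// // ...or by any address in a list.
    /// let filtered_many = contract
    ///     .event::<Approval>()
    ///     .address(ValueOrArray::Array(vec![Address::zero(), contract.address()]));
    /// # let _ = (filtered_single, filtered_many);
    /// # }
    /// ```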
    pub fn address(mut self, address: ValueOrArray<Address>) -> Self {
        self.filter = self.filter.address(address);
        self
    }
}

impl<'a, M, D> Event<'a, M, D>
where
    M: Middleware,
    D: EthLogDecode,
{
    /// Turns this event filter into a `Stream` that yields decoded events.
    ///
    /// This will first install a new logs filter via [`eth_newFilter`](https://docs.alchemy.com/alchemy/apis/ethereum/eth-newfilter) using the configured `filter` object. See also [`FilterWatcher`](ethers_providers::FilterWatcher).
    ///
    /// Once the filter is created, this will periodically call [`eth_getFilterChanges`](https://docs.alchemy.com/alchemy/apis/ethereum/eth-getfilterchanges) to get the newest logs and decode them.
    ///
    /// **Note:** Compared to [`Self::subscribe`], which is only available on `PubsubClient`s such
    /// as WebSocket, this is a poll-based subscription: the node does not notify us when a new
    /// matching log is available, so we have to actively ask for new logs with additional RPC
    /// requests on an interval.
    ///
    /// # Example
    ///
    /// ```
    /// # #[cfg(feature = "abigen")]
    /// # async fn test<M: ethers_providers::Middleware>(contract: ethers_contract::Contract<M>) {
    /// # use ethers_core::types::*;
    /// # use futures_util::stream::StreamExt;
    /// # use ethers_contract::{Contract, EthEvent};
    ///
    /// // The event we want to get
    /// #[derive(Clone, Debug, EthEvent)]
    /// pub struct Approval {
    ///     #[ethevent(indexed)]
    ///     pub token_owner: Address,
    ///     #[ethevent(indexed)]
    ///     pub spender: Address,
    ///     pub tokens: U256,
    /// }
    ///
    /// let ev = contract.event::<Approval>().from_block(1337).to_block(2000);
    /// let mut event_stream = ev.stream().await.unwrap();
    ///
    /// while let Some(Ok(approval)) = event_stream.next().await {
    ///     let Approval { token_owner, spender, tokens } = approval;
    /// }
    /// # }
    /// ```
    pub async fn stream(
        &'a self,
    ) -> Result<
        // Wraps the FilterWatcher with a mapping to the event
        EventStream<'a, FilterWatcher<'a, M::Provider, Log>, D, ContractError<M>>,
        ContractError<M>,
    > {
        let filter =
            self.provider.watch(&self.filter).await.map_err(ContractError::MiddlewareError)?;
        Ok(EventStream::new(filter.id, filter, Box::new(move |log| self.parse_log(log))))
    }

    /// As [`Self::stream`], but does not discard [`Log`] metadata.
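    ///
    /// # Example
    ///
    /// A minimal, illustrative sketch; `Approval` mirrors the type from [`Self::stream`]'s
    /// example, and `meta` carries the log's block number, transaction hash, and so on:
    ///
    /// ```
    /// # #[cfg(feature = "abigen")]
    /// # async fn test<M: ethers_providers::Middleware>(contract: ethers_contract::Contract<M>) {
    /// # use ethers_core::types::*;
    /// # use futures_util::stream::StreamExt;
    /// # use ethers_contract::EthEvent;
    /// # #[derive(Clone, Debug, EthEvent)]
    /// # pub struct Approval {
    /// #     #[ethevent(indexed)]
    /// #     pub token_owner: Address,
    /// #     #[ethevent(indexed)]
    /// #     pub spender: Address,
    /// #     pub tokens: U256,
    /// # }
    /// let ev = contract.event::<Approval>();
    /// let mut stream = ev.stream_with_meta().await.unwrap();
    ///
    /// while let Some(Ok((approval, meta))) = stream.next().await {
    ///     println!("block {:?}: {:?}", meta.block_number, approval);
    /// }
    /// # }
    /// ```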
    pub async fn stream_with_meta(
        &'a self,
    ) -> Result<
        // Wraps the FilterWatcher with a mapping to the event
        EventStream<'a, FilterWatcher<'a, M::Provider, Log>, (D, LogMeta), ContractError<M>>,
        ContractError<M>,
    > {
        let filter =
            self.provider.watch(&self.filter).await.map_err(ContractError::MiddlewareError)?;
        Ok(EventStream::new(
            filter.id,
            filter,
            Box::new(move |log| {
                let meta = LogMeta::from(&log);
                Ok((self.parse_log(log)?, meta))
            }),
        ))
    }
}

impl<'a, M, D> Event<'a, M, D>
where
    M: Middleware,
    <M as Middleware>::Provider: PubsubClient,
    D: EthLogDecode,
{
    /// Returns a subscription for the event
    ///
    /// See also [`Self::stream`].
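    ///
    /// # Example
    ///
    /// A minimal, illustrative sketch; this assumes the middleware is backed by a pubsub
    /// transport (e.g. websockets), and `Approval` mirrors the type from [`Self::stream`]'s
    /// example:
    ///
    /// ```
    /// # #[cfg(feature = "abigen")]
    /// # async fn test<M>(contract: ethers_contract::Contract<M>)
    /// # where
    /// #     M: ethers_providers::Middleware,
    /// #     M::Provider: ethers_providers::PubsubClient,
    /// # {
    /// # use ethers_core::types::*;
    /// # use futures_util::stream::StreamExt;
    /// # use ethers_contract::EthEvent;
    /// # #[derive(Clone, Debug, EthEvent)]
    /// # pub struct Approval {
    /// #     #[ethevent(indexed)]
    /// #     pub token_owner: Address,
    /// #     #[ethevent(indexed)]
    /// #     pub spender: Address,
    /// #     pub tokens: U256,
    /// # }
    /// // The node pushes matching logs to us over the subscription; no polling is involved.
    /// let ev = contract.event::<Approval>();
    /// let mut subscription = ev.subscribe().await.unwrap();
    ///
    /// while let Some(Ok(approval)) = subscription.next().await {
    ///     println!("{:?}", approval);
    /// }
    /// # }
    /// ```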
    pub async fn subscribe(
        &'a self,
    ) -> Result<
        // Wraps the SubscriptionStream with a mapping to the event
        EventStream<'a, SubscriptionStream<'a, M::Provider, Log>, D, ContractError<M>>,
        ContractError<M>,
    > {
        let filter = self
            .provider
            .subscribe_logs(&self.filter)
            .await
            .map_err(ContractError::MiddlewareError)?;
        Ok(EventStream::new(filter.id, filter, Box::new(move |log| self.parse_log(log))))
    }

    /// As [`Self::subscribe`], but does not discard [`Log`] metadata.
    pub async fn subscribe_with_meta(
        &'a self,
    ) -> Result<
        // Wraps the SubscriptionStream with a mapping to the event
        EventStream<'a, SubscriptionStream<'a, M::Provider, Log>, (D, LogMeta), ContractError<M>>,
        ContractError<M>,
    > {
        let filter = self
            .provider
            .subscribe_logs(&self.filter)
            .await
            .map_err(ContractError::MiddlewareError)?;
        Ok(EventStream::new(
            filter.id,
            filter,
            Box::new(move |log| {
                let meta = LogMeta::from(&log);
                Ok((self.parse_log(log)?, meta))
            }),
        ))
    }
}

impl<M, D> Event<'_, M, D>
where
    M: Middleware,
    D: EthLogDecode,
{
    /// Queries the blockchain for the selected filter and returns a vector of matching
    /// event logs
    pub async fn query(&self) -> Result<Vec<D>, ContractError<M>> {
        let logs =
            self.provider.get_logs(&self.filter).await.map_err(ContractError::MiddlewareError)?;
        let events = logs
            .into_iter()
            .map(|log| self.parse_log(log))
            .collect::<Result<Vec<_>, ContractError<M>>>()?;
        Ok(events)
    }

    /// Queries the blockchain for the selected filter and returns a vector of logs
    /// along with their metadata
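    ///
    /// # Example
    ///
    /// A minimal, illustrative sketch; `Approval` mirrors the type from [`Self::stream`]'s
    /// example, the `contract` handle is assumed to already exist, and each returned entry
    /// pairs the decoded event with the `LogMeta` of the log it came from:
    ///
    /// ```
    /// # #[cfg(feature = "abigen")]
    /// # async fn test<M: ethers_providers::Middleware>(contract: ethers_contract::Contract<M>) {
    /// # use ethers_core::types::*;
    /// # use ethers_contract::EthEvent;
    /// # #[derive(Clone, Debug, EthEvent)]
    /// # pub struct Approval {
    /// #     #[ethevent(indexed)]
    /// #     pub token_owner: Address,
    /// #     #[ethevent(indexed)]
    /// #     pub spender: Address,
    /// #     pub tokens: U256,
    /// # }
    /// let approvals = contract.event::<Approval>().query_with_meta().await.unwrap();
    ///
    /// for (approval, meta) in approvals {
    ///     println!("block {:?}: {:?}", meta.block_number, approval);
    /// }
    /// # }
    /// ```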
    pub async fn query_with_meta(&self) -> Result<Vec<(D, LogMeta)>, ContractError<M>> {
        let logs =
            self.provider.get_logs(&self.filter).await.map_err(ContractError::MiddlewareError)?;
        let events = logs
            .into_iter()
            .map(|log| {
                let meta = LogMeta::from(&log);
                let event = self.parse_log(log)?;
                Ok((event, meta))
            })
            .collect::<Result<_, ContractError<M>>>()?;
        Ok(events)
    }

    /// Parses a raw [`Log`] from the node into this event's data type `D`.
    pub fn parse_log(&self, log: Log) -> Result<D, ContractError<M>> {
        D::decode_log(&RawLog { topics: log.topics, data: log.data.to_vec() }).map_err(From::from)
    }
}