From cd5ecd72c3170ee8898601e2bc845ecf96129bcd Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:08:40 -0600 Subject: [PATCH 0001/1648] Test of multi-threaded reading --- plugins/net_plugin/net_plugin.cpp | 162 ++++++++++++++++++++++-------- 1 file changed, 118 insertions(+), 44 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index e4adc0dd6ac..78935754bf5 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,6 +75,12 @@ namespace eosio { } }; + struct block_greater { + bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { + return lhs->block_num() > rhs->block_num(); + } + }; + typedef multi_index_container< node_transaction_state, indexed_by< @@ -157,7 +163,7 @@ namespace eosio { channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 1; // currently used by server_ioc + uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; optional server_ioc_work; @@ -500,12 +506,12 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; - fc::optional outstanding_read_bytes; + std::atomic outstanding_read_bytes{0}; queued_buffer buffer_queue; - uint32_t reads_in_flight = 0; + std::atomic reads_in_flight{0}; uint32_t trx_in_progress_size = 0; fc::sha256 node_id; handshake_message last_handshake_recv; @@ -638,9 +644,9 @@ namespace eosio { }; struct msg_handler : public fc::visitor { - net_plugin_impl &impl; + net_plugin_impl& impl; connection_ptr c; - msg_handler( net_plugin_impl &imp, const connection_ptr& conn) : impl(imp), c(conn) {} + msg_handler( net_plugin_impl& imp, const connection_ptr& conn) : impl(imp), c(conn) {} void operator()( const signed_block& msg ) const { EOS_ASSERT( false, plugin_config_exception, "operator()(signed_block&&) should be called" ); @@ -656,16 +662,30 @@ namespace eosio { } void operator()( signed_block&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); + shared_ptr ptr = std::make_shared( std::move( msg ) ); + connection_wptr weak = c; + app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if( c ) impl->handle_message( c, ptr ); + }); } void operator()( packed_transaction&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); + shared_ptr ptr = std::make_shared( std::move( msg ) ); + connection_wptr weak = c; + app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if( c) impl->handle_message( c, ptr ); + }); } template void operator()( T&& msg ) const { - impl.handle_message( c, std::forward(msg) ); + connection_wptr weak = c; + app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); } }; @@ -703,6 +723,8 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); + + std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -774,7 +796,9 @@ namespace eosio { initialize(); } - connection::~connection() {} + connection::~connection() { 
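// Illustrative sketch, not part of the patch: the msg_handler overloads above stop calling
// impl.handle_message() directly on the network thread and instead post the work to the
// application queue with a priority, capturing only a connection_wptr so a queued lambda
// cannot keep a closed connection alive. The snippet below shows that hand-off pattern in
// isolation; it assumes nothing from appbase and uses a plain boost::asio::io_context as a
// stand-in for the main-thread queue, with invented names (session, main_queue).

#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <iostream>
#include <memory>

struct session {
   void handle_message() { std::cout << "message handled on the main thread\n"; }
};

int main() {
   boost::asio::io_context main_queue;          // stand-in for app().post( priority, ... )
   auto s = std::make_shared<session>();

   std::weak_ptr<session> weak = s;             // weak capture: do not extend the session lifetime
   boost::asio::post( main_queue, [weak]() {
      if( auto locked = weak.lock() )           // the session may have been destroyed meanwhile
         locked->handle_message();
   } );

   // s.reset();                                // if the session died first, the lambda is a no-op
   main_queue.run();                            // drain the queued work on this thread
}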
+ pending_message_buffer.reset(); + } void connection::initialize() { auto *rnd = node_id.data(); @@ -822,7 +846,6 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); if( read_delay_timer ) read_delay_timer->cancel(); - pending_message_buffer.reset(); } void connection::txn_send_pending(const vector& ids) { @@ -1101,6 +1124,7 @@ namespace eosio { static std::shared_ptr> create_send_buffer( const signed_block_ptr& sb ) { // this implementation is to avoid copy of signed_block to net_message // matches which of net_message for signed_block + fc_dlog( logger, "sending block ${bn}", ("bn", sb->block_num()) ); return create_send_buffer( signed_block_which, *sb ); } @@ -1903,7 +1927,9 @@ namespace eosio { return false; } else { - start_read_message( con ); + boost::asio::post(*server_ioc, [this, con]() { + start_read_message( con ); + }); ++started_sessions; return true; // for now, we can just use the application main loop. @@ -1988,7 +2014,7 @@ namespace eosio { } connection_wptr weak_conn = conn; - std::size_t minimum_read = conn->outstanding_read_bytes ? *conn->outstanding_read_bytes : message_header_size; + std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? conn->outstanding_read_bytes.load() : message_header_size; if (use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; @@ -2004,7 +2030,7 @@ namespace eosio { return minimum_read - bytes_transferred; } }; - +/* if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) @@ -2013,7 +2039,7 @@ namespace eosio { if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); } else if( conn->reads_in_flight > def_max_reads_in_flight ) { - peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight) ); + peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); } else { peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); } @@ -2035,19 +2061,19 @@ namespace eosio { } ) ); return; } - +*/ ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { - app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); if (!conn) { return; } --conn->reads_in_flight; - conn->outstanding_read_bytes.reset(); + conn->outstanding_read_bytes = 0; + bool close_connection = false; try { if( !ec ) { @@ -2061,18 +2087,16 @@ namespace eosio { uint32_t bytes_in_buffer = conn->pending_message_buffer.bytes_to_read(); if (bytes_in_buffer < message_header_size) { - conn->outstanding_read_bytes.emplace(message_header_size - bytes_in_buffer); + conn->outstanding_read_bytes = message_header_size - bytes_in_buffer; break; } else { uint32_t message_length; auto index = conn->pending_message_buffer.read_index(); conn->pending_message_buffer.peek(&message_length, sizeof(message_length), index); if(message_length > def_send_buffer_size*2 || message_length == 0) { - boost::system::error_code ec; - fc_elog( logger,"incoming message length unexpected (${i}), from ${p}", - ("i", message_length)("p",boost::lexical_cast(conn->socket->remote_endpoint(ec))) ); - close(conn); - return; + 
fc_elog( logger,"incoming message length unexpected (${i})", ("i", message_length) ); + close_connection = true; + break; } auto total_message_bytes = message_length + message_header_size; @@ -2089,37 +2113,43 @@ namespace eosio { conn->pending_message_buffer.add_space( outstanding_message_bytes - available_buffer_bytes ); } - conn->outstanding_read_bytes.emplace(outstanding_message_bytes); + conn->outstanding_read_bytes = outstanding_message_bytes; break; } } } - start_read_message(conn); + if( !close_connection ) start_read_message( conn ); } else { - auto pname = conn->peer_name(); if (ec.value() != boost::asio::error::eof) { - fc_elog( logger, "Error reading message from ${p}: ${m}",("p",pname)( "m", ec.message() ) ); + fc_elog( logger, "Error reading message: ${m}", ( "m", ec.message() ) ); } else { - fc_ilog( logger, "Peer ${p} closed connection",("p",pname) ); + fc_ilog( logger, "Peer closed connection" ); } - close( conn ); + close_connection = true; } } catch(const std::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.what()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data: ${s}", ("s",ex.what()) ); + close_connection = true; } catch(const fc::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.to_string()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data ${s}", ("s",ex.to_string()) ); + close_connection = true; } catch (...) { - fc_elog( logger, "Undefined exception handling the read data from ${p}",( "p",conn->peer_name()) ); - close( conn ); + fc_elog( logger, "Undefined exception handling read data" ); + close_connection = true; + } + + if( close_connection ) { + connection_wptr weak_conn = conn; + app().post( priority::medium, "close conn", [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + close( conn ); + }); } - }); }); } catch (...) { string pname = conn ? conn->peer_name() : "no connection name"; @@ -2131,6 +2161,7 @@ namespace eosio { bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { try { // if next message is a block we already have, exit early +/* auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); @@ -2147,7 +2178,7 @@ namespace eosio { return true; } } - +*/ auto ds = conn->pending_message_buffer.create_datastream(); net_message msg; fc::raw::unpack( ds, msg ); @@ -2528,18 +2559,61 @@ namespace eosio { }); } - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { - controller &cc = chain_plug->chain(); - block_id_type blk_id = msg->id(); - uint32_t blk_num = msg->block_num(); + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& m) { + signed_block_ptr msg = m; + controller& cc = chain_plug->chain(); + block_id_type blk_id = msg ? msg->id() : block_id_type(); + uint32_t blk_num = msg ? msg->block_num() : 0; fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { - if( cc.fetch_block_by_id(blk_id)) { + if( msg && cc.fetch_block_by_id(blk_id)) { sync_master->recv_block(c, blk_id, blk_num); return; } + signed_block_ptr prev = msg ? 
cc.fetch_block_by_id( msg->previous ) : msg; + if( prev == nullptr ){ //&& sync_master->is_active(c) ) { + // see if top is ready + if( !sync_master->incoming_blocks.empty() ) { + prev = sync_master->incoming_blocks.top(); + auto prev_prev = cc.fetch_block_by_id( prev->previous ); + if( prev_prev != nullptr ) { + sync_master->incoming_blocks.pop(); + if(msg) sync_master->incoming_blocks.emplace( msg ); + msg = prev; + blk_id = msg->id(); + blk_num = msg->block_num(); + connection_wptr weak = c; + app().post(priority::medium, "re post blk", [this, weak](){ + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + }); + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 1b49c5fc3307d1279ba9d4db27a4cd90a01a7e43 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:16:39 -0600 Subject: [PATCH 0002/1648] Remove descriptions of tasks as not merged into develop yet --- plugins/net_plugin/net_plugin.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 78935754bf5..afc08e4361d 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -664,7 +664,7 @@ namespace eosio { void operator()( signed_block&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; - app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if( c ) impl->handle_message( c, ptr ); }); @@ -672,7 +672,7 @@ namespace eosio { void operator()( packed_transaction&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; - app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if( c) impl->handle_message( c, ptr ); }); @@ -682,7 +682,7 @@ namespace eosio { void operator()( T&& msg ) const { connection_wptr weak = c; - app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); @@ -2143,7 +2143,7 @@ namespace eosio { if( close_connection ) { connection_wptr weak_conn = conn; - app().post( priority::medium, "close conn", [this, weak_conn]() { + app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2585,7 +2585,7 @@ namespace eosio { blk_id = msg->id(); blk_num = msg->block_num(); connection_wptr weak = 
c; - app().post(priority::medium, "re post blk", [this, weak](){ + app().post(priority::medium, [this, weak](){ connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); }); @@ -2594,7 +2594,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2606,7 +2606,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); From 14e7a832a22fc469b20b208333b1cef6f3f8909f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:45:04 -0600 Subject: [PATCH 0003/1648] Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. --- plugins/net_plugin/net_plugin.cpp | 53 ++----------------------------- 1 file changed, 2 insertions(+), 51 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index afc08e4361d..4de2d122ba9 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,12 +75,6 @@ namespace eosio { } }; - struct block_greater { - bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { - return lhs->block_num() > rhs->block_num(); - } - }; - typedef multi_index_container< node_transaction_state, indexed_by< @@ -506,7 +500,7 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; - std::atomic outstanding_read_bytes{0}; + std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads queued_buffer buffer_queue; @@ -723,8 +717,6 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); - - std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -2006,6 +1998,7 @@ namespace eosio { }); } + // only called from server_ioc thread void net_plugin_impl::start_read_message(const connection_ptr& conn) { try { @@ -2572,48 +2565,6 @@ namespace eosio { sync_master->recv_block(c, blk_id, blk_num); return; } - signed_block_ptr prev = msg ? 
cc.fetch_block_by_id( msg->previous ) : msg; - if( prev == nullptr ){ //&& sync_master->is_active(c) ) { - // see if top is ready - if( !sync_master->incoming_blocks.empty() ) { - prev = sync_master->incoming_blocks.top(); - auto prev_prev = cc.fetch_block_by_id( prev->previous ); - if( prev_prev != nullptr ) { - sync_master->incoming_blocks.pop(); - if(msg) sync_master->incoming_blocks.emplace( msg ); - msg = prev; - blk_id = msg->id(); - blk_num = msg->block_num(); - connection_wptr weak = c; - app().post(priority::medium, [this, weak](){ - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - }); - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 9d1bdb3809bab450bb1c466b701e42f78057b8e1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:45:27 -0600 Subject: [PATCH 0004/1648] Update to appbase with FIFO priority queue --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index da4bf8cb324..5c10377c426 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit da4bf8cb324225b002b3105da42b62769da94ce9 +Subproject commit 5c10377c426d1905c46d781cbb75a34e79728bca From 6caa09c4dc184009b69650ae15badeb912ba39ad Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:49:28 -0600 Subject: [PATCH 0005/1648] Revert unneeded changes to handle_message --- plugins/net_plugin/net_plugin.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4de2d122ba9..46645215bc6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2552,16 +2552,15 @@ namespace eosio { }); } - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& m) { - signed_block_ptr msg = m; + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { controller& cc = chain_plug->chain(); - block_id_type blk_id = msg ? msg->id() : block_id_type(); - uint32_t blk_num = msg ? 
msg->block_num() : 0; + block_id_type blk_id = msg->id(); + uint32_t blk_num = msg->block_num(); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { - if( msg && cc.fetch_block_by_id(blk_id)) { + if( cc.fetch_block_by_id(blk_id) ) { sync_master->recv_block(c, blk_id, blk_num); return; } From f5d408bdc4bf2c7e4d51f377b1e6109f5a038330 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 14:50:46 -0600 Subject: [PATCH 0006/1648] Logging fixes --- plugins/net_plugin/net_plugin.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 46645215bc6..53e46420123 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1636,6 +1636,7 @@ namespace eosio { uint32_t bnum = bs->block_num; peer_block_state pbstate{bs->id, bnum}; + fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); std::shared_ptr> send_buffer; for( auto& cp : my_impl->connections ) { @@ -1650,7 +1651,7 @@ namespace eosio { if( !send_buffer ) { send_buffer = create_send_buffer( bs->block ); } - fc_dlog(logger, "bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name())); + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); } } @@ -1672,7 +1673,7 @@ namespace eosio { } void dispatch_manager::rejected_block(const block_id_type& id) { - fc_dlog(logger,"not sending rejected transaction ${tid}",("tid",id)); + fc_dlog( logger, "rejected block ${id}", ("id", id) ); auto range = received_blocks.equal_range(id); received_blocks.erase(range.first, range.second); } From c3e779169661f46016ce9bd4140bae0934b0c3f8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 18:26:06 -0600 Subject: [PATCH 0007/1648] Make delay_timer thread safe --- plugins/net_plugin/net_plugin.cpp | 56 ++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 53e46420123..393bfab60b9 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -419,6 +419,7 @@ namespace eosio { } } + // thread safe uint32_t write_queue_size() const { return _write_queue_size; } bool is_out_queue_empty() const { return _out_queue.empty(); } @@ -477,7 +478,7 @@ namespace eosio { std::function callback; }; - uint32_t _write_queue_size = 0; + std::atomic _write_queue_size{0}; deque _write_queue; deque _sync_write_queue; // sync_write_queue will be sent first deque _out_queue; @@ -506,7 +507,7 @@ namespace eosio { queued_buffer buffer_queue; std::atomic reads_in_flight{0}; - uint32_t trx_in_progress_size = 0; + std::atomic trx_in_progress_size{0}; fc::sha256 node_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; @@ -516,6 +517,7 @@ namespace eosio { uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; + std::mutex read_delay_timer_mutex; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; @@ -837,7 +839,10 @@ namespace eosio { my_impl->sync_master->reset_lib_num(shared_from_this()); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); - if( read_delay_timer ) read_delay_timer->cancel(); + { + std::lock_guard g( read_delay_timer_mutex ); + if( read_delay_timer ) read_delay_timer->cancel(); + } } void connection::txn_send_pending(const vector& ids) { 
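// Illustrative sketch, not part of the patch: the hunk above, and the read-delay hunk that
// follows, guard every use of read_delay_timer with a std::lock_guard because the timer is
// now re-armed from server_ioc read threads while cancel_wait() and close() still run on the
// application thread. The self-contained class below shows the same idea, one mutex owning
// all access to a Boost.Asio timer that different threads may arm or cancel; the name
// delayed_retry and its interface are invented for the example.

#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>
#include <chrono>
#include <iostream>
#include <mutex>

class delayed_retry {
public:
   explicit delayed_retry( boost::asio::io_context& ioc ) : timer_( ioc ) {}

   // Safe to call from any thread: the timer is only touched under mutex_.
   void arm( std::chrono::milliseconds delay ) {
      std::lock_guard<std::mutex> g( mutex_ );
      timer_.expires_after( delay );
      timer_.async_wait( []( const boost::system::error_code& ec ) {
         std::cout << ( ec ? "retry cancelled\n" : "retry fired\n" );
      } );
   }

   // Also safe from any thread; an outstanding wait completes with operation_aborted.
   void cancel() {
      std::lock_guard<std::mutex> g( mutex_ );
      timer_.cancel();
   }

private:
   std::mutex                mutex_;
   boost::asio::steady_timer timer_;
};

int main() {
   boost::asio::io_context ioc;
   delayed_retry r( ioc );
   r.arm( std::chrono::milliseconds( 50 ) );
   r.cancel();                                  // cancel before expiry; the handler reports it
   ioc.run();
}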
@@ -2024,27 +2029,36 @@ namespace eosio { return minimum_read - bytes_transferred; } }; -/* + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) { // too much queued up, reschedule - if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { - peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); - } else if( conn->reads_in_flight > def_max_reads_in_flight ) { - peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); + uint32_t write_queue_size = conn->buffer_queue.write_queue_size(); + uint32_t trx_in_progress_size = conn->trx_in_progress_size; + uint32_t reads_in_flight = conn->reads_in_flight; + if( write_queue_size > def_max_write_queue_size ) { + peer_wlog( conn, "write_queue full ${s} bytes", ("s", write_queue_size) ); + } else if( reads_in_flight > def_max_reads_in_flight ) { + peer_wlog( conn, "max reads in flight ${s}", ("s", reads_in_flight) ); } else { - peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); + peer_wlog( conn, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); } - if( conn->buffer_queue.write_queue_size() > 2*def_max_write_queue_size || - conn->reads_in_flight > 2*def_max_reads_in_flight || - conn->trx_in_progress_size > 2*def_max_trx_in_progress_size ) + if( write_queue_size > 2*def_max_write_queue_size || + reads_in_flight > 2*def_max_reads_in_flight || + trx_in_progress_size > 2*def_max_trx_in_progress_size ) { - fc_wlog( logger, "queues over full, giving up on connection ${p}", ("p", conn->peer_name()) ); - my_impl->close( conn ); + fc_wlog( logger, "queues over full, giving up on connection" ); + app().post( priority::medium, [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + my_impl->close( conn ); + }); return; } + std::lock_guard g( conn->read_delay_timer_mutex ); if( !conn->read_delay_timer ) return; conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); conn->read_delay_timer->async_wait( @@ -2055,7 +2069,7 @@ namespace eosio { } ) ); return; } -*/ + ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2136,7 +2150,6 @@ namespace eosio { } if( close_connection ) { - connection_wptr weak_conn = conn; app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; @@ -2146,9 +2159,14 @@ namespace eosio { } }); } catch (...) { - string pname = conn ? 
conn->peer_name() : "no connection name"; - fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); - close( conn ); + fc_elog( logger, "Undefined exception in start_read_message" ); + connection_wptr weak_conn = conn; + app().post( priority::medium, [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + close( conn ); + }); } } From 1da7c8b9309dd7f66c092b9b41dc44a8ad148304 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:08:40 -0600 Subject: [PATCH 0008/1648] Test of multi-threaded reading --- plugins/net_plugin/net_plugin.cpp | 162 ++++++++++++++++++++++-------- 1 file changed, 118 insertions(+), 44 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index e4adc0dd6ac..78935754bf5 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,6 +75,12 @@ namespace eosio { } }; + struct block_greater { + bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { + return lhs->block_num() > rhs->block_num(); + } + }; + typedef multi_index_container< node_transaction_state, indexed_by< @@ -157,7 +163,7 @@ namespace eosio { channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 1; // currently used by server_ioc + uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; optional server_ioc_work; @@ -500,12 +506,12 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; - fc::optional outstanding_read_bytes; + std::atomic outstanding_read_bytes{0}; queued_buffer buffer_queue; - uint32_t reads_in_flight = 0; + std::atomic reads_in_flight{0}; uint32_t trx_in_progress_size = 0; fc::sha256 node_id; handshake_message last_handshake_recv; @@ -638,9 +644,9 @@ namespace eosio { }; struct msg_handler : public fc::visitor { - net_plugin_impl &impl; + net_plugin_impl& impl; connection_ptr c; - msg_handler( net_plugin_impl &imp, const connection_ptr& conn) : impl(imp), c(conn) {} + msg_handler( net_plugin_impl& imp, const connection_ptr& conn) : impl(imp), c(conn) {} void operator()( const signed_block& msg ) const { EOS_ASSERT( false, plugin_config_exception, "operator()(signed_block&&) should be called" ); @@ -656,16 +662,30 @@ namespace eosio { } void operator()( signed_block&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); + shared_ptr ptr = std::make_shared( std::move( msg ) ); + connection_wptr weak = c; + app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if( c ) impl->handle_message( c, ptr ); + }); } void operator()( packed_transaction&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); + shared_ptr ptr = std::make_shared( std::move( msg ) ); + connection_wptr weak = c; + app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if( c) impl->handle_message( c, ptr ); + }); } template void operator()( T&& msg ) const { - impl.handle_message( c, std::forward(msg) ); + connection_wptr weak = c; + app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); } }; @@ -703,6 +723,8 @@ namespace 
eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); + + std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -774,7 +796,9 @@ namespace eosio { initialize(); } - connection::~connection() {} + connection::~connection() { + pending_message_buffer.reset(); + } void connection::initialize() { auto *rnd = node_id.data(); @@ -822,7 +846,6 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); if( read_delay_timer ) read_delay_timer->cancel(); - pending_message_buffer.reset(); } void connection::txn_send_pending(const vector& ids) { @@ -1101,6 +1124,7 @@ namespace eosio { static std::shared_ptr> create_send_buffer( const signed_block_ptr& sb ) { // this implementation is to avoid copy of signed_block to net_message // matches which of net_message for signed_block + fc_dlog( logger, "sending block ${bn}", ("bn", sb->block_num()) ); return create_send_buffer( signed_block_which, *sb ); } @@ -1903,7 +1927,9 @@ namespace eosio { return false; } else { - start_read_message( con ); + boost::asio::post(*server_ioc, [this, con]() { + start_read_message( con ); + }); ++started_sessions; return true; // for now, we can just use the application main loop. @@ -1988,7 +2014,7 @@ namespace eosio { } connection_wptr weak_conn = conn; - std::size_t minimum_read = conn->outstanding_read_bytes ? *conn->outstanding_read_bytes : message_header_size; + std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? conn->outstanding_read_bytes.load() : message_header_size; if (use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; @@ -2004,7 +2030,7 @@ namespace eosio { return minimum_read - bytes_transferred; } }; - +/* if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) @@ -2013,7 +2039,7 @@ namespace eosio { if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); } else if( conn->reads_in_flight > def_max_reads_in_flight ) { - peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight) ); + peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); } else { peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); } @@ -2035,19 +2061,19 @@ namespace eosio { } ) ); return; } - +*/ ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { - app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); if (!conn) { return; } --conn->reads_in_flight; - conn->outstanding_read_bytes.reset(); + conn->outstanding_read_bytes = 0; + bool close_connection = false; try { if( !ec ) { @@ -2061,18 +2087,16 @@ namespace eosio { uint32_t bytes_in_buffer = conn->pending_message_buffer.bytes_to_read(); if (bytes_in_buffer < message_header_size) { - conn->outstanding_read_bytes.emplace(message_header_size - bytes_in_buffer); + conn->outstanding_read_bytes = message_header_size - bytes_in_buffer; break; } else { 
uint32_t message_length; auto index = conn->pending_message_buffer.read_index(); conn->pending_message_buffer.peek(&message_length, sizeof(message_length), index); if(message_length > def_send_buffer_size*2 || message_length == 0) { - boost::system::error_code ec; - fc_elog( logger,"incoming message length unexpected (${i}), from ${p}", - ("i", message_length)("p",boost::lexical_cast(conn->socket->remote_endpoint(ec))) ); - close(conn); - return; + fc_elog( logger,"incoming message length unexpected (${i})", ("i", message_length) ); + close_connection = true; + break; } auto total_message_bytes = message_length + message_header_size; @@ -2089,37 +2113,43 @@ namespace eosio { conn->pending_message_buffer.add_space( outstanding_message_bytes - available_buffer_bytes ); } - conn->outstanding_read_bytes.emplace(outstanding_message_bytes); + conn->outstanding_read_bytes = outstanding_message_bytes; break; } } } - start_read_message(conn); + if( !close_connection ) start_read_message( conn ); } else { - auto pname = conn->peer_name(); if (ec.value() != boost::asio::error::eof) { - fc_elog( logger, "Error reading message from ${p}: ${m}",("p",pname)( "m", ec.message() ) ); + fc_elog( logger, "Error reading message: ${m}", ( "m", ec.message() ) ); } else { - fc_ilog( logger, "Peer ${p} closed connection",("p",pname) ); + fc_ilog( logger, "Peer closed connection" ); } - close( conn ); + close_connection = true; } } catch(const std::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.what()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data: ${s}", ("s",ex.what()) ); + close_connection = true; } catch(const fc::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.to_string()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data ${s}", ("s",ex.to_string()) ); + close_connection = true; } catch (...) { - fc_elog( logger, "Undefined exception handling the read data from ${p}",( "p",conn->peer_name()) ); - close( conn ); + fc_elog( logger, "Undefined exception handling read data" ); + close_connection = true; + } + + if( close_connection ) { + connection_wptr weak_conn = conn; + app().post( priority::medium, "close conn", [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + close( conn ); + }); } - }); }); } catch (...) { string pname = conn ? conn->peer_name() : "no connection name"; @@ -2131,6 +2161,7 @@ namespace eosio { bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { try { // if next message is a block we already have, exit early +/* auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); @@ -2147,7 +2178,7 @@ namespace eosio { return true; } } - +*/ auto ds = conn->pending_message_buffer.create_datastream(); net_message msg; fc::raw::unpack( ds, msg ); @@ -2528,18 +2559,61 @@ namespace eosio { }); } - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { - controller &cc = chain_plug->chain(); - block_id_type blk_id = msg->id(); - uint32_t blk_num = msg->block_num(); + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& m) { + signed_block_ptr msg = m; + controller& cc = chain_plug->chain(); + block_id_type blk_id = msg ? 
msg->id() : block_id_type(); + uint32_t blk_num = msg ? msg->block_num() : 0; fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { - if( cc.fetch_block_by_id(blk_id)) { + if( msg && cc.fetch_block_by_id(blk_id)) { sync_master->recv_block(c, blk_id, blk_num); return; } + signed_block_ptr prev = msg ? cc.fetch_block_by_id( msg->previous ) : msg; + if( prev == nullptr ){ //&& sync_master->is_active(c) ) { + // see if top is ready + if( !sync_master->incoming_blocks.empty() ) { + prev = sync_master->incoming_blocks.top(); + auto prev_prev = cc.fetch_block_by_id( prev->previous ); + if( prev_prev != nullptr ) { + sync_master->incoming_blocks.pop(); + if(msg) sync_master->incoming_blocks.emplace( msg ); + msg = prev; + blk_id = msg->id(); + blk_num = msg->block_num(); + connection_wptr weak = c; + app().post(priority::medium, "re post blk", [this, weak](){ + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + }); + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From ce933f4f2ea66c57699e91bccda16e4b956700d0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:16:39 -0600 Subject: [PATCH 0009/1648] Remove descriptions of tasks as not merged into develop yet --- plugins/net_plugin/net_plugin.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 78935754bf5..afc08e4361d 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -664,7 +664,7 @@ namespace eosio { void operator()( signed_block&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; - app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if( c ) impl->handle_message( c, ptr ); }); @@ -672,7 +672,7 @@ namespace eosio { void operator()( packed_transaction&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; - app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if( c) impl->handle_message( c, ptr ); }); @@ -682,7 +682,7 @@ namespace eosio { void operator()( T&& msg ) const { connection_wptr weak = c; - app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); @@ -2143,7 +2143,7 @@ namespace eosio { if( close_connection ) { connection_wptr weak_conn = conn; - app().post( 
priority::medium, "close conn", [this, weak_conn]() { + app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2585,7 +2585,7 @@ namespace eosio { blk_id = msg->id(); blk_num = msg->block_num(); connection_wptr weak = c; - app().post(priority::medium, "re post blk", [this, weak](){ + app().post(priority::medium, [this, weak](){ connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); }); @@ -2594,7 +2594,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2606,7 +2606,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); From e269547e68f3213f1b824be232578c42a7cc66bb Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:45:04 -0600 Subject: [PATCH 0010/1648] Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. --- plugins/net_plugin/net_plugin.cpp | 53 ++----------------------------- 1 file changed, 2 insertions(+), 51 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index afc08e4361d..4de2d122ba9 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,12 +75,6 @@ namespace eosio { } }; - struct block_greater { - bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { - return lhs->block_num() > rhs->block_num(); - } - }; - typedef multi_index_container< node_transaction_state, indexed_by< @@ -506,7 +500,7 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; - std::atomic outstanding_read_bytes{0}; + std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads queued_buffer buffer_queue; @@ -723,8 +717,6 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); - - std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -2006,6 +1998,7 @@ namespace eosio { }); } + // only called from server_ioc thread void net_plugin_impl::start_read_message(const connection_ptr& conn) { try { @@ -2572,48 +2565,6 @@ namespace eosio { sync_master->recv_block(c, blk_id, blk_num); return; } - signed_block_ptr prev = msg ? 
cc.fetch_block_by_id( msg->previous ) : msg; - if( prev == nullptr ){ //&& sync_master->is_active(c) ) { - // see if top is ready - if( !sync_master->incoming_blocks.empty() ) { - prev = sync_master->incoming_blocks.top(); - auto prev_prev = cc.fetch_block_by_id( prev->previous ); - if( prev_prev != nullptr ) { - sync_master->incoming_blocks.pop(); - if(msg) sync_master->incoming_blocks.emplace( msg ); - msg = prev; - blk_id = msg->id(); - blk_num = msg->block_num(); - connection_wptr weak = c; - app().post(priority::medium, [this, weak](){ - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - }); - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 40db0f820706d9541656af9b5774ab8962d98bac Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:49:28 -0600 Subject: [PATCH 0011/1648] Revert unneeded changes to handle_message --- plugins/net_plugin/net_plugin.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4de2d122ba9..46645215bc6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2552,16 +2552,15 @@ namespace eosio { }); } - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& m) { - signed_block_ptr msg = m; + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { controller& cc = chain_plug->chain(); - block_id_type blk_id = msg ? msg->id() : block_id_type(); - uint32_t blk_num = msg ? 
msg->block_num() : 0; + block_id_type blk_id = msg->id(); + uint32_t blk_num = msg->block_num(); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { - if( msg && cc.fetch_block_by_id(blk_id)) { + if( cc.fetch_block_by_id(blk_id) ) { sync_master->recv_block(c, blk_id, blk_num); return; } From 5f8094ed5ff26e28d2a7651e337a4a809789dc86 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 14:50:46 -0600 Subject: [PATCH 0012/1648] Logging fixes --- plugins/net_plugin/net_plugin.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 46645215bc6..53e46420123 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1636,6 +1636,7 @@ namespace eosio { uint32_t bnum = bs->block_num; peer_block_state pbstate{bs->id, bnum}; + fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); std::shared_ptr> send_buffer; for( auto& cp : my_impl->connections ) { @@ -1650,7 +1651,7 @@ namespace eosio { if( !send_buffer ) { send_buffer = create_send_buffer( bs->block ); } - fc_dlog(logger, "bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name())); + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); } } @@ -1672,7 +1673,7 @@ namespace eosio { } void dispatch_manager::rejected_block(const block_id_type& id) { - fc_dlog(logger,"not sending rejected transaction ${tid}",("tid",id)); + fc_dlog( logger, "rejected block ${id}", ("id", id) ); auto range = received_blocks.equal_range(id); received_blocks.erase(range.first, range.second); } From d183fae6f6a80cd0c3cbd0ed45ba219409ad2457 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 18:26:06 -0600 Subject: [PATCH 0013/1648] Make delay_timer thread safe --- plugins/net_plugin/net_plugin.cpp | 56 ++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 53e46420123..393bfab60b9 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -419,6 +419,7 @@ namespace eosio { } } + // thread safe uint32_t write_queue_size() const { return _write_queue_size; } bool is_out_queue_empty() const { return _out_queue.empty(); } @@ -477,7 +478,7 @@ namespace eosio { std::function callback; }; - uint32_t _write_queue_size = 0; + std::atomic _write_queue_size{0}; deque _write_queue; deque _sync_write_queue; // sync_write_queue will be sent first deque _out_queue; @@ -506,7 +507,7 @@ namespace eosio { queued_buffer buffer_queue; std::atomic reads_in_flight{0}; - uint32_t trx_in_progress_size = 0; + std::atomic trx_in_progress_size{0}; fc::sha256 node_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; @@ -516,6 +517,7 @@ namespace eosio { uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; + std::mutex read_delay_timer_mutex; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; @@ -837,7 +839,10 @@ namespace eosio { my_impl->sync_master->reset_lib_num(shared_from_this()); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); - if( read_delay_timer ) read_delay_timer->cancel(); + { + std::lock_guard g( read_delay_timer_mutex ); + if( read_delay_timer ) read_delay_timer->cancel(); + } } void connection::txn_send_pending(const vector& ids) { 
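// Illustrative sketch, not part of the patch: reads_in_flight, trx_in_progress_size and the
// buffer queue's _write_queue_size become std::atomic in this series precisely so the
// read-throttling check (the hunk that follows, first introduced earlier in the series) can
// inspect them from a different thread than the ones updating them. Below is a minimal
// stand-alone version of that bump, check, and decrement pattern; the names read_throttle,
// in_flight and max_in_flight are invented for the example.

#include <atomic>
#include <cstdint>
#include <iostream>

struct read_throttle {
   std::atomic<uint32_t> in_flight{0};
   static constexpr uint32_t max_in_flight = 1000;

   // Called by the thread about to issue another async read.
   bool try_start() {
      if( in_flight.load() >= max_in_flight )   // snapshot; other threads may update it concurrently
         return false;                          // too much queued up, caller should reschedule
      ++in_flight;                              // atomic increment, safe from any thread
      return true;
   }

   // Called from whichever thread runs the read completion handler.
   void finish() { --in_flight; }
};

int main() {
   read_throttle t;
   if( t.try_start() ) {
      std::cout << "read started, in flight: " << t.in_flight.load() << "\n";
      t.finish();
   }
   std::cout << "after completion, in flight: " << t.in_flight.load() << "\n";
}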
@@ -2024,27 +2029,36 @@ namespace eosio { return minimum_read - bytes_transferred; } }; -/* + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) { // too much queued up, reschedule - if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { - peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); - } else if( conn->reads_in_flight > def_max_reads_in_flight ) { - peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); + uint32_t write_queue_size = conn->buffer_queue.write_queue_size(); + uint32_t trx_in_progress_size = conn->trx_in_progress_size; + uint32_t reads_in_flight = conn->reads_in_flight; + if( write_queue_size > def_max_write_queue_size ) { + peer_wlog( conn, "write_queue full ${s} bytes", ("s", write_queue_size) ); + } else if( reads_in_flight > def_max_reads_in_flight ) { + peer_wlog( conn, "max reads in flight ${s}", ("s", reads_in_flight) ); } else { - peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); + peer_wlog( conn, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); } - if( conn->buffer_queue.write_queue_size() > 2*def_max_write_queue_size || - conn->reads_in_flight > 2*def_max_reads_in_flight || - conn->trx_in_progress_size > 2*def_max_trx_in_progress_size ) + if( write_queue_size > 2*def_max_write_queue_size || + reads_in_flight > 2*def_max_reads_in_flight || + trx_in_progress_size > 2*def_max_trx_in_progress_size ) { - fc_wlog( logger, "queues over full, giving up on connection ${p}", ("p", conn->peer_name()) ); - my_impl->close( conn ); + fc_wlog( logger, "queues over full, giving up on connection" ); + app().post( priority::medium, [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + my_impl->close( conn ); + }); return; } + std::lock_guard g( conn->read_delay_timer_mutex ); if( !conn->read_delay_timer ) return; conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); conn->read_delay_timer->async_wait( @@ -2055,7 +2069,7 @@ namespace eosio { } ) ); return; } -*/ + ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2136,7 +2150,6 @@ namespace eosio { } if( close_connection ) { - connection_wptr weak_conn = conn; app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; @@ -2146,9 +2159,14 @@ namespace eosio { } }); } catch (...) { - string pname = conn ? 
conn->peer_name() : "no connection name"; - fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); - close( conn ); + fc_elog( logger, "Undefined exception in start_read_message" ); + connection_wptr weak_conn = conn; + app().post( priority::medium, [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + close( conn ); + }); } } From e65ad222e238b5a9c25d5a0dee8d97ca5bfc703a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 2 Mar 2019 11:22:30 -0600 Subject: [PATCH 0014/1648] Remove unneeded access to atomic --- plugins/net_plugin/net_plugin.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 393bfab60b9..8fc480600ed 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -26,6 +26,8 @@ #include #include +#include + using namespace eosio::chain::plugin_interface::compat; namespace fc { @@ -2013,7 +2015,9 @@ namespace eosio { } connection_wptr weak_conn = conn; - std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? conn->outstanding_read_bytes.load() : message_header_size; + std::size_t minimum_read = + std::atomic_exchangeoutstanding_read_bytes.load())>( &conn->outstanding_read_bytes, 0 ); + minimum_read = minimum_read != 0 ? minimum_read : message_header_size; if (use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; @@ -2080,7 +2084,6 @@ namespace eosio { } --conn->reads_in_flight; - conn->outstanding_read_bytes = 0; bool close_connection = false; try { From 44f0ef52a1172fa95f83e4f989357aa6b9f57853 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 4 Mar 2019 15:44:26 -0500 Subject: [PATCH 0015/1648] Move sig recovery in producer plugin to use thread pool of chain controller --- plugins/producer_plugin/producer_plugin.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index f3eb2164cea..9730437af8f 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -351,8 +351,11 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, trx, persist_until_expired, next]() { + fc::microseconds max_trx_cpu_usage{ cfg.max_transaction_cpu_usage }; + auto& tp = *_thread_pool; + boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { + // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress + transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage ); if( trx->signing_keys_future.valid() ) trx->signing_keys_future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { From 67ba7d043857e9d981fa3eb98cd5ee6ffaf23be6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 4 Mar 2019 15:47:26 -0500 Subject: [PATCH 0016/1648] Cache db_read_mode to avoid accessing controller --- plugins/net_plugin/net_plugin.cpp | 9 +++++---- 1 file changed, 
5 insertions(+), 4 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8fc480600ed..8ffe505c4e3 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -145,6 +145,7 @@ namespace eosio { bool network_version_match = false; chain_id_type chain_id; fc::sha256 node_id; + eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; string user_agent_name; chain_plugin* chain_plug = nullptr; @@ -2054,7 +2055,7 @@ namespace eosio { trx_in_progress_size > 2*def_max_trx_in_progress_size ) { fc_wlog( logger, "queues over full, giving up on connection" ); - app().post( priority::medium, [this, weak_conn]() { + app().post( priority::medium, [weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2536,8 +2537,7 @@ namespace eosio { void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); - controller& cc = my_impl->chain_plug->chain(); - if( cc.get_read_mode() == eosio::db_read_mode::READ_ONLY ) { + if( db_read_mode == eosio::db_read_mode::READ_ONLY ) { fc_dlog(logger, "got a txn in read-only mode - dropping"); return; } @@ -3097,7 +3097,8 @@ namespace eosio { my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); - if( cc.get_read_mode() == chain::db_read_mode::READ_ONLY ) { + my->db_read_mode = cc.get_read_mode(); + if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; fc_ilog( logger, "node in read-only mode setting max_nodes_per_host to 0 to prevent connections" ); } From 5cf4d317e4e3b22f8383547acb8a9165cc3a9687 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:16:13 -0500 Subject: [PATCH 0017/1648] Make chain_id const since read from thread pool threads --- libraries/chain/controller.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 402b61df0cf..efd4bfaed72 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -124,7 +124,7 @@ struct controller_impl { resource_limits_manager resource_limits; authorization_manager authorization; controller::config conf; - chain_id_type chain_id; + const chain_id_type chain_id; // read by thread_pool threads bool replaying= false; optional replay_head_time; db_read_mode read_mode = db_read_mode::SPECULATIVE; From 40b47f54ba25046019aeb10968a116e8cc761da3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:16:54 -0500 Subject: [PATCH 0018/1648] Add trx id to log message --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8ffe505c4e3..abadf27d8e4 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2562,7 +2562,7 @@ namespace eosio { } else { auto trace = result.get(); if (!trace->except) { - fc_dlog(logger, "chain accepted transaction"); + fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); this->dispatcher->bcast_transaction(ptrx); return; } From 6db8bc3b78ca0862d1f9aeca173fb47b94765ccb Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:18:34 -0500 Subject: [PATCH 
0019/1648] Use command line max_transaction_time for limit of transaction sig recovery. This was done so that global properties configuration does not have to be accessed since the plan is to move this code into the thread pool. --- plugins/producer_plugin/producer_plugin.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 9730437af8f..0ae10077aa6 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -134,7 +134,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _thread_pool; - int32_t _max_transaction_time_ms; + std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool fc::microseconds _max_irreversible_block_age_us; int32_t _produce_time_offset_us = 0; int32_t _last_block_time_offset_us = 0; @@ -350,8 +350,7 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); - const auto& cfg = chain.get_global_properties().configuration; - fc::microseconds max_trx_cpu_usage{ cfg.max_transaction_cpu_usage }; + fc::microseconds max_trx_cpu_usage = fc::milliseconds( _max_transaction_time_ms.load() ); auto& tp = *_thread_pool; boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress From 7f340f7347da146b360251214fbf655d11a955cd Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:58:27 -0500 Subject: [PATCH 0020/1648] Remove unneeded ack of transactions. Already processed by handle_message of packed_transaction_ptr --- plugins/net_plugin/net_plugin.cpp | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index abadf27d8e4..71b71c96518 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -158,8 +158,6 @@ namespace eosio { bool use_socket_read_watermark = false; - channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; @@ -189,7 +187,6 @@ namespace eosio { void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); void accepted_block(const block_state_ptr&); - void transaction_ack(const std::pair&); bool is_valid( const handshake_message &msg); @@ -2769,17 +2766,6 @@ namespace eosio { dispatcher->bcast_block(block); } - void net_plugin_impl::transaction_ack(const std::pair& results) { - const auto& id = results.second->id; - if (results.first) { - fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); - dispatcher->rejected_transaction(id); - } else { - fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); - dispatcher->bcast_transaction(results.second); - } - } - bool net_plugin_impl::authenticate_peer(const handshake_message& msg) const { if(allowed_connections == None) return false; @@ -3095,8 +3081,6 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } - my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); - my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode 
== chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; From 679b059f823a9955a5b58d79fa6b360fc2b06215 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 15:51:47 -0500 Subject: [PATCH 0021/1648] Revert "Remove unneeded ack of transactions. Already processed by handle_message of packed_transaction_ptr" This reverts commit 7f340f7347da146b360251214fbf655d11a955cd. --- plugins/net_plugin/net_plugin.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 71b71c96518..abadf27d8e4 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -158,6 +158,8 @@ namespace eosio { bool use_socket_read_watermark = false; + channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; @@ -187,6 +189,7 @@ namespace eosio { void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); void accepted_block(const block_state_ptr&); + void transaction_ack(const std::pair&); bool is_valid( const handshake_message &msg); @@ -2766,6 +2769,17 @@ namespace eosio { dispatcher->bcast_block(block); } + void net_plugin_impl::transaction_ack(const std::pair& results) { + const auto& id = results.second->id; + if (results.first) { + fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); + dispatcher->rejected_transaction(id); + } else { + fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); + dispatcher->bcast_transaction(results.second); + } + } + bool net_plugin_impl::authenticate_peer(const handshake_message& msg) const { if(allowed_connections == None) return false; @@ -3081,6 +3095,8 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } + my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; From cdcb59dd41bce4ff27c3ae518ffa620b0eb4d6ce Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 18:54:14 -0500 Subject: [PATCH 0022/1648] Fix tests, handle -1 as unlimited max_transaction_tim_ms. --- plugins/producer_plugin/producer_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0ae10077aa6..4dfd9d32cce 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -350,7 +350,8 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); - fc::microseconds max_trx_cpu_usage = fc::milliseconds( _max_transaction_time_ms.load() ); + const auto max_trx_time_ms = _max_transaction_time_ms.load(); + fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); auto& tp = *_thread_pool; boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress From 49d80942726746285a82b19e1c3632999968eb70 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 20:25:24 -0500 Subject: [PATCH 0023/1648] force a build --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index abadf27d8e4..dabd71d85db 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2711,8 +2711,8 @@ namespace eosio { auto &stale_blk = c->blk_state.get(); stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } - fc_dlog(logger, "expire_txns ${n}us size ${s} removed ${r}", - ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); + fc_dlog( logger, "expire_txns ${n}us size ${s} removed ${r}", + ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); } void net_plugin_impl::expire_local_txns() { From f61cc5d11dad254c5da2cb40d24f43e851d54077 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 6 Mar 2019 13:55:44 -0500 Subject: [PATCH 0024/1648] Move more of incoming transaction processing to thread pool --- plugins/net_plugin/net_plugin.cpp | 144 +++++++++++++++++++----------- 1 file changed, 94 insertions(+), 50 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index dabd71d85db..7e0599750a5 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -152,6 +152,7 @@ namespace eosio { producer_plugin* producer_plug = nullptr; int started_sessions = 0; + std::mutex local_txns_mtx; node_transaction_index local_txns; shared_ptr resolver; @@ -670,11 +671,7 @@ namespace eosio { } void operator()( packed_transaction&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if( c) impl->handle_message( c, ptr ); - }); + impl.handle_message( c, ptr ); } template @@ -851,21 +848,35 @@ namespace eosio { void connection::txn_send_pending(const vector& ids) { const std::set known_ids(ids.cbegin(), ids.cend()); my_impl->expire_local_txns(); - for(auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ){ - const bool found = known_ids.find( tx->id ) != known_ids.cend(); - if( !found ) { - queue_write( tx->serialized_txn, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + vector>> trx_to_send; + { + std::lock_guard g( my_impl->local_txns_mtx ); + for( auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ) { + const bool found = known_ids.find( tx->id ) != known_ids.cend(); + if( !found ) { + trx_to_send.emplace_back( tx->serialized_txn ); + } } } + for( const auto& t : trx_to_send ) { + queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + } } void connection::txn_send(const vector& ids) { - for(const auto& t : ids) { - auto tx = my_impl->local_txns.get().find(t); - if( tx != my_impl->local_txns.end() ) { - queue_write( tx->serialized_txn, true, priority::low, []( 
boost::system::error_code ec, std::size_t ) {} ); + vector>> trx_to_send; + { + std::lock_guard g( my_impl->local_txns_mtx ); + for( const auto& t : ids ) { + auto tx = my_impl->local_txns.get().find( t ); + if( tx != my_impl->local_txns.end()) { + trx_to_send.emplace_back( tx->serialized_txn ); + } } } + for( const auto& t : trx_to_send ) { + queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + } } void connection::blk_send_branch() { @@ -1476,7 +1487,10 @@ namespace eosio { notice_message note; note.known_blocks.mode = none; note.known_trx.mode = catch_up; - note.known_trx.pending = my_impl->local_txns.size(); + { + std::lock_guard g( my_impl->local_txns_mtx ); + note.known_trx.pending = my_impl->local_txns.size(); + } c->enqueue( note ); return; } @@ -1696,9 +1710,12 @@ namespace eosio { } received_transactions.erase(range.first, range.second); - if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end() ) { //found - fc_dlog(logger, "found trxid in local_trxs" ); - return; + { + std::lock_guard g( my_impl->local_txns_mtx ); + if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found + fc_dlog( logger, "found trxid in local_trxs" ); + return; + } } time_point_sec trx_expiration = ptrx->packed_trx->expiration(); @@ -1707,7 +1724,10 @@ namespace eosio { auto buff = create_send_buffer( trx ); node_transaction_state nts = {id, trx_expiration, 0, buff}; - my_impl->local_txns.insert(std::move(nts)); + { + std::lock_guard g( my_impl->local_txns_mtx ); + my_impl->local_txns.insert( std::move( nts )); + } my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { if( skips.find(c) != skips.end() || c->syncing ) { @@ -2438,6 +2458,7 @@ namespace eosio { // plan to get all except what we already know about. 
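// Hedged aside (illustrative sketch, not part of the patch): the hunks in this
// commit all apply the same rule -- local_txns is now read from net thread-pool
// threads, so every access happens inside a short std::lock_guard scope and any
// data needed afterwards is copied out before the lock is released. Minimal
// stand-alone shape of that rule, with invented names standing in for the
// multi_index container:
#include <cstddef>
#include <mutex>
#include <unordered_set>
struct shared_txn_index {
   std::mutex              mtx;
   std::unordered_set<int> ids;                     // stand-in for the multi_index container
   bool seen( int id ) {
      std::lock_guard<std::mutex> g( mtx );         // lock only around the lookup
      return ids.count( id ) != 0;
   }
   std::size_t size() {
      std::lock_guard<std::mutex> g( mtx );         // even size() is read under the lock
      return ids.size();
   }
};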
req.req_trx.mode = catch_up; send_req = true; + std::lock_guard g( my_impl->local_txns_mtx ); size_t known_sum = local_txns.size(); if( known_sum ) { for( const auto& t : local_txns.get() ) { @@ -2534,6 +2555,7 @@ namespace eosio { trx->get_signatures().size() * sizeof(signature_type); } + // called from thread_pool threads void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); @@ -2541,36 +2563,47 @@ namespace eosio { fc_dlog(logger, "got a txn in read-only mode - dropping"); return; } - if( sync_master->is_active(c) ) { - fc_dlog(logger, "got a txn during sync - dropping"); - return; - } auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; - if(local_txns.get().find(tid) != local_txns.end()) { - fc_dlog(logger, "got a duplicate transaction - dropping"); - return; + { + std::lock_guard g( local_txns_mtx ); + if( local_txns.get().find( tid ) != local_txns.end()) { + fc_dlog( logger, "got a duplicate transaction - dropping" ); + return; + } } - dispatcher->recv_transaction(c, tid); + connection_wptr weak_ptr = c; + app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, tid](){ + auto c = weak_ptr.lock(); + dispatcher->recv_transaction(c, tid); + }); c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); + bool accepted = false; if (result.contains()) { peer_dlog(c, "bad packed_transaction : ${m}", ("m",result.get()->what())); } else { auto trace = result.get(); if (!trace->except) { fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); - this->dispatcher->bcast_transaction(ptrx); - return; + accepted = true; } - peer_elog(c, "bad packed_transaction : ${m}", ("m",trace->except->what())); + if( !accepted ) { + peer_elog( c, "bad packed_transaction : ${m}", ("m", trace->except->what())); + } } - dispatcher->rejected_transaction(ptrx->id); + app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}]() { + if( accepted ) { + dispatcher->bcast_transaction( ptrx ); + } else { + dispatcher->rejected_transaction( ptrx->id ); + } + }); }); } @@ -2621,15 +2654,19 @@ namespace eosio { update_block_num ubn(blk_num); if( reason == no_reason ) { - for (const auto &recpt : msg->transactions) { - auto id = (recpt.trx.which() == 0) ? recpt.trx.get() : recpt.trx.get().id(); - auto ltx = local_txns.get().find(id); - if( ltx != local_txns.end()) { - local_txns.modify( ltx, ubn ); - } - auto ctx = c->trx_state.get().find(id); - if( ctx != c->trx_state.end()) { - c->trx_state.modify( ctx, ubn ); + { + std::lock_guard g( local_txns_mtx ); + for( const auto& recpt : msg->transactions ) { + auto id = (recpt.trx.which() == 0) ? 
recpt.trx.get() + : recpt.trx.get().id(); + auto ltx = local_txns.get().find( id ); + if( ltx != local_txns.end()) { + local_txns.modify( ltx, ubn ); + } + auto ctx = c->trx_state.get().find( id ); + if( ctx != c->trx_state.end()) { + c->trx_state.modify( ctx, ubn ); + } } } sync_master->recv_block(c, blk_id, blk_num); @@ -2697,8 +2734,6 @@ namespace eosio { start_txn_timer(); auto now = time_point::now(); - auto start_size = local_txns.size(); - expire_local_txns(); controller& cc = chain_plug->chain(); @@ -2711,20 +2746,29 @@ namespace eosio { auto &stale_blk = c->blk_state.get(); stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } - fc_dlog( logger, "expire_txns ${n}us size ${s} removed ${r}", - ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); + fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); } void net_plugin_impl::expire_local_txns() { - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec(0) ); - auto ex_up = old.upper_bound( time_point::now() ); - old.erase( ex_lo, ex_up ); - - auto& stale = local_txns.get(); controller& cc = chain_plug->chain(); uint32_t lib = cc.last_irreversible_block_num(); - stale.erase( stale.lower_bound(1), stale.upper_bound(lib) ); + size_t start_size = 0, end_size = 0; + + { + std::lock_guard g( local_txns_mtx ); + + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 )); + auto ex_up = old.upper_bound( time_point::now()); + old.erase( ex_lo, ex_up ); + + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib )); + end_size = local_txns.size(); + } + + fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)("r", start_size - end_size) ); } void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { From 910840f125f0e0cf6127a79662e89caeab390f5c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:08:40 -0600 Subject: [PATCH 0025/1648] Test of multi-threaded reading --- plugins/net_plugin/net_plugin.cpp | 162 ++++++++++++++++++++++-------- 1 file changed, 118 insertions(+), 44 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 320214ae933..0c3ba2d3083 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,6 +75,12 @@ namespace eosio { } }; + struct block_greater { + bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { + return lhs->block_num() > rhs->block_num(); + } + }; + typedef multi_index_container< node_transaction_state, indexed_by< @@ -157,7 +163,7 @@ namespace eosio { channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 1; // currently used by server_ioc + uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; optional server_ioc_work; @@ -500,12 +506,12 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; - fc::optional outstanding_read_bytes; + std::atomic outstanding_read_bytes{0}; queued_buffer buffer_queue; - uint32_t reads_in_flight = 0; + std::atomic reads_in_flight{0}; uint32_t trx_in_progress_size = 0; fc::sha256 node_id; handshake_message last_handshake_recv; @@ -638,9 +644,9 @@ namespace eosio { }; struct msg_handler : public fc::visitor { - net_plugin_impl &impl; + net_plugin_impl& impl; connection_ptr c; - msg_handler( 
net_plugin_impl &imp, const connection_ptr& conn) : impl(imp), c(conn) {} + msg_handler( net_plugin_impl& imp, const connection_ptr& conn) : impl(imp), c(conn) {} void operator()( const signed_block& msg ) const { EOS_ASSERT( false, plugin_config_exception, "operator()(signed_block&&) should be called" ); @@ -656,16 +662,30 @@ namespace eosio { } void operator()( signed_block&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); + shared_ptr ptr = std::make_shared( std::move( msg ) ); + connection_wptr weak = c; + app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if( c ) impl->handle_message( c, ptr ); + }); } void operator()( packed_transaction&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); + shared_ptr ptr = std::make_shared( std::move( msg ) ); + connection_wptr weak = c; + app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if( c) impl->handle_message( c, ptr ); + }); } template void operator()( T&& msg ) const { - impl.handle_message( c, std::forward(msg) ); + connection_wptr weak = c; + app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); } }; @@ -703,6 +723,8 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); + + std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -775,7 +797,9 @@ namespace eosio { initialize(); } - connection::~connection() {} + connection::~connection() { + pending_message_buffer.reset(); + } void connection::initialize() { auto *rnd = node_id.data(); @@ -823,7 +847,6 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); if( read_delay_timer ) read_delay_timer->cancel(); - pending_message_buffer.reset(); } void connection::txn_send_pending(const vector& ids) { @@ -1102,6 +1125,7 @@ namespace eosio { static std::shared_ptr> create_send_buffer( const signed_block_ptr& sb ) { // this implementation is to avoid copy of signed_block to net_message // matches which of net_message for signed_block + fc_dlog( logger, "sending block ${bn}", ("bn", sb->block_num()) ); return create_send_buffer( signed_block_which, *sb ); } @@ -1916,7 +1940,9 @@ namespace eosio { return false; } else { - start_read_message( con ); + boost::asio::post(*server_ioc, [this, con]() { + start_read_message( con ); + }); ++started_sessions; return true; // for now, we can just use the application main loop. @@ -2001,7 +2027,7 @@ namespace eosio { } connection_wptr weak_conn = conn; - std::size_t minimum_read = conn->outstanding_read_bytes ? *conn->outstanding_read_bytes : message_header_size; + std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? 
conn->outstanding_read_bytes.load() : message_header_size; if (use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; @@ -2017,7 +2043,7 @@ namespace eosio { return minimum_read - bytes_transferred; } }; - +/* if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) @@ -2026,7 +2052,7 @@ namespace eosio { if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); } else if( conn->reads_in_flight > def_max_reads_in_flight ) { - peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight) ); + peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); } else { peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); } @@ -2048,19 +2074,19 @@ namespace eosio { } ) ); return; } - +*/ ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { - app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); if (!conn) { return; } --conn->reads_in_flight; - conn->outstanding_read_bytes.reset(); + conn->outstanding_read_bytes = 0; + bool close_connection = false; try { if( !ec ) { @@ -2074,18 +2100,16 @@ namespace eosio { uint32_t bytes_in_buffer = conn->pending_message_buffer.bytes_to_read(); if (bytes_in_buffer < message_header_size) { - conn->outstanding_read_bytes.emplace(message_header_size - bytes_in_buffer); + conn->outstanding_read_bytes = message_header_size - bytes_in_buffer; break; } else { uint32_t message_length; auto index = conn->pending_message_buffer.read_index(); conn->pending_message_buffer.peek(&message_length, sizeof(message_length), index); if(message_length > def_send_buffer_size*2 || message_length == 0) { - boost::system::error_code ec; - fc_elog( logger,"incoming message length unexpected (${i}), from ${p}", - ("i", message_length)("p",boost::lexical_cast(conn->socket->remote_endpoint(ec))) ); - close(conn); - return; + fc_elog( logger,"incoming message length unexpected (${i})", ("i", message_length) ); + close_connection = true; + break; } auto total_message_bytes = message_length + message_header_size; @@ -2102,37 +2126,43 @@ namespace eosio { conn->pending_message_buffer.add_space( outstanding_message_bytes - available_buffer_bytes ); } - conn->outstanding_read_bytes.emplace(outstanding_message_bytes); + conn->outstanding_read_bytes = outstanding_message_bytes; break; } } } - start_read_message(conn); + if( !close_connection ) start_read_message( conn ); } else { - auto pname = conn->peer_name(); if (ec.value() != boost::asio::error::eof) { - fc_elog( logger, "Error reading message from ${p}: ${m}",("p",pname)( "m", ec.message() ) ); + fc_elog( logger, "Error reading message: ${m}", ( "m", ec.message() ) ); } else { - fc_ilog( logger, "Peer ${p} closed connection",("p",pname) ); + fc_ilog( logger, "Peer closed connection" ); } - close( conn ); + close_connection = true; } } catch(const std::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.what()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data: ${s}", ("s",ex.what()) ); + close_connection 
= true; } catch(const fc::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.to_string()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data ${s}", ("s",ex.to_string()) ); + close_connection = true; } catch (...) { - fc_elog( logger, "Undefined exception handling the read data from ${p}",( "p",conn->peer_name()) ); - close( conn ); + fc_elog( logger, "Undefined exception handling read data" ); + close_connection = true; + } + + if( close_connection ) { + connection_wptr weak_conn = conn; + app().post( priority::medium, "close conn", [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + close( conn ); + }); } - }); }); } catch (...) { string pname = conn ? conn->peer_name() : "no connection name"; @@ -2144,6 +2174,7 @@ namespace eosio { bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { try { // if next message is a block we already have, exit early +/* auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); @@ -2160,7 +2191,7 @@ namespace eosio { return true; } } - +*/ auto ds = conn->pending_message_buffer.create_datastream(); net_message msg; fc::raw::unpack( ds, msg ); @@ -2541,18 +2572,61 @@ namespace eosio { }); } - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { - controller &cc = chain_plug->chain(); - block_id_type blk_id = msg->id(); - uint32_t blk_num = msg->block_num(); + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& m) { + signed_block_ptr msg = m; + controller& cc = chain_plug->chain(); + block_id_type blk_id = msg ? msg->id() : block_id_type(); + uint32_t blk_num = msg ? msg->block_num() : 0; fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { - if( cc.fetch_block_by_id(blk_id)) { + if( msg && cc.fetch_block_by_id(blk_id)) { sync_master->recv_block(c, blk_id, blk_num); return; } + signed_block_ptr prev = msg ? cc.fetch_block_by_id( msg->previous ) : msg; + if( prev == nullptr ){ //&& sync_master->is_active(c) ) { + // see if top is ready + if( !sync_master->incoming_blocks.empty() ) { + prev = sync_master->incoming_blocks.top(); + auto prev_prev = cc.fetch_block_by_id( prev->previous ); + if( prev_prev != nullptr ) { + sync_master->incoming_blocks.pop(); + if(msg) sync_master->incoming_blocks.emplace( msg ); + msg = prev; + blk_id = msg->id(); + blk_num = msg->block_num(); + connection_wptr weak = c; + app().post(priority::medium, "re post blk", [this, weak](){ + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + }); + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } } catch( ...) { // should this even be caught? 
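// Hedged aside (illustrative sketch, not from the patch): the experiment above
// parks blocks that arrive before their parent in a priority_queue ordered by
// block number; block_greater inverts the comparison so the queue acts as a
// min-heap and the lowest-numbered buffered block is always at top(). Stand-alone
// shape of that structure, with invented names:
#include <cstdint>
#include <memory>
#include <queue>
#include <vector>
struct fake_block {
   uint32_t num;
   uint32_t block_num() const { return num; }
};
using fake_block_ptr = std::shared_ptr<fake_block>;
struct by_lowest_block_num {
   bool operator()( const fake_block_ptr& l, const fake_block_ptr& r ) const {
      return l->block_num() > r->block_num();       // '>' turns the default max-heap into a min-heap
   }
};
std::priority_queue<fake_block_ptr, std::vector<fake_block_ptr>, by_lowest_block_num> parked;
// usage: parked.push( std::make_shared<fake_block>( fake_block{ 42 } ) );
//        parked.top() is then the smallest block number still waiting for its parent.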
fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 0e7518c9c3b1dfb5a1cf31dd04eda8550798c514 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:16:39 -0600 Subject: [PATCH 0026/1648] Remove descriptions of tasks as not merged into develop yet --- plugins/net_plugin/net_plugin.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0c3ba2d3083..da049bfc09c 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -664,7 +664,7 @@ namespace eosio { void operator()( signed_block&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; - app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if( c ) impl->handle_message( c, ptr ); }); @@ -672,7 +672,7 @@ namespace eosio { void operator()( packed_transaction&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; - app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if( c) impl->handle_message( c, ptr ); }); @@ -682,7 +682,7 @@ namespace eosio { void operator()( T&& msg ) const { connection_wptr weak = c; - app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); @@ -2156,7 +2156,7 @@ namespace eosio { if( close_connection ) { connection_wptr weak_conn = conn; - app().post( priority::medium, "close conn", [this, weak_conn]() { + app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2598,7 +2598,7 @@ namespace eosio { blk_id = msg->id(); blk_num = msg->block_num(); connection_wptr weak = c; - app().post(priority::medium, "re post blk", [this, weak](){ + app().post(priority::medium, [this, weak](){ connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); }); @@ -2607,7 +2607,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2619,7 +2619,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); From 70bf0348aeaecf839b71d5cfec2608c3818e174a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:45:04 -0600 Subject: [PATCH 0027/1648] Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. 
--- plugins/net_plugin/net_plugin.cpp | 53 ++----------------------------- 1 file changed, 2 insertions(+), 51 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index da049bfc09c..eacf1da5e8d 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,12 +75,6 @@ namespace eosio { } }; - struct block_greater { - bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { - return lhs->block_num() > rhs->block_num(); - } - }; - typedef multi_index_container< node_transaction_state, indexed_by< @@ -506,7 +500,7 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; - std::atomic outstanding_read_bytes{0}; + std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads queued_buffer buffer_queue; @@ -723,8 +717,6 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); - - std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -2019,6 +2011,7 @@ namespace eosio { }); } + // only called from server_ioc thread void net_plugin_impl::start_read_message(const connection_ptr& conn) { try { @@ -2585,48 +2578,6 @@ namespace eosio { sync_master->recv_block(c, blk_id, blk_num); return; } - signed_block_ptr prev = msg ? cc.fetch_block_by_id( msg->previous ) : msg; - if( prev == nullptr ){ //&& sync_master->is_active(c) ) { - // see if top is ready - if( !sync_master->incoming_blocks.empty() ) { - prev = sync_master->incoming_blocks.top(); - auto prev_prev = cc.fetch_block_by_id( prev->previous ); - if( prev_prev != nullptr ) { - sync_master->incoming_blocks.pop(); - if(msg) sync_master->incoming_blocks.emplace( msg ); - msg = prev; - blk_id = msg->id(); - blk_num = msg->block_num(); - connection_wptr weak = c; - app().post(priority::medium, [this, weak](){ - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - }); - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } } catch( ...) { // should this even be caught? 
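// Hedged aside (illustrative sketch, not from the patch): the two comments added in
// this commit document thread confinement -- outstanding_read_bytes and the read
// path are only ever touched from server_ioc threads, so posting to that io_context
// is what provides ordering, not a mutex. Stand-alone shape of the idea, with
// invented names (assumes Boost.Asio is available):
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <cstddef>
struct confined_reader {
   boost::asio::io_context& ioc;         // the one context allowed to touch 'outstanding'
   std::size_t outstanding = 0;          // plain member: no lock needed while confined
   void schedule( std::size_t bytes ) {
      boost::asio::post( ioc, [this, bytes] {
         outstanding = bytes;            // runs on an ioc thread, never concurrently
         // ... kick off the next async_read here ...
      } );
   }
};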
fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 65c1436aad62ec3e296384a4f019b2e6f693d901 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:49:28 -0600 Subject: [PATCH 0028/1648] Revert unneeded changes to handle_message --- plugins/net_plugin/net_plugin.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index eacf1da5e8d..7e6974f64a4 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2565,16 +2565,15 @@ namespace eosio { }); } - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& m) { - signed_block_ptr msg = m; + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { controller& cc = chain_plug->chain(); - block_id_type blk_id = msg ? msg->id() : block_id_type(); - uint32_t blk_num = msg ? msg->block_num() : 0; + block_id_type blk_id = msg->id(); + uint32_t blk_num = msg->block_num(); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { - if( msg && cc.fetch_block_by_id(blk_id)) { + if( cc.fetch_block_by_id(blk_id) ) { sync_master->recv_block(c, blk_id, blk_num); return; } From 518d6e0876ab4581b328004d515bb9442dd7429d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 14:50:46 -0600 Subject: [PATCH 0029/1648] Logging fixes --- plugins/net_plugin/net_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7e6974f64a4..b77b2e0c34c 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1637,6 +1637,7 @@ namespace eosio { uint32_t bnum = bs->block_num; peer_block_state pbstate{bs->id, bnum}; + fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); std::shared_ptr> send_buffer; for( auto& cp : my_impl->connections ) { @@ -1651,7 +1652,7 @@ namespace eosio { if( !send_buffer ) { send_buffer = create_send_buffer( bs->block ); } - fc_dlog(logger, "bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name())); + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); } } From 971ba45dba70ba31004883a55d5c1f3ac8778db4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 18:26:06 -0600 Subject: [PATCH 0030/1648] Make delay_timer thread safe --- plugins/net_plugin/net_plugin.cpp | 56 ++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b77b2e0c34c..8a06f93c365 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -419,6 +419,7 @@ namespace eosio { } } + // thread safe uint32_t write_queue_size() const { return _write_queue_size; } bool is_out_queue_empty() const { return _out_queue.empty(); } @@ -477,7 +478,7 @@ namespace eosio { std::function callback; }; - uint32_t _write_queue_size = 0; + std::atomic _write_queue_size{0}; deque _write_queue; deque _sync_write_queue; // sync_write_queue will be sent first deque _out_queue; @@ -506,7 +507,7 @@ namespace eosio { queued_buffer buffer_queue; std::atomic reads_in_flight{0}; - uint32_t trx_in_progress_size = 0; + std::atomic trx_in_progress_size{0}; fc::sha256 node_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; @@ -516,6 
+517,7 @@ namespace eosio { uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; + std::mutex read_delay_timer_mutex; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; @@ -838,7 +840,10 @@ namespace eosio { my_impl->sync_master->reset_lib_num(shared_from_this()); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); - if( read_delay_timer ) read_delay_timer->cancel(); + { + std::lock_guard g( read_delay_timer_mutex ); + if( read_delay_timer ) read_delay_timer->cancel(); + } } void connection::txn_send_pending(const vector& ids) { @@ -2037,27 +2042,36 @@ namespace eosio { return minimum_read - bytes_transferred; } }; -/* + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) { // too much queued up, reschedule - if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { - peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); - } else if( conn->reads_in_flight > def_max_reads_in_flight ) { - peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); + uint32_t write_queue_size = conn->buffer_queue.write_queue_size(); + uint32_t trx_in_progress_size = conn->trx_in_progress_size; + uint32_t reads_in_flight = conn->reads_in_flight; + if( write_queue_size > def_max_write_queue_size ) { + peer_wlog( conn, "write_queue full ${s} bytes", ("s", write_queue_size) ); + } else if( reads_in_flight > def_max_reads_in_flight ) { + peer_wlog( conn, "max reads in flight ${s}", ("s", reads_in_flight) ); } else { - peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); + peer_wlog( conn, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); } - if( conn->buffer_queue.write_queue_size() > 2*def_max_write_queue_size || - conn->reads_in_flight > 2*def_max_reads_in_flight || - conn->trx_in_progress_size > 2*def_max_trx_in_progress_size ) + if( write_queue_size > 2*def_max_write_queue_size || + reads_in_flight > 2*def_max_reads_in_flight || + trx_in_progress_size > 2*def_max_trx_in_progress_size ) { - fc_wlog( logger, "queues over full, giving up on connection ${p}", ("p", conn->peer_name()) ); - my_impl->close( conn ); + fc_wlog( logger, "queues over full, giving up on connection" ); + app().post( priority::medium, [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + my_impl->close( conn ); + }); return; } + std::lock_guard g( conn->read_delay_timer_mutex ); if( !conn->read_delay_timer ) return; conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); conn->read_delay_timer->async_wait( @@ -2068,7 +2082,7 @@ namespace eosio { } ) ); return; } -*/ + ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2149,7 +2163,6 @@ namespace eosio { } if( close_connection ) { - connection_wptr weak_conn = conn; app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; @@ -2159,9 +2172,14 @@ namespace eosio { } }); } catch (...) { - string pname = conn ? 
conn->peer_name() : "no connection name"; - fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); - close( conn ); + fc_elog( logger, "Undefined exception in start_read_message" ); + connection_wptr weak_conn = conn; + app().post( priority::medium, [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + close( conn ); + }); } } From f72825020e53048ba674aee1d02946219ae37c97 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:08:40 -0600 Subject: [PATCH 0031/1648] Test of multi-threaded reading --- plugins/net_plugin/net_plugin.cpp | 54 +++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8a06f93c365..87489870609 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,6 +75,12 @@ namespace eosio { } }; + struct block_greater { + bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { + return lhs->block_num() > rhs->block_num(); + } + }; + typedef multi_index_container< node_transaction_state, indexed_by< @@ -719,6 +725,8 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); + + std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -2042,7 +2050,7 @@ namespace eosio { return minimum_read - bytes_transferred; } }; - +/* if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) @@ -2082,7 +2090,7 @@ namespace eosio { } ) ); return; } - +*/ ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2596,6 +2604,48 @@ namespace eosio { sync_master->recv_block(c, blk_id, blk_num); return; } + signed_block_ptr prev = msg ? cc.fetch_block_by_id( msg->previous ) : msg; + if( prev == nullptr ){ //&& sync_master->is_active(c) ) { + // see if top is ready + if( !sync_master->incoming_blocks.empty() ) { + prev = sync_master->incoming_blocks.top(); + auto prev_prev = cc.fetch_block_by_id( prev->previous ); + if( prev_prev != nullptr ) { + sync_master->incoming_blocks.pop(); + if(msg) sync_master->incoming_blocks.emplace( msg ); + msg = prev; + blk_id = msg->id(); + blk_num = msg->block_num(); + connection_wptr weak = c; + app().post(priority::medium, "re post blk", [this, weak](){ + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + }); + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } } catch( ...) { // should this even be caught? 
fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 4e8f9081511df17bd03289436865676253c5a5ce Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:16:39 -0600 Subject: [PATCH 0032/1648] Remove descriptions of tasks as not merged into develop yet --- plugins/net_plugin/net_plugin.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 87489870609..662d6ba983e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2171,6 +2171,7 @@ namespace eosio { } if( close_connection ) { + connection_wptr weak_conn = conn; app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; @@ -2617,7 +2618,7 @@ namespace eosio { blk_id = msg->id(); blk_num = msg->block_num(); connection_wptr weak = c; - app().post(priority::medium, "re post blk", [this, weak](){ + app().post(priority::medium, [this, weak](){ connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); }); @@ -2626,7 +2627,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2638,7 +2639,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); From 328b1ad1e5712d42eec8a7ab7156c533f2ca267f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:45:04 -0600 Subject: [PATCH 0033/1648] Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. --- plugins/net_plugin/net_plugin.cpp | 50 ------------------------------- 1 file changed, 50 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 662d6ba983e..93d780012ad 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,12 +75,6 @@ namespace eosio { } }; - struct block_greater { - bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { - return lhs->block_num() > rhs->block_num(); - } - }; - typedef multi_index_container< node_transaction_state, indexed_by< @@ -725,8 +719,6 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); - - std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -2605,48 +2597,6 @@ namespace eosio { sync_master->recv_block(c, blk_id, blk_num); return; } - signed_block_ptr prev = msg ? 
cc.fetch_block_by_id( msg->previous ) : msg; - if( prev == nullptr ){ //&& sync_master->is_active(c) ) { - // see if top is ready - if( !sync_master->incoming_blocks.empty() ) { - prev = sync_master->incoming_blocks.top(); - auto prev_prev = cc.fetch_block_by_id( prev->previous ); - if( prev_prev != nullptr ) { - sync_master->incoming_blocks.pop(); - if(msg) sync_master->incoming_blocks.emplace( msg ); - msg = prev; - blk_id = msg->id(); - blk_num = msg->block_num(); - connection_wptr weak = c; - app().post(priority::medium, [this, weak](){ - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - }); - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 21f3607f47163e61afe2e7bcc24c378028a20b33 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 18:26:06 -0600 Subject: [PATCH 0034/1648] Make delay_timer thread safe --- plugins/net_plugin/net_plugin.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 93d780012ad..8a06f93c365 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2042,7 +2042,7 @@ namespace eosio { return minimum_read - bytes_transferred; } }; -/* + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) @@ -2082,7 +2082,7 @@ namespace eosio { } ) ); return; } -*/ + ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2163,7 +2163,6 @@ namespace eosio { } if( close_connection ) { - connection_wptr weak_conn = conn; app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; From 5fef638c0c2f7580c8d46b500721bd5f18388555 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 2 Mar 2019 11:22:30 -0600 Subject: [PATCH 0035/1648] Remove unneeded access to atomic --- plugins/net_plugin/net_plugin.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8a06f93c365..1a1b9e1ffa3 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -26,6 +26,8 @@ #include #include +#include + using namespace eosio::chain::plugin_interface::compat; namespace fc { @@ -2026,7 +2028,9 @@ namespace eosio { } connection_wptr weak_conn = conn; - std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? conn->outstanding_read_bytes.load() : message_header_size; + std::size_t minimum_read = + std::atomic_exchangeoutstanding_read_bytes.load())>( &conn->outstanding_read_bytes, 0 ); + minimum_read = minimum_read != 0 ? 
minimum_read : message_header_size; if (use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; @@ -2093,7 +2097,6 @@ namespace eosio { } --conn->reads_in_flight; - conn->outstanding_read_bytes = 0; bool close_connection = false; try { From c2b3e2b46f5eac5c7cfb1aaa3468561735bd04e2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 4 Mar 2019 15:44:26 -0500 Subject: [PATCH 0036/1648] Move sig recovery in producer plugin to use thread pool of chain controller --- plugins/producer_plugin/producer_plugin.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a35fa34a9c5..6b7646e9cdd 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -351,8 +351,11 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, trx, persist_until_expired, next]() { + fc::microseconds max_trx_cpu_usage{ cfg.max_transaction_cpu_usage }; + auto& tp = *_thread_pool; + boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { + // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress + transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage ); if( trx->signing_keys_future.valid() ) trx->signing_keys_future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { From e55a3ab6bade592ffcf82033bf459b078f41520a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 4 Mar 2019 15:47:26 -0500 Subject: [PATCH 0037/1648] Cache db_read_mode to avoid accessing controller --- plugins/net_plugin/net_plugin.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 1a1b9e1ffa3..04c0686cdfb 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -145,6 +145,7 @@ namespace eosio { bool network_version_match = false; chain_id_type chain_id; fc::sha256 node_id; + eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; string user_agent_name; chain_plugin* chain_plug = nullptr; @@ -2067,7 +2068,7 @@ namespace eosio { trx_in_progress_size > 2*def_max_trx_in_progress_size ) { fc_wlog( logger, "queues over full, giving up on connection" ); - app().post( priority::medium, [this, weak_conn]() { + app().post( priority::medium, [weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2549,8 +2550,7 @@ namespace eosio { void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); - controller& cc = my_impl->chain_plug->chain(); - if( cc.get_read_mode() == eosio::db_read_mode::READ_ONLY ) { + if( db_read_mode == eosio::db_read_mode::READ_ONLY ) { fc_dlog(logger, "got a txn in read-only mode - dropping"); return; } @@ -3112,7 +3112,8 @@ namespace eosio { 
my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); - if( cc.get_read_mode() == chain::db_read_mode::READ_ONLY ) { + my->db_read_mode = cc.get_read_mode(); + if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; fc_ilog( logger, "node in read-only mode setting max_nodes_per_host to 0 to prevent connections" ); } From 13d2e375b3b9b65c2e64403d9326329755df4f7f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:16:13 -0500 Subject: [PATCH 0038/1648] Make chain_id const since read from thread pool threads --- libraries/chain/controller.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index f3b0a841981..b8470993c05 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -124,7 +124,7 @@ struct controller_impl { resource_limits_manager resource_limits; authorization_manager authorization; controller::config conf; - chain_id_type chain_id; + const chain_id_type chain_id; // read by thread_pool threads bool replaying= false; optional replay_head_time; db_read_mode read_mode = db_read_mode::SPECULATIVE; From 4d1e3f006f71139dff984b050ec82a59d1e48d21 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:16:54 -0500 Subject: [PATCH 0039/1648] Add trx id to log message --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 04c0686cdfb..b6984fb4c3f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2575,7 +2575,7 @@ namespace eosio { } else { auto trace = result.get(); if (!trace->except) { - fc_dlog(logger, "chain accepted transaction"); + fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); this->dispatcher->bcast_transaction(ptrx); return; } From 8e80ac1e97d9f408cf1d63c7e3b66d1771471564 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:18:34 -0500 Subject: [PATCH 0040/1648] Use command line max_transaction_time for limit of transaction sig recovery. This was done so that global properties configuration does not have to be accessed since the plan is to move this code into the thread pool. 
--- plugins/producer_plugin/producer_plugin.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 6b7646e9cdd..c2d600c467b 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -134,7 +134,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _thread_pool; - int32_t _max_transaction_time_ms; + std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool fc::microseconds _max_irreversible_block_age_us; int32_t _produce_time_offset_us = 0; int32_t _last_block_time_offset_us = 0; @@ -350,8 +350,7 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); - const auto& cfg = chain.get_global_properties().configuration; - fc::microseconds max_trx_cpu_usage{ cfg.max_transaction_cpu_usage }; + fc::microseconds max_trx_cpu_usage = fc::milliseconds( _max_transaction_time_ms.load() ); auto& tp = *_thread_pool; boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress From e8f864d1eaf3721f4c5c031df318871b682f0380 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:58:27 -0500 Subject: [PATCH 0041/1648] Remove unneeded ack of transactions. Already processed by handle_message of packed_transaction_ptr --- plugins/net_plugin/net_plugin.cpp | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b6984fb4c3f..eb2330f5bb1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -158,8 +158,6 @@ namespace eosio { bool use_socket_read_watermark = false; - channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; @@ -189,7 +187,6 @@ namespace eosio { void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); void accepted_block(const block_state_ptr&); - void transaction_ack(const std::pair&); bool is_valid( const handshake_message &msg); @@ -2784,17 +2781,6 @@ namespace eosio { dispatcher->bcast_block(block); } - void net_plugin_impl::transaction_ack(const std::pair& results) { - const auto& id = results.second->id; - if (results.first) { - fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); - dispatcher->rejected_transaction(id); - } else { - fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); - dispatcher->bcast_transaction(results.second); - } - } - bool net_plugin_impl::authenticate_peer(const handshake_message& msg) const { if(allowed_connections == None) return false; @@ -3110,8 +3096,6 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } - my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); - my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; From 53c978d53e591aadcdf9f5bdc0997fe1f4ea3953 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 15:51:47 -0500 Subject: [PATCH 0042/1648] Revert 
"Remove unneeded ack of transactions. Already processed by handle_message of packed_transaction_ptr" This reverts commit 7f340f7347da146b360251214fbf655d11a955cd. --- plugins/net_plugin/net_plugin.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index eb2330f5bb1..b6984fb4c3f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -158,6 +158,8 @@ namespace eosio { bool use_socket_read_watermark = false; + channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; @@ -187,6 +189,7 @@ namespace eosio { void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); void accepted_block(const block_state_ptr&); + void transaction_ack(const std::pair&); bool is_valid( const handshake_message &msg); @@ -2781,6 +2784,17 @@ namespace eosio { dispatcher->bcast_block(block); } + void net_plugin_impl::transaction_ack(const std::pair& results) { + const auto& id = results.second->id; + if (results.first) { + fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); + dispatcher->rejected_transaction(id); + } else { + fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); + dispatcher->bcast_transaction(results.second); + } + } + bool net_plugin_impl::authenticate_peer(const handshake_message& msg) const { if(allowed_connections == None) return false; @@ -3096,6 +3110,8 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } + my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; From 0b347921c09b1a6a40695d66150704d15e741e89 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 18:54:14 -0500 Subject: [PATCH 0043/1648] Fix tests, handle -1 as unlimited max_transaction_tim_ms. --- plugins/producer_plugin/producer_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index c2d600c467b..217f931a532 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -350,7 +350,8 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); - fc::microseconds max_trx_cpu_usage = fc::milliseconds( _max_transaction_time_ms.load() ); + const auto max_trx_time_ms = _max_transaction_time_ms.load(); + fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); auto& tp = *_thread_pool; boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress From b0838c1fd372b0c28f107353bbc530ebd4aafb3e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 20:25:24 -0500 Subject: [PATCH 0044/1648] force a build --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b6984fb4c3f..099c3a6621c 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2726,8 +2726,8 @@ namespace eosio { auto &stale_blk = c->blk_state.get(); stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } - fc_dlog(logger, "expire_txns ${n}us size ${s} removed ${r}", - ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); + fc_dlog( logger, "expire_txns ${n}us size ${s} removed ${r}", + ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); } void net_plugin_impl::expire_local_txns() { From c546380d444ee233453a221a574454a8178468c1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 6 Mar 2019 13:55:44 -0500 Subject: [PATCH 0045/1648] Move more of incoming transaction processing to thread pool --- plugins/net_plugin/net_plugin.cpp | 144 +++++++++++++++++++----------- 1 file changed, 94 insertions(+), 50 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 099c3a6621c..9e828bbb45d 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -152,6 +152,7 @@ namespace eosio { producer_plugin* producer_plug = nullptr; int started_sessions = 0; + std::mutex local_txns_mtx; node_transaction_index local_txns; shared_ptr resolver; @@ -670,11 +671,7 @@ namespace eosio { } void operator()( packed_transaction&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if( c) impl->handle_message( c, ptr ); - }); + impl.handle_message( c, ptr ); } template @@ -852,21 +849,35 @@ namespace eosio { void connection::txn_send_pending(const vector& ids) { const std::set known_ids(ids.cbegin(), ids.cend()); my_impl->expire_local_txns(); - for(auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ){ - const bool found = known_ids.find( tx->id ) != known_ids.cend(); - if( !found ) { - queue_write( tx->serialized_txn, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + vector>> trx_to_send; + { + std::lock_guard g( my_impl->local_txns_mtx ); + for( auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ) { + const bool found = known_ids.find( tx->id ) != known_ids.cend(); + if( !found ) { + trx_to_send.emplace_back( tx->serialized_txn ); + } } } + for( const auto& t : trx_to_send ) { + queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + } } void connection::txn_send(const vector& ids) { - for(const auto& t : ids) { - auto tx = my_impl->local_txns.get().find(t); - if( tx != my_impl->local_txns.end() ) { - queue_write( tx->serialized_txn, true, priority::low, []( 
boost::system::error_code ec, std::size_t ) {} ); + vector>> trx_to_send; + { + std::lock_guard g( my_impl->local_txns_mtx ); + for( const auto& t : ids ) { + auto tx = my_impl->local_txns.get().find( t ); + if( tx != my_impl->local_txns.end()) { + trx_to_send.emplace_back( tx->serialized_txn ); + } } } + for( const auto& t : trx_to_send ) { + queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + } } void connection::blk_send_branch() { @@ -1477,7 +1488,10 @@ namespace eosio { notice_message note; note.known_blocks.mode = none; note.known_trx.mode = catch_up; - note.known_trx.pending = my_impl->local_txns.size(); + { + std::lock_guard g( my_impl->local_txns_mtx ); + note.known_trx.pending = my_impl->local_txns.size(); + } c->enqueue( note ); return; } @@ -1709,9 +1723,12 @@ namespace eosio { } received_transactions.erase(range.first, range.second); - if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end() ) { //found - fc_dlog(logger, "found trxid in local_trxs" ); - return; + { + std::lock_guard g( my_impl->local_txns_mtx ); + if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found + fc_dlog( logger, "found trxid in local_trxs" ); + return; + } } time_point_sec trx_expiration = ptrx->packed_trx->expiration(); @@ -1720,7 +1737,10 @@ namespace eosio { auto buff = create_send_buffer( trx ); node_transaction_state nts = {id, trx_expiration, 0, buff}; - my_impl->local_txns.insert(std::move(nts)); + { + std::lock_guard g( my_impl->local_txns_mtx ); + my_impl->local_txns.insert( std::move( nts )); + } my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { if( skips.find(c) != skips.end() || c->syncing ) { @@ -2451,6 +2471,7 @@ namespace eosio { // plan to get all except what we already know about. 
req.req_trx.mode = catch_up; send_req = true; + std::lock_guard g( my_impl->local_txns_mtx ); size_t known_sum = local_txns.size(); if( known_sum ) { for( const auto& t : local_txns.get() ) { @@ -2547,6 +2568,7 @@ namespace eosio { trx->get_signatures().size() * sizeof(signature_type); } + // called from thread_pool threads void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); @@ -2554,36 +2576,47 @@ namespace eosio { fc_dlog(logger, "got a txn in read-only mode - dropping"); return; } - if( sync_master->is_active(c) ) { - fc_dlog(logger, "got a txn during sync - dropping"); - return; - } auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; - if(local_txns.get().find(tid) != local_txns.end()) { - fc_dlog(logger, "got a duplicate transaction - dropping"); - return; + { + std::lock_guard g( local_txns_mtx ); + if( local_txns.get().find( tid ) != local_txns.end()) { + fc_dlog( logger, "got a duplicate transaction - dropping" ); + return; + } } - dispatcher->recv_transaction(c, tid); + connection_wptr weak_ptr = c; + app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, tid](){ + auto c = weak_ptr.lock(); + dispatcher->recv_transaction(c, tid); + }); c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); + bool accepted = false; if (result.contains()) { peer_dlog(c, "bad packed_transaction : ${m}", ("m",result.get()->what())); } else { auto trace = result.get(); if (!trace->except) { fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); - this->dispatcher->bcast_transaction(ptrx); - return; + accepted = true; } - peer_elog(c, "bad packed_transaction : ${m}", ("m",trace->except->what())); + if( !accepted ) { + peer_elog( c, "bad packed_transaction : ${m}", ("m", trace->except->what())); + } } - dispatcher->rejected_transaction(ptrx->id); + app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}]() { + if( accepted ) { + dispatcher->bcast_transaction( ptrx ); + } else { + dispatcher->rejected_transaction( ptrx->id ); + } + }); }); } @@ -2634,15 +2667,19 @@ namespace eosio { update_block_num ubn(blk_num); if( reason == no_reason ) { - for (const auto &recpt : msg->transactions) { - auto id = (recpt.trx.which() == 0) ? recpt.trx.get() : recpt.trx.get().id(); - auto ltx = local_txns.get().find(id); - if( ltx != local_txns.end()) { - local_txns.modify( ltx, ubn ); - } - auto ctx = c->trx_state.get().find(id); - if( ctx != c->trx_state.end()) { - c->trx_state.modify( ctx, ubn ); + { + std::lock_guard g( local_txns_mtx ); + for( const auto& recpt : msg->transactions ) { + auto id = (recpt.trx.which() == 0) ? 
recpt.trx.get() + : recpt.trx.get().id(); + auto ltx = local_txns.get().find( id ); + if( ltx != local_txns.end()) { + local_txns.modify( ltx, ubn ); + } + auto ctx = c->trx_state.get().find( id ); + if( ctx != c->trx_state.end()) { + c->trx_state.modify( ctx, ubn ); + } } } sync_master->recv_block(c, blk_id, blk_num); @@ -2711,8 +2748,6 @@ namespace eosio { start_txn_timer(); auto now = time_point::now(); - auto start_size = local_txns.size(); - expire_local_txns(); controller& cc = chain_plug->chain(); @@ -2726,20 +2761,29 @@ namespace eosio { auto &stale_blk = c->blk_state.get(); stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } - fc_dlog( logger, "expire_txns ${n}us size ${s} removed ${r}", - ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); + fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); } void net_plugin_impl::expire_local_txns() { - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec(0) ); - auto ex_up = old.upper_bound( time_point::now() ); - old.erase( ex_lo, ex_up ); - - auto& stale = local_txns.get(); controller& cc = chain_plug->chain(); uint32_t lib = cc.last_irreversible_block_num(); - stale.erase( stale.lower_bound(1), stale.upper_bound(lib) ); + size_t start_size = 0, end_size = 0; + + { + std::lock_guard g( local_txns_mtx ); + + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 )); + auto ex_up = old.upper_bound( time_point::now()); + old.erase( ex_lo, ex_up ); + + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib )); + end_size = local_txns.size(); + } + + fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)("r", start_size - end_size) ); } void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { From 93f620f59dca7379554d407a8c474d31a6af0408 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 7 Mar 2019 09:10:13 -0500 Subject: [PATCH 0046/1648] Use unique_lock instead of lock_guard to clean up code --- plugins/net_plugin/net_plugin.cpp | 120 ++++++++++++++---------------- 1 file changed, 56 insertions(+), 64 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 9e828bbb45d..af00cba06ff 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -521,7 +521,7 @@ namespace eosio { uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; - std::mutex read_delay_timer_mutex; + std::mutex read_delay_timer_mtx; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; @@ -840,25 +840,23 @@ namespace eosio { my_impl->sync_master->reset_lib_num(shared_from_this()); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); - { - std::lock_guard g( read_delay_timer_mutex ); - if( read_delay_timer ) read_delay_timer->cancel(); - } + + std::lock_guard g( read_delay_timer_mtx ); + if( read_delay_timer ) read_delay_timer->cancel(); } void connection::txn_send_pending(const vector& ids) { const std::set known_ids(ids.cbegin(), ids.cend()); my_impl->expire_local_txns(); vector>> trx_to_send; - { - std::lock_guard g( my_impl->local_txns_mtx ); - for( auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ) { - const bool found = known_ids.find( tx->id ) != known_ids.cend(); - if( !found ) { - trx_to_send.emplace_back( tx->serialized_txn ); - } + 
std::unique_lock g( my_impl->local_txns_mtx ); + for( auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ) { + const bool found = known_ids.find( tx->id ) != known_ids.cend(); + if( !found ) { + trx_to_send.emplace_back( tx->serialized_txn ); } } + g.unlock(); for( const auto& t : trx_to_send ) { queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); } @@ -866,15 +864,14 @@ namespace eosio { void connection::txn_send(const vector& ids) { vector>> trx_to_send; - { - std::lock_guard g( my_impl->local_txns_mtx ); - for( const auto& t : ids ) { - auto tx = my_impl->local_txns.get().find( t ); - if( tx != my_impl->local_txns.end()) { - trx_to_send.emplace_back( tx->serialized_txn ); - } + std::unique_lock g( my_impl->local_txns_mtx ); + for( const auto& t : ids ) { + auto tx = my_impl->local_txns.get().find( t ); + if( tx != my_impl->local_txns.end()) { + trx_to_send.emplace_back( tx->serialized_txn ); } } + g.unlock(); for( const auto& t : trx_to_send ) { queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); } @@ -1488,10 +1485,9 @@ namespace eosio { notice_message note; note.known_blocks.mode = none; note.known_trx.mode = catch_up; - { - std::lock_guard g( my_impl->local_txns_mtx ); - note.known_trx.pending = my_impl->local_txns.size(); - } + std::unique_lock g( my_impl->local_txns_mtx ); + note.known_trx.pending = my_impl->local_txns.size(); + g.unlock(); c->enqueue( note ); return; } @@ -1723,13 +1719,12 @@ namespace eosio { } received_transactions.erase(range.first, range.second); - { - std::lock_guard g( my_impl->local_txns_mtx ); - if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found - fc_dlog( logger, "found trxid in local_trxs" ); - return; - } + std::unique_lock g( my_impl->local_txns_mtx ); + if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found + fc_dlog( logger, "found trxid in local_trxs" ); + return; } + g.unlock(); time_point_sec trx_expiration = ptrx->packed_trx->expiration(); const packed_transaction& trx = *ptrx->packed_trx; @@ -1737,10 +1732,9 @@ namespace eosio { auto buff = create_send_buffer( trx ); node_transaction_state nts = {id, trx_expiration, 0, buff}; - { - std::lock_guard g( my_impl->local_txns_mtx ); - my_impl->local_txns.insert( std::move( nts )); - } + g.lock(); + my_impl->local_txns.insert( std::move( nts )); + g.unlock(); my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { if( skips.find(c) != skips.end() || c->syncing ) { @@ -2096,7 +2090,7 @@ namespace eosio { }); return; } - std::lock_guard g( conn->read_delay_timer_mutex ); + std::lock_guard g( conn->read_delay_timer_mtx ); if( !conn->read_delay_timer ) return; conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); conn->read_delay_timer->async_wait( @@ -2580,13 +2574,12 @@ namespace eosio { auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; - { - std::lock_guard g( local_txns_mtx ); - if( local_txns.get().find( tid ) != local_txns.end()) { - fc_dlog( logger, "got a duplicate transaction - dropping" ); - return; - } + std::unique_lock g( local_txns_mtx ); + if( local_txns.get().find( tid ) != local_txns.end()) { + fc_dlog( logger, "got a duplicate transaction - dropping" ); + return; } + g.unlock(); connection_wptr weak_ptr = c; app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, tid](){ auto c = weak_ptr.lock(); @@ -2667,21 
+2660,20 @@ namespace eosio { update_block_num ubn(blk_num); if( reason == no_reason ) { - { - std::lock_guard g( local_txns_mtx ); - for( const auto& recpt : msg->transactions ) { - auto id = (recpt.trx.which() == 0) ? recpt.trx.get() - : recpt.trx.get().id(); - auto ltx = local_txns.get().find( id ); - if( ltx != local_txns.end()) { - local_txns.modify( ltx, ubn ); - } - auto ctx = c->trx_state.get().find( id ); - if( ctx != c->trx_state.end()) { - c->trx_state.modify( ctx, ubn ); - } + std::unique_lock g( local_txns_mtx ); + for( const auto& recpt : msg->transactions ) { + auto id = (recpt.trx.which() == 0) ? recpt.trx.get() + : recpt.trx.get().id(); + auto ltx = local_txns.get().find( id ); + if( ltx != local_txns.end()) { + local_txns.modify( ltx, ubn ); + } + auto ctx = c->trx_state.get().find( id ); + if( ctx != c->trx_state.end()) { + c->trx_state.modify( ctx, ubn ); } } + g.unlock(); sync_master->recv_block(c, blk_id, blk_num); } else { @@ -2769,19 +2761,19 @@ namespace eosio { uint32_t lib = cc.last_irreversible_block_num(); size_t start_size = 0, end_size = 0; - { - std::lock_guard g( local_txns_mtx ); + std::unique_lock g( local_txns_mtx ); - start_size = local_txns.size(); - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec( 0 )); - auto ex_up = old.upper_bound( time_point::now()); - old.erase( ex_lo, ex_up ); + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 )); + auto ex_up = old.upper_bound( time_point::now()); + old.erase( ex_lo, ex_up ); - auto& stale = local_txns.get(); - stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib )); - end_size = local_txns.size(); - } + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib )); + end_size = local_txns.size(); + + g.unlock(); fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)("r", start_size - end_size) ); } From c3830f34882cdbc9d4a3ff9e9861c3262fd65d98 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 8 Mar 2019 09:34:05 -0500 Subject: [PATCH 0047/1648] Move creation of future outside post to prevent wait on future right after creation --- plugins/producer_plugin/producer_plugin.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 217f931a532..569fce37dd8 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -352,10 +352,10 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); const auto max_trx_time_ms = _max_transaction_time_ms.load(); fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); + // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress + transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage ); auto& tp = *_thread_pool; - boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { - // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress - transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage ); + boost::asio::post( tp, [self = this, trx, persist_until_expired, next]() { if( trx->signing_keys_future.valid() ) trx->signing_keys_future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { From a0207ef200928246cb7ccf056da022ea338933d7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 11 Mar 2019 11:36:25 -0500 Subject: [PATCH 0048/1648] Test run with producer thread pool instead of chain controller thread pool --- plugins/producer_plugin/producer_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 569fce37dd8..50f6c531d0d 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -352,9 +352,9 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); const auto max_trx_time_ms = _max_transaction_time_ms.load(); fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); - // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress - transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage ); auto& tp = *_thread_pool; + // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress + transaction_metadata::create_signing_keys_future( trx, tp, chain.get_chain_id(), max_trx_cpu_usage ); boost::asio::post( tp, [self = this, trx, persist_until_expired, next]() { if( trx->signing_keys_future.valid() ) trx->signing_keys_future.wait(); From a7a588733d0281f4096ea79deea89d3e9e4eb7b8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 11 Mar 2019 15:57:18 -0500 Subject: [PATCH 0049/1648] Combine received_blocks and blck_state. Make blck_state thread safe and use it for short-cut out when receiving block. 
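The combined table is keyed by block id plus connection id and guarded by a mutex, so any thread can answer both "has this peer already seen the block" and "have we seen this block at all" (the short-cut taken when a block arrives). A simplified stand-alone sketch of that shape, using std::map/std::set and a string block id in place of the boost::multi_index container and block_id_type used in the patch:

   // Simplified illustration, not the dispatch_manager code.
   #include <cstdint>
   #include <map>
   #include <mutex>
   #include <set>
   #include <string>

   class peer_block_table {
      std::mutex mtx_;
      std::map<std::string, std::set<uint32_t>> by_id_;   // block id -> connections that have it

   public:
      // returns false if this connection already reported the block
      bool add_peer_block( const std::string& blk_id, uint32_t connection_id ) {
         std::lock_guard<std::mutex> g( mtx_ );
         return by_id_[blk_id].insert( connection_id ).second;
      }
      bool peer_has_block( const std::string& blk_id, uint32_t connection_id ) {
         std::lock_guard<std::mutex> g( mtx_ );
         auto it = by_id_.find( blk_id );
         return it != by_id_.end() && it->second.count( connection_id ) != 0;
      }
      // short-cut check on receive: block known from any connection?
      bool have_block( const std::string& blk_id ) {
         std::lock_guard<std::mutex> g( mtx_ );
         return by_id_.find( blk_id ) != by_id_.end();
      }
   };

The diff below provides the same interface on a multi_index with a composite (id, connection_id) key.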
--- plugins/net_plugin/net_plugin.cpp | 135 ++++++++++++++++-------------- 1 file changed, 70 insertions(+), 65 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index af00cba06ff..87a925aa519 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -110,6 +110,7 @@ namespace eosio { uint32_t max_client_count = 0; uint32_t max_nodes_per_host = 1; uint32_t num_clients = 0; + uint32_t current_connection_id = 0; vector supplied_peers; vector allowed_peers; ///< peer keys allowed to connect @@ -363,20 +364,24 @@ namespace eosio { > transaction_state_index; - /** - * - */ struct peer_block_state { block_id_type id; uint32_t block_num; + uint32_t connection_id; }; typedef multi_index_container< eosio::peer_block_state, indexed_by< - ordered_unique< tag, member, sha256_less >, - ordered_unique< tag, member > - > + ordered_unique< tag, + composite_key< peer_block_state, + member, + member + >, + composite_key_compare< sha256_less, std::less > + >, + ordered_non_unique< tag, member > + > > peer_block_state_index; @@ -498,7 +503,6 @@ namespace eosio { ~connection(); void initialize(); - peer_block_state_index blk_state; transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive @@ -513,6 +517,7 @@ namespace eosio { std::atomic reads_in_flight{0}; std::atomic trx_in_progress_size{0}; fc::sha256 node_id; + const uint32_t connection_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; int16_t sent_handshake_count = 0; @@ -614,9 +619,6 @@ namespace eosio { bool to_sync_queue = false); void do_queue_write(int priority); - bool add_peer_block(const peer_block_state& pbs); - bool peer_has_block(const block_id_type& blkid); - fc::optional _logger_variant; const fc::variant_object& get_logger_variant() { if (!_logger_variant) { @@ -722,10 +724,11 @@ namespace eosio { }; class dispatch_manager { - public: - std::multimap received_blocks; + std::mutex blk_state_mtx; + peer_block_state_index blk_state; std::multimap received_transactions; + public: void bcast_transaction(const transaction_metadata_ptr& trx); void rejected_transaction(const transaction_id_type& msg); void bcast_block(const block_state_ptr& bs); @@ -737,17 +740,21 @@ namespace eosio { void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); void retry_fetch(const connection_ptr& conn); + + bool add_peer_block(const peer_block_state& pbs); + bool peer_has_block(const block_id_type& blkid, uint32_t connection_id); + bool have_block(const block_id_type& blkid); }; //--------------------------------------------------------------------------- connection::connection( string endpoint ) - : blk_state(), - trx_state(), + : trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), + connection_id( ++my_impl->current_connection_id ), last_handshake_recv(), last_handshake_sent(), sent_handshake_count(0), @@ -767,12 +774,12 @@ namespace eosio { } connection::connection( socket_ptr s ) - : blk_state(), - trx_state(), + : trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), socket( s ), node_id(), + connection_id( ++my_impl->current_connection_id ), last_handshake_recv(), last_handshake_sent(), sent_handshake_count(0), @@ -812,7 +819,6 @@ namespace eosio { void connection::reset() { peer_requested.reset(); - blk_state.clear(); trx_state.clear(); 
} @@ -933,7 +939,7 @@ namespace eosio { signed_block_ptr b = cc.fetch_block_by_id(blkid); if(b) { fc_dlog(logger,"found block for id at num ${n}",("n",b->block_num())); - add_peer_block({blkid, block_header::num_from_id(blkid)}); + my_impl->dispatcher->add_peer_block({blkid, block_header::num_from_id(blkid), connection_id}); enqueue_block( b ); } else { fc_ilog( logger, "fetch block by id returned null, id ${id} for ${p}", @@ -1254,20 +1260,6 @@ namespace eosio { sync_wait(); } - bool connection::add_peer_block(const peer_block_state& entry) { - auto bptr = blk_state.get().find(entry.id); - bool added = (bptr == blk_state.end()); - if (added){ - blk_state.insert(entry); - } - return added; - } - - bool connection::peer_has_block( const block_id_type& blkid ) { - auto blk_itr = blk_state.get().find(blkid); - return blk_itr != blk_state.end(); - } - //----------------------------------------------------------- sync_manager::sync_manager( uint32_t req_span ) @@ -1645,26 +1637,48 @@ namespace eosio { //------------------------------------------------------------------------ - void dispatch_manager::bcast_block(const block_state_ptr& bs) { - std::set skips; - auto range = received_blocks.equal_range(bs->id); - for (auto org = range.first; org != range.second; ++org) { - skips.insert(org->second); + bool dispatch_manager::add_peer_block(const peer_block_state& entry) { + std::lock_guard g(blk_state_mtx); + auto bptr = blk_state.get().find(std::make_tuple(std::ref(entry.id), entry.connection_id)); + bool added = (bptr == blk_state.end()); + if (added){ + blk_state.insert(entry); } - received_blocks.erase(range.first, range.second); + return added; + } + bool dispatch_manager::peer_has_block( const block_id_type& blkid, uint32_t connection_id ) { + std::lock_guard g(blk_state_mtx); + auto blk_itr = blk_state.get().find(std::make_tuple(std::ref(blkid), connection_id)); + return blk_itr != blk_state.end(); + } + + bool dispatch_manager::have_block( const block_id_type& blkid ) { + std::lock_guard g(blk_state_mtx); + auto blk_itr = blk_state.get().find( blkid ); + return blk_itr != blk_state.end(); + } + + void dispatch_manager::expire_blocks( uint32_t lib_num ) { + std::lock_guard g(blk_state_mtx); + auto& stale_blk = blk_state.get(); + stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib_num) ); + } + + void dispatch_manager::bcast_block(const block_state_ptr& bs) { uint32_t bnum = bs->block_num; peer_block_state pbstate{bs->id, bnum}; fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); std::shared_ptr> send_buffer; for( auto& cp : my_impl->connections ) { - if( skips.find( cp ) != skips.end() || !cp->current() ) { + if( !cp->current() ) { continue; } bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; if( !has_block ) { - if( !cp->add_peer_block( pbstate ) ) { + pbstate.connection_id = cp->connection_id; + if( !add_peer_block( pbstate ) ) { continue; } if( !send_buffer ) { @@ -1678,7 +1692,8 @@ namespace eosio { } void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { - received_blocks.insert(std::make_pair(id, c)); + peer_block_state pbstate{id, bnum, c->connection_id}; + add_peer_block( pbstate ); if (c && c->last_req && c->last_req->req_blocks.mode != none && @@ -1693,20 +1708,6 @@ namespace eosio { void dispatch_manager::rejected_block(const block_id_type& id) { fc_dlog( logger, "rejected block ${id}", ("id", id) ); - auto range = received_blocks.equal_range(id); - received_blocks.erase(range.first, 
range.second); - } - - void dispatch_manager::expire_blocks( uint32_t lib_num ) { - for( auto i = received_blocks.begin(); i != received_blocks.end(); ) { - const block_id_type& blk_id = i->first; - uint32_t blk_num = block_header::num_from_id( blk_id ); - if( blk_num <= lib_num ) { - i = received_blocks.erase( i ); - } else { - ++i; - } - } } void dispatch_manager::bcast_transaction(const transaction_metadata_ptr& ptrx) { @@ -1795,7 +1796,7 @@ namespace eosio { try { b = cc.fetch_block_by_id(blkid); // if exists if(b) { - c->add_peer_block({blkid, block_header::num_from_id(blkid)}); + add_peer_block({blkid, block_header::num_from_id(blkid), c->connection_id}); } } catch (const assert_exception &ex) { fc_ilog( logger, "caught assert on fetch_block_by_id, ${ex}",("ex",ex.what()) ); @@ -1851,7 +1852,7 @@ namespace eosio { sendit = trx != conn->trx_state.end(); } else { - sendit = conn->peer_has_block(bid); + sendit = peer_has_block(bid, c->connection_id); } if (sendit) { conn->enqueue(*c->last_req); @@ -2204,7 +2205,6 @@ namespace eosio { bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { try { // if next message is a block we already have, exit early -/* auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); @@ -2212,16 +2212,23 @@ namespace eosio { block_header bh; fc::raw::unpack( peek_ds, bh ); - controller& cc = chain_plug->chain(); block_id_type blk_id = bh.id(); - uint32_t blk_num = bh.block_num(); - if( cc.fetch_block_by_id( blk_id ) ) { - sync_master->recv_block( conn, blk_id, blk_num ); + if( dispatcher->have_block( blk_id ) ) { + connection_wptr weak = conn; + app().post(priority::low, + [dispatcher = dispatcher.get(), sync_master = sync_master.get(), weak{std::move(weak)}, blk_id] { + connection_ptr c = weak.lock(); + if(c) { + auto blk_num = block_header::num_from_id(blk_id); + dispatcher->recv_block(c, blk_id, blk_num); + sync_master->recv_block( c, blk_id, blk_num ); + } + }); conn->pending_message_buffer.advance_read_ptr( message_length ); return true; } } -*/ + auto ds = conn->pending_message_buffer.create_datastream(); net_message msg; fc::raw::unpack( ds, msg ); @@ -2750,8 +2757,6 @@ namespace eosio { stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); auto &stale_txn_e = c->trx_state.get(); stale_txn_e.erase(stale_txn_e.lower_bound(time_point_sec()), stale_txn_e.upper_bound(time_point::now())); - auto &stale_blk = c->blk_state.get(); - stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); } From 4a588b479f292c0b29ecc41f1679f914326f4174 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 13 Mar 2019 13:27:28 -0500 Subject: [PATCH 0050/1648] Avoid tying up a thread to wait on transaction future. Also removes likely safe but undefined behavior. 
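Rather than posting a second task whose only job is to block on future.wait(), the worker that performs the recovery invokes a continuation when it finishes. A rough stand-alone illustration of that pattern (recover_keys_async() is hypothetical, not the transaction_metadata API):

   // Post the work and deliver the result via a callback; no thread waits on a future.
   #include <boost/asio/post.hpp>
   #include <boost/asio/thread_pool.hpp>
   #include <functional>
   #include <iostream>

   void recover_keys_async( boost::asio::thread_pool& pool, int trx,
                            std::function<void(int)> next ) {
      boost::asio::post( pool, [trx, next = std::move(next)]() {
         int recovered = trx * 2;        // stand-in for the actual signature recovery
         if( next ) next( recovered );   // continuation runs on the pool thread when done
      } );
   }

   int main() {
      boost::asio::thread_pool pool( 2 );
      recover_keys_async( pool, 21, []( int keys ) { std::cout << "recovered " << keys << "\n"; } );
      pool.join();
      return 0;
   }

Because the caller never blocks, the pool no longer holds one thread hostage per in-flight recovery, which is the point of this commit.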
--- .../eosio/chain/transaction_metadata.hpp | 7 ++++++- libraries/chain/transaction_metadata.cpp | 19 ++++++++++--------- plugins/producer_plugin/producer_plugin.cpp | 12 +++++------- 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 6136580fa44..947b557a1d4 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -52,8 +52,13 @@ class transaction_metadata { const flat_set& recover_keys( const chain_id_type& chain_id ); + // must be called from main application thread. signing_keys_future must be accessed only from main application thread. + // next() should only be called on main application thread after future is valid, to avoid dependency on appbase, + // it is up to the caller to have next() post to the application thread which makes sure future is only accessed from + // application thread and that assignment to future in this method has completed. static void create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, - const chain_id_type& chain_id, fc::microseconds time_limit ); + const chain_id_type& chain_id, fc::microseconds time_limit, + std::function next = std::function() ); }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index 482b3c488f7..e9994167aa0 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -24,21 +24,22 @@ const flat_set& transaction_metadata::recover_keys( const chain } void transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { - if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created + boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit, + std::function next ) +{ + if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) {// already created + if( next ) next(); return; + } - std::weak_ptr mtrx_wp = mtrx; - mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() { + mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx, next{std::move(next)}]() { fc::time_point deadline = time_limit == fc::microseconds::maximum() ? 
fc::time_point::maximum() : fc::time_point::now() + time_limit; - auto mtrx = mtrx_wp.lock(); fc::microseconds cpu_usage; flat_set recovered_pub_keys; - if( mtrx ) { - const signed_transaction& trn = mtrx->packed_trx->get_signed_transaction(); - cpu_usage = trn.get_signature_keys( chain_id, deadline, recovered_pub_keys ); - } + const signed_transaction& trn = mtrx->packed_trx->get_signed_transaction(); + cpu_usage = trn.get_signature_keys( chain_id, deadline, recovered_pub_keys ); + if( next ) next(); return std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys )); } ); } diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 50f6c531d0d..68113333f66 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -354,13 +354,11 @@ class producer_plugin_impl : public std::enable_shared_from_thissigning_keys_future.valid() ) - trx->signing_keys_future.wait(); - app().post(priority::low, [self, trx, persist_until_expired, next]() { - self->process_incoming_transaction_async( trx, persist_until_expired, next ); - }); + transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage, + [self = this, trx, persist_until_expired, next{std::move(next)}]() mutable { + app().post(priority::low, [self, trx{std::move(trx)}, persist_until_expired, next{std::move(next)}]() { + self->process_incoming_transaction_async( trx, persist_until_expired, next ); + }); }); } From ef3d282d7e10e9d5753205e980adec36d63c2a62 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 13 Mar 2019 13:38:50 -0500 Subject: [PATCH 0051/1648] Update comment --- plugins/producer_plugin/producer_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 68113333f66..dbc42ed9282 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -353,7 +353,7 @@ class producer_plugin_impl : public std::enable_shared_from_this Date: Thu, 14 Mar 2019 09:22:04 -0500 Subject: [PATCH 0052/1648] Assign future on main application thread --- plugins/producer_plugin/producer_plugin.cpp | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index dbc42ed9282..6f81308aa83 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -352,13 +352,18 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); const auto max_trx_time_ms = _max_transaction_time_ms.load(); fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); - auto& tp = *_thread_pool; - // use chain thread pool for sig recovery - transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage, + + auto after_sig_recovery = [self = this, trx, persist_until_expired, next{std::move(next)}]() mutable { app().post(priority::low, [self, trx{std::move(trx)}, persist_until_expired, next{std::move(next)}]() { self->process_incoming_transaction_async( trx, persist_until_expired, next ); }); + }; + + app().post(priority::low, [trx, &chain, max_trx_cpu_usage, after_sig_recovery{std::move(after_sig_recovery)}]() mutable { + // use chain thread pool for sig recovery + transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), + max_trx_cpu_usage, std::move( after_sig_recovery ) ); }); } From b9af7286c7236d2916d45b95a06454a5805af8dc Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 14 Mar 2019 12:37:34 -0500 Subject: [PATCH 0053/1648] Acquire lib via lib channel. Make expire_local_txns() thread safe. --- plugins/net_plugin/net_plugin.cpp | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 87a925aa519..a8ade80f317 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -28,7 +28,7 @@ #include -using namespace eosio::chain::plugin_interface::compat; +using namespace eosio::chain::plugin_interface; namespace fc { extern std::unordered_map& get_logger_map(); @@ -146,6 +146,7 @@ namespace eosio { bool network_version_match = false; chain_id_type chain_id; fc::sha256 node_id; + std::atomic lib_num{0}; eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; string user_agent_name; @@ -160,7 +161,8 @@ namespace eosio { bool use_socket_read_watermark = false; - channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; uint16_t thread_pool_size = 4; optional thread_pool; @@ -192,6 +194,9 @@ namespace eosio { void accepted_block(const block_state_ptr&); void transaction_ack(const std::pair&); + void on_irreversible_block( const block_state_ptr& blk ) { + lib_num = blk->block_num; + } bool is_valid( const handshake_message &msg); @@ -2749,10 +2754,9 @@ namespace eosio { auto now = time_point::now(); expire_local_txns(); - controller& cc = chain_plug->chain(); - uint32_t lib = cc.last_irreversible_block_num(); + uint32_t lib = lib_num.load(); dispatcher->expire_blocks( lib ); - for ( auto &c : connections ) { + for ( auto& c : connections ) { auto &stale_txn = c->trx_state.get(); stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); auto &stale_txn_e = c->trx_state.get(); @@ -2761,9 +2765,9 @@ namespace eosio { fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); } + // thread safe void net_plugin_impl::expire_local_txns() { - controller& cc = chain_plug->chain(); - uint32_t lib = cc.last_irreversible_block_num(); + uint32_t lib = lib_num.load(); size_t start_size = 0, end_size = 0; std::unique_lock g( local_txns_mtx ); @@ -3151,7 +3155,12 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } - 
my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->incoming_transaction_ack_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->incoming_irreversible_block_subscription = app().get_channel().subscribe( + [this]( block_state_ptr s ) { + my->on_irreversible_block( s ); + }); my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { From afbcf5af65d8bd8662ad99fe93324dba9b74b05c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 12 Mar 2019 14:18:21 -0500 Subject: [PATCH 0054/1648] Remove unneeded request for arbitrary list of transactions --- plugins/net_plugin/net_plugin.cpp | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a8ade80f317..eff2a983382 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -595,7 +595,6 @@ namespace eosio { const string peer_name(); void txn_send_pending(const vector& ids); - void txn_send(const vector& txn_lis); void blk_send_branch(); void blk_send(const block_id_type& blkid); @@ -873,21 +872,6 @@ namespace eosio { } } - void connection::txn_send(const vector& ids) { - vector>> trx_to_send; - std::unique_lock g( my_impl->local_txns_mtx ); - for( const auto& t : ids ) { - auto tx = my_impl->local_txns.get().find( t ); - if( tx != my_impl->local_txns.end()) { - trx_to_send.emplace_back( tx->serialized_txn ); - } - } - g.unlock(); - for( const auto& t : trx_to_send ) { - queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); - } - } - void connection::blk_send_branch() { controller& cc = my_impl->chain_plug->chain(); uint32_t head_num = cc.fork_db_head_block_num(); @@ -2480,6 +2464,7 @@ namespace eosio { std::lock_guard g( my_impl->local_txns_mtx ); size_t known_sum = local_txns.size(); if( known_sum ) { + expire_local_txns(); for( const auto& t : local_txns.get() ) { req.req_trx.ids.push_back( t.id ); } @@ -2545,12 +2530,16 @@ namespace eosio { case catch_up : c->txn_send_pending(msg.req_trx.ids); break; - case normal : - c->txn_send(msg.req_trx.ids); - break; case none : if(msg.req_blocks.mode == none) c->stop_send(); + // no break + case normal : + if( !msg.req_trx.ids.empty() ) { + elog( "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); + close(c); + return; + } break; default:; } From 2bd622003ae6384bcbbe0be6957332e7701230b7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 13 Mar 2019 14:19:32 -0500 Subject: [PATCH 0055/1648] Remove txn_send_pending to avoid large # trxs at end of sync --- plugins/net_plugin/net_plugin.cpp | 33 ------------------------------- 1 file changed, 33 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index eff2a983382..8ddc22b34cc 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -594,8 +594,6 @@ namespace eosio { const string peer_name(); - void txn_send_pending(const vector& ids); - void blk_send_branch(); void blk_send(const block_id_type& blkid); void stop_send(); @@ -855,23 +853,6 @@ namespace eosio { if( read_delay_timer ) read_delay_timer->cancel(); } - void connection::txn_send_pending(const vector& ids) { - const std::set known_ids(ids.cbegin(), ids.cend()); - my_impl->expire_local_txns(); - vector>> trx_to_send; 
- std::unique_lock g( my_impl->local_txns_mtx ); - for( auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ) { - const bool found = known_ids.find( tx->id ) != known_ids.cend(); - if( !found ) { - trx_to_send.emplace_back( tx->serialized_txn ); - } - } - g.unlock(); - for( const auto& t : trx_to_send ) { - queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); - } - } - void connection::blk_send_branch() { controller& cc = my_impl->chain_plug->chain(); uint32_t head_num = cc.fork_db_head_block_num(); @@ -2457,19 +2438,6 @@ namespace eosio { break; } case catch_up : { - if( msg.known_trx.pending > 0) { - // plan to get all except what we already know about. - req.req_trx.mode = catch_up; - send_req = true; - std::lock_guard g( my_impl->local_txns_mtx ); - size_t known_sum = local_txns.size(); - if( known_sum ) { - expire_local_txns(); - for( const auto& t : local_txns.get() ) { - req.req_trx.ids.push_back( t.id ); - } - } - } break; } case normal: { @@ -2528,7 +2496,6 @@ namespace eosio { switch (msg.req_trx.mode) { case catch_up : - c->txn_send_pending(msg.req_trx.ids); break; case none : if(msg.req_blocks.mode == none) From d882f464d896954ac70f3be9b918b47279c71a47 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 14 Mar 2019 13:15:20 -0500 Subject: [PATCH 0056/1648] Remove dead code --- plugins/net_plugin/net_plugin.cpp | 27 +++------------------------ 1 file changed, 3 insertions(+), 24 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8ddc22b34cc..d229b459277 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1724,13 +1724,6 @@ namespace eosio { void dispatch_manager::recv_transaction(const connection_ptr& c, const transaction_id_type& id) { received_transactions.insert(std::make_pair(id, c)); - if (c && - c->last_req && - c->last_req->req_trx.mode != none && - !c->last_req->req_trx.ids.empty() && - c->last_req->req_trx.ids.back() == id) { - c->last_req.reset(); - } fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); @@ -1797,17 +1790,10 @@ namespace eosio { return; } fc_wlog( logger, "failed to fetch from ${p}",("p",c->peer_name())); - transaction_id_type tid; block_id_type bid; - bool is_txn = false; - if( c->last_req->req_trx.mode == normal && !c->last_req->req_trx.ids.empty() ) { - is_txn = true; - tid = c->last_req->req_trx.ids.back(); - } - else if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { + if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { bid = c->last_req->req_blocks.ids.back(); - } - else { + } else { fc_wlog( logger,"no retry, block mpde = ${b} trx mode = ${t}", ("b",modes_str(c->last_req->req_blocks.mode))("t",modes_str(c->last_req->req_trx.mode))); return; @@ -1816,14 +1802,7 @@ namespace eosio { if (conn == c || conn->last_req) { continue; } - bool sendit = false; - if (is_txn) { - auto trx = conn->trx_state.get().find(tid); - sendit = trx != conn->trx_state.end(); - } - else { - sendit = peer_has_block(bid, c->connection_id); - } + bool sendit = peer_has_block( bid, c->connection_id ); if (sendit) { conn->enqueue(*c->last_req); conn->fetch_wait(); From dcc0ea480f8a18cef453f1e01390b51c6c237bf4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 14 Mar 2019 14:03:58 -0500 Subject: [PATCH 0057/1648] Remove dead code including serialized transaction --- plugins/net_plugin/net_plugin.cpp | 28 
++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d229b459277..72b29be6c56 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -63,7 +63,6 @@ namespace eosio { transaction_id_type id; time_point_sec expires; /// time after which this may be purged. uint32_t block_num = 0; /// block transaction was included in - std::shared_ptr> serialized_txn; /// the received raw bundle }; struct by_expiry; @@ -722,7 +721,7 @@ namespace eosio { void rejected_block(const connection_ptr& c, uint32_t blk_num); void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); - void recv_notice(const connection_ptr& c, const notice_message& msg); + void sync_recv_notice( const connection_ptr& c, const notice_message& msg); }; class dispatch_manager { @@ -1447,9 +1446,7 @@ namespace eosio { notice_message note; note.known_blocks.mode = none; note.known_trx.mode = catch_up; - std::unique_lock g( my_impl->local_txns_mtx ); - note.known_trx.pending = my_impl->local_txns.size(); - g.unlock(); + note.known_trx.pending = 0; c->enqueue( note ); return; } @@ -1523,8 +1520,10 @@ namespace eosio { c->enqueue( req ); } - void sync_manager::recv_notice(const connection_ptr& c, const notice_message& msg) { + void sync_manager::sync_recv_notice( const connection_ptr& c, const notice_message& msg) { fc_ilog(logger, "sync_manager got ${m} block notice",("m",modes_str(msg.known_blocks.mode))); + EOS_ASSERT( msg.known_blocks.mode == catch_up || msg.known_blocks.mode == last_irr_catch_up, plugin_exception, + "sync_recv_notice only called on catch_up" ); if( msg.known_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}", ("s", msg.known_blocks.ids.size())("p", c->peer_name()) ); @@ -1537,8 +1536,7 @@ namespace eosio { } else { verify_catchup(c, msg.known_blocks.pending, msg.known_blocks.ids.back()); } - } - else { + } else if (msg.known_blocks.mode == last_irr_catch_up) { c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; reset_lib_num(c); start_sync(c, msg.known_trx.pending); @@ -1692,21 +1690,18 @@ namespace eosio { std::unique_lock g( my_impl->local_txns_mtx ); if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found + g.unlock(); fc_dlog( logger, "found trxid in local_trxs" ); return; } + time_point_sec trx_expiration = ptrx->packed_trx->expiration(); + node_transaction_state nts = {id, trx_expiration, 0}; + my_impl->local_txns.insert( std::move( nts )); g.unlock(); - time_point_sec trx_expiration = ptrx->packed_trx->expiration(); const packed_transaction& trx = *ptrx->packed_trx; - auto buff = create_send_buffer( trx ); - node_transaction_state nts = {id, trx_expiration, 0, buff}; - g.lock(); - my_impl->local_txns.insert( std::move( nts )); - g.unlock(); - my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { if( skips.find(c) != skips.end() || c->syncing ) { return false; @@ -2433,7 +2428,7 @@ namespace eosio { } case last_irr_catch_up: case catch_up: { - sync_master->recv_notice(c,msg); + sync_master->sync_recv_notice(c,msg); break; } case normal : { @@ -2523,6 +2518,7 @@ namespace eosio { std::unique_lock g( local_txns_mtx ); if( local_txns.get().find( tid ) != local_txns.end()) { + g.unlock(); fc_dlog( 
logger, "got a duplicate transaction - dropping" ); return; } From 219b2ac1a601151d4ce94d4e7233f0fd24365fdf Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 08:58:06 -0500 Subject: [PATCH 0058/1648] Consolidate transaction tracking, reducing memory requirements and making thread safe. --- plugins/net_plugin/net_plugin.cpp | 467 ++++++++++++++---------------- 1 file changed, 218 insertions(+), 249 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 72b29be6c56..0405832d481 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -50,9 +50,6 @@ namespace eosio { class connection; - class sync_manager; - class dispatch_manager; - using connection_ptr = std::shared_ptr; using connection_wptr = std::weak_ptr; @@ -61,8 +58,9 @@ namespace eosio { struct node_transaction_state { transaction_id_type id; - time_point_sec expires; /// time after which this may be purged. - uint32_t block_num = 0; /// block transaction was included in + time_point_sec expires; /// time after which this may be purged. + uint32_t block_num = 0; /// block transaction was included in + uint32_t connection_id = 0; }; struct by_expiry; @@ -80,26 +78,119 @@ namespace eosio { node_transaction_state, indexed_by< ordered_unique< - tag< by_id >, - member < node_transaction_state, - transaction_id_type, - &node_transaction_state::id >, - sha256_less >, + tag, + composite_key< node_transaction_state, + member, + member + >, + composite_key_compare< sha256_less, std::less > + >, ordered_non_unique< tag< by_expiry >, - member< node_transaction_state, - fc::time_point_sec, - &node_transaction_state::expires > - >, + member< node_transaction_state, fc::time_point_sec, &node_transaction_state::expires > >, ordered_non_unique< tag, - member< node_transaction_state, - uint32_t, - &node_transaction_state::block_num > > + member< node_transaction_state, uint32_t, &node_transaction_state::block_num > > > > node_transaction_index; + struct peer_block_state { + block_id_type id; + uint32_t block_num = 0; + uint32_t connection_id = 0; + }; + + typedef multi_index_container< + eosio::peer_block_state, + indexed_by< + ordered_unique< tag, + composite_key< peer_block_state, + member, + member + >, + composite_key_compare< sha256_less, std::less > + >, + ordered_non_unique< tag, member > + > + > peer_block_state_index; + + + struct update_block_num { + uint32_t new_bnum; + update_block_num(uint32_t bnum) : new_bnum(bnum) {} + void operator() (node_transaction_state& nts) { + nts.block_num = new_bnum; + } + }; + + class sync_manager { + private: + enum stages { + lib_catchup, + head_catchup, + in_sync + }; + + uint32_t sync_known_lib_num; + uint32_t sync_last_requested_num; + uint32_t sync_next_expected_num; + uint32_t sync_req_span; + connection_ptr source; + stages state; + + chain_plugin* chain_plug = nullptr; + + constexpr static auto stage_str(stages s); + + public: + explicit sync_manager(uint32_t span); + void set_state(stages s); + bool sync_required(); + void send_handshakes(); + bool is_active(const connection_ptr& conn); + void reset_lib_num(const connection_ptr& conn); + void request_next_chunk(const connection_ptr& conn = connection_ptr()); + void start_sync(const connection_ptr& c, uint32_t target); + void reassign_fetch(const connection_ptr& c, go_away_reason reason); + void verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id); + void rejected_block(const connection_ptr& c, uint32_t blk_num); + void 
sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); + void recv_handshake(const connection_ptr& c, const handshake_message& msg); + void sync_recv_notice( const connection_ptr& c, const notice_message& msg); + }; + + class dispatch_manager { + std::mutex blk_state_mtx; + peer_block_state_index blk_state; + std::mutex local_txns_mtx; + node_transaction_index local_txns; + + public: + void bcast_transaction(const transaction_metadata_ptr& trx); + void rejected_transaction(const transaction_id_type& msg, uint32_t head_blk_num); + void bcast_block(const block_state_ptr& bs); + void rejected_block(const block_id_type& id); + + void recv_block(const connection_ptr& conn, const block_id_type& msg, uint32_t bnum); + void expire_blocks( uint32_t bnum ); + void recv_transaction(const connection_ptr& conn, const transaction_metadata_ptr& txn); + void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); + + void retry_fetch(const connection_ptr& conn); + + bool add_peer_block(const peer_block_state& pbs); + bool peer_has_block(const block_id_type& blkid, uint32_t connection_id); + bool have_block(const block_id_type& blkid); + + bool add_peer_txn( const node_transaction_state& nts ); + void update_txns_block_num( const signed_block_ptr& sb ); + void update_txns_block_num( const transaction_id_type& id, uint32_t blk_num ); + bool peer_has_txn( const transaction_id_type& tid, uint32_t connection_id ); + bool have_txn( const transaction_id_type& tid ); + void expire_txns( uint32_t lib_num ); + }; + class net_plugin_impl { public: unique_ptr acceptor; @@ -146,6 +237,7 @@ namespace eosio { chain_id_type chain_id; fc::sha256 node_id; std::atomic lib_num{0}; + uint32_t head_blk_num{0}; eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; string user_agent_name; @@ -153,9 +245,6 @@ namespace eosio { producer_plugin* producer_plug = nullptr; int started_sessions = 0; - std::mutex local_txns_mtx; - node_transaction_index local_txns; - shared_ptr resolver; bool use_socket_read_watermark = false; @@ -188,13 +277,12 @@ namespace eosio { void close(const connection_ptr& c); size_t count_open_sockets() const; - template - void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); - void accepted_block(const block_state_ptr&); void transaction_ack(const std::pair&); void on_irreversible_block( const block_state_ptr& blk ) { lib_num = blk->block_num; + controller& cc = chain_plug->chain(); + head_blk_num = cc.head_block_num(); } bool is_valid( const handshake_message &msg); @@ -230,7 +318,6 @@ namespace eosio { void start_monitors(); void expire_txns(); - void expire_local_txns(); void connection_monitor(std::weak_ptr from_connection); /** \name Peer Timestamps * Time message handling @@ -348,58 +435,6 @@ namespace eosio { constexpr uint16_t net_version = proto_explicit_sync; - struct transaction_state { - transaction_id_type id; - uint32_t block_num = 0; ///< the block number the transaction was included in - time_point_sec expires; - }; - - typedef multi_index_container< - transaction_state, - indexed_by< - ordered_unique< tag, member, sha256_less >, - ordered_non_unique< tag< by_expiry >, member< transaction_state,fc::time_point_sec,&transaction_state::expires >>, - ordered_non_unique< - tag, - member< transaction_state, - uint32_t, - &transaction_state::block_num > > - > - - > transaction_state_index; - - struct peer_block_state { - block_id_type id; - uint32_t block_num; - uint32_t connection_id; - 
}; - - typedef multi_index_container< - eosio::peer_block_state, - indexed_by< - ordered_unique< tag, - composite_key< peer_block_state, - member, - member - >, - composite_key_compare< sha256_less, std::less > - >, - ordered_non_unique< tag, member > - > - > peer_block_state_index; - - - struct update_block_num { - uint32_t new_bnum; - update_block_num(uint32_t bnum) : new_bnum(bnum) {} - void operator() (node_transaction_state& nts) { - nts.block_num = new_bnum; - } - void operator() (transaction_state& ts) { - ts.block_num = new_bnum; - } - }; - /** * Index by start_block_num */ @@ -507,7 +542,6 @@ namespace eosio { ~connection(); void initialize(); - transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive socket_ptr socket; @@ -688,70 +722,10 @@ namespace eosio { } }; - class sync_manager { - private: - enum stages { - lib_catchup, - head_catchup, - in_sync - }; - - uint32_t sync_known_lib_num; - uint32_t sync_last_requested_num; - uint32_t sync_next_expected_num; - uint32_t sync_req_span; - connection_ptr source; - stages state; - - chain_plugin* chain_plug = nullptr; - - constexpr static auto stage_str(stages s); - - public: - explicit sync_manager(uint32_t span); - void set_state(stages s); - bool sync_required(); - void send_handshakes(); - bool is_active(const connection_ptr& conn); - void reset_lib_num(const connection_ptr& conn); - void request_next_chunk(const connection_ptr& conn = connection_ptr()); - void start_sync(const connection_ptr& c, uint32_t target); - void reassign_fetch(const connection_ptr& c, go_away_reason reason); - void verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id); - void rejected_block(const connection_ptr& c, uint32_t blk_num); - void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); - void recv_handshake(const connection_ptr& c, const handshake_message& msg); - void sync_recv_notice( const connection_ptr& c, const notice_message& msg); - }; - - class dispatch_manager { - std::mutex blk_state_mtx; - peer_block_state_index blk_state; - std::multimap received_transactions; - - public: - void bcast_transaction(const transaction_metadata_ptr& trx); - void rejected_transaction(const transaction_id_type& msg); - void bcast_block(const block_state_ptr& bs); - void rejected_block(const block_id_type& id); - - void recv_block(const connection_ptr& conn, const block_id_type& msg, uint32_t bnum); - void expire_blocks( uint32_t bnum ); - void recv_transaction(const connection_ptr& conn, const transaction_id_type& id); - void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); - - void retry_fetch(const connection_ptr& conn); - - bool add_peer_block(const peer_block_state& pbs); - bool peer_has_block(const block_id_type& blkid, uint32_t connection_id); - bool have_block(const block_id_type& blkid); - }; - //--------------------------------------------------------------------------- connection::connection( string endpoint ) - : trx_state(), - peer_requested(), + : peer_requested(), server_ioc( my_impl->server_ioc ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), @@ -775,8 +749,7 @@ namespace eosio { } connection::connection( socket_ptr s ) - : trx_state(), - peer_requested(), + : peer_requested(), server_ioc( my_impl->server_ioc ), socket( s ), node_id(), @@ -820,7 +793,6 @@ namespace eosio { void connection::reset() { peer_requested.reset(); - 
trx_state.clear(); } void connection::flush_queues() { @@ -1553,7 +1525,7 @@ namespace eosio { send_handshakes(); } } - void sync_manager::recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { + void sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { fc_dlog(logger, "got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name())); if (state == lib_catchup) { if (blk_num != sync_next_expected_num) { @@ -1627,6 +1599,68 @@ namespace eosio { return blk_itr != blk_state.end(); } + bool dispatch_manager::add_peer_txn( const node_transaction_state& nts ) { + std::lock_guard g( local_txns_mtx ); + auto tptr = local_txns.get().find( std::make_tuple( std::ref( nts.id ), nts.connection_id ) ); + bool added = (tptr == local_txns.end()); + if( added ) { + local_txns.insert( nts ); + } + return added; + } + + void dispatch_manager::update_txns_block_num( const signed_block_ptr& sb ) { + update_block_num ubn( sb->block_num() ); + std::lock_guard g( local_txns_mtx ); + for( const auto& recpt : sb->transactions ) { + const transaction_id_type& id = (recpt.trx.which() == 0) ? recpt.trx.get() + : recpt.trx.get().id(); + auto range = local_txns.get().equal_range( id ); + for( auto itr = range.first; itr != range.second; ++itr ) { + local_txns.modify( itr, ubn ); + } + } + } + + void dispatch_manager::update_txns_block_num( const transaction_id_type& id, uint32_t blk_num ) { + update_block_num ubn( blk_num ); + std::lock_guard g( local_txns_mtx ); + auto range = local_txns.get().equal_range( id ); + for( auto itr = range.first; itr != range.second; ++itr ) { + local_txns.modify( itr, ubn ); + } + } + + bool dispatch_manager::peer_has_txn( const transaction_id_type& tid, uint32_t connection_id ) { + std::lock_guard g( local_txns_mtx ); + auto tptr = local_txns.get().find( std::make_tuple( std::ref( tid ), connection_id ) ); + return tptr != local_txns.end(); + } + + bool dispatch_manager::have_txn( const transaction_id_type& tid ) { + std::lock_guard g( local_txns_mtx ); + auto tptr = local_txns.get().find( tid ); + return tptr != local_txns.end(); + } + + void dispatch_manager::expire_txns( uint32_t lib_num ) { + size_t start_size = 0, end_size = 0; + { + std::lock_guard g( local_txns_mtx ); + + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 ) ); + auto ex_up = old.upper_bound( time_point::now() ); + old.erase( ex_lo, ex_up ); + + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib_num ) ); + end_size = local_txns.size(); + } + fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)( "r", start_size - end_size ) ); + } + void dispatch_manager::expire_blocks( uint32_t lib_num ) { std::lock_guard g(blk_state_mtx); auto& stale_blk = blk_state.get(); @@ -1679,55 +1713,41 @@ namespace eosio { } void dispatch_manager::bcast_transaction(const transaction_metadata_ptr& ptrx) { - std::set skips; const auto& id = ptrx->id; - - auto range = received_transactions.equal_range(id); - for (auto org = range.first; org != range.second; ++org) { - skips.insert(org->second); - } - received_transactions.erase(range.first, range.second); - - std::unique_lock g( my_impl->local_txns_mtx ); - if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found - g.unlock(); - fc_dlog( logger, "found trxid in local_trxs" ); - return; - } - time_point_sec trx_expiration = ptrx->packed_trx->expiration(); - 
node_transaction_state nts = {id, trx_expiration, 0}; - my_impl->local_txns.insert( std::move( nts )); - g.unlock(); - const packed_transaction& trx = *ptrx->packed_trx; - auto buff = create_send_buffer( trx ); + time_point_sec trx_expiration = trx.expiration(); + node_transaction_state nts = {id, trx_expiration, 0, 0}; - my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { - if( skips.find(c) != skips.end() || c->syncing ) { - return false; - } - const auto& bs = c->trx_state.find(id); - bool unknown = bs == c->trx_state.end(); - if( unknown ) { - c->trx_state.insert(transaction_state({id,0,trx_expiration})); - fc_dlog(logger, "sending trx to ${n}", ("n",c->peer_name() ) ); - } - return unknown; - }); + std::shared_ptr> send_buffer; + for( auto& cp : my_impl->connections ) { + if( !cp->current() ) { + continue; + } + if( !add_peer_txn(nts) ) { + continue; + } + if( !send_buffer ) { + send_buffer = create_send_buffer( trx ); + } + fc_dlog(logger, "sending trx to ${n}", ("n", cp->peer_name() ) ); + cp->enqueue_buffer( send_buffer, true, priority::low, no_reason ); + } } - void dispatch_manager::recv_transaction(const connection_ptr& c, const transaction_id_type& id) { - received_transactions.insert(std::make_pair(id, c)); + void dispatch_manager::recv_transaction(const connection_ptr& c, const transaction_metadata_ptr& txn) { + node_transaction_state nts = {txn->id, txn->packed_trx->expiration(), 0, c->connection_id}; + add_peer_txn( nts ); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); } - void dispatch_manager::rejected_transaction(const transaction_id_type& id) { - fc_dlog(logger,"not sending rejected transaction ${tid}",("tid",id)); - auto range = received_transactions.equal_range(id); - received_transactions.erase(range.first, range.second); + void dispatch_manager::rejected_transaction(const transaction_id_type& id, uint32_t head_blk_num) { + fc_dlog( logger, "not sending rejected transaction ${tid}", ("tid", id) ); + // keep rejected transaction around for awhile so we don't broadcast it + // update its block number so it will be purged when current block number is lib + update_txns_block_num( id, head_blk_num ); } void dispatch_manager::recv_notice(const connection_ptr& c, const notice_message& msg, bool generated) { @@ -2165,7 +2185,7 @@ namespace eosio { if(c) { auto blk_num = block_header::num_from_id(blk_id); dispatcher->recv_block(c, blk_id, blk_num); - sync_master->recv_block( c, blk_id, blk_num ); + sync_master->sync_recv_block( c, blk_id, blk_num ); } }); conn->pending_message_buffer.advance_read_ptr( message_length ); @@ -2203,16 +2223,6 @@ namespace eosio { return count; } - - template - void net_plugin_impl::send_transaction_to_all(const std::shared_ptr>& send_buffer, VerifierFunc verify) { - for( auto &c : connections) { - if( c->current() && verify( c )) { - c->enqueue_buffer( send_buffer, true, priority::low, no_reason ); - } - } - } - bool net_plugin_impl::is_valid(const handshake_message& msg) { // Do some basic validation of an incoming handshake_message, so things // that really aren't handshake messages can be quickly discarded without @@ -2516,18 +2526,17 @@ namespace eosio { auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; - std::unique_lock g( local_txns_mtx ); - if( local_txns.get().find( tid ) != local_txns.end()) { - g.unlock(); - fc_dlog( logger, "got a duplicate transaction - dropping" ); - return; - } - g.unlock(); + bool have_trx = 
dispatcher->have_txn( tid ); connection_wptr weak_ptr = c; - app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, tid](){ + app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, ptrx](){ auto c = weak_ptr.lock(); - dispatcher->recv_transaction(c, tid); + dispatcher->recv_transaction(c, ptrx); }); + if( have_trx ) { + fc_dlog( logger, "got a duplicate transaction - dropping" ); + return; + } + c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); @@ -2546,11 +2555,11 @@ namespace eosio { } } - app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}]() { + app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}, head_blk_num = this->head_blk_num]() { if( accepted ) { dispatcher->bcast_transaction( ptrx ); } else { - dispatcher->rejected_transaction( ptrx->id ); + dispatcher->rejected_transaction( ptrx->id, head_blk_num ); } }); }); @@ -2565,7 +2574,7 @@ namespace eosio { try { if( cc.fetch_block_by_id(blk_id) ) { - sync_master->recv_block(c, blk_id, blk_num); + sync_master->sync_recv_block(c, blk_id, blk_num); return; } } catch( ...) { @@ -2603,21 +2612,10 @@ namespace eosio { update_block_num ubn(blk_num); if( reason == no_reason ) { - std::unique_lock g( local_txns_mtx ); - for( const auto& recpt : msg->transactions ) { - auto id = (recpt.trx.which() == 0) ? recpt.trx.get() - : recpt.trx.get().id(); - auto ltx = local_txns.get().find( id ); - if( ltx != local_txns.end()) { - local_txns.modify( ltx, ubn ); - } - auto ctx = c->trx_state.get().find( id ); - if( ctx != c->trx_state.end()) { - c->trx_state.modify( ctx, ubn ); - } - } - g.unlock(); - sync_master->recv_block(c, blk_id, blk_num); + boost::asio::post( *server_ioc, [self = this, msg]() { + self->dispatcher->update_txns_block_num( msg ); + }); + sync_master->sync_recv_block(c, blk_id, blk_num); } else { sync_master->rejected_block(c, blk_num); @@ -2683,41 +2681,12 @@ namespace eosio { start_txn_timer(); auto now = time_point::now(); - expire_local_txns(); - uint32_t lib = lib_num.load(); dispatcher->expire_blocks( lib ); - for ( auto& c : connections ) { - auto &stale_txn = c->trx_state.get(); - stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); - auto &stale_txn_e = c->trx_state.get(); - stale_txn_e.erase(stale_txn_e.lower_bound(time_point_sec()), stale_txn_e.upper_bound(time_point::now())); - } + dispatcher->expire_txns( lib ); fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); } - // thread safe - void net_plugin_impl::expire_local_txns() { - uint32_t lib = lib_num.load(); - size_t start_size = 0, end_size = 0; - - std::unique_lock g( local_txns_mtx ); - - start_size = local_txns.size(); - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec( 0 )); - auto ex_up = old.upper_bound( time_point::now()); - old.erase( ex_lo, ex_up ); - - auto& stale = local_txns.get(); - stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib )); - end_size = local_txns.size(); - - g.unlock(); - - fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)("r", start_size - end_size) ); - } - void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); @@ -2764,7 +2733,7 @@ namespace eosio 
{ const auto& id = results.second->id; if (results.first) { fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); - dispatcher->rejected_transaction(id); + dispatcher->rejected_transaction(id, head_blk_num); } else { fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); dispatcher->bcast_transaction(results.second); From a4d5559342720f7d87cbbab4a8efc3a74a498bd4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 09:47:01 -0500 Subject: [PATCH 0059/1648] Fix for missing connection_id --- plugins/net_plugin/net_plugin.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0405832d481..6064da2d58a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1723,6 +1723,7 @@ namespace eosio { if( !cp->current() ) { continue; } + nts.connection_id = cp->connection_id; if( !add_peer_txn(nts) ) { continue; } From d5aa85b43ac6a8573430160ae1d5eca519b7eee9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 12:32:49 -0500 Subject: [PATCH 0060/1648] Make all timers thread safe. Expire trx/blks on thread pool. --- plugins/net_plugin/net_plugin.cpp | 117 +++++++++++++++++++----------- 1 file changed, 74 insertions(+), 43 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 6064da2d58a..8b8b0d9ccfa 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -222,14 +222,17 @@ namespace eosio { unique_ptr< sync_manager > sync_master; unique_ptr< dispatch_manager > dispatcher; - unique_ptr connector_check; - unique_ptr transaction_check; + std::mutex connector_check_timer_mtx; + unique_ptr connector_check_timer; + std::mutex expire_timer_mtx; + unique_ptr expire_timer; + std::mutex keepalive_timer_mtx; unique_ptr keepalive_timer; boost::asio::steady_timer::duration connector_period; boost::asio::steady_timer::duration txn_exp_period; boost::asio::steady_timer::duration resp_expected_period; boost::asio::steady_timer::duration keepalive_interval{std::chrono::seconds{32}}; - int max_cleanup_time_ms = 0; + int max_cleanup_time_ms = 0; const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; ///< Peer clock may be no more than 1 second skewed from our clock, including network latency. 
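// A minimal standalone sketch of the per-timer mutex pattern this patch introduces for
// connector_check_timer, expire_timer, keepalive_timer and response_expected_timer.
// Names here are simplified and hypothetical -- this is an illustration of the pattern,
// not the actual net_plugin code: every re-arm or cancel of the timer happens under its
// own mutex so any thread may safely touch it.
#include <boost/asio.hpp>
#include <memory>
#include <mutex>

class guarded_timer {
   std::mutex                                  mtx;
   std::unique_ptr<boost::asio::steady_timer>  timer;
public:
   explicit guarded_timer( boost::asio::io_context& ioc )
      : timer( new boost::asio::steady_timer( ioc ) ) {}

   // thread safe: re-arm the timer and register the completion handler under the lock
   template<typename Handler>
   void start( boost::asio::steady_timer::duration du, Handler handler ) {
      std::lock_guard<std::mutex> g( mtx );
      timer->expires_from_now( du );
      timer->async_wait( std::move( handler ) );
   }

   // thread safe: outstanding waits complete with operation_aborted
   void cancel() {
      std::lock_guard<std::mutex> g( mtx );
      timer->cancel();
   }
};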
@@ -314,10 +317,10 @@ namespace eosio { void handle_message(const connection_ptr& c, const packed_transaction_ptr& msg); void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); - void start_txn_timer(); + void start_expire_timer(); void start_monitors(); - void expire_txns(); + void expire(); void connection_monitor(std::weak_ptr from_connection); /** \name Peer Timestamps * Time message handling @@ -540,7 +543,9 @@ namespace eosio { explicit connection( socket_ptr s ); ~connection(); - void initialize(); + private: + void initialize(); // only called from constructor + public: optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive @@ -563,7 +568,8 @@ namespace eosio { bool syncing = false; uint16_t protocol_version = 0; string peer_addr; - unique_ptr response_expected; + std::mutex response_expected_timer_mtx; + unique_ptr response_expected_timer; std::mutex read_delay_timer_mtx; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; @@ -737,7 +743,7 @@ namespace eosio { syncing(false), protocol_version(0), peer_addr(endpoint), - response_expected(), + response_expected_timer(), read_delay_timer(), no_retry(no_reason), fork_head(), @@ -761,7 +767,7 @@ namespace eosio { syncing(false), protocol_version(0), peer_addr(), - response_expected(), + response_expected_timer(), read_delay_timer(), no_retry(no_reason), fork_head(), @@ -779,7 +785,8 @@ namespace eosio { void connection::initialize() { auto *rnd = node_id.data(); rnd[0] = 0; - response_expected.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); + // called only from constructor, no mutex needed + response_expected_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); } @@ -1116,15 +1123,19 @@ namespace eosio { to_sync_queue); } + // thread safe void connection::cancel_wait() { - if (response_expected) - response_expected->cancel(); + std::lock_guard g( response_expected_timer_mtx ); + if (response_expected_timer) + response_expected_timer->cancel(); } + // thread safe void connection::sync_wait() { - response_expected->expires_from_now( my_impl->resp_expected_period); connection_wptr c(shared_from_this()); - response_expected->async_wait( [c]( boost::system::error_code ec ) { + std::lock_guard g( response_expected_timer_mtx ); + response_expected_timer->expires_from_now( my_impl->resp_expected_period); + response_expected_timer->async_wait( [c]( boost::system::error_code ec ) { app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { @@ -1137,10 +1148,12 @@ namespace eosio { } ); } + // thread safe void connection::fetch_wait() { - response_expected->expires_from_now( my_impl->resp_expected_period); connection_wptr c(shared_from_this()); - response_expected->async_wait( [c]( boost::system::error_code ec ) { + std::lock_guard g( response_expected_timer_mtx ); + response_expected_timer->expires_from_now( my_impl->resp_expected_period); + response_expected_timer->async_wait( [c]( boost::system::error_code ec ) { app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { @@ -2625,8 +2638,9 @@ namespace eosio { } void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { - connector_check->expires_from_now( du); - connector_check->async_wait( [this, from_connection](boost::system::error_code ec) { + std::lock_guard g( 
connector_check_timer_mtx ); + connector_check_timer->expires_from_now( du); + connector_check_timer->async_wait( [this, from_connection](boost::system::error_code ec) { app().post( priority::low, [this, from_connection, ec]() { if( !ec) { connection_monitor(from_connection); @@ -2639,22 +2653,23 @@ namespace eosio { }); } - void net_plugin_impl::start_txn_timer() { - transaction_check->expires_from_now( txn_exp_period); - transaction_check->async_wait( [this]( boost::system::error_code ec ) { - int lower_than_low = priority::low - 1; - app().post( lower_than_low, [this, ec]() { - if( !ec ) { - expire_txns(); - } else { - fc_elog( logger, "Error from transaction check monitor: ${m}", ("m", ec.message())); - start_txn_timer(); - } - } ); - }); + // thread safe + void net_plugin_impl::start_expire_timer() { + std::lock_guard g( expire_timer_mtx ); + expire_timer->expires_from_now( txn_exp_period); + expire_timer->async_wait( [this]( boost::system::error_code ec ) { + if( !ec ) { + expire(); + } else { + fc_elog( logger, "Error from transaction check monitor: ${m}", ("m", ec.message()) ); + start_expire_timer(); + } + } ); } + // thread safe void net_plugin_impl::ticker() { + std::lock_guard g( keepalive_timer_mtx ); keepalive_timer->expires_from_now(keepalive_interval); keepalive_timer->async_wait( [this]( boost::system::error_code ec ) { app().post( priority::low, [this, ec]() { @@ -2672,14 +2687,20 @@ namespace eosio { } void net_plugin_impl::start_monitors() { - connector_check.reset(new boost::asio::steady_timer( *server_ioc )); - transaction_check.reset(new boost::asio::steady_timer( *server_ioc )); + { + std::lock_guard g( connector_check_timer_mtx ); + connector_check_timer.reset(new boost::asio::steady_timer( *server_ioc )); + } + { + std::lock_guard g( expire_timer_mtx ); + expire_timer.reset( new boost::asio::steady_timer( *server_ioc ) ); + } start_conn_timer(connector_period, std::weak_ptr()); - start_txn_timer(); + start_expire_timer(); } - void net_plugin_impl::expire_txns() { - start_txn_timer(); + void net_plugin_impl::expire() { + start_expire_timer(); auto now = time_point::now(); uint32_t lib = lib_num.load(); @@ -3034,7 +3055,10 @@ namespace eosio { } } - my->keepalive_timer.reset( new boost::asio::steady_timer( *my->server_ioc ) ); + { + std::lock_guard g( my->keepalive_timer_mtx ); + my->keepalive_timer.reset( new boost::asio::steady_timer( *my->server_ioc ) ); + } my->ticker(); if( my->acceptor ) { @@ -3088,12 +3112,19 @@ namespace eosio { if( my->server_ioc_work ) my->server_ioc_work->reset(); - if( my->connector_check ) - my->connector_check->cancel(); - if( my->transaction_check ) - my->transaction_check->cancel(); - if( my->keepalive_timer ) - my->keepalive_timer->cancel(); + { + std::lock_guard g( my->connector_check_timer_mtx ); + if( my->connector_check_timer ) + my->connector_check_timer->cancel(); + }{ + std::lock_guard g( my->expire_timer_mtx ); + if( my->expire_timer ) + my->expire_timer->cancel(); + }{ + std::lock_guard g( my->keepalive_timer_mtx ); + if( my->keepalive_timer ) + my->keepalive_timer->cancel(); + } my->done = true; if( my->acceptor ) { From b36787a4c3f25a23b6e943ba102ea8707ed77eba Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 14:44:48 -0500 Subject: [PATCH 0061/1648] Break expire into two steps --- plugins/net_plugin/net_plugin.cpp | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 
8b8b0d9ccfa..852ced707b5 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1658,19 +1658,21 @@ namespace eosio { void dispatch_manager::expire_txns( uint32_t lib_num ) { size_t start_size = 0, end_size = 0; - { - std::lock_guard g( local_txns_mtx ); - start_size = local_txns.size(); - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec( 0 ) ); - auto ex_up = old.upper_bound( time_point::now() ); - old.erase( ex_lo, ex_up ); + std::unique_lock g( local_txns_mtx ); + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 ) ); + auto ex_up = old.upper_bound( time_point::now() ); + old.erase( ex_lo, ex_up ); + g.unlock(); // allow other threads opportunity to use local_txns + + g.lock(); + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib_num ) ); + end_size = local_txns.size(); + g.unlock(); - auto& stale = local_txns.get(); - stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib_num ) ); - end_size = local_txns.size(); - } fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)( "r", start_size - end_size ) ); } @@ -2700,13 +2702,13 @@ namespace eosio { } void net_plugin_impl::expire() { - start_expire_timer(); - auto now = time_point::now(); uint32_t lib = lib_num.load(); dispatcher->expire_blocks( lib ); dispatcher->expire_txns( lib ); fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); + + start_expire_timer(); } void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { From 91516ed676a480d858511b09b018c664fb063a0a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 18:17:52 -0500 Subject: [PATCH 0062/1648] Fix issue with sync and known block optimization --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 852ced707b5..7f477c07e0f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2195,7 +2195,7 @@ namespace eosio { block_id_type blk_id = bh.id(); if( dispatcher->have_block( blk_id ) ) { connection_wptr weak = conn; - app().post(priority::low, + app().post(priority::high, // high since block processing is high and this needs to run before next block [dispatcher = dispatcher.get(), sync_master = sync_master.get(), weak{std::move(weak)}, blk_id] { connection_ptr c = weak.lock(); if(c) { From f650ec0552c2b0dfd90751539f25dec4f61b87df Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 18:44:16 -0500 Subject: [PATCH 0063/1648] Add transaction id to log message --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7f477c07e0f..5ccd7d4b8d0 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2533,7 +2533,6 @@ namespace eosio { // called from thread_pool threads void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); - peer_ilog(c, "received packed_transaction"); if( db_read_mode == eosio::db_read_mode::READ_ONLY ) { fc_dlog(logger, "got a txn in read-only mode - dropping"); return; @@ -2541,6 +2540,7 @@ namespace eosio { auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; + 
peer_ilog(c, "received packed_transaction ${id}", ("id", tid)); bool have_trx = dispatcher->have_txn( tid ); connection_wptr weak_ptr = c; @@ -2549,7 +2549,7 @@ namespace eosio { dispatcher->recv_transaction(c, ptrx); }); if( have_trx ) { - fc_dlog( logger, "got a duplicate transaction - dropping" ); + fc_dlog( logger, "got a duplicate transaction - dropping ${id}", ("id", tid) ); return; } From 4c8a88a52bf8ba84d5295a7447e041e72f4911c7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 21 Mar 2019 07:35:16 -0500 Subject: [PATCH 0064/1648] Remove dead code --- .../include/eosio/net_plugin/net_plugin.hpp | 1 - plugins/net_plugin/net_plugin.cpp | 15 --------------- 2 files changed, 16 deletions(-) diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index 01c1383468d..3bc594dd313 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -38,7 +38,6 @@ namespace eosio { optional status( const string& endpoint )const; vector connections()const; - size_t num_peers() const; private: std::unique_ptr my; }; diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 5ccd7d4b8d0..df897d887e3 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -278,7 +278,6 @@ namespace eosio { bool process_next_message(const connection_ptr& conn, uint32_t message_length); void close(const connection_ptr& c); - size_t count_open_sockets() const; void accepted_block(const block_state_ptr&); void transaction_ack(const std::pair&); @@ -2229,16 +2228,6 @@ namespace eosio { return true; } - size_t net_plugin_impl::count_open_sockets() const - { - size_t count = 0; - for( auto &c : connections) { - if(c->socket->is_open()) - ++count; - } - return count; - } - bool net_plugin_impl::is_valid(const handshake_message& msg) { // Do some basic validation of an incoming handshake_message, so things // that really aren't handshake messages can be quickly discarded without @@ -3153,10 +3142,6 @@ namespace eosio { FC_CAPTURE_AND_RETHROW() } - size_t net_plugin::num_peers() const { - return my->count_open_sockets(); - } - /** * Used to trigger a new connection from RPC API */ From 7e728f4ccbe1a52bb5dc2e8df13576d982d56022 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 21 Mar 2019 08:21:39 -0500 Subject: [PATCH 0065/1648] Protect start_read_message via strand --- plugins/net_plugin/net_plugin.cpp | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index df897d887e3..33a2b8e999e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -548,7 +548,8 @@ namespace eosio { optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive - socket_ptr socket; + boost::asio::io_context::strand strand; + socket_ptr socket; // only accessed through strand after construction fc::message_buffer<1024*1024> pending_message_buffer; std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads @@ -732,6 +733,7 @@ namespace eosio { connection::connection( string endpoint ) : peer_requested(), server_ioc( my_impl->server_ioc ), + strand( *my_impl->server_ioc ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), connection_id( ++my_impl->current_connection_id ), @@ -756,6 +758,7 @@ namespace eosio { 
connection::connection( socket_ptr s ) : peer_requested(), server_ioc( my_impl->server_ioc ), + strand( *my_impl->server_ioc ), socket( s ), node_id(), connection_id( ++my_impl->current_connection_id ), @@ -1935,7 +1938,7 @@ namespace eosio { return false; } else { - boost::asio::post(*server_ioc, [this, con]() { + con->strand.post( [this, con]() { start_read_message( con ); }); ++started_sessions; @@ -2014,7 +2017,7 @@ namespace eosio { }); } - // only called from server_ioc thread + // only called from strand thread void net_plugin_impl::start_read_message(const connection_ptr& conn) { try { @@ -2074,7 +2077,7 @@ namespace eosio { if( !conn->read_delay_timer ) return; conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); conn->read_delay_timer->async_wait( - app().get_priority_queue().wrap( priority::low, [this, weak_conn]( boost::system::error_code ) { + boost::asio::bind_executor(conn->strand, [this, weak_conn]( boost::system::error_code ) { auto conn = weak_conn.lock(); if( !conn ) return; start_read_message( conn ); @@ -2085,6 +2088,7 @@ namespace eosio { ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, + boost::asio::bind_executor( conn->strand, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { auto conn = weak_conn.lock(); if (!conn) { @@ -2168,7 +2172,7 @@ namespace eosio { close( conn ); }); } - }); + })); } catch (...) { fc_elog( logger, "Undefined exception in start_read_message" ); connection_wptr weak_conn = conn; From e1ea686eb64f2cdb84376ab12b4194a7eec13303 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 21 Mar 2019 23:19:07 -0500 Subject: [PATCH 0066/1648] Made all access to impl->connections thread safe --- plugins/net_plugin/net_plugin.cpp | 385 ++++++++++++++++-------------- 1 file changed, 207 insertions(+), 178 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 33a2b8e999e..776014afdbd 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -56,6 +56,14 @@ namespace eosio { using socket_ptr = std::shared_ptr; using io_work_t = boost::asio::executor_work_guard; + template + void verify_strand_in_this_thread(const Strand& strand, const char* func, int line) { + if( !strand.running_in_this_thread() ) { + elog( "wrong strand: ${f} : line ${n}, exiting", ("f", func)("n", line) ); + app().quit(); + } + } + struct node_transaction_state { transaction_id_type id; time_point_sec expires; /// time after which this may be purged. 
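// A minimal standalone sketch of the strand discipline these patches adopt around the
// connection socket: reads are started by posting onto one strand, completion handlers
// are bound to the same strand with bind_executor, and verify_strand_in_this_thread-style
// checks assert that strand-confined state is only touched from that strand. Simplified,
// hypothetical names -- not the actual connection class.
#include <boost/asio.hpp>
#include <cstdio>

struct session {
   boost::asio::io_context::strand  strand;
   boost::asio::ip::tcp::socket     socket;   // only touched through the strand
   char                             buf[1024];

   explicit session( boost::asio::io_context& ioc ) : strand( ioc ), socket( ioc ) {}

   void assert_on_strand( const char* func ) const {
      if( !strand.running_in_this_thread() )
         std::fprintf( stderr, "wrong strand: %s\n", func );
   }

   void start_read() {
      // serialize with all other socket work by posting onto the strand
      strand.post( [this]() {
         assert_on_strand( "start_read" );
         socket.async_read_some( boost::asio::buffer( buf ),
            // completion handler runs on the same strand
            boost::asio::bind_executor( strand,
               [this]( boost::system::error_code ec, std::size_t n ) {
                  assert_on_strand( "read handler" );
                  if( !ec && n > 0 )
                     start_read();   // continue reading, still on the strand
               } ) );
      } );
   }
};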
@@ -199,7 +207,6 @@ namespace eosio { string p2p_server_address; uint32_t max_client_count = 0; uint32_t max_nodes_per_host = 1; - uint32_t num_clients = 0; uint32_t current_connection_id = 0; vector supplied_peers; @@ -217,7 +224,8 @@ namespace eosio { connection_ptr find_connection(const string& host)const; - std::set< connection_ptr > connections; + mutable std::mutex connections_mtx; + std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection bool done = false; unique_ptr< sync_manager > sync_master; unique_ptr< dispatch_manager > dispatcher; @@ -261,7 +269,7 @@ namespace eosio { optional server_ioc_work; - void connect(const connection_ptr& c); + bool resolve_and_connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); bool start_session(const connection_ptr& c); void start_listen_loop(); @@ -277,8 +285,6 @@ namespace eosio { */ bool process_next_message(const connection_ptr& conn, uint32_t message_length); - void close(const connection_ptr& c); - void accepted_block(const block_state_ptr&); void transaction_ack(const std::pair&); void on_irreversible_block( const block_state_ptr& blk ) { @@ -542,14 +548,28 @@ namespace eosio { explicit connection( socket_ptr s ); ~connection(); + + void start(); + + bool socket_is_open() const { return socket_open.load(); } // thread safe + const string& peer_address() const { return peer_addr; } // thread safe + const string& remote_address() const { return socket_open.load() ? remote_endpoint_ip : unknown; } // thread safe, not updated after start() + private: + static const string unknown; + void initialize(); // only called from constructor + void update_endpoints(); + public: optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive boost::asio::io_context::strand strand; socket_ptr socket; // only accessed through strand after construction + private: + std::atomic socket_open{false}; + public: fc::message_buffer<1024*1024> pending_message_buffer; std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads @@ -567,14 +587,21 @@ namespace eosio { bool connecting = false; bool syncing = false; uint16_t protocol_version = 0; - string peer_addr; + private: + const string peer_addr; + string remote_endpoint_ip; // not updated after start + string remote_endpoint_port; // not updated after start + string local_endpoint_ip; // not updated after start + string local_endpoint_port; // not updated after start + public: + std::mutex response_expected_timer_mtx; unique_ptr response_expected_timer; std::mutex read_delay_timer_mtx; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; - uint32_t fork_head_num = 0; + std::atomic fork_head_num{0}; // provides memory barrier for fork_head optional last_req; connection_status get_status()const { @@ -663,30 +690,24 @@ namespace eosio { fc::optional _logger_variant; const fc::variant_object& get_logger_variant() { if (!_logger_variant) { - boost::system::error_code ec; - auto rep = socket->remote_endpoint(ec); - string ip = ec ? "" : rep.address().to_string(); - string port = ec ? "" : std::to_string(rep.port()); - - auto lep = socket->local_endpoint(ec); - string lip = ec ? "" : lep.address().to_string(); - string lport = ec ? 
"" : std::to_string(lep.port()); - _logger_variant.emplace(fc::mutable_variant_object() ("_name", peer_name()) ("_id", node_id) ("_sid", ((string)node_id).substr(0, 7)) - ("_ip", ip) - ("_port", port) - ("_lip", lip) - ("_lport", lport) + ("_ip", remote_endpoint_ip) + ("_port", remote_endpoint_port) + ("_lip", local_endpoint_ip) + ("_lport", local_endpoint_port) ); } return *_logger_variant; } }; - struct msg_handler : public fc::visitor { + const string connection::unknown = ""; + + +struct msg_handler : public fc::visitor { net_plugin_impl& impl; connection_ptr c; msg_handler( net_plugin_impl& imp, const connection_ptr& conn) : impl(imp), c(conn) {} @@ -747,8 +768,6 @@ namespace eosio { response_expected_timer(), read_delay_timer(), no_retry(no_reason), - fork_head(), - fork_head_num(0), last_req() { fc_ilog( logger, "created connection to ${n}", ("n", endpoint) ); @@ -772,8 +791,6 @@ namespace eosio { response_expected_timer(), read_delay_timer(), no_retry(no_reason), - fork_head(), - fork_head_num(0), last_req() { fc_ilog( logger, "accepted network connection" ); @@ -792,8 +809,26 @@ namespace eosio { read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); } + void connection::update_endpoints() { + boost::system::error_code ec; + auto rep = socket->remote_endpoint(ec); + remote_endpoint_ip = ec ? unknown : rep.address().to_string(); + remote_endpoint_port = ec ? unknown : std::to_string(rep.port()); + + auto lep = socket->local_endpoint(ec); + local_endpoint_ip = ec ? unknown : lep.address().to_string(); + local_endpoint_port = ec ? unknown : std::to_string(lep.port()); + } + + void connection::start() { + verify_strand_in_this_thread( strand, __func__, __LINE__ ); + + update_endpoints(); + socket_open = true; + } + bool connection::connected() { - return (socket && socket->is_open() && !connecting); + return socket_is_open() && !connecting; } bool connection::current() { @@ -809,12 +844,8 @@ namespace eosio { } void connection::close() { - if(socket) { - socket->close(); - } - else { - fc_wlog( logger, "no socket to close!" 
); - } + socket_open = false; + socket->close(); flush_queues(); connecting = false; syncing = false; @@ -943,7 +974,7 @@ namespace eosio { if( !buffer_queue.add_write_queue( buff, callback, to_sync_queue )) { fc_wlog( logger, "write_queue full ${s} bytes, giving up on connection ${p}", ("s", buffer_queue.write_queue_size())("p", peer_name()) ); - my_impl->close( shared_from_this() ); + close(); return; } if( buffer_queue.is_out_queue_empty() && trigger_send) { @@ -957,7 +988,7 @@ namespace eosio { connection_wptr c(shared_from_this()); if(!socket->is_open()) { fc_elog(logger,"socket not open to ${p}",("p",peer_name())); - my_impl->close(c.lock()); + close(); return; } std::vector bufs; @@ -980,7 +1011,7 @@ namespace eosio { else { fc_wlog( logger, "connection closure detected on write to ${p}",("p",pname) ); } - my_impl->close(conn); + conn->close(); return; } conn->buffer_queue.clear_out_queue(); @@ -1115,7 +1146,7 @@ namespace eosio { if (close_after_send != no_reason) { fc_elog( logger, "sent a go away message: ${r}, closing connection to ${p}", ("r", reason_str(close_after_send))("p", conn->peer_name()) ); - my_impl->close(conn); + conn->close(); return; } } else { @@ -1183,15 +1214,11 @@ namespace eosio { if( !last_handshake_recv.p2p_address.empty() ) { return last_handshake_recv.p2p_address; } - if( !peer_addr.empty() ) { - return peer_addr; + if( !peer_address().empty() ) { + return peer_address(); } - if( socket != nullptr ) { - boost::system::error_code ec; - auto rep = socket->remote_endpoint(ec); - if( !ec ) { - return rep.address().to_string() + ':' + std::to_string( rep.port() ); - } + if( remote_endpoint_port != unknown ) { + return remote_endpoint_ip + ":" + remote_endpoint_port; } return "connecting client"; } @@ -1247,12 +1274,13 @@ namespace eosio { state = newstate; } + // uses controller, only call from application thread bool sync_manager::is_active(const connection_ptr& c) { if (state == head_catchup && c) { + auto fork_head_num = c->fork_head_num.load(); // provide memory barrier for c->fork_head bool fhset = c->fork_head != block_id_type(); - fc_dlog(logger, "fork_head_num = ${fn} fork_head set = ${s}", - ("fn", c->fork_head_num)("s", fhset)); - return c->fork_head != block_id_type() && c->fork_head_num < chain_plug->chain().fork_db_head_block_num(); + fc_dlog( logger, "fork_head_num = ${fn} fork_head set = ${s}", ("fn", fork_head_num)( "s", fhset ) ); + return c->fork_head != block_id_type() && fork_head_num < chain_plug->chain().fork_db_head_block_num(); } return state != in_sync; } @@ -1296,14 +1324,13 @@ namespace eosio { if (conn && conn->current() ) { source = conn; - } - else { + } else { + std::lock_guard g( my_impl->connections_mtx ); if (my_impl->connections.size() == 1) { if (!source) { source = *my_impl->connections.begin(); } - } - else { + } else { // init to a linear array search auto cptr = my_impl->connections.begin(); auto cend = my_impl->connections.end(); @@ -1364,8 +1391,9 @@ namespace eosio { void sync_manager::send_handshakes() { - for( auto &ci : my_impl->connections) { - if( ci->current()) { + std::lock_guard g( my_impl->connections_mtx ); + for( auto& ci : my_impl->connections ) { + if( ci->current() ) { ci->send_handshake(); } } @@ -1483,13 +1511,15 @@ namespace eosio { void sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { request_message req; req.req_blocks.mode = catch_up; + std::unique_lock g( my_impl->connections_mtx ); for (const auto& cc : my_impl->connections) { - if (cc->fork_head == 
id || - cc->fork_head_num > num) { + // fork_head_num provides memory barrier for fork_head + if( cc->fork_head_num > num || cc->fork_head == id ) { req.req_blocks.mode = none; break; } } + g.unlock(); if( req.req_blocks.mode == catch_up ) { c->fork_head = id; c->fork_head_num = num; @@ -1498,8 +1528,7 @@ namespace eosio { if (state == lib_catchup) return; set_state(head_catchup); - } - else { + } else { c->fork_head = block_id_type(); c->fork_head_num = 0; } @@ -1514,7 +1543,7 @@ namespace eosio { if( msg.known_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}", ("s", msg.known_blocks.ids.size())("p", c->peer_name()) ); - my_impl->close(c); + c->close(); return; } if (msg.known_blocks.mode == catch_up) { @@ -1535,7 +1564,7 @@ namespace eosio { fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_name()) ); sync_last_requested_num = 0; source.reset(); - my_impl->close(c); + c->close(); set_state(in_sync); send_handshakes(); } @@ -1546,7 +1575,7 @@ namespace eosio { if (blk_num != sync_next_expected_num) { fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_name()) ); - my_impl->close(c); + c->close(); return; } sync_next_expected_num = blk_num + 1; @@ -1557,18 +1586,20 @@ namespace eosio { source.reset(); block_id_type null_id; - for (const auto& cp : my_impl->connections) { + std::unique_lock g( my_impl->connections_mtx ); + for( const auto& cp : my_impl->connections ) { + uint32_t fork_head_num = cp->fork_head_num.load(); // fork_head_num provides memory barrier for fork_head if (cp->fork_head == null_id) { continue; } - if (cp->fork_head == blk_id || cp->fork_head_num < blk_num) { + if( fork_head_num < blk_num || cp->fork_head == blk_id ) { c->fork_head = null_id; c->fork_head_num = 0; - } - else { + } else { set_state(head_catchup); } } + g.unlock(); if (state == in_sync) { send_handshakes(); @@ -1690,6 +1721,7 @@ namespace eosio { fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); std::shared_ptr> send_buffer; + std::lock_guard g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; @@ -1736,6 +1768,7 @@ namespace eosio { node_transaction_state nts = {id, trx_expiration, 0, 0}; std::shared_ptr> send_buffer; + std::lock_guard g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; @@ -1831,6 +1864,7 @@ namespace eosio { ("b",modes_str(c->last_req->req_blocks.mode))("t",modes_str(c->last_req->req_trx.mode))); return; } + std::unique_lock g( my_impl->connections_mtx ); for (auto& conn : my_impl->connections) { if (conn == c || conn->last_req) { continue; @@ -1843,6 +1877,7 @@ namespace eosio { return; } } + g.unlock(); // at this point no other peer has it, re-request or do nothing? if( c->connected() ) { @@ -1853,29 +1888,30 @@ namespace eosio { //------------------------------------------------------------------------ - void net_plugin_impl::connect(const connection_ptr& c) { + bool net_plugin_impl::resolve_and_connect(const connection_ptr& c) { if( c->no_retry != go_away_reason::no_reason) { fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( c->no_retry ))); - return; + return false; } - auto colon = c->peer_addr.find(':'); + auto colon = c->peer_address().find(':'); if (colon == std::string::npos || colon == 0) { - fc_elog( logger, "Invalid peer address. 
must be \"host:port\": ${p}", ("p",c->peer_addr) ); - for ( auto itr : connections ) { - if((*itr).peer_addr == c->peer_addr) { - (*itr).reset(); - close(itr); - connections.erase(itr); + fc_elog( logger, "Invalid peer address. must be \"host:port\": ${p}", ("p",c->peer_address()) ); + std::lock_guard g( my_impl->connections_mtx ); + for ( auto& cp : connections ) { + if( cp->peer_address() == c->peer_address() ) { + cp->reset(); + cp->close(); + connections.erase( cp ); break; } } - return; + return false; } - auto host = c->peer_addr.substr( 0, colon ); - auto port = c->peer_addr.substr( colon + 1); + auto host = c->peer_address().substr( 0, colon ); + auto port = c->peer_address().substr( colon + 1); idump((host)(port)); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str() ); connection_wptr weak_conn = c; @@ -1889,11 +1925,12 @@ namespace eosio { if( !err ) { connect( c, endpoint_itr ); } else { - fc_elog( logger, "Unable to resolve ${peer_addr}: ${error}", - ("peer_addr", c->peer_name())( "error", err.message()) ); + fc_elog( logger, "Unable to resolve ${add}: ${error}", + ("add", c->peer_name())( "error", err.message()) ); } } ); } ); + return true; } void net_plugin_impl::connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr) { @@ -1915,12 +1952,12 @@ namespace eosio { } } else { if( endpoint_itr != tcp::resolver::iterator()) { - close( c ); + c->close(); connect( c, endpoint_itr ); } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; - my_impl->close( c ); + c->close(); } } } ); @@ -1934,11 +1971,11 @@ namespace eosio { if (ec) { fc_elog( logger, "connection failed to ${peer}: ${error}", ( "peer", con->peer_name())("error",ec.message()) ); con->connecting = false; - close(con); + con->close(); return false; - } - else { + } else { con->strand.post( [this, con]() { + con->start(); start_read_message( con ); }); ++started_sessions; @@ -1951,69 +1988,65 @@ namespace eosio { void net_plugin_impl::start_listen_loop() { - auto socket = std::make_shared( std::ref( *server_ioc ) ); - acceptor->async_accept( *socket, [socket, this, ioc = server_ioc]( boost::system::error_code ec ) { - app().post( priority::low, [socket, this, ec, ioc{std::move(ioc)}]() { - if( !ec ) { - uint32_t visitors = 0; - uint32_t from_addr = 0; - boost::system::error_code rec; - auto paddr = socket->remote_endpoint(rec).address(); - if (rec) { - fc_elog(logger,"Error getting remote endpoint: ${m}",("m", rec.message())); - } - else { - for (auto &conn : connections) { - if(conn->socket->is_open()) { - if (conn->peer_addr.empty()) { - visitors++; - boost::system::error_code ec; - if (paddr == conn->socket->remote_endpoint(ec).address()) { - from_addr++; - } + auto new_socket = std::make_shared( std::ref( *server_ioc ) ); + acceptor->async_accept( *new_socket, [new_socket, this, ioc = server_ioc]( boost::system::error_code ec ) { + // called from thread_pool threads, new_socket not shared yet + if( !ec ) { + uint32_t visitors = 0; + uint32_t from_addr = 0; + boost::system::error_code rec; + const auto& paddr_add = new_socket->remote_endpoint( rec ).address(); + string paddr_str; + if( rec ) { + fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message()) ); + } else { + paddr_str = paddr_add.to_string(); + std::unique_lock g( connections_mtx ); + for( auto& conn : connections ) { + if( conn->socket_is_open() ) { + if( conn->peer_address().empty() ) { + ++visitors; + if( paddr_str == 
conn->remote_address() ) { + ++from_addr; } } } - if (num_clients != visitors) { - fc_ilog( logger,"checking max client, visitors = ${v} num clients ${n}",("v",visitors)("n",num_clients) ); - num_clients = visitors; + } + g.unlock(); + if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count) ) { + connection_ptr new_connection = std::make_shared( new_socket ); + if( start_session( new_connection ) ) { + g.lock(); + connections.insert( new_connection ); + g.unlock(); } - if( from_addr < max_nodes_per_host && (max_client_count == 0 || num_clients < max_client_count )) { - ++num_clients; - connection_ptr c = std::make_shared( socket ); - connections.insert( c ); - start_session( c ); + } else { + if( from_addr >= max_nodes_per_host ) { + fc_elog( logger, "Number of connections (${n}) from ${ra} exceeds limit", + ("n", from_addr + 1)( "ra", paddr_str ) ); + } else { + fc_elog( logger, "Error max_client_count ${m} exceeded", ("m", max_client_count) ); } - else { - if (from_addr >= max_nodes_per_host) { - fc_elog(logger, "Number of connections (${n}) from ${ra} exceeds limit", - ("n", from_addr+1)("ra",paddr.to_string())); - } - else { - fc_elog(logger, "Error max_client_count ${m} exceeded", - ( "m", max_client_count) ); - } - socket->close(); - } - } - } else { - fc_elog( logger, "Error accepting connection: ${m}",( "m", ec.message() ) ); - // For the listed error codes below, recall start_listen_loop() - switch (ec.value()) { - case ECONNABORTED: - case EMFILE: - case ENFILE: - case ENOBUFS: - case ENOMEM: - case EPROTO: - break; - default: - return; + new_socket->close(); // new_socket never associated with a connection } } - start_listen_loop(); - }); + } else { + fc_elog( logger, "Error accepting connection: ${m}", ("m", ec.message()) ); + // For the listed error codes below, recall start_listen_loop() + switch( ec.value() ) { + case ECONNABORTED: + case EMFILE: + case ENFILE: + case ENOBUFS: + case ENOMEM: + case EPROTO: + break; + default: + return; + } + } + start_listen_loop(); }); } @@ -2069,7 +2102,7 @@ namespace eosio { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - my_impl->close( conn ); + conn->close(); }); return; } @@ -2165,22 +2198,22 @@ namespace eosio { } if( close_connection ) { - app().post( priority::medium, [this, weak_conn]() { + app().post( priority::medium, [weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - close( conn ); + conn->close(); }); } })); } catch (...) 
{ fc_elog( logger, "Undefined exception in start_read_message" ); connection_wptr weak_conn = conn; - app().post( priority::medium, [this, weak_conn]() { + app().post( priority::medium, [weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - close( conn ); + conn->close(); }); } } @@ -2226,7 +2259,7 @@ namespace eosio { } catch( const fc::exception& e ) { fc_elog( logger, "Exception in handling message from ${p}: ${s}", ("p", conn->peer_name())("s", e.to_detail_string()) ); - close( conn ); + conn->close(); return false; } return true; @@ -2281,9 +2314,10 @@ namespace eosio { return; } - if( c->peer_addr.empty() || c->last_handshake_recv.node_id == fc::sha256()) { + if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { fc_dlog(logger, "checking for duplicate" ); - for(const auto &check : connections) { + std::lock_guard g( connections_mtx ); + for(const auto& check : connections) { if(check == c) continue; if(check->connected() && check->peer_name() == msg.p2p_address) { @@ -2302,9 +2336,9 @@ namespace eosio { return; } } - } - else { - fc_dlog(logger, "skipping duplicate check, addr == ${pa}, id = ${ni}",("pa",c->peer_addr)("ni",c->last_handshake_recv.node_id)); + } else { + fc_dlog( logger, "skipping duplicate check, addr == ${pa}, id = ${ni}", + ("pa", c->peer_address())( "ni", c->last_handshake_recv.node_id ) ); } if( msg.chain_id != chain_id) { @@ -2375,7 +2409,7 @@ namespace eosio { c->node_id = msg.node_id; } c->flush_queues(); - close(c); + c->close(); } void net_plugin_impl::handle_message(const connection_ptr& c, const time_message& msg) { @@ -2468,7 +2502,7 @@ namespace eosio { if( msg.req_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid request_message, req_blocks.ids.size ${s}, closing ${p}", ("s", msg.req_blocks.ids.size())("p",c->peer_name()) ); - close(c); + c->close(); return; } @@ -2496,8 +2530,8 @@ namespace eosio { // no break case normal : if( !msg.req_trx.ids.empty() ) { - elog( "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); - close(c); + fc_elog( logger, "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); + c->close(); return; } break; @@ -2672,8 +2706,9 @@ namespace eosio { if( ec ) { fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } + std::lock_guard g( connections_mtx ); for( auto& c : connections ) { - if( c->socket->is_open()) { + if( c->socket_is_open() ) { c->send_time(); } } @@ -2708,6 +2743,7 @@ namespace eosio { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); + std::unique_lock g( connections_mtx ); auto it = (from ? 
connections.find(from) : connections.begin()); if (it == connections.end()) it = connections.begin(); while (it != connections.end()) { @@ -2715,32 +2751,20 @@ namespace eosio { start_conn_timer(std::chrono::milliseconds(1), *it); // avoid exhausting return; } - if( !(*it)->socket->is_open() && !(*it)->connecting) { - if( (*it)->peer_addr.length() > 0) { - connect(*it); - } - else { + if( !(*it)->socket_is_open() && !(*it)->connecting) { + if( (*it)->peer_address().length() > 0) { + resolve_and_connect(*it); + } else { it = connections.erase(it); continue; } } ++it; } + g.unlock(); start_conn_timer(connector_period, std::weak_ptr()); } - void net_plugin_impl::close(const connection_ptr& c) { - if( c->peer_addr.empty() && c->socket->is_open() ) { - if (num_clients == 0) { - fc_wlog( logger, "num_clients already at 0"); - } - else { - --num_clients; - } - } - c->close(); - } - void net_plugin_impl::accepted_block(const block_state_ptr& block) { fc_dlog(logger,"signaled, id = ${id}",("id", block->id)); dispatcher->bcast_block(block); @@ -2943,7 +2967,6 @@ namespace eosio { my->resp_expected_period = def_resp_expected_wait; my->max_client_count = options.at( "max-clients" ).as(); my->max_nodes_per_host = options.at( "p2p-max-nodes-per-host" ).as(); - my->num_clients = 0; my->started_sessions = 0; my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as(); @@ -3090,7 +3113,7 @@ namespace eosio { my->start_monitors(); - for( auto seed_node : my->supplied_peers ) { + for( const auto& seed_node : my->supplied_peers ) { connect( seed_node ); } handle_sighup(); @@ -3128,9 +3151,10 @@ namespace eosio { my->acceptor->close(); fc_ilog( logger, "close ${s} connections",( "s",my->connections.size()) ); + std::lock_guard g( my->connections_mtx ); for( auto& con : my->connections ) { fc_dlog( logger, "close: ${p}", ("p",con->peer_name()) ); - my->close( con ); + con->close(); } my->connections.clear(); } @@ -3155,18 +3179,21 @@ namespace eosio { connection_ptr c = std::make_shared(host); fc_dlog(logger,"adding new connection to the list"); + std::unique_lock g( my->connections_mtx ); my->connections.insert( c ); + g.unlock(); fc_dlog(logger,"calling active connector"); - my->connect( c ); + my->resolve_and_connect( c ); return "added connection"; } string net_plugin::disconnect( const string& host ) { + std::lock_guard g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { - if( (*itr)->peer_addr == host ) { + if( (*itr)->peer_address() == host ) { (*itr)->reset(); fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) ); - my->close(*itr); + (*itr)->close(); my->connections.erase(itr); return "connection removed"; } @@ -3183,6 +3210,7 @@ namespace eosio { vector net_plugin::connections()const { vector result; + std::lock_guard g( my->connections_mtx ); result.reserve( my->connections.size() ); for( const auto& c : my->connections ) { result.push_back( c->get_status() ); @@ -3190,8 +3218,9 @@ namespace eosio { return result; } connection_ptr net_plugin_impl::find_connection(const string& host )const { + std::lock_guard g( connections_mtx ); for( const auto& c : connections ) - if( c->peer_addr == host ) return c; + if( c->peer_address() == host ) return c; return connection_ptr(); } From e63b66c3e0fef4974926ece65f795745eb65cfc8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 21 Mar 2019 23:51:21 -0500 Subject: [PATCH 0067/1648] Fix deadlock on close when closing connection that is syncing from peer --- 
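The one-line fix below replaces sync_master->reset_lib_num(shared_from_this()) with reset_lib_num(nullptr) in the connection close/reset path and adds an early "if( !c ) return;" guard, so tearing down a peer that is mid-sync no longer hands the closing connection back into the sync manager. A minimal sketch of that calling convention follows; the type and function names (sync_state_manager, conn_state, close_connection) are invented for illustration, and the real deadlock involves more shared state than is shown here.

#include <memory>
#include <mutex>

struct conn_state { bool syncing_from_peer = true; };
using conn_ptr = std::shared_ptr<conn_state>;

class sync_state_manager {
   std::mutex mtx_;
public:
   void reset_lib_num( const conn_ptr& c ) {
      std::lock_guard<std::mutex> g( mtx_ );
      // reset bookkeeping that does not depend on any particular peer ...
      if( !c ) return;   // null now means "no connection to consult"
      // only a live, non-closing peer reaches the code that reads its state
      if( c->syncing_from_peer ) { /* ... */ }
   }
};

void close_connection( sync_state_manager& sm, const conn_ptr& /*self*/ ) {
   // before: sm.reset_lib_num( self );  could re-enter sync logic for the peer being closed
   sm.reset_lib_num( nullptr );          // after: reset bookkeeping only, no callback into self
}

The point is only the convention: a null connection means "reset state, do not consult or re-enter any peer", which breaks the close -> sync manager -> closing connection cycle named in the subject line.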
plugins/net_plugin/net_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 776014afdbd..87b7c59a541 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -856,7 +856,7 @@ struct msg_handler : public fc::visitor { sent_handshake_count = 0; last_handshake_recv = handshake_message(); last_handshake_sent = handshake_message(); - my_impl->sync_master->reset_lib_num(shared_from_this()); + my_impl->sync_master->reset_lib_num(nullptr); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); @@ -1289,6 +1289,7 @@ struct msg_handler : public fc::visitor { if(state == in_sync) { source.reset(); } + if( !c ) return; if( c->current() ) { if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num) { sync_known_lib_num =c->last_handshake_recv.last_irreversible_block_num; From 1bcb0fb81635ccf5732fa65b9c9ba5381db0f437 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 22 Mar 2019 13:28:15 -0500 Subject: [PATCH 0068/1648] Move socket ownership into connection. --- plugins/net_plugin/net_plugin.cpp | 309 ++++++++++++------------------ 1 file changed, 119 insertions(+), 190 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 87b7c59a541..6e4636211e7 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -53,7 +53,6 @@ namespace eosio { using connection_ptr = std::shared_ptr; using connection_wptr = std::weak_ptr; - using socket_ptr = std::shared_ptr; using io_work_t = boost::asio::executor_work_guard; template @@ -207,7 +206,7 @@ namespace eosio { string p2p_server_address; uint32_t max_client_count = 0; uint32_t max_nodes_per_host = 1; - uint32_t current_connection_id = 0; + std::atomic current_connection_id{0}; vector supplied_peers; vector allowed_peers; ///< peer keys allowed to connect @@ -224,7 +223,7 @@ namespace eosio { connection_ptr find_connection(const string& host)const; - mutable std::mutex connections_mtx; + mutable std::mutex connections_mtx; // switch to shared_mutex in C++17 std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection bool done = false; unique_ptr< sync_manager > sync_master; @@ -254,7 +253,6 @@ namespace eosio { string user_agent_name; chain_plugin* chain_plug = nullptr; producer_plugin* producer_plug = nullptr; - int started_sessions = 0; shared_ptr resolver; @@ -271,7 +269,6 @@ namespace eosio { bool resolve_and_connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); - bool start_session(const connection_ptr& c); void start_listen_loop(); void start_read_message(const connection_ptr& c); @@ -546,19 +543,18 @@ namespace eosio { public: explicit connection( string endpoint ); - explicit connection( socket_ptr s ); + connection(); ~connection(); - void start(); + bool start_session(); - bool socket_is_open() const { return socket_open.load(); } // thread safe - const string& peer_address() const { return peer_addr; } // thread safe - const string& remote_address() const { return socket_open.load() ? 
remote_endpoint_ip : unknown; } // thread safe, not updated after start() + bool socket_is_open() const { return socket_open.load(); } // thread safe, atomic + const string& peer_address() const { return peer_addr; } // thread safe, const + const string& remote_address() const { return socket_open.load() ? remote_endpoint_ip : unknown; } // thread safe, not updated after start_session() private: static const string unknown; - void initialize(); // only called from constructor void update_endpoints(); public: @@ -566,7 +562,7 @@ namespace eosio { optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive boost::asio::io_context::strand strand; - socket_ptr socket; // only accessed through strand after construction + tcp::socket socket; // only accessed through strand after construction private: std::atomic socket_open{false}; public: @@ -596,9 +592,9 @@ namespace eosio { public: std::mutex response_expected_timer_mtx; - unique_ptr response_expected_timer; + boost::asio::steady_timer response_expected_timer; std::mutex read_delay_timer_mtx; - unique_ptr read_delay_timer; + boost::asio::steady_timer read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; std::atomic fork_head_num{0}; // provides memory barrier for fork_head @@ -634,6 +630,9 @@ namespace eosio { bool current(); void reset(); void close(); + private: + static void _close( connection* self ); // for easy capture + public: void send_handshake(); /** \name Peer Timestamps @@ -755,7 +754,7 @@ struct msg_handler : public fc::visitor { : peer_requested(), server_ioc( my_impl->server_ioc ), strand( *my_impl->server_ioc ), - socket( std::make_shared( std::ref( *my_impl->server_ioc ))), + socket( *my_impl->server_ioc ), node_id(), connection_id( ++my_impl->current_connection_id ), last_handshake_recv(), @@ -765,20 +764,20 @@ struct msg_handler : public fc::visitor { syncing(false), protocol_version(0), peer_addr(endpoint), - response_expected_timer(), - read_delay_timer(), + response_expected_timer( *my_impl->server_ioc ), + read_delay_timer( *my_impl->server_ioc ), no_retry(no_reason), last_req() { fc_ilog( logger, "created connection to ${n}", ("n", endpoint) ); - initialize(); + node_id.data()[0] = 0; } - connection::connection( socket_ptr s ) + connection::connection() : peer_requested(), server_ioc( my_impl->server_ioc ), strand( *my_impl->server_ioc ), - socket( s ), + socket( *my_impl->server_ioc ), node_id(), connection_id( ++my_impl->current_connection_id ), last_handshake_recv(), @@ -788,43 +787,47 @@ struct msg_handler : public fc::visitor { syncing(false), protocol_version(0), peer_addr(), - response_expected_timer(), - read_delay_timer(), + response_expected_timer( *my_impl->server_ioc ), + read_delay_timer( *my_impl->server_ioc ), no_retry(no_reason), last_req() { fc_ilog( logger, "accepted network connection" ); - initialize(); + node_id.data()[0] = 0; } connection::~connection() { pending_message_buffer.reset(); } - void connection::initialize() { - auto *rnd = node_id.data(); - rnd[0] = 0; - // called only from constructor, no mutex needed - response_expected_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); - read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); - } - void connection::update_endpoints() { boost::system::error_code ec; - auto rep = socket->remote_endpoint(ec); + auto rep = socket.remote_endpoint(ec); remote_endpoint_ip = ec ? 
unknown : rep.address().to_string(); remote_endpoint_port = ec ? unknown : std::to_string(rep.port()); - auto lep = socket->local_endpoint(ec); + auto lep = socket.local_endpoint(ec); local_endpoint_ip = ec ? unknown : lep.address().to_string(); local_endpoint_port = ec ? unknown : std::to_string(lep.port()); } - void connection::start() { + bool connection::start_session() { verify_strand_in_this_thread( strand, __func__, __LINE__ ); update_endpoints(); - socket_open = true; + boost::asio::ip::tcp::no_delay nodelay( true ); + boost::system::error_code ec; + socket.set_option( nodelay, ec ); + if( ec ) { + fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", peer_name())( "error", ec.message() ) ); + connecting = false; + close(); + return false; + } else { + socket_open = true; + my_impl->start_read_message( shared_from_this() ); + return true; + } } bool connection::connected() { @@ -844,24 +847,30 @@ struct msg_handler : public fc::visitor { } void connection::close() { - socket_open = false; - socket->close(); - flush_queues(); - connecting = false; - syncing = false; - if( last_req ) { - my_impl->dispatcher->retry_fetch(shared_from_this()); + strand.dispatch( [self = shared_from_this()]() { + connection::_close( self.get() ); + }); + } + + void connection::_close( connection* self ) { + self->socket_open = false; + self->socket.close(); + self->flush_queues(); + self->connecting = false; + self->syncing = false; + if( self->last_req ) { + my_impl->dispatcher->retry_fetch( self->shared_from_this() ); } - reset(); - sent_handshake_count = 0; - last_handshake_recv = handshake_message(); - last_handshake_sent = handshake_message(); - my_impl->sync_master->reset_lib_num(nullptr); - fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); - cancel_wait(); + self->reset(); + self->sent_handshake_count = 0; + self->last_handshake_recv = handshake_message(); + self->last_handshake_sent = handshake_message(); + my_impl->sync_master->reset_lib_num( nullptr ); + fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); + self->cancel_wait(); - std::lock_guard g( read_delay_timer_mtx ); - if( read_delay_timer ) read_delay_timer->cancel(); + std::lock_guard g( self->read_delay_timer_mtx ); + self->read_delay_timer.cancel(); } void connection::blk_send_branch() { @@ -985,56 +994,36 @@ struct msg_handler : public fc::visitor { void connection::do_queue_write(int priority) { if( !buffer_queue.ready_to_send() ) return; - connection_wptr c(shared_from_this()); - if(!socket->is_open()) { - fc_elog(logger,"socket not open to ${p}",("p",peer_name())); - close(); - return; - } + connection_ptr c(shared_from_this()); + std::vector bufs; buffer_queue.fill_out_buffer( bufs ); - boost::asio::async_write(*socket, bufs, [c, priority]( boost::system::error_code ec, std::size_t w ) { - app().post(priority, [c, priority, ec, w]() { + boost::asio::async_write( socket, bufs, + boost::asio::bind_executor( strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { try { - auto conn = c.lock(); - if(!conn) - return; - - conn->buffer_queue.out_callback( ec, w ); + c->buffer_queue.out_callback( ec, w ); - if(ec) { - string pname = conn ? 
conn->peer_name() : "no connection name"; - if( ec.value() != boost::asio::error::eof) { - fc_elog( logger, "Error sending to peer ${p}: ${i}", ("p",pname)("i", ec.message()) ); - } - else { - fc_wlog( logger, "connection closure detected on write to ${p}",("p",pname) ); + if( ec ) { + if( ec.value() != boost::asio::error::eof ) { + fc_elog( logger, "Error sending to peer ${p}: ${i}", ("p", c->peer_name())( "i", ec.message() ) ); + } else { + fc_wlog( logger, "connection closure detected on write to ${p}", ("p", c->peer_name()) ); } - conn->close(); + c->close(); return; } - conn->buffer_queue.clear_out_queue(); - conn->enqueue_sync_block(); - conn->do_queue_write( priority ); - } - catch(const std::exception &ex) { - auto conn = c.lock(); - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger,"Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.what()) ); - } - catch(const fc::exception &ex) { - auto conn = c.lock(); - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger,"Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.to_string()) ); - } - catch(...) { - auto conn = c.lock(); - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger,"Exception in do_queue_write to ${p}", ("p",pname) ); + c->buffer_queue.clear_out_queue(); + c->enqueue_sync_block(); + c->do_queue_write( priority ); + } catch( const std::exception& ex ) { + fc_elog( logger, "Exception in do_queue_write to ${p} ${s}", ("p", c->peer_name())( "s", ex.what() ) ); + } catch( const fc::exception& ex ) { + fc_elog( logger, "Exception in do_queue_write to ${p} ${s}", ("p", c->peer_name())( "s", ex.to_string() ) ); + } catch( ... ) { + fc_elog( logger, "Exception in do_queue_write to ${p}", ("p", c->peer_name()) ); } - }); - }); + })); } void connection::cancel_sync(go_away_reason reason) { @@ -1138,20 +1127,15 @@ struct msg_handler : public fc::visitor { bool trigger_send, int priority, go_away_reason close_after_send, bool to_sync_queue) { - connection_wptr weak_this = shared_from_this(); + connection_ptr self = shared_from_this(); queue_write(send_buffer,trigger_send, priority, - [weak_this, close_after_send](boost::system::error_code ec, std::size_t ) { - connection_ptr conn = weak_this.lock(); - if (conn) { + [conn{std::move(self)}, close_after_send](boost::system::error_code ec, std::size_t ) { if (close_after_send != no_reason) { fc_elog( logger, "sent a go away message: ${r}, closing connection to ${p}", ("r", reason_str(close_after_send))("p", conn->peer_name()) ); conn->close(); return; } - } else { - fc_wlog(logger, "connection expired before enqueued net_message called callback!"); - } }, to_sync_queue); } @@ -1159,16 +1143,15 @@ struct msg_handler : public fc::visitor { // thread safe void connection::cancel_wait() { std::lock_guard g( response_expected_timer_mtx ); - if (response_expected_timer) - response_expected_timer->cancel(); + response_expected_timer.cancel(); } // thread safe void connection::sync_wait() { connection_wptr c(shared_from_this()); std::lock_guard g( response_expected_timer_mtx ); - response_expected_timer->expires_from_now( my_impl->resp_expected_period); - response_expected_timer->async_wait( [c]( boost::system::error_code ec ) { + response_expected_timer.expires_from_now( my_impl->resp_expected_period); + response_expected_timer.async_wait( [c]( boost::system::error_code ec ) { app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { @@ -1185,8 +1168,8 @@ struct 
msg_handler : public fc::visitor { void connection::fetch_wait() { connection_wptr c(shared_from_this()); std::lock_guard g( response_expected_timer_mtx ); - response_expected_timer->expires_from_now( my_impl->resp_expected_period); - response_expected_timer->async_wait( [c]( boost::system::error_code ec ) { + response_expected_timer.expires_from_now( my_impl->resp_expected_period); + response_expected_timer.async_wait( [c]( boost::system::error_code ec ) { app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { @@ -1942,13 +1925,10 @@ struct msg_handler : public fc::visitor { auto current_endpoint = *endpoint_itr; ++endpoint_itr; c->connecting = true; - connection_wptr weak_conn = c; - c->socket->async_connect( current_endpoint, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { - app().post( priority::low, [weak_conn, endpoint_itr, this, err]() { - auto c = weak_conn.lock(); - if( !c ) return; - if( !err && c->socket->is_open()) { - if( start_session( c )) { + c->socket.async_connect( current_endpoint, + boost::asio::bind_executor( c->strand, [c, endpoint_itr, this]( const boost::system::error_code& err ) { + if( !err && c->socket.is_open()) { + if( c->start_session() ) { c->send_handshake(); } } else { @@ -1961,42 +1941,18 @@ struct msg_handler : public fc::visitor { c->close(); } } - } ); - } ); - } - - bool net_plugin_impl::start_session(const connection_ptr& con) { - boost::asio::ip::tcp::no_delay nodelay( true ); - boost::system::error_code ec; - con->socket->set_option( nodelay, ec ); - if (ec) { - fc_elog( logger, "connection failed to ${peer}: ${error}", ( "peer", con->peer_name())("error",ec.message()) ); - con->connecting = false; - con->close(); - return false; - } else { - con->strand.post( [this, con]() { - con->start(); - start_read_message( con ); - }); - ++started_sessions; - return true; - // for now, we can just use the application main loop. 
- // con->readloop_complete = bf::async( [=](){ read_loop( con ); } ); - // con->writeloop_complete = bf::async( [=](){ write_loop con ); } ); - } + } ) ); } - void net_plugin_impl::start_listen_loop() { - auto new_socket = std::make_shared( std::ref( *server_ioc ) ); - acceptor->async_accept( *new_socket, [new_socket, this, ioc = server_ioc]( boost::system::error_code ec ) { - // called from thread_pool threads, new_socket not shared yet + connection_ptr new_connection = std::make_shared(); + acceptor->async_accept( new_connection->socket, + boost::asio::bind_executor( new_connection->strand, [new_connection, this]( boost::system::error_code ec ) { if( !ec ) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; - const auto& paddr_add = new_socket->remote_endpoint( rec ).address(); + const auto& paddr_add = new_connection->socket.remote_endpoint( rec ).address(); string paddr_str; if( rec ) { fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message()) ); @@ -2015,8 +1971,7 @@ struct msg_handler : public fc::visitor { } g.unlock(); if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count) ) { - connection_ptr new_connection = std::make_shared( new_socket ); - if( start_session( new_connection ) ) { + if( new_connection->start_session() ) { g.lock(); connections.insert( new_connection ); g.unlock(); @@ -2029,7 +1984,8 @@ struct msg_handler : public fc::visitor { } else { fc_elog( logger, "Error max_client_count ${m} exceeded", ("m", max_client_count) ); } - new_socket->close(); // new_socket never associated with a connection + // new_connection never added to connections and start_session not called, lifetime will end + new_connection->socket.close(); } } } else { @@ -2048,16 +2004,13 @@ struct msg_handler : public fc::visitor { } } start_listen_loop(); - }); + })); } // only called from strand thread void net_plugin_impl::start_read_message(const connection_ptr& conn) { try { - if(!conn->socket) { - return; - } connection_wptr weak_conn = conn; std::size_t minimum_read = @@ -2068,7 +2021,7 @@ struct msg_handler : public fc::visitor { const size_t max_socket_read_watermark = 4096; std::size_t socket_read_watermark = std::min(minimum_read, max_socket_read_watermark); boost::asio::socket_base::receive_low_watermark read_watermark_opt(socket_read_watermark); - conn->socket->set_option(read_watermark_opt); + conn->socket.set_option(read_watermark_opt); } auto completion_handler = [minimum_read](boost::system::error_code ec, std::size_t bytes_transferred) -> std::size_t { @@ -2098,19 +2051,14 @@ struct msg_handler : public fc::visitor { reads_in_flight > 2*def_max_reads_in_flight || trx_in_progress_size > 2*def_max_trx_in_progress_size ) { - fc_wlog( logger, "queues over full, giving up on connection" ); - app().post( priority::medium, [weak_conn]() { - auto conn = weak_conn.lock(); - if( !conn ) return; - fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - conn->close(); - }); + fc_elog( logger, "queues over full, giving up on connection, closing connection to: ${p}", + ("p", conn->peer_name()) ); + conn->close(); return; } std::lock_guard g( conn->read_delay_timer_mtx ); - if( !conn->read_delay_timer ) return; - conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); - conn->read_delay_timer->async_wait( + conn->read_delay_timer.expires_from_now( def_read_delay_for_full_write_queue ); + conn->read_delay_timer.async_wait( boost::asio::bind_executor(conn->strand, 
[this, weak_conn]( boost::system::error_code ) { auto conn = weak_conn.lock(); if( !conn ) return; @@ -2120,15 +2068,10 @@ struct msg_handler : public fc::visitor { } ++conn->reads_in_flight; - boost::asio::async_read(*conn->socket, + boost::asio::async_read( conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, boost::asio::bind_executor( conn->strand, - [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { - auto conn = weak_conn.lock(); - if (!conn) { - return; - } - + [this, conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { --conn->reads_in_flight; bool close_connection = false; @@ -2199,23 +2142,13 @@ struct msg_handler : public fc::visitor { } if( close_connection ) { - app().post( priority::medium, [weak_conn]() { - auto conn = weak_conn.lock(); - if( !conn ) return; - fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - conn->close(); - }); + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + conn->close(); } })); } catch (...) { - fc_elog( logger, "Undefined exception in start_read_message" ); - connection_wptr weak_conn = conn; - app().post( priority::medium, [weak_conn]() { - auto conn = weak_conn.lock(); - if( !conn ) return; - fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - conn->close(); - }); + fc_elog( logger, "Undefined exception in start_read_message, closing connection to: ${p}", ("p", conn->peer_name()) ); + conn->close(); } } @@ -2968,7 +2901,6 @@ struct msg_handler : public fc::visitor { my->resp_expected_period = def_resp_expected_wait; my->max_client_count = options.at( "max-clients" ).as(); my->max_nodes_per_host = options.at( "p2p-max-nodes-per-host" ).as(); - my->started_sessions = 0; my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as(); @@ -3131,6 +3063,9 @@ struct msg_handler : public fc::visitor { if( my->server_ioc_work ) my->server_ioc_work->reset(); + if( my->server_ioc ) + my->server_ioc->stop(); + { std::lock_guard g( my->connector_check_timer_mtx ); if( my->connector_check_timer ) @@ -3146,22 +3081,16 @@ struct msg_handler : public fc::visitor { } my->done = true; - if( my->acceptor ) { - fc_ilog( logger, "close acceptor" ); - my->acceptor->cancel(); - my->acceptor->close(); - - fc_ilog( logger, "close ${s} connections",( "s",my->connections.size()) ); + { + fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); std::lock_guard g( my->connections_mtx ); for( auto& con : my->connections ) { - fc_dlog( logger, "close: ${p}", ("p",con->peer_name()) ); + fc_dlog( logger, "close: ${p}", ("p", con->peer_name()) ); con->close(); } my->connections.clear(); } - if( my->server_ioc ) - my->server_ioc->stop(); if( my->thread_pool ) { my->thread_pool->join(); my->thread_pool->stop(); From c66bf7ecb22065fa02011eebb06b70d50af653ac Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 22 Mar 2019 13:55:14 -0500 Subject: [PATCH 0069/1648] Make queued_buffer thread safe --- plugins/net_plugin/net_plugin.cpp | 50 +++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 6e4636211e7..fccdb9d01da 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -458,33 +458,44 @@ namespace eosio { static void populate(handshake_message &hello); }; + // thread safe class queued_buffer : boost::noncopyable { 
public: void clear_write_queue() { + std::lock_guard g( _mtx ); _write_queue.clear(); _sync_write_queue.clear(); _write_queue_size = 0; } void clear_out_queue() { + std::lock_guard g( _mtx ); while ( _out_queue.size() > 0 ) { _out_queue.pop_front(); } } - // thread safe - uint32_t write_queue_size() const { return _write_queue_size; } + uint32_t write_queue_size() const { + std::lock_guard g( _mtx ); + return _write_queue_size; + } - bool is_out_queue_empty() const { return _out_queue.empty(); } + bool is_out_queue_empty() const { + std::lock_guard g( _mtx ); + return _out_queue.empty(); + } bool ready_to_send() const { + std::lock_guard g( _mtx ); // if out_queue is not empty then async_write is in progress return ((!_sync_write_queue.empty() || !_write_queue.empty()) && _out_queue.empty()); } + // @param callback must not callback into queued_buffer bool add_write_queue( const std::shared_ptr>& buff, std::function callback, bool to_sync_queue ) { + std::lock_guard g( _mtx ); if( to_sync_queue ) { _sync_write_queue.push_back( {buff, callback} ); } else { @@ -498,6 +509,7 @@ namespace eosio { } void fill_out_buffer( std::vector& bufs ) { + std::lock_guard g( _mtx ); if( _sync_write_queue.size() > 0 ) { // always send msgs from sync_write_queue first fill_out_buffer( bufs, _sync_write_queue ); } else { // postpone real_time write_queue if sync queue is not empty @@ -507,6 +519,7 @@ namespace eosio { } void out_callback( boost::system::error_code ec, std::size_t w ) { + std::lock_guard g( _mtx ); for( auto& m : _out_queue ) { m.callback( ec, w ); } @@ -531,7 +544,8 @@ namespace eosio { std::function callback; }; - std::atomic _write_queue_size{0}; + mutable std::mutex _mtx; + uint32_t _write_queue_size{0}; deque _write_queue; deque _sync_write_queue; // sync_write_queue will be sent first deque _out_queue; @@ -847,7 +861,7 @@ struct msg_handler : public fc::visitor { } void connection::close() { - strand.dispatch( [self = shared_from_this()]() { + strand.post( [self = shared_from_this()]() { connection::_close( self.get() ); }); } @@ -950,12 +964,15 @@ struct msg_handler : public fc::visitor { syncing = false; } + // thread safe void connection::send_handshake() { - handshake_initializer::populate(last_handshake_sent); - last_handshake_sent.generation = ++sent_handshake_count; - fc_dlog(logger, "Sending handshake generation ${g} to ${ep}", - ("g",last_handshake_sent.generation)("ep", peer_name())); - enqueue(last_handshake_sent); + app().post( priority::low, [c = shared_from_this()]() { + handshake_initializer::populate( c->last_handshake_sent ); + c->last_handshake_sent.generation = ++c->sent_handshake_count; + fc_dlog( logger, "Sending handshake generation ${g} to ${ep}", + ("g", c->last_handshake_sent.generation)( "ep", c->peer_name() ) ); + c->enqueue( c->last_handshake_sent ); + }); } void connection::send_time() { @@ -991,6 +1008,7 @@ struct msg_handler : public fc::visitor { } } + // called from connection strand and application thread void connection::do_queue_write(int priority) { if( !buffer_queue.ready_to_send() ) return; @@ -999,8 +1017,9 @@ struct msg_handler : public fc::visitor { std::vector bufs; buffer_queue.fill_out_buffer( bufs ); - boost::asio::async_write( socket, bufs, - boost::asio::bind_executor( strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { + strand.dispatch( [c{std::move(c)}, bufs{std::move(bufs)}, priority]() { + boost::asio::async_write( c->socket, bufs, + boost::asio::bind_executor( c->strand, [c, priority]( boost::system::error_code ec, 
std::size_t w ) { try { c->buffer_queue.out_callback( ec, w ); @@ -1023,7 +1042,8 @@ struct msg_handler : public fc::visitor { } catch( ... ) { fc_elog( logger, "Exception in do_queue_write to ${p}", ("p", c->peer_name()) ); } - })); + })); + }); } void connection::cancel_sync(go_away_reason reason) { @@ -2789,8 +2809,8 @@ struct msg_handler : public fc::visitor { return chain::signature_type(); } - void - handshake_initializer::populate( handshake_message &hello) { + // call from main application thread + void handshake_initializer::populate( handshake_message& hello ) { hello.network_version = net_version_base + net_version; hello.chain_id = my_impl->chain_id; hello.node_id = my_impl->node_id; From ba140e8443b67d45354849901f30bbe0d1e4c57c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 22 Mar 2019 15:58:17 -0500 Subject: [PATCH 0070/1648] Make use of resolver thread safe --- plugins/net_plugin/net_plugin.cpp | 86 ++++++++++++++----------------- 1 file changed, 38 insertions(+), 48 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index fccdb9d01da..e70a74ccdf4 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -254,8 +254,6 @@ namespace eosio { chain_plugin* chain_plug = nullptr; producer_plugin* producer_plug = nullptr; - shared_ptr resolver; - bool use_socket_read_watermark = false; compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; @@ -268,7 +266,7 @@ namespace eosio { bool resolve_and_connect(const connection_ptr& c); - void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); + void connect(const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr); void start_listen_loop(); void start_read_message(const connection_ptr& c); @@ -594,7 +592,7 @@ namespace eosio { handshake_message last_handshake_recv; handshake_message last_handshake_sent; int16_t sent_handshake_count = 0; - bool connecting = false; + std::atomic connecting{false}; bool syncing = false; uint16_t protocol_version = 0; private: @@ -609,7 +607,7 @@ namespace eosio { boost::asio::steady_timer response_expected_timer; std::mutex read_delay_timer_mtx; boost::asio::steady_timer read_delay_timer; - go_away_reason no_retry = no_reason; + std::atomic no_retry{no_reason}; block_id_type fork_head; std::atomic fork_head_num{0}; // provides memory barrier for fork_head optional last_req; @@ -1892,52 +1890,42 @@ struct msg_handler : public fc::visitor { //------------------------------------------------------------------------ + // called from any thread bool net_plugin_impl::resolve_and_connect(const connection_ptr& c) { if( c->no_retry != go_away_reason::no_reason) { fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( c->no_retry ))); return false; } - auto colon = c->peer_address().find(':'); - + string::size_type colon = c->peer_address().find(':'); if (colon == std::string::npos || colon == 0) { fc_elog( logger, "Invalid peer address. 
must be \"host:port\": ${p}", ("p",c->peer_address()) ); - std::lock_guard g( my_impl->connections_mtx ); - for ( auto& cp : connections ) { - if( cp->peer_address() == c->peer_address() ) { - cp->reset(); - cp->close(); - connections.erase( cp ); - break; - } - } return false; } - auto host = c->peer_address().substr( 0, colon ); - auto port = c->peer_address().substr( colon + 1); + string host = c->peer_address().substr( 0, colon ); + string port = c->peer_address().substr( colon + 1); idump((host)(port)); - tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str() ); + tcp::resolver::query query( tcp::v4(), host, port ); connection_wptr weak_conn = c; // Note: need to add support for IPv6 too + auto resolver = std::make_shared( *server_ioc ); resolver->async_resolve( query, - [weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { - app().post( priority::low, [err, endpoint_itr, weak_conn, this]() { - auto c = weak_conn.lock(); - if( !c ) return; - if( !err ) { - connect( c, endpoint_itr ); - } else { - fc_elog( logger, "Unable to resolve ${add}: ${error}", - ("add", c->peer_name())( "error", err.message()) ); - } - } ); - } ); + [resolver, ioc = server_ioc, weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { + auto c = weak_conn.lock(); + if( !c ) return; + if( !err ) { + connect( c, resolver, endpoint_itr ); + } else { + fc_elog( logger, "Unable to resolve ${add}: ${error}", ("add", c->peer_name())( "error", err.message() ) ); + } + } ); return true; } - void net_plugin_impl::connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr) { + // called from any thread + void net_plugin_impl::connect(const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr) { if( c->no_retry != go_away_reason::no_reason) { string rsn = reason_str(c->no_retry); return; @@ -1946,7 +1934,7 @@ struct msg_handler : public fc::visitor { ++endpoint_itr; c->connecting = true; c->socket.async_connect( current_endpoint, - boost::asio::bind_executor( c->strand, [c, endpoint_itr, this]( const boost::system::error_code& err ) { + boost::asio::bind_executor( c->strand, [resolver, c, endpoint_itr, this]( const boost::system::error_code& err ) { if( !err && c->socket.is_open()) { if( c->start_session() ) { c->send_handshake(); @@ -1954,7 +1942,7 @@ struct msg_handler : public fc::visitor { } else { if( endpoint_itr != tcp::resolver::iterator()) { c->close(); - connect( c, endpoint_itr ); + connect( c, resolver, endpoint_itr ); } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; @@ -2620,19 +2608,17 @@ struct msg_handler : public fc::visitor { } } + // called from any thread void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { std::lock_guard g( connector_check_timer_mtx ); connector_check_timer->expires_from_now( du); connector_check_timer->async_wait( [this, from_connection](boost::system::error_code ec) { - app().post( priority::low, [this, from_connection, ec]() { if( !ec) { connection_monitor(from_connection); - } - else { + } else { fc_elog( logger, "Error from connection check monitor: ${m}",( "m", ec.message())); start_conn_timer( connector_period, std::weak_ptr()); } - }); }); } @@ -2693,6 +2679,7 @@ struct msg_handler : public fc::visitor { start_expire_timer(); } + // called from any thread void 
net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); @@ -2707,7 +2694,10 @@ struct msg_handler : public fc::visitor { } if( !(*it)->socket_is_open() && !(*it)->connecting) { if( (*it)->peer_address().length() > 0) { - resolve_and_connect(*it); + if( !resolve_and_connect(*it) ) { + it = connections.erase(it); + continue; + } } else { it = connections.erase(it); continue; @@ -2997,14 +2987,14 @@ struct msg_handler : public fc::visitor { boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); } - my->resolver = std::make_shared( std::ref( *my->server_ioc )); if( my->p2p_address.size() > 0 ) { auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str()); // Note: need to add support for IPv6 too? - my->listen_endpoint = *my->resolver->resolve( query ); + tcp::resolver resolver( *my->server_ioc ); + my->listen_endpoint = *resolver.resolve( query ); my->acceptor.reset( new tcp::acceptor( *my->server_ioc ) ); @@ -3127,13 +3117,13 @@ struct msg_handler : public fc::visitor { if( my->find_connection( host ) ) return "already connected"; - connection_ptr c = std::make_shared(host); - fc_dlog(logger,"adding new connection to the list"); - std::unique_lock g( my->connections_mtx ); - my->connections.insert( c ); - g.unlock(); - fc_dlog(logger,"calling active connector"); - my->resolve_and_connect( c ); + connection_ptr c = std::make_shared( host ); + fc_dlog( logger, "calling active connector" ); + if( my->resolve_and_connect( c ) ) { + fc_dlog( logger, "adding new connection to the list" ); + std::unique_lock g( my->connections_mtx ); + my->connections.insert( c ); + } return "added connection"; } From 22f9e666070fe2e08d7d2d1839e30c7baefe525f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 22 Mar 2019 23:08:28 -0500 Subject: [PATCH 0071/1648] Use boost::shared_mutex for connections. Multithread broadcast block. 
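This commit guards the shared connection set with a reader/writer lock: broadcast, handshake, and lookup paths take boost::shared_lock, while paths that change set membership (connect, disconnect, connection_monitor, shutdown) take boost::unique_lock, and bcast_block now serializes the block once and posts the enqueue onto each connection's strand. The locking pattern, reduced to a self-contained sketch: peer_registry and its members are invented names, and the Boost headers are the ones I would expect for boost::shared_mutex rather than ones copied from this patch.

#include <boost/thread/shared_mutex.hpp>
#include <boost/thread/locks.hpp>
#include <memory>
#include <set>
#include <string>

struct peer { std::string addr; };
using peer_ptr = std::shared_ptr<peer>;

class peer_registry {
   mutable boost::shared_mutex mtx_;
   std::set<peer_ptr>          peers_;
public:
   void add( peer_ptr p ) {                         // writer: exclusive lock
      boost::unique_lock<boost::shared_mutex> g( mtx_ );
      peers_.insert( std::move( p ) );
   }
   template<typename F>
   void for_each( F&& f ) const {                   // readers: shared lock, may overlap
      boost::shared_lock<boost::shared_mutex> g( mtx_ );
      for( const auto& p : peers_ ) f( p );
   }
};

Exclusive access is needed only when membership changes; several block broadcasts can therefore walk the set concurrently, while the per-connection send work stays serialized by each connection's strand.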
--- plugins/net_plugin/net_plugin.cpp | 88 ++++++++++++++++++------------- 1 file changed, 52 insertions(+), 36 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index e70a74ccdf4..e1d3dbbf2b1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include @@ -223,7 +225,7 @@ namespace eosio { connection_ptr find_connection(const string& host)const; - mutable std::mutex connections_mtx; // switch to shared_mutex in C++17 + mutable boost::shared_mutex connections_mtx; // switch to std::shared_mutex in C++17 std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection bool done = false; unique_ptr< sync_manager > sync_master; @@ -593,7 +595,7 @@ namespace eosio { handshake_message last_handshake_sent; int16_t sent_handshake_count = 0; std::atomic connecting{false}; - bool syncing = false; + std::atomic syncing{false}; uint16_t protocol_version = 0; private: const string peer_addr; @@ -1327,7 +1329,7 @@ struct msg_handler : public fc::visitor { if (conn && conn->current() ) { source = conn; } else { - std::lock_guard g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); if (my_impl->connections.size() == 1) { if (!source) { source = *my_impl->connections.begin(); @@ -1393,7 +1395,7 @@ struct msg_handler : public fc::visitor { void sync_manager::send_handshakes() { - std::lock_guard g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for( auto& ci : my_impl->connections ) { if( ci->current() ) { ci->send_handshake(); @@ -1513,7 +1515,7 @@ struct msg_handler : public fc::visitor { void sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { request_message req; req.req_blocks.mode = catch_up; - std::unique_lock g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for (const auto& cc : my_impl->connections) { // fork_head_num provides memory barrier for fork_head if( cc->fork_head_num > num || cc->fork_head == id ) { @@ -1588,7 +1590,7 @@ struct msg_handler : public fc::visitor { source.reset(); block_id_type null_id; - std::unique_lock g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for( const auto& cp : my_impl->connections ) { uint32_t fork_head_num = cp->fork_head_num.load(); // fork_head_num provides memory barrier for fork_head if (cp->fork_head == null_id) { @@ -1717,31 +1719,43 @@ struct msg_handler : public fc::visitor { stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib_num) ); } + // thread safe void dispatch_manager::bcast_block(const block_state_ptr& bs) { - uint32_t bnum = bs->block_num; - peer_block_state pbstate{bs->id, bnum}; - fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); + fc_dlog( logger, "bcast block ${b}", ("b", bs->block_num) ); - std::shared_ptr> send_buffer; - std::lock_guard g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); + bool have_connection = false; for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; } - bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; - if( !has_block ) { - pbstate.connection_id = cp->connection_id; - if( !add_peer_block( pbstate ) ) { - continue; - } - if( !send_buffer ) { - send_buffer = create_send_buffer( bs->block ); - } - fc_dlog( logger, "bcast block ${b} to ${p}", 
("b", bnum)( "p", cp->peer_name() ) ); - cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); - } + have_connection = true; + break; } + g.unlock(); + if( !have_connection ) return; + std::shared_ptr> send_buffer = create_send_buffer( bs->block ); + + g.lock(); + for( auto& cp : my_impl->connections ) { + if( !cp->current() ) { + continue; + } + cp->strand.post( [this, cp, bs, send_buffer]() { + uint32_t bnum = bs->block_num; + // todo protect cp->last_handshake_recv + bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; + if( !has_block ) { + peer_block_state pbstate{bs->id, bnum, cp->connection_id}; + if( !add_peer_block( pbstate ) ) { + return; + } + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); + cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); + } + }); + } } void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { @@ -1770,7 +1784,7 @@ struct msg_handler : public fc::visitor { node_transaction_state nts = {id, trx_expiration, 0, 0}; std::shared_ptr> send_buffer; - std::lock_guard g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; @@ -1866,7 +1880,7 @@ struct msg_handler : public fc::visitor { ("b",modes_str(c->last_req->req_blocks.mode))("t",modes_str(c->last_req->req_trx.mode))); return; } - std::unique_lock g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for (auto& conn : my_impl->connections) { if (conn == c || conn->last_req) { continue; @@ -1966,7 +1980,7 @@ struct msg_handler : public fc::visitor { fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message()) ); } else { paddr_str = paddr_add.to_string(); - std::unique_lock g( connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for( auto& conn : connections ) { if( conn->socket_is_open() ) { if( conn->peer_address().empty() ) { @@ -2258,7 +2272,7 @@ struct msg_handler : public fc::visitor { if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { fc_dlog(logger, "checking for duplicate" ); - std::lock_guard g( connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for(const auto& check : connections) { if(check == c) continue; @@ -2646,7 +2660,7 @@ struct msg_handler : public fc::visitor { if( ec ) { fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - std::lock_guard g( connections_mtx ); + boost::shared_lock g( connections_mtx ); for( auto& c : connections ) { if( c->socket_is_open() ) { c->send_time(); @@ -2684,7 +2698,7 @@ struct msg_handler : public fc::visitor { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); - std::unique_lock g( connections_mtx ); + boost::unique_lock g( connections_mtx ); auto it = (from ? 
connections.find(from) : connections.begin()); if (it == connections.end()) it = connections.begin(); while (it != connections.end()) { @@ -2710,8 +2724,10 @@ struct msg_handler : public fc::visitor { } void net_plugin_impl::accepted_block(const block_state_ptr& block) { - fc_dlog(logger,"signaled, id = ${id}",("id", block->id)); - dispatcher->bcast_block(block); + boost::asio::post( *server_ioc, [this, ioc=server_ioc, block]() { + fc_dlog( logger, "signaled, id = ${id}", ("id", block->id) ); + dispatcher->bcast_block( block ); + }); } void net_plugin_impl::transaction_ack(const std::pair& results) { @@ -3093,7 +3109,7 @@ struct msg_handler : public fc::visitor { my->done = true; { fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); - std::lock_guard g( my->connections_mtx ); + boost::unique_lock g( my->connections_mtx ); for( auto& con : my->connections ) { fc_dlog( logger, "close: ${p}", ("p", con->peer_name()) ); con->close(); @@ -3121,14 +3137,14 @@ struct msg_handler : public fc::visitor { fc_dlog( logger, "calling active connector" ); if( my->resolve_and_connect( c ) ) { fc_dlog( logger, "adding new connection to the list" ); - std::unique_lock g( my->connections_mtx ); + boost::unique_lock g( my->connections_mtx ); my->connections.insert( c ); } return "added connection"; } string net_plugin::disconnect( const string& host ) { - std::lock_guard g( my->connections_mtx ); + boost::unique_lock g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { if( (*itr)->peer_address() == host ) { (*itr)->reset(); @@ -3150,7 +3166,7 @@ struct msg_handler : public fc::visitor { vector net_plugin::connections()const { vector result; - std::lock_guard g( my->connections_mtx ); + boost::shared_lock g( my->connections_mtx ); result.reserve( my->connections.size() ); for( const auto& c : my->connections ) { result.push_back( c->get_status() ); @@ -3158,7 +3174,7 @@ struct msg_handler : public fc::visitor { return result; } connection_ptr net_plugin_impl::find_connection(const string& host )const { - std::lock_guard g( connections_mtx ); + boost::shared_lock g( connections_mtx ); for( const auto& c : connections ) if( c->peer_address() == host ) return c; return connection_ptr(); From 90b479db92d387618a82b3106e1132676c130bf7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 07:16:05 -0500 Subject: [PATCH 0072/1648] Work toward making sync_manager and handshake message thread safe. 
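Besides the renames (sync_source, sync_state, sync_reset_lib_num, sync_reassign_fetch) and the new sync_mtx, the idiom worth noting below is that request_next_chunk() now takes the caller's std::unique_lock by value and is documented "call with g_sync locked", so ownership of the sync mutex travels into the helper instead of being re-acquired or merely described in a comment. A compact sketch of that hand-off, with illustrative names only:

#include <mutex>

class sync_state {
   std::mutex mtx_;
   unsigned   last_requested_ = 0;

   // precondition: g owns mtx_; taking the lock by value makes the hand-off explicit
   void request_next_chunk( std::unique_lock<std::mutex> g, unsigned span ) {
      last_requested_ += span;   // protected state is updated while still locked
      g.unlock();                // in this sketch the helper decides when to release
      // ... issue the next request without holding the mutex ...
   }

public:
   void reset_and_request( unsigned span ) {
      std::unique_lock<std::mutex> g( mtx_ );
      last_requested_ = 0;
      request_next_chunk( std::move( g ), span );   // lock ownership moves with the call
   }
};

After the move the caller's lock no longer owns the mutex, which matches how sync_reset_lib_num() forwards its lock with request_next_chunk( std::move(g) ) later in this patch.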
--- plugins/net_plugin/net_plugin.cpp | 682 +++++++++++++++++------------- 1 file changed, 382 insertions(+), 300 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index e1d3dbbf2b1..205bfbfb105 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -141,28 +141,29 @@ namespace eosio { in_sync }; + std::mutex sync_mtx; uint32_t sync_known_lib_num; uint32_t sync_last_requested_num; uint32_t sync_next_expected_num; uint32_t sync_req_span; - connection_ptr source; - stages state; + connection_ptr sync_source; + stages sync_state; chain_plugin* chain_plug = nullptr; - constexpr static auto stage_str(stages s); + private: + constexpr static auto stage_str( stages s ); + void set_state( stages s ); + bool is_sync_required(); + void request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn = connection_ptr() ); + void start_sync( const connection_ptr& c, uint32_t target ); + void verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); + static void send_handshakes(); public: explicit sync_manager(uint32_t span); - void set_state(stages s); - bool sync_required(); - void send_handshakes(); - bool is_active(const connection_ptr& conn); - void reset_lib_num(const connection_ptr& conn); - void request_next_chunk(const connection_ptr& conn = connection_ptr()); - void start_sync(const connection_ptr& c, uint32_t target); - void reassign_fetch(const connection_ptr& c, go_away_reason reason); - void verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id); + void sync_reset_lib_num(const connection_ptr& conn); + void sync_reassign_fetch(const connection_ptr& c, go_away_reason reason); void rejected_block(const connection_ptr& c, uint32_t blk_num); void sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); @@ -214,6 +215,7 @@ namespace eosio { vector allowed_peers; ///< peer keys allowed to connect std::map private_keys; ///< overlapping with producer keys, also authenticating non-producing nodes + // thread safe, only updated in plugin initialize enum possible_connections : char { None = 0, @@ -246,17 +248,24 @@ namespace eosio { const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; ///< Peer clock may be no more than 1 second skewed from our clock, including network latency. 
bool network_version_match = false; - chain_id_type chain_id; - fc::sha256 node_id; - std::atomic lib_num{0}; - uint32_t head_blk_num{0}; + chain_id_type chain_id; // thread safe, only updated in plugin_initialize + fc::sha256 node_id; // thread safe, only updated in plugin initialize + string user_agent_name; // thread safe, only updated in plugin initialize + + mutable std::mutex chain_info_mtx; + uint32_t chain_lib_num{0}; + uint32_t chain_head_blk_num{0}; + uint32_t chain_fork_head_blk_num{0}; + block_id_type chain_lib_id; + block_id_type chain_head_blk_id; + block_id_type chain_fork_head_blk_id; + eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; - string user_agent_name; chain_plugin* chain_plug = nullptr; producer_plugin* producer_plug = nullptr; - bool use_socket_read_watermark = false; + bool use_socket_read_watermark = false; // thread safe, not modified outside plugin_initialize compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; @@ -266,31 +275,17 @@ namespace eosio { std::shared_ptr server_ioc; optional server_ioc_work; + void update_chain_info(); + // lib_num, head_block_num, fork_head_blk_num, lib_id, head_blk_id, fork_head_blk_id + std::tuple get_chain_info() const; - bool resolve_and_connect(const connection_ptr& c); - void connect(const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr); void start_listen_loop(); - void start_read_message(const connection_ptr& c); - /** \brief Process the next message from the pending message buffer - * - * Process the next message from the pending_message_buffer. - * message_length is the already determined length of the data - * part of the message that will handle the message. - * Returns true is successful. Returns false if an error was - * encountered unpacking or processing the message. - */ - bool process_next_message(const connection_ptr& conn, uint32_t message_length); - - void accepted_block(const block_state_ptr&); + void on_accepted_block( const block_state_ptr& bs ); void transaction_ack(const std::pair&); - void on_irreversible_block( const block_state_ptr& blk ) { - lib_num = blk->block_num; - controller& cc = chain_plug->chain(); - head_blk_num = cc.head_block_num(); - } + void on_irreversible_block( const block_state_ptr& blk ); - bool is_valid( const handshake_message &msg); + static bool is_valid( const handshake_message& msg ); void handle_message(const connection_ptr& c, const handshake_message& msg); void handle_message(const connection_ptr& c, const chain_size_message& msg); @@ -455,7 +450,7 @@ namespace eosio { }; struct handshake_initializer { - static void populate(handshake_message &hello); + static void populate(handshake_message& hello); }; // thread safe @@ -647,6 +642,21 @@ namespace eosio { private: static void _close( connection* self ); // for easy capture public: + + bool resolve_and_connect(); + void connect(const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr); + void start_read_message(); + + /** \brief Process the next message from the pending message buffer + * + * Process the next message from the pending_message_buffer. + * message_length is the already determined length of the data + * part of the message that will handle the message. + * Returns true is successful. Returns false if an error was + * encountered unpacking or processing the message. 
+ */ + bool process_next_message(uint32_t message_length); + void send_handshake(); /** \name Peer Timestamps @@ -719,8 +729,8 @@ namespace eosio { const string connection::unknown = ""; - -struct msg_handler : public fc::visitor { + // called from connection strand + struct msg_handler : public fc::visitor { net_plugin_impl& impl; connection_ptr c; msg_handler( net_plugin_impl& imp, const connection_ptr& conn) : impl(imp), c(conn) {} @@ -746,16 +756,61 @@ struct msg_handler : public fc::visitor { if( c ) impl->handle_message( c, ptr ); }); } + void operator()( packed_transaction&& msg ) const { + // continue call to handle_message on connection strand shared_ptr ptr = std::make_shared( std::move( msg ) ); impl.handle_message( c, ptr ); } - template - void operator()( T&& msg ) const - { + void operator()( const handshake_message& msg ) const { + // continue call to handle_message on connection strand + impl.handle_message( c, msg ); + } + + void operator()( const chain_size_message& msg ) const { connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const go_away_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const time_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const notice_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const request_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const sync_request_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); @@ -839,7 +894,7 @@ struct msg_handler : public fc::visitor { return false; } else { socket_open = true; - my_impl->start_read_message( shared_from_this() ); + start_read_message(); return true; } } @@ -879,7 +934,7 @@ struct msg_handler : public fc::visitor { self->sent_handshake_count = 0; self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); - my_impl->sync_master->reset_lib_num( nullptr ); + my_impl->sync_master->sync_reset_lib_num( nullptr ); fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); self->cancel_wait(); @@ -964,9 +1019,8 @@ struct msg_handler : public fc::visitor { syncing = false; } - // thread safe void connection::send_handshake() { - app().post( priority::low, [c = shared_from_this()]() { + strand.post( [c = shared_from_this()]() { handshake_initializer::populate( c->last_handshake_sent ); c->last_handshake_sent.generation = ++c->sent_handshake_count; fc_dlog( logger, 
"Sending handshake generation ${g} to ${ep}", @@ -1168,20 +1222,13 @@ struct msg_handler : public fc::visitor { // thread safe void connection::sync_wait() { - connection_wptr c(shared_from_this()); + connection_ptr c(shared_from_this()); std::lock_guard g( response_expected_timer_mtx ); response_expected_timer.expires_from_now( my_impl->resp_expected_period); - response_expected_timer.async_wait( [c]( boost::system::error_code ec ) { - app().post(priority::low, [c, ec]() { - connection_ptr conn = c.lock(); - if (!conn) { - // connection was destroyed before this lambda was delivered - return; - } - - conn->sync_timeout(ec); - }); - } ); + response_expected_timer.async_wait( + boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { + c->sync_timeout( ec ); + } ) ); } // thread safe @@ -1202,17 +1249,17 @@ struct msg_handler : public fc::visitor { } ); } + // called from connection strand void connection::sync_timeout( boost::system::error_code ec ) { if( !ec ) { - my_impl->sync_master->reassign_fetch(shared_from_this(), benign_other); - } - else if( ec == boost::asio::error::operation_aborted) { - } - else { - fc_elog( logger,"setting timer for sync request got error ${ec}",("ec", ec.message()) ); + my_impl->sync_master->sync_reassign_fetch( shared_from_this(), benign_other ); + } else if( ec == boost::asio::error::operation_aborted ) { + } else { + fc_elog( logger, "setting timer for sync request got error ${ec}", ("ec", ec.message()) ); } } + // todo: last_handshake_recv not thread safe const string connection::peer_name() { if( !last_handshake_recv.p2p_address.empty() ) { return last_handshake_recv.p2p_address; @@ -1253,8 +1300,8 @@ struct msg_handler : public fc::visitor { ,sync_last_requested_num( 0 ) ,sync_next_expected_num( 1 ) ,sync_req_span( req_span ) - ,source() - ,state(in_sync) + ,sync_source() + ,sync_state(in_sync) { chain_plug = app().find_plugin(); EOS_ASSERT( chain_plug, chain::missing_chain_plugin_exception, "" ); @@ -1270,53 +1317,48 @@ struct msg_handler : public fc::visitor { } void sync_manager::set_state(stages newstate) { - if (state == newstate) { + if( sync_state == newstate ) { return; } - fc_dlog(logger, "old state ${os} becoming ${ns}",("os",stage_str(state))("ns",stage_str(newstate))); - state = newstate; + fc_dlog( logger, "old state ${os} becoming ${ns}", ("os", stage_str( sync_state ))( "ns", stage_str( newstate ) ) ); + sync_state = newstate; } - // uses controller, only call from application thread - bool sync_manager::is_active(const connection_ptr& c) { - if (state == head_catchup && c) { - auto fork_head_num = c->fork_head_num.load(); // provide memory barrier for c->fork_head - bool fhset = c->fork_head != block_id_type(); - fc_dlog( logger, "fork_head_num = ${fn} fork_head set = ${s}", ("fn", fork_head_num)( "s", fhset ) ); - return c->fork_head != block_id_type() && fork_head_num < chain_plug->chain().fork_db_head_block_num(); - } - return state != in_sync; - } - - void sync_manager::reset_lib_num(const connection_ptr& c) { - if(state == in_sync) { - source.reset(); + void sync_manager::sync_reset_lib_num(const connection_ptr& c) { + std::unique_lock g( sync_mtx ); + if( sync_state == in_sync ) { + sync_source.reset(); } if( !c ) return; if( c->current() ) { - if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num) { - sync_known_lib_num =c->last_handshake_recv.last_irreversible_block_num; + if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num ) { + sync_known_lib_num = 
c->last_handshake_recv.last_irreversible_block_num; } - } else if( c == source ) { + } else if( c == sync_source ) { sync_last_requested_num = 0; - request_next_chunk(); + request_next_chunk( std::move(g) ); } } - bool sync_manager::sync_required() { - fc_dlog(logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", - ("req",sync_last_requested_num)("recv",sync_next_expected_num)("known",sync_known_lib_num)("head",chain_plug->chain().fork_db_head_block_num())); + bool sync_manager::is_sync_required() { + fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", + ("req", sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) + ("head", chain_plug->chain().fork_db_head_block_num() ) ); return( sync_last_requested_num < sync_known_lib_num || chain_plug->chain().fork_db_head_block_num() < sync_last_requested_num ); } - void sync_manager::request_next_chunk( const connection_ptr& conn ) { - uint32_t head_block = chain_plug->chain().fork_db_head_block_num(); + // call with g_sync locked + void sync_manager::request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn ) { + uint32_t fork_head_block_num = 0; + uint32_t lib_block_num = 0; + std::tie( lib_block_num, std::ignore, fork_head_block_num, + std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info(); - if (head_block < sync_last_requested_num && source && source->current()) { - fc_ilog(logger, "ignoring request, head is ${h} last req = ${r} source is ${p}", - ("h",head_block)("r",sync_last_requested_num)("p",source->peer_name())); + if( fork_head_block_num < sync_last_requested_num && sync_source && sync_source->current() ) { + fc_ilog( logger, "ignoring request, head is ${h} last req = ${r} source is ${p}", + ("h", fork_head_block_num)( "r", sync_last_requested_num )( "p", sync_source->peer_address() ) ); return; } @@ -1327,25 +1369,25 @@ struct msg_handler : public fc::visitor { */ if (conn && conn->current() ) { - source = conn; + sync_source = conn; } else { boost::shared_lock g( my_impl->connections_mtx ); if (my_impl->connections.size() == 1) { - if (!source) { - source = *my_impl->connections.begin(); + if (!sync_source) { + sync_source = *my_impl->connections.begin(); } } else { // init to a linear array search auto cptr = my_impl->connections.begin(); auto cend = my_impl->connections.end(); // do we remember the previous source? - if (source) { + if (sync_source) { //try to find it in the list - cptr = my_impl->connections.find(source); + cptr = my_impl->connections.find( sync_source ); cend = cptr; if (cptr == my_impl->connections.end()) { //not there - must have been closed! cend is now connections.end, so just flatten the ring. - source.reset(); + sync_source.reset(); cptr = my_impl->connections.begin(); } else { //was found - advance the start to the next. cend is the old source. @@ -1359,8 +1401,8 @@ struct msg_handler : public fc::visitor { auto cstart_it = cptr; do { //select the first one which is current and break out. 
- if((*cptr)->current()) { - source = *cptr; + if( (*cptr)->current() ) { + sync_source = *cptr; break; } if(++cptr == my_impl->connections.end()) @@ -1371,11 +1413,11 @@ struct msg_handler : public fc::visitor { } // verify there is an available source - if (!source || !source->current() ) { + if( !sync_source || !sync_source->current() ) { fc_elog( logger, "Unable to continue syncing at this time"); - sync_known_lib_num = chain_plug->chain().last_irreversible_block_num(); + sync_known_lib_num = lib_block_num; sync_last_requested_num = 0; - set_state(in_sync); // probably not, but we can't do anything else + set_state( in_sync ); // probably not, but we can't do anything else return; } @@ -1385,16 +1427,18 @@ struct msg_handler : public fc::visitor { if( end > sync_known_lib_num ) end = sync_known_lib_num; if( end > 0 && end >= start ) { - fc_ilog(logger, "requesting range ${s} to ${e}, from ${n}", - ("n",source->peer_name())("s",start)("e",end)); - source->request_sync_blocks(start, end); sync_last_requested_num = end; + connection_ptr c = sync_source; + g_sync.unlock(); + fc_ilog( logger, "requesting range ${s} to ${e}, from ${n}", + ("n", c->peer_address())( "s", start )( "e", end ) ); + c->request_sync_blocks( start, end ); } } } - void sync_manager::send_handshakes() - { + // static, thread safe + void sync_manager::send_handshakes() { boost::shared_lock g( my_impl->connections_mtx ); for( auto& ci : my_impl->connections ) { if( ci->current() ) { @@ -1404,37 +1448,40 @@ struct msg_handler : public fc::visitor { } void sync_manager::start_sync(const connection_ptr& c, uint32_t target) { + std::unique_lock g_sync( sync_mtx ); if( target > sync_known_lib_num) { sync_known_lib_num = target; } - if (!sync_required()) { + if( !is_sync_required() ) { uint32_t bnum = chain_plug->chain().last_irreversible_block_num(); uint32_t hnum = chain_plug->chain().fork_db_head_block_num(); fc_dlog( logger, "We are already caught up, my irr = ${b}, head = ${h}, target = ${t}", - ("b",bnum)("h",hnum)("t",target)); + ("b", bnum)( "h", hnum )( "t", target ) ); return; } - if (state == in_sync) { - set_state(lib_catchup); + if( sync_state == in_sync ) { + set_state( lib_catchup ); sync_next_expected_num = chain_plug->chain().last_irreversible_block_num() + 1; } - fc_ilog(logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", - ( "cc",sync_last_requested_num)("t",target)("p",c->peer_name())); + fc_ilog( logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", + ("cc", sync_last_requested_num)( "t", target )( "p", c->peer_address() ) ); - request_next_chunk(c); + request_next_chunk( std::move( g_sync ), c ); } - void sync_manager::reassign_fetch(const connection_ptr& c, go_away_reason reason) { - fc_ilog(logger, "reassign_fetch, our last req is ${cc}, next expected is ${ne} peer ${p}", - ( "cc",sync_last_requested_num)("ne",sync_next_expected_num)("p",c->peer_name())); + // called from connection strand + void sync_manager::sync_reassign_fetch(const connection_ptr& c, go_away_reason reason) { + std::unique_lock g( sync_mtx ); + fc_ilog( logger, "reassign_fetch, our last req is ${cc}, next expected is ${ne} peer ${p}", + ("cc", sync_last_requested_num)( "ne", sync_next_expected_num )( "p", c->peer_address() ) ); - if (c == source) { + if( c == sync_source ) { c->cancel_sync(reason); sync_last_requested_num = 0; - request_next_chunk(); + request_next_chunk( std::move(g) ); } } @@ -1442,7 +1489,7 @@ struct msg_handler : public fc::visitor { controller& cc = 
chain_plug->chain(); uint32_t lib_num = cc.last_irreversible_block_num(); uint32_t peer_lib = msg.last_irreversible_block_num; - reset_lib_num(c); + sync_reset_lib_num(c); c->syncing = false; //-------------------------------- @@ -1527,11 +1574,14 @@ struct msg_handler : public fc::visitor { if( req.req_blocks.mode == catch_up ) { c->fork_head = id; c->fork_head_num = num; - fc_ilog( logger, "got a catch_up notice while in ${s}, fork head num = ${fhn} target LIB = ${lib} next_expected = ${ne}", - ("s",stage_str(state))("fhn",num)("lib",sync_known_lib_num)("ne", sync_next_expected_num) ); - if (state == lib_catchup) + std::lock_guard g( sync_mtx ); + fc_ilog( logger, "got a catch_up notice while in ${s}, fork head num = ${fhn} " + "target LIB = ${lib} next_expected = ${ne}", + ("s", stage_str( sync_state ))( "fhn", num )( "lib", sync_known_lib_num ) + ("ne", sync_next_expected_num ) ); + if( sync_state == lib_catchup ) return; - set_state(head_catchup); + set_state( head_catchup ); } else { c->fork_head = block_id_type(); c->fork_head_num = 0; @@ -1541,12 +1591,12 @@ struct msg_handler : public fc::visitor { } void sync_manager::sync_recv_notice( const connection_ptr& c, const notice_message& msg) { - fc_ilog(logger, "sync_manager got ${m} block notice",("m",modes_str(msg.known_blocks.mode))); + fc_ilog( logger, "sync_manager got ${m} block notice", ("m", modes_str( msg.known_blocks.mode )) ); EOS_ASSERT( msg.known_blocks.mode == catch_up || msg.known_blocks.mode == last_irr_catch_up, plugin_exception, "sync_recv_notice only called on catch_up" ); if( msg.known_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}", - ("s", msg.known_blocks.ids.size())("p", c->peer_name()) ); + ("s", msg.known_blocks.ids.size())("p", c->peer_address()) ); c->close(); return; } @@ -1558,24 +1608,31 @@ struct msg_handler : public fc::visitor { } } else if (msg.known_blocks.mode == last_irr_catch_up) { c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; - reset_lib_num(c); + sync_reset_lib_num(c); start_sync(c, msg.known_trx.pending); } } - void sync_manager::rejected_block(const connection_ptr& c, uint32_t blk_num) { - if (state != in_sync ) { - fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_name()) ); + // called from connection strand + void sync_manager::rejected_block( const connection_ptr& c, uint32_t blk_num ) { + std::unique_lock g( sync_mtx ); + if( sync_state != in_sync ) { + fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_address()) ); sync_last_requested_num = 0; - source.reset(); + sync_source.reset(); + set_state( in_sync ); + g.unlock(); c->close(); - set_state(in_sync); send_handshakes(); } } + + // called from connection strand void sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { - fc_dlog(logger, "got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name())); - if (state == lib_catchup) { + fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_address() ) ); + std::unique_lock g_sync( sync_mtx ); + stages state = sync_state; + if( state == lib_catchup ) { if (blk_num != sync_next_expected_num) { fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_name()) ); @@ -1583,13 +1640,14 @@ struct msg_handler : public fc::visitor { return; } 
sync_next_expected_num = blk_num + 1; - } - if (state == head_catchup) { - fc_dlog(logger, "sync_manager in head_catchup state"); - set_state(in_sync); - source.reset(); + } else if( state == head_catchup ) { + fc_dlog( logger, "sync_manager in head_catchup state" ); + set_state( in_sync ); + sync_source.reset(); + g_sync.unlock(); block_id_type null_id; + bool set_state_to_head_catchup = false; boost::shared_lock g( my_impl->connections_mtx ); for( const auto& cp : my_impl->connections ) { uint32_t fork_head_num = cp->fork_head_num.load(); // fork_head_num provides memory barrier for fork_head @@ -1600,26 +1658,29 @@ struct msg_handler : public fc::visitor { c->fork_head = null_id; c->fork_head_num = 0; } else { - set_state(head_catchup); + set_state_to_head_catchup = true; } } g.unlock(); - if (state == in_sync) { + if( set_state_to_head_catchup ) { + g_sync.lock(); + set_state( head_catchup ); + g_sync.unlock(); + } else { send_handshakes(); } - } - else if (state == lib_catchup) { + } else if( state == lib_catchup ) { if( blk_num == sync_known_lib_num ) { - fc_dlog( logger, "All caught up with last known last irreversible block resending handshake"); - set_state(in_sync); + fc_dlog( logger, "All caught up with last known last irreversible block resending handshake" ); + set_state( in_sync ); + g_sync.unlock(); send_handshakes(); - } - else if (blk_num == sync_last_requested_num) { - request_next_chunk(); - } - else { - fc_dlog(logger,"calling sync_wait on connection ${p}",("p",c->peer_name())); + } else if( blk_num == sync_last_requested_num ) { + request_next_chunk( std::move( g_sync) ); + } else { + g_sync.unlock(); + fc_dlog( logger, "calling sync_wait on connection ${p}", ("p", c->peer_address()) ); c->sync_wait(); } } @@ -1627,11 +1688,12 @@ struct msg_handler : public fc::visitor { //------------------------------------------------------------------------ + // thread safe bool dispatch_manager::add_peer_block(const peer_block_state& entry) { - std::lock_guard g(blk_state_mtx); + std::lock_guard g( blk_state_mtx ); auto bptr = blk_state.get().find(std::make_tuple(std::ref(entry.id), entry.connection_id)); bool added = (bptr == blk_state.end()); - if (added){ + if( added ) { blk_state.insert(entry); } return added; @@ -1659,6 +1721,7 @@ struct msg_handler : public fc::visitor { return added; } + // thread safe void dispatch_manager::update_txns_block_num( const signed_block_ptr& sb ) { update_block_num ubn( sb->block_num() ); std::lock_guard g( local_txns_mtx ); @@ -1672,6 +1735,7 @@ struct msg_handler : public fc::visitor { } } + // thread safe void dispatch_manager::update_txns_block_num( const transaction_id_type& id, uint32_t blk_num ) { update_block_num ubn( blk_num ); std::lock_guard g( local_txns_mtx ); @@ -1758,6 +1822,7 @@ struct msg_handler : public fc::visitor { } } + // called from connection strand void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { peer_block_state pbstate{id, bnum, c->connection_id}; add_peer_block( pbstate ); @@ -1905,58 +1970,57 @@ struct msg_handler : public fc::visitor { //------------------------------------------------------------------------ // called from any thread - bool net_plugin_impl::resolve_and_connect(const connection_ptr& c) { - if( c->no_retry != go_away_reason::no_reason) { - fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( c->no_retry ))); + bool connection::resolve_and_connect() { + if( no_retry != go_away_reason::no_reason) { + fc_dlog( logger, 
"Skipping connect due to go_away reason ${r}",("r", reason_str( no_retry ))); return false; } - string::size_type colon = c->peer_address().find(':'); + string::size_type colon = peer_address().find(':'); if (colon == std::string::npos || colon == 0) { - fc_elog( logger, "Invalid peer address. must be \"host:port\": ${p}", ("p",c->peer_address()) ); + fc_elog( logger, "Invalid peer address. must be \"host:port\": ${p}", ("p", peer_address()) ); return false; } - string host = c->peer_address().substr( 0, colon ); - string port = c->peer_address().substr( colon + 1); + string host = peer_address().substr( 0, colon ); + string port = peer_address().substr( colon + 1); idump((host)(port)); tcp::resolver::query query( tcp::v4(), host, port ); - connection_wptr weak_conn = c; + connection_wptr weak_conn = shared_from_this(); // Note: need to add support for IPv6 too auto resolver = std::make_shared( *server_ioc ); - resolver->async_resolve( query, - [resolver, ioc = server_ioc, weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { + resolver->async_resolve( query, boost::asio::bind_executor( strand, + [resolver, ioc = server_ioc, weak_conn]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { auto c = weak_conn.lock(); if( !c ) return; if( !err ) { - connect( c, resolver, endpoint_itr ); + c->connect( resolver, endpoint_itr ); } else { fc_elog( logger, "Unable to resolve ${add}: ${error}", ("add", c->peer_name())( "error", err.message() ) ); } - } ); + } ) ); return true; } - // called from any thread - void net_plugin_impl::connect(const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr) { - if( c->no_retry != go_away_reason::no_reason) { - string rsn = reason_str(c->no_retry); + // called from connection strand + void connection::connect( const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr ) { + if( no_retry != go_away_reason::no_reason) { return; } auto current_endpoint = *endpoint_itr; ++endpoint_itr; - c->connecting = true; - c->socket.async_connect( current_endpoint, - boost::asio::bind_executor( c->strand, [resolver, c, endpoint_itr, this]( const boost::system::error_code& err ) { - if( !err && c->socket.is_open()) { + connecting = true; + socket.async_connect( current_endpoint, + boost::asio::bind_executor( strand, [resolver, c = shared_from_this(), endpoint_itr]( const boost::system::error_code& err ) { + if( !err && c->socket.is_open() ) { if( c->start_session() ) { c->send_handshake(); } } else { - if( endpoint_itr != tcp::resolver::iterator()) { + if( endpoint_itr != tcp::resolver::iterator() ) { c->close(); - connect( c, resolver, endpoint_itr ); + c->connect( resolver, endpoint_itr ); } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; @@ -2030,20 +2094,17 @@ struct msg_handler : public fc::visitor { } // only called from strand thread - void net_plugin_impl::start_read_message(const connection_ptr& conn) { - + void connection::start_read_message() { try { - connection_wptr weak_conn = conn; - std::size_t minimum_read = - std::atomic_exchangeoutstanding_read_bytes.load())>( &conn->outstanding_read_bytes, 0 ); + std::atomic_exchange( &outstanding_read_bytes, 0 ); minimum_read = minimum_read != 0 ? 
minimum_read : message_header_size; - if (use_socket_read_watermark) { + if (my_impl->use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; std::size_t socket_read_watermark = std::min(minimum_read, max_socket_read_watermark); boost::asio::socket_base::receive_low_watermark read_watermark_opt(socket_read_watermark); - conn->socket.set_option(read_watermark_opt); + socket.set_option(read_watermark_opt); } auto completion_handler = [minimum_read](boost::system::error_code ec, std::size_t bytes_transferred) -> std::size_t { @@ -2054,46 +2115,46 @@ struct msg_handler : public fc::visitor { } }; - if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || - conn->reads_in_flight > def_max_reads_in_flight || - conn->trx_in_progress_size > def_max_trx_in_progress_size ) + if( buffer_queue.write_queue_size() > def_max_write_queue_size || + reads_in_flight > def_max_reads_in_flight || + trx_in_progress_size > def_max_trx_in_progress_size ) { // too much queued up, reschedule - uint32_t write_queue_size = conn->buffer_queue.write_queue_size(); - uint32_t trx_in_progress_size = conn->trx_in_progress_size; - uint32_t reads_in_flight = conn->reads_in_flight; + uint32_t write_queue_size = buffer_queue.write_queue_size(); + uint32_t trx_in_progress_size = this->trx_in_progress_size.load(); + uint32_t reads_in_flight = this->reads_in_flight.load(); if( write_queue_size > def_max_write_queue_size ) { - peer_wlog( conn, "write_queue full ${s} bytes", ("s", write_queue_size) ); + peer_wlog( this, "write_queue full ${s} bytes", ("s", write_queue_size) ); } else if( reads_in_flight > def_max_reads_in_flight ) { - peer_wlog( conn, "max reads in flight ${s}", ("s", reads_in_flight) ); + peer_wlog( this, "max reads in flight ${s}", ("s", reads_in_flight) ); } else { - peer_wlog( conn, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); + peer_wlog( this, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); } if( write_queue_size > 2*def_max_write_queue_size || reads_in_flight > 2*def_max_reads_in_flight || - trx_in_progress_size > 2*def_max_trx_in_progress_size ) + trx_in_progress_size > 2*def_max_trx_in_progress_size ) { fc_elog( logger, "queues over full, giving up on connection, closing connection to: ${p}", - ("p", conn->peer_name()) ); - conn->close(); + ("p", peer_name()) ); + close(); return; } - std::lock_guard g( conn->read_delay_timer_mtx ); - conn->read_delay_timer.expires_from_now( def_read_delay_for_full_write_queue ); - conn->read_delay_timer.async_wait( - boost::asio::bind_executor(conn->strand, [this, weak_conn]( boost::system::error_code ) { + std::lock_guard g( read_delay_timer_mtx ); + read_delay_timer.expires_from_now( def_read_delay_for_full_write_queue ); + connection_wptr weak_conn = shared_from_this(); + read_delay_timer.async_wait( boost::asio::bind_executor(strand, [weak_conn]( boost::system::error_code ) { auto conn = weak_conn.lock(); if( !conn ) return; - start_read_message( conn ); + conn->start_read_message(); } ) ); return; } - ++conn->reads_in_flight; - boost::asio::async_read( conn->socket, - conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, - boost::asio::bind_executor( conn->strand, - [this, conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { + ++reads_in_flight; + boost::asio::async_read( socket, + pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, + boost::asio::bind_executor( strand, + [conn = shared_from_this()]( 
boost::system::error_code ec, std::size_t bytes_transferred ) { --conn->reads_in_flight; bool close_connection = false; @@ -2125,7 +2186,7 @@ struct msg_handler : public fc::visitor { if (bytes_in_buffer >= total_message_bytes) { conn->pending_message_buffer.advance_read_ptr(message_header_size); - if (!process_next_message(conn, message_length)) { + if (!conn->process_next_message(message_length)) { return; } } else { @@ -2140,7 +2201,7 @@ struct msg_handler : public fc::visitor { } } } - if( !close_connection ) start_read_message( conn ); + if( !close_connection ) conn->start_read_message(); } else { if (ec.value() != boost::asio::error::eof) { fc_elog( logger, "Error reading message: ${m}", ( "m", ec.message() ) ); @@ -2169,15 +2230,16 @@ struct msg_handler : public fc::visitor { } })); } catch (...) { - fc_elog( logger, "Undefined exception in start_read_message, closing connection to: ${p}", ("p", conn->peer_name()) ); - conn->close(); + fc_elog( logger, "Undefined exception in start_read_message, closing connection to: ${p}", ("p", peer_name()) ); + close(); } } - bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { + // called from connection strand + bool connection::process_next_message( uint32_t message_length ) { try { // if next message is a block we already have, exit early - auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); + auto peek_ds = pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); if( which == signed_block_which ) { @@ -2185,26 +2247,21 @@ struct msg_handler : public fc::visitor { fc::raw::unpack( peek_ds, bh ); block_id_type blk_id = bh.id(); - if( dispatcher->have_block( blk_id ) ) { - connection_wptr weak = conn; - app().post(priority::high, // high since block processing is high and this needs to run before next block - [dispatcher = dispatcher.get(), sync_master = sync_master.get(), weak{std::move(weak)}, blk_id] { - connection_ptr c = weak.lock(); - if(c) { - auto blk_num = block_header::num_from_id(blk_id); - dispatcher->recv_block(c, blk_id, blk_num); - sync_master->sync_recv_block( c, blk_id, blk_num ); - } - }); - conn->pending_message_buffer.advance_read_ptr( message_length ); + if( my_impl->dispatcher->have_block( blk_id ) ) { + auto blk_num = block_header::num_from_id( blk_id ); + connection_ptr c = shared_from_this(); + my_impl->dispatcher->recv_block( c, blk_id, blk_num ); + my_impl->sync_master->sync_recv_block( c, blk_id, blk_num ); + + pending_message_buffer.advance_read_ptr( message_length ); return true; } } - auto ds = conn->pending_message_buffer.create_datastream(); + auto ds = pending_message_buffer.create_datastream(); net_message msg; fc::raw::unpack( ds, msg ); - msg_handler m( *this, conn ); + msg_handler m( *my_impl, shared_from_this() ); if( msg.contains() ) { m( std::move( msg.get() ) ); } else if( msg.contains() ) { @@ -2214,14 +2271,35 @@ struct msg_handler : public fc::visitor { } } catch( const fc::exception& e ) { fc_elog( logger, "Exception in handling message from ${p}: ${s}", - ("p", conn->peer_name())("s", e.to_detail_string()) ); - conn->close(); + ("p", peer_name())("s", e.to_detail_string()) ); + close(); return false; } return true; } - bool net_plugin_impl::is_valid(const handshake_message& msg) { + // call only from main application thread + void net_plugin_impl::update_chain_info() { + controller& cc = chain_plug->chain(); + std::lock_guard g( chain_info_mtx ); + chain_lib_num = 
cc.last_irreversible_block_num(); + chain_lib_id = cc.last_irreversible_block_id(); + chain_head_blk_num = cc.head_block_num(); + chain_head_blk_id = cc.head_block_id(); + chain_fork_head_blk_num = cc.fork_db_head_block_num(); + chain_fork_head_blk_id = cc.fork_db_head_block_id(); + } + + // lib_num, head_blk_num, fork_head_blk_num, lib_id, head_blk_id, fork_head_blk_id + std::tuple + net_plugin_impl::get_chain_info() const { + std::lock_guard g( chain_info_mtx ); + return std::make_tuple( + chain_lib_num, chain_head_blk_num, chain_fork_head_blk_num, + chain_lib_id, chain_head_blk_id, chain_fork_head_blk_id ); + } + + bool net_plugin_impl::is_valid( const handshake_message& msg ) { // Do some basic validation of an incoming handshake_message, so things // that really aren't handshake messages can be quickly discarded without // affecting state. @@ -2250,16 +2328,14 @@ struct msg_handler : public fc::visitor { peer_ilog(c, "received chain_size_message"); } - void net_plugin_impl::handle_message(const connection_ptr& c, const handshake_message& msg) { + // called from connection strand + void net_plugin_impl::handle_message( const connection_ptr& c, const handshake_message& msg ) { peer_ilog(c, "received handshake_message"); - if (!is_valid(msg)) { + if( !is_valid( msg ) ) { peer_elog( c, "bad handshake message"); - c->enqueue( go_away_message( fatal_other )); + c->enqueue( go_away_message( fatal_other ) ); return; } - controller& cc = chain_plug->chain(); - uint32_t lib_num = cc.last_irreversible_block_num(); - uint32_t peer_lib = msg.last_irreversible_block_num; if( c->connecting ) { c->connecting = false; } @@ -2325,28 +2401,32 @@ struct msg_handler : public fc::visitor { return; } - bool on_fork = false; - fc_dlog(logger, "lib_num = ${ln} peer_lib = ${pl}",("ln",lib_num)("pl",peer_lib)); + uint32_t peer_lib = msg.last_irreversible_block_num; + app().post( priority::low, [peer_lib, chain_plug = this->chain_plug, c, msg_lib_id = msg.last_irreversible_block_id]() { + controller& cc = chain_plug->chain(); + uint32_t lib_num = cc.last_irreversible_block_num(); - if( peer_lib <= lib_num && peer_lib > 0) { - try { - block_id_type peer_lib_id = cc.get_block_id_for_num( peer_lib); - on_fork =( msg.last_irreversible_block_id != peer_lib_id); - } - catch( const unknown_block_exception &ex) { - fc_wlog( logger, "peer last irreversible block ${pl} is unknown", ("pl", peer_lib) ); - on_fork = true; - } - catch( ...) { - fc_wlog( logger, "caught an exception getting block id for ${pl}",("pl",peer_lib) ); - on_fork = true; - } - if( on_fork) { - fc_elog( logger, "Peer chain is forked" ); - c->enqueue( go_away_message( forked )); - return; + bool on_fork = false; + fc_dlog( logger, "lib_num = ${ln} peer_lib = ${pl}", ("ln", lib_num)( "pl", peer_lib ) ); + + if( peer_lib <= lib_num && peer_lib > 0 ) { + try { + block_id_type peer_lib_id = cc.get_block_id_for_num( peer_lib ); + on_fork = (msg_lib_id != peer_lib_id); + } catch( const unknown_block_exception& ex ) { + fc_wlog( logger, "peer last irreversible block ${pl} is unknown", ("pl", peer_lib) ); + on_fork = true; + } catch( ... 
) { + fc_wlog( logger, "caught an exception getting block id for ${pl}", ("pl", peer_lib) ); + on_fork = true; + } + if( on_fork ) { + fc_elog( logger, "Peer chain is forked" ); + c->enqueue( go_away_message( forked ) ); + return; + } } - } + }); if (c->sent_handshake_count == 0) { c->send_handshake(); @@ -2513,7 +2593,7 @@ struct msg_handler : public fc::visitor { trx->get_signatures().size() * sizeof(signature_type); } - // called from thread_pool threads + // called from connection strand void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); if( db_read_mode == eosio::db_read_mode::READ_ONLY ) { @@ -2538,6 +2618,7 @@ struct msg_handler : public fc::visitor { c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { + // next (this lambda) called from application thread c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); bool accepted = false; if (result.contains()) { @@ -2554,7 +2635,10 @@ struct msg_handler : public fc::visitor { } } - app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}, head_blk_num = this->head_blk_num]() { + controller& cc = chain_plug->chain(); + uint32_t head_blk_num = cc.head_block_num(); + + app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}, head_blk_num]() { if( accepted ) { dispatcher->bcast_transaction( ptrx ); } else { @@ -2564,6 +2648,7 @@ struct msg_handler : public fc::visitor { }); } + // called from application thread void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { controller& cc = chain_plug->chain(); block_id_type blk_id = msg->id(); @@ -2573,7 +2658,9 @@ struct msg_handler : public fc::visitor { try { if( cc.fetch_block_by_id(blk_id) ) { - sync_master->sync_recv_block(c, blk_id, blk_num); + c->strand.post( [sync_master = sync_master.get(), c, blk_id, blk_num]() { + sync_master->sync_recv_block( c, blk_id, blk_num ); + }); return; } } catch( ...) 
{ @@ -2581,14 +2668,16 @@ struct msg_handler : public fc::visitor { fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); } - dispatcher->recv_block(c, blk_id, blk_num); + c->strand.post( [dispatcher = dispatcher.get(), c, blk_id, blk_num]() { + dispatcher->recv_block( c, blk_id, blk_num ); + }); fc::microseconds age( fc::time_point::now() - msg->timestamp); peer_ilog(c, "received signed_block : #${n} block age in secs = ${age}", ("n",blk_num)("age",age.to_seconds())); go_away_reason reason = fatal_other; try { - chain_plug->accept_block(msg); //, sync_master->is_active(c)); + chain_plug->accept_block(msg); reason = no_reason; } catch( const unlinkable_block_exception &ex) { peer_elog(c, "bad signed_block : ${m}", ("m",ex.what())); @@ -2609,16 +2698,18 @@ struct msg_handler : public fc::visitor { fc_elog( logger, "handle sync block caught something else from ${p}",("num",blk_num)("p",c->peer_name())); } - update_block_num ubn(blk_num); if( reason == no_reason ) { boost::asio::post( *server_ioc, [self = this, msg]() { self->dispatcher->update_txns_block_num( msg ); }); - sync_master->sync_recv_block(c, blk_id, blk_num); - } - else { - sync_master->rejected_block(c, blk_num); - dispatcher->rejected_block( blk_id ); + c->strand.post( [sync_master = sync_master.get(), c, blk_id, blk_num]() { + sync_master->sync_recv_block( c, blk_id, blk_num ); + }); + } else { + c->strand.post( [sync_master = sync_master.get(), dispatcher = dispatcher.get(), c, blk_id, blk_num]() { + sync_master->rejected_block( c, blk_num ); + dispatcher->rejected_block( blk_id ); + }); } } @@ -2685,7 +2776,8 @@ struct msg_handler : public fc::visitor { void net_plugin_impl::expire() { auto now = time_point::now(); - uint32_t lib = lib_num.load(); + uint32_t lib = 0; + std::tie( lib, std::ignore, std::ignore, std::ignore, std::ignore, std::ignore ) = get_chain_info(); dispatcher->expire_blocks( lib ); dispatcher->expire_txns( lib ); fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); @@ -2708,7 +2800,7 @@ struct msg_handler : public fc::visitor { } if( !(*it)->socket_is_open() && !(*it)->connecting) { if( (*it)->peer_address().length() > 0) { - if( !resolve_and_connect(*it) ) { + if( !(*it)->resolve_and_connect() ) { it = connections.erase(it); continue; } @@ -2723,17 +2815,28 @@ struct msg_handler : public fc::visitor { start_conn_timer(connector_period, std::weak_ptr()); } - void net_plugin_impl::accepted_block(const block_state_ptr& block) { + // called from application thread + void net_plugin_impl::on_accepted_block(const block_state_ptr& block) { + update_chain_info(); boost::asio::post( *server_ioc, [this, ioc=server_ioc, block]() { fc_dlog( logger, "signaled, id = ${id}", ("id", block->id) ); dispatcher->bcast_block( block ); }); } + // called from application thread + void net_plugin_impl::on_irreversible_block( const block_state_ptr& ) { + update_chain_info(); + } + + // called from application thread void net_plugin_impl::transaction_ack(const std::pair& results) { const auto& id = results.second->id; if (results.first) { fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); + + controller& cc = chain_plug->chain(); + uint32_t head_blk_num = cc.head_block_num(); dispatcher->rejected_transaction(id, head_blk_num); } else { fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); @@ -2815,7 +2918,7 @@ struct msg_handler : public fc::visitor { return chain::signature_type(); } - // call from main application thread + 
// call from connection strand void handshake_initializer::populate( handshake_message& hello ) { hello.network_version = net_version_base + net_version; hello.chain_id = my_impl->chain_id; @@ -2839,29 +2942,8 @@ struct msg_handler : public fc::visitor { #endif hello.agent = my_impl->user_agent_name; - - controller& cc = my_impl->chain_plug->chain(); - hello.head_id = fc::sha256(); - hello.last_irreversible_block_id = fc::sha256(); - hello.head_num = cc.fork_db_head_block_num(); - hello.last_irreversible_block_num = cc.last_irreversible_block_num(); - if( hello.last_irreversible_block_num ) { - try { - hello.last_irreversible_block_id = cc.get_block_id_for_num(hello.last_irreversible_block_num); - } - catch( const unknown_block_exception &ex) { - fc_wlog( logger, "caught unkown_block" ); - hello.last_irreversible_block_num = 0; - } - } - if( hello.head_num ) { - try { - hello.head_id = cc.get_block_id_for_num( hello.head_num ); - } - catch( const unknown_block_exception &ex) { - hello.head_num = 0; - } - } + std::tie( hello.last_irreversible_block_num, std::ignore, hello.head_num, + hello.last_irreversible_block_id, std::ignore, hello.head_id ) = my_impl->get_chain_info(); } net_plugin::net_plugin() @@ -3054,7 +3136,7 @@ struct msg_handler : public fc::visitor { } chain::controller&cc = my->chain_plug->chain(); { - cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); + cc.accepted_block.connect( boost::bind(&net_plugin_impl::on_accepted_block, my.get(), _1)); } my->incoming_transaction_ack_subscription = app().get_channel().subscribe( @@ -3135,7 +3217,7 @@ struct msg_handler : public fc::visitor { connection_ptr c = std::make_shared( host ); fc_dlog( logger, "calling active connector" ); - if( my->resolve_and_connect( c ) ) { + if( c->resolve_and_connect() ) { fc_dlog( logger, "adding new connection to the list" ); boost::unique_lock g( my->connections_mtx ); my->connections.insert( c ); From b8762f851b9683b8e6e42d4c7471adc5caed66f4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 11:36:38 -0500 Subject: [PATCH 0073/1648] Make sync_manager::recv_handshake connection strand safe --- plugins/net_plugin/net_plugin.cpp | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 205bfbfb105..07a3203da4b 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -161,13 +161,13 @@ namespace eosio { static void send_handshakes(); public: - explicit sync_manager(uint32_t span); - void sync_reset_lib_num(const connection_ptr& conn); - void sync_reassign_fetch(const connection_ptr& c, go_away_reason reason); - void rejected_block(const connection_ptr& c, uint32_t blk_num); - void sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); - void recv_handshake(const connection_ptr& c, const handshake_message& msg); - void sync_recv_notice( const connection_ptr& c, const notice_message& msg); + explicit sync_manager( uint32_t span ); + void sync_reset_lib_num( const connection_ptr& conn ); + void sync_reassign_fetch( const connection_ptr& c, go_away_reason reason ); + void rejected_block( const connection_ptr& c, uint32_t blk_num ); + void sync_recv_block( const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num ); + void recv_handshake( const connection_ptr& c, const handshake_message& msg ); + void sync_recv_notice( const connection_ptr& c, const 
notice_message& msg ); }; class dispatch_manager { @@ -1485,10 +1485,15 @@ namespace eosio { } } - void sync_manager::recv_handshake(const connection_ptr& c, const handshake_message& msg) { - controller& cc = chain_plug->chain(); - uint32_t lib_num = cc.last_irreversible_block_num(); + void sync_manager::recv_handshake( const connection_ptr& c, const handshake_message& msg ) { + uint32_t lib_num = 0; uint32_t peer_lib = msg.last_irreversible_block_num; + uint32_t head = 0; + block_id_type head_id; + + std::tie( lib_num, std::ignore, head, + std::ignore, std::ignore, head_id ) = my_impl->get_chain_info(); + sync_reset_lib_num(c); c->syncing = false; @@ -1504,8 +1509,6 @@ namespace eosio { // //----------------------------- - uint32_t head = cc.fork_db_head_block_num(); - block_id_type head_id = cc.fork_db_head_block_id(); if (head_id == msg.head_id) { fc_dlog(logger, "sync check state 0"); // notify peer of our pending transactions @@ -1520,7 +1523,7 @@ namespace eosio { fc_dlog(logger, "sync check state 1"); // wait for receipt of a notice message before initiating sync if (c->protocol_version < proto_explicit_sync) { - start_sync( c, peer_lib); + start_sync( c, peer_lib ); } return; } From 99b422067955fa9d4fa5b3dd2e8af64fa977dc9d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 22:26:39 -0500 Subject: [PATCH 0074/1648] Handle almost every net_message on net_plugin thread pool. Optimize bcast_block to not send when syncing. --- plugins/net_plugin/net_plugin.cpp | 396 +++++++++++++++--------------- 1 file changed, 193 insertions(+), 203 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 07a3203da4b..2cc242b9659 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -147,14 +147,14 @@ namespace eosio { uint32_t sync_next_expected_num; uint32_t sync_req_span; connection_ptr sync_source; - stages sync_state; + std::atomic sync_state; chain_plugin* chain_plug = nullptr; private: constexpr static auto stage_str( stages s ); void set_state( stages s ); - bool is_sync_required(); + bool is_sync_required( uint32_t fork_head_block_num ); void request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn = connection_ptr() ); void start_sync( const connection_ptr& c, uint32_t target ); void verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); @@ -162,6 +162,7 @@ namespace eosio { public: explicit sync_manager( uint32_t span ); + bool syncing_with_peer() const { return sync_state == lib_catchup; } void sync_reset_lib_num( const connection_ptr& conn ); void sync_reassign_fetch( const connection_ptr& c, go_away_reason reason ); void rejected_block( const connection_ptr& c, uint32_t blk_num ); @@ -637,7 +638,6 @@ namespace eosio { bool connected(); bool current(); - void reset(); void close(); private: static void _close( connection* self ); // for easy capture @@ -759,61 +759,51 @@ namespace eosio { void operator()( packed_transaction&& msg ) const { // continue call to handle_message on connection strand + fc_dlog( logger, "handle packed_transaction" ); shared_ptr ptr = std::make_shared( std::move( msg ) ); impl.handle_message( c, ptr ); } void operator()( const handshake_message& msg ) const { // continue call to handle_message on connection strand + fc_dlog( logger, "handle handshake_message" ); impl.handle_message( c, msg ); } void operator()( const chain_size_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, 
msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle chain_size_message" ); + impl.handle_message( c, msg ); } void operator()( const go_away_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle go_away_message" ); + impl.handle_message( c, msg ); } void operator()( const time_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle time_message" ); + impl.handle_message( c, msg ); } void operator()( const notice_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle notice_message" ); + impl.handle_message( c, msg ); } void operator()( const request_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle request_message" ); + impl.handle_message( c, msg ); } void operator()( const sync_request_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle sync_request_message" ); + impl.handle_message( c, msg ); } }; @@ -907,10 +897,6 @@ namespace eosio { return (connected() && !syncing); } - void connection::reset() { - peer_requested.reset(); - } - void connection::flush_queues() { buffer_queue.clear_write_queue(); } @@ -930,7 +916,7 @@ namespace eosio { if( self->last_req ) { my_impl->dispatcher->retry_fetch( self->shared_from_this() ); } - self->reset(); + self->peer_requested.reset(); self->sent_handshake_count = 0; self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); @@ -943,8 +929,11 @@ namespace eosio { } void connection::blk_send_branch() { - controller& cc = my_impl->chain_plug->chain(); - uint32_t head_num = cc.fork_db_head_block_num(); + uint32_t head_num = 0; + block_id_type head_id; + std::tie( std::ignore, std::ignore, head_num, + std::ignore, std::ignore, head_id ) = my_impl->get_chain_info(); + notice_message note; note.known_blocks.mode = normal; note.known_blocks.pending = 0; @@ -953,30 +942,16 @@ namespace eosio { enqueue(note); return; } - block_id_type head_id; - block_id_type lib_id; block_id_type remote_head_id; uint32_t remote_head_num = 0; - try { - if (last_handshake_recv.generation >= 1) { - remote_head_id = last_handshake_recv.head_id; - remote_head_num = block_header::num_from_id(remote_head_id); - fc_dlog(logger, "maybe truncating branch at = 
${h}:${id}",("h",remote_head_num)("id",remote_head_id)); - } - - lib_id = last_handshake_recv.last_irreversible_block_id; - head_id = cc.fork_db_head_block_id(); - } - catch (const assert_exception& ex) { - fc_elog( logger, "unable to retrieve block info: ${n} for ${p}",("n",ex.to_string())("p",peer_name()) ); - enqueue(note); - return; - } - catch (const fc::exception& ex) { - } - catch (...) { + if( last_handshake_recv.generation >= 1 ) { + remote_head_id = last_handshake_recv.head_id; + remote_head_num = block_header::num_from_id(remote_head_id); + fc_dlog( logger, "maybe truncating branch at = ${h}:${id}", ("h", remote_head_num)( "id", remote_head_id ) ); } + block_id_type lib_id = last_handshake_recv.last_irreversible_block_id; + if( !peer_requested ) { peer_requested = sync_state( block_header::num_from_id(lib_id)+1, block_header::num_from_id(head_id), @@ -992,27 +967,29 @@ namespace eosio { syncing = false; } - void connection::blk_send(const block_id_type& blkid) { - controller &cc = my_impl->chain_plug->chain(); - try { - signed_block_ptr b = cc.fetch_block_by_id(blkid); - if(b) { - fc_dlog(logger,"found block for id at num ${n}",("n",b->block_num())); - my_impl->dispatcher->add_peer_block({blkid, block_header::num_from_id(blkid), connection_id}); - enqueue_block( b ); - } else { - fc_ilog( logger, "fetch block by id returned null, id ${id} for ${p}", - ("id",blkid)("p",peer_name()) ); + void connection::blk_send( const block_id_type& blkid ) { + app().post( priority::low, [blkid, c = shared_from_this()]() { + controller& cc = my_impl->chain_plug->chain(); + try { + signed_block_ptr b = cc.fetch_block_by_id( blkid ); + if( b ) { + fc_dlog( logger, "found block for id at num ${n}", ("n", b->block_num()) ); + my_impl->dispatcher->add_peer_block( {blkid, block_header::num_from_id( blkid ), c->connection_id} ); + c->strand.post( [c, b{std::move(b)}]() { + c->enqueue_block( b ); + } ); + } else { + fc_ilog( logger, "fetch block by id returned null, id ${id} for ${p}", + ("id", blkid)( "p", c->peer_address() ) ); + } + } catch( const assert_exception& ex ) { + fc_elog( logger, "caught assert on fetch_block_by_id, ${ex}, id ${id} for ${p}", + ("ex", ex.to_string())( "id", blkid )( "p", c->peer_address() ) ); + } catch( ... ) { + fc_elog( logger, "caught other exception fetching block id ${id} for ${p}", + ("id", blkid)( "p", c->peer_address() ) ); } - } - catch (const assert_exception &ex) { - fc_elog( logger, "caught assert on fetch_block_by_id, ${ex}, id ${id} for ${p}", - ("ex",ex.to_string())("id",blkid)("p",peer_name()) ); - } - catch (...) 
{ - fc_elog( logger, "caught other exception fetching block id ${id} for ${p}", - ("id",blkid)("p",peer_name()) ); - } + }); } void connection::stop_send() { @@ -1024,7 +1001,7 @@ namespace eosio { handshake_initializer::populate( c->last_handshake_sent ); c->last_handshake_sent.generation = ++c->sent_handshake_count; fc_dlog( logger, "Sending handshake generation ${g} to ${ep}", - ("g", c->last_handshake_sent.generation)( "ep", c->peer_name() ) ); + ("g", c->last_handshake_sent.generation)( "ep", c->peer_address() ) ); c->enqueue( c->last_handshake_sent ); }); } @@ -1062,7 +1039,6 @@ namespace eosio { } } - // called from connection strand and application thread void connection::do_queue_write(int priority) { if( !buffer_queue.ready_to_send() ) return; @@ -1101,8 +1077,8 @@ namespace eosio { } void connection::cancel_sync(go_away_reason reason) { - fc_dlog(logger,"cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", - ("m",reason_str(reason)) ("o", buffer_queue.write_queue_size())("p", peer_name())); + fc_dlog( logger, "cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", + ("m", reason_str( reason ))( "o", buffer_queue.write_queue_size() )( "p", peer_address() ) ); cancel_wait(); flush_queues(); switch (reason) { @@ -1113,33 +1089,38 @@ namespace eosio { break; } default: - fc_dlog(logger, "sending empty request but not calling sync wait on ${p}", ("p",peer_name())); + fc_dlog(logger, "sending empty request but not calling sync wait on ${p}", ("p",peer_address())); enqueue( ( sync_request_message ) {0,0} ); } } bool connection::enqueue_sync_block() { - if (!peer_requested) + if( !peer_requested ) { + fc_dlog( logger, "enqueue sync block, with no peer_requested" ); return false; + } else { + fc_dlog( logger, "enqueue sync block ${num}", ("num", peer_requested->last + 1) ); + } uint32_t num = ++peer_requested->last; - bool trigger_send = num == peer_requested->start_block; + bool trigger_send = true; // todo: = num == peer_requested->start_block; if(num == peer_requested->end_block) { peer_requested.reset(); } - try { + app().post( priority::low, [num, trigger_send, c = shared_from_this()]() { controller& cc = my_impl->chain_plug->chain(); - signed_block_ptr sb = cc.fetch_block_by_number(num); - if(sb) { - enqueue_block( sb, trigger_send, true); - return true; + signed_block_ptr sb = cc.fetch_block_by_number( num ); + if( sb ) { + c->strand.post( [c, sb{std::move(sb)}, trigger_send]() { + c->enqueue_block( sb, trigger_send, true ); + }); } - } catch ( ... 
) { - fc_wlog( logger, "write loop exception" ); - } - return false; + }); + + return true; } void connection::enqueue( const net_message& m, bool trigger_send ) { + verify_strand_in_this_thread( strand, __func__, __LINE__ ); go_away_reason close_after_send = no_reason; if (m.contains()) { close_after_send = m.get().reason; @@ -1194,6 +1175,8 @@ namespace eosio { } void connection::enqueue_block( const signed_block_ptr& sb, bool trigger_send, bool to_sync_queue) { + fc_dlog( logger, "enqueue block ${num}", ("num", sb->block_num()) ); + verify_strand_in_this_thread( strand, __func__, __LINE__ ); enqueue_buffer( create_send_buffer( sb ), trigger_send, priority::low, no_reason, to_sync_queue); } @@ -1224,7 +1207,7 @@ namespace eosio { void connection::sync_wait() { connection_ptr c(shared_from_this()); std::lock_guard g( response_expected_timer_mtx ); - response_expected_timer.expires_from_now( my_impl->resp_expected_period); + response_expected_timer.expires_from_now( my_impl->resp_expected_period ); response_expected_timer.async_wait( boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { c->sync_timeout( ec ); @@ -1233,20 +1216,13 @@ namespace eosio { // thread safe void connection::fetch_wait() { - connection_wptr c(shared_from_this()); + connection_ptr c( shared_from_this() ); std::lock_guard g( response_expected_timer_mtx ); - response_expected_timer.expires_from_now( my_impl->resp_expected_period); - response_expected_timer.async_wait( [c]( boost::system::error_code ec ) { - app().post(priority::low, [c, ec]() { - connection_ptr conn = c.lock(); - if (!conn) { - // connection was destroyed before this lambda was delivered - return; - } - - conn->fetch_timeout(ec); - }); - } ); + response_expected_timer.expires_from_now( my_impl->resp_expected_period ); + response_expected_timer.async_wait( + boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { + c->fetch_timeout(ec); + } ) ); } // called from connection strand @@ -1275,21 +1251,19 @@ namespace eosio { void connection::fetch_timeout( boost::system::error_code ec ) { if( !ec ) { - my_impl->dispatcher->retry_fetch(shared_from_this()); - } - else if( ec == boost::asio::error::operation_aborted ) { + my_impl->dispatcher->retry_fetch( shared_from_this() ); + } else if( ec == boost::asio::error::operation_aborted ) { if( !connected() ) { - fc_dlog(logger, "fetch timeout was cancelled due to dead connection"); + fc_dlog( logger, "fetch timeout was cancelled due to dead connection" ); } - } - else { + } else { fc_elog( logger, "setting timer for fetch request got error ${ec}", ("ec", ec.message() ) ); } } void connection::request_sync_blocks(uint32_t start, uint32_t end) { sync_request_message srm = {start,end}; - enqueue( net_message(srm)); + enqueue( net_message(srm) ); sync_wait(); } @@ -1340,15 +1314,6 @@ namespace eosio { } } - bool sync_manager::is_sync_required() { - fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", - ("req", sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) - ("head", chain_plug->chain().fork_db_head_block_num() ) ); - - return( sync_last_requested_num < sync_known_lib_num || - chain_plug->chain().fork_db_head_block_num() < sync_last_requested_num ); - } - // call with g_sync locked void sync_manager::request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn ) { uint32_t fork_head_block_num = 0; @@ -1430,9 +1395,10 @@ namespace eosio { sync_last_requested_num = end; 
connection_ptr c = sync_source; g_sync.unlock(); - fc_ilog( logger, "requesting range ${s} to ${e}, from ${n}", - ("n", c->peer_address())( "s", start )( "e", end ) ); - c->request_sync_blocks( start, end ); + c->strand.post( [c, start, end]() { + fc_ilog( logger, "requesting range ${s} to ${e}, from ${n}", ("n", c->peer_address())( "s", start )( "e", end ) ); + c->request_sync_blocks( start, end ); + } ); } } } @@ -1447,23 +1413,35 @@ namespace eosio { } } + bool sync_manager::is_sync_required( uint32_t fork_head_block_num ) { + fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", + ("req", sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) + ("head", fork_head_block_num ) ); + + return( sync_last_requested_num < sync_known_lib_num || + fork_head_block_num < sync_last_requested_num ); + } + void sync_manager::start_sync(const connection_ptr& c, uint32_t target) { std::unique_lock g_sync( sync_mtx ); if( target > sync_known_lib_num) { sync_known_lib_num = target; } - if( !is_sync_required() ) { - uint32_t bnum = chain_plug->chain().last_irreversible_block_num(); - uint32_t hnum = chain_plug->chain().fork_db_head_block_num(); + uint32_t lib_num = 0; + uint32_t fork_head_block_num = 0; + std::tie( lib_num, std::ignore, fork_head_block_num, + std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info(); + + if( !is_sync_required( fork_head_block_num ) ) { fc_dlog( logger, "We are already caught up, my irr = ${b}, head = ${h}, target = ${t}", - ("b", bnum)( "h", hnum )( "t", target ) ); + ("b", lib_num)( "h", fork_head_block_num )( "t", target ) ); return; } if( sync_state == in_sync ) { set_state( lib_catchup ); - sync_next_expected_num = chain_plug->chain().last_irreversible_block_num() + 1; + sync_next_expected_num = lib_num + 1; } fc_ilog( logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", @@ -1635,6 +1613,7 @@ namespace eosio { fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_address() ) ); std::unique_lock g_sync( sync_mtx ); stages state = sync_state; + fc_dlog( logger, "state ${s}", ("s", stage_str( state )) ); if( state == lib_catchup ) { if (blk_num != sync_next_expected_num) { fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", @@ -1643,7 +1622,8 @@ namespace eosio { return; } sync_next_expected_num = blk_num + 1; - } else if( state == head_catchup ) { + } + if( state == head_catchup ) { fc_dlog( logger, "sync_manager in head_catchup state" ); set_state( in_sync ); sync_source.reset(); @@ -1790,9 +1770,14 @@ namespace eosio { void dispatch_manager::bcast_block(const block_state_ptr& bs) { fc_dlog( logger, "bcast block ${b}", ("b", bs->block_num) ); - boost::shared_lock g( my_impl->connections_mtx ); + if( my_impl->sync_master->syncing_with_peer() ) return; bool have_connection = false; + boost::shared_lock g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { + + peer_dlog( cp, "socket_is_open ${s}, connecting ${c}, syncing ${ss}", + ("s", cp->socket_is_open())("c", cp->connecting.load())("ss", cp->syncing.load()) ); + if( !cp->current() ) { continue; } @@ -1818,7 +1803,7 @@ namespace eosio { if( !add_peer_block( pbstate ) ) { return; } - fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_address() ) ); cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); } }); @@ -1865,8 
+1850,10 @@ namespace eosio { send_buffer = create_send_buffer( trx ); } - fc_dlog(logger, "sending trx to ${n}", ("n", cp->peer_name() ) ); - cp->enqueue_buffer( send_buffer, true, priority::low, no_reason ); + cp->strand.post( [cp, send_buffer]() { + fc_dlog( logger, "sending trx to ${n}", ("n", cp->peer_address()) ); + cp->enqueue_buffer( send_buffer, true, priority::low, no_reason ); + } ); } } @@ -1885,67 +1872,65 @@ namespace eosio { update_txns_block_num( id, head_blk_num ); } + // called from connection strand void dispatch_manager::recv_notice(const connection_ptr& c, const notice_message& msg, bool generated) { request_message req; req.req_trx.mode = none; req.req_blocks.mode = none; - bool send_req = false; if (msg.known_trx.mode == normal) { req.req_trx.mode = normal; req.req_trx.pending = 0; - send_req = false; - } - else if (msg.known_trx.mode != none) { - fc_elog( logger,"passed a notice_message with something other than a normal on none known_trx" ); + } else if (msg.known_trx.mode != none) { + fc_elog( logger, "passed a notice_message with something other than a normal on none known_trx" ); return; } if (msg.known_blocks.mode == normal) { req.req_blocks.mode = normal; - controller& cc = my_impl->chain_plug->chain(); // known_blocks.ids is never > 1 if( !msg.known_blocks.ids.empty() ) { - const block_id_type& blkid = msg.known_blocks.ids.back(); - signed_block_ptr b; - try { - b = cc.fetch_block_by_id(blkid); // if exists - if(b) { - add_peer_block({blkid, block_header::num_from_id(blkid), c->connection_id}); + app().post( priority::low, [this, msg{std::move(msg)}, req{std::move(req)}, c]() mutable { + const block_id_type& blkid = msg.known_blocks.ids.back(); + signed_block_ptr b; + try { + controller& cc = my_impl->chain_plug->chain(); + b = cc.fetch_block_by_id( blkid ); // if exists + if( b ) { + add_peer_block( {blkid, block_header::num_from_id( blkid ), c->connection_id} ); + } + } catch( const assert_exception& ex ) { + fc_ilog( logger, "caught assert on fetch_block_by_id, ${ex}", ("ex", ex.what()) ); + // keep going, client can ask another peer + } catch( ... ) { + fc_elog( logger, "failed to retrieve block for id" ); } - } catch (const assert_exception &ex) { - fc_ilog( logger, "caught assert on fetch_block_by_id, ${ex}",("ex",ex.what()) ); - // keep going, client can ask another peer - } catch (...) 
{ - fc_elog( logger, "failed to retrieve block for id"); - } - if (!b) { - send_req = true; - req.req_blocks.ids.push_back( blkid ); - } + if( !b ) { + req.req_blocks.ids.push_back( blkid ); + c->strand.post( [req{std::move(req)}, c{std::move(c)}]() mutable { + fc_dlog( logger, "send req" ); + c->enqueue( req ); + c->fetch_wait(); + c->last_req = std::move( req ); + }); + } + }); } - } - else if (msg.known_blocks.mode != none) { + } else if (msg.known_blocks.mode != none) { fc_elog( logger, "passed a notice_message with something other than a normal on none known_blocks" ); return; } - fc_dlog( logger, "send req = ${sr}", ("sr",send_req)); - if( send_req) { - c->enqueue(req); - c->fetch_wait(); - c->last_req = std::move(req); - } } void dispatch_manager::retry_fetch(const connection_ptr& c) { if (!c->last_req) { return; } - fc_wlog( logger, "failed to fetch from ${p}",("p",c->peer_name())); + fc_wlog( logger, "failed to fetch from ${p}", ("p", c->peer_address()) ); block_id_type bid; if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { bid = c->last_req->req_blocks.ids.back(); } else { - fc_wlog( logger,"no retry, block mpde = ${b} trx mode = ${t}", - ("b",modes_str(c->last_req->req_blocks.mode))("t",modes_str(c->last_req->req_trx.mode))); + fc_wlog( logger, "no retry, block mpde = ${b} trx mode = ${t}", + ("b", modes_str( c->last_req->req_blocks.mode ))( "t", modes_str( c->last_req->req_trx.mode ) ) ); return; } boost::shared_lock g( my_impl->connections_mtx ); @@ -1955,9 +1940,11 @@ namespace eosio { } bool sendit = peer_has_block( bid, c->connection_id ); if (sendit) { - conn->enqueue(*c->last_req); - conn->fetch_wait(); - conn->last_req = c->last_req; + conn->strand.post( [conn, last_req = *c->last_req]() { + conn->enqueue( last_req ); + conn->fetch_wait(); + conn->last_req = last_req; + } ); return; } } @@ -2291,6 +2278,8 @@ namespace eosio { chain_head_blk_id = cc.head_block_id(); chain_fork_head_blk_num = cc.fork_db_head_block_num(); chain_fork_head_blk_id = cc.fork_db_head_block_id(); + fc_dlog( logger, "updating chain info lib ${lib}, head ${head}, fork ${fork}", + ("lib", chain_lib_num)("head", chain_head_blk_num)("fork", chain_fork_head_blk_num) ); } // lib_num, head_blk_num, fork_head_blk_num, lib_id, head_blk_id, fork_head_blk_id @@ -2409,10 +2398,10 @@ namespace eosio { controller& cc = chain_plug->chain(); uint32_t lib_num = cc.last_irreversible_block_num(); - bool on_fork = false; - fc_dlog( logger, "lib_num = ${ln} peer_lib = ${pl}", ("ln", lib_num)( "pl", peer_lib ) ); + fc_dlog( logger, "handshake, check for fork lib_num = ${ln} peer_lib = ${pl}", ("ln", lib_num)( "pl", peer_lib ) ); if( peer_lib <= lib_num && peer_lib > 0 ) { + bool on_fork = false; try { block_id_type peer_lib_id = cc.get_block_id_for_num( peer_lib ); on_fork = (msg_lib_id != peer_lib_id); @@ -2424,9 +2413,10 @@ namespace eosio { on_fork = true; } if( on_fork ) { - fc_elog( logger, "Peer chain is forked" ); - c->enqueue( go_away_message( forked ) ); - return; + c->strand.post( [c]() { + fc_elog( logger, "Peer chain is forked" ); + c->enqueue( go_away_message( forked ) ); + } ); } } }); @@ -2438,13 +2428,13 @@ namespace eosio { c->last_handshake_recv = msg; c->_logger_variant.reset(); - sync_master->recv_handshake(c,msg); + sync_master->recv_handshake( c, msg ); } void net_plugin_impl::handle_message(const connection_ptr& c, const go_away_message& msg) { - peer_wlog(c, "received go_away_message, reason = ${r}", ("r",reason_str( msg.reason )) ); + peer_wlog( c, "received 
go_away_message, reason = ${r}", ("r", reason_str( msg.reason )) ); c->no_retry = msg.reason; - if(msg.reason == duplicate ) { + if( msg.reason == duplicate ) { c->node_id = msg.node_id; } c->flush_queues(); @@ -2452,7 +2442,7 @@ namespace eosio { } void net_plugin_impl::handle_message(const connection_ptr& c, const time_message& msg) { - peer_ilog(c, "received time_message"); + peer_ilog( c, "received time_message" ); /* We've already lost however many microseconds it took to dispatch * the message, but it can't be helped. */ @@ -2488,12 +2478,13 @@ namespace eosio { // peer tells us about one or more blocks or txns. When done syncing, forward on // notices of previously unknown blocks or txns, // - peer_ilog(c, "received notice_message"); + peer_ilog( c, "received notice_message" ); c->connecting = false; request_message req; bool send_req = false; - if (msg.known_trx.mode != none) { - fc_dlog(logger,"this is a ${m} notice with ${n} transactions", ("m",modes_str(msg.known_trx.mode))("n",msg.known_trx.pending)); + if( msg.known_trx.mode != none ) { + fc_dlog( logger, "this is a ${m} notice with ${n} transactions", + ("m", modes_str( msg.known_trx.mode ))( "n", msg.known_trx.pending ) ); } switch (msg.known_trx.mode) { case none: @@ -2579,12 +2570,13 @@ namespace eosio { } - void net_plugin_impl::handle_message(const connection_ptr& c, const sync_request_message& msg) { - if( msg.end_block == 0) { + void net_plugin_impl::handle_message( const connection_ptr& c, const sync_request_message& msg ) { + fc_dlog( logger, "peer requested ${start} to ${end}", ("start", msg.start_block)("end", msg.end_block) ); + if( msg.end_block == 0 ) { c->peer_requested.reset(); c->flush_queues(); } else { - c->peer_requested = sync_state( msg.start_block,msg.end_block,msg.start_block-1); + c->peer_requested = sync_state( msg.start_block, msg.end_block, msg.start_block-1); c->enqueue_sync_block(); } } @@ -2609,11 +2601,7 @@ namespace eosio { peer_ilog(c, "received packed_transaction ${id}", ("id", tid)); bool have_trx = dispatcher->have_txn( tid ); - connection_wptr weak_ptr = c; - app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, ptrx](){ - auto c = weak_ptr.lock(); - dispatcher->recv_transaction(c, ptrx); - }); + dispatcher->recv_transaction(c, ptrx); if( have_trx ) { fc_dlog( logger, "got a duplicate transaction - dropping ${id}", ("id", tid) ); return; @@ -2641,11 +2629,11 @@ namespace eosio { controller& cc = chain_plug->chain(); uint32_t head_blk_num = cc.head_block_num(); - app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}, head_blk_num]() { + boost::asio::post( *my_impl->server_ioc, [accepted, ptrx{std::move(ptrx)}, head_blk_num]() { if( accepted ) { - dispatcher->bcast_transaction( ptrx ); + my_impl->dispatcher->bcast_transaction( ptrx ); } else { - dispatcher->rejected_transaction( ptrx->id, head_blk_num ); + my_impl->dispatcher->rejected_transaction( ptrx->id, head_blk_num ); } }); }); @@ -2749,7 +2737,6 @@ namespace eosio { std::lock_guard g( keepalive_timer_mtx ); keepalive_timer->expires_from_now(keepalive_interval); keepalive_timer->async_wait( [this]( boost::system::error_code ec ) { - app().post( priority::low, [this, ec]() { ticker(); if( ec ) { fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); @@ -2757,11 +2744,12 @@ namespace eosio { boost::shared_lock g( connections_mtx ); for( auto& c : connections ) { if( c->socket_is_open() ) { - c->send_time(); + c->strand.post( [c]() { + 
c->send_time(); + } ); } } } ); - } ); } void net_plugin_impl::start_monitors() { @@ -3078,6 +3066,8 @@ namespace eosio { } void net_plugin::plugin_startup() { + handle_sighup(); + my->producer_plug = app().find_plugin(); my->thread_pool.emplace( my->thread_pool_size ); @@ -3157,10 +3147,11 @@ namespace eosio { my->start_monitors(); + my->update_chain_info(); + for( const auto& seed_node : my->supplied_peers ) { connect( seed_node ); } - handle_sighup(); } void net_plugin::handle_sighup() { @@ -3232,7 +3223,6 @@ namespace eosio { boost::unique_lock g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { if( (*itr)->peer_address() == host ) { - (*itr)->reset(); fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) ); (*itr)->close(); my->connections.erase(itr); From 3eb770ad3850d4ac7430153e4efad916c14420ed Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 07:34:57 -0500 Subject: [PATCH 0075/1648] Accept block_state_ptr as const& --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 2cc242b9659..4c6a5bfaf28 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3135,7 +3135,7 @@ namespace eosio { my->incoming_transaction_ack_subscription = app().get_channel().subscribe( boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); my->incoming_irreversible_block_subscription = app().get_channel().subscribe( - [this]( block_state_ptr s ) { + [this]( const block_state_ptr& s ) { my->on_irreversible_block( s ); }); From 541b9c7acc090879f8539f33ff1f3bf6ba2fd0d6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 12:10:16 -0500 Subject: [PATCH 0076/1648] Protect last_req and fix syncing issue --- plugins/net_plugin/net_plugin.cpp | 49 +++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4c6a5bfaf28..b9a1dfe9d74 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -228,7 +228,7 @@ namespace eosio { connection_ptr find_connection(const string& host)const; - mutable boost::shared_mutex connections_mtx; // switch to std::shared_mutex in C++17 + mutable boost::shared_mutex connections_mtx; // switch to std::shared_mutex in C++17, also protects connection::last_req std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection bool done = false; unique_ptr< sync_manager > sync_master; @@ -608,7 +608,7 @@ namespace eosio { std::atomic no_retry{no_reason}; block_id_type fork_head; std::atomic fork_head_num{0}; // provides memory barrier for fork_head - optional last_req; + optional last_req; // mutex protected by connections_mtx connection_status get_status()const { connection_status stat; @@ -913,14 +913,18 @@ namespace eosio { self->flush_queues(); self->connecting = false; self->syncing = false; - if( self->last_req ) { + + boost::shared_lock g_conn( my_impl->connections_mtx ); + bool has_last_req = !!self->last_req; + g_conn.unlock(); + if( has_last_req ) { my_impl->dispatcher->retry_fetch( self->shared_from_this() ); } self->peer_requested.reset(); self->sent_handshake_count = 0; self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); - my_impl->sync_master->sync_reset_lib_num( nullptr 
); + my_impl->sync_master->sync_reset_lib_num( self->shared_from_this() ); fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); self->cancel_wait(); @@ -1814,13 +1818,16 @@ namespace eosio { void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { peer_block_state pbstate{id, bnum, c->connection_id}; add_peer_block( pbstate ); + boost::unique_lock g( my_impl->connections_mtx ); if (c && c->last_req && c->last_req->req_blocks.mode != none && !c->last_req->req_blocks.ids.empty() && c->last_req->req_blocks.ids.back() == id) { + fc_dlog( logger, "reseting last_req" ); c->last_req.reset(); } + g.unlock(); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); @@ -1909,6 +1916,7 @@ namespace eosio { fc_dlog( logger, "send req" ); c->enqueue( req ); c->fetch_wait(); + boost::unique_lock g( my_impl->connections_mtx ); c->last_req = std::move( req ); }); } @@ -1921,6 +1929,8 @@ namespace eosio { } void dispatch_manager::retry_fetch(const connection_ptr& c) { + fc_dlog( logger, "retry fetch" ); + boost::shared_lock g( my_impl->connections_mtx ); if (!c->last_req) { return; } @@ -1933,24 +1943,37 @@ namespace eosio { ("b", modes_str( c->last_req->req_blocks.mode ))( "t", modes_str( c->last_req->req_trx.mode ) ) ); return; } - boost::shared_lock g( my_impl->connections_mtx ); - for (auto& conn : my_impl->connections) { - if (conn == c || conn->last_req) { + for( auto& conn : my_impl->connections ) { + if( conn == c || conn->last_req ) { continue; } - bool sendit = peer_has_block( bid, c->connection_id ); - if (sendit) { + bool sendit = peer_has_block( bid, conn->connection_id ); + if( sendit ) { conn->strand.post( [conn, last_req = *c->last_req]() { conn->enqueue( last_req ); conn->fetch_wait(); + boost::unique_lock g( my_impl->connections_mtx ); conn->last_req = last_req; } ); return; } } - g.unlock(); + // found no peer that we know has it, so ask some random connection + for( auto& conn : my_impl->connections ) { + if( conn == c || conn->last_req ) { + continue; + } + conn->strand.post( [conn, last_req = *c->last_req]() { + conn->enqueue( last_req ); + conn->fetch_wait(); + boost::unique_lock g( my_impl->connections_mtx ); + conn->last_req = last_req; + } ); + return; + } // at this point no other peer has it, re-request or do nothing? 
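Note: last_req is touched both from the connection's strand and from other threads, so retry_fetch copies the optional request and only mutates it while a lock is held. A standalone sketch of that idiom using std types (names and fields are illustrative, not the plugin's actual classes):

   #include <mutex>
   #include <optional>
   #include <utility>

   struct request_message { int req_blocks = 0; int req_trx = 0; };   // placeholder fields

   struct conn_state {                        // illustrative, not the plugin's connection class
      mutable std::mutex             mtx;     // stands in for the lock guarding last_req
      std::optional<request_message> last_req;
   };

   // copy while locked, act on the copy after the lock is released
   std::optional<request_message> copy_last_req( const conn_state& c ) {
      std::lock_guard<std::mutex> g( c.mtx );
      return c.last_req;
   }

   void set_last_req( conn_state& c, request_message req ) {
      std::lock_guard<std::mutex> g( c.mtx );
      c.last_req = std::move( req );
   }
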
+ fc_wlog( logger, "no peer has last_req" ); if( c->connected() ) { c->enqueue(*c->last_req); c->fetch_wait(); @@ -2009,8 +2032,10 @@ namespace eosio { } } else { if( endpoint_itr != tcp::resolver::iterator() ) { - c->close(); - c->connect( resolver, endpoint_itr ); + c->close(); // close posts to strand, so also post connect otherwise connect will happen before close + c->strand.post( [resolver, c, endpoint_itr]() { + c->connect( resolver, endpoint_itr ); + } ); } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; From 1e051295a05ebd0bfe497fb6b1c1dcd76560b128 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 13:04:21 -0500 Subject: [PATCH 0077/1648] Get sync block at medium priority --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b9a1dfe9d74..105d332be6a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1110,7 +1110,7 @@ namespace eosio { if(num == peer_requested->end_block) { peer_requested.reset(); } - app().post( priority::low, [num, trigger_send, c = shared_from_this()]() { + app().post( priority::medium, [num, trigger_send, c = shared_from_this()]() { controller& cc = my_impl->chain_plug->chain(); signed_block_ptr sb = cc.fetch_block_by_number( num ); if( sb ) { From 27651ec9ba89a4cff5a02a83e5d30c70711fd57b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 13:29:35 -0500 Subject: [PATCH 0078/1648] Remove dead code --- plugins/net_plugin/net_plugin.cpp | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 105d332be6a..4851bc51992 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1004,8 +1004,10 @@ namespace eosio { strand.post( [c = shared_from_this()]() { handshake_initializer::populate( c->last_handshake_sent ); c->last_handshake_sent.generation = ++c->sent_handshake_count; - fc_dlog( logger, "Sending handshake generation ${g} to ${ep}", - ("g", c->last_handshake_sent.generation)( "ep", c->peer_address() ) ); + fc_dlog( logger, "Sending handshake generation ${g} to ${ep}, lib ${lib}, head ${head}", + ("g", c->last_handshake_sent.generation)( "ep", c->peer_address() ) + ( "lib", c->last_handshake_sent.last_irreversible_block_num ) + ( "head", c->last_handshake_sent.head_num ) ); c->enqueue( c->last_handshake_sent ); }); } @@ -2506,7 +2508,6 @@ namespace eosio { peer_ilog( c, "received notice_message" ); c->connecting = false; request_message req; - bool send_req = false; if( msg.known_trx.mode != none ) { fc_dlog( logger, "this is a ${m} notice with ${n} transactions", ("m", modes_str( msg.known_trx.mode ))( "n", msg.known_trx.pending ) ); @@ -2547,10 +2548,6 @@ namespace eosio { peer_elog(c, "bad notice_message : invalid known_blocks.mode ${m}",("m",static_cast(msg.known_blocks.mode))); } } - fc_dlog(logger, "send req = ${sr}", ("sr",send_req)); - if( send_req) { - c->enqueue(req); - } } void net_plugin_impl::handle_message(const connection_ptr& c, const request_message& msg) { From 790e8889ab9168837c426fef6589479bc1bb23ca Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 07:48:45 -0500 Subject: [PATCH 0079/1648] Add protection for last_handshake_*, last_req, and socket close --- .../include/eosio/net_plugin/protocol.hpp | 2 +- 
plugins/net_plugin/net_plugin.cpp | 148 +++++++++++------- 2 files changed, 96 insertions(+), 54 deletions(-) diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp index 7170c1abd20..cdb3d98fd2f 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp @@ -36,7 +36,7 @@ namespace eosio { block_id_type head_id; string os; string agent; - int16_t generation; + int16_t generation = 0; }; diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4851bc51992..9068329b158 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -587,8 +587,6 @@ namespace eosio { std::atomic trx_in_progress_size{0}; fc::sha256 node_id; const uint32_t connection_id; - handshake_message last_handshake_recv; - handshake_message last_handshake_sent; int16_t sent_handshake_count = 0; std::atomic connecting{false}; std::atomic syncing{false}; @@ -608,16 +606,13 @@ namespace eosio { std::atomic no_retry{no_reason}; block_id_type fork_head; std::atomic fork_head_num{0}; // provides memory barrier for fork_head - optional last_req; // mutex protected by connections_mtx - connection_status get_status()const { - connection_status stat; - stat.peer = peer_addr; - stat.connecting = connecting; - stat.syncing = syncing; - stat.last_handshake = last_handshake_recv; - return stat; - } + mutable std::mutex conn_mtx; // mtx for last_req, last_handshake_recv, last_handshake_sent + optional last_req; + handshake_message last_handshake_recv; + handshake_message last_handshake_sent; + + connection_status get_status()const; /** \name Peer Timestamps * Time message handling @@ -816,8 +811,6 @@ namespace eosio { socket( *my_impl->server_ioc ), node_id(), connection_id( ++my_impl->current_connection_id ), - last_handshake_recv(), - last_handshake_sent(), sent_handshake_count(0), connecting(false), syncing(false), @@ -826,7 +819,9 @@ namespace eosio { response_expected_timer( *my_impl->server_ioc ), read_delay_timer( *my_impl->server_ioc ), no_retry(no_reason), - last_req() + last_req(), + last_handshake_recv(), + last_handshake_sent() { fc_ilog( logger, "created connection to ${n}", ("n", endpoint) ); node_id.data()[0] = 0; @@ -839,8 +834,6 @@ namespace eosio { socket( *my_impl->server_ioc ), node_id(), connection_id( ++my_impl->current_connection_id ), - last_handshake_recv(), - last_handshake_sent(), sent_handshake_count(0), connecting(true), syncing(false), @@ -849,14 +842,15 @@ namespace eosio { response_expected_timer( *my_impl->server_ioc ), read_delay_timer( *my_impl->server_ioc ), no_retry(no_reason), - last_req() + last_req(), + last_handshake_recv(), + last_handshake_sent() { fc_ilog( logger, "accepted network connection" ); node_id.data()[0] = 0; } connection::~connection() { - pending_message_buffer.reset(); } void connection::update_endpoints() { @@ -870,6 +864,16 @@ namespace eosio { local_endpoint_port = ec ? 
unknown : std::to_string(lep.port()); } + connection_status connection::get_status()const { + connection_status stat; + stat.peer = peer_addr; + stat.connecting = connecting; + stat.syncing = syncing; + std::lock_guard g( conn_mtx ); + stat.last_handshake = last_handshake_recv; + return stat; + } + bool connection::start_session() { verify_strand_in_this_thread( strand, __func__, __LINE__ ); @@ -914,7 +918,7 @@ namespace eosio { self->connecting = false; self->syncing = false; - boost::shared_lock g_conn( my_impl->connections_mtx ); + std::unique_lock g_conn( self->conn_mtx ); bool has_last_req = !!self->last_req; g_conn.unlock(); if( has_last_req ) { @@ -922,14 +926,18 @@ namespace eosio { } self->peer_requested.reset(); self->sent_handshake_count = 0; + g_conn.lock(); self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); + g_conn.unlock(); my_impl->sync_master->sync_reset_lib_num( self->shared_from_this() ); - fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); + fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); // peer_name(), do not hold conn_mtx self->cancel_wait(); std::lock_guard g( self->read_delay_timer_mtx ); self->read_delay_timer.cancel(); + + self->pending_message_buffer.reset(); } void connection::blk_send_branch() { @@ -948,13 +956,15 @@ namespace eosio { } block_id_type remote_head_id; uint32_t remote_head_num = 0; + std::unique_lock g_conn( conn_mtx ); if( last_handshake_recv.generation >= 1 ) { remote_head_id = last_handshake_recv.head_id; remote_head_num = block_header::num_from_id(remote_head_id); - fc_dlog( logger, "maybe truncating branch at = ${h}:${id}", ("h", remote_head_num)( "id", remote_head_id ) ); + fc_dlog( logger, "maybe truncating branch at = ${h}:${id}", ("h", remote_head_num)( "id", remote_head_id ) ); } block_id_type lib_id = last_handshake_recv.last_irreversible_block_id; + g_conn.unlock(); if( !peer_requested ) { peer_requested = sync_state( block_header::num_from_id(lib_id)+1, @@ -965,6 +975,7 @@ namespace eosio { uint32_t end = std::max( peer_requested->end_block, block_header::num_from_id(head_id) ); peer_requested = sync_state( start, end, start - 1 ); } + fc_dlog( logger, "enqueue ${s} - ${e}", ("s", peer_requested->start_block)("e", peer_requested->end_block) ); enqueue_sync_block(); // still want to send transactions along during blk branch sync @@ -1002,13 +1013,16 @@ namespace eosio { void connection::send_handshake() { strand.post( [c = shared_from_this()]() { + std::unique_lock g_conn( c->conn_mtx ); handshake_initializer::populate( c->last_handshake_sent ); c->last_handshake_sent.generation = ++c->sent_handshake_count; fc_dlog( logger, "Sending handshake generation ${g} to ${ep}, lib ${lib}, head ${head}", ("g", c->last_handshake_sent.generation)( "ep", c->peer_address() ) ( "lib", c->last_handshake_sent.last_irreversible_block_num ) ( "head", c->last_handshake_sent.head_num ) ); - c->enqueue( c->last_handshake_sent ); + auto cpy = c->last_handshake_sent; + g_conn.unlock(); + c->enqueue( cpy ); }); } @@ -1057,6 +1071,9 @@ namespace eosio { boost::asio::async_write( c->socket, bufs, boost::asio::bind_executor( c->strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { try { + // May have closed connection and cleared buffer_queue + if( !c->socket_is_open() ) return; + c->buffer_queue.out_callback( ec, w ); if( ec ) { @@ -1112,7 +1129,8 @@ namespace eosio { if(num == peer_requested->end_block) { peer_requested.reset(); } - app().post( 
priority::medium, [num, trigger_send, c = shared_from_this()]() { + const int higher_than_low = priority::low + 2; // otherwise client gets very little if we are syncing to a peer + app().post( higher_than_low, [num, trigger_send, c = shared_from_this()]() { controller& cc = my_impl->chain_plug->chain(); signed_block_ptr sb = cc.fetch_block_by_number( num ); if( sb ) { @@ -1241,11 +1259,13 @@ namespace eosio { } } - // todo: last_handshake_recv not thread safe + // locks conn_mtx, do not call while holding conn_mtx const string connection::peer_name() { + std::unique_lock g_conn( conn_mtx ); if( !last_handshake_recv.p2p_address.empty() ) { return last_handshake_recv.p2p_address; } + g_conn.unlock(); if( !peer_address().empty() ) { return peer_address(); } @@ -1311,6 +1331,7 @@ namespace eosio { } if( !c ) return; if( c->current() ) { + std::lock_guard g_conn( c->conn_mtx ); if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num ) { sync_known_lib_num = c->last_handshake_recv.last_irreversible_block_num; } @@ -1594,7 +1615,9 @@ namespace eosio { verify_catchup(c, msg.known_blocks.pending, msg.known_blocks.ids.back()); } } else if (msg.known_blocks.mode == last_irr_catch_up) { + std::unique_lock g_conn( c->conn_mtx ); c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; + g_conn.unlock(); sync_reset_lib_num(c); start_sync(c, msg.known_trx.pending); } @@ -1607,7 +1630,7 @@ namespace eosio { fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_address()) ); sync_last_requested_num = 0; sync_source.reset(); - set_state( in_sync ); + //todo: set_state( in_sync ); g.unlock(); c->close(); send_handshakes(); @@ -1623,7 +1646,7 @@ namespace eosio { if( state == lib_catchup ) { if (blk_num != sync_next_expected_num) { fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", - ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_name()) ); + ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_address()) ); c->close(); return; } @@ -1802,14 +1825,15 @@ namespace eosio { } cp->strand.post( [this, cp, bs, send_buffer]() { uint32_t bnum = bs->block_num; - // todo protect cp->last_handshake_recv + std::unique_lock g_conn( cp->conn_mtx ); bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; + g_conn.unlock(); if( !has_block ) { peer_block_state pbstate{bs->id, bnum, cp->connection_id}; if( !add_peer_block( pbstate ) ) { return; } - fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_address() ) ); + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); } }); @@ -1820,7 +1844,7 @@ namespace eosio { void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { peer_block_state pbstate{id, bnum, c->connection_id}; add_peer_block( pbstate ); - boost::unique_lock g( my_impl->connections_mtx ); + std::unique_lock g( c->conn_mtx ); if (c && c->last_req && c->last_req->req_blocks.mode != none && @@ -1918,7 +1942,7 @@ namespace eosio { fc_dlog( logger, "send req" ); c->enqueue( req ); c->fetch_wait(); - boost::unique_lock g( my_impl->connections_mtx ); + std::lock_guard g( c->conn_mtx ); c->last_req = std::move( req ); }); } @@ -1932,8 +1956,8 @@ namespace eosio { void dispatch_manager::retry_fetch(const connection_ptr& c) { fc_dlog( logger, "retry fetch" ); - boost::shared_lock g( my_impl->connections_mtx ); - if 
(!c->last_req) { + std::unique_lock g_c_conn( c->conn_mtx ); + if( !c->last_req ) { return; } fc_wlog( logger, "failed to fetch from ${p}", ("p", c->peer_address()) ); @@ -1945,39 +1969,39 @@ namespace eosio { ("b", modes_str( c->last_req->req_blocks.mode ))( "t", modes_str( c->last_req->req_trx.mode ) ) ); return; } + g_c_conn.unlock(); + boost::shared_lock g( my_impl->connections_mtx ); for( auto& conn : my_impl->connections ) { - if( conn == c || conn->last_req ) { + if( conn == c ) continue; + + std::unique_lock g_conn_conn( conn->conn_mtx ); + if( conn->last_req ) { continue; } + g_conn_conn.unlock(); bool sendit = peer_has_block( bid, conn->connection_id ); if( sendit ) { - conn->strand.post( [conn, last_req = *c->last_req]() { + g.unlock(); + g_c_conn.lock(); + auto last_req = *c->last_req; + g_c_conn.unlock(); + conn->strand.post( [conn, last_req{std::move(last_req)}]() { conn->enqueue( last_req ); conn->fetch_wait(); - boost::unique_lock g( my_impl->connections_mtx ); + std::lock_guard g_conn_conn( conn->conn_mtx ); conn->last_req = last_req; } ); return; } } - // found no peer that we know has it, so ask some random connection - for( auto& conn : my_impl->connections ) { - if( conn == c || conn->last_req ) { - continue; - } - conn->strand.post( [conn, last_req = *c->last_req]() { - conn->enqueue( last_req ); - conn->fetch_wait(); - boost::unique_lock g( my_impl->connections_mtx ); - conn->last_req = last_req; - } ); - return; - } // at this point no other peer has it, re-request or do nothing? fc_wlog( logger, "no peer has last_req" ); if( c->connected() ) { - c->enqueue(*c->last_req); + g_c_conn.lock(); + auto last_req = *c->last_req; + g_c_conn.unlock(); + c->enqueue( last_req ); c->fetch_wait(); } } @@ -2173,8 +2197,11 @@ namespace eosio { boost::asio::bind_executor( strand, [conn = shared_from_this()]( boost::system::error_code ec, std::size_t bytes_transferred ) { --conn->reads_in_flight; - bool close_connection = false; + // may have closed connection and cleared pending_message_buffer + if( !conn->socket_is_open() ) return; + + bool close_connection = false; try { if( !ec ) { if (bytes_transferred > conn->pending_message_buffer.bytes_to_write()) { @@ -2355,6 +2382,10 @@ namespace eosio { c->enqueue( go_away_message( fatal_other ) ); return; } + fc_dlog( logger, "received handshake gen ${g} from ${ep}, lib ${lib}, head ${head}", + ("g", msg.generation)( "ep", c->peer_address() ) + ( "lib", msg.last_irreversible_block_num )( "head", msg.head_num ) ); + if( c->connecting ) { c->connecting = false; } @@ -2365,7 +2396,9 @@ namespace eosio { return; } + std::unique_lock g_conn( c->conn_mtx ); if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { + g_conn.unlock(); fc_dlog(logger, "checking for duplicate" ); boost::shared_lock g( my_impl->connections_mtx ); for(const auto& check : connections) { @@ -2376,7 +2409,13 @@ namespace eosio { // we need to avoid the case where they would both tell a different connection to go away. // Using the sum of the initial handshake times of the two connections, we will // arbitrarily (but consistently between the two peers) keep one of them. 
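Note: the duplicate check that follows breaks ties with the sum of the two connections' handshake times, so both peers reach the same decision about which redundant link to drop. Reduced to a pure function (plain integer timestamps, illustrative name):

   #include <cstdint>

   // Mirrors the comparison below: when this returns true, the connection performing the
   // handshake is left alone for this candidate; when false, it is sent go_away(duplicate).
   bool handshake_wins_tiebreak( int64_t msg_time, int64_t my_sent_time,
                                 int64_t existing_sent_time, int64_t existing_recv_time ) {
      return msg_time + my_sent_time <= existing_sent_time + existing_recv_time;
   }
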
- if (msg.time + c->last_handshake_sent.time <= check->last_handshake_sent.time + check->last_handshake_recv.time) + std::unique_lock g_check_conn( check->conn_mtx ); + auto check_time = check->last_handshake_sent.time + check->last_handshake_recv.time; + g_check_conn.unlock(); + g_conn.lock(); + auto c_time = c->last_handshake_sent.time; + g_conn.unlock(); + if (msg.time + c_time <= check_time) continue; fc_dlog( logger, "sending go_away duplicate to ${ep}", ("ep",msg.p2p_address) ); @@ -2390,6 +2429,7 @@ namespace eosio { } else { fc_dlog( logger, "skipping duplicate check, addr == ${pa}, id = ${ni}", ("pa", c->peer_address())( "ni", c->last_handshake_recv.node_id ) ); + g_conn.unlock(); } if( msg.chain_id != chain_id) { @@ -2453,7 +2493,9 @@ namespace eosio { } } + std::unique_lock g_conn( c->conn_mtx ); c->last_handshake_recv = msg; + g_conn.unlock(); c->_logger_variant.reset(); sync_master->recv_handshake( c, msg ); } @@ -2507,7 +2549,6 @@ namespace eosio { // peer_ilog( c, "received notice_message" ); c->connecting = false; - request_message req; if( msg.known_trx.mode != none ) { fc_dlog( logger, "this is a ${m} notice with ${n} transactions", ("m", modes_str( msg.known_trx.mode ))( "n", msg.known_trx.pending ) ); @@ -2516,8 +2557,9 @@ namespace eosio { case none: break; case last_irr_catch_up: { + std::unique_lock g_conn( c->conn_mtx ); c->last_handshake_recv.head_num = msg.known_trx.pending; - req.req_trx.mode = none; + g_conn.unlock(); break; } case catch_up : { From 30e9676f1d438c912c5221f26240bb8d1ec1dc66 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 09:21:34 -0500 Subject: [PATCH 0080/1648] Protect connection fork_head, fork_head_num. Improve logging. --- plugins/net_plugin/net_plugin.cpp | 32 +++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 9068329b158..7ba52469f3b 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -604,13 +604,13 @@ namespace eosio { std::mutex read_delay_timer_mtx; boost::asio::steady_timer read_delay_timer; std::atomic no_retry{no_reason}; - block_id_type fork_head; - std::atomic fork_head_num{0}; // provides memory barrier for fork_head - mutable std::mutex conn_mtx; // mtx for last_req, last_handshake_recv, last_handshake_sent + mutable std::mutex conn_mtx; // mtx for last_req, last_handshake_recv, last_handshake_sent, fork_head, fork_head_num optional last_req; handshake_message last_handshake_recv; handshake_message last_handshake_sent; + block_id_type fork_head; + uint32_t fork_head_num{0}; connection_status get_status()const; @@ -1119,7 +1119,6 @@ namespace eosio { bool connection::enqueue_sync_block() { if( !peer_requested ) { - fc_dlog( logger, "enqueue sync block, with no peer_requested" ); return false; } else { fc_dlog( logger, "enqueue sync block ${num}", ("num", peer_requested->last + 1) ); @@ -1572,7 +1571,7 @@ namespace eosio { req.req_blocks.mode = catch_up; boost::shared_lock g( my_impl->connections_mtx ); for (const auto& cc : my_impl->connections) { - // fork_head_num provides memory barrier for fork_head + std::lock_guard g_conn( cc->conn_mtx ); if( cc->fork_head_num > num || cc->fork_head == id ) { req.req_blocks.mode = none; break; @@ -1580,8 +1579,10 @@ namespace eosio { } g.unlock(); if( req.req_blocks.mode == catch_up ) { + std::unique_lock g_conn( c->conn_mtx ); c->fork_head = id; c->fork_head_num = num; + g_conn.unlock(); std::lock_guard g( sync_mtx 
); fc_ilog( logger, "got a catch_up notice while in ${s}, fork head num = ${fhn} " "target LIB = ${lib} next_expected = ${ne}", @@ -1591,6 +1592,7 @@ namespace eosio { return; set_state( head_catchup ); } else { + std::lock_guard g_conn( c->conn_mtx ); c->fork_head = block_id_type(); c->fork_head_num = 0; } @@ -1639,14 +1641,16 @@ namespace eosio { // called from connection strand void sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { - fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_address() ) ); + fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_name() ) ); std::unique_lock g_sync( sync_mtx ); stages state = sync_state; fc_dlog( logger, "state ${s}", ("s", stage_str( state )) ); if( state == lib_catchup ) { if (blk_num != sync_next_expected_num) { + auto sync_next_expected = sync_next_expected_num; + g_sync.unlock(); fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", - ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_address()) ); + ("ne", sync_next_expected)( "bn", blk_num )( "p", c->peer_name() ) ); c->close(); return; } @@ -1662,11 +1666,15 @@ namespace eosio { bool set_state_to_head_catchup = false; boost::shared_lock g( my_impl->connections_mtx ); for( const auto& cp : my_impl->connections ) { - uint32_t fork_head_num = cp->fork_head_num.load(); // fork_head_num provides memory barrier for fork_head - if (cp->fork_head == null_id) { + std::unique_lock g_cp_conn( cp->conn_mtx ); + uint32_t fork_head_num = cp->fork_head_num; + block_id_type fork_head_id = cp->fork_head; + g_cp_conn.unlock(); + if( fork_head_id == null_id ) { continue; } - if( fork_head_num < blk_num || cp->fork_head == blk_id ) { + if( fork_head_num < blk_num || fork_head_id == blk_id ) { + std::lock_guard g_conn( c->conn_mtx ); c->fork_head = null_id; c->fork_head_num = 0; } else { @@ -1884,7 +1892,7 @@ namespace eosio { } cp->strand.post( [cp, send_buffer]() { - fc_dlog( logger, "sending trx to ${n}", ("n", cp->peer_address()) ); + fc_dlog( logger, "sending trx to ${n}", ("n", cp->peer_name()) ); cp->enqueue_buffer( send_buffer, true, priority::low, no_reason ); } ); } @@ -2383,7 +2391,7 @@ namespace eosio { return; } fc_dlog( logger, "received handshake gen ${g} from ${ep}, lib ${lib}, head ${head}", - ("g", msg.generation)( "ep", c->peer_address() ) + ("g", msg.generation)( "ep", c->peer_name() ) ( "lib", msg.last_irreversible_block_num )( "head", msg.head_num ) ); if( c->connecting ) { From f5d5f74198961768bd54f9f1b6ddebd182a2a5ca Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 12:37:34 -0500 Subject: [PATCH 0081/1648] Fix protection of connections. Record block after verify it links. 
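The fix below boils down to lock discipline on the connection set: iteration takes a shared lock, while inserting into the set takes a unique (exclusive) lock. A self-contained sketch of that pattern using the std equivalents of the boost locks (hypothetical registry type, not the plugin's class):

   #include <memory>
   #include <mutex>
   #include <set>
   #include <shared_mutex>
   #include <utility>

   struct connection { int id = 0; };               // placeholder for the real connection
   using connection_ptr = std::shared_ptr<connection>;

   struct connection_registry {                     // hypothetical type, not the plugin's
      mutable std::shared_mutex mtx;                // net_plugin uses the boost equivalent
      std::set<connection_ptr>  conns;

      void add( connection_ptr c ) {                // writer path: exclusive lock
         std::unique_lock<std::shared_mutex> g( mtx );
         conns.insert( std::move( c ) );
      }

      template<typename F>
      void for_each( F&& f ) const {                // reader path: shared lock
         std::shared_lock<std::shared_mutex> g( mtx );
         for( const auto& c : conns ) f( c );
      }
   };
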
--- plugins/net_plugin/net_plugin.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7ba52469f3b..eda58c0f794 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1128,8 +1128,7 @@ namespace eosio { if(num == peer_requested->end_block) { peer_requested.reset(); } - const int higher_than_low = priority::low + 2; // otherwise client gets very little if we are syncing to a peer - app().post( higher_than_low, [num, trigger_send, c = shared_from_this()]() { + app().post( priority::medium, [num, trigger_send, c = shared_from_this()]() { controller& cc = my_impl->chain_plug->chain(); signed_block_ptr sb = cc.fetch_block_by_number( num ); if( sb ) { @@ -2107,9 +2106,9 @@ namespace eosio { g.unlock(); if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count) ) { if( new_connection->start_session() ) { - g.lock(); + boost::unique_lock g_unique( connections_mtx ); connections.insert( new_connection ); - g.unlock(); + g_unique.unlock(); } } else { @@ -2731,9 +2730,9 @@ namespace eosio { fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); } - c->strand.post( [dispatcher = dispatcher.get(), c, blk_id, blk_num]() { - dispatcher->recv_block( c, blk_id, blk_num ); - }); +// c->strand.post( [dispatcher = dispatcher.get(), c, blk_id, blk_num]() { +// dispatcher->recv_block( c, blk_id, blk_num ); +// }); fc::microseconds age( fc::time_point::now() - msg->timestamp); peer_ilog(c, "received signed_block : #${n} block age in secs = ${age}", ("n",blk_num)("age",age.to_seconds())); @@ -2765,7 +2764,8 @@ namespace eosio { boost::asio::post( *server_ioc, [self = this, msg]() { self->dispatcher->update_txns_block_num( msg ); }); - c->strand.post( [sync_master = sync_master.get(), c, blk_id, blk_num]() { + c->strand.post( [sync_master = sync_master.get(), dispatcher = dispatcher.get(), c, blk_id, blk_num]() { + dispatcher->recv_block( c, blk_id, blk_num ); sync_master->sync_recv_block( c, blk_id, blk_num ); }); } else { @@ -2882,7 +2882,7 @@ namespace eosio { void net_plugin_impl::on_accepted_block(const block_state_ptr& block) { update_chain_info(); boost::asio::post( *server_ioc, [this, ioc=server_ioc, block]() { - fc_dlog( logger, "signaled, id = ${id}", ("id", block->id) ); + fc_dlog( logger, "signaled, blk id = ${id}", ("id", block->id) ); dispatcher->bcast_block( block ); }); } From 7fd099ca3ff4ad3b5de1cbf33bfb4b699f2cb08e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:08:40 -0600 Subject: [PATCH 0082/1648] Test of multi-threaded reading --- plugins/net_plugin/net_plugin.cpp | 164 ++++++++++++++++++++++-------- 1 file changed, 119 insertions(+), 45 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a26353ab387..062f72f5fb1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,6 +75,12 @@ namespace eosio { } }; + struct block_greater { + bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { + return lhs->block_num() > rhs->block_num(); + } + }; + typedef multi_index_container< node_transaction_state, indexed_by< @@ -157,7 +163,7 @@ namespace eosio { channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 1; // currently used by server_ioc + uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr 
server_ioc; optional server_ioc_work; @@ -501,12 +507,12 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; - fc::optional outstanding_read_bytes; + std::atomic outstanding_read_bytes{0}; queued_buffer buffer_queue; - uint32_t reads_in_flight = 0; + std::atomic reads_in_flight{0}; uint32_t trx_in_progress_size = 0; fc::sha256 node_id; handshake_message last_handshake_recv; @@ -639,9 +645,9 @@ namespace eosio { }; struct msg_handler : public fc::visitor { - net_plugin_impl &impl; + net_plugin_impl& impl; connection_ptr c; - msg_handler( net_plugin_impl &imp, const connection_ptr& conn) : impl(imp), c(conn) {} + msg_handler( net_plugin_impl& imp, const connection_ptr& conn) : impl(imp), c(conn) {} void operator()( const signed_block& msg ) const { EOS_ASSERT( false, plugin_config_exception, "operator()(signed_block&&) should be called" ); @@ -657,16 +663,30 @@ namespace eosio { } void operator()( signed_block&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); + shared_ptr ptr = std::make_shared( std::move( msg ) ); + connection_wptr weak = c; + app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if( c ) impl->handle_message( c, ptr ); + }); } void operator()( packed_transaction&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); + shared_ptr ptr = std::make_shared( std::move( msg ) ); + connection_wptr weak = c; + app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if( c) impl->handle_message( c, ptr ); + }); } template void operator()( T&& msg ) const { - impl.handle_message( c, std::forward(msg) ); + connection_wptr weak = c; + app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); } }; @@ -704,6 +724,8 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); + + std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -778,7 +800,9 @@ namespace eosio { initialize(); } - connection::~connection() {} + connection::~connection() { + pending_message_buffer.reset(); + } void connection::initialize() { auto *rnd = node_id.data(); @@ -826,7 +850,6 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); if( read_delay_timer ) read_delay_timer->cancel(); - pending_message_buffer.reset(); } void connection::txn_send_pending(const vector& ids) { @@ -1106,6 +1129,7 @@ namespace eosio { static std::shared_ptr> create_send_buffer( const signed_block_ptr& sb ) { // this implementation is to avoid copy of signed_block to net_message // matches which of net_message for signed_block + fc_dlog( logger, "sending block ${bn}", ("bn", sb->block_num()) ); return create_send_buffer( signed_block_which, *sb ); } @@ -1921,7 +1945,9 @@ namespace eosio { return false; } else { - start_read_message( con ); + boost::asio::post(*server_ioc, [this, con]() { + start_read_message( con ); + }); ++started_sessions; return true; // for now, we can just use the application main loop. 
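Note: the next hunk reworks start_read_message; the core asio idiom there is a completion condition that tells async_read how many more bytes are still wanted before the handler runs. A simplified, self-contained sketch of that pattern (a raw socket and flat buffer stand in for fc::message_buffer; not the plugin's actual signature):

   #include <boost/asio.hpp>
   #include <array>
   #include <cstddef>

   // Keep reading until at least `minimum_read` bytes have arrived by returning the
   // remaining count from the completion condition (returning 0 means done).
   void start_read( boost::asio::ip::tcp::socket& sock,
                    std::array<char, 4096>& buf,
                    std::size_t minimum_read ) {
      auto completion = [minimum_read]( const boost::system::error_code& ec,
                                        std::size_t bytes_transferred ) -> std::size_t {
         if( ec || bytes_transferred >= minimum_read ) return 0;   // stop reading
         return minimum_read - bytes_transferred;                  // bytes still wanted
      };
      boost::asio::async_read( sock, boost::asio::buffer( buf ), completion,
         []( const boost::system::error_code& /*ec*/, std::size_t /*bytes_transferred*/ ) {
            // parse the message header/body out of the buffer, then re-arm the next read
         } );
   }
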
@@ -2006,7 +2032,7 @@ namespace eosio { } connection_wptr weak_conn = conn; - std::size_t minimum_read = conn->outstanding_read_bytes ? *conn->outstanding_read_bytes : message_header_size; + std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? conn->outstanding_read_bytes.load() : message_header_size; if (use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; @@ -2022,7 +2048,7 @@ namespace eosio { return minimum_read - bytes_transferred; } }; - +/* if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) @@ -2031,7 +2057,7 @@ namespace eosio { if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); } else if( conn->reads_in_flight > def_max_reads_in_flight ) { - peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight) ); + peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); } else { peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); } @@ -2053,20 +2079,20 @@ namespace eosio { } ) ); return; } - +*/ ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, boost::asio::bind_executor( conn->strand, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { - app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); if (!conn) { return; } --conn->reads_in_flight; - conn->outstanding_read_bytes.reset(); + conn->outstanding_read_bytes = 0; + bool close_connection = false; try { if( !ec ) { @@ -2080,18 +2106,16 @@ namespace eosio { uint32_t bytes_in_buffer = conn->pending_message_buffer.bytes_to_read(); if (bytes_in_buffer < message_header_size) { - conn->outstanding_read_bytes.emplace(message_header_size - bytes_in_buffer); + conn->outstanding_read_bytes = message_header_size - bytes_in_buffer; break; } else { uint32_t message_length; auto index = conn->pending_message_buffer.read_index(); conn->pending_message_buffer.peek(&message_length, sizeof(message_length), index); if(message_length > def_send_buffer_size*2 || message_length == 0) { - boost::system::error_code ec; - fc_elog( logger,"incoming message length unexpected (${i}), from ${p}", - ("i", message_length)("p",boost::lexical_cast(conn->socket->remote_endpoint(ec))) ); - close(conn); - return; + fc_elog( logger,"incoming message length unexpected (${i})", ("i", message_length) ); + close_connection = true; + break; } auto total_message_bytes = message_length + message_header_size; @@ -2108,38 +2132,44 @@ namespace eosio { conn->pending_message_buffer.add_space( outstanding_message_bytes - available_buffer_bytes ); } - conn->outstanding_read_bytes.emplace(outstanding_message_bytes); + conn->outstanding_read_bytes = outstanding_message_bytes; break; } } } - start_read_message(conn); + if( !close_connection ) start_read_message( conn ); } else { - auto pname = conn->peer_name(); if (ec.value() != boost::asio::error::eof) { - fc_elog( logger, "Error reading message from ${p}: ${m}",("p",pname)( "m", ec.message() ) ); + fc_elog( logger, "Error reading message: ${m}", ( "m", ec.message() ) ); } else { - fc_ilog( logger, "Peer ${p} closed connection",("p",pname) ); + fc_ilog( logger, "Peer closed connection" ); } - close( conn 
); + close_connection = true; } } catch(const std::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.what()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data: ${s}", ("s",ex.what()) ); + close_connection = true; } catch(const fc::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.to_string()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data ${s}", ("s",ex.to_string()) ); + close_connection = true; } catch (...) { - fc_elog( logger, "Undefined exception handling the read data from ${p}",( "p",conn->peer_name()) ); - close( conn ); + fc_elog( logger, "Undefined exception handling read data" ); + close_connection = true; } - }); - })); + + if( close_connection ) { + connection_wptr weak_conn = conn; + app().post( priority::medium, "close conn", [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + close( conn ); + }); + } + }); } catch (...) { string pname = conn ? conn->peer_name() : "no connection name"; fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); @@ -2150,6 +2180,7 @@ namespace eosio { bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { try { // if next message is a block we already have, exit early +/* auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); @@ -2166,7 +2197,7 @@ namespace eosio { return true; } } - +*/ auto ds = conn->pending_message_buffer.create_datastream(); net_message msg; fc::raw::unpack( ds, msg ); @@ -2547,18 +2578,61 @@ namespace eosio { }); } - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { - controller &cc = chain_plug->chain(); - block_id_type blk_id = msg->id(); - uint32_t blk_num = msg->block_num(); + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& m) { + signed_block_ptr msg = m; + controller& cc = chain_plug->chain(); + block_id_type blk_id = msg ? msg->id() : block_id_type(); + uint32_t blk_num = msg ? msg->block_num() : 0; fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { - if( cc.fetch_block_by_id(blk_id)) { + if( msg && cc.fetch_block_by_id(blk_id)) { sync_master->recv_block(c, blk_id, blk_num); return; } + signed_block_ptr prev = msg ? 
cc.fetch_block_by_id( msg->previous ) : msg; + if( prev == nullptr ){ //&& sync_master->is_active(c) ) { + // see if top is ready + if( !sync_master->incoming_blocks.empty() ) { + prev = sync_master->incoming_blocks.top(); + auto prev_prev = cc.fetch_block_by_id( prev->previous ); + if( prev_prev != nullptr ) { + sync_master->incoming_blocks.pop(); + if(msg) sync_master->incoming_blocks.emplace( msg ); + msg = prev; + blk_id = msg->id(); + blk_num = msg->block_num(); + connection_wptr weak = c; + app().post(priority::medium, "re post blk", [this, weak](){ + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + }); + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, "re post blk", [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From c27007616c1e89a74eda90da45e2c0eabaab99fa Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:16:39 -0600 Subject: [PATCH 0083/1648] Remove descriptions of tasks as not merged into develop yet --- plugins/net_plugin/net_plugin.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 062f72f5fb1..1c7d7eb0cad 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -665,7 +665,7 @@ namespace eosio { void operator()( signed_block&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; - app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if( c ) impl->handle_message( c, ptr ); }); @@ -673,7 +673,7 @@ namespace eosio { void operator()( packed_transaction&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; - app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if( c) impl->handle_message( c, ptr ); }); @@ -683,7 +683,7 @@ namespace eosio { void operator()( T&& msg ) const { connection_wptr weak = c; - app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); @@ -2162,7 +2162,7 @@ namespace eosio { if( close_connection ) { connection_wptr weak_conn = conn; - app().post( priority::medium, "close conn", [this, weak_conn]() { + app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2604,7 +2604,7 @@ namespace eosio { blk_id = msg->id(); blk_num = msg->block_num(); connection_wptr weak = 
c; - app().post(priority::medium, "re post blk", [this, weak](){ + app().post(priority::medium, [this, weak](){ connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); }); @@ -2613,7 +2613,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2625,7 +2625,7 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; - app().post( priority::medium, "re post blk", [this, weak]() { + app().post( priority::medium, [this, weak]() { connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); From bde8ae642cc391f544bd2e938ea94aacdd97bc2a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:45:04 -0600 Subject: [PATCH 0084/1648] Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. --- plugins/net_plugin/net_plugin.cpp | 53 ++----------------------------- 1 file changed, 2 insertions(+), 51 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 1c7d7eb0cad..13a0e5d524a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -75,12 +75,6 @@ namespace eosio { } }; - struct block_greater { - bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { - return lhs->block_num() > rhs->block_num(); - } - }; - typedef multi_index_container< node_transaction_state, indexed_by< @@ -507,7 +501,7 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; - std::atomic outstanding_read_bytes{0}; + std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads queued_buffer buffer_queue; @@ -724,8 +718,6 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); - - std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -2024,6 +2016,7 @@ namespace eosio { }); } + // only called from server_ioc thread void net_plugin_impl::start_read_message(const connection_ptr& conn) { try { @@ -2591,48 +2584,6 @@ namespace eosio { sync_master->recv_block(c, blk_id, blk_num); return; } - signed_block_ptr prev = msg ? 
cc.fetch_block_by_id( msg->previous ) : msg; - if( prev == nullptr ){ //&& sync_master->is_active(c) ) { - // see if top is ready - if( !sync_master->incoming_blocks.empty() ) { - prev = sync_master->incoming_blocks.top(); - auto prev_prev = cc.fetch_block_by_id( prev->previous ); - if( prev_prev != nullptr ) { - sync_master->incoming_blocks.pop(); - if(msg) sync_master->incoming_blocks.emplace( msg ); - msg = prev; - blk_id = msg->id(); - blk_num = msg->block_num(); - connection_wptr weak = c; - app().post(priority::medium, [this, weak](){ - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - }); - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } else { - if( msg ) { - sync_master->incoming_blocks.emplace( msg ); - - connection_wptr weak = c; - app().post( priority::medium, [this, weak]() { - connection_ptr c = weak.lock(); - if( c ) handle_message( c, signed_block_ptr() ); - } ); - } - return; - } - } } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From d44dae9effb98ef04d045d772ba1ec44fd55f659 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:49:28 -0600 Subject: [PATCH 0085/1648] Revert unneeded changes to handle_message --- plugins/net_plugin/net_plugin.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 13a0e5d524a..a654a4fa699 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2571,16 +2571,15 @@ namespace eosio { }); } - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& m) { - signed_block_ptr msg = m; + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { controller& cc = chain_plug->chain(); - block_id_type blk_id = msg ? msg->id() : block_id_type(); - uint32_t blk_num = msg ? 
msg->block_num() : 0; + block_id_type blk_id = msg->id(); + uint32_t blk_num = msg->block_num(); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { - if( msg && cc.fetch_block_by_id(blk_id)) { + if( cc.fetch_block_by_id(blk_id) ) { sync_master->recv_block(c, blk_id, blk_num); return; } From 1f605e2fc85fc2c3d3e7dc83f4511e4b9056e600 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 14:50:46 -0600 Subject: [PATCH 0086/1648] Logging fixes --- plugins/net_plugin/net_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a654a4fa699..ef0e7fc6b02 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1641,6 +1641,7 @@ namespace eosio { uint32_t bnum = bs->block_num; peer_block_state pbstate{bs->id, bnum}; + fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); std::shared_ptr> send_buffer; for( auto& cp : my_impl->connections ) { @@ -1655,7 +1656,7 @@ namespace eosio { if( !send_buffer ) { send_buffer = create_send_buffer( bs->block ); } - fc_dlog(logger, "bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name())); + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); } } From 3c96f68234ab0b1fa2dd23017346636888d2477e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 18:26:06 -0600 Subject: [PATCH 0087/1648] Make delay_timer thread safe --- plugins/net_plugin/net_plugin.cpp | 56 ++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index ef0e7fc6b02..eb6e0ac1ac6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -419,6 +419,7 @@ namespace eosio { } } + // thread safe uint32_t write_queue_size() const { return _write_queue_size; } bool is_out_queue_empty() const { return _out_queue.empty(); } @@ -477,7 +478,7 @@ namespace eosio { std::function callback; }; - uint32_t _write_queue_size = 0; + std::atomic _write_queue_size{0}; deque _write_queue; deque _sync_write_queue; // sync_write_queue will be sent first deque _out_queue; @@ -507,7 +508,7 @@ namespace eosio { queued_buffer buffer_queue; std::atomic reads_in_flight{0}; - uint32_t trx_in_progress_size = 0; + std::atomic trx_in_progress_size{0}; fc::sha256 node_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; @@ -517,6 +518,7 @@ namespace eosio { uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; + std::mutex read_delay_timer_mutex; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; @@ -841,7 +843,10 @@ namespace eosio { my_impl->sync_master->reset_lib_num(shared_from_this()); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); - if( read_delay_timer ) read_delay_timer->cancel(); + { + std::lock_guard g( read_delay_timer_mutex ); + if( read_delay_timer ) read_delay_timer->cancel(); + } } void connection::txn_send_pending(const vector& ids) { @@ -2042,27 +2047,36 @@ namespace eosio { return minimum_read - bytes_transferred; } }; -/* + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) { // too much queued up, reschedule - if( 
conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { - peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); - } else if( conn->reads_in_flight > def_max_reads_in_flight ) { - peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); + uint32_t write_queue_size = conn->buffer_queue.write_queue_size(); + uint32_t trx_in_progress_size = conn->trx_in_progress_size; + uint32_t reads_in_flight = conn->reads_in_flight; + if( write_queue_size > def_max_write_queue_size ) { + peer_wlog( conn, "write_queue full ${s} bytes", ("s", write_queue_size) ); + } else if( reads_in_flight > def_max_reads_in_flight ) { + peer_wlog( conn, "max reads in flight ${s}", ("s", reads_in_flight) ); } else { - peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); + peer_wlog( conn, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); } - if( conn->buffer_queue.write_queue_size() > 2*def_max_write_queue_size || - conn->reads_in_flight > 2*def_max_reads_in_flight || - conn->trx_in_progress_size > 2*def_max_trx_in_progress_size ) + if( write_queue_size > 2*def_max_write_queue_size || + reads_in_flight > 2*def_max_reads_in_flight || + trx_in_progress_size > 2*def_max_trx_in_progress_size ) { - fc_wlog( logger, "queues over full, giving up on connection ${p}", ("p", conn->peer_name()) ); - my_impl->close( conn ); + fc_wlog( logger, "queues over full, giving up on connection" ); + app().post( priority::medium, [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + my_impl->close( conn ); + }); return; } + std::lock_guard g( conn->read_delay_timer_mutex ); if( !conn->read_delay_timer ) return; conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); conn->read_delay_timer->async_wait( @@ -2073,7 +2087,7 @@ namespace eosio { } ) ); return; } -*/ + ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2155,7 +2169,6 @@ namespace eosio { } if( close_connection ) { - connection_wptr weak_conn = conn; app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; @@ -2165,9 +2178,14 @@ namespace eosio { } }); } catch (...) { - string pname = conn ? 
conn->peer_name() : "no connection name"; - fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); - close( conn ); + fc_elog( logger, "Undefined exception in start_read_message" ); + connection_wptr weak_conn = conn; + app().post( priority::medium, [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + close( conn ); + }); } } From 85f929898331815939421af6eccb4fe8aabf3b23 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:16:39 -0600 Subject: [PATCH 0088/1648] Remove descriptions of tasks as not merged into develop yet --- plugins/net_plugin/net_plugin.cpp | 45 +++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index eb6e0ac1ac6..b47ee19118d 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2169,6 +2169,7 @@ namespace eosio { } if( close_connection ) { + connection_wptr weak_conn = conn; app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; @@ -2602,6 +2603,50 @@ namespace eosio { sync_master->recv_block(c, blk_id, blk_num); return; } +<<<<<<< HEAD +======= + signed_block_ptr prev = msg ? cc.fetch_block_by_id( msg->previous ) : msg; + if( prev == nullptr ){ //&& sync_master->is_active(c) ) { + // see if top is ready + if( !sync_master->incoming_blocks.empty() ) { + prev = sync_master->incoming_blocks.top(); + auto prev_prev = cc.fetch_block_by_id( prev->previous ); + if( prev_prev != nullptr ) { + sync_master->incoming_blocks.pop(); + if(msg) sync_master->incoming_blocks.emplace( msg ); + msg = prev; + blk_id = msg->id(); + blk_num = msg->block_num(); + connection_wptr weak = c; + app().post(priority::medium, [this, weak](){ + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + }); + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } else { + if( msg ) { + sync_master->incoming_blocks.emplace( msg ); + + connection_wptr weak = c; + app().post( priority::medium, [this, weak]() { + connection_ptr c = weak.lock(); + if( c ) handle_message( c, signed_block_ptr() ); + } ); + } + return; + } + } } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 53e79fb6bb902125274f7c81453b3d1ece4cbf39 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:45:04 -0600 Subject: [PATCH 0089/1648] Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. --- plugins/net_plugin/net_plugin.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b47ee19118d..f6b8919c548 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2604,6 +2604,7 @@ namespace eosio { return; } <<<<<<< HEAD +<<<<<<< HEAD ======= signed_block_ptr prev = msg ? cc.fetch_block_by_id( msg->previous ) : msg; if( prev == nullptr ){ //&& sync_master->is_active(c) ) { @@ -2647,6 +2648,8 @@ namespace eosio { return; } } +======= +>>>>>>> Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. } catch( ...) 
{ // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 4021df9cb9b080737b4fd4ff2f50ac3af0d05a24 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 18:26:06 -0600 Subject: [PATCH 0090/1648] Make delay_timer thread safe --- plugins/net_plugin/net_plugin.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index f6b8919c548..eda987724f7 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2169,7 +2169,6 @@ namespace eosio { } if( close_connection ) { - connection_wptr weak_conn = conn; app().post( priority::medium, [this, weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; From 6732a0fe7e843b17647ba9616945391a953c846a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 2 Mar 2019 11:22:30 -0600 Subject: [PATCH 0091/1648] Remove unneeded access to atomic --- plugins/net_plugin/net_plugin.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index eda987724f7..b91bdaf21f7 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -26,6 +26,8 @@ #include #include +#include + using namespace eosio::chain::plugin_interface::compat; namespace fc { @@ -2031,7 +2033,9 @@ namespace eosio { } connection_wptr weak_conn = conn; - std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? conn->outstanding_read_bytes.load() : message_header_size; + std::size_t minimum_read = + std::atomic_exchangeoutstanding_read_bytes.load())>( &conn->outstanding_read_bytes, 0 ); + minimum_read = minimum_read != 0 ? minimum_read : message_header_size; if (use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; @@ -2099,7 +2103,6 @@ namespace eosio { } --conn->reads_in_flight; - conn->outstanding_read_bytes = 0; bool close_connection = false; try { From eceaa9bb5cd551e3d38b15ddeeb5404421fdc786 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 4 Mar 2019 15:44:26 -0500 Subject: [PATCH 0092/1648] Move sig recovery in producer plugin to use thread pool of chain controller --- plugins/producer_plugin/producer_plugin.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a35fa34a9c5..6b7646e9cdd 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -351,8 +351,11 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, trx, persist_until_expired, next]() { + fc::microseconds max_trx_cpu_usage{ cfg.max_transaction_cpu_usage }; + auto& tp = *_thread_pool; + boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { + // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress + transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage ); if( trx->signing_keys_future.valid() ) trx->signing_keys_future.wait(); 
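// Illustrative aside, not part of this hunk: the point of the change above is
// that a blocking wait on a future must not run in the same fixed-size pool
// that fulfills it, or the pool can starve itself. A minimal standalone sketch
// of that shape (assumed pool names; needs <future>, <memory>,
// <boost/asio/thread_pool.hpp>, <boost/asio/post.hpp>):
//
//   boost::asio::thread_pool work_pool( 4 );   // plays the role of the chain thread pool
//   boost::asio::thread_pool wait_pool( 4 );   // plays the role of the producer thread pool
//   auto task = std::make_shared<std::packaged_task<int()>>( []() { return 42; } );
//   std::future<int> fut = task->get_future();
//   boost::asio::post( work_pool, [task]() { (*task)(); } );   // recovery-style work runs here
//   boost::asio::post( wait_pool, [f = std::move(fut)]() mutable {
//      f.wait();   // safe: a different pool completes the future
//   } );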
app().post(priority::low, [self, trx, persist_until_expired, next]() { From c6766b0f2f41a85a55a8131d6275d52fce2b94fd Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 4 Mar 2019 15:47:26 -0500 Subject: [PATCH 0093/1648] Cache db_read_mode to avoid accessing controller --- plugins/net_plugin/net_plugin.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b91bdaf21f7..e27d8ac0168 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -145,6 +145,7 @@ namespace eosio { bool network_version_match = false; chain_id_type chain_id; fc::sha256 node_id; + eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; string user_agent_name; chain_plugin* chain_plug = nullptr; @@ -2072,7 +2073,7 @@ namespace eosio { trx_in_progress_size > 2*def_max_trx_in_progress_size ) { fc_wlog( logger, "queues over full, giving up on connection" ); - app().post( priority::medium, [this, weak_conn]() { + app().post( priority::medium, [weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2555,8 +2556,7 @@ namespace eosio { void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); - controller& cc = my_impl->chain_plug->chain(); - if( cc.get_read_mode() == eosio::db_read_mode::READ_ONLY ) { + if( db_read_mode == eosio::db_read_mode::READ_ONLY ) { fc_dlog(logger, "got a txn in read-only mode - dropping"); return; } @@ -3165,7 +3165,8 @@ namespace eosio { my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); - if( cc.get_read_mode() == chain::db_read_mode::READ_ONLY ) { + my->db_read_mode = cc.get_read_mode(); + if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; fc_ilog( logger, "node in read-only mode setting max_nodes_per_host to 0 to prevent connections" ); } From e63fdbf35c9e3976f47e878fe8db3e978df9bddc Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:16:13 -0500 Subject: [PATCH 0094/1648] Make chain_id const since read from thread pool threads --- libraries/chain/controller.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index f3b0a841981..b8470993c05 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -124,7 +124,7 @@ struct controller_impl { resource_limits_manager resource_limits; authorization_manager authorization; controller::config conf; - chain_id_type chain_id; + const chain_id_type chain_id; // read by thread_pool threads bool replaying= false; optional replay_head_time; db_read_mode read_mode = db_read_mode::SPECULATIVE; From a7c46d7c8a87c1887e9ab01269dd085e25e83a8f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:16:54 -0500 Subject: [PATCH 0095/1648] Add trx id to log message --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index e27d8ac0168..6a3f72f23e8 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2581,7 +2581,7 @@ namespace eosio { } else { auto trace = result.get(); if 
(!trace->except) { - fc_dlog(logger, "chain accepted transaction"); + fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); this->dispatcher->bcast_transaction(ptrx); return; } From 2453be99b6c02d7aaaa0564ec36713df007c5238 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:18:34 -0500 Subject: [PATCH 0096/1648] Use command line max_transaction_time for limit of transaction sig recovery. This was done so that global properties configuration does not have to be accessed since the plan is to move this code into the thread pool. --- plugins/producer_plugin/producer_plugin.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 6b7646e9cdd..c2d600c467b 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -134,7 +134,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _thread_pool; - int32_t _max_transaction_time_ms; + std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool fc::microseconds _max_irreversible_block_age_us; int32_t _produce_time_offset_us = 0; int32_t _last_block_time_offset_us = 0; @@ -350,8 +350,7 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); - const auto& cfg = chain.get_global_properties().configuration; - fc::microseconds max_trx_cpu_usage{ cfg.max_transaction_cpu_usage }; + fc::microseconds max_trx_cpu_usage = fc::milliseconds( _max_transaction_time_ms.load() ); auto& tp = *_thread_pool; boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress From eb1efbdce026b5b117c2b60bed129663137482c0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:58:27 -0500 Subject: [PATCH 0097/1648] Remove unneeded ack of transactions. 
Already processed by handle_message of packed_transaction_ptr --- plugins/net_plugin/net_plugin.cpp | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 6a3f72f23e8..37fafb45685 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -158,8 +158,6 @@ namespace eosio { bool use_socket_read_watermark = false; - channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; @@ -189,7 +187,6 @@ namespace eosio { void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); void accepted_block(const block_state_ptr&); - void transaction_ack(const std::pair&); bool is_valid( const handshake_message &msg); @@ -2837,17 +2834,6 @@ namespace eosio { dispatcher->bcast_block(block); } - void net_plugin_impl::transaction_ack(const std::pair& results) { - const auto& id = results.second->id; - if (results.first) { - fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); - dispatcher->rejected_transaction(id); - } else { - fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); - dispatcher->bcast_transaction(results.second); - } - } - bool net_plugin_impl::authenticate_peer(const handshake_message& msg) const { if(allowed_connections == None) return false; @@ -3163,8 +3149,6 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } - my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); - my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; From fb9c07b3f0949053ea369a2178be9682da451412 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 15:51:47 -0500 Subject: [PATCH 0098/1648] Revert "Remove unneeded ack of transactions. Already processed by handle_message of packed_transaction_ptr" This reverts commit 7f340f7347da146b360251214fbf655d11a955cd. 
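For context, the handler restored below follows a simple ack/nack shape: the producer side publishes a result paired with the transaction, and the subscriber either rebroadcasts the transaction to peers or tells the dispatcher to drop it. A minimal sketch of that decision, with hypothetical ack_result and Dispatcher types standing in for the plugin's own:

   #include <exception>
   #include <string>

   struct ack_result {                        // hypothetical stand-in for the channel payload
      std::exception_ptr error;               // set when the chain rejected the transaction
      std::string        trx_id;              // id of the transaction being acknowledged
   };

   template <typename Dispatcher>             // anything exposing the two calls below
   void on_transaction_ack( const ack_result& r, Dispatcher& d ) {
      if( r.error ) d.rejected_transaction( r.trx_id );   // nack: stop tracking the id
      else          d.bcast_transaction( r.trx_id );      // ack: forward to peers
   }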
--- plugins/net_plugin/net_plugin.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 37fafb45685..6a3f72f23e8 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -158,6 +158,8 @@ namespace eosio { bool use_socket_read_watermark = false; + channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; @@ -187,6 +189,7 @@ namespace eosio { void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); void accepted_block(const block_state_ptr&); + void transaction_ack(const std::pair&); bool is_valid( const handshake_message &msg); @@ -2834,6 +2837,17 @@ namespace eosio { dispatcher->bcast_block(block); } + void net_plugin_impl::transaction_ack(const std::pair& results) { + const auto& id = results.second->id; + if (results.first) { + fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); + dispatcher->rejected_transaction(id); + } else { + fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); + dispatcher->bcast_transaction(results.second); + } + } + bool net_plugin_impl::authenticate_peer(const handshake_message& msg) const { if(allowed_connections == None) return false; @@ -3149,6 +3163,8 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } + my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; From 8663321960cc1dc9afcbc9b5cf5e7008f1fe7b11 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 18:54:14 -0500 Subject: [PATCH 0099/1648] Fix tests, handle -1 as unlimited max_transaction_tim_ms. --- plugins/producer_plugin/producer_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index c2d600c467b..217f931a532 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -350,7 +350,8 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); - fc::microseconds max_trx_cpu_usage = fc::milliseconds( _max_transaction_time_ms.load() ); + const auto max_trx_time_ms = _max_transaction_time_ms.load(); + fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); auto& tp = *_thread_pool; boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress From 9e53b90063bdb7562fc5773c8527b4d18a5ccb9e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 20:25:24 -0500 Subject: [PATCH 0100/1648] force a build --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 6a3f72f23e8..12a88400353 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2779,8 +2779,8 @@ namespace eosio { auto &stale_blk = c->blk_state.get(); stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } - fc_dlog(logger, "expire_txns ${n}us size ${s} removed ${r}", - ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); + fc_dlog( logger, "expire_txns ${n}us size ${s} removed ${r}", + ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); } void net_plugin_impl::expire_local_txns() { From d28405837b1939ea3f1bd1d7472a7ff58ee9ef86 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 6 Mar 2019 13:55:44 -0500 Subject: [PATCH 0101/1648] Move more of incoming transaction processing to thread pool --- plugins/net_plugin/net_plugin.cpp | 144 +++++++++++++++++++----------- 1 file changed, 94 insertions(+), 50 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 12a88400353..78b7caa80e2 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -152,6 +152,7 @@ namespace eosio { producer_plugin* producer_plug = nullptr; int started_sessions = 0; + std::mutex local_txns_mtx; node_transaction_index local_txns; shared_ptr resolver; @@ -671,11 +672,7 @@ namespace eosio { } void operator()( packed_transaction&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if( c) impl->handle_message( c, ptr ); - }); + impl.handle_message( c, ptr ); } template @@ -855,21 +852,35 @@ namespace eosio { void connection::txn_send_pending(const vector& ids) { const std::set known_ids(ids.cbegin(), ids.cend()); my_impl->expire_local_txns(); - for(auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ){ - const bool found = known_ids.find( tx->id ) != known_ids.cend(); - if( !found ) { - queue_write( tx->serialized_txn, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + vector>> trx_to_send; + { + std::lock_guard g( my_impl->local_txns_mtx ); + for( auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ) { + const bool found = known_ids.find( tx->id ) != known_ids.cend(); + if( !found ) { + trx_to_send.emplace_back( tx->serialized_txn ); + } } } + for( const auto& t : trx_to_send ) { + queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + } } void connection::txn_send(const vector& ids) { - for(const auto& t : ids) { - auto tx = my_impl->local_txns.get().find(t); - if( tx != my_impl->local_txns.end() ) { - queue_write( tx->serialized_txn, true, priority::low, []( 
boost::system::error_code ec, std::size_t ) {} ); + vector>> trx_to_send; + { + std::lock_guard g( my_impl->local_txns_mtx ); + for( const auto& t : ids ) { + auto tx = my_impl->local_txns.get().find( t ); + if( tx != my_impl->local_txns.end()) { + trx_to_send.emplace_back( tx->serialized_txn ); + } } } + for( const auto& t : trx_to_send ) { + queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); + } } void connection::blk_send_branch() { @@ -1481,7 +1492,10 @@ namespace eosio { notice_message note; note.known_blocks.mode = none; note.known_trx.mode = catch_up; - note.known_trx.pending = my_impl->local_txns.size(); + { + std::lock_guard g( my_impl->local_txns_mtx ); + note.known_trx.pending = my_impl->local_txns.size(); + } c->enqueue( note ); return; } @@ -1713,9 +1727,12 @@ namespace eosio { } received_transactions.erase(range.first, range.second); - if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end() ) { //found - fc_dlog(logger, "found trxid in local_trxs" ); - return; + { + std::lock_guard g( my_impl->local_txns_mtx ); + if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found + fc_dlog( logger, "found trxid in local_trxs" ); + return; + } } time_point_sec trx_expiration = ptrx->packed_trx->expiration(); @@ -1724,7 +1741,10 @@ namespace eosio { auto buff = create_send_buffer( trx ); node_transaction_state nts = {id, trx_expiration, 0, buff}; - my_impl->local_txns.insert(std::move(nts)); + { + std::lock_guard g( my_impl->local_txns_mtx ); + my_impl->local_txns.insert( std::move( nts )); + } my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { if( skips.find(c) != skips.end() || c->syncing ) { @@ -2457,6 +2477,7 @@ namespace eosio { // plan to get all except what we already know about. 
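// Illustrative aside, not part of this hunk: the local_txns_mtx changes in this
// patch all follow the same shape, namely copy what is needed while the mutex is
// held briefly, then do the slow work (serialization, socket writes) after the
// guard is released. A standalone sketch with assumed names:
//
//   std::mutex                local_mtx;
//   std::vector<std::string>  local_items;           // shared with other threads
//
//   std::vector<std::string> to_send;
//   {
//      std::lock_guard<std::mutex> g( local_mtx );   // hold only long enough to copy
//      to_send = local_items;
//   }
//   for( const auto& item : to_send )
//      send_to_peer( item );                         // hypothetical slow call, done unlocked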
req.req_trx.mode = catch_up; send_req = true; + std::lock_guard g( my_impl->local_txns_mtx ); size_t known_sum = local_txns.size(); if( known_sum ) { for( const auto& t : local_txns.get() ) { @@ -2553,6 +2574,7 @@ namespace eosio { trx->get_signatures().size() * sizeof(signature_type); } + // called from thread_pool threads void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); @@ -2560,36 +2582,47 @@ namespace eosio { fc_dlog(logger, "got a txn in read-only mode - dropping"); return; } - if( sync_master->is_active(c) ) { - fc_dlog(logger, "got a txn during sync - dropping"); - return; - } auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; - if(local_txns.get().find(tid) != local_txns.end()) { - fc_dlog(logger, "got a duplicate transaction - dropping"); - return; + { + std::lock_guard g( local_txns_mtx ); + if( local_txns.get().find( tid ) != local_txns.end()) { + fc_dlog( logger, "got a duplicate transaction - dropping" ); + return; + } } - dispatcher->recv_transaction(c, tid); + connection_wptr weak_ptr = c; + app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, tid](){ + auto c = weak_ptr.lock(); + dispatcher->recv_transaction(c, tid); + }); c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); + bool accepted = false; if (result.contains()) { peer_dlog(c, "bad packed_transaction : ${m}", ("m",result.get()->what())); } else { auto trace = result.get(); if (!trace->except) { fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); - this->dispatcher->bcast_transaction(ptrx); - return; + accepted = true; } - peer_elog(c, "bad packed_transaction : ${m}", ("m",trace->except->what())); + if( !accepted ) { + peer_elog( c, "bad packed_transaction : ${m}", ("m", trace->except->what())); + } } - dispatcher->rejected_transaction(ptrx->id); + app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}]() { + if( accepted ) { + dispatcher->bcast_transaction( ptrx ); + } else { + dispatcher->rejected_transaction( ptrx->id ); + } + }); }); } @@ -2687,15 +2720,19 @@ namespace eosio { update_block_num ubn(blk_num); if( reason == no_reason ) { - for (const auto &recpt : msg->transactions) { - auto id = (recpt.trx.which() == 0) ? recpt.trx.get() : recpt.trx.get().id(); - auto ltx = local_txns.get().find(id); - if( ltx != local_txns.end()) { - local_txns.modify( ltx, ubn ); - } - auto ctx = c->trx_state.get().find(id); - if( ctx != c->trx_state.end()) { - c->trx_state.modify( ctx, ubn ); + { + std::lock_guard g( local_txns_mtx ); + for( const auto& recpt : msg->transactions ) { + auto id = (recpt.trx.which() == 0) ? 
recpt.trx.get() + : recpt.trx.get().id(); + auto ltx = local_txns.get().find( id ); + if( ltx != local_txns.end()) { + local_txns.modify( ltx, ubn ); + } + auto ctx = c->trx_state.get().find( id ); + if( ctx != c->trx_state.end()) { + c->trx_state.modify( ctx, ubn ); + } } } sync_master->recv_block(c, blk_id, blk_num); @@ -2764,8 +2801,6 @@ namespace eosio { start_txn_timer(); auto now = time_point::now(); - auto start_size = local_txns.size(); - expire_local_txns(); controller& cc = chain_plug->chain(); @@ -2779,20 +2814,29 @@ namespace eosio { auto &stale_blk = c->blk_state.get(); stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } - fc_dlog( logger, "expire_txns ${n}us size ${s} removed ${r}", - ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); + fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); } void net_plugin_impl::expire_local_txns() { - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec(0) ); - auto ex_up = old.upper_bound( time_point::now() ); - old.erase( ex_lo, ex_up ); - - auto& stale = local_txns.get(); controller& cc = chain_plug->chain(); uint32_t lib = cc.last_irreversible_block_num(); - stale.erase( stale.lower_bound(1), stale.upper_bound(lib) ); + size_t start_size = 0, end_size = 0; + + { + std::lock_guard g( local_txns_mtx ); + + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 )); + auto ex_up = old.upper_bound( time_point::now()); + old.erase( ex_lo, ex_up ); + + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib )); + end_size = local_txns.size(); + } + + fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)("r", start_size - end_size) ); } void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { From 11e2252a7132ff9d9505cc73272acab723957a0d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:08:40 -0600 Subject: [PATCH 0102/1648] Test of multi-threaded reading --- plugins/net_plugin/net_plugin.cpp | 90 ++++++++++++++++++++++++++++++- 1 file changed, 88 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 78b7caa80e2..d9725977e53 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -77,6 +77,12 @@ namespace eosio { } }; + struct block_greater { + bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { + return lhs->block_num() > rhs->block_num(); + } + }; + typedef multi_index_container< node_transaction_state, indexed_by< @@ -506,13 +512,21 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; +<<<<<<< HEAD std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads +======= + std::atomic outstanding_read_bytes{0}; +>>>>>>> Test of multi-threaded reading queued_buffer buffer_queue; std::atomic reads_in_flight{0}; +<<<<<<< HEAD std::atomic trx_in_progress_size{0}; +======= + uint32_t trx_in_progress_size = 0; +>>>>>>> Test of multi-threaded reading fc::sha256 node_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; @@ -665,21 +679,37 @@ namespace eosio { void operator()( signed_block&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; +<<<<<<< HEAD app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, 
weak{std::move(weak)}] { +======= + app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { +>>>>>>> Test of multi-threaded reading connection_ptr c = weak.lock(); if( c ) impl->handle_message( c, ptr ); }); } void operator()( packed_transaction&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); +<<<<<<< HEAD impl.handle_message( c, ptr ); +======= + connection_wptr weak = c; + app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if( c) impl->handle_message( c, ptr ); + }); +>>>>>>> Test of multi-threaded reading } template void operator()( T&& msg ) const { connection_wptr weak = c; +<<<<<<< HEAD app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { +======= + app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { +>>>>>>> Test of multi-threaded reading connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); @@ -720,6 +750,8 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); + + std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -843,10 +875,14 @@ namespace eosio { my_impl->sync_master->reset_lib_num(shared_from_this()); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); +<<<<<<< HEAD { std::lock_guard g( read_delay_timer_mutex ); if( read_delay_timer ) read_delay_timer->cancel(); } +======= + if( read_delay_timer ) read_delay_timer->cancel(); +>>>>>>> Test of multi-threaded reading } void connection::txn_send_pending(const vector& ids) { @@ -2054,9 +2090,13 @@ namespace eosio { } connection_wptr weak_conn = conn; +<<<<<<< HEAD std::size_t minimum_read = std::atomic_exchangeoutstanding_read_bytes.load())>( &conn->outstanding_read_bytes, 0 ); minimum_read = minimum_read != 0 ? minimum_read : message_header_size; +======= + std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? 
conn->outstanding_read_bytes.load() : message_header_size; +>>>>>>> Test of multi-threaded reading if (use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; @@ -2072,12 +2112,13 @@ namespace eosio { return minimum_read - bytes_transferred; } }; - +/* if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) { // too much queued up, reschedule +<<<<<<< HEAD uint32_t write_queue_size = conn->buffer_queue.write_queue_size(); uint32_t trx_in_progress_size = conn->trx_in_progress_size; uint32_t reads_in_flight = conn->reads_in_flight; @@ -2085,6 +2126,12 @@ namespace eosio { peer_wlog( conn, "write_queue full ${s} bytes", ("s", write_queue_size) ); } else if( reads_in_flight > def_max_reads_in_flight ) { peer_wlog( conn, "max reads in flight ${s}", ("s", reads_in_flight) ); +======= + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { + peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); + } else if( conn->reads_in_flight > def_max_reads_in_flight ) { + peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); +>>>>>>> Test of multi-threaded reading } else { peer_wlog( conn, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); } @@ -2112,7 +2159,7 @@ namespace eosio { } ) ); return; } - +*/ ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2124,6 +2171,10 @@ namespace eosio { } --conn->reads_in_flight; +<<<<<<< HEAD +======= + conn->outstanding_read_bytes = 0; +>>>>>>> Test of multi-threaded reading bool close_connection = false; try { @@ -2193,7 +2244,12 @@ namespace eosio { } if( close_connection ) { +<<<<<<< HEAD app().post( priority::medium, [this, weak_conn]() { +======= + connection_wptr weak_conn = conn; + app().post( priority::medium, "close conn", [this, weak_conn]() { +>>>>>>> Test of multi-threaded reading auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2626,14 +2682,23 @@ namespace eosio { }); } +<<<<<<< HEAD void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { controller& cc = chain_plug->chain(); block_id_type blk_id = msg->id(); uint32_t blk_num = msg->block_num(); +======= + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& m) { + signed_block_ptr msg = m; + controller& cc = chain_plug->chain(); + block_id_type blk_id = msg ? msg->id() : block_id_type(); + uint32_t blk_num = msg ? msg->block_num() : 0; +>>>>>>> Test of multi-threaded reading fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { +<<<<<<< HEAD if( cc.fetch_block_by_id(blk_id) ) { sync_master->recv_block(c, blk_id, blk_num); return; @@ -2641,6 +2706,12 @@ namespace eosio { <<<<<<< HEAD <<<<<<< HEAD ======= +======= + if( msg && cc.fetch_block_by_id(blk_id)) { + sync_master->recv_block(c, blk_id, blk_num); + return; + } +>>>>>>> Test of multi-threaded reading signed_block_ptr prev = msg ? 
cc.fetch_block_by_id( msg->previous ) : msg; if( prev == nullptr ){ //&& sync_master->is_active(c) ) { // see if top is ready @@ -2654,7 +2725,11 @@ namespace eosio { blk_id = msg->id(); blk_num = msg->block_num(); connection_wptr weak = c; +<<<<<<< HEAD app().post(priority::medium, [this, weak](){ +======= + app().post(priority::medium, "re post blk", [this, weak](){ +>>>>>>> Test of multi-threaded reading connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); }); @@ -2663,7 +2738,11 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; +<<<<<<< HEAD app().post( priority::medium, [this, weak]() { +======= + app().post( priority::medium, "re post blk", [this, weak]() { +>>>>>>> Test of multi-threaded reading connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2675,7 +2754,11 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; +<<<<<<< HEAD app().post( priority::medium, [this, weak]() { +======= + app().post( priority::medium, "re post blk", [this, weak]() { +>>>>>>> Test of multi-threaded reading connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2683,8 +2766,11 @@ namespace eosio { return; } } +<<<<<<< HEAD ======= >>>>>>> Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. +======= +>>>>>>> Test of multi-threaded reading } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 0cb84d61f02fc44ac3f0f9ba43df942c8c2ef24b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:16:39 -0600 Subject: [PATCH 0103/1648] Remove descriptions of tasks as not merged into develop yet --- plugins/net_plugin/net_plugin.cpp | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d9725977e53..0aaf1f66336 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -679,11 +679,15 @@ namespace eosio { void operator()( signed_block&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; +<<<<<<< HEAD <<<<<<< HEAD app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { ======= app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { >>>>>>> Test of multi-threaded reading +======= + app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { +>>>>>>> Remove descriptions of tasks as not merged into develop yet connection_ptr c = weak.lock(); if( c ) impl->handle_message( c, ptr ); }); @@ -694,7 +698,7 @@ namespace eosio { impl.handle_message( c, ptr ); ======= connection_wptr weak = c; - app().post(priority::low, "handle trx", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::low, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); if( c) impl->handle_message( c, ptr ); }); @@ -705,11 +709,15 @@ namespace eosio { void operator()( T&& msg ) const { connection_wptr weak = c; +<<<<<<< HEAD <<<<<<< HEAD app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { ======= app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { >>>>>>> Test of multi-threaded reading +======= + 
app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { +>>>>>>> Remove descriptions of tasks as not merged into develop yet connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); @@ -2248,8 +2256,12 @@ namespace eosio { app().post( priority::medium, [this, weak_conn]() { ======= connection_wptr weak_conn = conn; +<<<<<<< HEAD app().post( priority::medium, "close conn", [this, weak_conn]() { >>>>>>> Test of multi-threaded reading +======= + app().post( priority::medium, [this, weak_conn]() { +>>>>>>> Remove descriptions of tasks as not merged into develop yet auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2725,11 +2737,15 @@ namespace eosio { blk_id = msg->id(); blk_num = msg->block_num(); connection_wptr weak = c; +<<<<<<< HEAD <<<<<<< HEAD app().post(priority::medium, [this, weak](){ ======= app().post(priority::medium, "re post blk", [this, weak](){ >>>>>>> Test of multi-threaded reading +======= + app().post(priority::medium, [this, weak](){ +>>>>>>> Remove descriptions of tasks as not merged into develop yet connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); }); @@ -2738,11 +2754,15 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; +<<<<<<< HEAD <<<<<<< HEAD app().post( priority::medium, [this, weak]() { ======= app().post( priority::medium, "re post blk", [this, weak]() { >>>>>>> Test of multi-threaded reading +======= + app().post( priority::medium, [this, weak]() { +>>>>>>> Remove descriptions of tasks as not merged into develop yet connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2754,11 +2774,15 @@ namespace eosio { sync_master->incoming_blocks.emplace( msg ); connection_wptr weak = c; +<<<<<<< HEAD <<<<<<< HEAD app().post( priority::medium, [this, weak]() { ======= app().post( priority::medium, "re post blk", [this, weak]() { >>>>>>> Test of multi-threaded reading +======= + app().post( priority::medium, [this, weak]() { +>>>>>>> Remove descriptions of tasks as not merged into develop yet connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); From 6444c1a380e0744dee455b29b5068050754a37cc Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:45:04 -0600 Subject: [PATCH 0104/1648] Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. --- plugins/net_plugin/net_plugin.cpp | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0aaf1f66336..8446c09b2ce 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -77,12 +77,6 @@ namespace eosio { } }; - struct block_greater { - bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { - return lhs->block_num() > rhs->block_num(); - } - }; - typedef multi_index_container< node_transaction_state, indexed_by< @@ -512,11 +506,15 @@ namespace eosio { socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; +<<<<<<< HEAD <<<<<<< HEAD std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads ======= std::atomic outstanding_read_bytes{0}; >>>>>>> Test of multi-threaded reading +======= + std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads +>>>>>>> Use appbase with FIFO priority queue. 
priority queue in net_plugin no longer needed. queued_buffer buffer_queue; @@ -758,8 +756,6 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); - - std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -2723,6 +2719,7 @@ namespace eosio { sync_master->recv_block(c, blk_id, blk_num); return; } +<<<<<<< HEAD >>>>>>> Test of multi-threaded reading signed_block_ptr prev = msg ? cc.fetch_block_by_id( msg->previous ) : msg; if( prev == nullptr ){ //&& sync_master->is_active(c) ) { @@ -2795,6 +2792,8 @@ namespace eosio { >>>>>>> Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. ======= >>>>>>> Test of multi-threaded reading +======= +>>>>>>> Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 8d4e010b0eed1e111a3e52627c2eb3c92dd17a9e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:49:28 -0600 Subject: [PATCH 0105/1648] Revert unneeded changes to handle_message --- plugins/net_plugin/net_plugin.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8446c09b2ce..24d9a625c60 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2690,6 +2690,7 @@ namespace eosio { }); } +<<<<<<< HEAD <<<<<<< HEAD void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { controller& cc = chain_plug->chain(); @@ -2702,10 +2703,17 @@ namespace eosio { block_id_type blk_id = msg ? msg->id() : block_id_type(); uint32_t blk_num = msg ? 
msg->block_num() : 0; >>>>>>> Test of multi-threaded reading +======= + void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { + controller& cc = chain_plug->chain(); + block_id_type blk_id = msg->id(); + uint32_t blk_num = msg->block_num(); +>>>>>>> Revert unneeded changes to handle_message fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); try { +<<<<<<< HEAD <<<<<<< HEAD if( cc.fetch_block_by_id(blk_id) ) { sync_master->recv_block(c, blk_id, blk_num); @@ -2716,6 +2724,9 @@ namespace eosio { ======= ======= if( msg && cc.fetch_block_by_id(blk_id)) { +======= + if( cc.fetch_block_by_id(blk_id) ) { +>>>>>>> Revert unneeded changes to handle_message sync_master->recv_block(c, blk_id, blk_num); return; } From 8f47db7641a0900803d1921338ba1b21bb6764ea Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 18:26:06 -0600 Subject: [PATCH 0106/1648] Make delay_timer thread safe --- plugins/net_plugin/net_plugin.cpp | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 24d9a625c60..ba9ee05358a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -520,11 +520,15 @@ namespace eosio { queued_buffer buffer_queue; std::atomic reads_in_flight{0}; +<<<<<<< HEAD <<<<<<< HEAD std::atomic trx_in_progress_size{0}; ======= uint32_t trx_in_progress_size = 0; >>>>>>> Test of multi-threaded reading +======= + std::atomic trx_in_progress_size{0}; +>>>>>>> Make delay_timer thread safe fc::sha256 node_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; @@ -880,13 +884,19 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> Make delay_timer thread safe { std::lock_guard g( read_delay_timer_mutex ); if( read_delay_timer ) read_delay_timer->cancel(); } +<<<<<<< HEAD ======= if( read_delay_timer ) read_delay_timer->cancel(); >>>>>>> Test of multi-threaded reading +======= +>>>>>>> Make delay_timer thread safe } void connection::txn_send_pending(const vector& ids) { @@ -2116,13 +2126,16 @@ namespace eosio { return minimum_read - bytes_transferred; } }; -/* + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) { // too much queued up, reschedule <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> Make delay_timer thread safe uint32_t write_queue_size = conn->buffer_queue.write_queue_size(); uint32_t trx_in_progress_size = conn->trx_in_progress_size; uint32_t reads_in_flight = conn->reads_in_flight; @@ -2130,12 +2143,15 @@ namespace eosio { peer_wlog( conn, "write_queue full ${s} bytes", ("s", write_queue_size) ); } else if( reads_in_flight > def_max_reads_in_flight ) { peer_wlog( conn, "max reads in flight ${s}", ("s", reads_in_flight) ); +<<<<<<< HEAD ======= if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); } else if( conn->reads_in_flight > def_max_reads_in_flight ) { peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight.load()) ); >>>>>>> Test of multi-threaded reading +======= +>>>>>>> Make delay_timer thread safe } else { peer_wlog( conn, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); } @@ 
-2144,7 +2160,11 @@ namespace eosio { trx_in_progress_size > 2*def_max_trx_in_progress_size ) { fc_wlog( logger, "queues over full, giving up on connection" ); +<<<<<<< HEAD app().post( priority::medium, [weak_conn]() { +======= + app().post( priority::medium, [this, weak_conn]() { +>>>>>>> Make delay_timer thread safe auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); @@ -2163,7 +2183,7 @@ namespace eosio { } ) ); return; } -*/ + ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2248,6 +2268,7 @@ namespace eosio { } if( close_connection ) { +<<<<<<< HEAD <<<<<<< HEAD app().post( priority::medium, [this, weak_conn]() { ======= @@ -2256,6 +2277,8 @@ namespace eosio { app().post( priority::medium, "close conn", [this, weak_conn]() { >>>>>>> Test of multi-threaded reading ======= +======= +>>>>>>> Make delay_timer thread safe app().post( priority::medium, [this, weak_conn]() { >>>>>>> Remove descriptions of tasks as not merged into develop yet auto conn = weak_conn.lock(); From de7418f4a1d0230ec312f8025111a6a770e4e179 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:08:40 -0600 Subject: [PATCH 0107/1648] Test of multi-threaded reading --- plugins/net_plugin/net_plugin.cpp | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index ba9ee05358a..aa4d5880b07 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -77,6 +77,12 @@ namespace eosio { } }; + struct block_greater { + bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { + return lhs->block_num() > rhs->block_num(); + } + }; + typedef multi_index_container< node_transaction_state, indexed_by< @@ -760,6 +766,8 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); + + std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -2126,7 +2134,7 @@ namespace eosio { return minimum_read - bytes_transferred; } }; - +/* if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) @@ -2183,7 +2191,7 @@ namespace eosio { } ) ); return; } - +*/ ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2744,6 +2752,7 @@ namespace eosio { } <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= ======= if( msg && cc.fetch_block_by_id(blk_id)) { @@ -2754,6 +2763,8 @@ namespace eosio { return; } <<<<<<< HEAD +>>>>>>> Test of multi-threaded reading +======= >>>>>>> Test of multi-threaded reading signed_block_ptr prev = msg ? 
cc.fetch_block_by_id( msg->previous ) : msg; if( prev == nullptr ){ //&& sync_master->is_active(c) ) { @@ -2769,6 +2780,7 @@ namespace eosio { blk_num = msg->block_num(); connection_wptr weak = c; <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD app().post(priority::medium, [this, weak](){ ======= @@ -2777,6 +2789,9 @@ namespace eosio { ======= app().post(priority::medium, [this, weak](){ >>>>>>> Remove descriptions of tasks as not merged into develop yet +======= + app().post(priority::medium, "re post blk", [this, weak](){ +>>>>>>> Test of multi-threaded reading connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); }); @@ -2786,6 +2801,7 @@ namespace eosio { connection_wptr weak = c; <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD app().post( priority::medium, [this, weak]() { ======= @@ -2794,6 +2810,9 @@ namespace eosio { ======= app().post( priority::medium, [this, weak]() { >>>>>>> Remove descriptions of tasks as not merged into develop yet +======= + app().post( priority::medium, "re post blk", [this, weak]() { +>>>>>>> Test of multi-threaded reading connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2806,6 +2825,7 @@ namespace eosio { connection_wptr weak = c; <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD app().post( priority::medium, [this, weak]() { ======= @@ -2814,6 +2834,9 @@ namespace eosio { ======= app().post( priority::medium, [this, weak]() { >>>>>>> Remove descriptions of tasks as not merged into develop yet +======= + app().post( priority::medium, "re post blk", [this, weak]() { +>>>>>>> Test of multi-threaded reading connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2822,12 +2845,15 @@ namespace eosio { } } <<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. ======= >>>>>>> Test of multi-threaded reading ======= >>>>>>> Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. +======= +>>>>>>> Test of multi-threaded reading } catch( ...) { // should this even be caught? 
fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 8bd71aa32ae4c1c0a08c17caddf7a262bbb96bd9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 08:16:39 -0600 Subject: [PATCH 0108/1648] Remove descriptions of tasks as not merged into develop yet --- plugins/net_plugin/net_plugin.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index aa4d5880b07..feabdadd631 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2278,6 +2278,10 @@ namespace eosio { if( close_connection ) { <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD +======= + connection_wptr weak_conn = conn; +>>>>>>> Remove descriptions of tasks as not merged into develop yet app().post( priority::medium, [this, weak_conn]() { ======= connection_wptr weak_conn = conn; @@ -2781,6 +2785,7 @@ namespace eosio { connection_wptr weak = c; <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD app().post(priority::medium, [this, weak](){ ======= @@ -2792,6 +2797,9 @@ namespace eosio { ======= app().post(priority::medium, "re post blk", [this, weak](){ >>>>>>> Test of multi-threaded reading +======= + app().post(priority::medium, [this, weak](){ +>>>>>>> Remove descriptions of tasks as not merged into develop yet connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); }); @@ -2802,6 +2810,7 @@ namespace eosio { connection_wptr weak = c; <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD app().post( priority::medium, [this, weak]() { ======= @@ -2813,6 +2822,9 @@ namespace eosio { ======= app().post( priority::medium, "re post blk", [this, weak]() { >>>>>>> Test of multi-threaded reading +======= + app().post( priority::medium, [this, weak]() { +>>>>>>> Remove descriptions of tasks as not merged into develop yet connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); @@ -2826,6 +2838,7 @@ namespace eosio { connection_wptr weak = c; <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD app().post( priority::medium, [this, weak]() { ======= @@ -2837,6 +2850,9 @@ namespace eosio { ======= app().post( priority::medium, "re post blk", [this, weak]() { >>>>>>> Test of multi-threaded reading +======= + app().post( priority::medium, [this, weak]() { +>>>>>>> Remove descriptions of tasks as not merged into develop yet connection_ptr c = weak.lock(); if( c ) handle_message( c, signed_block_ptr() ); } ); From c1b652dec0353d30e001998a7e50df113dfb8d8c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 11:45:04 -0600 Subject: [PATCH 0109/1648] Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. 
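With appbase's main queue ordering posted tasks by priority, and keeping FIFO order among tasks of equal priority, the net_plugin no longer needs to buffer incoming blocks in its own priority queue; it can simply post them. The sketch below only illustrates the FIFO-within-priority idea and is not the appbase implementation; the names fifo_priority_queue, post and execute_one are made up for the example. Each task is paired with a monotonically increasing sequence number that breaks ties between equal priorities.

    // Illustrative only -- not the appbase code. Higher priority runs first;
    // among equal priorities the lower sequence number (posted earlier) wins.
    #include <cstdint>
    #include <functional>
    #include <queue>
    #include <vector>

    class fifo_priority_queue {
       struct entry {
          int                   priority;
          uint64_t              seq;
          std::function<void()> task;
       };
       struct entry_compare {
          bool operator()( const entry& lhs, const entry& rhs ) const {
             if( lhs.priority != rhs.priority ) return lhs.priority < rhs.priority;
             return lhs.seq > rhs.seq; // earlier post wins among equal priorities
          }
       };
       uint64_t next_seq = 0;
       std::priority_queue<entry, std::vector<entry>, entry_compare> tasks;

    public:
       void post( int priority, std::function<void()> task ) {
          tasks.push( entry{ priority, next_seq++, std::move( task ) } );
       }
       bool execute_one() {
          if( tasks.empty() ) return false;
          auto task = tasks.top().task;
          tasks.pop();
          task();
          return true;
       }
    };

Without the sequence number, std::priority_queue gives no ordering guarantee among equal-priority tasks, so two blocks posted at the same priority could be dispatched out of order.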
--- plugins/net_plugin/net_plugin.cpp | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index feabdadd631..16c0a83eef5 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -77,12 +77,6 @@ namespace eosio { } }; - struct block_greater { - bool operator()( const std::shared_ptr& lhs, const std::shared_ptr& rhs ) const { - return lhs->block_num() > rhs->block_num(); - } - }; - typedef multi_index_container< node_transaction_state, indexed_by< @@ -766,8 +760,6 @@ namespace eosio { void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); void recv_notice(const connection_ptr& c, const notice_message& msg); - - std::priority_queue, std::deque>, block_greater> incoming_blocks; }; class dispatch_manager { @@ -2767,6 +2759,7 @@ namespace eosio { return; } <<<<<<< HEAD +<<<<<<< HEAD >>>>>>> Test of multi-threaded reading ======= >>>>>>> Test of multi-threaded reading @@ -2870,6 +2863,8 @@ namespace eosio { >>>>>>> Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. ======= >>>>>>> Test of multi-threaded reading +======= +>>>>>>> Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. } catch( ...) { // should this even be caught? fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); From 76768c8c2ef06d2060bc7c81e886fa8ad14f2cb0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 18:26:06 -0600 Subject: [PATCH 0110/1648] Make delay_timer thread safe --- plugins/net_plugin/net_plugin.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 16c0a83eef5..d7d17b585c7 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2126,7 +2126,7 @@ namespace eosio { return minimum_read - bytes_transferred; } }; -/* + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || conn->reads_in_flight > def_max_reads_in_flight || conn->trx_in_progress_size > def_max_trx_in_progress_size ) @@ -2183,7 +2183,7 @@ namespace eosio { } ) ); return; } -*/ + ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, @@ -2271,9 +2271,12 @@ namespace eosio { <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= connection_wptr weak_conn = conn; >>>>>>> Remove descriptions of tasks as not merged into develop yet +======= +>>>>>>> Make delay_timer thread safe app().post( priority::medium, [this, weak_conn]() { ======= connection_wptr weak_conn = conn; From 223c2d5855d530e57dd8047afe74dd32a6f0cdfd Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 2 Mar 2019 11:22:30 -0600 Subject: [PATCH 0111/1648] Remove unneeded access to atomic --- plugins/net_plugin/net_plugin.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d7d17b585c7..0cb134be77c 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2104,6 +2104,7 @@ namespace eosio { } connection_wptr weak_conn = conn; +<<<<<<< HEAD <<<<<<< HEAD std::size_t minimum_read = std::atomic_exchangeoutstanding_read_bytes.load())>( &conn->outstanding_read_bytes, 0 ); @@ -2111,6 +2112,11 @@ 
namespace eosio { ======= std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? conn->outstanding_read_bytes.load() : message_header_size; >>>>>>> Test of multi-threaded reading +======= + std::size_t minimum_read = + std::atomic_exchangeoutstanding_read_bytes.load())>( &conn->outstanding_read_bytes, 0 ); + minimum_read = minimum_read != 0 ? minimum_read : message_header_size; +>>>>>>> Remove unneeded access to atomic if (use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; @@ -2196,9 +2202,12 @@ namespace eosio { --conn->reads_in_flight; <<<<<<< HEAD +<<<<<<< HEAD ======= conn->outstanding_read_bytes = 0; >>>>>>> Test of multi-threaded reading +======= +>>>>>>> Remove unneeded access to atomic bool close_connection = false; try { From e03e1fb10fdaf2d9146b2991fb20fad4edd9f03b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:16:54 -0500 Subject: [PATCH 0112/1648] Add trx id to log message --- plugins/net_plugin/net_plugin.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0cb134be77c..915f7774a26 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2711,7 +2711,12 @@ namespace eosio { auto trace = result.get(); if (!trace->except) { fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); +<<<<<<< HEAD accepted = true; +======= + this->dispatcher->bcast_transaction(ptrx); + return; +>>>>>>> Add trx id to log message } if( !accepted ) { From a5567cded12caba5b3b134e93e365d705c703e37 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 14:58:27 -0500 Subject: [PATCH 0113/1648] Remove unneeded ack of transactions. Already processed by handle_message of packed_transaction_ptr --- plugins/net_plugin/net_plugin.cpp | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 915f7774a26..23ddfe2fc36 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -159,8 +159,6 @@ namespace eosio { bool use_socket_read_watermark = false; - channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; @@ -190,7 +188,6 @@ namespace eosio { void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); void accepted_block(const block_state_ptr&); - void transaction_ack(const std::pair&); bool is_valid( const handshake_message &msg); @@ -3078,17 +3075,6 @@ namespace eosio { dispatcher->bcast_block(block); } - void net_plugin_impl::transaction_ack(const std::pair& results) { - const auto& id = results.second->id; - if (results.first) { - fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); - dispatcher->rejected_transaction(id); - } else { - fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); - dispatcher->bcast_transaction(results.second); - } - } - bool net_plugin_impl::authenticate_peer(const handshake_message& msg) const { if(allowed_connections == None) return false; @@ -3404,8 +3390,6 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } - my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); - my->db_read_mode = cc.get_read_mode(); if( 
my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; From 4a5aa34ab92aceede64153c1602cdafbbf60d8a8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 15:51:47 -0500 Subject: [PATCH 0114/1648] Revert "Remove unneeded ack of transactions. Already processed by handle_message of packed_transaction_ptr" This reverts commit 7f340f7347da146b360251214fbf655d11a955cd. --- plugins/net_plugin/net_plugin.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 23ddfe2fc36..915f7774a26 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -159,6 +159,8 @@ namespace eosio { bool use_socket_read_watermark = false; + channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + uint16_t thread_pool_size = 4; optional thread_pool; std::shared_ptr server_ioc; @@ -188,6 +190,7 @@ namespace eosio { void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); void accepted_block(const block_state_ptr&); + void transaction_ack(const std::pair&); bool is_valid( const handshake_message &msg); @@ -3075,6 +3078,17 @@ namespace eosio { dispatcher->bcast_block(block); } + void net_plugin_impl::transaction_ack(const std::pair& results) { + const auto& id = results.second->id; + if (results.first) { + fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); + dispatcher->rejected_transaction(id); + } else { + fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); + dispatcher->bcast_transaction(results.second); + } + } + bool net_plugin_impl::authenticate_peer(const handshake_message& msg) const { if(allowed_connections == None) return false; @@ -3390,6 +3404,8 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } + my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; From 8ce8f45e79acff196ad1d2fa2b9e65bbdf7b4093 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 20:25:24 -0500 Subject: [PATCH 0115/1648] force a build --- plugins/net_plugin/net_plugin.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 915f7774a26..692cbeb76f4 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3011,7 +3011,12 @@ namespace eosio { auto &stale_blk = c->blk_state.get(); stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } +<<<<<<< HEAD fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); +======= + fc_dlog( logger, "expire_txns ${n}us size ${s} removed ${r}", + ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); +>>>>>>> force a build } void net_plugin_impl::expire_local_txns() { From 47c1ce83a1cd5f41a9378350846de22c4d30593e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 6 Mar 2019 13:55:44 -0500 Subject: [PATCH 0116/1648] Move more of incoming transaction processing to thread pool --- plugins/net_plugin/net_plugin.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 
692cbeb76f4..a476fabb53e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -696,6 +696,7 @@ namespace eosio { } void operator()( packed_transaction&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); +<<<<<<< HEAD <<<<<<< HEAD impl.handle_message( c, ptr ); ======= @@ -705,6 +706,9 @@ namespace eosio { if( c) impl->handle_message( c, ptr ); }); >>>>>>> Test of multi-threaded reading +======= + impl.handle_message( c, ptr ); +>>>>>>> Move more of incoming transaction processing to thread pool } template @@ -2711,12 +2715,16 @@ namespace eosio { auto trace = result.get(); if (!trace->except) { fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); +<<<<<<< HEAD <<<<<<< HEAD accepted = true; ======= this->dispatcher->bcast_transaction(ptrx); return; >>>>>>> Add trx id to log message +======= + accepted = true; +>>>>>>> Move more of incoming transaction processing to thread pool } if( !accepted ) { @@ -3011,12 +3019,16 @@ namespace eosio { auto &stale_blk = c->blk_state.get(); stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } +<<<<<<< HEAD <<<<<<< HEAD fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); ======= fc_dlog( logger, "expire_txns ${n}us size ${s} removed ${r}", ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); >>>>>>> force a build +======= + fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); +>>>>>>> Move more of incoming transaction processing to thread pool } void net_plugin_impl::expire_local_txns() { From ab91589cd32e1a0eea40203fb3647ef14214c05d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 7 Mar 2019 09:10:13 -0500 Subject: [PATCH 0117/1648] Use unique_lock instead of lock_guard to clean up code --- plugins/net_plugin/net_plugin.cpp | 119 +++++++++++++++--------------- 1 file changed, 59 insertions(+), 60 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a476fabb53e..f4e80ae6231 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -538,7 +538,7 @@ namespace eosio { uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; - std::mutex read_delay_timer_mutex; + std::mutex read_delay_timer_mtx; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; @@ -889,6 +889,7 @@ namespace eosio { cancel_wait(); <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> Make delay_timer thread safe { @@ -901,21 +902,25 @@ namespace eosio { >>>>>>> Test of multi-threaded reading ======= >>>>>>> Make delay_timer thread safe +======= + + std::lock_guard g( read_delay_timer_mtx ); + if( read_delay_timer ) read_delay_timer->cancel(); +>>>>>>> Use unique_lock instead of lock_guard to clean up code } void connection::txn_send_pending(const vector& ids) { const std::set known_ids(ids.cbegin(), ids.cend()); my_impl->expire_local_txns(); vector>> trx_to_send; - { - std::lock_guard g( my_impl->local_txns_mtx ); - for( auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ) { - const bool found = known_ids.find( tx->id ) != known_ids.cend(); - if( !found ) { - trx_to_send.emplace_back( tx->serialized_txn ); - } + std::unique_lock g( my_impl->local_txns_mtx ); + for( auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ) { + const bool found = known_ids.find( tx->id ) != known_ids.cend(); + if( !found ) { + 
trx_to_send.emplace_back( tx->serialized_txn ); } } + g.unlock(); for( const auto& t : trx_to_send ) { queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); } @@ -923,15 +928,14 @@ namespace eosio { void connection::txn_send(const vector& ids) { vector>> trx_to_send; - { - std::lock_guard g( my_impl->local_txns_mtx ); - for( const auto& t : ids ) { - auto tx = my_impl->local_txns.get().find( t ); - if( tx != my_impl->local_txns.end()) { - trx_to_send.emplace_back( tx->serialized_txn ); - } + std::unique_lock g( my_impl->local_txns_mtx ); + for( const auto& t : ids ) { + auto tx = my_impl->local_txns.get().find( t ); + if( tx != my_impl->local_txns.end()) { + trx_to_send.emplace_back( tx->serialized_txn ); } } + g.unlock(); for( const auto& t : trx_to_send ) { queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); } @@ -1546,10 +1550,9 @@ namespace eosio { notice_message note; note.known_blocks.mode = none; note.known_trx.mode = catch_up; - { - std::lock_guard g( my_impl->local_txns_mtx ); - note.known_trx.pending = my_impl->local_txns.size(); - } + std::unique_lock g( my_impl->local_txns_mtx ); + note.known_trx.pending = my_impl->local_txns.size(); + g.unlock(); c->enqueue( note ); return; } @@ -1781,13 +1784,12 @@ namespace eosio { } received_transactions.erase(range.first, range.second); - { - std::lock_guard g( my_impl->local_txns_mtx ); - if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found - fc_dlog( logger, "found trxid in local_trxs" ); - return; - } + std::unique_lock g( my_impl->local_txns_mtx ); + if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found + fc_dlog( logger, "found trxid in local_trxs" ); + return; } + g.unlock(); time_point_sec trx_expiration = ptrx->packed_trx->expiration(); const packed_transaction& trx = *ptrx->packed_trx; @@ -1795,10 +1797,9 @@ namespace eosio { auto buff = create_send_buffer( trx ); node_transaction_state nts = {id, trx_expiration, 0, buff}; - { - std::lock_guard g( my_impl->local_txns_mtx ); - my_impl->local_txns.insert( std::move( nts )); - } + g.lock(); + my_impl->local_txns.insert( std::move( nts )); + g.unlock(); my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { if( skips.find(c) != skips.end() || c->syncing ) { @@ -2182,7 +2183,7 @@ namespace eosio { }); return; } - std::lock_guard g( conn->read_delay_timer_mutex ); + std::lock_guard g( conn->read_delay_timer_mtx ); if( !conn->read_delay_timer ) return; conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); conn->read_delay_timer->async_wait( @@ -2693,13 +2694,12 @@ namespace eosio { auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; - { - std::lock_guard g( local_txns_mtx ); - if( local_txns.get().find( tid ) != local_txns.end()) { - fc_dlog( logger, "got a duplicate transaction - dropping" ); - return; - } + std::unique_lock g( local_txns_mtx ); + if( local_txns.get().find( tid ) != local_txns.end()) { + fc_dlog( logger, "got a duplicate transaction - dropping" ); + return; } + g.unlock(); connection_wptr weak_ptr = c; app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, tid](){ auto c = weak_ptr.lock(); @@ -2925,21 +2925,20 @@ namespace eosio { update_block_num ubn(blk_num); if( reason == no_reason ) { - { - std::lock_guard g( local_txns_mtx ); - for( const auto& recpt : msg->transactions ) { - auto id = (recpt.trx.which() == 0) ? 
recpt.trx.get() - : recpt.trx.get().id(); - auto ltx = local_txns.get().find( id ); - if( ltx != local_txns.end()) { - local_txns.modify( ltx, ubn ); - } - auto ctx = c->trx_state.get().find( id ); - if( ctx != c->trx_state.end()) { - c->trx_state.modify( ctx, ubn ); - } + std::unique_lock g( local_txns_mtx ); + for( const auto& recpt : msg->transactions ) { + auto id = (recpt.trx.which() == 0) ? recpt.trx.get() + : recpt.trx.get().id(); + auto ltx = local_txns.get().find( id ); + if( ltx != local_txns.end()) { + local_txns.modify( ltx, ubn ); + } + auto ctx = c->trx_state.get().find( id ); + if( ctx != c->trx_state.end()) { + c->trx_state.modify( ctx, ubn ); } } + g.unlock(); sync_master->recv_block(c, blk_id, blk_num); } else { @@ -3036,19 +3035,19 @@ namespace eosio { uint32_t lib = cc.last_irreversible_block_num(); size_t start_size = 0, end_size = 0; - { - std::lock_guard g( local_txns_mtx ); + std::unique_lock g( local_txns_mtx ); - start_size = local_txns.size(); - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec( 0 )); - auto ex_up = old.upper_bound( time_point::now()); - old.erase( ex_lo, ex_up ); + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 )); + auto ex_up = old.upper_bound( time_point::now()); + old.erase( ex_lo, ex_up ); - auto& stale = local_txns.get(); - stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib )); - end_size = local_txns.size(); - } + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib )); + end_size = local_txns.size(); + + g.unlock(); fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)("r", start_size - end_size) ); } From 679852fd1225dd52245eaa4863e4aa70d5cf85bd Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 8 Mar 2019 09:34:05 -0500 Subject: [PATCH 0118/1648] Move creation of future outside post to prevent wait on future right after creation --- plugins/producer_plugin/producer_plugin.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 217f931a532..569fce37dd8 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -352,10 +352,10 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); const auto max_trx_time_ms = _max_transaction_time_ms.load(); fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); + // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress + transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage ); auto& tp = *_thread_pool; - boost::asio::post( tp, [self = this, &chain, max_trx_cpu_usage, trx, persist_until_expired, next]() { - // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress - transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage ); + boost::asio::post( tp, [self = this, trx, persist_until_expired, next]() { if( trx->signing_keys_future.valid() ) trx->signing_keys_future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { From 050e84aad9e1b5a59a3a8260993deef7ca97b4c7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 11 Mar 2019 11:36:25 -0500 Subject: [PATCH 0119/1648] Test run with producer thread pool instead of chain controller thread pool --- plugins/producer_plugin/producer_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 569fce37dd8..50f6c531d0d 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -352,9 +352,9 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); const auto max_trx_time_ms = _max_transaction_time_ms.load(); fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); - // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress - transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage ); auto& tp = *_thread_pool; + // use chain thread pool for sig recovery so that future wait below is not in the same thread pool preventing progress + transaction_metadata::create_signing_keys_future( trx, tp, chain.get_chain_id(), max_trx_cpu_usage ); boost::asio::post( tp, [self = this, trx, persist_until_expired, next]() { if( trx->signing_keys_future.valid() ) trx->signing_keys_future.wait(); From 48024faccfea29e0d2bc7d2babbbd99ded8e1ac6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 11 Mar 2019 15:57:18 -0500 Subject: [PATCH 0120/1648] Combine received_blocks and blck_state. Make blck_state thread safe and use it for short-cut out when receiving block. 
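This change folds the per-connection blk_state index and the dispatch_manager's received_blocks multimap into one index keyed by (block id, connection id) and guarded by a mutex, so any net thread can record or query which peers already have a block. As a rough illustration of that idea only: the function names add_peer_block, peer_has_block and have_block mirror the patch, but the container here is simplified (the real code keeps a boost::multi_index with a composite key); a mutex plus an ordered set of (id, connection id) pairs is enough to show the locking pattern.

    // Simplified sketch of the thread-safe (block id, connection id) tracking;
    // the patch itself uses a boost::multi_index with a composite key.
    #include <cstdint>
    #include <mutex>
    #include <set>
    #include <utility>

    template<typename BlockId>
    class peer_block_tracker {
       std::mutex                             mtx;
       std::set<std::pair<BlockId, uint32_t>> seen; // (block id, connection id)

    public:
       // true if this connection had not been recorded for this block yet
       bool add_peer_block( const BlockId& id, uint32_t connection_id ) {
          std::lock_guard<std::mutex> g( mtx );
          return seen.emplace( id, connection_id ).second;
       }
       bool peer_has_block( const BlockId& id, uint32_t connection_id ) {
          std::lock_guard<std::mutex> g( mtx );
          return seen.count( std::make_pair( id, connection_id ) ) != 0;
       }
       // true if any connection has reported the block
       bool have_block( const BlockId& id ) {
          std::lock_guard<std::mutex> g( mtx );
          auto it = seen.lower_bound( std::make_pair( id, uint32_t( 0 ) ) );
          return it != seen.end() && it->first == id;
       }
    };

The composite key in the patch buys the same two lookups (exact (id, connection) match and any-connection match), plus a secondary by-block-number index used to expire entries at or below the last irreversible block.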
--- plugins/net_plugin/net_plugin.cpp | 135 ++++++++++++++++-------------- 1 file changed, 70 insertions(+), 65 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index f4e80ae6231..98606c73929 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -110,6 +110,7 @@ namespace eosio { uint32_t max_client_count = 0; uint32_t max_nodes_per_host = 1; uint32_t num_clients = 0; + uint32_t current_connection_id = 0; vector supplied_peers; vector allowed_peers; ///< peer keys allowed to connect @@ -363,20 +364,24 @@ namespace eosio { > transaction_state_index; - /** - * - */ struct peer_block_state { block_id_type id; uint32_t block_num; + uint32_t connection_id; }; typedef multi_index_container< eosio::peer_block_state, indexed_by< - ordered_unique< tag, member, sha256_less >, - ordered_unique< tag, member > - > + ordered_unique< tag, + composite_key< peer_block_state, + member, + member + >, + composite_key_compare< sha256_less, std::less > + >, + ordered_non_unique< tag, member > + > > peer_block_state_index; @@ -498,7 +503,6 @@ namespace eosio { ~connection(); void initialize(); - peer_block_state_index blk_state; transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive @@ -530,6 +534,7 @@ namespace eosio { std::atomic trx_in_progress_size{0}; >>>>>>> Make delay_timer thread safe fc::sha256 node_id; + const uint32_t connection_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; int16_t sent_handshake_count = 0; @@ -631,9 +636,6 @@ namespace eosio { bool to_sync_queue = false); void do_queue_write(int priority); - bool add_peer_block(const peer_block_state& pbs); - bool peer_has_block(const block_id_type& blkid); - fc::optional _logger_variant; const fc::variant_object& get_logger_variant() { if (!_logger_variant) { @@ -767,10 +769,11 @@ namespace eosio { }; class dispatch_manager { - public: - std::multimap received_blocks; + std::mutex blk_state_mtx; + peer_block_state_index blk_state; std::multimap received_transactions; + public: void bcast_transaction(const transaction_metadata_ptr& trx); void rejected_transaction(const transaction_id_type& msg); void bcast_block(const block_state_ptr& bs); @@ -782,18 +785,22 @@ namespace eosio { void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); void retry_fetch(const connection_ptr& conn); + + bool add_peer_block(const peer_block_state& pbs); + bool peer_has_block(const block_id_type& blkid, uint32_t connection_id); + bool have_block(const block_id_type& blkid); }; //--------------------------------------------------------------------------- connection::connection( string endpoint ) - : blk_state(), - trx_state(), + : trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), strand( app().get_io_service() ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), + connection_id( ++my_impl->current_connection_id ), last_handshake_recv(), last_handshake_sent(), sent_handshake_count(0), @@ -813,13 +820,13 @@ namespace eosio { } connection::connection( socket_ptr s ) - : blk_state(), - trx_state(), + : trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), strand( app().get_io_service() ), socket( s ), node_id(), + connection_id( ++my_impl->current_connection_id ), last_handshake_recv(), last_handshake_sent(), sent_handshake_count(0), @@ -859,7 +866,6 @@ namespace eosio { void 
connection::reset() { peer_requested.reset(); - blk_state.clear(); trx_state.clear(); } @@ -997,7 +1003,7 @@ namespace eosio { signed_block_ptr b = cc.fetch_block_by_id(blkid); if(b) { fc_dlog(logger,"found block for id at num ${n}",("n",b->block_num())); - add_peer_block({blkid, block_header::num_from_id(blkid)}); + my_impl->dispatcher->add_peer_block({blkid, block_header::num_from_id(blkid), connection_id}); enqueue_block( b ); } else { fc_ilog( logger, "fetch block by id returned null, id ${id} for ${p}", @@ -1319,20 +1325,6 @@ namespace eosio { sync_wait(); } - bool connection::add_peer_block(const peer_block_state& entry) { - auto bptr = blk_state.get().find(entry.id); - bool added = (bptr == blk_state.end()); - if (added){ - blk_state.insert(entry); - } - return added; - } - - bool connection::peer_has_block( const block_id_type& blkid ) { - auto blk_itr = blk_state.get().find(blkid); - return blk_itr != blk_state.end(); - } - //----------------------------------------------------------- sync_manager::sync_manager( uint32_t req_span ) @@ -1710,26 +1702,48 @@ namespace eosio { //------------------------------------------------------------------------ - void dispatch_manager::bcast_block(const block_state_ptr& bs) { - std::set skips; - auto range = received_blocks.equal_range(bs->id); - for (auto org = range.first; org != range.second; ++org) { - skips.insert(org->second); + bool dispatch_manager::add_peer_block(const peer_block_state& entry) { + std::lock_guard g(blk_state_mtx); + auto bptr = blk_state.get().find(std::make_tuple(std::ref(entry.id), entry.connection_id)); + bool added = (bptr == blk_state.end()); + if (added){ + blk_state.insert(entry); } - received_blocks.erase(range.first, range.second); + return added; + } + bool dispatch_manager::peer_has_block( const block_id_type& blkid, uint32_t connection_id ) { + std::lock_guard g(blk_state_mtx); + auto blk_itr = blk_state.get().find(std::make_tuple(std::ref(blkid), connection_id)); + return blk_itr != blk_state.end(); + } + + bool dispatch_manager::have_block( const block_id_type& blkid ) { + std::lock_guard g(blk_state_mtx); + auto blk_itr = blk_state.get().find( blkid ); + return blk_itr != blk_state.end(); + } + + void dispatch_manager::expire_blocks( uint32_t lib_num ) { + std::lock_guard g(blk_state_mtx); + auto& stale_blk = blk_state.get(); + stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib_num) ); + } + + void dispatch_manager::bcast_block(const block_state_ptr& bs) { uint32_t bnum = bs->block_num; peer_block_state pbstate{bs->id, bnum}; fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); std::shared_ptr> send_buffer; for( auto& cp : my_impl->connections ) { - if( skips.find( cp ) != skips.end() || !cp->current() ) { + if( !cp->current() ) { continue; } bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; if( !has_block ) { - if( !cp->add_peer_block( pbstate ) ) { + pbstate.connection_id = cp->connection_id; + if( !add_peer_block( pbstate ) ) { continue; } if( !send_buffer ) { @@ -1743,7 +1757,8 @@ namespace eosio { } void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { - received_blocks.insert(std::make_pair(id, c)); + peer_block_state pbstate{id, bnum, c->connection_id}; + add_peer_block( pbstate ); if (c && c->last_req && c->last_req->req_blocks.mode != none && @@ -1758,20 +1773,6 @@ namespace eosio { void dispatch_manager::rejected_block(const block_id_type& id) { fc_dlog( logger, "rejected block ${id}", ("id", id) ); - 
auto range = received_blocks.equal_range(id); - received_blocks.erase(range.first, range.second); - } - - void dispatch_manager::expire_blocks( uint32_t lib_num ) { - for( auto i = received_blocks.begin(); i != received_blocks.end(); ) { - const block_id_type& blk_id = i->first; - uint32_t blk_num = block_header::num_from_id( blk_id ); - if( blk_num <= lib_num ) { - i = received_blocks.erase( i ); - } else { - ++i; - } - } } void dispatch_manager::bcast_transaction(const transaction_metadata_ptr& ptrx) { @@ -1860,7 +1861,7 @@ namespace eosio { try { b = cc.fetch_block_by_id(blkid); // if exists if(b) { - c->add_peer_block({blkid, block_header::num_from_id(blkid)}); + add_peer_block({blkid, block_header::num_from_id(blkid), c->connection_id}); } } catch (const assert_exception &ex) { fc_ilog( logger, "caught assert on fetch_block_by_id, ${ex}",("ex",ex.what()) ); @@ -1916,7 +1917,7 @@ namespace eosio { sendit = trx != conn->trx_state.end(); } else { - sendit = conn->peer_has_block(bid); + sendit = peer_has_block(bid, c->connection_id); } if (sendit) { conn->enqueue(*c->last_req); @@ -2324,7 +2325,6 @@ namespace eosio { bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { try { // if next message is a block we already have, exit early -/* auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); @@ -2332,16 +2332,23 @@ namespace eosio { block_header bh; fc::raw::unpack( peek_ds, bh ); - controller& cc = chain_plug->chain(); block_id_type blk_id = bh.id(); - uint32_t blk_num = bh.block_num(); - if( cc.fetch_block_by_id( blk_id ) ) { - sync_master->recv_block( conn, blk_id, blk_num ); + if( dispatcher->have_block( blk_id ) ) { + connection_wptr weak = conn; + app().post(priority::low, + [dispatcher = dispatcher.get(), sync_master = sync_master.get(), weak{std::move(weak)}, blk_id] { + connection_ptr c = weak.lock(); + if(c) { + auto blk_num = block_header::num_from_id(blk_id); + dispatcher->recv_block(c, blk_id, blk_num); + sync_master->recv_block( c, blk_id, blk_num ); + } + }); conn->pending_message_buffer.advance_read_ptr( message_length ); return true; } } -*/ + auto ds = conn->pending_message_buffer.create_datastream(); net_message msg; fc::raw::unpack( ds, msg ); @@ -3015,8 +3022,6 @@ namespace eosio { stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); auto &stale_txn_e = c->trx_state.get(); stale_txn_e.erase(stale_txn_e.lower_bound(time_point_sec()), stale_txn_e.upper_bound(time_point::now())); - auto &stale_blk = c->blk_state.get(); - stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); } <<<<<<< HEAD <<<<<<< HEAD From cf6e9ca9685b1ec60c296af5fb87f141d1f27267 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 13 Mar 2019 13:27:28 -0500 Subject: [PATCH 0121/1648] Avoid tying up a thread to wait on transaction future. Also removes likely safe but undefined behavior. 
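Rather than posting a task whose only job is to block on signing_keys_future.wait(), this change hands a next() continuation to create_signing_keys_future, so the follow-up work is triggered from inside the worker that performs the signature recovery: no thread is parked waiting on the future, and the future itself is only touched from the main application thread. A minimal sketch of the continuation pattern follows, with std::async standing in for the thread pool and made-up names (async_with_continuation, work, next).

    // Sketch only: run `work` on a worker, invoke `next` when it finishes,
    // and still expose the result through a future -- nobody calls wait().
    #include <future>
    #include <utility>

    template<typename Work, typename Next>
    auto async_with_continuation( Work work, Next next ) {
       return std::async( std::launch::async,
          [work = std::move( work ), next = std::move( next )]() mutable {
             auto result = work();  // e.g. the expensive signature recovery
             next();                // kick off the follow-up without blocking a thread
             return result;
          } );
    }

In the patch, next() posts back to the application thread, which both keeps the future on a single thread and guarantees the assignment of the future has completed before anyone reads it.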
--- .../eosio/chain/transaction_metadata.hpp | 7 ++++++- libraries/chain/transaction_metadata.cpp | 19 ++++++++++--------- plugins/producer_plugin/producer_plugin.cpp | 12 +++++------- 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 6136580fa44..947b557a1d4 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -52,8 +52,13 @@ class transaction_metadata { const flat_set& recover_keys( const chain_id_type& chain_id ); + // must be called from main application thread. signing_keys_future must be accessed only from main application thread. + // next() should only be called on main application thread after future is valid, to avoid dependency on appbase, + // it is up to the caller to have next() post to the application thread which makes sure future is only accessed from + // application thread and that assignment to future in this method has completed. static void create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, - const chain_id_type& chain_id, fc::microseconds time_limit ); + const chain_id_type& chain_id, fc::microseconds time_limit, + std::function next = std::function() ); }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index 482b3c488f7..e9994167aa0 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -24,21 +24,22 @@ const flat_set& transaction_metadata::recover_keys( const chain } void transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { - if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created + boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit, + std::function next ) +{ + if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) {// already created + if( next ) next(); return; + } - std::weak_ptr mtrx_wp = mtrx; - mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() { + mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx, next{std::move(next)}]() { fc::time_point deadline = time_limit == fc::microseconds::maximum() ? 
fc::time_point::maximum() : fc::time_point::now() + time_limit; - auto mtrx = mtrx_wp.lock(); fc::microseconds cpu_usage; flat_set recovered_pub_keys; - if( mtrx ) { - const signed_transaction& trn = mtrx->packed_trx->get_signed_transaction(); - cpu_usage = trn.get_signature_keys( chain_id, deadline, recovered_pub_keys ); - } + const signed_transaction& trn = mtrx->packed_trx->get_signed_transaction(); + cpu_usage = trn.get_signature_keys( chain_id, deadline, recovered_pub_keys ); + if( next ) next(); return std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys )); } ); } diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 50f6c531d0d..68113333f66 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -354,13 +354,11 @@ class producer_plugin_impl : public std::enable_shared_from_thissigning_keys_future.valid() ) - trx->signing_keys_future.wait(); - app().post(priority::low, [self, trx, persist_until_expired, next]() { - self->process_incoming_transaction_async( trx, persist_until_expired, next ); - }); + transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage, + [self = this, trx, persist_until_expired, next{std::move(next)}]() mutable { + app().post(priority::low, [self, trx{std::move(trx)}, persist_until_expired, next{std::move(next)}]() { + self->process_incoming_transaction_async( trx, persist_until_expired, next ); + }); }); } From f5596ebaabda61c0841c38876a1a555cfc04e834 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 13 Mar 2019 13:38:50 -0500 Subject: [PATCH 0122/1648] Update comment --- plugins/producer_plugin/producer_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 68113333f66..dbc42ed9282 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -353,7 +353,7 @@ class producer_plugin_impl : public std::enable_shared_from_this Date: Thu, 14 Mar 2019 09:22:04 -0500 Subject: [PATCH 0123/1648] Assign future on main application thread --- plugins/producer_plugin/producer_plugin.cpp | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index dbc42ed9282..6f81308aa83 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -352,13 +352,18 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); const auto max_trx_time_ms = _max_transaction_time_ms.load(); fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); - auto& tp = *_thread_pool; - // use chain thread pool for sig recovery - transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), max_trx_cpu_usage, + + auto after_sig_recovery = [self = this, trx, persist_until_expired, next{std::move(next)}]() mutable { app().post(priority::low, [self, trx{std::move(trx)}, persist_until_expired, next{std::move(next)}]() { self->process_incoming_transaction_async( trx, persist_until_expired, next ); }); + }; + + app().post(priority::low, [trx, &chain, max_trx_cpu_usage, after_sig_recovery{std::move(after_sig_recovery)}]() mutable { + // use chain thread pool for sig recovery + transaction_metadata::create_signing_keys_future( trx, chain.get_thread_pool(), chain.get_chain_id(), + max_trx_cpu_usage, std::move( after_sig_recovery ) ); }); } From 3c2fe4b10de784ecec2d15cba12d28582f7b0413 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 14 Mar 2019 12:37:34 -0500 Subject: [PATCH 0124/1648] Acquire lib via lib channel. Make expire_local_txns() thread safe. --- plugins/net_plugin/net_plugin.cpp | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 98606c73929..b2a0e9119c5 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -28,7 +28,7 @@ #include -using namespace eosio::chain::plugin_interface::compat; +using namespace eosio::chain::plugin_interface; namespace fc { extern std::unordered_map& get_logger_map(); @@ -146,6 +146,7 @@ namespace eosio { bool network_version_match = false; chain_id_type chain_id; fc::sha256 node_id; + std::atomic lib_num{0}; eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; string user_agent_name; @@ -160,7 +161,8 @@ namespace eosio { bool use_socket_read_watermark = false; - channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; uint16_t thread_pool_size = 4; optional thread_pool; @@ -192,6 +194,9 @@ namespace eosio { void accepted_block(const block_state_ptr&); void transaction_ack(const std::pair&); + void on_irreversible_block( const block_state_ptr& blk ) { + lib_num = blk->block_num; + } bool is_valid( const handshake_message &msg); @@ -3014,10 +3019,9 @@ namespace eosio { auto now = time_point::now(); expire_local_txns(); - controller& cc = chain_plug->chain(); - uint32_t lib = cc.last_irreversible_block_num(); + uint32_t lib = lib_num.load(); dispatcher->expire_blocks( lib ); - for ( auto &c : connections ) { + for ( auto& c : connections ) { auto &stale_txn = c->trx_state.get(); stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); auto &stale_txn_e = c->trx_state.get(); @@ -3035,9 +3039,9 @@ namespace eosio { >>>>>>> Move more of incoming transaction processing to thread pool } + // thread safe void net_plugin_impl::expire_local_txns() { - controller& cc = chain_plug->chain(); - uint32_t lib = cc.last_irreversible_block_num(); + uint32_t lib = lib_num.load(); size_t start_size = 0, end_size = 0; std::unique_lock g( local_txns_mtx ); @@ -3425,7 +3429,12 @@ namespace eosio { cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); } - 
my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->incoming_transaction_ack_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); + my->incoming_irreversible_block_subscription = app().get_channel().subscribe( + [this]( block_state_ptr s ) { + my->on_irreversible_block( s ); + }); my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { From e754a794d1a4afce8d3ab438c350a4a88022ed7a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 12 Mar 2019 14:18:21 -0500 Subject: [PATCH 0125/1648] Remove unneeded request for arbitrary list of transactions --- plugins/net_plugin/net_plugin.cpp | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b2a0e9119c5..9dcbe8e756e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -612,7 +612,6 @@ namespace eosio { const string peer_name(); void txn_send_pending(const vector& ids); - void txn_send(const vector& txn_lis); void blk_send_branch(); void blk_send(const block_id_type& blkid); @@ -937,21 +936,6 @@ namespace eosio { } } - void connection::txn_send(const vector& ids) { - vector>> trx_to_send; - std::unique_lock g( my_impl->local_txns_mtx ); - for( const auto& t : ids ) { - auto tx = my_impl->local_txns.get().find( t ); - if( tx != my_impl->local_txns.end()) { - trx_to_send.emplace_back( tx->serialized_txn ); - } - } - g.unlock(); - for( const auto& t : trx_to_send ) { - queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); - } - } - void connection::blk_send_branch() { controller& cc = my_impl->chain_plug->chain(); uint32_t head_num = cc.fork_db_head_block_num(); @@ -2600,6 +2584,7 @@ namespace eosio { std::lock_guard g( my_impl->local_txns_mtx ); size_t known_sum = local_txns.size(); if( known_sum ) { + expire_local_txns(); for( const auto& t : local_txns.get() ) { req.req_trx.ids.push_back( t.id ); } @@ -2665,12 +2650,16 @@ namespace eosio { case catch_up : c->txn_send_pending(msg.req_trx.ids); break; - case normal : - c->txn_send(msg.req_trx.ids); - break; case none : if(msg.req_blocks.mode == none) c->stop_send(); + // no break + case normal : + if( !msg.req_trx.ids.empty() ) { + elog( "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); + close(c); + return; + } break; default:; } From 10d96f31cb56fd708e4a6bfdd8ddd6edcbfd0a87 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 13 Mar 2019 14:19:32 -0500 Subject: [PATCH 0126/1648] Remove txn_send_pending to avoid large # trxs at end of sync --- plugins/net_plugin/net_plugin.cpp | 33 ------------------------------- 1 file changed, 33 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 9dcbe8e756e..4c25f256163 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -611,8 +611,6 @@ namespace eosio { const string peer_name(); - void txn_send_pending(const vector& ids); - void blk_send_branch(); void blk_send(const block_id_type& blkid); void stop_send(); @@ -919,23 +917,6 @@ namespace eosio { >>>>>>> Use unique_lock instead of lock_guard to clean up code } - void connection::txn_send_pending(const vector& ids) { - const std::set known_ids(ids.cbegin(), ids.cend()); - my_impl->expire_local_txns(); - vector>> 
trx_to_send; - std::unique_lock g( my_impl->local_txns_mtx ); - for( auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ) { - const bool found = known_ids.find( tx->id ) != known_ids.cend(); - if( !found ) { - trx_to_send.emplace_back( tx->serialized_txn ); - } - } - g.unlock(); - for( const auto& t : trx_to_send ) { - queue_write( t, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); - } - } - void connection::blk_send_branch() { controller& cc = my_impl->chain_plug->chain(); uint32_t head_num = cc.fork_db_head_block_num(); @@ -2577,19 +2558,6 @@ namespace eosio { break; } case catch_up : { - if( msg.known_trx.pending > 0) { - // plan to get all except what we already know about. - req.req_trx.mode = catch_up; - send_req = true; - std::lock_guard g( my_impl->local_txns_mtx ); - size_t known_sum = local_txns.size(); - if( known_sum ) { - expire_local_txns(); - for( const auto& t : local_txns.get() ) { - req.req_trx.ids.push_back( t.id ); - } - } - } break; } case normal: { @@ -2648,7 +2616,6 @@ namespace eosio { switch (msg.req_trx.mode) { case catch_up : - c->txn_send_pending(msg.req_trx.ids); break; case none : if(msg.req_blocks.mode == none) From 9611a24eb5ea7a21ddd215f8db803644eb59c9a7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 14 Mar 2019 13:15:20 -0500 Subject: [PATCH 0127/1648] Remove dead code --- plugins/net_plugin/net_plugin.cpp | 27 +++------------------------ 1 file changed, 3 insertions(+), 24 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4c25f256163..73b3ca5dd60 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1789,13 +1789,6 @@ namespace eosio { void dispatch_manager::recv_transaction(const connection_ptr& c, const transaction_id_type& id) { received_transactions.insert(std::make_pair(id, c)); - if (c && - c->last_req && - c->last_req->req_trx.mode != none && - !c->last_req->req_trx.ids.empty() && - c->last_req->req_trx.ids.back() == id) { - c->last_req.reset(); - } fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); @@ -1862,17 +1855,10 @@ namespace eosio { return; } fc_wlog( logger, "failed to fetch from ${p}",("p",c->peer_name())); - transaction_id_type tid; block_id_type bid; - bool is_txn = false; - if( c->last_req->req_trx.mode == normal && !c->last_req->req_trx.ids.empty() ) { - is_txn = true; - tid = c->last_req->req_trx.ids.back(); - } - else if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { + if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { bid = c->last_req->req_blocks.ids.back(); - } - else { + } else { fc_wlog( logger,"no retry, block mpde = ${b} trx mode = ${t}", ("b",modes_str(c->last_req->req_blocks.mode))("t",modes_str(c->last_req->req_trx.mode))); return; @@ -1881,14 +1867,7 @@ namespace eosio { if (conn == c || conn->last_req) { continue; } - bool sendit = false; - if (is_txn) { - auto trx = conn->trx_state.get().find(tid); - sendit = trx != conn->trx_state.end(); - } - else { - sendit = peer_has_block(bid, c->connection_id); - } + bool sendit = peer_has_block( bid, c->connection_id ); if (sendit) { conn->enqueue(*c->last_req); conn->fetch_wait(); From e3ce304a3ac0e3becf470f7dfd0113cc9a1c4c6b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 14 Mar 2019 14:03:58 -0500 Subject: [PATCH 0128/1648] Remove dead code including serialized transaction --- plugins/net_plugin/net_plugin.cpp | 28 
++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 73b3ca5dd60..2d372bd5f62 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -63,7 +63,6 @@ namespace eosio { transaction_id_type id; time_point_sec expires; /// time after which this may be purged. uint32_t block_num = 0; /// block transaction was included in - std::shared_ptr> serialized_txn; /// the received raw bundle }; struct by_expiry; @@ -767,7 +766,7 @@ namespace eosio { void rejected_block(const connection_ptr& c, uint32_t blk_num); void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); - void recv_notice(const connection_ptr& c, const notice_message& msg); + void sync_recv_notice( const connection_ptr& c, const notice_message& msg); }; class dispatch_manager { @@ -1512,9 +1511,7 @@ namespace eosio { notice_message note; note.known_blocks.mode = none; note.known_trx.mode = catch_up; - std::unique_lock g( my_impl->local_txns_mtx ); - note.known_trx.pending = my_impl->local_txns.size(); - g.unlock(); + note.known_trx.pending = 0; c->enqueue( note ); return; } @@ -1588,8 +1585,10 @@ namespace eosio { c->enqueue( req ); } - void sync_manager::recv_notice(const connection_ptr& c, const notice_message& msg) { + void sync_manager::sync_recv_notice( const connection_ptr& c, const notice_message& msg) { fc_ilog(logger, "sync_manager got ${m} block notice",("m",modes_str(msg.known_blocks.mode))); + EOS_ASSERT( msg.known_blocks.mode == catch_up || msg.known_blocks.mode == last_irr_catch_up, plugin_exception, + "sync_recv_notice only called on catch_up" ); if( msg.known_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}", ("s", msg.known_blocks.ids.size())("p", c->peer_name()) ); @@ -1602,8 +1601,7 @@ namespace eosio { } else { verify_catchup(c, msg.known_blocks.pending, msg.known_blocks.ids.back()); } - } - else { + } else if (msg.known_blocks.mode == last_irr_catch_up) { c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; reset_lib_num(c); start_sync(c, msg.known_trx.pending); @@ -1757,21 +1755,18 @@ namespace eosio { std::unique_lock g( my_impl->local_txns_mtx ); if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found + g.unlock(); fc_dlog( logger, "found trxid in local_trxs" ); return; } + time_point_sec trx_expiration = ptrx->packed_trx->expiration(); + node_transaction_state nts = {id, trx_expiration, 0}; + my_impl->local_txns.insert( std::move( nts )); g.unlock(); - time_point_sec trx_expiration = ptrx->packed_trx->expiration(); const packed_transaction& trx = *ptrx->packed_trx; - auto buff = create_send_buffer( trx ); - node_transaction_state nts = {id, trx_expiration, 0, buff}; - g.lock(); - my_impl->local_txns.insert( std::move( nts )); - g.unlock(); - my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { if( skips.find(c) != skips.end() || c->syncing ) { return false; @@ -2553,7 +2548,7 @@ namespace eosio { } case last_irr_catch_up: case catch_up: { - sync_master->recv_notice(c,msg); + sync_master->sync_recv_notice(c,msg); break; } case normal : { @@ -2643,6 +2638,7 @@ namespace eosio { std::unique_lock g( local_txns_mtx ); if( local_txns.get().find( tid ) != local_txns.end()) { + g.unlock(); fc_dlog( 
logger, "got a duplicate transaction - dropping" ); return; } From 2281543a65ed525fd89bc9f716710a1f196d36d0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 08:58:06 -0500 Subject: [PATCH 0129/1648] Consolidate transaction tracking, reducing memory requirements and making thread safe. --- plugins/net_plugin/net_plugin.cpp | 466 ++++++++++++++---------------- 1 file changed, 224 insertions(+), 242 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 2d372bd5f62..de838d20bc4 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -50,9 +50,6 @@ namespace eosio { class connection; - class sync_manager; - class dispatch_manager; - using connection_ptr = std::shared_ptr; using connection_wptr = std::weak_ptr; @@ -61,8 +58,9 @@ namespace eosio { struct node_transaction_state { transaction_id_type id; - time_point_sec expires; /// time after which this may be purged. - uint32_t block_num = 0; /// block transaction was included in + time_point_sec expires; /// time after which this may be purged. + uint32_t block_num = 0; /// block transaction was included in + uint32_t connection_id = 0; }; struct by_expiry; @@ -80,26 +78,119 @@ namespace eosio { node_transaction_state, indexed_by< ordered_unique< - tag< by_id >, - member < node_transaction_state, - transaction_id_type, - &node_transaction_state::id >, - sha256_less >, + tag, + composite_key< node_transaction_state, + member, + member + >, + composite_key_compare< sha256_less, std::less > + >, ordered_non_unique< tag< by_expiry >, - member< node_transaction_state, - fc::time_point_sec, - &node_transaction_state::expires > - >, + member< node_transaction_state, fc::time_point_sec, &node_transaction_state::expires > >, ordered_non_unique< tag, - member< node_transaction_state, - uint32_t, - &node_transaction_state::block_num > > + member< node_transaction_state, uint32_t, &node_transaction_state::block_num > > > > node_transaction_index; + struct peer_block_state { + block_id_type id; + uint32_t block_num = 0; + uint32_t connection_id = 0; + }; + + typedef multi_index_container< + eosio::peer_block_state, + indexed_by< + ordered_unique< tag, + composite_key< peer_block_state, + member, + member + >, + composite_key_compare< sha256_less, std::less > + >, + ordered_non_unique< tag, member > + > + > peer_block_state_index; + + + struct update_block_num { + uint32_t new_bnum; + update_block_num(uint32_t bnum) : new_bnum(bnum) {} + void operator() (node_transaction_state& nts) { + nts.block_num = new_bnum; + } + }; + + class sync_manager { + private: + enum stages { + lib_catchup, + head_catchup, + in_sync + }; + + uint32_t sync_known_lib_num; + uint32_t sync_last_requested_num; + uint32_t sync_next_expected_num; + uint32_t sync_req_span; + connection_ptr source; + stages state; + + chain_plugin* chain_plug = nullptr; + + constexpr static auto stage_str(stages s); + + public: + explicit sync_manager(uint32_t span); + void set_state(stages s); + bool sync_required(); + void send_handshakes(); + bool is_active(const connection_ptr& conn); + void reset_lib_num(const connection_ptr& conn); + void request_next_chunk(const connection_ptr& conn = connection_ptr()); + void start_sync(const connection_ptr& c, uint32_t target); + void reassign_fetch(const connection_ptr& c, go_away_reason reason); + void verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id); + void rejected_block(const connection_ptr& c, uint32_t blk_num); + void 
sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); + void recv_handshake(const connection_ptr& c, const handshake_message& msg); + void sync_recv_notice( const connection_ptr& c, const notice_message& msg); + }; + + class dispatch_manager { + std::mutex blk_state_mtx; + peer_block_state_index blk_state; + std::mutex local_txns_mtx; + node_transaction_index local_txns; + + public: + void bcast_transaction(const transaction_metadata_ptr& trx); + void rejected_transaction(const transaction_id_type& msg, uint32_t head_blk_num); + void bcast_block(const block_state_ptr& bs); + void rejected_block(const block_id_type& id); + + void recv_block(const connection_ptr& conn, const block_id_type& msg, uint32_t bnum); + void expire_blocks( uint32_t bnum ); + void recv_transaction(const connection_ptr& conn, const transaction_metadata_ptr& txn); + void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); + + void retry_fetch(const connection_ptr& conn); + + bool add_peer_block(const peer_block_state& pbs); + bool peer_has_block(const block_id_type& blkid, uint32_t connection_id); + bool have_block(const block_id_type& blkid); + + bool add_peer_txn( const node_transaction_state& nts ); + void update_txns_block_num( const signed_block_ptr& sb ); + void update_txns_block_num( const transaction_id_type& id, uint32_t blk_num ); + bool peer_has_txn( const transaction_id_type& tid, uint32_t connection_id ); + bool have_txn( const transaction_id_type& tid ); + void expire_txns( uint32_t lib_num ); + }; + class net_plugin_impl { public: unique_ptr acceptor; @@ -146,6 +237,7 @@ namespace eosio { chain_id_type chain_id; fc::sha256 node_id; std::atomic lib_num{0}; + uint32_t head_blk_num{0}; eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; string user_agent_name; @@ -153,9 +245,6 @@ namespace eosio { producer_plugin* producer_plug = nullptr; int started_sessions = 0; - std::mutex local_txns_mtx; - node_transaction_index local_txns; - shared_ptr resolver; bool use_socket_read_watermark = false; @@ -188,13 +277,12 @@ namespace eosio { void close(const connection_ptr& c); size_t count_open_sockets() const; - template - void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); - void accepted_block(const block_state_ptr&); void transaction_ack(const std::pair&); void on_irreversible_block( const block_state_ptr& blk ) { lib_num = blk->block_num; + controller& cc = chain_plug->chain(); + head_blk_num = cc.head_block_num(); } bool is_valid( const handshake_message &msg); @@ -230,7 +318,6 @@ namespace eosio { void start_monitors(); void expire_txns(); - void expire_local_txns(); void connection_monitor(std::weak_ptr from_connection); /** \name Peer Timestamps * Time message handling @@ -348,58 +435,6 @@ namespace eosio { constexpr uint16_t net_version = proto_explicit_sync; - struct transaction_state { - transaction_id_type id; - uint32_t block_num = 0; ///< the block number the transaction was included in - time_point_sec expires; - }; - - typedef multi_index_container< - transaction_state, - indexed_by< - ordered_unique< tag, member, sha256_less >, - ordered_non_unique< tag< by_expiry >, member< transaction_state,fc::time_point_sec,&transaction_state::expires >>, - ordered_non_unique< - tag, - member< transaction_state, - uint32_t, - &transaction_state::block_num > > - > - - > transaction_state_index; - - struct peer_block_state { - block_id_type id; - uint32_t block_num; - uint32_t connection_id; - 
}; - - typedef multi_index_container< - eosio::peer_block_state, - indexed_by< - ordered_unique< tag, - composite_key< peer_block_state, - member, - member - >, - composite_key_compare< sha256_less, std::less > - >, - ordered_non_unique< tag, member > - > - > peer_block_state_index; - - - struct update_block_num { - uint32_t new_bnum; - update_block_num(uint32_t bnum) : new_bnum(bnum) {} - void operator() (node_transaction_state& nts) { - nts.block_num = new_bnum; - } - void operator() (transaction_state& ts) { - ts.block_num = new_bnum; - } - }; - /** * Index by start_block_num */ @@ -507,7 +542,6 @@ namespace eosio { ~connection(); void initialize(); - transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive boost::asio::io_context::strand strand; @@ -733,70 +767,10 @@ namespace eosio { } }; - class sync_manager { - private: - enum stages { - lib_catchup, - head_catchup, - in_sync - }; - - uint32_t sync_known_lib_num; - uint32_t sync_last_requested_num; - uint32_t sync_next_expected_num; - uint32_t sync_req_span; - connection_ptr source; - stages state; - - chain_plugin* chain_plug = nullptr; - - constexpr static auto stage_str(stages s); - - public: - explicit sync_manager(uint32_t span); - void set_state(stages s); - bool sync_required(); - void send_handshakes(); - bool is_active(const connection_ptr& conn); - void reset_lib_num(const connection_ptr& conn); - void request_next_chunk(const connection_ptr& conn = connection_ptr()); - void start_sync(const connection_ptr& c, uint32_t target); - void reassign_fetch(const connection_ptr& c, go_away_reason reason); - void verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id); - void rejected_block(const connection_ptr& c, uint32_t blk_num); - void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); - void recv_handshake(const connection_ptr& c, const handshake_message& msg); - void sync_recv_notice( const connection_ptr& c, const notice_message& msg); - }; - - class dispatch_manager { - std::mutex blk_state_mtx; - peer_block_state_index blk_state; - std::multimap received_transactions; - - public: - void bcast_transaction(const transaction_metadata_ptr& trx); - void rejected_transaction(const transaction_id_type& msg); - void bcast_block(const block_state_ptr& bs); - void rejected_block(const block_id_type& id); - - void recv_block(const connection_ptr& conn, const block_id_type& msg, uint32_t bnum); - void expire_blocks( uint32_t bnum ); - void recv_transaction(const connection_ptr& conn, const transaction_id_type& id); - void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); - - void retry_fetch(const connection_ptr& conn); - - bool add_peer_block(const peer_block_state& pbs); - bool peer_has_block(const block_id_type& blkid, uint32_t connection_id); - bool have_block(const block_id_type& blkid); - }; - //--------------------------------------------------------------------------- connection::connection( string endpoint ) - : trx_state(), - peer_requested(), + : peer_requested(), server_ioc( my_impl->server_ioc ), strand( app().get_io_service() ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), @@ -821,8 +795,7 @@ namespace eosio { } connection::connection( socket_ptr s ) - : trx_state(), - peer_requested(), + : peer_requested(), server_ioc( my_impl->server_ioc ), strand( app().get_io_service() ), socket( s ), @@ -867,7 +840,6 @@ namespace eosio { 
void connection::reset() { peer_requested.reset(); - trx_state.clear(); } void connection::flush_queues() { @@ -1618,7 +1590,7 @@ namespace eosio { send_handshakes(); } } - void sync_manager::recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { + void sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { fc_dlog(logger, "got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name())); if (state == lib_catchup) { if (blk_num != sync_next_expected_num) { @@ -1692,6 +1664,68 @@ namespace eosio { return blk_itr != blk_state.end(); } + bool dispatch_manager::add_peer_txn( const node_transaction_state& nts ) { + std::lock_guard g( local_txns_mtx ); + auto tptr = local_txns.get().find( std::make_tuple( std::ref( nts.id ), nts.connection_id ) ); + bool added = (tptr == local_txns.end()); + if( added ) { + local_txns.insert( nts ); + } + return added; + } + + void dispatch_manager::update_txns_block_num( const signed_block_ptr& sb ) { + update_block_num ubn( sb->block_num() ); + std::lock_guard g( local_txns_mtx ); + for( const auto& recpt : sb->transactions ) { + const transaction_id_type& id = (recpt.trx.which() == 0) ? recpt.trx.get() + : recpt.trx.get().id(); + auto range = local_txns.get().equal_range( id ); + for( auto itr = range.first; itr != range.second; ++itr ) { + local_txns.modify( itr, ubn ); + } + } + } + + void dispatch_manager::update_txns_block_num( const transaction_id_type& id, uint32_t blk_num ) { + update_block_num ubn( blk_num ); + std::lock_guard g( local_txns_mtx ); + auto range = local_txns.get().equal_range( id ); + for( auto itr = range.first; itr != range.second; ++itr ) { + local_txns.modify( itr, ubn ); + } + } + + bool dispatch_manager::peer_has_txn( const transaction_id_type& tid, uint32_t connection_id ) { + std::lock_guard g( local_txns_mtx ); + auto tptr = local_txns.get().find( std::make_tuple( std::ref( tid ), connection_id ) ); + return tptr != local_txns.end(); + } + + bool dispatch_manager::have_txn( const transaction_id_type& tid ) { + std::lock_guard g( local_txns_mtx ); + auto tptr = local_txns.get().find( tid ); + return tptr != local_txns.end(); + } + + void dispatch_manager::expire_txns( uint32_t lib_num ) { + size_t start_size = 0, end_size = 0; + { + std::lock_guard g( local_txns_mtx ); + + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 ) ); + auto ex_up = old.upper_bound( time_point::now() ); + old.erase( ex_lo, ex_up ); + + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib_num ) ); + end_size = local_txns.size(); + } + fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)( "r", start_size - end_size ) ); + } + void dispatch_manager::expire_blocks( uint32_t lib_num ) { std::lock_guard g(blk_state_mtx); auto& stale_blk = blk_state.get(); @@ -1744,55 +1778,41 @@ namespace eosio { } void dispatch_manager::bcast_transaction(const transaction_metadata_ptr& ptrx) { - std::set skips; const auto& id = ptrx->id; - - auto range = received_transactions.equal_range(id); - for (auto org = range.first; org != range.second; ++org) { - skips.insert(org->second); - } - received_transactions.erase(range.first, range.second); - - std::unique_lock g( my_impl->local_txns_mtx ); - if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end()) { //found - g.unlock(); - fc_dlog( logger, "found trxid in local_trxs" ); - return; - } - time_point_sec 
trx_expiration = ptrx->packed_trx->expiration(); - node_transaction_state nts = {id, trx_expiration, 0}; - my_impl->local_txns.insert( std::move( nts )); - g.unlock(); - const packed_transaction& trx = *ptrx->packed_trx; - auto buff = create_send_buffer( trx ); + time_point_sec trx_expiration = trx.expiration(); + node_transaction_state nts = {id, trx_expiration, 0, 0}; - my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { - if( skips.find(c) != skips.end() || c->syncing ) { - return false; - } - const auto& bs = c->trx_state.find(id); - bool unknown = bs == c->trx_state.end(); - if( unknown ) { - c->trx_state.insert(transaction_state({id,0,trx_expiration})); - fc_dlog(logger, "sending trx to ${n}", ("n",c->peer_name() ) ); - } - return unknown; - }); + std::shared_ptr> send_buffer; + for( auto& cp : my_impl->connections ) { + if( !cp->current() ) { + continue; + } + if( !add_peer_txn(nts) ) { + continue; + } + if( !send_buffer ) { + send_buffer = create_send_buffer( trx ); + } + fc_dlog(logger, "sending trx to ${n}", ("n", cp->peer_name() ) ); + cp->enqueue_buffer( send_buffer, true, priority::low, no_reason ); + } } - void dispatch_manager::recv_transaction(const connection_ptr& c, const transaction_id_type& id) { - received_transactions.insert(std::make_pair(id, c)); + void dispatch_manager::recv_transaction(const connection_ptr& c, const transaction_metadata_ptr& txn) { + node_transaction_state nts = {txn->id, txn->packed_trx->expiration(), 0, c->connection_id}; + add_peer_txn( nts ); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); } - void dispatch_manager::rejected_transaction(const transaction_id_type& id) { - fc_dlog(logger,"not sending rejected transaction ${tid}",("tid",id)); - auto range = received_transactions.equal_range(id); - received_transactions.erase(range.first, range.second); + void dispatch_manager::rejected_transaction(const transaction_id_type& id, uint32_t head_blk_num) { + fc_dlog( logger, "not sending rejected transaction ${tid}", ("tid", id) ); + // keep rejected transaction around for awhile so we don't broadcast it + // update its block number so it will be purged when current block number is lib + update_txns_block_num( id, head_blk_num ); } void dispatch_manager::recv_notice(const connection_ptr& c, const notice_message& msg, bool generated) { @@ -2285,7 +2305,7 @@ namespace eosio { if(c) { auto blk_num = block_header::num_from_id(blk_id); dispatcher->recv_block(c, blk_id, blk_num); - sync_master->recv_block( c, blk_id, blk_num ); + sync_master->sync_recv_block( c, blk_id, blk_num ); } }); conn->pending_message_buffer.advance_read_ptr( message_length ); @@ -2323,16 +2343,6 @@ namespace eosio { return count; } - - template - void net_plugin_impl::send_transaction_to_all(const std::shared_ptr>& send_buffer, VerifierFunc verify) { - for( auto &c : connections) { - if( c->current() && verify( c )) { - c->enqueue_buffer( send_buffer, true, priority::low, no_reason ); - } - } - } - bool net_plugin_impl::is_valid(const handshake_message& msg) { // Do some basic validation of an incoming handshake_message, so things // that really aren't handshake messages can be quickly discarded without @@ -2636,18 +2646,17 @@ namespace eosio { auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; - std::unique_lock g( local_txns_mtx ); - if( local_txns.get().find( tid ) != local_txns.end()) { - g.unlock(); - fc_dlog( logger, "got a duplicate transaction - dropping" ); - return; - } - 
g.unlock(); + bool have_trx = dispatcher->have_txn( tid ); connection_wptr weak_ptr = c; - app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, tid](){ + app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, ptrx](){ auto c = weak_ptr.lock(); - dispatcher->recv_transaction(c, tid); + dispatcher->recv_transaction(c, ptrx); }); + if( have_trx ) { + fc_dlog( logger, "got a duplicate transaction - dropping" ); + return; + } + c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); @@ -2675,11 +2684,11 @@ namespace eosio { } } - app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}]() { + app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}, head_blk_num = this->head_blk_num]() { if( accepted ) { dispatcher->bcast_transaction( ptrx ); } else { - dispatcher->rejected_transaction( ptrx->id ); + dispatcher->rejected_transaction( ptrx->id, head_blk_num ); } }); }); @@ -2722,8 +2731,12 @@ namespace eosio { if( msg && cc.fetch_block_by_id(blk_id)) { ======= if( cc.fetch_block_by_id(blk_id) ) { +<<<<<<< HEAD >>>>>>> Revert unneeded changes to handle_message sync_master->recv_block(c, blk_id, blk_num); +======= + sync_master->sync_recv_block(c, blk_id, blk_num); +>>>>>>> Consolidate transaction tracking, reducing memory requirements and making thread safe. return; } <<<<<<< HEAD @@ -2868,21 +2881,10 @@ namespace eosio { update_block_num ubn(blk_num); if( reason == no_reason ) { - std::unique_lock g( local_txns_mtx ); - for( const auto& recpt : msg->transactions ) { - auto id = (recpt.trx.which() == 0) ? recpt.trx.get() - : recpt.trx.get().id(); - auto ltx = local_txns.get().find( id ); - if( ltx != local_txns.end()) { - local_txns.modify( ltx, ubn ); - } - auto ctx = c->trx_state.get().find( id ); - if( ctx != c->trx_state.end()) { - c->trx_state.modify( ctx, ubn ); - } - } - g.unlock(); - sync_master->recv_block(c, blk_id, blk_num); + boost::asio::post( *server_ioc, [self = this, msg]() { + self->dispatcher->update_txns_block_num( msg ); + }); + sync_master->sync_recv_block(c, blk_id, blk_num); } else { sync_master->rejected_block(c, blk_num); @@ -2948,10 +2950,9 @@ namespace eosio { start_txn_timer(); auto now = time_point::now(); - expire_local_txns(); - uint32_t lib = lib_num.load(); dispatcher->expire_blocks( lib ); +<<<<<<< HEAD for ( auto& c : connections ) { auto &stale_txn = c->trx_state.get(); stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); @@ -2966,32 +2967,13 @@ namespace eosio { ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); >>>>>>> force a build ======= +======= + dispatcher->expire_txns( lib ); +>>>>>>> Consolidate transaction tracking, reducing memory requirements and making thread safe. 
fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); >>>>>>> Move more of incoming transaction processing to thread pool } - // thread safe - void net_plugin_impl::expire_local_txns() { - uint32_t lib = lib_num.load(); - size_t start_size = 0, end_size = 0; - - std::unique_lock g( local_txns_mtx ); - - start_size = local_txns.size(); - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec( 0 )); - auto ex_up = old.upper_bound( time_point::now()); - old.erase( ex_lo, ex_up ); - - auto& stale = local_txns.get(); - stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib )); - end_size = local_txns.size(); - - g.unlock(); - - fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)("r", start_size - end_size) ); - } - void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); @@ -3038,7 +3020,7 @@ namespace eosio { const auto& id = results.second->id; if (results.first) { fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); - dispatcher->rejected_transaction(id); + dispatcher->rejected_transaction(id, head_blk_num); } else { fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); dispatcher->bcast_transaction(results.second); From 565f39ded9e4e1778da0964c61b86946fe56ba02 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 09:47:01 -0500 Subject: [PATCH 0130/1648] Fix for missing connection_id --- plugins/net_plugin/net_plugin.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index de838d20bc4..d8ce6fa0652 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1788,6 +1788,7 @@ namespace eosio { if( !cp->current() ) { continue; } + nts.connection_id = cp->connection_id; if( !add_peer_txn(nts) ) { continue; } From 3218065417fcaff856a8cbe8ebd8f7df12459b91 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 12:32:49 -0500 Subject: [PATCH 0131/1648] Make all timers thread safe. Expire trx/blks on thread pool. --- plugins/net_plugin/net_plugin.cpp | 117 +++++++++++++++++++----------- 1 file changed, 74 insertions(+), 43 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d8ce6fa0652..c589280d649 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -222,14 +222,17 @@ namespace eosio { unique_ptr< sync_manager > sync_master; unique_ptr< dispatch_manager > dispatcher; - unique_ptr connector_check; - unique_ptr transaction_check; + std::mutex connector_check_timer_mtx; + unique_ptr connector_check_timer; + std::mutex expire_timer_mtx; + unique_ptr expire_timer; + std::mutex keepalive_timer_mtx; unique_ptr keepalive_timer; boost::asio::steady_timer::duration connector_period; boost::asio::steady_timer::duration txn_exp_period; boost::asio::steady_timer::duration resp_expected_period; boost::asio::steady_timer::duration keepalive_interval{std::chrono::seconds{32}}; - int max_cleanup_time_ms = 0; + int max_cleanup_time_ms = 0; const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; ///< Peer clock may be no more than 1 second skewed from our clock, including network latency. 
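The hunk above renames connector_check / transaction_check to connector_check_timer / expire_timer and gives each timer, including keepalive_timer, a dedicated std::mutex, so a timer can be armed or cancelled from any thread while its completion handlers run on the thread pool. The following self-contained sketch illustrates only that timer-plus-mutex pattern; it is not part of the patch, and periodic_task, timer_mtx_, start() and stop() are invented names.

   #include <boost/asio.hpp>
   #include <chrono>
   #include <iostream>
   #include <memory>
   #include <mutex>
   #include <thread>

   // One steady_timer guarded by its own mutex, in the spirit of the
   // connector_check_timer_mtx / expire_timer_mtx / keepalive_timer_mtx members above.
   class periodic_task {
   public:
      explicit periodic_task( boost::asio::io_context& ioc )
         : timer_( new boost::asio::steady_timer( ioc ) ) {}

      // thread safe: may be called from any thread to (re)arm the timer
      void start( std::chrono::milliseconds period ) {
         std::lock_guard<std::mutex> g( timer_mtx_ );
         timer_->expires_from_now( period );
         timer_->async_wait( [this, period]( const boost::system::error_code& ec ) {
            if( ec ) return;                     // cancelled or error: stop rescheduling
            std::cout << "tick" << std::endl;    // periodic work goes here
            start( period );                     // re-arm; the mutex is not held here
         } );
      }

      // thread safe: may be called from any thread
      void stop() {
         std::lock_guard<std::mutex> g( timer_mtx_ );
         timer_->cancel();
      }

   private:
      std::mutex timer_mtx_;
      std::unique_ptr<boost::asio::steady_timer> timer_;
   };

   int main() {
      boost::asio::io_context ioc;
      periodic_task task( ioc );
      task.start( std::chrono::milliseconds( 100 ) );

      std::thread t( [&ioc]() { ioc.run(); } );  // handlers run on this thread
      std::this_thread::sleep_for( std::chrono::milliseconds( 350 ) );
      task.stop();                               // safe from the main thread
      ioc.stop();                                // let run() return
      t.join();
      return 0;
   }

The same shape appears below in start_conn_timer(), start_expire_timer() and ticker(): take the mutex, set the expiry, register the handler, release the mutex, and never touch the timer object outside the lock.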
@@ -314,10 +317,10 @@ namespace eosio { void handle_message(const connection_ptr& c, const packed_transaction_ptr& msg); void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); - void start_txn_timer(); + void start_expire_timer(); void start_monitors(); - void expire_txns(); + void expire(); void connection_monitor(std::weak_ptr from_connection); /** \name Peer Timestamps * Time message handling @@ -540,7 +543,9 @@ namespace eosio { explicit connection( socket_ptr s ); ~connection(); - void initialize(); + private: + void initialize(); // only called from constructor + public: optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive @@ -580,7 +585,8 @@ namespace eosio { bool syncing = false; uint16_t protocol_version = 0; string peer_addr; - unique_ptr response_expected; + std::mutex response_expected_timer_mtx; + unique_ptr response_expected_timer; std::mutex read_delay_timer_mtx; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; @@ -783,7 +789,7 @@ namespace eosio { syncing(false), protocol_version(0), peer_addr(endpoint), - response_expected(), + response_expected_timer(), read_delay_timer(), no_retry(no_reason), fork_head(), @@ -808,7 +814,7 @@ namespace eosio { syncing(false), protocol_version(0), peer_addr(), - response_expected(), + response_expected_timer(), read_delay_timer(), no_retry(no_reason), fork_head(), @@ -826,7 +832,8 @@ namespace eosio { void connection::initialize() { auto *rnd = node_id.data(); rnd[0] = 0; - response_expected.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); + // called only from constructor, no mutex needed + response_expected_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); } @@ -1181,15 +1188,19 @@ namespace eosio { to_sync_queue); } + // thread safe void connection::cancel_wait() { - if (response_expected) - response_expected->cancel(); + std::lock_guard g( response_expected_timer_mtx ); + if (response_expected_timer) + response_expected_timer->cancel(); } + // thread safe void connection::sync_wait() { - response_expected->expires_from_now( my_impl->resp_expected_period); connection_wptr c(shared_from_this()); - response_expected->async_wait( [c]( boost::system::error_code ec ) { + std::lock_guard g( response_expected_timer_mtx ); + response_expected_timer->expires_from_now( my_impl->resp_expected_period); + response_expected_timer->async_wait( [c]( boost::system::error_code ec ) { app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { @@ -1202,10 +1213,12 @@ namespace eosio { } ); } + // thread safe void connection::fetch_wait() { - response_expected->expires_from_now( my_impl->resp_expected_period); connection_wptr c(shared_from_this()); - response_expected->async_wait( [c]( boost::system::error_code ec ) { + std::lock_guard g( response_expected_timer_mtx ); + response_expected_timer->expires_from_now( my_impl->resp_expected_period); + response_expected_timer->async_wait( [c]( boost::system::error_code ec ) { app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { @@ -2894,8 +2907,9 @@ namespace eosio { } void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { - connector_check->expires_from_now( du); - connector_check->async_wait( [this, from_connection](boost::system::error_code ec) { + std::lock_guard g( 
connector_check_timer_mtx ); + connector_check_timer->expires_from_now( du); + connector_check_timer->async_wait( [this, from_connection](boost::system::error_code ec) { app().post( priority::low, [this, from_connection, ec]() { if( !ec) { connection_monitor(from_connection); @@ -2908,22 +2922,23 @@ namespace eosio { }); } - void net_plugin_impl::start_txn_timer() { - transaction_check->expires_from_now( txn_exp_period); - transaction_check->async_wait( [this]( boost::system::error_code ec ) { - int lower_than_low = priority::low - 1; - app().post( lower_than_low, [this, ec]() { - if( !ec ) { - expire_txns(); - } else { - fc_elog( logger, "Error from transaction check monitor: ${m}", ("m", ec.message())); - start_txn_timer(); - } - } ); - }); + // thread safe + void net_plugin_impl::start_expire_timer() { + std::lock_guard g( expire_timer_mtx ); + expire_timer->expires_from_now( txn_exp_period); + expire_timer->async_wait( [this]( boost::system::error_code ec ) { + if( !ec ) { + expire(); + } else { + fc_elog( logger, "Error from transaction check monitor: ${m}", ("m", ec.message()) ); + start_expire_timer(); + } + } ); } + // thread safe void net_plugin_impl::ticker() { + std::lock_guard g( keepalive_timer_mtx ); keepalive_timer->expires_from_now(keepalive_interval); keepalive_timer->async_wait( [this]( boost::system::error_code ec ) { app().post( priority::low, [this, ec]() { @@ -2941,14 +2956,20 @@ namespace eosio { } void net_plugin_impl::start_monitors() { - connector_check.reset(new boost::asio::steady_timer( *server_ioc )); - transaction_check.reset(new boost::asio::steady_timer( *server_ioc )); + { + std::lock_guard g( connector_check_timer_mtx ); + connector_check_timer.reset(new boost::asio::steady_timer( *server_ioc )); + } + { + std::lock_guard g( expire_timer_mtx ); + expire_timer.reset( new boost::asio::steady_timer( *server_ioc ) ); + } start_conn_timer(connector_period, std::weak_ptr()); - start_txn_timer(); + start_expire_timer(); } - void net_plugin_impl::expire_txns() { - start_txn_timer(); + void net_plugin_impl::expire() { + start_expire_timer(); auto now = time_point::now(); uint32_t lib = lib_num.load(); @@ -3321,7 +3342,10 @@ namespace eosio { } } - my->keepalive_timer.reset( new boost::asio::steady_timer( *my->server_ioc ) ); + { + std::lock_guard g( my->keepalive_timer_mtx ); + my->keepalive_timer.reset( new boost::asio::steady_timer( *my->server_ioc ) ); + } my->ticker(); if( my->acceptor ) { @@ -3375,12 +3399,19 @@ namespace eosio { if( my->server_ioc_work ) my->server_ioc_work->reset(); - if( my->connector_check ) - my->connector_check->cancel(); - if( my->transaction_check ) - my->transaction_check->cancel(); - if( my->keepalive_timer ) - my->keepalive_timer->cancel(); + { + std::lock_guard g( my->connector_check_timer_mtx ); + if( my->connector_check_timer ) + my->connector_check_timer->cancel(); + }{ + std::lock_guard g( my->expire_timer_mtx ); + if( my->expire_timer ) + my->expire_timer->cancel(); + }{ + std::lock_guard g( my->keepalive_timer_mtx ); + if( my->keepalive_timer ) + my->keepalive_timer->cancel(); + } my->done = true; if( my->acceptor ) { From 22742eb8aa95f088713c111eb133d0ef5b156b5c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 14:44:48 -0500 Subject: [PATCH 0132/1648] Break expire into two steps --- plugins/net_plugin/net_plugin.cpp | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 
c589280d649..c9d148e1c9a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1723,19 +1723,21 @@ namespace eosio { void dispatch_manager::expire_txns( uint32_t lib_num ) { size_t start_size = 0, end_size = 0; - { - std::lock_guard g( local_txns_mtx ); - start_size = local_txns.size(); - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec( 0 ) ); - auto ex_up = old.upper_bound( time_point::now() ); - old.erase( ex_lo, ex_up ); + std::unique_lock g( local_txns_mtx ); + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 ) ); + auto ex_up = old.upper_bound( time_point::now() ); + old.erase( ex_lo, ex_up ); + g.unlock(); // allow other threads opportunity to use local_txns + + g.lock(); + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib_num ) ); + end_size = local_txns.size(); + g.unlock(); - auto& stale = local_txns.get(); - stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib_num ) ); - end_size = local_txns.size(); - } fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)( "r", start_size - end_size ) ); } @@ -2969,8 +2971,6 @@ namespace eosio { } void net_plugin_impl::expire() { - start_expire_timer(); - auto now = time_point::now(); uint32_t lib = lib_num.load(); dispatcher->expire_blocks( lib ); @@ -2993,7 +2993,12 @@ namespace eosio { dispatcher->expire_txns( lib ); >>>>>>> Consolidate transaction tracking, reducing memory requirements and making thread safe. fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) ); +<<<<<<< HEAD >>>>>>> Move more of incoming transaction processing to thread pool +======= + + start_expire_timer(); +>>>>>>> Break expire into two steps } void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { From 8f216a5af692732291552a991d5220b75ca36bcf Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 18:17:52 -0500 Subject: [PATCH 0133/1648] Fix issue with sync and known block optimization --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index c9d148e1c9a..c0795f044a6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2315,7 +2315,7 @@ namespace eosio { block_id_type blk_id = bh.id(); if( dispatcher->have_block( blk_id ) ) { connection_wptr weak = conn; - app().post(priority::low, + app().post(priority::high, // high since block processing is high and this needs to run before next block [dispatcher = dispatcher.get(), sync_master = sync_master.get(), weak{std::move(weak)}, blk_id] { connection_ptr c = weak.lock(); if(c) { From b6f9911c3f82b33855a8bedfb948b9c3e9cffa5f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 15 Mar 2019 18:44:16 -0500 Subject: [PATCH 0134/1648] Add transaction id to log message --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index c0795f044a6..e1f1138f855 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2653,7 +2653,6 @@ namespace eosio { // called from thread_pool threads void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); - peer_ilog(c, "received 
packed_transaction"); if( db_read_mode == eosio::db_read_mode::READ_ONLY ) { fc_dlog(logger, "got a txn in read-only mode - dropping"); return; @@ -2661,6 +2660,7 @@ namespace eosio { auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; + peer_ilog(c, "received packed_transaction ${id}", ("id", tid)); bool have_trx = dispatcher->have_txn( tid ); connection_wptr weak_ptr = c; @@ -2669,7 +2669,7 @@ namespace eosio { dispatcher->recv_transaction(c, ptrx); }); if( have_trx ) { - fc_dlog( logger, "got a duplicate transaction - dropping" ); + fc_dlog( logger, "got a duplicate transaction - dropping ${id}", ("id", tid) ); return; } From e3f2869b046f7dbb996e2f7e90b1e2affbc164db Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 21 Mar 2019 07:35:16 -0500 Subject: [PATCH 0135/1648] Remove dead code --- .../include/eosio/net_plugin/net_plugin.hpp | 1 - plugins/net_plugin/net_plugin.cpp | 15 --------------- 2 files changed, 16 deletions(-) diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index 01c1383468d..3bc594dd313 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -38,7 +38,6 @@ namespace eosio { optional status( const string& endpoint )const; vector connections()const; - size_t num_peers() const; private: std::unique_ptr my; }; diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index e1f1138f855..7286d4eac28 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -278,7 +278,6 @@ namespace eosio { bool process_next_message(const connection_ptr& conn, uint32_t message_length); void close(const connection_ptr& c); - size_t count_open_sockets() const; void accepted_block(const block_state_ptr&); void transaction_ack(const std::pair&); @@ -2349,16 +2348,6 @@ namespace eosio { return true; } - size_t net_plugin_impl::count_open_sockets() const - { - size_t count = 0; - for( auto &c : connections) { - if(c->socket->is_open()) - ++count; - } - return count; - } - bool net_plugin_impl::is_valid(const handshake_message& msg) { // Do some basic validation of an incoming handshake_message, so things // that really aren't handshake messages can be quickly discarded without @@ -3443,10 +3432,6 @@ namespace eosio { FC_CAPTURE_AND_RETHROW() } - size_t net_plugin::num_peers() const { - return my->count_open_sockets(); - } - /** * Used to trigger a new connection from RPC API */ From c2b934417bacdcf00bb982413038e3b5f8fdd924 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 21 Mar 2019 08:21:39 -0500 Subject: [PATCH 0136/1648] Protect start_read_message via strand --- plugins/net_plugin/net_plugin.cpp | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7286d4eac28..976ea7e44c7 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -549,7 +549,11 @@ namespace eosio { optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive boost::asio::io_context::strand strand; +<<<<<<< HEAD socket_ptr socket; +======= + socket_ptr socket; // only accessed through strand after construction +>>>>>>> Protect start_read_message via strand fc::message_buffer<1024*1024> pending_message_buffer; <<<<<<< HEAD @@ -777,7 +781,11 @@ namespace eosio { connection::connection( string 
endpoint ) : peer_requested(), server_ioc( my_impl->server_ioc ), +<<<<<<< HEAD strand( app().get_io_service() ), +======= + strand( *my_impl->server_ioc ), +>>>>>>> Protect start_read_message via strand socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), connection_id( ++my_impl->current_connection_id ), @@ -802,7 +810,11 @@ namespace eosio { connection::connection( socket_ptr s ) : peer_requested(), server_ioc( my_impl->server_ioc ), +<<<<<<< HEAD strand( app().get_io_service() ), +======= + strand( *my_impl->server_ioc ), +>>>>>>> Protect start_read_message via strand socket( s ), node_id(), connection_id( ++my_impl->current_connection_id ), @@ -2001,7 +2013,7 @@ namespace eosio { return false; } else { - boost::asio::post(*server_ioc, [this, con]() { + con->strand.post( [this, con]() { start_read_message( con ); }); ++started_sessions; @@ -2080,7 +2092,7 @@ namespace eosio { }); } - // only called from server_ioc thread + // only called from strand thread void net_plugin_impl::start_read_message(const connection_ptr& conn) { try { @@ -2167,7 +2179,7 @@ namespace eosio { if( !conn->read_delay_timer ) return; conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); conn->read_delay_timer->async_wait( - app().get_priority_queue().wrap( priority::low, [this, weak_conn]( boost::system::error_code ) { + boost::asio::bind_executor(conn->strand, [this, weak_conn]( boost::system::error_code ) { auto conn = weak_conn.lock(); if( !conn ) return; start_read_message( conn ); @@ -2288,7 +2300,7 @@ namespace eosio { close( conn ); }); } - }); + })); } catch (...) { fc_elog( logger, "Undefined exception in start_read_message" ); connection_wptr weak_conn = conn; From 3b9b19517d775a27d516b968d0b5c3f63849b8e0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 21 Mar 2019 23:19:07 -0500 Subject: [PATCH 0137/1648] Made all access to impl->connections thread safe --- plugins/net_plugin/net_plugin.cpp | 394 ++++++++++++++++-------------- 1 file changed, 217 insertions(+), 177 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 976ea7e44c7..d1b899c32c6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -56,6 +56,14 @@ namespace eosio { using socket_ptr = std::shared_ptr; using io_work_t = boost::asio::executor_work_guard; + template + void verify_strand_in_this_thread(const Strand& strand, const char* func, int line) { + if( !strand.running_in_this_thread() ) { + elog( "wrong strand: ${f} : line ${n}, exiting", ("f", func)("n", line) ); + app().quit(); + } + } + struct node_transaction_state { transaction_id_type id; time_point_sec expires; /// time after which this may be purged. 
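The verify_strand_in_this_thread() helper added above relies on two properties of boost::asio strands: handlers dispatched through a strand never run concurrently, and io_context::strand::running_in_this_thread() reports whether the current code was reached through that strand. A minimal, self-contained sketch of the idea follows; it is not part of the patch, and checked_increment and counter are invented names.

   #include <boost/asio.hpp>
   #include <iostream>
   #include <thread>
   #include <vector>

   int main() {
      boost::asio::io_context ioc;
      boost::asio::io_context::strand strand( ioc );

      int counter = 0;   // only modified from handlers posted through the strand

      auto checked_increment = [&]() {
         // same spirit as verify_strand_in_this_thread(): complain loudly if a
         // caller somehow bypassed the strand
         if( !strand.running_in_this_thread() ) {
            std::cerr << "wrong strand" << std::endl;
            return;
         }
         ++counter;      // serialized by the strand, so no mutex is needed
      };

      for( int i = 0; i < 1000; ++i ) {
         boost::asio::post( strand, checked_increment );
      }

      // run the io_context on several threads; the strand still serializes the handlers
      std::vector<std::thread> pool;
      for( int i = 0; i < 4; ++i ) {
         pool.emplace_back( [&ioc]() { ioc.run(); } );
      }
      for( auto& t : pool ) t.join();

      std::cout << "counter = " << counter << std::endl;   // expect 1000
      return 0;
   }

In the patch the helper is used defensively: code that must only execute on a connection's strand calls it and, rather than racing, logs the violation and calls app().quit().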
@@ -199,7 +207,6 @@ namespace eosio { string p2p_server_address; uint32_t max_client_count = 0; uint32_t max_nodes_per_host = 1; - uint32_t num_clients = 0; uint32_t current_connection_id = 0; vector supplied_peers; @@ -217,7 +224,8 @@ namespace eosio { connection_ptr find_connection(const string& host)const; - std::set< connection_ptr > connections; + mutable std::mutex connections_mtx; + std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection bool done = false; unique_ptr< sync_manager > sync_master; unique_ptr< dispatch_manager > dispatcher; @@ -261,7 +269,7 @@ namespace eosio { optional server_ioc_work; - void connect(const connection_ptr& c); + bool resolve_and_connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); bool start_session(const connection_ptr& c); void start_listen_loop(); @@ -277,8 +285,6 @@ namespace eosio { */ bool process_next_message(const connection_ptr& conn, uint32_t message_length); - void close(const connection_ptr& c); - void accepted_block(const block_state_ptr&); void transaction_ack(const std::pair&); void on_irreversible_block( const block_state_ptr& blk ) { @@ -542,8 +548,19 @@ namespace eosio { explicit connection( socket_ptr s ); ~connection(); + + void start(); + + bool socket_is_open() const { return socket_open.load(); } // thread safe + const string& peer_address() const { return peer_addr; } // thread safe + const string& remote_address() const { return socket_open.load() ? remote_endpoint_ip : unknown; } // thread safe, not updated after start() + private: + static const string unknown; + void initialize(); // only called from constructor + void update_endpoints(); + public: optional peer_requested; // this peer is requesting info from us @@ -553,7 +570,13 @@ namespace eosio { socket_ptr socket; ======= socket_ptr socket; // only accessed through strand after construction +<<<<<<< HEAD >>>>>>> Protect start_read_message via strand +======= + private: + std::atomic socket_open{false}; + public: +>>>>>>> Made all access to impl->connections thread safe fc::message_buffer<1024*1024> pending_message_buffer; <<<<<<< HEAD @@ -587,14 +610,21 @@ namespace eosio { bool connecting = false; bool syncing = false; uint16_t protocol_version = 0; - string peer_addr; + private: + const string peer_addr; + string remote_endpoint_ip; // not updated after start + string remote_endpoint_port; // not updated after start + string local_endpoint_ip; // not updated after start + string local_endpoint_port; // not updated after start + public: + std::mutex response_expected_timer_mtx; unique_ptr response_expected_timer; std::mutex read_delay_timer_mtx; unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; - uint32_t fork_head_num = 0; + std::atomic fork_head_num{0}; // provides memory barrier for fork_head optional last_req; connection_status get_status()const { @@ -683,30 +713,24 @@ namespace eosio { fc::optional _logger_variant; const fc::variant_object& get_logger_variant() { if (!_logger_variant) { - boost::system::error_code ec; - auto rep = socket->remote_endpoint(ec); - string ip = ec ? "" : rep.address().to_string(); - string port = ec ? "" : std::to_string(rep.port()); - - auto lep = socket->local_endpoint(ec); - string lip = ec ? "" : lep.address().to_string(); - string lport = ec ? 
"" : std::to_string(lep.port()); - _logger_variant.emplace(fc::mutable_variant_object() ("_name", peer_name()) ("_id", node_id) ("_sid", ((string)node_id).substr(0, 7)) - ("_ip", ip) - ("_port", port) - ("_lip", lip) - ("_lport", lport) + ("_ip", remote_endpoint_ip) + ("_port", remote_endpoint_port) + ("_lip", local_endpoint_ip) + ("_lport", local_endpoint_port) ); } return *_logger_variant; } }; - struct msg_handler : public fc::visitor { + const string connection::unknown = ""; + + +struct msg_handler : public fc::visitor { net_plugin_impl& impl; connection_ptr c; msg_handler( net_plugin_impl& imp, const connection_ptr& conn) : impl(imp), c(conn) {} @@ -799,8 +823,6 @@ namespace eosio { response_expected_timer(), read_delay_timer(), no_retry(no_reason), - fork_head(), - fork_head_num(0), last_req() { fc_ilog( logger, "created connection to ${n}", ("n", endpoint) ); @@ -828,8 +850,6 @@ namespace eosio { response_expected_timer(), read_delay_timer(), no_retry(no_reason), - fork_head(), - fork_head_num(0), last_req() { fc_ilog( logger, "accepted network connection" ); @@ -848,8 +868,26 @@ namespace eosio { read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); } + void connection::update_endpoints() { + boost::system::error_code ec; + auto rep = socket->remote_endpoint(ec); + remote_endpoint_ip = ec ? unknown : rep.address().to_string(); + remote_endpoint_port = ec ? unknown : std::to_string(rep.port()); + + auto lep = socket->local_endpoint(ec); + local_endpoint_ip = ec ? unknown : lep.address().to_string(); + local_endpoint_port = ec ? unknown : std::to_string(lep.port()); + } + + void connection::start() { + verify_strand_in_this_thread( strand, __func__, __LINE__ ); + + update_endpoints(); + socket_open = true; + } + bool connection::connected() { - return (socket && socket->is_open() && !connecting); + return socket_is_open() && !connecting; } bool connection::current() { @@ -865,12 +903,8 @@ namespace eosio { } void connection::close() { - if(socket) { - socket->close(); - } - else { - fc_wlog( logger, "no socket to close!" 
); - } + socket_open = false; + socket->close(); flush_queues(); connecting = false; syncing = false; @@ -1016,7 +1050,7 @@ namespace eosio { if( !buffer_queue.add_write_queue( buff, callback, to_sync_queue )) { fc_wlog( logger, "write_queue full ${s} bytes, giving up on connection ${p}", ("s", buffer_queue.write_queue_size())("p", peer_name()) ); - my_impl->close( shared_from_this() ); + close(); return; } if( buffer_queue.is_out_queue_empty() && trigger_send) { @@ -1030,7 +1064,7 @@ namespace eosio { connection_wptr c(shared_from_this()); if(!socket->is_open()) { fc_elog(logger,"socket not open to ${p}",("p",peer_name())); - my_impl->close(c.lock()); + close(); return; } std::vector bufs; @@ -1054,7 +1088,7 @@ namespace eosio { else { fc_wlog( logger, "connection closure detected on write to ${p}",("p",pname) ); } - my_impl->close(conn); + conn->close(); return; } conn->buffer_queue.clear_out_queue(); @@ -1189,7 +1223,7 @@ namespace eosio { if (close_after_send != no_reason) { fc_elog( logger, "sent a go away message: ${r}, closing connection to ${p}", ("r", reason_str(close_after_send))("p", conn->peer_name()) ); - my_impl->close(conn); + conn->close(); return; } } else { @@ -1257,15 +1291,11 @@ namespace eosio { if( !last_handshake_recv.p2p_address.empty() ) { return last_handshake_recv.p2p_address; } - if( !peer_addr.empty() ) { - return peer_addr; + if( !peer_address().empty() ) { + return peer_address(); } - if( socket != nullptr ) { - boost::system::error_code ec; - auto rep = socket->remote_endpoint(ec); - if( !ec ) { - return rep.address().to_string() + ':' + std::to_string( rep.port() ); - } + if( remote_endpoint_port != unknown ) { + return remote_endpoint_ip + ":" + remote_endpoint_port; } return "connecting client"; } @@ -1321,12 +1351,13 @@ namespace eosio { state = newstate; } + // uses controller, only call from application thread bool sync_manager::is_active(const connection_ptr& c) { if (state == head_catchup && c) { + auto fork_head_num = c->fork_head_num.load(); // provide memory barrier for c->fork_head bool fhset = c->fork_head != block_id_type(); - fc_dlog(logger, "fork_head_num = ${fn} fork_head set = ${s}", - ("fn", c->fork_head_num)("s", fhset)); - return c->fork_head != block_id_type() && c->fork_head_num < chain_plug->chain().fork_db_head_block_num(); + fc_dlog( logger, "fork_head_num = ${fn} fork_head set = ${s}", ("fn", fork_head_num)( "s", fhset ) ); + return c->fork_head != block_id_type() && fork_head_num < chain_plug->chain().fork_db_head_block_num(); } return state != in_sync; } @@ -1370,14 +1401,13 @@ namespace eosio { if (conn && conn->current() ) { source = conn; - } - else { + } else { + std::lock_guard g( my_impl->connections_mtx ); if (my_impl->connections.size() == 1) { if (!source) { source = *my_impl->connections.begin(); } - } - else { + } else { // init to a linear array search auto cptr = my_impl->connections.begin(); auto cend = my_impl->connections.end(); @@ -1438,8 +1468,9 @@ namespace eosio { void sync_manager::send_handshakes() { - for( auto &ci : my_impl->connections) { - if( ci->current()) { + std::lock_guard g( my_impl->connections_mtx ); + for( auto& ci : my_impl->connections ) { + if( ci->current() ) { ci->send_handshake(); } } @@ -1557,13 +1588,15 @@ namespace eosio { void sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { request_message req; req.req_blocks.mode = catch_up; + std::unique_lock g( my_impl->connections_mtx ); for (const auto& cc : my_impl->connections) { - if (cc->fork_head 
== id || - cc->fork_head_num > num) { + // fork_head_num provides memory barrier for fork_head + if( cc->fork_head_num > num || cc->fork_head == id ) { req.req_blocks.mode = none; break; } } + g.unlock(); if( req.req_blocks.mode == catch_up ) { c->fork_head = id; c->fork_head_num = num; @@ -1572,8 +1605,7 @@ namespace eosio { if (state == lib_catchup) return; set_state(head_catchup); - } - else { + } else { c->fork_head = block_id_type(); c->fork_head_num = 0; } @@ -1588,7 +1620,7 @@ namespace eosio { if( msg.known_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}", ("s", msg.known_blocks.ids.size())("p", c->peer_name()) ); - my_impl->close(c); + c->close(); return; } if (msg.known_blocks.mode == catch_up) { @@ -1609,7 +1641,7 @@ namespace eosio { fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_name()) ); sync_last_requested_num = 0; source.reset(); - my_impl->close(c); + c->close(); set_state(in_sync); send_handshakes(); } @@ -1620,7 +1652,7 @@ namespace eosio { if (blk_num != sync_next_expected_num) { fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_name()) ); - my_impl->close(c); + c->close(); return; } sync_next_expected_num = blk_num + 1; @@ -1631,18 +1663,20 @@ namespace eosio { source.reset(); block_id_type null_id; - for (const auto& cp : my_impl->connections) { + std::unique_lock g( my_impl->connections_mtx ); + for( const auto& cp : my_impl->connections ) { + uint32_t fork_head_num = cp->fork_head_num.load(); // fork_head_num provides memory barrier for fork_head if (cp->fork_head == null_id) { continue; } - if (cp->fork_head == blk_id || cp->fork_head_num < blk_num) { + if( fork_head_num < blk_num || cp->fork_head == blk_id ) { c->fork_head = null_id; c->fork_head_num = 0; - } - else { + } else { set_state(head_catchup); } } + g.unlock(); if (state == in_sync) { send_handshakes(); @@ -1764,6 +1798,7 @@ namespace eosio { fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); std::shared_ptr> send_buffer; + std::lock_guard g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; @@ -1810,6 +1845,7 @@ namespace eosio { node_transaction_state nts = {id, trx_expiration, 0, 0}; std::shared_ptr> send_buffer; + std::lock_guard g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; @@ -1905,6 +1941,7 @@ namespace eosio { ("b",modes_str(c->last_req->req_blocks.mode))("t",modes_str(c->last_req->req_trx.mode))); return; } + std::unique_lock g( my_impl->connections_mtx ); for (auto& conn : my_impl->connections) { if (conn == c || conn->last_req) { continue; @@ -1917,6 +1954,7 @@ namespace eosio { return; } } + g.unlock(); // at this point no other peer has it, re-request or do nothing? 
if( c->connected() ) { @@ -1927,29 +1965,30 @@ namespace eosio { //------------------------------------------------------------------------ - void net_plugin_impl::connect(const connection_ptr& c) { + bool net_plugin_impl::resolve_and_connect(const connection_ptr& c) { if( c->no_retry != go_away_reason::no_reason) { fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( c->no_retry ))); - return; + return false; } - auto colon = c->peer_addr.find(':'); + auto colon = c->peer_address().find(':'); if (colon == std::string::npos || colon == 0) { - fc_elog( logger, "Invalid peer address. must be \"host:port\": ${p}", ("p",c->peer_addr) ); - for ( auto itr : connections ) { - if((*itr).peer_addr == c->peer_addr) { - (*itr).reset(); - close(itr); - connections.erase(itr); + fc_elog( logger, "Invalid peer address. must be \"host:port\": ${p}", ("p",c->peer_address()) ); + std::lock_guard g( my_impl->connections_mtx ); + for ( auto& cp : connections ) { + if( cp->peer_address() == c->peer_address() ) { + cp->reset(); + cp->close(); + connections.erase( cp ); break; } } - return; + return false; } - auto host = c->peer_addr.substr( 0, colon ); - auto port = c->peer_addr.substr( colon + 1); + auto host = c->peer_address().substr( 0, colon ); + auto port = c->peer_address().substr( colon + 1); idump((host)(port)); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str() ); connection_wptr weak_conn = c; @@ -1963,11 +2002,16 @@ namespace eosio { if( !err ) { connect( c, endpoint_itr ); } else { - fc_elog( logger, "Unable to resolve ${peer_addr}: ${error}", - ("peer_addr", c->peer_name())( "error", err.message()) ); + fc_elog( logger, "Unable to resolve ${add}: ${error}", + ("add", c->peer_name())( "error", err.message()) ); } } ); +<<<<<<< HEAD } ) ); +======= + } ); + return true; +>>>>>>> Made all access to impl->connections thread safe } void net_plugin_impl::connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr) { @@ -1990,12 +2034,12 @@ namespace eosio { } } else { if( endpoint_itr != tcp::resolver::iterator()) { - close( c ); + c->close(); connect( c, endpoint_itr ); } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; - my_impl->close( c ); + c->close(); } } } ); @@ -2009,11 +2053,11 @@ namespace eosio { if (ec) { fc_elog( logger, "connection failed to ${peer}: ${error}", ( "peer", con->peer_name())("error",ec.message()) ); con->connecting = false; - close(con); + con->close(); return false; - } - else { + } else { con->strand.post( [this, con]() { + con->start(); start_read_message( con ); }); ++started_sessions; @@ -2026,69 +2070,65 @@ namespace eosio { void net_plugin_impl::start_listen_loop() { - auto socket = std::make_shared( std::ref( *server_ioc ) ); - acceptor->async_accept( *socket, [socket, this, ioc = server_ioc]( boost::system::error_code ec ) { - app().post( priority::low, [socket, this, ec, ioc{std::move(ioc)}]() { - if( !ec ) { - uint32_t visitors = 0; - uint32_t from_addr = 0; - boost::system::error_code rec; - auto paddr = socket->remote_endpoint(rec).address(); - if (rec) { - fc_elog(logger,"Error getting remote endpoint: ${m}",("m", rec.message())); - } - else { - for (auto &conn : connections) { - if(conn->socket->is_open()) { - if (conn->peer_addr.empty()) { - visitors++; - boost::system::error_code ec; - if (paddr == conn->socket->remote_endpoint(ec).address()) { - from_addr++; - } + auto new_socket = std::make_shared( std::ref( 
*server_ioc ) ); + acceptor->async_accept( *new_socket, [new_socket, this, ioc = server_ioc]( boost::system::error_code ec ) { + // called from thread_pool threads, new_socket not shared yet + if( !ec ) { + uint32_t visitors = 0; + uint32_t from_addr = 0; + boost::system::error_code rec; + const auto& paddr_add = new_socket->remote_endpoint( rec ).address(); + string paddr_str; + if( rec ) { + fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message()) ); + } else { + paddr_str = paddr_add.to_string(); + std::unique_lock g( connections_mtx ); + for( auto& conn : connections ) { + if( conn->socket_is_open() ) { + if( conn->peer_address().empty() ) { + ++visitors; + if( paddr_str == conn->remote_address() ) { + ++from_addr; } } } - if (num_clients != visitors) { - fc_ilog( logger,"checking max client, visitors = ${v} num clients ${n}",("v",visitors)("n",num_clients) ); - num_clients = visitors; + } + g.unlock(); + if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count) ) { + connection_ptr new_connection = std::make_shared( new_socket ); + if( start_session( new_connection ) ) { + g.lock(); + connections.insert( new_connection ); + g.unlock(); } - if( from_addr < max_nodes_per_host && (max_client_count == 0 || num_clients < max_client_count )) { - ++num_clients; - connection_ptr c = std::make_shared( socket ); - connections.insert( c ); - start_session( c ); + } else { + if( from_addr >= max_nodes_per_host ) { + fc_elog( logger, "Number of connections (${n}) from ${ra} exceeds limit", + ("n", from_addr + 1)( "ra", paddr_str ) ); + } else { + fc_elog( logger, "Error max_client_count ${m} exceeded", ("m", max_client_count) ); } - else { - if (from_addr >= max_nodes_per_host) { - fc_elog(logger, "Number of connections (${n}) from ${ra} exceeds limit", - ("n", from_addr+1)("ra",paddr.to_string())); - } - else { - fc_elog(logger, "Error max_client_count ${m} exceeded", - ( "m", max_client_count) ); - } - socket->close(); - } - } - } else { - fc_elog( logger, "Error accepting connection: ${m}",( "m", ec.message() ) ); - // For the listed error codes below, recall start_listen_loop() - switch (ec.value()) { - case ECONNABORTED: - case EMFILE: - case ENFILE: - case ENOBUFS: - case ENOMEM: - case EPROTO: - break; - default: - return; + new_socket->close(); // new_socket never associated with a connection } } - start_listen_loop(); - }); + } else { + fc_elog( logger, "Error accepting connection: ${m}", ("m", ec.message()) ); + // For the listed error codes below, recall start_listen_loop() + switch( ec.value() ) { + case ECONNABORTED: + case EMFILE: + case ENFILE: + case ENOBUFS: + case ENOMEM: + case EPROTO: + break; + default: + return; + } + } + start_listen_loop(); }); } @@ -2171,7 +2211,7 @@ namespace eosio { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - my_impl->close( conn ); + conn->close(); }); return; } @@ -2278,6 +2318,7 @@ namespace eosio { <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= connection_wptr weak_conn = conn; >>>>>>> Remove descriptions of tasks as not merged into develop yet @@ -2294,21 +2335,24 @@ namespace eosio { >>>>>>> Make delay_timer thread safe app().post( priority::medium, [this, weak_conn]() { >>>>>>> Remove descriptions of tasks as not merged into develop yet +======= + app().post( priority::medium, [weak_conn]() { +>>>>>>> Made all access to impl->connections thread safe auto conn = weak_conn.lock(); if( !conn ) 
return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - close( conn ); + conn->close(); }); } })); } catch (...) { fc_elog( logger, "Undefined exception in start_read_message" ); connection_wptr weak_conn = conn; - app().post( priority::medium, [this, weak_conn]() { + app().post( priority::medium, [weak_conn]() { auto conn = weak_conn.lock(); if( !conn ) return; fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - close( conn ); + conn->close(); }); } } @@ -2354,7 +2398,7 @@ namespace eosio { } catch( const fc::exception& e ) { fc_elog( logger, "Exception in handling message from ${p}: ${s}", ("p", conn->peer_name())("s", e.to_detail_string()) ); - close( conn ); + conn->close(); return false; } return true; @@ -2409,9 +2453,10 @@ namespace eosio { return; } - if( c->peer_addr.empty() || c->last_handshake_recv.node_id == fc::sha256()) { + if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { fc_dlog(logger, "checking for duplicate" ); - for(const auto &check : connections) { + std::lock_guard g( connections_mtx ); + for(const auto& check : connections) { if(check == c) continue; if(check->connected() && check->peer_name() == msg.p2p_address) { @@ -2430,9 +2475,9 @@ namespace eosio { return; } } - } - else { - fc_dlog(logger, "skipping duplicate check, addr == ${pa}, id = ${ni}",("pa",c->peer_addr)("ni",c->last_handshake_recv.node_id)); + } else { + fc_dlog( logger, "skipping duplicate check, addr == ${pa}, id = ${ni}", + ("pa", c->peer_address())( "ni", c->last_handshake_recv.node_id ) ); } if( msg.chain_id != chain_id) { @@ -2503,7 +2548,7 @@ namespace eosio { c->node_id = msg.node_id; } c->flush_queues(); - close(c); + c->close(); } void net_plugin_impl::handle_message(const connection_ptr& c, const time_message& msg) { @@ -2596,7 +2641,7 @@ namespace eosio { if( msg.req_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid request_message, req_blocks.ids.size ${s}, closing ${p}", ("s", msg.req_blocks.ids.size())("p",c->peer_name()) ); - close(c); + c->close(); return; } @@ -2624,8 +2669,8 @@ namespace eosio { // no break case normal : if( !msg.req_trx.ids.empty() ) { - elog( "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); - close(c); + fc_elog( logger, "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); + c->close(); return; } break; @@ -2949,8 +2994,9 @@ namespace eosio { if( ec ) { fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } + std::lock_guard g( connections_mtx ); for( auto& c : connections ) { - if( c->socket->is_open()) { + if( c->socket_is_open() ) { c->send_time(); } } @@ -3006,6 +3052,7 @@ namespace eosio { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); + std::unique_lock g( connections_mtx ); auto it = (from ? 
connections.find(from) : connections.begin()); if (it == connections.end()) it = connections.begin(); while (it != connections.end()) { @@ -3013,32 +3060,20 @@ namespace eosio { start_conn_timer(std::chrono::milliseconds(1), *it); // avoid exhausting return; } - if( !(*it)->socket->is_open() && !(*it)->connecting) { - if( (*it)->peer_addr.length() > 0) { - connect(*it); - } - else { + if( !(*it)->socket_is_open() && !(*it)->connecting) { + if( (*it)->peer_address().length() > 0) { + resolve_and_connect(*it); + } else { it = connections.erase(it); continue; } } ++it; } + g.unlock(); start_conn_timer(connector_period, std::weak_ptr()); } - void net_plugin_impl::close(const connection_ptr& c) { - if( c->peer_addr.empty() && c->socket->is_open() ) { - if (num_clients == 0) { - fc_wlog( logger, "num_clients already at 0"); - } - else { - --num_clients; - } - } - c->close(); - } - void net_plugin_impl::accepted_block(const block_state_ptr& block) { fc_dlog(logger,"signaled, id = ${id}",("id", block->id)); dispatcher->bcast_block(block); @@ -3241,7 +3276,6 @@ namespace eosio { my->resp_expected_period = def_resp_expected_wait; my->max_client_count = options.at( "max-clients" ).as(); my->max_nodes_per_host = options.at( "p2p-max-nodes-per-host" ).as(); - my->num_clients = 0; my->started_sessions = 0; my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as(); @@ -3388,7 +3422,7 @@ namespace eosio { my->start_monitors(); - for( auto seed_node : my->supplied_peers ) { + for( const auto& seed_node : my->supplied_peers ) { connect( seed_node ); } handle_sighup(); @@ -3426,9 +3460,10 @@ namespace eosio { my->acceptor->close(); fc_ilog( logger, "close ${s} connections",( "s",my->connections.size()) ); + std::lock_guard g( my->connections_mtx ); for( auto& con : my->connections ) { fc_dlog( logger, "close: ${p}", ("p",con->peer_name()) ); - my->close( con ); + con->close(); } my->connections.clear(); } @@ -3453,18 +3488,21 @@ namespace eosio { connection_ptr c = std::make_shared(host); fc_dlog(logger,"adding new connection to the list"); + std::unique_lock g( my->connections_mtx ); my->connections.insert( c ); + g.unlock(); fc_dlog(logger,"calling active connector"); - my->connect( c ); + my->resolve_and_connect( c ); return "added connection"; } string net_plugin::disconnect( const string& host ) { + std::lock_guard g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { - if( (*itr)->peer_addr == host ) { + if( (*itr)->peer_address() == host ) { (*itr)->reset(); fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) ); - my->close(*itr); + (*itr)->close(); my->connections.erase(itr); return "connection removed"; } @@ -3481,6 +3519,7 @@ namespace eosio { vector net_plugin::connections()const { vector result; + std::lock_guard g( my->connections_mtx ); result.reserve( my->connections.size() ); for( const auto& c : my->connections ) { result.push_back( c->get_status() ); @@ -3488,8 +3527,9 @@ namespace eosio { return result; } connection_ptr net_plugin_impl::find_connection(const string& host )const { + std::lock_guard g( connections_mtx ); for( const auto& c : connections ) - if( c->peer_addr == host ) return c; + if( c->peer_address() == host ) return c; return connection_ptr(); } From 4a40807dbbf04fa71772a82edc1bc6a888d4267e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 21 Mar 2019 23:51:21 -0500 Subject: [PATCH 0138/1648] Fix deadlock on close when closing connection that is syncing from peer --- 
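Note on the fix below (a minimal sketch with stand-in names, not code from this patch): the close path stops handing the sync manager a shared_from_this() reference and passes nullptr instead, and the sync logic gains a null check, so a connection tearing itself down never re-enters the sync state machine with a reference back to itself.

#include <algorithm>
#include <cstdint>
#include <memory>
#include <mutex>

struct peer_stub {
   uint32_t last_irreversible_block_num = 0;
};

class sync_stub {
   std::mutex mtx_;
   uint32_t   known_lib_num_ = 0;
public:
   // nullptr is an accepted argument: a closing peer passes nullptr rather than
   // shared_from_this(), so no reference back into the dying peer is created and
   // the per-peer branch is simply skipped (mirrors the added `if( !c ) return;`).
   void reset_lib_num( const std::shared_ptr<peer_stub>& c ) {
      std::lock_guard<std::mutex> g( mtx_ );
      if( !c ) return;
      known_lib_num_ = std::max( known_lib_num_, c->last_irreversible_block_num );
   }
};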
plugins/net_plugin/net_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d1b899c32c6..8cdccb7c9ee 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -915,7 +915,7 @@ struct msg_handler : public fc::visitor { sent_handshake_count = 0; last_handshake_recv = handshake_message(); last_handshake_sent = handshake_message(); - my_impl->sync_master->reset_lib_num(shared_from_this()); + my_impl->sync_master->reset_lib_num(nullptr); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); <<<<<<< HEAD @@ -1366,6 +1366,7 @@ struct msg_handler : public fc::visitor { if(state == in_sync) { source.reset(); } + if( !c ) return; if( c->current() ) { if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num) { sync_known_lib_num =c->last_handshake_recv.last_irreversible_block_num; From a0489e137a10d375079f4636570a053340691c11 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 22 Mar 2019 13:28:15 -0500 Subject: [PATCH 0139/1648] Move socket ownership into connection. --- plugins/net_plugin/net_plugin.cpp | 279 ++++++++++++++++-------------- 1 file changed, 149 insertions(+), 130 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8cdccb7c9ee..7d201ee720f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -53,7 +53,6 @@ namespace eosio { using connection_ptr = std::shared_ptr; using connection_wptr = std::weak_ptr; - using socket_ptr = std::shared_ptr; using io_work_t = boost::asio::executor_work_guard; template @@ -207,7 +206,7 @@ namespace eosio { string p2p_server_address; uint32_t max_client_count = 0; uint32_t max_nodes_per_host = 1; - uint32_t current_connection_id = 0; + std::atomic current_connection_id{0}; vector supplied_peers; vector allowed_peers; ///< peer keys allowed to connect @@ -224,7 +223,7 @@ namespace eosio { connection_ptr find_connection(const string& host)const; - mutable std::mutex connections_mtx; + mutable std::mutex connections_mtx; // switch to shared_mutex in C++17 std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection bool done = false; unique_ptr< sync_manager > sync_master; @@ -254,7 +253,6 @@ namespace eosio { string user_agent_name; chain_plugin* chain_plug = nullptr; producer_plugin* producer_plug = nullptr; - int started_sessions = 0; shared_ptr resolver; @@ -271,7 +269,6 @@ namespace eosio { bool resolve_and_connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); - bool start_session(const connection_ptr& c); void start_listen_loop(); void start_read_message(const connection_ptr& c); @@ -546,19 +543,18 @@ namespace eosio { public: explicit connection( string endpoint ); - explicit connection( socket_ptr s ); + connection(); ~connection(); - void start(); + bool start_session(); - bool socket_is_open() const { return socket_open.load(); } // thread safe - const string& peer_address() const { return peer_addr; } // thread safe - const string& remote_address() const { return socket_open.load() ? 
remote_endpoint_ip : unknown; } // thread safe, not updated after start() + bool socket_is_open() const { return socket_open.load(); } // thread safe, atomic + const string& peer_address() const { return peer_addr; } // thread safe, const + const string& remote_address() const { return socket_open.load() ? remote_endpoint_ip : unknown; } // thread safe, not updated after start_session() private: static const string unknown; - void initialize(); // only called from constructor void update_endpoints(); public: @@ -566,6 +562,7 @@ namespace eosio { optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive boost::asio::io_context::strand strand; +<<<<<<< HEAD <<<<<<< HEAD socket_ptr socket; ======= @@ -573,6 +570,9 @@ namespace eosio { <<<<<<< HEAD >>>>>>> Protect start_read_message via strand ======= +======= + tcp::socket socket; // only accessed through strand after construction +>>>>>>> Move socket ownership into connection. private: std::atomic socket_open{false}; public: @@ -619,9 +619,9 @@ namespace eosio { public: std::mutex response_expected_timer_mtx; - unique_ptr response_expected_timer; + boost::asio::steady_timer response_expected_timer; std::mutex read_delay_timer_mtx; - unique_ptr read_delay_timer; + boost::asio::steady_timer read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; std::atomic fork_head_num{0}; // provides memory barrier for fork_head @@ -657,6 +657,9 @@ namespace eosio { bool current(); void reset(); void close(); + private: + static void _close( connection* self ); // for easy capture + public: void send_handshake(); /** \name Peer Timestamps @@ -809,8 +812,12 @@ struct msg_handler : public fc::visitor { strand( app().get_io_service() ), ======= strand( *my_impl->server_ioc ), +<<<<<<< HEAD >>>>>>> Protect start_read_message via strand socket( std::make_shared( std::ref( *my_impl->server_ioc ))), +======= + socket( *my_impl->server_ioc ), +>>>>>>> Move socket ownership into connection. node_id(), connection_id( ++my_impl->current_connection_id ), last_handshake_recv(), @@ -820,24 +827,28 @@ struct msg_handler : public fc::visitor { syncing(false), protocol_version(0), peer_addr(endpoint), - response_expected_timer(), - read_delay_timer(), + response_expected_timer( *my_impl->server_ioc ), + read_delay_timer( *my_impl->server_ioc ), no_retry(no_reason), last_req() { fc_ilog( logger, "created connection to ${n}", ("n", endpoint) ); - initialize(); + node_id.data()[0] = 0; } - connection::connection( socket_ptr s ) + connection::connection() : peer_requested(), server_ioc( my_impl->server_ioc ), <<<<<<< HEAD strand( app().get_io_service() ), ======= strand( *my_impl->server_ioc ), +<<<<<<< HEAD >>>>>>> Protect start_read_message via strand socket( s ), +======= + socket( *my_impl->server_ioc ), +>>>>>>> Move socket ownership into connection. 
node_id(), connection_id( ++my_impl->current_connection_id ), last_handshake_recv(), @@ -847,43 +858,47 @@ struct msg_handler : public fc::visitor { syncing(false), protocol_version(0), peer_addr(), - response_expected_timer(), - read_delay_timer(), + response_expected_timer( *my_impl->server_ioc ), + read_delay_timer( *my_impl->server_ioc ), no_retry(no_reason), last_req() { fc_ilog( logger, "accepted network connection" ); - initialize(); + node_id.data()[0] = 0; } connection::~connection() { pending_message_buffer.reset(); } - void connection::initialize() { - auto *rnd = node_id.data(); - rnd[0] = 0; - // called only from constructor, no mutex needed - response_expected_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); - read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); - } - void connection::update_endpoints() { boost::system::error_code ec; - auto rep = socket->remote_endpoint(ec); + auto rep = socket.remote_endpoint(ec); remote_endpoint_ip = ec ? unknown : rep.address().to_string(); remote_endpoint_port = ec ? unknown : std::to_string(rep.port()); - auto lep = socket->local_endpoint(ec); + auto lep = socket.local_endpoint(ec); local_endpoint_ip = ec ? unknown : lep.address().to_string(); local_endpoint_port = ec ? unknown : std::to_string(lep.port()); } - void connection::start() { + bool connection::start_session() { verify_strand_in_this_thread( strand, __func__, __LINE__ ); update_endpoints(); - socket_open = true; + boost::asio::ip::tcp::no_delay nodelay( true ); + boost::system::error_code ec; + socket.set_option( nodelay, ec ); + if( ec ) { + fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", peer_name())( "error", ec.message() ) ); + connecting = false; + close(); + return false; + } else { + socket_open = true; + my_impl->start_read_message( shared_from_this() ); + return true; + } } bool connection::connected() { @@ -903,14 +918,21 @@ struct msg_handler : public fc::visitor { } void connection::close() { - socket_open = false; - socket->close(); - flush_queues(); - connecting = false; - syncing = false; - if( last_req ) { - my_impl->dispatcher->retry_fetch(shared_from_this()); + strand.dispatch( [self = shared_from_this()]() { + connection::_close( self.get() ); + }); + } + + void connection::_close( connection* self ) { + self->socket_open = false; + self->socket.close(); + self->flush_queues(); + self->connecting = false; + self->syncing = false; + if( self->last_req ) { + my_impl->dispatcher->retry_fetch( self->shared_from_this() ); } +<<<<<<< HEAD reset(); sent_handshake_count = 0; last_handshake_recv = handshake_message(); @@ -938,6 +960,18 @@ struct msg_handler : public fc::visitor { std::lock_guard g( read_delay_timer_mtx ); if( read_delay_timer ) read_delay_timer->cancel(); >>>>>>> Use unique_lock instead of lock_guard to clean up code +======= + self->reset(); + self->sent_handshake_count = 0; + self->last_handshake_recv = handshake_message(); + self->last_handshake_sent = handshake_message(); + my_impl->sync_master->reset_lib_num( nullptr ); + fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); + self->cancel_wait(); + + std::lock_guard g( self->read_delay_timer_mtx ); + self->read_delay_timer.cancel(); +>>>>>>> Move socket ownership into connection. 
} void connection::blk_send_branch() { @@ -1061,56 +1095,45 @@ struct msg_handler : public fc::visitor { void connection::do_queue_write(int priority) { if( !buffer_queue.ready_to_send() ) return; - connection_wptr c(shared_from_this()); - if(!socket->is_open()) { - fc_elog(logger,"socket not open to ${p}",("p",peer_name())); - close(); - return; - } + connection_ptr c(shared_from_this()); + std::vector bufs; buffer_queue.fill_out_buffer( bufs ); +<<<<<<< HEAD boost::asio::async_write(*socket, bufs, boost::asio::bind_executor(strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { app().post(priority, [c, priority, ec, w]() { +======= + boost::asio::async_write( socket, bufs, + boost::asio::bind_executor( strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { +>>>>>>> Move socket ownership into connection. try { - auto conn = c.lock(); - if(!conn) - return; + c->buffer_queue.out_callback( ec, w ); - conn->buffer_queue.out_callback( ec, w ); - - if(ec) { - string pname = conn ? conn->peer_name() : "no connection name"; - if( ec.value() != boost::asio::error::eof) { - fc_elog( logger, "Error sending to peer ${p}: ${i}", ("p",pname)("i", ec.message()) ); - } - else { - fc_wlog( logger, "connection closure detected on write to ${p}",("p",pname) ); + if( ec ) { + if( ec.value() != boost::asio::error::eof ) { + fc_elog( logger, "Error sending to peer ${p}: ${i}", ("p", c->peer_name())( "i", ec.message() ) ); + } else { + fc_wlog( logger, "connection closure detected on write to ${p}", ("p", c->peer_name()) ); } - conn->close(); + c->close(); return; } - conn->buffer_queue.clear_out_queue(); - conn->enqueue_sync_block(); - conn->do_queue_write( priority ); - } - catch(const std::exception &ex) { - auto conn = c.lock(); - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger,"Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.what()) ); - } - catch(const fc::exception &ex) { - auto conn = c.lock(); - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger,"Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.to_string()) ); - } - catch(...) { - auto conn = c.lock(); - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger,"Exception in do_queue_write to ${p}", ("p",pname) ); + c->buffer_queue.clear_out_queue(); + c->enqueue_sync_block(); + c->do_queue_write( priority ); + } catch( const std::exception& ex ) { + fc_elog( logger, "Exception in do_queue_write to ${p} ${s}", ("p", c->peer_name())( "s", ex.what() ) ); + } catch( const fc::exception& ex ) { + fc_elog( logger, "Exception in do_queue_write to ${p} ${s}", ("p", c->peer_name())( "s", ex.to_string() ) ); + } catch( ... ) { + fc_elog( logger, "Exception in do_queue_write to ${p}", ("p", c->peer_name()) ); } +<<<<<<< HEAD }); +======= +>>>>>>> Move socket ownership into connection. 
})); } @@ -1215,20 +1238,15 @@ struct msg_handler : public fc::visitor { bool trigger_send, int priority, go_away_reason close_after_send, bool to_sync_queue) { - connection_wptr weak_this = shared_from_this(); + connection_ptr self = shared_from_this(); queue_write(send_buffer,trigger_send, priority, - [weak_this, close_after_send](boost::system::error_code ec, std::size_t ) { - connection_ptr conn = weak_this.lock(); - if (conn) { + [conn{std::move(self)}, close_after_send](boost::system::error_code ec, std::size_t ) { if (close_after_send != no_reason) { fc_elog( logger, "sent a go away message: ${r}, closing connection to ${p}", ("r", reason_str(close_after_send))("p", conn->peer_name()) ); conn->close(); return; } - } else { - fc_wlog(logger, "connection expired before enqueued net_message called callback!"); - } }, to_sync_queue); } @@ -1236,16 +1254,15 @@ struct msg_handler : public fc::visitor { // thread safe void connection::cancel_wait() { std::lock_guard g( response_expected_timer_mtx ); - if (response_expected_timer) - response_expected_timer->cancel(); + response_expected_timer.cancel(); } // thread safe void connection::sync_wait() { connection_wptr c(shared_from_this()); std::lock_guard g( response_expected_timer_mtx ); - response_expected_timer->expires_from_now( my_impl->resp_expected_period); - response_expected_timer->async_wait( [c]( boost::system::error_code ec ) { + response_expected_timer.expires_from_now( my_impl->resp_expected_period); + response_expected_timer.async_wait( [c]( boost::system::error_code ec ) { app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { @@ -1262,8 +1279,8 @@ struct msg_handler : public fc::visitor { void connection::fetch_wait() { connection_wptr c(shared_from_this()); std::lock_guard g( response_expected_timer_mtx ); - response_expected_timer->expires_from_now( my_impl->resp_expected_period); - response_expected_timer->async_wait( [c]( boost::system::error_code ec ) { + response_expected_timer.expires_from_now( my_impl->resp_expected_period); + response_expected_timer.async_wait( [c]( boost::system::error_code ec ) { app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { @@ -2023,6 +2040,7 @@ struct msg_handler : public fc::visitor { auto current_endpoint = *endpoint_itr; ++endpoint_itr; c->connecting = true; +<<<<<<< HEAD connection_wptr weak_conn = c; c->socket->async_connect( current_endpoint, boost::asio::bind_executor( c->strand, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { @@ -2031,6 +2049,12 @@ struct msg_handler : public fc::visitor { if( !c ) return; if( !err && c->socket->is_open()) { if( start_session( c )) { +======= + c->socket.async_connect( current_endpoint, + boost::asio::bind_executor( c->strand, [c, endpoint_itr, this]( const boost::system::error_code& err ) { + if( !err && c->socket.is_open()) { + if( c->start_session() ) { +>>>>>>> Move socket ownership into connection. c->send_handshake(); } } else { @@ -2043,6 +2067,7 @@ struct msg_handler : public fc::visitor { c->close(); } } +<<<<<<< HEAD } ); } ) ); } @@ -2067,18 +2092,20 @@ struct msg_handler : public fc::visitor { // con->readloop_complete = bf::async( [=](){ read_loop( con ); } ); // con->writeloop_complete = bf::async( [=](){ write_loop con ); } ); } +======= + } ) ); +>>>>>>> Move socket ownership into connection. 
} - void net_plugin_impl::start_listen_loop() { - auto new_socket = std::make_shared( std::ref( *server_ioc ) ); - acceptor->async_accept( *new_socket, [new_socket, this, ioc = server_ioc]( boost::system::error_code ec ) { - // called from thread_pool threads, new_socket not shared yet + connection_ptr new_connection = std::make_shared(); + acceptor->async_accept( new_connection->socket, + boost::asio::bind_executor( new_connection->strand, [new_connection, this]( boost::system::error_code ec ) { if( !ec ) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; - const auto& paddr_add = new_socket->remote_endpoint( rec ).address(); + const auto& paddr_add = new_connection->socket.remote_endpoint( rec ).address(); string paddr_str; if( rec ) { fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message()) ); @@ -2097,8 +2124,7 @@ struct msg_handler : public fc::visitor { } g.unlock(); if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count) ) { - connection_ptr new_connection = std::make_shared( new_socket ); - if( start_session( new_connection ) ) { + if( new_connection->start_session() ) { g.lock(); connections.insert( new_connection ); g.unlock(); @@ -2111,7 +2137,8 @@ struct msg_handler : public fc::visitor { } else { fc_elog( logger, "Error max_client_count ${m} exceeded", ("m", max_client_count) ); } - new_socket->close(); // new_socket never associated with a connection + // new_connection never added to connections and start_session not called, lifetime will end + new_connection->socket.close(); } } } else { @@ -2130,16 +2157,13 @@ struct msg_handler : public fc::visitor { } } start_listen_loop(); - }); + })); } // only called from strand thread void net_plugin_impl::start_read_message(const connection_ptr& conn) { try { - if(!conn->socket) { - return; - } connection_wptr weak_conn = conn; <<<<<<< HEAD @@ -2160,7 +2184,7 @@ struct msg_handler : public fc::visitor { const size_t max_socket_read_watermark = 4096; std::size_t socket_read_watermark = std::min(minimum_read, max_socket_read_watermark); boost::asio::socket_base::receive_low_watermark read_watermark_opt(socket_read_watermark); - conn->socket->set_option(read_watermark_opt); + conn->socket.set_option(read_watermark_opt); } auto completion_handler = [minimum_read](boost::system::error_code ec, std::size_t bytes_transferred) -> std::size_t { @@ -2203,6 +2227,7 @@ struct msg_handler : public fc::visitor { reads_in_flight > 2*def_max_reads_in_flight || trx_in_progress_size > 2*def_max_trx_in_progress_size ) { +<<<<<<< HEAD fc_wlog( logger, "queues over full, giving up on connection" ); <<<<<<< HEAD app().post( priority::medium, [weak_conn]() { @@ -2214,12 +2239,16 @@ struct msg_handler : public fc::visitor { fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); conn->close(); }); +======= + fc_elog( logger, "queues over full, giving up on connection, closing connection to: ${p}", + ("p", conn->peer_name()) ); + conn->close(); +>>>>>>> Move socket ownership into connection. 
return; } std::lock_guard g( conn->read_delay_timer_mtx ); - if( !conn->read_delay_timer ) return; - conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); - conn->read_delay_timer->async_wait( + conn->read_delay_timer.expires_from_now( def_read_delay_for_full_write_queue ); + conn->read_delay_timer.async_wait( boost::asio::bind_executor(conn->strand, [this, weak_conn]( boost::system::error_code ) { auto conn = weak_conn.lock(); if( !conn ) return; @@ -2229,15 +2258,10 @@ struct msg_handler : public fc::visitor { } ++conn->reads_in_flight; - boost::asio::async_read(*conn->socket, + boost::asio::async_read( conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, boost::asio::bind_executor( conn->strand, - [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { - auto conn = weak_conn.lock(); - if (!conn) { - return; - } - + [this, conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { --conn->reads_in_flight; <<<<<<< HEAD <<<<<<< HEAD @@ -2320,6 +2344,7 @@ struct msg_handler : public fc::visitor { <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= connection_wptr weak_conn = conn; >>>>>>> Remove descriptions of tasks as not merged into develop yet @@ -2344,17 +2369,15 @@ struct msg_handler : public fc::visitor { fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); conn->close(); }); +======= + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + conn->close(); +>>>>>>> Move socket ownership into connection. } })); } catch (...) { - fc_elog( logger, "Undefined exception in start_read_message" ); - connection_wptr weak_conn = conn; - app().post( priority::medium, [weak_conn]() { - auto conn = weak_conn.lock(); - if( !conn ) return; - fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - conn->close(); - }); + fc_elog( logger, "Undefined exception in start_read_message, closing connection to: ${p}", ("p", conn->peer_name()) ); + conn->close(); } } @@ -3277,7 +3300,6 @@ struct msg_handler : public fc::visitor { my->resp_expected_period = def_resp_expected_wait; my->max_client_count = options.at( "max-clients" ).as(); my->max_nodes_per_host = options.at( "p2p-max-nodes-per-host" ).as(); - my->started_sessions = 0; my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as(); @@ -3440,6 +3462,9 @@ struct msg_handler : public fc::visitor { if( my->server_ioc_work ) my->server_ioc_work->reset(); + if( my->server_ioc ) + my->server_ioc->stop(); + { std::lock_guard g( my->connector_check_timer_mtx ); if( my->connector_check_timer ) @@ -3455,22 +3480,16 @@ struct msg_handler : public fc::visitor { } my->done = true; - if( my->acceptor ) { - fc_ilog( logger, "close acceptor" ); - my->acceptor->cancel(); - my->acceptor->close(); - - fc_ilog( logger, "close ${s} connections",( "s",my->connections.size()) ); + { + fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); std::lock_guard g( my->connections_mtx ); for( auto& con : my->connections ) { - fc_dlog( logger, "close: ${p}", ("p",con->peer_name()) ); + fc_dlog( logger, "close: ${p}", ("p", con->peer_name()) ); con->close(); } my->connections.clear(); } - if( my->server_ioc ) - my->server_ioc->stop(); if( my->thread_pool ) { my->thread_pool->join(); my->thread_pool->stop(); From 75832046b01bf6a4bd32f7bbc289135ea20f206c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 22 Mar 2019 13:55:14 -0500 Subject: [PATCH 
0140/1648] Make queued_buffer thread safe --- plugins/net_plugin/net_plugin.cpp | 53 ++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 12 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7d201ee720f..1214e32060b 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -458,33 +458,44 @@ namespace eosio { static void populate(handshake_message &hello); }; + // thread safe class queued_buffer : boost::noncopyable { public: void clear_write_queue() { + std::lock_guard g( _mtx ); _write_queue.clear(); _sync_write_queue.clear(); _write_queue_size = 0; } void clear_out_queue() { + std::lock_guard g( _mtx ); while ( _out_queue.size() > 0 ) { _out_queue.pop_front(); } } - // thread safe - uint32_t write_queue_size() const { return _write_queue_size; } + uint32_t write_queue_size() const { + std::lock_guard g( _mtx ); + return _write_queue_size; + } - bool is_out_queue_empty() const { return _out_queue.empty(); } + bool is_out_queue_empty() const { + std::lock_guard g( _mtx ); + return _out_queue.empty(); + } bool ready_to_send() const { + std::lock_guard g( _mtx ); // if out_queue is not empty then async_write is in progress return ((!_sync_write_queue.empty() || !_write_queue.empty()) && _out_queue.empty()); } + // @param callback must not callback into queued_buffer bool add_write_queue( const std::shared_ptr>& buff, std::function callback, bool to_sync_queue ) { + std::lock_guard g( _mtx ); if( to_sync_queue ) { _sync_write_queue.push_back( {buff, callback} ); } else { @@ -498,6 +509,7 @@ namespace eosio { } void fill_out_buffer( std::vector& bufs ) { + std::lock_guard g( _mtx ); if( _sync_write_queue.size() > 0 ) { // always send msgs from sync_write_queue first fill_out_buffer( bufs, _sync_write_queue ); } else { // postpone real_time write_queue if sync queue is not empty @@ -507,6 +519,7 @@ namespace eosio { } void out_callback( boost::system::error_code ec, std::size_t w ) { + std::lock_guard g( _mtx ); for( auto& m : _out_queue ) { m.callback( ec, w ); } @@ -531,7 +544,8 @@ namespace eosio { std::function callback; }; - std::atomic _write_queue_size{0}; + mutable std::mutex _mtx; + uint32_t _write_queue_size{0}; deque _write_queue; deque _sync_write_queue; // sync_write_queue will be sent first deque _out_queue; @@ -918,7 +932,7 @@ struct msg_handler : public fc::visitor { } void connection::close() { - strand.dispatch( [self = shared_from_this()]() { + strand.post( [self = shared_from_this()]() { connection::_close( self.get() ); }); } @@ -1051,12 +1065,15 @@ struct msg_handler : public fc::visitor { syncing = false; } + // thread safe void connection::send_handshake() { - handshake_initializer::populate(last_handshake_sent); - last_handshake_sent.generation = ++sent_handshake_count; - fc_dlog(logger, "Sending handshake generation ${g} to ${ep}", - ("g",last_handshake_sent.generation)("ep", peer_name())); - enqueue(last_handshake_sent); + app().post( priority::low, [c = shared_from_this()]() { + handshake_initializer::populate( c->last_handshake_sent ); + c->last_handshake_sent.generation = ++c->sent_handshake_count; + fc_dlog( logger, "Sending handshake generation ${g} to ${ep}", + ("g", c->last_handshake_sent.generation)( "ep", c->peer_name() ) ); + c->enqueue( c->last_handshake_sent ); + }); } void connection::send_time() { @@ -1092,6 +1109,7 @@ struct msg_handler : public fc::visitor { } } + // called from connection strand and application thread void connection::do_queue_write(int 
priority) { if( !buffer_queue.ready_to_send() ) return; @@ -1100,6 +1118,7 @@ struct msg_handler : public fc::visitor { std::vector bufs; buffer_queue.fill_out_buffer( bufs ); +<<<<<<< HEAD <<<<<<< HEAD boost::asio::async_write(*socket, bufs, boost::asio::bind_executor(strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { @@ -1108,6 +1127,11 @@ struct msg_handler : public fc::visitor { boost::asio::async_write( socket, bufs, boost::asio::bind_executor( strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { >>>>>>> Move socket ownership into connection. +======= + strand.dispatch( [c{std::move(c)}, bufs{std::move(bufs)}, priority]() { + boost::asio::async_write( c->socket, bufs, + boost::asio::bind_executor( c->strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { +>>>>>>> Make queued_buffer thread safe try { c->buffer_queue.out_callback( ec, w ); @@ -1130,11 +1154,16 @@ struct msg_handler : public fc::visitor { } catch( ... ) { fc_elog( logger, "Exception in do_queue_write to ${p}", ("p", c->peer_name()) ); } +<<<<<<< HEAD <<<<<<< HEAD }); ======= >>>>>>> Move socket ownership into connection. })); +======= + })); + }); +>>>>>>> Make queued_buffer thread safe } void connection::cancel_sync(go_away_reason reason) { @@ -3188,8 +3217,8 @@ struct msg_handler : public fc::visitor { return chain::signature_type(); } - void - handshake_initializer::populate( handshake_message &hello) { + // call from main application thread + void handshake_initializer::populate( handshake_message& hello ) { hello.network_version = net_version_base + net_version; hello.chain_id = my_impl->chain_id; hello.node_id = my_impl->node_id; From b9861c8f570bbdb00b0c84f4207759302ba7d7ee Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 22 Mar 2019 15:58:17 -0500 Subject: [PATCH 0141/1648] Make use of resolver thread safe --- plugins/net_plugin/net_plugin.cpp | 78 +++++++++++++++++-------------- 1 file changed, 42 insertions(+), 36 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 1214e32060b..d070275b350 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -254,8 +254,6 @@ namespace eosio { chain_plugin* chain_plug = nullptr; producer_plugin* producer_plug = nullptr; - shared_ptr resolver; - bool use_socket_read_watermark = false; compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; @@ -268,7 +266,7 @@ namespace eosio { bool resolve_and_connect(const connection_ptr& c); - void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); + void connect(const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr); void start_listen_loop(); void start_read_message(const connection_ptr& c); @@ -621,7 +619,7 @@ namespace eosio { handshake_message last_handshake_recv; handshake_message last_handshake_sent; int16_t sent_handshake_count = 0; - bool connecting = false; + std::atomic connecting{false}; bool syncing = false; uint16_t protocol_version = 0; private: @@ -636,7 +634,7 @@ namespace eosio { boost::asio::steady_timer response_expected_timer; std::mutex read_delay_timer_mtx; boost::asio::steady_timer read_delay_timer; - go_away_reason no_retry = no_reason; + std::atomic no_retry{no_reason}; block_id_type fork_head; std::atomic fork_head_num{0}; // provides memory barrier for fork_head optional last_req; @@ -2012,35 +2010,27 @@ struct msg_handler : public fc::visitor { 
//------------------------------------------------------------------------ + // called from any thread bool net_plugin_impl::resolve_and_connect(const connection_ptr& c) { if( c->no_retry != go_away_reason::no_reason) { fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( c->no_retry ))); return false; } - auto colon = c->peer_address().find(':'); - + string::size_type colon = c->peer_address().find(':'); if (colon == std::string::npos || colon == 0) { fc_elog( logger, "Invalid peer address. must be \"host:port\": ${p}", ("p",c->peer_address()) ); - std::lock_guard g( my_impl->connections_mtx ); - for ( auto& cp : connections ) { - if( cp->peer_address() == c->peer_address() ) { - cp->reset(); - cp->close(); - connections.erase( cp ); - break; - } - } return false; } - auto host = c->peer_address().substr( 0, colon ); - auto port = c->peer_address().substr( colon + 1); + string host = c->peer_address().substr( 0, colon ); + string port = c->peer_address().substr( colon + 1); idump((host)(port)); - tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str() ); + tcp::resolver::query query( tcp::v4(), host, port ); connection_wptr weak_conn = c; // Note: need to add support for IPv6 too +<<<<<<< HEAD resolver->async_resolve( query, boost::asio::bind_executor( c->strand, [weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { app().post( priority::low, [err, endpoint_itr, weak_conn, this]() { @@ -2057,11 +2047,25 @@ struct msg_handler : public fc::visitor { } ) ); ======= } ); +======= + auto resolver = std::make_shared( *server_ioc ); + resolver->async_resolve( query, + [resolver, ioc = server_ioc, weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { + auto c = weak_conn.lock(); + if( !c ) return; + if( !err ) { + connect( c, resolver, endpoint_itr ); + } else { + fc_elog( logger, "Unable to resolve ${add}: ${error}", ("add", c->peer_name())( "error", err.message() ) ); + } + } ); +>>>>>>> Make use of resolver thread safe return true; >>>>>>> Made all access to impl->connections thread safe } - void net_plugin_impl::connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr) { + // called from any thread + void net_plugin_impl::connect(const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr) { if( c->no_retry != go_away_reason::no_reason) { string rsn = reason_str(c->no_retry); return; @@ -2080,7 +2084,7 @@ struct msg_handler : public fc::visitor { if( start_session( c )) { ======= c->socket.async_connect( current_endpoint, - boost::asio::bind_executor( c->strand, [c, endpoint_itr, this]( const boost::system::error_code& err ) { + boost::asio::bind_executor( c->strand, [resolver, c, endpoint_itr, this]( const boost::system::error_code& err ) { if( !err && c->socket.is_open()) { if( c->start_session() ) { >>>>>>> Move socket ownership into connection. 
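The hunk above drops the shared resolver member in favor of a resolver created per connection attempt; capturing the resolver's shared_ptr in the async_resolve handler keeps it alive until the callback fires, so nothing is shared across threads. A minimal sketch of that lifetime pattern, with stand-in names and the query/iterator flavor of the Asio resolver API this file already uses:

#include <boost/asio.hpp>
#include <iostream>
#include <memory>
#include <string>

using boost::asio::ip::tcp;

// Each attempt owns its own resolver; the shared_ptr captured by the handler
// keeps the resolver alive for exactly as long as the async_resolve is pending.
void resolve_host( boost::asio::io_context& ioc, const std::string& host, const std::string& port ) {
   auto resolver = std::make_shared<tcp::resolver>( ioc );
   tcp::resolver::query query( tcp::v4(), host, port );
   resolver->async_resolve( query,
      [resolver]( const boost::system::error_code& ec, tcp::resolver::iterator it ) {
         if( ec ) { std::cerr << "resolve failed: " << ec.message() << "\n"; return; }
         for( tcp::resolver::iterator end; it != end; ++it )
            std::cout << it->endpoint() << "\n";
      } );
}

int main() {
   boost::asio::io_context ioc;
   resolve_host( ioc, "example.com", "80" );
   ioc.run();
}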
@@ -2089,7 +2093,7 @@ struct msg_handler : public fc::visitor { } else { if( endpoint_itr != tcp::resolver::iterator()) { c->close(); - connect( c, endpoint_itr ); + connect( c, resolver, endpoint_itr ); } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; @@ -3007,19 +3011,17 @@ struct msg_handler : public fc::visitor { } } + // called from any thread void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { std::lock_guard g( connector_check_timer_mtx ); connector_check_timer->expires_from_now( du); connector_check_timer->async_wait( [this, from_connection](boost::system::error_code ec) { - app().post( priority::low, [this, from_connection, ec]() { if( !ec) { connection_monitor(from_connection); - } - else { + } else { fc_elog( logger, "Error from connection check monitor: ${m}",( "m", ec.message())); start_conn_timer( connector_period, std::weak_ptr()); } - }); }); } @@ -3101,6 +3103,7 @@ struct msg_handler : public fc::visitor { >>>>>>> Break expire into two steps } + // called from any thread void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); @@ -3115,7 +3118,10 @@ struct msg_handler : public fc::visitor { } if( !(*it)->socket_is_open() && !(*it)->connecting) { if( (*it)->peer_address().length() > 0) { - resolve_and_connect(*it); + if( !resolve_and_connect(*it) ) { + it = connections.erase(it); + continue; + } } else { it = connections.erase(it); continue; @@ -3405,14 +3411,14 @@ struct msg_handler : public fc::visitor { boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); } - my->resolver = std::make_shared( std::ref( *my->server_ioc )); if( my->p2p_address.size() > 0 ) { auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str()); // Note: need to add support for IPv6 too? - my->listen_endpoint = *my->resolver->resolve( query ); + tcp::resolver resolver( *my->server_ioc ); + my->listen_endpoint = *resolver.resolve( query ); my->acceptor.reset( new tcp::acceptor( *my->server_ioc ) ); @@ -3535,13 +3541,13 @@ struct msg_handler : public fc::visitor { if( my->find_connection( host ) ) return "already connected"; - connection_ptr c = std::make_shared(host); - fc_dlog(logger,"adding new connection to the list"); - std::unique_lock g( my->connections_mtx ); - my->connections.insert( c ); - g.unlock(); - fc_dlog(logger,"calling active connector"); - my->resolve_and_connect( c ); + connection_ptr c = std::make_shared( host ); + fc_dlog( logger, "calling active connector" ); + if( my->resolve_and_connect( c ) ) { + fc_dlog( logger, "adding new connection to the list" ); + std::unique_lock g( my->connections_mtx ); + my->connections.insert( c ); + } return "added connection"; } From c5b543d2dee2b9acf6d96513bcc986bf4e85f3d3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 22 Mar 2019 23:08:28 -0500 Subject: [PATCH 0142/1648] Use boost::shared_mutex for connections. Multithread broadcast block. 
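A minimal sketch of the reader/writer locking pattern adopted below, with stand-in types rather than code from this patch: read-mostly scans of the connection set (broadcast, keepalive, duplicate checks) take a shared lock and can run concurrently, while insert/erase paths take an exclusive lock.

#include <boost/thread/locks.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <memory>
#include <set>
#include <string>

struct conn { std::string peer; };
using conn_ptr = std::shared_ptr<conn>;

class conn_registry {
   mutable boost::shared_mutex mtx_;      // std::shared_mutex once on C++17
   std::set<conn_ptr>          conns_;
public:
   void insert( conn_ptr c ) {            // writers: exclusive lock
      boost::unique_lock<boost::shared_mutex> g( mtx_ );
      conns_.insert( std::move( c ) );
   }
   template<typename F>
   void for_each( F&& f ) const {         // readers: shared lock, many at once
      boost::shared_lock<boost::shared_mutex> g( mtx_ );
      for( const auto& c : conns_ ) f( *c );
   }
};

The block broadcast in this patch follows the same split: the send buffer is built once outside the lock, and the per-connection enqueue is posted to each connection's strand rather than performed while iterating the set.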
--- plugins/net_plugin/net_plugin.cpp | 88 ++++++++++++++++++------------- 1 file changed, 52 insertions(+), 36 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d070275b350..077c069e0f3 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include @@ -223,7 +225,7 @@ namespace eosio { connection_ptr find_connection(const string& host)const; - mutable std::mutex connections_mtx; // switch to shared_mutex in C++17 + mutable boost::shared_mutex connections_mtx; // switch to std::shared_mutex in C++17 std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection bool done = false; unique_ptr< sync_manager > sync_master; @@ -620,7 +622,7 @@ namespace eosio { handshake_message last_handshake_sent; int16_t sent_handshake_count = 0; std::atomic connecting{false}; - bool syncing = false; + std::atomic syncing{false}; uint16_t protocol_version = 0; private: const string peer_addr; @@ -1447,7 +1449,7 @@ struct msg_handler : public fc::visitor { if (conn && conn->current() ) { source = conn; } else { - std::lock_guard g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); if (my_impl->connections.size() == 1) { if (!source) { source = *my_impl->connections.begin(); @@ -1513,7 +1515,7 @@ struct msg_handler : public fc::visitor { void sync_manager::send_handshakes() { - std::lock_guard g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for( auto& ci : my_impl->connections ) { if( ci->current() ) { ci->send_handshake(); @@ -1633,7 +1635,7 @@ struct msg_handler : public fc::visitor { void sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { request_message req; req.req_blocks.mode = catch_up; - std::unique_lock g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for (const auto& cc : my_impl->connections) { // fork_head_num provides memory barrier for fork_head if( cc->fork_head_num > num || cc->fork_head == id ) { @@ -1708,7 +1710,7 @@ struct msg_handler : public fc::visitor { source.reset(); block_id_type null_id; - std::unique_lock g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for( const auto& cp : my_impl->connections ) { uint32_t fork_head_num = cp->fork_head_num.load(); // fork_head_num provides memory barrier for fork_head if (cp->fork_head == null_id) { @@ -1837,31 +1839,43 @@ struct msg_handler : public fc::visitor { stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib_num) ); } + // thread safe void dispatch_manager::bcast_block(const block_state_ptr& bs) { - uint32_t bnum = bs->block_num; - peer_block_state pbstate{bs->id, bnum}; - fc_dlog( logger, "bcast block ${b}", ("b", bnum) ); + fc_dlog( logger, "bcast block ${b}", ("b", bs->block_num) ); - std::shared_ptr> send_buffer; - std::lock_guard g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); + bool have_connection = false; for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; } - bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; - if( !has_block ) { - pbstate.connection_id = cp->connection_id; - if( !add_peer_block( pbstate ) ) { - continue; - } - if( !send_buffer ) { - send_buffer = create_send_buffer( bs->block ); - } - fc_dlog( logger, "bcast block ${b} to ${p}", 
("b", bnum)( "p", cp->peer_name() ) ); - cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); - } + have_connection = true; + break; } + g.unlock(); + if( !have_connection ) return; + std::shared_ptr> send_buffer = create_send_buffer( bs->block ); + + g.lock(); + for( auto& cp : my_impl->connections ) { + if( !cp->current() ) { + continue; + } + cp->strand.post( [this, cp, bs, send_buffer]() { + uint32_t bnum = bs->block_num; + // todo protect cp->last_handshake_recv + bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; + if( !has_block ) { + peer_block_state pbstate{bs->id, bnum, cp->connection_id}; + if( !add_peer_block( pbstate ) ) { + return; + } + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); + cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); + } + }); + } } void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { @@ -1890,7 +1904,7 @@ struct msg_handler : public fc::visitor { node_transaction_state nts = {id, trx_expiration, 0, 0}; std::shared_ptr> send_buffer; - std::lock_guard g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; @@ -1986,7 +2000,7 @@ struct msg_handler : public fc::visitor { ("b",modes_str(c->last_req->req_blocks.mode))("t",modes_str(c->last_req->req_trx.mode))); return; } - std::unique_lock g( my_impl->connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for (auto& conn : my_impl->connections) { if (conn == c || conn->last_req) { continue; @@ -2144,7 +2158,7 @@ struct msg_handler : public fc::visitor { fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message()) ); } else { paddr_str = paddr_add.to_string(); - std::unique_lock g( connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for( auto& conn : connections ) { if( conn->socket_is_open() ) { if( conn->peer_address().empty() ) { @@ -2512,7 +2526,7 @@ struct msg_handler : public fc::visitor { if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { fc_dlog(logger, "checking for duplicate" ); - std::lock_guard g( connections_mtx ); + boost::shared_lock g( my_impl->connections_mtx ); for(const auto& check : connections) { if(check == c) continue; @@ -3049,7 +3063,7 @@ struct msg_handler : public fc::visitor { if( ec ) { fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - std::lock_guard g( connections_mtx ); + boost::shared_lock g( connections_mtx ); for( auto& c : connections ) { if( c->socket_is_open() ) { c->send_time(); @@ -3108,7 +3122,7 @@ struct msg_handler : public fc::visitor { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); - std::unique_lock g( connections_mtx ); + boost::unique_lock g( connections_mtx ); auto it = (from ? 
connections.find(from) : connections.begin()); if (it == connections.end()) it = connections.begin(); while (it != connections.end()) { @@ -3134,8 +3148,10 @@ struct msg_handler : public fc::visitor { } void net_plugin_impl::accepted_block(const block_state_ptr& block) { - fc_dlog(logger,"signaled, id = ${id}",("id", block->id)); - dispatcher->bcast_block(block); + boost::asio::post( *server_ioc, [this, ioc=server_ioc, block]() { + fc_dlog( logger, "signaled, id = ${id}", ("id", block->id) ); + dispatcher->bcast_block( block ); + }); } void net_plugin_impl::transaction_ack(const std::pair& results) { @@ -3517,7 +3533,7 @@ struct msg_handler : public fc::visitor { my->done = true; { fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); - std::lock_guard g( my->connections_mtx ); + boost::unique_lock g( my->connections_mtx ); for( auto& con : my->connections ) { fc_dlog( logger, "close: ${p}", ("p", con->peer_name()) ); con->close(); @@ -3545,14 +3561,14 @@ struct msg_handler : public fc::visitor { fc_dlog( logger, "calling active connector" ); if( my->resolve_and_connect( c ) ) { fc_dlog( logger, "adding new connection to the list" ); - std::unique_lock g( my->connections_mtx ); + boost::unique_lock g( my->connections_mtx ); my->connections.insert( c ); } return "added connection"; } string net_plugin::disconnect( const string& host ) { - std::lock_guard g( my->connections_mtx ); + boost::unique_lock g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { if( (*itr)->peer_address() == host ) { (*itr)->reset(); @@ -3574,7 +3590,7 @@ struct msg_handler : public fc::visitor { vector net_plugin::connections()const { vector result; - std::lock_guard g( my->connections_mtx ); + boost::shared_lock g( my->connections_mtx ); result.reserve( my->connections.size() ); for( const auto& c : my->connections ) { result.push_back( c->get_status() ); @@ -3582,7 +3598,7 @@ struct msg_handler : public fc::visitor { return result; } connection_ptr net_plugin_impl::find_connection(const string& host )const { - std::lock_guard g( connections_mtx ); + boost::shared_lock g( connections_mtx ); for( const auto& c : connections ) if( c->peer_address() == host ) return c; return connection_ptr(); From 742181f85493dfe39b0d2d520b4d44276f45a290 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 07:16:05 -0500 Subject: [PATCH 0143/1648] Work toward making sync_manager and handshake message thread safe. 
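A minimal sketch of the lock-handoff idiom introduced below, with hypothetical names rather than the patch's own code: request_next_chunk now takes the already-held std::unique_lock by value, so ownership of sync_mtx moves into the callee and the callee decides exactly when to release it.

#include <cstdint>
#include <iostream>
#include <mutex>

class sync_state {
   std::mutex mtx_;
   uint32_t   next_expected_ = 1;

   // Takes the held lock by value: ownership transfers in, so the protected
   // reads happen first and the mutex is dropped before any slow work.
   void request_next_chunk( std::unique_lock<std::mutex> g ) {
      uint32_t start = next_expected_;   // still under the lock here
      g.unlock();                        // release before "network" work
      std::cout << "requesting blocks from " << start << "\n";
   }
public:
   void on_block_received( uint32_t blk_num ) {
      std::unique_lock<std::mutex> g( mtx_ );
      next_expected_ = blk_num + 1;
      request_next_chunk( std::move( g ) );   // hand the lock to the helper
   }
};

int main() {
   sync_state s;
   s.on_block_received( 42 );
}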
--- plugins/net_plugin/net_plugin.cpp | 694 ++++++++++++++++++------------ 1 file changed, 409 insertions(+), 285 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 077c069e0f3..0677261b2d1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -141,28 +141,29 @@ namespace eosio { in_sync }; + std::mutex sync_mtx; uint32_t sync_known_lib_num; uint32_t sync_last_requested_num; uint32_t sync_next_expected_num; uint32_t sync_req_span; - connection_ptr source; - stages state; + connection_ptr sync_source; + stages sync_state; chain_plugin* chain_plug = nullptr; - constexpr static auto stage_str(stages s); + private: + constexpr static auto stage_str( stages s ); + void set_state( stages s ); + bool is_sync_required(); + void request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn = connection_ptr() ); + void start_sync( const connection_ptr& c, uint32_t target ); + void verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); + static void send_handshakes(); public: explicit sync_manager(uint32_t span); - void set_state(stages s); - bool sync_required(); - void send_handshakes(); - bool is_active(const connection_ptr& conn); - void reset_lib_num(const connection_ptr& conn); - void request_next_chunk(const connection_ptr& conn = connection_ptr()); - void start_sync(const connection_ptr& c, uint32_t target); - void reassign_fetch(const connection_ptr& c, go_away_reason reason); - void verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id); + void sync_reset_lib_num(const connection_ptr& conn); + void sync_reassign_fetch(const connection_ptr& c, go_away_reason reason); void rejected_block(const connection_ptr& c, uint32_t blk_num); void sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); void recv_handshake(const connection_ptr& c, const handshake_message& msg); @@ -214,6 +215,7 @@ namespace eosio { vector allowed_peers; ///< peer keys allowed to connect std::map private_keys; ///< overlapping with producer keys, also authenticating non-producing nodes + // thread safe, only updated in plugin initialize enum possible_connections : char { None = 0, @@ -246,17 +248,24 @@ namespace eosio { const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; ///< Peer clock may be no more than 1 second skewed from our clock, including network latency. 
bool network_version_match = false; - chain_id_type chain_id; - fc::sha256 node_id; - std::atomic lib_num{0}; - uint32_t head_blk_num{0}; + chain_id_type chain_id; // thread safe, only updated in plugin_initialize + fc::sha256 node_id; // thread safe, only updated in plugin initialize + string user_agent_name; // thread safe, only updated in plugin initialize + + mutable std::mutex chain_info_mtx; + uint32_t chain_lib_num{0}; + uint32_t chain_head_blk_num{0}; + uint32_t chain_fork_head_blk_num{0}; + block_id_type chain_lib_id; + block_id_type chain_head_blk_id; + block_id_type chain_fork_head_blk_id; + eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; - string user_agent_name; chain_plugin* chain_plug = nullptr; producer_plugin* producer_plug = nullptr; - bool use_socket_read_watermark = false; + bool use_socket_read_watermark = false; // thread safe, not modified outside plugin_initialize compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; @@ -266,31 +275,17 @@ namespace eosio { std::shared_ptr server_ioc; optional server_ioc_work; + void update_chain_info(); + // lib_num, head_block_num, fork_head_blk_num, lib_id, head_blk_id, fork_head_blk_id + std::tuple get_chain_info() const; - bool resolve_and_connect(const connection_ptr& c); - void connect(const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr); void start_listen_loop(); - void start_read_message(const connection_ptr& c); - - /** \brief Process the next message from the pending message buffer - * - * Process the next message from the pending_message_buffer. - * message_length is the already determined length of the data - * part of the message that will handle the message. - * Returns true is successful. Returns false if an error was - * encountered unpacking or processing the message. - */ - bool process_next_message(const connection_ptr& conn, uint32_t message_length); - void accepted_block(const block_state_ptr&); + void on_accepted_block( const block_state_ptr& bs ); void transaction_ack(const std::pair&); - void on_irreversible_block( const block_state_ptr& blk ) { - lib_num = blk->block_num; - controller& cc = chain_plug->chain(); - head_blk_num = cc.head_block_num(); - } + void on_irreversible_block( const block_state_ptr& blk ); - bool is_valid( const handshake_message &msg); + static bool is_valid( const handshake_message& msg ); void handle_message(const connection_ptr& c, const handshake_message& msg); void handle_message(const connection_ptr& c, const chain_size_message& msg); @@ -455,7 +450,7 @@ namespace eosio { }; struct handshake_initializer { - static void populate(handshake_message &hello); + static void populate(handshake_message& hello); }; // thread safe @@ -674,6 +669,21 @@ namespace eosio { private: static void _close( connection* self ); // for easy capture public: + + bool resolve_and_connect(); + void connect(const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr); + void start_read_message(); + + /** \brief Process the next message from the pending message buffer + * + * Process the next message from the pending_message_buffer. + * message_length is the already determined length of the data + * part of the message that will handle the message. + * Returns true is successful. Returns false if an error was + * encountered unpacking or processing the message. 
+ */ + bool process_next_message(uint32_t message_length); + void send_handshake(); /** \name Peer Timestamps @@ -746,8 +756,8 @@ namespace eosio { const string connection::unknown = ""; - -struct msg_handler : public fc::visitor { + // called from connection strand + struct msg_handler : public fc::visitor { net_plugin_impl& impl; connection_ptr c; msg_handler( net_plugin_impl& imp, const connection_ptr& conn) : impl(imp), c(conn) {} @@ -781,7 +791,9 @@ struct msg_handler : public fc::visitor { if( c ) impl->handle_message( c, ptr ); }); } + void operator()( packed_transaction&& msg ) const { + // continue call to handle_message on connection strand shared_ptr ptr = std::make_shared( std::move( msg ) ); <<<<<<< HEAD <<<<<<< HEAD @@ -798,11 +810,15 @@ struct msg_handler : public fc::visitor { >>>>>>> Move more of incoming transaction processing to thread pool } - template - void operator()( T&& msg ) const - { + void operator()( const handshake_message& msg ) const { + // continue call to handle_message on connection strand + impl.handle_message( c, msg ); + } + + void operator()( const chain_size_message& msg ) const { connection_wptr weak = c; <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { ======= @@ -811,6 +827,49 @@ struct msg_handler : public fc::visitor { ======= app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { >>>>>>> Remove descriptions of tasks as not merged into develop yet +======= + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const go_away_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const time_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const notice_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const request_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { + connection_ptr c = weak.lock(); + if(c) impl->handle_message( c, msg ); + }); + } + + void operator()( const sync_request_message& msg ) const { + connection_wptr weak = c; + app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { +>>>>>>> Work toward making sync_manager and handshake message thread safe. 
connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); @@ -910,7 +969,7 @@ struct msg_handler : public fc::visitor { return false; } else { socket_open = true; - my_impl->start_read_message( shared_from_this() ); + start_read_message(); return true; } } @@ -979,7 +1038,7 @@ struct msg_handler : public fc::visitor { self->sent_handshake_count = 0; self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); - my_impl->sync_master->reset_lib_num( nullptr ); + my_impl->sync_master->sync_reset_lib_num( nullptr ); fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); self->cancel_wait(); @@ -1065,9 +1124,8 @@ struct msg_handler : public fc::visitor { syncing = false; } - // thread safe void connection::send_handshake() { - app().post( priority::low, [c = shared_from_this()]() { + strand.post( [c = shared_from_this()]() { handshake_initializer::populate( c->last_handshake_sent ); c->last_handshake_sent.generation = ++c->sent_handshake_count; fc_dlog( logger, "Sending handshake generation ${g} to ${ep}", @@ -1288,20 +1346,13 @@ struct msg_handler : public fc::visitor { // thread safe void connection::sync_wait() { - connection_wptr c(shared_from_this()); + connection_ptr c(shared_from_this()); std::lock_guard g( response_expected_timer_mtx ); response_expected_timer.expires_from_now( my_impl->resp_expected_period); - response_expected_timer.async_wait( [c]( boost::system::error_code ec ) { - app().post(priority::low, [c, ec]() { - connection_ptr conn = c.lock(); - if (!conn) { - // connection was destroyed before this lambda was delivered - return; - } - - conn->sync_timeout(ec); - }); - } ); + response_expected_timer.async_wait( + boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { + c->sync_timeout( ec ); + } ) ); } // thread safe @@ -1322,17 +1373,17 @@ struct msg_handler : public fc::visitor { } ); } + // called from connection strand void connection::sync_timeout( boost::system::error_code ec ) { if( !ec ) { - my_impl->sync_master->reassign_fetch(shared_from_this(), benign_other); - } - else if( ec == boost::asio::error::operation_aborted) { - } - else { - fc_elog( logger,"setting timer for sync request got error ${ec}",("ec", ec.message()) ); + my_impl->sync_master->sync_reassign_fetch( shared_from_this(), benign_other ); + } else if( ec == boost::asio::error::operation_aborted ) { + } else { + fc_elog( logger, "setting timer for sync request got error ${ec}", ("ec", ec.message()) ); } } + // todo: last_handshake_recv not thread safe const string connection::peer_name() { if( !last_handshake_recv.p2p_address.empty() ) { return last_handshake_recv.p2p_address; @@ -1373,8 +1424,8 @@ struct msg_handler : public fc::visitor { ,sync_last_requested_num( 0 ) ,sync_next_expected_num( 1 ) ,sync_req_span( req_span ) - ,source() - ,state(in_sync) + ,sync_source() + ,sync_state(in_sync) { chain_plug = app().find_plugin(); EOS_ASSERT( chain_plug, chain::missing_chain_plugin_exception, "" ); @@ -1390,53 +1441,48 @@ struct msg_handler : public fc::visitor { } void sync_manager::set_state(stages newstate) { - if (state == newstate) { + if( sync_state == newstate ) { return; } - fc_dlog(logger, "old state ${os} becoming ${ns}",("os",stage_str(state))("ns",stage_str(newstate))); - state = newstate; + fc_dlog( logger, "old state ${os} becoming ${ns}", ("os", stage_str( sync_state ))( "ns", stage_str( newstate ) ) ); + sync_state = newstate; } - // uses controller, only call from application thread - bool 
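
The sync_wait() change above replaces the app().post() hop with a timer handler bound to the connection's strand via boost::asio::bind_executor, so the timeout runs serialized with the connection's other handlers and needs no extra locking of per-connection state. A self-contained sketch of that shape, using its own io_context and a plain steady_timer rather than net_plugin's types:

#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

int main() {
   boost::asio::io_context ioc;
   boost::asio::io_context::strand strand( ioc );
   boost::asio::steady_timer timer( ioc );

   timer.expires_after( std::chrono::milliseconds( 50 ) );
   timer.async_wait( boost::asio::bind_executor( strand,
      []( const boost::system::error_code& ec ) {
         if( !ec ) {
            std::cout << "timeout handler ran on the strand\n";
         } else if( ec == boost::asio::error::operation_aborted ) {
            // timer was cancelled before it fired; nothing to do
         }
      } ) );

   ioc.run();
}
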
sync_manager::is_active(const connection_ptr& c) { - if (state == head_catchup && c) { - auto fork_head_num = c->fork_head_num.load(); // provide memory barrier for c->fork_head - bool fhset = c->fork_head != block_id_type(); - fc_dlog( logger, "fork_head_num = ${fn} fork_head set = ${s}", ("fn", fork_head_num)( "s", fhset ) ); - return c->fork_head != block_id_type() && fork_head_num < chain_plug->chain().fork_db_head_block_num(); - } - return state != in_sync; - } - - void sync_manager::reset_lib_num(const connection_ptr& c) { - if(state == in_sync) { - source.reset(); + void sync_manager::sync_reset_lib_num(const connection_ptr& c) { + std::unique_lock g( sync_mtx ); + if( sync_state == in_sync ) { + sync_source.reset(); } if( !c ) return; if( c->current() ) { - if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num) { - sync_known_lib_num =c->last_handshake_recv.last_irreversible_block_num; + if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num ) { + sync_known_lib_num = c->last_handshake_recv.last_irreversible_block_num; } - } else if( c == source ) { + } else if( c == sync_source ) { sync_last_requested_num = 0; - request_next_chunk(); + request_next_chunk( std::move(g) ); } } - bool sync_manager::sync_required() { - fc_dlog(logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", - ("req",sync_last_requested_num)("recv",sync_next_expected_num)("known",sync_known_lib_num)("head",chain_plug->chain().fork_db_head_block_num())); + bool sync_manager::is_sync_required() { + fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", + ("req", sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) + ("head", chain_plug->chain().fork_db_head_block_num() ) ); return( sync_last_requested_num < sync_known_lib_num || chain_plug->chain().fork_db_head_block_num() < sync_last_requested_num ); } - void sync_manager::request_next_chunk( const connection_ptr& conn ) { - uint32_t head_block = chain_plug->chain().fork_db_head_block_num(); + // call with g_sync locked + void sync_manager::request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn ) { + uint32_t fork_head_block_num = 0; + uint32_t lib_block_num = 0; + std::tie( lib_block_num, std::ignore, fork_head_block_num, + std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info(); - if (head_block < sync_last_requested_num && source && source->current()) { - fc_ilog(logger, "ignoring request, head is ${h} last req = ${r} source is ${p}", - ("h",head_block)("r",sync_last_requested_num)("p",source->peer_name())); + if( fork_head_block_num < sync_last_requested_num && sync_source && sync_source->current() ) { + fc_ilog( logger, "ignoring request, head is ${h} last req = ${r} source is ${p}", + ("h", fork_head_block_num)( "r", sync_last_requested_num )( "p", sync_source->peer_address() ) ); return; } @@ -1447,25 +1493,25 @@ struct msg_handler : public fc::visitor { */ if (conn && conn->current() ) { - source = conn; + sync_source = conn; } else { boost::shared_lock g( my_impl->connections_mtx ); if (my_impl->connections.size() == 1) { - if (!source) { - source = *my_impl->connections.begin(); + if (!sync_source) { + sync_source = *my_impl->connections.begin(); } } else { // init to a linear array search auto cptr = my_impl->connections.begin(); auto cend = my_impl->connections.end(); // do we remember the previous source? 
- if (source) { + if (sync_source) { //try to find it in the list - cptr = my_impl->connections.find(source); + cptr = my_impl->connections.find( sync_source ); cend = cptr; if (cptr == my_impl->connections.end()) { //not there - must have been closed! cend is now connections.end, so just flatten the ring. - source.reset(); + sync_source.reset(); cptr = my_impl->connections.begin(); } else { //was found - advance the start to the next. cend is the old source. @@ -1479,8 +1525,8 @@ struct msg_handler : public fc::visitor { auto cstart_it = cptr; do { //select the first one which is current and break out. - if((*cptr)->current()) { - source = *cptr; + if( (*cptr)->current() ) { + sync_source = *cptr; break; } if(++cptr == my_impl->connections.end()) @@ -1491,11 +1537,11 @@ struct msg_handler : public fc::visitor { } // verify there is an available source - if (!source || !source->current() ) { + if( !sync_source || !sync_source->current() ) { fc_elog( logger, "Unable to continue syncing at this time"); - sync_known_lib_num = chain_plug->chain().last_irreversible_block_num(); + sync_known_lib_num = lib_block_num; sync_last_requested_num = 0; - set_state(in_sync); // probably not, but we can't do anything else + set_state( in_sync ); // probably not, but we can't do anything else return; } @@ -1505,16 +1551,18 @@ struct msg_handler : public fc::visitor { if( end > sync_known_lib_num ) end = sync_known_lib_num; if( end > 0 && end >= start ) { - fc_ilog(logger, "requesting range ${s} to ${e}, from ${n}", - ("n",source->peer_name())("s",start)("e",end)); - source->request_sync_blocks(start, end); sync_last_requested_num = end; + connection_ptr c = sync_source; + g_sync.unlock(); + fc_ilog( logger, "requesting range ${s} to ${e}, from ${n}", + ("n", c->peer_address())( "s", start )( "e", end ) ); + c->request_sync_blocks( start, end ); } } } - void sync_manager::send_handshakes() - { + // static, thread safe + void sync_manager::send_handshakes() { boost::shared_lock g( my_impl->connections_mtx ); for( auto& ci : my_impl->connections ) { if( ci->current() ) { @@ -1524,37 +1572,40 @@ struct msg_handler : public fc::visitor { } void sync_manager::start_sync(const connection_ptr& c, uint32_t target) { + std::unique_lock g_sync( sync_mtx ); if( target > sync_known_lib_num) { sync_known_lib_num = target; } - if (!sync_required()) { + if( !is_sync_required() ) { uint32_t bnum = chain_plug->chain().last_irreversible_block_num(); uint32_t hnum = chain_plug->chain().fork_db_head_block_num(); fc_dlog( logger, "We are already caught up, my irr = ${b}, head = ${h}, target = ${t}", - ("b",bnum)("h",hnum)("t",target)); + ("b", bnum)( "h", hnum )( "t", target ) ); return; } - if (state == in_sync) { - set_state(lib_catchup); + if( sync_state == in_sync ) { + set_state( lib_catchup ); sync_next_expected_num = chain_plug->chain().last_irreversible_block_num() + 1; } - fc_ilog(logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", - ( "cc",sync_last_requested_num)("t",target)("p",c->peer_name())); + fc_ilog( logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", + ("cc", sync_last_requested_num)( "t", target )( "p", c->peer_address() ) ); - request_next_chunk(c); + request_next_chunk( std::move( g_sync ), c ); } - void sync_manager::reassign_fetch(const connection_ptr& c, go_away_reason reason) { - fc_ilog(logger, "reassign_fetch, our last req is ${cc}, next expected is ${ne} peer ${p}", - ( 
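
The source-selection loop above walks the connection set as a ring: start just after the previous sync source, wrap around, and take the first peer that is current. A simplified illustration of the same scan, with peers reduced to strings and is_current standing in for connection::current():

#include <optional>
#include <set>
#include <string>

std::optional<std::string> next_source( const std::set<std::string>& peers,
                                        const std::string& previous,
                                        bool (*is_current)( const std::string& ) ) {
   if( peers.empty() ) return std::nullopt;
   auto start = peers.find( previous );
   if( start == peers.end() ) {
      start = peers.begin();                 // previous source is gone; flatten the ring
   } else if( ++start == peers.end() ) {
      start = peers.begin();                 // previous source was last; wrap
   }
   auto it = start;
   do {
      if( is_current( *it ) ) return *it;    // first usable peer wins
      if( ++it == peers.end() ) it = peers.begin();
   } while( it != start );
   return std::nullopt;                      // nobody usable right now
}
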
"cc",sync_last_requested_num)("ne",sync_next_expected_num)("p",c->peer_name())); + // called from connection strand + void sync_manager::sync_reassign_fetch(const connection_ptr& c, go_away_reason reason) { + std::unique_lock g( sync_mtx ); + fc_ilog( logger, "reassign_fetch, our last req is ${cc}, next expected is ${ne} peer ${p}", + ("cc", sync_last_requested_num)( "ne", sync_next_expected_num )( "p", c->peer_address() ) ); - if (c == source) { + if( c == sync_source ) { c->cancel_sync(reason); sync_last_requested_num = 0; - request_next_chunk(); + request_next_chunk( std::move(g) ); } } @@ -1562,7 +1613,7 @@ struct msg_handler : public fc::visitor { controller& cc = chain_plug->chain(); uint32_t lib_num = cc.last_irreversible_block_num(); uint32_t peer_lib = msg.last_irreversible_block_num; - reset_lib_num(c); + sync_reset_lib_num(c); c->syncing = false; //-------------------------------- @@ -1647,11 +1698,14 @@ struct msg_handler : public fc::visitor { if( req.req_blocks.mode == catch_up ) { c->fork_head = id; c->fork_head_num = num; - fc_ilog( logger, "got a catch_up notice while in ${s}, fork head num = ${fhn} target LIB = ${lib} next_expected = ${ne}", - ("s",stage_str(state))("fhn",num)("lib",sync_known_lib_num)("ne", sync_next_expected_num) ); - if (state == lib_catchup) + std::lock_guard g( sync_mtx ); + fc_ilog( logger, "got a catch_up notice while in ${s}, fork head num = ${fhn} " + "target LIB = ${lib} next_expected = ${ne}", + ("s", stage_str( sync_state ))( "fhn", num )( "lib", sync_known_lib_num ) + ("ne", sync_next_expected_num ) ); + if( sync_state == lib_catchup ) return; - set_state(head_catchup); + set_state( head_catchup ); } else { c->fork_head = block_id_type(); c->fork_head_num = 0; @@ -1661,12 +1715,12 @@ struct msg_handler : public fc::visitor { } void sync_manager::sync_recv_notice( const connection_ptr& c, const notice_message& msg) { - fc_ilog(logger, "sync_manager got ${m} block notice",("m",modes_str(msg.known_blocks.mode))); + fc_ilog( logger, "sync_manager got ${m} block notice", ("m", modes_str( msg.known_blocks.mode )) ); EOS_ASSERT( msg.known_blocks.mode == catch_up || msg.known_blocks.mode == last_irr_catch_up, plugin_exception, "sync_recv_notice only called on catch_up" ); if( msg.known_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}", - ("s", msg.known_blocks.ids.size())("p", c->peer_name()) ); + ("s", msg.known_blocks.ids.size())("p", c->peer_address()) ); c->close(); return; } @@ -1678,24 +1732,31 @@ struct msg_handler : public fc::visitor { } } else if (msg.known_blocks.mode == last_irr_catch_up) { c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; - reset_lib_num(c); + sync_reset_lib_num(c); start_sync(c, msg.known_trx.pending); } } - void sync_manager::rejected_block(const connection_ptr& c, uint32_t blk_num) { - if (state != in_sync ) { - fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_name()) ); + // called from connection strand + void sync_manager::rejected_block( const connection_ptr& c, uint32_t blk_num ) { + std::unique_lock g( sync_mtx ); + if( sync_state != in_sync ) { + fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_address()) ); sync_last_requested_num = 0; - source.reset(); + sync_source.reset(); + set_state( in_sync ); + g.unlock(); c->close(); - set_state(in_sync); send_handshakes(); } } + + // called from connection strand void 
sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { - fc_dlog(logger, "got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name())); - if (state == lib_catchup) { + fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_address() ) ); + std::unique_lock g_sync( sync_mtx ); + stages state = sync_state; + if( state == lib_catchup ) { if (blk_num != sync_next_expected_num) { fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_name()) ); @@ -1703,13 +1764,14 @@ struct msg_handler : public fc::visitor { return; } sync_next_expected_num = blk_num + 1; - } - if (state == head_catchup) { - fc_dlog(logger, "sync_manager in head_catchup state"); - set_state(in_sync); - source.reset(); + } else if( state == head_catchup ) { + fc_dlog( logger, "sync_manager in head_catchup state" ); + set_state( in_sync ); + sync_source.reset(); + g_sync.unlock(); block_id_type null_id; + bool set_state_to_head_catchup = false; boost::shared_lock g( my_impl->connections_mtx ); for( const auto& cp : my_impl->connections ) { uint32_t fork_head_num = cp->fork_head_num.load(); // fork_head_num provides memory barrier for fork_head @@ -1720,26 +1782,29 @@ struct msg_handler : public fc::visitor { c->fork_head = null_id; c->fork_head_num = 0; } else { - set_state(head_catchup); + set_state_to_head_catchup = true; } } g.unlock(); - if (state == in_sync) { + if( set_state_to_head_catchup ) { + g_sync.lock(); + set_state( head_catchup ); + g_sync.unlock(); + } else { send_handshakes(); } - } - else if (state == lib_catchup) { + } else if( state == lib_catchup ) { if( blk_num == sync_known_lib_num ) { - fc_dlog( logger, "All caught up with last known last irreversible block resending handshake"); - set_state(in_sync); + fc_dlog( logger, "All caught up with last known last irreversible block resending handshake" ); + set_state( in_sync ); + g_sync.unlock(); send_handshakes(); - } - else if (blk_num == sync_last_requested_num) { - request_next_chunk(); - } - else { - fc_dlog(logger,"calling sync_wait on connection ${p}",("p",c->peer_name())); + } else if( blk_num == sync_last_requested_num ) { + request_next_chunk( std::move( g_sync) ); + } else { + g_sync.unlock(); + fc_dlog( logger, "calling sync_wait on connection ${p}", ("p", c->peer_address()) ); c->sync_wait(); } } @@ -1747,11 +1812,12 @@ struct msg_handler : public fc::visitor { //------------------------------------------------------------------------ + // thread safe bool dispatch_manager::add_peer_block(const peer_block_state& entry) { - std::lock_guard g(blk_state_mtx); + std::lock_guard g( blk_state_mtx ); auto bptr = blk_state.get().find(std::make_tuple(std::ref(entry.id), entry.connection_id)); bool added = (bptr == blk_state.end()); - if (added){ + if( added ) { blk_state.insert(entry); } return added; @@ -1779,6 +1845,7 @@ struct msg_handler : public fc::visitor { return added; } + // thread safe void dispatch_manager::update_txns_block_num( const signed_block_ptr& sb ) { update_block_num ubn( sb->block_num() ); std::lock_guard g( local_txns_mtx ); @@ -1792,6 +1859,7 @@ struct msg_handler : public fc::visitor { } } + // thread safe void dispatch_manager::update_txns_block_num( const transaction_id_type& id, uint32_t blk_num ) { update_block_num ubn( blk_num ); std::lock_guard g( local_txns_mtx ); @@ -1878,6 +1946,7 @@ struct msg_handler : public fc::visitor { } } + // called from connection strand void 
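
add_peer_block() above is a small thread-safe insert-if-absent: take the mutex, look the entry up, insert only if it is missing, and report whether it was new. A reduced sketch of that bookkeeping; peer_block_index, string block ids, and have_block are illustrative simplifications, not the multi_index container used in the patch:

#include <cstdint>
#include <mutex>
#include <set>
#include <string>
#include <utility>

class peer_block_index {
   std::mutex mtx_;
   std::set<std::pair<std::string, uint32_t>> known_;   // (block id, connection id)

public:
   bool add_peer_block( const std::string& blk_id, uint32_t connection_id ) {
      std::lock_guard<std::mutex> g( mtx_ );
      return known_.emplace( blk_id, connection_id ).second;   // true only if newly inserted
   }

   bool have_block( const std::string& blk_id, uint32_t connection_id ) {
      std::lock_guard<std::mutex> g( mtx_ );
      return known_.count( { blk_id, connection_id } ) != 0;
   }
};
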
dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { peer_block_state pbstate{id, bnum, c->connection_id}; add_peer_block( pbstate ); @@ -2025,23 +2094,23 @@ struct msg_handler : public fc::visitor { //------------------------------------------------------------------------ // called from any thread - bool net_plugin_impl::resolve_and_connect(const connection_ptr& c) { - if( c->no_retry != go_away_reason::no_reason) { - fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( c->no_retry ))); + bool connection::resolve_and_connect() { + if( no_retry != go_away_reason::no_reason) { + fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( no_retry ))); return false; } - string::size_type colon = c->peer_address().find(':'); + string::size_type colon = peer_address().find(':'); if (colon == std::string::npos || colon == 0) { - fc_elog( logger, "Invalid peer address. must be \"host:port\": ${p}", ("p",c->peer_address()) ); + fc_elog( logger, "Invalid peer address. must be \"host:port\": ${p}", ("p", peer_address()) ); return false; } - string host = c->peer_address().substr( 0, colon ); - string port = c->peer_address().substr( colon + 1); + string host = peer_address().substr( 0, colon ); + string port = peer_address().substr( colon + 1); idump((host)(port)); tcp::resolver::query query( tcp::v4(), host, port ); - connection_wptr weak_conn = c; + connection_wptr weak_conn = shared_from_this(); // Note: need to add support for IPv6 too <<<<<<< HEAD @@ -2063,29 +2132,33 @@ struct msg_handler : public fc::visitor { } ); ======= auto resolver = std::make_shared( *server_ioc ); - resolver->async_resolve( query, - [resolver, ioc = server_ioc, weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { + resolver->async_resolve( query, boost::asio::bind_executor( strand, + [resolver, ioc = server_ioc, weak_conn]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { auto c = weak_conn.lock(); if( !c ) return; if( !err ) { - connect( c, resolver, endpoint_itr ); + c->connect( resolver, endpoint_itr ); } else { fc_elog( logger, "Unable to resolve ${add}: ${error}", ("add", c->peer_name())( "error", err.message() ) ); } +<<<<<<< HEAD } ); >>>>>>> Make use of resolver thread safe +======= + } ) ); +>>>>>>> Work toward making sync_manager and handshake message thread safe. 
return true; >>>>>>> Made all access to impl->connections thread safe } - // called from any thread - void net_plugin_impl::connect(const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr) { - if( c->no_retry != go_away_reason::no_reason) { - string rsn = reason_str(c->no_retry); + // called from connection strand + void connection::connect( const std::shared_ptr& resolver, tcp::resolver::iterator endpoint_itr ) { + if( no_retry != go_away_reason::no_reason) { return; } auto current_endpoint = *endpoint_itr; ++endpoint_itr; +<<<<<<< HEAD c->connecting = true; <<<<<<< HEAD connection_wptr weak_conn = c; @@ -2100,14 +2173,20 @@ struct msg_handler : public fc::visitor { c->socket.async_connect( current_endpoint, boost::asio::bind_executor( c->strand, [resolver, c, endpoint_itr, this]( const boost::system::error_code& err ) { if( !err && c->socket.is_open()) { +======= + connecting = true; + socket.async_connect( current_endpoint, + boost::asio::bind_executor( strand, [resolver, c = shared_from_this(), endpoint_itr]( const boost::system::error_code& err ) { + if( !err && c->socket.is_open() ) { +>>>>>>> Work toward making sync_manager and handshake message thread safe. if( c->start_session() ) { >>>>>>> Move socket ownership into connection. c->send_handshake(); } } else { - if( endpoint_itr != tcp::resolver::iterator()) { + if( endpoint_itr != tcp::resolver::iterator() ) { c->close(); - connect( c, resolver, endpoint_itr ); + c->connect( resolver, endpoint_itr ); } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; @@ -2208,9 +2287,9 @@ struct msg_handler : public fc::visitor { } // only called from strand thread - void net_plugin_impl::start_read_message(const connection_ptr& conn) { - + void connection::start_read_message() { try { +<<<<<<< HEAD connection_wptr weak_conn = conn; <<<<<<< HEAD @@ -2222,16 +2301,18 @@ struct msg_handler : public fc::visitor { std::size_t minimum_read = conn->outstanding_read_bytes != 0 ? conn->outstanding_read_bytes.load() : message_header_size; >>>>>>> Test of multi-threaded reading ======= +======= +>>>>>>> Work toward making sync_manager and handshake message thread safe. std::size_t minimum_read = - std::atomic_exchangeoutstanding_read_bytes.load())>( &conn->outstanding_read_bytes, 0 ); + std::atomic_exchange( &outstanding_read_bytes, 0 ); minimum_read = minimum_read != 0 ? 
minimum_read : message_header_size; >>>>>>> Remove unneeded access to atomic - if (use_socket_read_watermark) { + if (my_impl->use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; std::size_t socket_read_watermark = std::min(minimum_read, max_socket_read_watermark); boost::asio::socket_base::receive_low_watermark read_watermark_opt(socket_read_watermark); - conn->socket.set_option(read_watermark_opt); + socket.set_option(read_watermark_opt); } auto completion_handler = [minimum_read](boost::system::error_code ec, std::size_t bytes_transferred) -> std::size_t { @@ -2242,21 +2323,28 @@ struct msg_handler : public fc::visitor { } }; - if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || - conn->reads_in_flight > def_max_reads_in_flight || - conn->trx_in_progress_size > def_max_trx_in_progress_size ) + if( buffer_queue.write_queue_size() > def_max_write_queue_size || + reads_in_flight > def_max_reads_in_flight || + trx_in_progress_size > def_max_trx_in_progress_size ) { // too much queued up, reschedule <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> Make delay_timer thread safe uint32_t write_queue_size = conn->buffer_queue.write_queue_size(); uint32_t trx_in_progress_size = conn->trx_in_progress_size; uint32_t reads_in_flight = conn->reads_in_flight; +======= + uint32_t write_queue_size = buffer_queue.write_queue_size(); + uint32_t trx_in_progress_size = this->trx_in_progress_size.load(); + uint32_t reads_in_flight = this->reads_in_flight.load(); +>>>>>>> Work toward making sync_manager and handshake message thread safe. if( write_queue_size > def_max_write_queue_size ) { - peer_wlog( conn, "write_queue full ${s} bytes", ("s", write_queue_size) ); + peer_wlog( this, "write_queue full ${s} bytes", ("s", write_queue_size) ); } else if( reads_in_flight > def_max_reads_in_flight ) { +<<<<<<< HEAD peer_wlog( conn, "max reads in flight ${s}", ("s", reads_in_flight) ); <<<<<<< HEAD ======= @@ -2267,12 +2355,15 @@ struct msg_handler : public fc::visitor { >>>>>>> Test of multi-threaded reading ======= >>>>>>> Make delay_timer thread safe +======= + peer_wlog( this, "max reads in flight ${s}", ("s", reads_in_flight) ); +>>>>>>> Work toward making sync_manager and handshake message thread safe. } else { - peer_wlog( conn, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); + peer_wlog( this, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) ); } if( write_queue_size > 2*def_max_write_queue_size || reads_in_flight > 2*def_max_reads_in_flight || - trx_in_progress_size > 2*def_max_trx_in_progress_size ) + trx_in_progress_size > 2*def_max_trx_in_progress_size ) { <<<<<<< HEAD fc_wlog( logger, "queues over full, giving up on connection" ); @@ -2288,27 +2379,32 @@ struct msg_handler : public fc::visitor { }); ======= fc_elog( logger, "queues over full, giving up on connection, closing connection to: ${p}", +<<<<<<< HEAD ("p", conn->peer_name()) ); conn->close(); >>>>>>> Move socket ownership into connection. +======= + ("p", peer_name()) ); + close(); +>>>>>>> Work toward making sync_manager and handshake message thread safe. 
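
The completion_handler above is an async_read completion condition: it returns 0 (finished) on error or once at least minimum_read bytes are buffered, and otherwise tells asio how many more bytes are still wanted. A standalone helper with the same shape; make_min_read_condition is an illustrative name, not from the patch:

#include <boost/asio.hpp>
#include <cstddef>

inline auto make_min_read_condition( std::size_t minimum_read ) {
   return [minimum_read]( const boost::system::error_code& ec,
                          std::size_t bytes_transferred ) -> std::size_t {
      if( ec || bytes_transferred >= minimum_read ) {
         return 0;                                  // done reading for now
      }
      return minimum_read - bytes_transferred;      // ask asio for at least the remainder
   };
}

// usage (sketch): boost::asio::async_read( socket, buffers,
//                    make_min_read_condition( message_header_size ), handler );
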
return; } - std::lock_guard g( conn->read_delay_timer_mtx ); - conn->read_delay_timer.expires_from_now( def_read_delay_for_full_write_queue ); - conn->read_delay_timer.async_wait( - boost::asio::bind_executor(conn->strand, [this, weak_conn]( boost::system::error_code ) { + std::lock_guard g( read_delay_timer_mtx ); + read_delay_timer.expires_from_now( def_read_delay_for_full_write_queue ); + connection_wptr weak_conn = shared_from_this(); + read_delay_timer.async_wait( boost::asio::bind_executor(strand, [weak_conn]( boost::system::error_code ) { auto conn = weak_conn.lock(); if( !conn ) return; - start_read_message( conn ); + conn->start_read_message(); } ) ); return; } - ++conn->reads_in_flight; - boost::asio::async_read( conn->socket, - conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, - boost::asio::bind_executor( conn->strand, - [this, conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { + ++reads_in_flight; + boost::asio::async_read( socket, + pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, + boost::asio::bind_executor( strand, + [conn = shared_from_this()]( boost::system::error_code ec, std::size_t bytes_transferred ) { --conn->reads_in_flight; <<<<<<< HEAD <<<<<<< HEAD @@ -2347,7 +2443,7 @@ struct msg_handler : public fc::visitor { if (bytes_in_buffer >= total_message_bytes) { conn->pending_message_buffer.advance_read_ptr(message_header_size); - if (!process_next_message(conn, message_length)) { + if (!conn->process_next_message(message_length)) { return; } } else { @@ -2362,7 +2458,7 @@ struct msg_handler : public fc::visitor { } } } - if( !close_connection ) start_read_message( conn ); + if( !close_connection ) conn->start_read_message(); } else { if (ec.value() != boost::asio::error::eof) { fc_elog( logger, "Error reading message: ${m}", ( "m", ec.message() ) ); @@ -2423,15 +2519,16 @@ struct msg_handler : public fc::visitor { } })); } catch (...) 
{ - fc_elog( logger, "Undefined exception in start_read_message, closing connection to: ${p}", ("p", conn->peer_name()) ); - conn->close(); + fc_elog( logger, "Undefined exception in start_read_message, closing connection to: ${p}", ("p", peer_name()) ); + close(); } } - bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { + // called from connection strand + bool connection::process_next_message( uint32_t message_length ) { try { // if next message is a block we already have, exit early - auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); + auto peek_ds = pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); if( which == signed_block_which ) { @@ -2439,26 +2536,21 @@ struct msg_handler : public fc::visitor { fc::raw::unpack( peek_ds, bh ); block_id_type blk_id = bh.id(); - if( dispatcher->have_block( blk_id ) ) { - connection_wptr weak = conn; - app().post(priority::high, // high since block processing is high and this needs to run before next block - [dispatcher = dispatcher.get(), sync_master = sync_master.get(), weak{std::move(weak)}, blk_id] { - connection_ptr c = weak.lock(); - if(c) { - auto blk_num = block_header::num_from_id(blk_id); - dispatcher->recv_block(c, blk_id, blk_num); - sync_master->sync_recv_block( c, blk_id, blk_num ); - } - }); - conn->pending_message_buffer.advance_read_ptr( message_length ); + if( my_impl->dispatcher->have_block( blk_id ) ) { + auto blk_num = block_header::num_from_id( blk_id ); + connection_ptr c = shared_from_this(); + my_impl->dispatcher->recv_block( c, blk_id, blk_num ); + my_impl->sync_master->sync_recv_block( c, blk_id, blk_num ); + + pending_message_buffer.advance_read_ptr( message_length ); return true; } } - auto ds = conn->pending_message_buffer.create_datastream(); + auto ds = pending_message_buffer.create_datastream(); net_message msg; fc::raw::unpack( ds, msg ); - msg_handler m( *this, conn ); + msg_handler m( *my_impl, shared_from_this() ); if( msg.contains() ) { m( std::move( msg.get() ) ); } else if( msg.contains() ) { @@ -2468,14 +2560,35 @@ struct msg_handler : public fc::visitor { } } catch( const fc::exception& e ) { fc_elog( logger, "Exception in handling message from ${p}: ${s}", - ("p", conn->peer_name())("s", e.to_detail_string()) ); - conn->close(); + ("p", peer_name())("s", e.to_detail_string()) ); + close(); return false; } return true; } - bool net_plugin_impl::is_valid(const handshake_message& msg) { + // call only from main application thread + void net_plugin_impl::update_chain_info() { + controller& cc = chain_plug->chain(); + std::lock_guard g( chain_info_mtx ); + chain_lib_num = cc.last_irreversible_block_num(); + chain_lib_id = cc.last_irreversible_block_id(); + chain_head_blk_num = cc.head_block_num(); + chain_head_blk_id = cc.head_block_id(); + chain_fork_head_blk_num = cc.fork_db_head_block_num(); + chain_fork_head_blk_id = cc.fork_db_head_block_id(); + } + + // lib_num, head_blk_num, fork_head_blk_num, lib_id, head_blk_id, fork_head_blk_id + std::tuple + net_plugin_impl::get_chain_info() const { + std::lock_guard g( chain_info_mtx ); + return std::make_tuple( + chain_lib_num, chain_head_blk_num, chain_fork_head_blk_num, + chain_lib_id, chain_head_blk_id, chain_fork_head_blk_id ); + } + + bool net_plugin_impl::is_valid( const handshake_message& msg ) { // Do some basic validation of an incoming handshake_message, so things // that really aren't handshake messages can be quickly discarded without 
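
update_chain_info() and get_chain_info() above snapshot several related values under one mutex and hand them back together as a tuple, so readers on other threads never see a torn mix of fields taken from different blocks. A minimal sketch of that pattern with made-up field names; chain_info_cache is not net_plugin code:

#include <cstdint>
#include <mutex>
#include <string>
#include <tuple>

class chain_info_cache {
   mutable std::mutex mtx_;
   uint32_t    lib_num_  = 0;
   uint32_t    head_num_ = 0;
   std::string head_id_;

public:
   void update( uint32_t lib, uint32_t head, std::string id ) {
      std::lock_guard<std::mutex> g( mtx_ );
      lib_num_  = lib;
      head_num_ = head;
      head_id_  = std::move( id );
   }

   // lib_num, head_num, head_id
   std::tuple<uint32_t, uint32_t, std::string> get() const {
      std::lock_guard<std::mutex> g( mtx_ );
      return std::make_tuple( lib_num_, head_num_, head_id_ );
   }
};

// caller side, unpacking only the fields of interest:
//   uint32_t lib = 0;
//   std::tie( lib, std::ignore, std::ignore ) = cache.get();
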
// affecting state. @@ -2504,16 +2617,14 @@ struct msg_handler : public fc::visitor { peer_ilog(c, "received chain_size_message"); } - void net_plugin_impl::handle_message(const connection_ptr& c, const handshake_message& msg) { + // called from connection strand + void net_plugin_impl::handle_message( const connection_ptr& c, const handshake_message& msg ) { peer_ilog(c, "received handshake_message"); - if (!is_valid(msg)) { + if( !is_valid( msg ) ) { peer_elog( c, "bad handshake message"); - c->enqueue( go_away_message( fatal_other )); + c->enqueue( go_away_message( fatal_other ) ); return; } - controller& cc = chain_plug->chain(); - uint32_t lib_num = cc.last_irreversible_block_num(); - uint32_t peer_lib = msg.last_irreversible_block_num; if( c->connecting ) { c->connecting = false; } @@ -2579,28 +2690,32 @@ struct msg_handler : public fc::visitor { return; } - bool on_fork = false; - fc_dlog(logger, "lib_num = ${ln} peer_lib = ${pl}",("ln",lib_num)("pl",peer_lib)); + uint32_t peer_lib = msg.last_irreversible_block_num; + app().post( priority::low, [peer_lib, chain_plug = this->chain_plug, c, msg_lib_id = msg.last_irreversible_block_id]() { + controller& cc = chain_plug->chain(); + uint32_t lib_num = cc.last_irreversible_block_num(); - if( peer_lib <= lib_num && peer_lib > 0) { - try { - block_id_type peer_lib_id = cc.get_block_id_for_num( peer_lib); - on_fork =( msg.last_irreversible_block_id != peer_lib_id); - } - catch( const unknown_block_exception &ex) { - fc_wlog( logger, "peer last irreversible block ${pl} is unknown", ("pl", peer_lib) ); - on_fork = true; - } - catch( ...) { - fc_wlog( logger, "caught an exception getting block id for ${pl}",("pl",peer_lib) ); - on_fork = true; - } - if( on_fork) { - fc_elog( logger, "Peer chain is forked" ); - c->enqueue( go_away_message( forked )); - return; + bool on_fork = false; + fc_dlog( logger, "lib_num = ${ln} peer_lib = ${pl}", ("ln", lib_num)( "pl", peer_lib ) ); + + if( peer_lib <= lib_num && peer_lib > 0 ) { + try { + block_id_type peer_lib_id = cc.get_block_id_for_num( peer_lib ); + on_fork = (msg_lib_id != peer_lib_id); + } catch( const unknown_block_exception& ex ) { + fc_wlog( logger, "peer last irreversible block ${pl} is unknown", ("pl", peer_lib) ); + on_fork = true; + } catch( ... 
) { + fc_wlog( logger, "caught an exception getting block id for ${pl}", ("pl", peer_lib) ); + on_fork = true; + } + if( on_fork ) { + fc_elog( logger, "Peer chain is forked" ); + c->enqueue( go_away_message( forked ) ); + return; + } } - } + }); if (c->sent_handshake_count == 0) { c->send_handshake(); @@ -2767,7 +2882,7 @@ struct msg_handler : public fc::visitor { trx->get_signatures().size() * sizeof(signature_type); } - // called from thread_pool threads + // called from connection strand void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); if( db_read_mode == eosio::db_read_mode::READ_ONLY ) { @@ -2792,6 +2907,7 @@ struct msg_handler : public fc::visitor { c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { + // next (this lambda) called from application thread c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); bool accepted = false; if (result.contains()) { @@ -2817,7 +2933,10 @@ struct msg_handler : public fc::visitor { } } - app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}, head_blk_num = this->head_blk_num]() { + controller& cc = chain_plug->chain(); + uint32_t head_blk_num = cc.head_block_num(); + + app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}, head_blk_num]() { if( accepted ) { dispatcher->bcast_transaction( ptrx ); } else { @@ -2829,6 +2948,10 @@ struct msg_handler : public fc::visitor { <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD +======= + // called from application thread +>>>>>>> Work toward making sync_manager and handshake message thread safe. void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { controller& cc = chain_plug->chain(); block_id_type blk_id = msg->id(); @@ -2853,6 +2976,7 @@ struct msg_handler : public fc::visitor { <<<<<<< HEAD <<<<<<< HEAD if( cc.fetch_block_by_id(blk_id) ) { +<<<<<<< HEAD sync_master->recv_block(c, blk_id, blk_num); return; } @@ -2870,6 +2994,11 @@ struct msg_handler : public fc::visitor { ======= sync_master->sync_recv_block(c, blk_id, blk_num); >>>>>>> Consolidate transaction tracking, reducing memory requirements and making thread safe. +======= + c->strand.post( [sync_master = sync_master.get(), c, blk_id, blk_num]() { + sync_master->sync_recv_block( c, blk_id, blk_num ); + }); +>>>>>>> Work toward making sync_manager and handshake message thread safe. 
return; } <<<<<<< HEAD @@ -2984,14 +3113,16 @@ struct msg_handler : public fc::visitor { fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); } - dispatcher->recv_block(c, blk_id, blk_num); + c->strand.post( [dispatcher = dispatcher.get(), c, blk_id, blk_num]() { + dispatcher->recv_block( c, blk_id, blk_num ); + }); fc::microseconds age( fc::time_point::now() - msg->timestamp); peer_ilog(c, "received signed_block : #${n} block age in secs = ${age}", ("n",blk_num)("age",age.to_seconds())); go_away_reason reason = fatal_other; try { - chain_plug->accept_block(msg); //, sync_master->is_active(c)); + chain_plug->accept_block(msg); reason = no_reason; } catch( const unlinkable_block_exception &ex) { peer_elog(c, "bad signed_block : ${m}", ("m",ex.what())); @@ -3012,16 +3143,18 @@ struct msg_handler : public fc::visitor { fc_elog( logger, "handle sync block caught something else from ${p}",("num",blk_num)("p",c->peer_name())); } - update_block_num ubn(blk_num); if( reason == no_reason ) { boost::asio::post( *server_ioc, [self = this, msg]() { self->dispatcher->update_txns_block_num( msg ); }); - sync_master->sync_recv_block(c, blk_id, blk_num); - } - else { - sync_master->rejected_block(c, blk_num); - dispatcher->rejected_block( blk_id ); + c->strand.post( [sync_master = sync_master.get(), c, blk_id, blk_num]() { + sync_master->sync_recv_block( c, blk_id, blk_num ); + }); + } else { + c->strand.post( [sync_master = sync_master.get(), dispatcher = dispatcher.get(), c, blk_id, blk_num]() { + sync_master->rejected_block( c, blk_num ); + dispatcher->rejected_block( blk_id ); + }); } } @@ -3088,7 +3221,8 @@ struct msg_handler : public fc::visitor { void net_plugin_impl::expire() { auto now = time_point::now(); - uint32_t lib = lib_num.load(); + uint32_t lib = 0; + std::tie( lib, std::ignore, std::ignore, std::ignore, std::ignore, std::ignore ) = get_chain_info(); dispatcher->expire_blocks( lib ); <<<<<<< HEAD for ( auto& c : connections ) { @@ -3132,7 +3266,7 @@ struct msg_handler : public fc::visitor { } if( !(*it)->socket_is_open() && !(*it)->connecting) { if( (*it)->peer_address().length() > 0) { - if( !resolve_and_connect(*it) ) { + if( !(*it)->resolve_and_connect() ) { it = connections.erase(it); continue; } @@ -3147,17 +3281,28 @@ struct msg_handler : public fc::visitor { start_conn_timer(connector_period, std::weak_ptr()); } - void net_plugin_impl::accepted_block(const block_state_ptr& block) { + // called from application thread + void net_plugin_impl::on_accepted_block(const block_state_ptr& block) { + update_chain_info(); boost::asio::post( *server_ioc, [this, ioc=server_ioc, block]() { fc_dlog( logger, "signaled, id = ${id}", ("id", block->id) ); dispatcher->bcast_block( block ); }); } + // called from application thread + void net_plugin_impl::on_irreversible_block( const block_state_ptr& ) { + update_chain_info(); + } + + // called from application thread void net_plugin_impl::transaction_ack(const std::pair& results) { const auto& id = results.second->id; if (results.first) { fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); + + controller& cc = chain_plug->chain(); + uint32_t head_blk_num = cc.head_block_num(); dispatcher->rejected_transaction(id, head_blk_num); } else { fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); @@ -3239,7 +3384,7 @@ struct msg_handler : public fc::visitor { return chain::signature_type(); } - // call from main application thread + // call from connection strand void 
handshake_initializer::populate( handshake_message& hello ) { hello.network_version = net_version_base + net_version; hello.chain_id = my_impl->chain_id; @@ -3263,29 +3408,8 @@ struct msg_handler : public fc::visitor { #endif hello.agent = my_impl->user_agent_name; - - controller& cc = my_impl->chain_plug->chain(); - hello.head_id = fc::sha256(); - hello.last_irreversible_block_id = fc::sha256(); - hello.head_num = cc.fork_db_head_block_num(); - hello.last_irreversible_block_num = cc.last_irreversible_block_num(); - if( hello.last_irreversible_block_num ) { - try { - hello.last_irreversible_block_id = cc.get_block_id_for_num(hello.last_irreversible_block_num); - } - catch( const unknown_block_exception &ex) { - fc_wlog( logger, "caught unkown_block" ); - hello.last_irreversible_block_num = 0; - } - } - if( hello.head_num ) { - try { - hello.head_id = cc.get_block_id_for_num( hello.head_num ); - } - catch( const unknown_block_exception &ex) { - hello.head_num = 0; - } - } + std::tie( hello.last_irreversible_block_num, std::ignore, hello.head_num, + hello.last_irreversible_block_id, std::ignore, hello.head_id ) = my_impl->get_chain_info(); } net_plugin::net_plugin() @@ -3478,7 +3602,7 @@ struct msg_handler : public fc::visitor { } chain::controller&cc = my->chain_plug->chain(); { - cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); + cc.accepted_block.connect( boost::bind(&net_plugin_impl::on_accepted_block, my.get(), _1)); } my->incoming_transaction_ack_subscription = app().get_channel().subscribe( @@ -3559,7 +3683,7 @@ struct msg_handler : public fc::visitor { connection_ptr c = std::make_shared( host ); fc_dlog( logger, "calling active connector" ); - if( my->resolve_and_connect( c ) ) { + if( c->resolve_and_connect() ) { fc_dlog( logger, "adding new connection to the list" ); boost::unique_lock g( my->connections_mtx ); my->connections.insert( c ); From 2cc95a6055533342d3595e20ca76e1fdfeb76bd1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 11:36:38 -0500 Subject: [PATCH 0144/1648] Make sync_manager::recv_handshake connection strand safe --- plugins/net_plugin/net_plugin.cpp | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0677261b2d1..2cca95804fe 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -161,13 +161,13 @@ namespace eosio { static void send_handshakes(); public: - explicit sync_manager(uint32_t span); - void sync_reset_lib_num(const connection_ptr& conn); - void sync_reassign_fetch(const connection_ptr& c, go_away_reason reason); - void rejected_block(const connection_ptr& c, uint32_t blk_num); - void sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num); - void recv_handshake(const connection_ptr& c, const handshake_message& msg); - void sync_recv_notice( const connection_ptr& c, const notice_message& msg); + explicit sync_manager( uint32_t span ); + void sync_reset_lib_num( const connection_ptr& conn ); + void sync_reassign_fetch( const connection_ptr& c, go_away_reason reason ); + void rejected_block( const connection_ptr& c, uint32_t blk_num ); + void sync_recv_block( const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num ); + void recv_handshake( const connection_ptr& c, const handshake_message& msg ); + void sync_recv_notice( const connection_ptr& c, const notice_message& msg ); }; class dispatch_manager 
{ @@ -1609,10 +1609,15 @@ namespace eosio { } } - void sync_manager::recv_handshake(const connection_ptr& c, const handshake_message& msg) { - controller& cc = chain_plug->chain(); - uint32_t lib_num = cc.last_irreversible_block_num(); + void sync_manager::recv_handshake( const connection_ptr& c, const handshake_message& msg ) { + uint32_t lib_num = 0; uint32_t peer_lib = msg.last_irreversible_block_num; + uint32_t head = 0; + block_id_type head_id; + + std::tie( lib_num, std::ignore, head, + std::ignore, std::ignore, head_id ) = my_impl->get_chain_info(); + sync_reset_lib_num(c); c->syncing = false; @@ -1628,8 +1633,6 @@ namespace eosio { // //----------------------------- - uint32_t head = cc.fork_db_head_block_num(); - block_id_type head_id = cc.fork_db_head_block_id(); if (head_id == msg.head_id) { fc_dlog(logger, "sync check state 0"); // notify peer of our pending transactions @@ -1644,7 +1647,7 @@ namespace eosio { fc_dlog(logger, "sync check state 1"); // wait for receipt of a notice message before initiating sync if (c->protocol_version < proto_explicit_sync) { - start_sync( c, peer_lib); + start_sync( c, peer_lib ); } return; } From c7b252013251704aa55b343f2e996f3bef1ad927 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 22:26:39 -0500 Subject: [PATCH 0145/1648] Handle almost every net_message on net_plugin thread pool. Optimize bcast_block to not send when syncing. --- plugins/net_plugin/net_plugin.cpp | 394 +++++++++++++++--------------- 1 file changed, 202 insertions(+), 192 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 2cca95804fe..045fcd94b1a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -147,14 +147,14 @@ namespace eosio { uint32_t sync_next_expected_num; uint32_t sync_req_span; connection_ptr sync_source; - stages sync_state; + std::atomic sync_state; chain_plugin* chain_plug = nullptr; private: constexpr static auto stage_str( stages s ); void set_state( stages s ); - bool is_sync_required(); + bool is_sync_required( uint32_t fork_head_block_num ); void request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn = connection_ptr() ); void start_sync( const connection_ptr& c, uint32_t target ); void verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); @@ -162,6 +162,7 @@ namespace eosio { public: explicit sync_manager( uint32_t span ); + bool syncing_with_peer() const { return sync_state == lib_catchup; } void sync_reset_lib_num( const connection_ptr& conn ); void sync_reassign_fetch( const connection_ptr& c, go_away_reason reason ); void rejected_block( const connection_ptr& c, uint32_t blk_num ); @@ -664,7 +665,6 @@ namespace eosio { bool connected(); bool current(); - void reset(); void close(); private: static void _close( connection* self ); // for easy capture @@ -794,6 +794,7 @@ namespace eosio { void operator()( packed_transaction&& msg ) const { // continue call to handle_message on connection strand + fc_dlog( logger, "handle packed_transaction" ); shared_ptr ptr = std::make_shared( std::move( msg ) ); <<<<<<< HEAD <<<<<<< HEAD @@ -812,10 +813,12 @@ namespace eosio { void operator()( const handshake_message& msg ) const { // continue call to handle_message on connection strand + fc_dlog( logger, "handle handshake_message" ); impl.handle_message( c, msg ); } void operator()( const chain_size_message& msg ) const { +<<<<<<< HEAD connection_wptr weak = c; <<<<<<< HEAD <<<<<<< HEAD @@ -832,47 +835,50 @@ 
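
Making sync_state atomic (presumably std::atomic<stages>; the template argument is not visible in the patch text) lets syncing_with_peer() answer from any thread without taking sync_mtx. A tiny illustration with a local enum standing in for stages:

#include <atomic>

enum class sync_stage { lib_catchup, head_catchup, in_sync };

class sync_stage_holder {
   std::atomic<sync_stage> state_{ sync_stage::in_sync };

public:
   void set_state( sync_stage s ) { state_.store( s ); }
   // cheap, lock-free query usable from any thread
   bool syncing_with_peer() const { return state_.load() == sync_stage::lib_catchup; }
};
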
namespace eosio { connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); +======= + // continue call to handle_message on connection strand + fc_dlog( logger, "handle chain_size_message" ); + impl.handle_message( c, msg ); +>>>>>>> Handle almost every net_message on net_plugin thread pool. Optimize bcast_block to not send when syncing. } void operator()( const go_away_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle go_away_message" ); + impl.handle_message( c, msg ); } void operator()( const time_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle time_message" ); + impl.handle_message( c, msg ); } void operator()( const notice_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle notice_message" ); + impl.handle_message( c, msg ); } void operator()( const request_message& msg ) const { - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); + // continue call to handle_message on connection strand + fc_dlog( logger, "handle request_message" ); + impl.handle_message( c, msg ); } void operator()( const sync_request_message& msg ) const { +<<<<<<< HEAD connection_wptr weak = c; app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { >>>>>>> Work toward making sync_manager and handshake message thread safe. connection_ptr c = weak.lock(); if(c) impl->handle_message( c, msg ); }); +======= + // continue call to handle_message on connection strand + fc_dlog( logger, "handle sync_request_message" ); + impl.handle_message( c, msg ); +>>>>>>> Handle almost every net_message on net_plugin thread pool. Optimize bcast_block to not send when syncing. } }; @@ -982,10 +988,6 @@ namespace eosio { return (connected() && !syncing); } - void connection::reset() { - peer_requested.reset(); - } - void connection::flush_queues() { buffer_queue.clear_write_queue(); } @@ -1005,6 +1007,7 @@ namespace eosio { if( self->last_req ) { my_impl->dispatcher->retry_fetch( self->shared_from_this() ); } +<<<<<<< HEAD <<<<<<< HEAD reset(); sent_handshake_count = 0; @@ -1035,6 +1038,9 @@ namespace eosio { >>>>>>> Use unique_lock instead of lock_guard to clean up code ======= self->reset(); +======= + self->peer_requested.reset(); +>>>>>>> Handle almost every net_message on net_plugin thread pool. Optimize bcast_block to not send when syncing. 
self->sent_handshake_count = 0; self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); @@ -1048,8 +1054,11 @@ namespace eosio { } void connection::blk_send_branch() { - controller& cc = my_impl->chain_plug->chain(); - uint32_t head_num = cc.fork_db_head_block_num(); + uint32_t head_num = 0; + block_id_type head_id; + std::tie( std::ignore, std::ignore, head_num, + std::ignore, std::ignore, head_id ) = my_impl->get_chain_info(); + notice_message note; note.known_blocks.mode = normal; note.known_blocks.pending = 0; @@ -1058,30 +1067,16 @@ namespace eosio { enqueue(note); return; } - block_id_type head_id; - block_id_type lib_id; block_id_type remote_head_id; uint32_t remote_head_num = 0; - try { - if (last_handshake_recv.generation >= 1) { - remote_head_id = last_handshake_recv.head_id; - remote_head_num = block_header::num_from_id(remote_head_id); - fc_dlog(logger, "maybe truncating branch at = ${h}:${id}",("h",remote_head_num)("id",remote_head_id)); - } - - lib_id = last_handshake_recv.last_irreversible_block_id; - head_id = cc.fork_db_head_block_id(); - } - catch (const assert_exception& ex) { - fc_elog( logger, "unable to retrieve block info: ${n} for ${p}",("n",ex.to_string())("p",peer_name()) ); - enqueue(note); - return; - } - catch (const fc::exception& ex) { - } - catch (...) { + if( last_handshake_recv.generation >= 1 ) { + remote_head_id = last_handshake_recv.head_id; + remote_head_num = block_header::num_from_id(remote_head_id); + fc_dlog( logger, "maybe truncating branch at = ${h}:${id}", ("h", remote_head_num)( "id", remote_head_id ) ); } + block_id_type lib_id = last_handshake_recv.last_irreversible_block_id; + if( !peer_requested ) { peer_requested = sync_state( block_header::num_from_id(lib_id)+1, block_header::num_from_id(head_id), @@ -1097,27 +1092,29 @@ namespace eosio { syncing = false; } - void connection::blk_send(const block_id_type& blkid) { - controller &cc = my_impl->chain_plug->chain(); - try { - signed_block_ptr b = cc.fetch_block_by_id(blkid); - if(b) { - fc_dlog(logger,"found block for id at num ${n}",("n",b->block_num())); - my_impl->dispatcher->add_peer_block({blkid, block_header::num_from_id(blkid), connection_id}); - enqueue_block( b ); - } else { - fc_ilog( logger, "fetch block by id returned null, id ${id} for ${p}", - ("id",blkid)("p",peer_name()) ); + void connection::blk_send( const block_id_type& blkid ) { + app().post( priority::low, [blkid, c = shared_from_this()]() { + controller& cc = my_impl->chain_plug->chain(); + try { + signed_block_ptr b = cc.fetch_block_by_id( blkid ); + if( b ) { + fc_dlog( logger, "found block for id at num ${n}", ("n", b->block_num()) ); + my_impl->dispatcher->add_peer_block( {blkid, block_header::num_from_id( blkid ), c->connection_id} ); + c->strand.post( [c, b{std::move(b)}]() { + c->enqueue_block( b ); + } ); + } else { + fc_ilog( logger, "fetch block by id returned null, id ${id} for ${p}", + ("id", blkid)( "p", c->peer_address() ) ); + } + } catch( const assert_exception& ex ) { + fc_elog( logger, "caught assert on fetch_block_by_id, ${ex}, id ${id} for ${p}", + ("ex", ex.to_string())( "id", blkid )( "p", c->peer_address() ) ); + } catch( ... ) { + fc_elog( logger, "caught other exception fetching block id ${id} for ${p}", + ("id", blkid)( "p", c->peer_address() ) ); } - } - catch (const assert_exception &ex) { - fc_elog( logger, "caught assert on fetch_block_by_id, ${ex}, id ${id} for ${p}", - ("ex",ex.to_string())("id",blkid)("p",peer_name()) ); - } - catch (...) 
{ - fc_elog( logger, "caught other exception fetching block id ${id} for ${p}", - ("id",blkid)("p",peer_name()) ); - } + }); } void connection::stop_send() { @@ -1129,7 +1126,7 @@ namespace eosio { handshake_initializer::populate( c->last_handshake_sent ); c->last_handshake_sent.generation = ++c->sent_handshake_count; fc_dlog( logger, "Sending handshake generation ${g} to ${ep}", - ("g", c->last_handshake_sent.generation)( "ep", c->peer_name() ) ); + ("g", c->last_handshake_sent.generation)( "ep", c->peer_address() ) ); c->enqueue( c->last_handshake_sent ); }); } @@ -1167,7 +1164,6 @@ namespace eosio { } } - // called from connection strand and application thread void connection::do_queue_write(int priority) { if( !buffer_queue.ready_to_send() ) return; @@ -1225,8 +1221,8 @@ namespace eosio { } void connection::cancel_sync(go_away_reason reason) { - fc_dlog(logger,"cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", - ("m",reason_str(reason)) ("o", buffer_queue.write_queue_size())("p", peer_name())); + fc_dlog( logger, "cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", + ("m", reason_str( reason ))( "o", buffer_queue.write_queue_size() )( "p", peer_address() ) ); cancel_wait(); flush_queues(); switch (reason) { @@ -1237,33 +1233,38 @@ namespace eosio { break; } default: - fc_dlog(logger, "sending empty request but not calling sync wait on ${p}", ("p",peer_name())); + fc_dlog(logger, "sending empty request but not calling sync wait on ${p}", ("p",peer_address())); enqueue( ( sync_request_message ) {0,0} ); } } bool connection::enqueue_sync_block() { - if (!peer_requested) + if( !peer_requested ) { + fc_dlog( logger, "enqueue sync block, with no peer_requested" ); return false; + } else { + fc_dlog( logger, "enqueue sync block ${num}", ("num", peer_requested->last + 1) ); + } uint32_t num = ++peer_requested->last; - bool trigger_send = num == peer_requested->start_block; + bool trigger_send = true; // todo: = num == peer_requested->start_block; if(num == peer_requested->end_block) { peer_requested.reset(); } - try { + app().post( priority::low, [num, trigger_send, c = shared_from_this()]() { controller& cc = my_impl->chain_plug->chain(); - signed_block_ptr sb = cc.fetch_block_by_number(num); - if(sb) { - enqueue_block( sb, trigger_send, true); - return true; + signed_block_ptr sb = cc.fetch_block_by_number( num ); + if( sb ) { + c->strand.post( [c, sb{std::move(sb)}, trigger_send]() { + c->enqueue_block( sb, trigger_send, true ); + }); } - } catch ( ... 
) { - fc_wlog( logger, "write loop exception" ); - } - return false; + }); + + return true; } void connection::enqueue( const net_message& m, bool trigger_send ) { + verify_strand_in_this_thread( strand, __func__, __LINE__ ); go_away_reason close_after_send = no_reason; if (m.contains()) { close_after_send = m.get().reason; @@ -1318,6 +1319,8 @@ namespace eosio { } void connection::enqueue_block( const signed_block_ptr& sb, bool trigger_send, bool to_sync_queue) { + fc_dlog( logger, "enqueue block ${num}", ("num", sb->block_num()) ); + verify_strand_in_this_thread( strand, __func__, __LINE__ ); enqueue_buffer( create_send_buffer( sb ), trigger_send, priority::low, no_reason, to_sync_queue); } @@ -1348,7 +1351,7 @@ namespace eosio { void connection::sync_wait() { connection_ptr c(shared_from_this()); std::lock_guard g( response_expected_timer_mtx ); - response_expected_timer.expires_from_now( my_impl->resp_expected_period); + response_expected_timer.expires_from_now( my_impl->resp_expected_period ); response_expected_timer.async_wait( boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { c->sync_timeout( ec ); @@ -1357,20 +1360,13 @@ namespace eosio { // thread safe void connection::fetch_wait() { - connection_wptr c(shared_from_this()); + connection_ptr c( shared_from_this() ); std::lock_guard g( response_expected_timer_mtx ); - response_expected_timer.expires_from_now( my_impl->resp_expected_period); - response_expected_timer.async_wait( [c]( boost::system::error_code ec ) { - app().post(priority::low, [c, ec]() { - connection_ptr conn = c.lock(); - if (!conn) { - // connection was destroyed before this lambda was delivered - return; - } - - conn->fetch_timeout(ec); - }); - } ); + response_expected_timer.expires_from_now( my_impl->resp_expected_period ); + response_expected_timer.async_wait( + boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { + c->fetch_timeout(ec); + } ) ); } // called from connection strand @@ -1399,21 +1395,19 @@ namespace eosio { void connection::fetch_timeout( boost::system::error_code ec ) { if( !ec ) { - my_impl->dispatcher->retry_fetch(shared_from_this()); - } - else if( ec == boost::asio::error::operation_aborted ) { + my_impl->dispatcher->retry_fetch( shared_from_this() ); + } else if( ec == boost::asio::error::operation_aborted ) { if( !connected() ) { - fc_dlog(logger, "fetch timeout was cancelled due to dead connection"); + fc_dlog( logger, "fetch timeout was cancelled due to dead connection" ); } - } - else { + } else { fc_elog( logger, "setting timer for fetch request got error ${ec}", ("ec", ec.message() ) ); } } void connection::request_sync_blocks(uint32_t start, uint32_t end) { sync_request_message srm = {start,end}; - enqueue( net_message(srm)); + enqueue( net_message(srm) ); sync_wait(); } @@ -1464,15 +1458,6 @@ namespace eosio { } } - bool sync_manager::is_sync_required() { - fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", - ("req", sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) - ("head", chain_plug->chain().fork_db_head_block_num() ) ); - - return( sync_last_requested_num < sync_known_lib_num || - chain_plug->chain().fork_db_head_block_num() < sync_last_requested_num ); - } - // call with g_sync locked void sync_manager::request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn ) { uint32_t fork_head_block_num = 0; @@ -1554,9 +1539,10 @@ namespace eosio { sync_last_requested_num = end; 
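sync_wait() and fetch_wait() above both arm response_expected_timer under its mutex and bind the completion handler to the connection's strand, so the timeout runs serialized with all other work for that connection. A reduced sketch of that timer pattern follows; session, arm_response_timeout and on_timeout are hypothetical names, not the plugin's own.

    #include <boost/asio.hpp>
    #include <chrono>
    #include <memory>
    #include <mutex>

    struct session : std::enable_shared_from_this<session> {
       boost::asio::io_context::strand strand;
       std::mutex                      timer_mtx;        // timer can be armed from any thread
       boost::asio::steady_timer       response_timer;

       explicit session( boost::asio::io_context& ioc ) : strand( ioc ), response_timer( ioc ) {}

       void arm_response_timeout( std::chrono::seconds period ) {
          auto self = shared_from_this();                 // keep the session alive until the handler runs
          std::lock_guard<std::mutex> g( timer_mtx );
          response_timer.expires_from_now( period );
          response_timer.async_wait( boost::asio::bind_executor( strand,
             [self]( const boost::system::error_code& ec ) {
                if( ec == boost::asio::error::operation_aborted ) return; // timer was cancelled
                self->on_timeout();                       // runs serialized with other strand work
             } ) );
       }

       void on_timeout() { /* e.g. retry the request or drop the peer */ }
    };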
connection_ptr c = sync_source; g_sync.unlock(); - fc_ilog( logger, "requesting range ${s} to ${e}, from ${n}", - ("n", c->peer_address())( "s", start )( "e", end ) ); - c->request_sync_blocks( start, end ); + c->strand.post( [c, start, end]() { + fc_ilog( logger, "requesting range ${s} to ${e}, from ${n}", ("n", c->peer_address())( "s", start )( "e", end ) ); + c->request_sync_blocks( start, end ); + } ); } } } @@ -1571,23 +1557,35 @@ namespace eosio { } } + bool sync_manager::is_sync_required( uint32_t fork_head_block_num ) { + fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", + ("req", sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) + ("head", fork_head_block_num ) ); + + return( sync_last_requested_num < sync_known_lib_num || + fork_head_block_num < sync_last_requested_num ); + } + void sync_manager::start_sync(const connection_ptr& c, uint32_t target) { std::unique_lock g_sync( sync_mtx ); if( target > sync_known_lib_num) { sync_known_lib_num = target; } - if( !is_sync_required() ) { - uint32_t bnum = chain_plug->chain().last_irreversible_block_num(); - uint32_t hnum = chain_plug->chain().fork_db_head_block_num(); + uint32_t lib_num = 0; + uint32_t fork_head_block_num = 0; + std::tie( lib_num, std::ignore, fork_head_block_num, + std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info(); + + if( !is_sync_required( fork_head_block_num ) ) { fc_dlog( logger, "We are already caught up, my irr = ${b}, head = ${h}, target = ${t}", - ("b", bnum)( "h", hnum )( "t", target ) ); + ("b", lib_num)( "h", fork_head_block_num )( "t", target ) ); return; } if( sync_state == in_sync ) { set_state( lib_catchup ); - sync_next_expected_num = chain_plug->chain().last_irreversible_block_num() + 1; + sync_next_expected_num = lib_num + 1; } fc_ilog( logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", @@ -1759,6 +1757,7 @@ namespace eosio { fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_address() ) ); std::unique_lock g_sync( sync_mtx ); stages state = sync_state; + fc_dlog( logger, "state ${s}", ("s", stage_str( state )) ); if( state == lib_catchup ) { if (blk_num != sync_next_expected_num) { fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", @@ -1767,7 +1766,8 @@ namespace eosio { return; } sync_next_expected_num = blk_num + 1; - } else if( state == head_catchup ) { + } + if( state == head_catchup ) { fc_dlog( logger, "sync_manager in head_catchup state" ); set_state( in_sync ); sync_source.reset(); @@ -1914,9 +1914,14 @@ namespace eosio { void dispatch_manager::bcast_block(const block_state_ptr& bs) { fc_dlog( logger, "bcast block ${b}", ("b", bs->block_num) ); - boost::shared_lock g( my_impl->connections_mtx ); + if( my_impl->sync_master->syncing_with_peer() ) return; bool have_connection = false; + boost::shared_lock g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { + + peer_dlog( cp, "socket_is_open ${s}, connecting ${c}, syncing ${ss}", + ("s", cp->socket_is_open())("c", cp->connecting.load())("ss", cp->syncing.load()) ); + if( !cp->current() ) { continue; } @@ -1942,7 +1947,7 @@ namespace eosio { if( !add_peer_block( pbstate ) ) { return; } - fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_address() ) ); cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); } }); @@ -1989,8 
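start_sync() and is_sync_required() above no longer reach into the controller from net threads; they read a cached lib/head/fork-head snapshot through get_chain_info() and std::tie. A minimal sketch of such a cache, with hypothetical names (chain_info_cache, update, get); the real structure also carries the corresponding block ids.

    #include <cstdint>
    #include <mutex>
    #include <tuple>

    class chain_info_cache {
       mutable std::mutex mtx_;
       uint32_t lib_num_ = 0;        // last irreversible block
       uint32_t head_num_ = 0;       // head block
       uint32_t fork_head_num_ = 0;  // fork-db head

    public:
       // called on the main thread whenever a block is accepted / becomes irreversible
       void update( uint32_t lib, uint32_t head, uint32_t fork_head ) {
          std::lock_guard<std::mutex> g( mtx_ );
          lib_num_ = lib; head_num_ = head; fork_head_num_ = fork_head;
       }

       // safe to call from any net thread
       std::tuple<uint32_t, uint32_t, uint32_t> get() const {
          std::lock_guard<std::mutex> g( mtx_ );
          return std::make_tuple( lib_num_, head_num_, fork_head_num_ );
       }
    };

    // usage, mirroring the std::tie( ... ) = my_impl->get_chain_info() calls above:
    //   uint32_t lib = 0, head = 0, fork_head = 0;
    //   std::tie( lib, head, fork_head ) = cache.get();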
+1994,10 @@ namespace eosio { send_buffer = create_send_buffer( trx ); } - fc_dlog(logger, "sending trx to ${n}", ("n", cp->peer_name() ) ); - cp->enqueue_buffer( send_buffer, true, priority::low, no_reason ); + cp->strand.post( [cp, send_buffer]() { + fc_dlog( logger, "sending trx to ${n}", ("n", cp->peer_address()) ); + cp->enqueue_buffer( send_buffer, true, priority::low, no_reason ); + } ); } } @@ -2009,67 +2016,65 @@ namespace eosio { update_txns_block_num( id, head_blk_num ); } + // called from connection strand void dispatch_manager::recv_notice(const connection_ptr& c, const notice_message& msg, bool generated) { request_message req; req.req_trx.mode = none; req.req_blocks.mode = none; - bool send_req = false; if (msg.known_trx.mode == normal) { req.req_trx.mode = normal; req.req_trx.pending = 0; - send_req = false; - } - else if (msg.known_trx.mode != none) { - fc_elog( logger,"passed a notice_message with something other than a normal on none known_trx" ); + } else if (msg.known_trx.mode != none) { + fc_elog( logger, "passed a notice_message with something other than a normal on none known_trx" ); return; } if (msg.known_blocks.mode == normal) { req.req_blocks.mode = normal; - controller& cc = my_impl->chain_plug->chain(); // known_blocks.ids is never > 1 if( !msg.known_blocks.ids.empty() ) { - const block_id_type& blkid = msg.known_blocks.ids.back(); - signed_block_ptr b; - try { - b = cc.fetch_block_by_id(blkid); // if exists - if(b) { - add_peer_block({blkid, block_header::num_from_id(blkid), c->connection_id}); + app().post( priority::low, [this, msg{std::move(msg)}, req{std::move(req)}, c]() mutable { + const block_id_type& blkid = msg.known_blocks.ids.back(); + signed_block_ptr b; + try { + controller& cc = my_impl->chain_plug->chain(); + b = cc.fetch_block_by_id( blkid ); // if exists + if( b ) { + add_peer_block( {blkid, block_header::num_from_id( blkid ), c->connection_id} ); + } + } catch( const assert_exception& ex ) { + fc_ilog( logger, "caught assert on fetch_block_by_id, ${ex}", ("ex", ex.what()) ); + // keep going, client can ask another peer + } catch( ... ) { + fc_elog( logger, "failed to retrieve block for id" ); } - } catch (const assert_exception &ex) { - fc_ilog( logger, "caught assert on fetch_block_by_id, ${ex}",("ex",ex.what()) ); - // keep going, client can ask another peer - } catch (...) 
{ - fc_elog( logger, "failed to retrieve block for id"); - } - if (!b) { - send_req = true; - req.req_blocks.ids.push_back( blkid ); - } + if( !b ) { + req.req_blocks.ids.push_back( blkid ); + c->strand.post( [req{std::move(req)}, c{std::move(c)}]() mutable { + fc_dlog( logger, "send req" ); + c->enqueue( req ); + c->fetch_wait(); + c->last_req = std::move( req ); + }); + } + }); } - } - else if (msg.known_blocks.mode != none) { + } else if (msg.known_blocks.mode != none) { fc_elog( logger, "passed a notice_message with something other than a normal on none known_blocks" ); return; } - fc_dlog( logger, "send req = ${sr}", ("sr",send_req)); - if( send_req) { - c->enqueue(req); - c->fetch_wait(); - c->last_req = std::move(req); - } } void dispatch_manager::retry_fetch(const connection_ptr& c) { if (!c->last_req) { return; } - fc_wlog( logger, "failed to fetch from ${p}",("p",c->peer_name())); + fc_wlog( logger, "failed to fetch from ${p}", ("p", c->peer_address()) ); block_id_type bid; if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { bid = c->last_req->req_blocks.ids.back(); } else { - fc_wlog( logger,"no retry, block mpde = ${b} trx mode = ${t}", - ("b",modes_str(c->last_req->req_blocks.mode))("t",modes_str(c->last_req->req_trx.mode))); + fc_wlog( logger, "no retry, block mpde = ${b} trx mode = ${t}", + ("b", modes_str( c->last_req->req_blocks.mode ))( "t", modes_str( c->last_req->req_trx.mode ) ) ); return; } boost::shared_lock g( my_impl->connections_mtx ); @@ -2079,9 +2084,11 @@ namespace eosio { } bool sendit = peer_has_block( bid, c->connection_id ); if (sendit) { - conn->enqueue(*c->last_req); - conn->fetch_wait(); - conn->last_req = c->last_req; + conn->strand.post( [conn, last_req = *c->last_req]() { + conn->enqueue( last_req ); + conn->fetch_wait(); + conn->last_req = last_req; + } ); return; } } @@ -2580,6 +2587,8 @@ namespace eosio { chain_head_blk_id = cc.head_block_id(); chain_fork_head_blk_num = cc.fork_db_head_block_num(); chain_fork_head_blk_id = cc.fork_db_head_block_id(); + fc_dlog( logger, "updating chain info lib ${lib}, head ${head}, fork ${fork}", + ("lib", chain_lib_num)("head", chain_head_blk_num)("fork", chain_fork_head_blk_num) ); } // lib_num, head_blk_num, fork_head_blk_num, lib_id, head_blk_id, fork_head_blk_id @@ -2698,10 +2707,10 @@ namespace eosio { controller& cc = chain_plug->chain(); uint32_t lib_num = cc.last_irreversible_block_num(); - bool on_fork = false; - fc_dlog( logger, "lib_num = ${ln} peer_lib = ${pl}", ("ln", lib_num)( "pl", peer_lib ) ); + fc_dlog( logger, "handshake, check for fork lib_num = ${ln} peer_lib = ${pl}", ("ln", lib_num)( "pl", peer_lib ) ); if( peer_lib <= lib_num && peer_lib > 0 ) { + bool on_fork = false; try { block_id_type peer_lib_id = cc.get_block_id_for_num( peer_lib ); on_fork = (msg_lib_id != peer_lib_id); @@ -2713,9 +2722,10 @@ namespace eosio { on_fork = true; } if( on_fork ) { - fc_elog( logger, "Peer chain is forked" ); - c->enqueue( go_away_message( forked ) ); - return; + c->strand.post( [c]() { + fc_elog( logger, "Peer chain is forked" ); + c->enqueue( go_away_message( forked ) ); + } ); } } }); @@ -2727,13 +2737,13 @@ namespace eosio { c->last_handshake_recv = msg; c->_logger_variant.reset(); - sync_master->recv_handshake(c,msg); + sync_master->recv_handshake( c, msg ); } void net_plugin_impl::handle_message(const connection_ptr& c, const go_away_message& msg) { - peer_wlog(c, "received go_away_message, reason = ${r}", ("r",reason_str( msg.reason )) ); + peer_wlog( c, "received 
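The recv_notice()/retry_fetch() changes above follow one rule: copy the shared request state while holding a short-lived lock, release the lock, then post the actual network work to the target connection's strand. A small sketch of that rule under hypothetical names (peer, request_msg, retry_last_request); it is not the plugin's exact retry logic.

    #include <boost/asio.hpp>
    #include <memory>
    #include <mutex>
    #include <optional>
    #include <string>

    struct request_msg { std::string payload; };

    struct peer {
       boost::asio::io_context::strand strand;
       std::mutex                      state_mtx;   // guards last_req only
       std::optional<request_msg>      last_req;

       explicit peer( boost::asio::io_context& ioc ) : strand( ioc ) {}
       void enqueue( const request_msg& ) { /* serialize + write; strand only */ }
    };

    void retry_last_request( const std::shared_ptr<peer>& p ) {
       std::optional<request_msg> snapshot;
       {
          std::lock_guard<std::mutex> g( p->state_mtx ); // hold only long enough to copy
          snapshot = p->last_req;
       }
       if( !snapshot ) return;                           // nothing outstanding
       p->strand.post( [p, req = *snapshot]() {          // network work happens on the strand
          p->enqueue( req );
       } );
    }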
go_away_message, reason = ${r}", ("r", reason_str( msg.reason )) ); c->no_retry = msg.reason; - if(msg.reason == duplicate ) { + if( msg.reason == duplicate ) { c->node_id = msg.node_id; } c->flush_queues(); @@ -2741,7 +2751,7 @@ namespace eosio { } void net_plugin_impl::handle_message(const connection_ptr& c, const time_message& msg) { - peer_ilog(c, "received time_message"); + peer_ilog( c, "received time_message" ); /* We've already lost however many microseconds it took to dispatch * the message, but it can't be helped. */ @@ -2777,12 +2787,13 @@ namespace eosio { // peer tells us about one or more blocks or txns. When done syncing, forward on // notices of previously unknown blocks or txns, // - peer_ilog(c, "received notice_message"); + peer_ilog( c, "received notice_message" ); c->connecting = false; request_message req; bool send_req = false; - if (msg.known_trx.mode != none) { - fc_dlog(logger,"this is a ${m} notice with ${n} transactions", ("m",modes_str(msg.known_trx.mode))("n",msg.known_trx.pending)); + if( msg.known_trx.mode != none ) { + fc_dlog( logger, "this is a ${m} notice with ${n} transactions", + ("m", modes_str( msg.known_trx.mode ))( "n", msg.known_trx.pending ) ); } switch (msg.known_trx.mode) { case none: @@ -2868,12 +2879,13 @@ namespace eosio { } - void net_plugin_impl::handle_message(const connection_ptr& c, const sync_request_message& msg) { - if( msg.end_block == 0) { + void net_plugin_impl::handle_message( const connection_ptr& c, const sync_request_message& msg ) { + fc_dlog( logger, "peer requested ${start} to ${end}", ("start", msg.start_block)("end", msg.end_block) ); + if( msg.end_block == 0 ) { c->peer_requested.reset(); c->flush_queues(); } else { - c->peer_requested = sync_state( msg.start_block,msg.end_block,msg.start_block-1); + c->peer_requested = sync_state( msg.start_block, msg.end_block, msg.start_block-1); c->enqueue_sync_block(); } } @@ -2898,11 +2910,7 @@ namespace eosio { peer_ilog(c, "received packed_transaction ${id}", ("id", tid)); bool have_trx = dispatcher->have_txn( tid ); - connection_wptr weak_ptr = c; - app().post(priority::low, [weak_ptr{std::move(weak_ptr)}, &dispatcher = dispatcher, ptrx](){ - auto c = weak_ptr.lock(); - dispatcher->recv_transaction(c, ptrx); - }); + dispatcher->recv_transaction(c, ptrx); if( have_trx ) { fc_dlog( logger, "got a duplicate transaction - dropping ${id}", ("id", tid) ); return; @@ -2939,11 +2947,11 @@ namespace eosio { controller& cc = chain_plug->chain(); uint32_t head_blk_num = cc.head_block_num(); - app().post(priority::low, [accepted, &dispatcher = dispatcher, ptrx{std::move(ptrx)}, head_blk_num]() { + boost::asio::post( *my_impl->server_ioc, [accepted, ptrx{std::move(ptrx)}, head_blk_num]() { if( accepted ) { - dispatcher->bcast_transaction( ptrx ); + my_impl->dispatcher->bcast_transaction( ptrx ); } else { - dispatcher->rejected_transaction( ptrx->id, head_blk_num ); + my_impl->dispatcher->rejected_transaction( ptrx->id, head_blk_num ); } }); }); @@ -3194,7 +3202,6 @@ namespace eosio { std::lock_guard g( keepalive_timer_mtx ); keepalive_timer->expires_from_now(keepalive_interval); keepalive_timer->async_wait( [this]( boost::system::error_code ec ) { - app().post( priority::low, [this, ec]() { ticker(); if( ec ) { fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); @@ -3202,11 +3209,12 @@ namespace eosio { boost::shared_lock g( connections_mtx ); for( auto& c : connections ) { if( c->socket_is_open() ) { - c->send_time(); + c->strand.post( [c]() { + 
c->send_time(); + } ); } } } ); - } ); } void net_plugin_impl::start_monitors() { @@ -3544,6 +3552,8 @@ namespace eosio { } void net_plugin::plugin_startup() { + handle_sighup(); + my->producer_plug = app().find_plugin(); my->thread_pool.emplace( my->thread_pool_size ); @@ -3623,10 +3633,11 @@ namespace eosio { my->start_monitors(); + my->update_chain_info(); + for( const auto& seed_node : my->supplied_peers ) { connect( seed_node ); } - handle_sighup(); } void net_plugin::handle_sighup() { @@ -3698,7 +3709,6 @@ namespace eosio { boost::unique_lock g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { if( (*itr)->peer_address() == host ) { - (*itr)->reset(); fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) ); (*itr)->close(); my->connections.erase(itr); From 26bcddd7cf753bc87e9f20e486b14ce19313e3f0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 07:34:57 -0500 Subject: [PATCH 0146/1648] Accept block_state_ptr as const& --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 045fcd94b1a..a18fc0f1895 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3621,7 +3621,7 @@ namespace eosio { my->incoming_transaction_ack_subscription = app().get_channel().subscribe( boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); my->incoming_irreversible_block_subscription = app().get_channel().subscribe( - [this]( block_state_ptr s ) { + [this]( const block_state_ptr& s ) { my->on_irreversible_block( s ); }); From 2e0de468238c5ad6bcebeb9b5361d23df94aa043 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 12:10:16 -0500 Subject: [PATCH 0147/1648] Protect last_req and fix syncing issue --- plugins/net_plugin/net_plugin.cpp | 49 +++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a18fc0f1895..690d85b2f6f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -228,7 +228,7 @@ namespace eosio { connection_ptr find_connection(const string& host)const; - mutable boost::shared_mutex connections_mtx; // switch to std::shared_mutex in C++17 + mutable boost::shared_mutex connections_mtx; // switch to std::shared_mutex in C++17, also protects connection::last_req std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection bool done = false; unique_ptr< sync_manager > sync_master; @@ -635,7 +635,7 @@ namespace eosio { std::atomic no_retry{no_reason}; block_id_type fork_head; std::atomic fork_head_num{0}; // provides memory barrier for fork_head - optional last_req; + optional last_req; // mutex protected by connections_mtx connection_status get_status()const { connection_status stat; @@ -1004,7 +1004,11 @@ namespace eosio { self->flush_queues(); self->connecting = false; self->syncing = false; - if( self->last_req ) { + + boost::shared_lock g_conn( my_impl->connections_mtx ); + bool has_last_req = !!self->last_req; + g_conn.unlock(); + if( has_last_req ) { my_impl->dispatcher->retry_fetch( self->shared_from_this() ); } <<<<<<< HEAD @@ -1044,7 +1048,7 @@ namespace eosio { self->sent_handshake_count = 0; self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); - 
my_impl->sync_master->sync_reset_lib_num( nullptr ); + my_impl->sync_master->sync_reset_lib_num( self->shared_from_this() ); fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); self->cancel_wait(); @@ -1958,13 +1962,16 @@ namespace eosio { void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { peer_block_state pbstate{id, bnum, c->connection_id}; add_peer_block( pbstate ); + boost::unique_lock g( my_impl->connections_mtx ); if (c && c->last_req && c->last_req->req_blocks.mode != none && !c->last_req->req_blocks.ids.empty() && c->last_req->req_blocks.ids.back() == id) { + fc_dlog( logger, "reseting last_req" ); c->last_req.reset(); } + g.unlock(); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); @@ -2053,6 +2060,7 @@ namespace eosio { fc_dlog( logger, "send req" ); c->enqueue( req ); c->fetch_wait(); + boost::unique_lock g( my_impl->connections_mtx ); c->last_req = std::move( req ); }); } @@ -2065,6 +2073,8 @@ namespace eosio { } void dispatch_manager::retry_fetch(const connection_ptr& c) { + fc_dlog( logger, "retry fetch" ); + boost::shared_lock g( my_impl->connections_mtx ); if (!c->last_req) { return; } @@ -2077,24 +2087,37 @@ namespace eosio { ("b", modes_str( c->last_req->req_blocks.mode ))( "t", modes_str( c->last_req->req_trx.mode ) ) ); return; } - boost::shared_lock g( my_impl->connections_mtx ); - for (auto& conn : my_impl->connections) { - if (conn == c || conn->last_req) { + for( auto& conn : my_impl->connections ) { + if( conn == c || conn->last_req ) { continue; } - bool sendit = peer_has_block( bid, c->connection_id ); - if (sendit) { + bool sendit = peer_has_block( bid, conn->connection_id ); + if( sendit ) { conn->strand.post( [conn, last_req = *c->last_req]() { conn->enqueue( last_req ); conn->fetch_wait(); + boost::unique_lock g( my_impl->connections_mtx ); conn->last_req = last_req; } ); return; } } - g.unlock(); + // found no peer that we know has it, so ask some random connection + for( auto& conn : my_impl->connections ) { + if( conn == c || conn->last_req ) { + continue; + } + conn->strand.post( [conn, last_req = *c->last_req]() { + conn->enqueue( last_req ); + conn->fetch_wait(); + boost::unique_lock g( my_impl->connections_mtx ); + conn->last_req = last_req; + } ); + return; + } // at this point no other peer has it, re-request or do nothing? 
+ fc_wlog( logger, "no peer has last_req" ); if( c->connected() ) { c->enqueue(*c->last_req); c->fetch_wait(); @@ -2195,8 +2218,10 @@ namespace eosio { } } else { if( endpoint_itr != tcp::resolver::iterator() ) { - c->close(); - c->connect( resolver, endpoint_itr ); + c->close(); // close posts to strand, so also post connect otherwise connect will happen before close + c->strand.post( [resolver, c, endpoint_itr]() { + c->connect( resolver, endpoint_itr ); + } ); } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; From 9e88fd63036dc23d3037fd36e25d9bfb8d8c1eb2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 13:04:21 -0500 Subject: [PATCH 0148/1648] Get sync block at medium priority --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 690d85b2f6f..f2395b0dec1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1254,7 +1254,7 @@ namespace eosio { if(num == peer_requested->end_block) { peer_requested.reset(); } - app().post( priority::low, [num, trigger_send, c = shared_from_this()]() { + app().post( priority::medium, [num, trigger_send, c = shared_from_this()]() { controller& cc = my_impl->chain_plug->chain(); signed_block_ptr sb = cc.fetch_block_by_number( num ); if( sb ) { From eb98a90d744207d1b8197b44bd3277bc132735b0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 13:29:35 -0500 Subject: [PATCH 0149/1648] Remove dead code --- plugins/net_plugin/net_plugin.cpp | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index f2395b0dec1..5e4a95e1293 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1129,8 +1129,10 @@ namespace eosio { strand.post( [c = shared_from_this()]() { handshake_initializer::populate( c->last_handshake_sent ); c->last_handshake_sent.generation = ++c->sent_handshake_count; - fc_dlog( logger, "Sending handshake generation ${g} to ${ep}", - ("g", c->last_handshake_sent.generation)( "ep", c->peer_address() ) ); + fc_dlog( logger, "Sending handshake generation ${g} to ${ep}, lib ${lib}, head ${head}", + ("g", c->last_handshake_sent.generation)( "ep", c->peer_address() ) + ( "lib", c->last_handshake_sent.last_irreversible_block_num ) + ( "head", c->last_handshake_sent.head_num ) ); c->enqueue( c->last_handshake_sent ); }); } @@ -2815,7 +2817,6 @@ namespace eosio { peer_ilog( c, "received notice_message" ); c->connecting = false; request_message req; - bool send_req = false; if( msg.known_trx.mode != none ) { fc_dlog( logger, "this is a ${m} notice with ${n} transactions", ("m", modes_str( msg.known_trx.mode ))( "n", msg.known_trx.pending ) ); @@ -2856,10 +2857,6 @@ namespace eosio { peer_elog(c, "bad notice_message : invalid known_blocks.mode ${m}",("m",static_cast(msg.known_blocks.mode))); } } - fc_dlog(logger, "send req = ${sr}", ("sr",send_req)); - if( send_req) { - c->enqueue(req); - } } void net_plugin_impl::handle_message(const connection_ptr& c, const request_message& msg) { From 712802265ce91d580e20c7de09e128a4be9069e9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 07:48:45 -0500 Subject: [PATCH 0150/1648] Add protection for last_handshake_*, last_req, and socket close --- .../include/eosio/net_plugin/protocol.hpp | 2 +- 
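The reconnect path above adds a comment worth keeping: close() only posts its teardown to the strand, so the follow-up connect() must also be posted to the same strand or it could run before the close finishes. A sketch of that ordering guarantee with hypothetical names (conn, teardown, reconnect); work posted to one strand from the same thread runs in the order it was posted.

    #include <boost/asio.hpp>
    #include <memory>

    struct conn : std::enable_shared_from_this<conn> {
       boost::asio::io_context::strand strand;
       explicit conn( boost::asio::io_context& ioc ) : strand( ioc ) {}

       void teardown()  { /* cancel reads/writes, close the socket */ }
       void reconnect() { /* resolve + async_connect again */ }

       void close() {
          // close itself only posts; the real teardown happens on the strand
          strand.post( [self = shared_from_this()]() { self->teardown(); } );
       }

       void close_and_reconnect() {
          close();
          // posting (not calling) reconnect guarantees it runs *after* the teardown above
          strand.post( [self = shared_from_this()]() { self->reconnect(); } );
       }
    };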
plugins/net_plugin/net_plugin.cpp | 153 ++++++++++++------
 2 files changed, 102 insertions(+), 53 deletions(-)
diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
index 7170c1abd20..cdb3d98fd2f 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
@@ -36,7 +36,7 @@ namespace eosio {
 block_id_type head_id;
 string os;
 string agent;
- int16_t generation;
+ int16_t generation = 0;
 };
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 5e4a95e1293..2f823964dbd 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -614,8 +614,6 @@ namespace eosio {
 >>>>>>> Make delay_timer thread safe
 fc::sha256 node_id;
 const uint32_t connection_id;
- handshake_message last_handshake_recv;
- handshake_message last_handshake_sent;
 int16_t sent_handshake_count = 0;
 std::atomic connecting{false};
 std::atomic syncing{false};
@@ -635,16 +633,13 @@ namespace eosio {
 std::atomic no_retry{no_reason};
 block_id_type fork_head;
 std::atomic fork_head_num{0}; // provides memory barrier for fork_head
- optional last_req; // mutex protected by connections_mtx
- connection_status get_status()const {
- connection_status stat;
- stat.peer = peer_addr;
- stat.connecting = connecting;
- stat.syncing = syncing;
- stat.last_handshake = last_handshake_recv;
- return stat;
- }
+ mutable std::mutex conn_mtx; // mtx for last_req, last_handshake_recv, last_handshake_sent
+ optional last_req;
+ handshake_message last_handshake_recv;
+ handshake_message last_handshake_sent;
+
+ connection_status get_status()const;
 /** \name Peer Timestamps
 * Time message handling
@@ -899,8 +894,6 @@ namespace eosio {
 >>>>>>> Move socket ownership into connection.
 node_id(),
 connection_id( ++my_impl->current_connection_id ),
- last_handshake_recv(),
- last_handshake_sent(),
 sent_handshake_count(0),
 connecting(false),
 syncing(false),
@@ -909,7 +902,9 @@ namespace eosio {
 response_expected_timer( *my_impl->server_ioc ),
 read_delay_timer( *my_impl->server_ioc ),
 no_retry(no_reason),
- last_req()
+ last_req(),
+ last_handshake_recv(),
+ last_handshake_sent()
 {
 fc_ilog( logger, "created connection to ${n}", ("n", endpoint) );
 node_id.data()[0] = 0;
@@ -930,8 +925,6 @@ namespace eosio {
 >>>>>>> Move socket ownership into connection.
 node_id(),
 connection_id( ++my_impl->current_connection_id ),
- last_handshake_recv(),
- last_handshake_sent(),
 sent_handshake_count(0),
 connecting(true),
 syncing(false),
@@ -940,14 +933,15 @@ namespace eosio {
 response_expected_timer( *my_impl->server_ioc ),
 read_delay_timer( *my_impl->server_ioc ),
 no_retry(no_reason),
- last_req()
+ last_req(),
+ last_handshake_recv(),
+ last_handshake_sent()
 {
 fc_ilog( logger, "accepted network connection" );
 node_id.data()[0] = 0;
 }
 connection::~connection() {
- pending_message_buffer.reset();
 }
 void connection::update_endpoints() {
@@ -961,6 +955,16 @@ namespace eosio {
 local_endpoint_port = ec ?
unknown : std::to_string(lep.port()); } + connection_status connection::get_status()const { + connection_status stat; + stat.peer = peer_addr; + stat.connecting = connecting; + stat.syncing = syncing; + std::lock_guard g( conn_mtx ); + stat.last_handshake = last_handshake_recv; + return stat; + } + bool connection::start_session() { verify_strand_in_this_thread( strand, __func__, __LINE__ ); @@ -1005,7 +1009,7 @@ namespace eosio { self->connecting = false; self->syncing = false; - boost::shared_lock g_conn( my_impl->connections_mtx ); + std::unique_lock g_conn( self->conn_mtx ); bool has_last_req = !!self->last_req; g_conn.unlock(); if( has_last_req ) { @@ -1046,15 +1050,22 @@ namespace eosio { self->peer_requested.reset(); >>>>>>> Handle almost every net_message on net_plugin thread pool. Optimize bcast_block to not send when syncing. self->sent_handshake_count = 0; + g_conn.lock(); self->last_handshake_recv = handshake_message(); self->last_handshake_sent = handshake_message(); + g_conn.unlock(); my_impl->sync_master->sync_reset_lib_num( self->shared_from_this() ); - fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); + fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); // peer_name(), do not hold conn_mtx self->cancel_wait(); std::lock_guard g( self->read_delay_timer_mtx ); self->read_delay_timer.cancel(); +<<<<<<< HEAD >>>>>>> Move socket ownership into connection. +======= + + self->pending_message_buffer.reset(); +>>>>>>> Add protection for last_handshake_*, last_req, and socket close } void connection::blk_send_branch() { @@ -1073,13 +1084,15 @@ namespace eosio { } block_id_type remote_head_id; uint32_t remote_head_num = 0; + std::unique_lock g_conn( conn_mtx ); if( last_handshake_recv.generation >= 1 ) { remote_head_id = last_handshake_recv.head_id; remote_head_num = block_header::num_from_id(remote_head_id); - fc_dlog( logger, "maybe truncating branch at = ${h}:${id}", ("h", remote_head_num)( "id", remote_head_id ) ); + fc_dlog( logger, "maybe truncating branch at = ${h}:${id}", ("h", remote_head_num)( "id", remote_head_id ) ); } block_id_type lib_id = last_handshake_recv.last_irreversible_block_id; + g_conn.unlock(); if( !peer_requested ) { peer_requested = sync_state( block_header::num_from_id(lib_id)+1, @@ -1090,6 +1103,7 @@ namespace eosio { uint32_t end = std::max( peer_requested->end_block, block_header::num_from_id(head_id) ); peer_requested = sync_state( start, end, start - 1 ); } + fc_dlog( logger, "enqueue ${s} - ${e}", ("s", peer_requested->start_block)("e", peer_requested->end_block) ); enqueue_sync_block(); // still want to send transactions along during blk branch sync @@ -1127,13 +1141,16 @@ namespace eosio { void connection::send_handshake() { strand.post( [c = shared_from_this()]() { + std::unique_lock g_conn( c->conn_mtx ); handshake_initializer::populate( c->last_handshake_sent ); c->last_handshake_sent.generation = ++c->sent_handshake_count; fc_dlog( logger, "Sending handshake generation ${g} to ${ep}, lib ${lib}, head ${head}", ("g", c->last_handshake_sent.generation)( "ep", c->peer_address() ) ( "lib", c->last_handshake_sent.last_irreversible_block_num ) ( "head", c->last_handshake_sent.head_num ) ); - c->enqueue( c->last_handshake_sent ); + auto cpy = c->last_handshake_sent; + g_conn.unlock(); + c->enqueue( cpy ); }); } @@ -1193,6 +1210,9 @@ namespace eosio { boost::asio::bind_executor( c->strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { >>>>>>> Make queued_buffer thread safe try { + // May have 
closed connection and cleared buffer_queue + if( !c->socket_is_open() ) return; + c->buffer_queue.out_callback( ec, w ); if( ec ) { @@ -1256,7 +1276,8 @@ namespace eosio { if(num == peer_requested->end_block) { peer_requested.reset(); } - app().post( priority::medium, [num, trigger_send, c = shared_from_this()]() { + const int higher_than_low = priority::low + 2; // otherwise client gets very little if we are syncing to a peer + app().post( higher_than_low, [num, trigger_send, c = shared_from_this()]() { controller& cc = my_impl->chain_plug->chain(); signed_block_ptr sb = cc.fetch_block_by_number( num ); if( sb ) { @@ -1385,11 +1406,13 @@ namespace eosio { } } - // todo: last_handshake_recv not thread safe + // locks conn_mtx, do not call while holding conn_mtx const string connection::peer_name() { + std::unique_lock g_conn( conn_mtx ); if( !last_handshake_recv.p2p_address.empty() ) { return last_handshake_recv.p2p_address; } + g_conn.unlock(); if( !peer_address().empty() ) { return peer_address(); } @@ -1455,6 +1478,7 @@ namespace eosio { } if( !c ) return; if( c->current() ) { + std::lock_guard g_conn( c->conn_mtx ); if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num ) { sync_known_lib_num = c->last_handshake_recv.last_irreversible_block_num; } @@ -1738,7 +1762,9 @@ namespace eosio { verify_catchup(c, msg.known_blocks.pending, msg.known_blocks.ids.back()); } } else if (msg.known_blocks.mode == last_irr_catch_up) { + std::unique_lock g_conn( c->conn_mtx ); c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; + g_conn.unlock(); sync_reset_lib_num(c); start_sync(c, msg.known_trx.pending); } @@ -1751,7 +1777,7 @@ namespace eosio { fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_address()) ); sync_last_requested_num = 0; sync_source.reset(); - set_state( in_sync ); + //todo: set_state( in_sync ); g.unlock(); c->close(); send_handshakes(); @@ -1767,7 +1793,7 @@ namespace eosio { if( state == lib_catchup ) { if (blk_num != sync_next_expected_num) { fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", - ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_name()) ); + ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_address()) ); c->close(); return; } @@ -1946,14 +1972,15 @@ namespace eosio { } cp->strand.post( [this, cp, bs, send_buffer]() { uint32_t bnum = bs->block_num; - // todo protect cp->last_handshake_recv + std::unique_lock g_conn( cp->conn_mtx ); bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; + g_conn.unlock(); if( !has_block ) { peer_block_state pbstate{bs->id, bnum, cp->connection_id}; if( !add_peer_block( pbstate ) ) { return; } - fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_address() ) ); + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); } }); @@ -1964,7 +1991,7 @@ namespace eosio { void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { peer_block_state pbstate{id, bnum, c->connection_id}; add_peer_block( pbstate ); - boost::unique_lock g( my_impl->connections_mtx ); + std::unique_lock g( c->conn_mtx ); if (c && c->last_req && c->last_req->req_blocks.mode != none && @@ -2062,7 +2089,7 @@ namespace eosio { fc_dlog( logger, "send req" ); c->enqueue( req ); c->fetch_wait(); - boost::unique_lock g( my_impl->connections_mtx ); + std::lock_guard 
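The new early-return above ("may have closed connection and cleared buffer_queue") reflects that close() now runs on the same strand as the I/O completion handlers, so each handler re-checks the socket before touching connection state. A sketch of that guard, assuming a hypothetical conn type with an atomic socket_open flag; on_write_done and teardown are illustrative names only.

    #include <atomic>
    #include <boost/asio.hpp>
    #include <cstddef>

    struct conn {
       boost::asio::io_context::strand strand;
       std::atomic<bool>               socket_open{ false };

       explicit conn( boost::asio::io_context& ioc ) : strand( ioc ) {}

       void on_write_done( const boost::system::error_code& ec, std::size_t /*bytes*/ ) {
          // runs on the strand; close() may already have run before this handler
          if( !socket_open.load() ) return;   // connection torn down, buffers already cleared
          if( ec ) { teardown(); return; }
          // ... pop the written bytes from the write queue, start the next write ...
       }

       void teardown() { socket_open = false; /* close socket, clear queues */ }
    };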
g( c->conn_mtx ); c->last_req = std::move( req ); }); } @@ -2076,8 +2103,8 @@ namespace eosio { void dispatch_manager::retry_fetch(const connection_ptr& c) { fc_dlog( logger, "retry fetch" ); - boost::shared_lock g( my_impl->connections_mtx ); - if (!c->last_req) { + std::unique_lock g_c_conn( c->conn_mtx ); + if( !c->last_req ) { return; } fc_wlog( logger, "failed to fetch from ${p}", ("p", c->peer_address()) ); @@ -2089,39 +2116,39 @@ namespace eosio { ("b", modes_str( c->last_req->req_blocks.mode ))( "t", modes_str( c->last_req->req_trx.mode ) ) ); return; } + g_c_conn.unlock(); + boost::shared_lock g( my_impl->connections_mtx ); for( auto& conn : my_impl->connections ) { - if( conn == c || conn->last_req ) { + if( conn == c ) continue; + + std::unique_lock g_conn_conn( conn->conn_mtx ); + if( conn->last_req ) { continue; } + g_conn_conn.unlock(); bool sendit = peer_has_block( bid, conn->connection_id ); if( sendit ) { - conn->strand.post( [conn, last_req = *c->last_req]() { + g.unlock(); + g_c_conn.lock(); + auto last_req = *c->last_req; + g_c_conn.unlock(); + conn->strand.post( [conn, last_req{std::move(last_req)}]() { conn->enqueue( last_req ); conn->fetch_wait(); - boost::unique_lock g( my_impl->connections_mtx ); + std::lock_guard g_conn_conn( conn->conn_mtx ); conn->last_req = last_req; } ); return; } } - // found no peer that we know has it, so ask some random connection - for( auto& conn : my_impl->connections ) { - if( conn == c || conn->last_req ) { - continue; - } - conn->strand.post( [conn, last_req = *c->last_req]() { - conn->enqueue( last_req ); - conn->fetch_wait(); - boost::unique_lock g( my_impl->connections_mtx ); - conn->last_req = last_req; - } ); - return; - } // at this point no other peer has it, re-request or do nothing? fc_wlog( logger, "no peer has last_req" ); if( c->connected() ) { - c->enqueue(*c->last_req); + g_c_conn.lock(); + auto last_req = *c->last_req; + g_c_conn.unlock(); + c->enqueue( last_req ); c->fetch_wait(); } } @@ -2445,13 +2472,20 @@ namespace eosio { --conn->reads_in_flight; <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= conn->outstanding_read_bytes = 0; >>>>>>> Test of multi-threaded reading ======= >>>>>>> Remove unneeded access to atomic bool close_connection = false; +======= +>>>>>>> Add protection for last_handshake_*, last_req, and socket close + + // may have closed connection and cleared pending_message_buffer + if( !conn->socket_is_open() ) return; + bool close_connection = false; try { if( !ec ) { if (bytes_transferred > conn->pending_message_buffer.bytes_to_write()) { @@ -2664,6 +2698,10 @@ namespace eosio { c->enqueue( go_away_message( fatal_other ) ); return; } + fc_dlog( logger, "received handshake gen ${g} from ${ep}, lib ${lib}, head ${head}", + ("g", msg.generation)( "ep", c->peer_address() ) + ( "lib", msg.last_irreversible_block_num )( "head", msg.head_num ) ); + if( c->connecting ) { c->connecting = false; } @@ -2674,7 +2712,9 @@ namespace eosio { return; } + std::unique_lock g_conn( c->conn_mtx ); if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { + g_conn.unlock(); fc_dlog(logger, "checking for duplicate" ); boost::shared_lock g( my_impl->connections_mtx ); for(const auto& check : connections) { @@ -2685,7 +2725,13 @@ namespace eosio { // we need to avoid the case where they would both tell a different connection to go away. // Using the sum of the initial handshake times of the two connections, we will // arbitrarily (but consistently between the two peers) keep one of them. 
- if (msg.time + c->last_handshake_sent.time <= check->last_handshake_sent.time + check->last_handshake_recv.time) + std::unique_lock g_check_conn( check->conn_mtx ); + auto check_time = check->last_handshake_sent.time + check->last_handshake_recv.time; + g_check_conn.unlock(); + g_conn.lock(); + auto c_time = c->last_handshake_sent.time; + g_conn.unlock(); + if (msg.time + c_time <= check_time) continue; fc_dlog( logger, "sending go_away duplicate to ${ep}", ("ep",msg.p2p_address) ); @@ -2699,6 +2745,7 @@ namespace eosio { } else { fc_dlog( logger, "skipping duplicate check, addr == ${pa}, id = ${ni}", ("pa", c->peer_address())( "ni", c->last_handshake_recv.node_id ) ); + g_conn.unlock(); } if( msg.chain_id != chain_id) { @@ -2762,7 +2809,9 @@ namespace eosio { } } + std::unique_lock g_conn( c->conn_mtx ); c->last_handshake_recv = msg; + g_conn.unlock(); c->_logger_variant.reset(); sync_master->recv_handshake( c, msg ); } @@ -2816,7 +2865,6 @@ namespace eosio { // peer_ilog( c, "received notice_message" ); c->connecting = false; - request_message req; if( msg.known_trx.mode != none ) { fc_dlog( logger, "this is a ${m} notice with ${n} transactions", ("m", modes_str( msg.known_trx.mode ))( "n", msg.known_trx.pending ) ); @@ -2825,8 +2873,9 @@ namespace eosio { case none: break; case last_irr_catch_up: { + std::unique_lock g_conn( c->conn_mtx ); c->last_handshake_recv.head_num = msg.known_trx.pending; - req.req_trx.mode = none; + g_conn.unlock(); break; } case catch_up : { From a93ef99fbe7e866861c9e43ba75c2b9e023d3429 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 09:21:34 -0500 Subject: [PATCH 0151/1648] Protect connection fork_head, fork_head_num. Improve logging. --- plugins/net_plugin/net_plugin.cpp | 32 +++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 2f823964dbd..c0e581ba0e0 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -631,13 +631,13 @@ namespace eosio { std::mutex read_delay_timer_mtx; boost::asio::steady_timer read_delay_timer; std::atomic no_retry{no_reason}; - block_id_type fork_head; - std::atomic fork_head_num{0}; // provides memory barrier for fork_head - mutable std::mutex conn_mtx; // mtx for last_req, last_handshake_recv, last_handshake_sent + mutable std::mutex conn_mtx; // mtx for last_req, last_handshake_recv, last_handshake_sent, fork_head, fork_head_num optional last_req; handshake_message last_handshake_recv; handshake_message last_handshake_sent; + block_id_type fork_head; + uint32_t fork_head_num{0}; connection_status get_status()const; @@ -1266,7 +1266,6 @@ namespace eosio { bool connection::enqueue_sync_block() { if( !peer_requested ) { - fc_dlog( logger, "enqueue sync block, with no peer_requested" ); return false; } else { fc_dlog( logger, "enqueue sync block ${num}", ("num", peer_requested->last + 1) ); @@ -1719,7 +1718,7 @@ namespace eosio { req.req_blocks.mode = catch_up; boost::shared_lock g( my_impl->connections_mtx ); for (const auto& cc : my_impl->connections) { - // fork_head_num provides memory barrier for fork_head + std::lock_guard g_conn( cc->conn_mtx ); if( cc->fork_head_num > num || cc->fork_head == id ) { req.req_blocks.mode = none; break; @@ -1727,8 +1726,10 @@ namespace eosio { } g.unlock(); if( req.req_blocks.mode == catch_up ) { + std::unique_lock g_conn( c->conn_mtx ); c->fork_head = id; c->fork_head_num = num; + g_conn.unlock(); std::lock_guard g( sync_mtx 
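The duplicate-connection check above copies each connection's handshake times under that connection's own conn_mtx, one lock at a time, so two per-connection mutexes are never held together and no lock-order deadlock can occur. A simplified sketch of that discipline follows; peer_state, should_yield and tstamp are hypothetical, and the decision rule shown is illustrative rather than the plugin's exact tie-break.

    #include <cstdint>
    #include <mutex>

    using tstamp = int64_t;

    struct peer_state {
       mutable std::mutex mtx;
       tstamp handshake_sent_time = 0;
       tstamp handshake_recv_time = 0;
    };

    // decide which of two duplicate connections to keep, without holding both mutexes at once
    bool should_yield( const peer_state& mine, const peer_state& other, tstamp msg_time ) {
       tstamp other_sum = 0;
       {
          std::lock_guard<std::mutex> g( other.mtx );  // lock #1, released before lock #2
          other_sum = other.handshake_sent_time + other.handshake_recv_time;
       }
       tstamp my_time = 0;
       {
          std::lock_guard<std::mutex> g( mine.mtx );   // lock #2
          my_time = mine.handshake_sent_time;
       }
       return msg_time + my_time <= other_sum;
    }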
); fc_ilog( logger, "got a catch_up notice while in ${s}, fork head num = ${fhn} " "target LIB = ${lib} next_expected = ${ne}", @@ -1738,6 +1739,7 @@ namespace eosio { return; set_state( head_catchup ); } else { + std::lock_guard g_conn( c->conn_mtx ); c->fork_head = block_id_type(); c->fork_head_num = 0; } @@ -1786,14 +1788,16 @@ namespace eosio { // called from connection strand void sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { - fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_address() ) ); + fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_name() ) ); std::unique_lock g_sync( sync_mtx ); stages state = sync_state; fc_dlog( logger, "state ${s}", ("s", stage_str( state )) ); if( state == lib_catchup ) { if (blk_num != sync_next_expected_num) { + auto sync_next_expected = sync_next_expected_num; + g_sync.unlock(); fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", - ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_address()) ); + ("ne", sync_next_expected)( "bn", blk_num )( "p", c->peer_name() ) ); c->close(); return; } @@ -1809,11 +1813,15 @@ namespace eosio { bool set_state_to_head_catchup = false; boost::shared_lock g( my_impl->connections_mtx ); for( const auto& cp : my_impl->connections ) { - uint32_t fork_head_num = cp->fork_head_num.load(); // fork_head_num provides memory barrier for fork_head - if (cp->fork_head == null_id) { + std::unique_lock g_cp_conn( cp->conn_mtx ); + uint32_t fork_head_num = cp->fork_head_num; + block_id_type fork_head_id = cp->fork_head; + g_cp_conn.unlock(); + if( fork_head_id == null_id ) { continue; } - if( fork_head_num < blk_num || cp->fork_head == blk_id ) { + if( fork_head_num < blk_num || fork_head_id == blk_id ) { + std::lock_guard g_conn( c->conn_mtx ); c->fork_head = null_id; c->fork_head_num = 0; } else { @@ -2031,7 +2039,7 @@ namespace eosio { } cp->strand.post( [cp, send_buffer]() { - fc_dlog( logger, "sending trx to ${n}", ("n", cp->peer_address()) ); + fc_dlog( logger, "sending trx to ${n}", ("n", cp->peer_name()) ); cp->enqueue_buffer( send_buffer, true, priority::low, no_reason ); } ); } @@ -2699,7 +2707,7 @@ namespace eosio { return; } fc_dlog( logger, "received handshake gen ${g} from ${ep}, lib ${lib}, head ${head}", - ("g", msg.generation)( "ep", c->peer_address() ) + ("g", msg.generation)( "ep", c->peer_name() ) ( "lib", msg.last_irreversible_block_num )( "head", msg.head_num ) ); if( c->connecting ) { From 9f906a9b6dc26c0c59c6560867b9692116f511f1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 12:37:34 -0500 Subject: [PATCH 0152/1648] Fix protection of connections. Record block after verify it links. 
--- plugins/net_plugin/net_plugin.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index c0e581ba0e0..c8302335d40 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1275,8 +1275,7 @@ namespace eosio { if(num == peer_requested->end_block) { peer_requested.reset(); } - const int higher_than_low = priority::low + 2; // otherwise client gets very little if we are syncing to a peer - app().post( higher_than_low, [num, trigger_send, c = shared_from_this()]() { + app().post( priority::medium, [num, trigger_send, c = shared_from_this()]() { controller& cc = my_impl->chain_plug->chain(); signed_block_ptr sb = cc.fetch_block_by_number( num ); if( sb ) { @@ -2323,9 +2322,9 @@ namespace eosio { g.unlock(); if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count) ) { if( new_connection->start_session() ) { - g.lock(); + boost::unique_lock g_unique( connections_mtx ); connections.insert( new_connection ); - g.unlock(); + g_unique.unlock(); } } else { @@ -3203,9 +3202,9 @@ namespace eosio { fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); } - c->strand.post( [dispatcher = dispatcher.get(), c, blk_id, blk_num]() { - dispatcher->recv_block( c, blk_id, blk_num ); - }); +// c->strand.post( [dispatcher = dispatcher.get(), c, blk_id, blk_num]() { +// dispatcher->recv_block( c, blk_id, blk_num ); +// }); fc::microseconds age( fc::time_point::now() - msg->timestamp); peer_ilog(c, "received signed_block : #${n} block age in secs = ${age}", ("n",blk_num)("age",age.to_seconds())); @@ -3237,7 +3236,8 @@ namespace eosio { boost::asio::post( *server_ioc, [self = this, msg]() { self->dispatcher->update_txns_block_num( msg ); }); - c->strand.post( [sync_master = sync_master.get(), c, blk_id, blk_num]() { + c->strand.post( [sync_master = sync_master.get(), dispatcher = dispatcher.get(), c, blk_id, blk_num]() { + dispatcher->recv_block( c, blk_id, blk_num ); sync_master->sync_recv_block( c, blk_id, blk_num ); }); } else { @@ -3375,7 +3375,7 @@ namespace eosio { void net_plugin_impl::on_accepted_block(const block_state_ptr& block) { update_chain_info(); boost::asio::post( *server_ioc, [this, ioc=server_ioc, block]() { - fc_dlog( logger, "signaled, id = ${id}", ("id", block->id) ); + fc_dlog( logger, "signaled, blk id = ${id}", ("id", block->id) ); dispatcher->bcast_block( block ); }); } From a5b764ae6d2561f9bed537a237b085d4087fd790 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 14:44:51 -0500 Subject: [PATCH 0153/1648] Merge with develop --- plugins/net_plugin/net_plugin.cpp | 493 ------------------------------ 1 file changed, 493 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index c8302335d40..eda58c0f794 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -572,46 +572,19 @@ namespace eosio { optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive boost::asio::io_context::strand strand; -<<<<<<< HEAD -<<<<<<< HEAD - socket_ptr socket; -======= - socket_ptr socket; // only accessed through strand after construction -<<<<<<< HEAD ->>>>>>> Protect start_read_message via strand -======= -======= tcp::socket socket; // only accessed through strand after construction ->>>>>>> Move socket ownership into connection. 
private: std::atomic socket_open{false}; public: ->>>>>>> Made all access to impl->connections thread safe fc::message_buffer<1024*1024> pending_message_buffer; -<<<<<<< HEAD -<<<<<<< HEAD std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads -======= - std::atomic outstanding_read_bytes{0}; ->>>>>>> Test of multi-threaded reading -======= - std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads ->>>>>>> Use appbase with FIFO priority queue. priority queue in net_plugin no longer needed. queued_buffer buffer_queue; std::atomic reads_in_flight{0}; -<<<<<<< HEAD -<<<<<<< HEAD - std::atomic trx_in_progress_size{0}; -======= - uint32_t trx_in_progress_size = 0; ->>>>>>> Test of multi-threaded reading -======= std::atomic trx_in_progress_size{0}; ->>>>>>> Make delay_timer thread safe fc::sha256 node_id; const uint32_t connection_id; int16_t sent_handshake_count = 0; @@ -773,15 +746,7 @@ namespace eosio { void operator()( signed_block&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; -<<<<<<< HEAD -<<<<<<< HEAD app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { -======= - app().post(priority::high, "handle blk", [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { ->>>>>>> Test of multi-threaded reading -======= - app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { ->>>>>>> Remove descriptions of tasks as not merged into develop yet connection_ptr c = weak.lock(); if( c ) impl->handle_message( c, ptr ); }); @@ -791,19 +756,7 @@ namespace eosio { // continue call to handle_message on connection strand fc_dlog( logger, "handle packed_transaction" ); shared_ptr ptr = std::make_shared( std::move( msg ) ); -<<<<<<< HEAD -<<<<<<< HEAD - impl.handle_message( c, ptr ); -======= - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if( c) impl->handle_message( c, ptr ); - }); ->>>>>>> Test of multi-threaded reading -======= impl.handle_message( c, ptr ); ->>>>>>> Move more of incoming transaction processing to thread pool } void operator()( const handshake_message& msg ) const { @@ -813,28 +766,9 @@ namespace eosio { } void operator()( const chain_size_message& msg ) const { -<<<<<<< HEAD - connection_wptr weak = c; -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD - app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { -======= - app().post(priority::low, "handle msg", [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { ->>>>>>> Test of multi-threaded reading -======= - app().post(priority::low, [impl = &impl, msg{std::forward(msg)}, weak{std::move(weak)}] { ->>>>>>> Remove descriptions of tasks as not merged into develop yet -======= - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); -======= // continue call to handle_message on connection strand fc_dlog( logger, "handle chain_size_message" ); impl.handle_message( c, msg ); ->>>>>>> Handle almost every net_message on net_plugin thread pool. Optimize bcast_block to not send when syncing. 
} void operator()( const go_away_message& msg ) const { @@ -862,18 +796,9 @@ namespace eosio { } void operator()( const sync_request_message& msg ) const { -<<<<<<< HEAD - connection_wptr weak = c; - app().post(priority::low, [impl = &impl, msg{std::move(msg)}, weak{std::move(weak)}] { ->>>>>>> Work toward making sync_manager and handshake message thread safe. - connection_ptr c = weak.lock(); - if(c) impl->handle_message( c, msg ); - }); -======= // continue call to handle_message on connection strand fc_dlog( logger, "handle sync_request_message" ); impl.handle_message( c, msg ); ->>>>>>> Handle almost every net_message on net_plugin thread pool. Optimize bcast_block to not send when syncing. } }; @@ -882,16 +807,8 @@ namespace eosio { connection::connection( string endpoint ) : peer_requested(), server_ioc( my_impl->server_ioc ), -<<<<<<< HEAD - strand( app().get_io_service() ), -======= strand( *my_impl->server_ioc ), -<<<<<<< HEAD ->>>>>>> Protect start_read_message via strand - socket( std::make_shared( std::ref( *my_impl->server_ioc ))), -======= socket( *my_impl->server_ioc ), ->>>>>>> Move socket ownership into connection. node_id(), connection_id( ++my_impl->current_connection_id ), sent_handshake_count(0), @@ -913,16 +830,8 @@ namespace eosio { connection::connection() : peer_requested(), server_ioc( my_impl->server_ioc ), -<<<<<<< HEAD - strand( app().get_io_service() ), -======= strand( *my_impl->server_ioc ), -<<<<<<< HEAD ->>>>>>> Protect start_read_message via strand - socket( s ), -======= socket( *my_impl->server_ioc ), ->>>>>>> Move socket ownership into connection. node_id(), connection_id( ++my_impl->current_connection_id ), sent_handshake_count(0), @@ -1015,40 +924,7 @@ namespace eosio { if( has_last_req ) { my_impl->dispatcher->retry_fetch( self->shared_from_this() ); } -<<<<<<< HEAD -<<<<<<< HEAD - reset(); - sent_handshake_count = 0; - last_handshake_recv = handshake_message(); - last_handshake_sent = handshake_message(); - my_impl->sync_master->reset_lib_num(nullptr); - fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); - cancel_wait(); -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> Make delay_timer thread safe - { - std::lock_guard g( read_delay_timer_mutex ); - if( read_delay_timer ) read_delay_timer->cancel(); - } -<<<<<<< HEAD -======= - if( read_delay_timer ) read_delay_timer->cancel(); ->>>>>>> Test of multi-threaded reading -======= ->>>>>>> Make delay_timer thread safe -======= - - std::lock_guard g( read_delay_timer_mtx ); - if( read_delay_timer ) read_delay_timer->cancel(); ->>>>>>> Use unique_lock instead of lock_guard to clean up code -======= - self->reset(); -======= self->peer_requested.reset(); ->>>>>>> Handle almost every net_message on net_plugin thread pool. Optimize bcast_block to not send when syncing. self->sent_handshake_count = 0; g_conn.lock(); self->last_handshake_recv = handshake_message(); @@ -1060,12 +936,8 @@ namespace eosio { std::lock_guard g( self->read_delay_timer_mtx ); self->read_delay_timer.cancel(); -<<<<<<< HEAD ->>>>>>> Move socket ownership into connection. 
-======= self->pending_message_buffer.reset(); ->>>>>>> Add protection for last_handshake_*, last_req, and socket close } void connection::blk_send_branch() { @@ -1195,20 +1067,9 @@ namespace eosio { std::vector bufs; buffer_queue.fill_out_buffer( bufs ); -<<<<<<< HEAD -<<<<<<< HEAD - boost::asio::async_write(*socket, bufs, - boost::asio::bind_executor(strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { - app().post(priority, [c, priority, ec, w]() { -======= - boost::asio::async_write( socket, bufs, - boost::asio::bind_executor( strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { ->>>>>>> Move socket ownership into connection. -======= strand.dispatch( [c{std::move(c)}, bufs{std::move(bufs)}, priority]() { boost::asio::async_write( c->socket, bufs, boost::asio::bind_executor( c->strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { ->>>>>>> Make queued_buffer thread safe try { // May have closed connection and cleared buffer_queue if( !c->socket_is_open() ) return; @@ -1234,16 +1095,8 @@ namespace eosio { } catch( ... ) { fc_elog( logger, "Exception in do_queue_write to ${p}", ("p", c->peer_name()) ); } -<<<<<<< HEAD -<<<<<<< HEAD - }); -======= ->>>>>>> Move socket ownership into connection. - })); -======= })); }); ->>>>>>> Make queued_buffer thread safe } void connection::cancel_sync(go_away_reason reason) { @@ -2182,24 +2035,6 @@ namespace eosio { connection_wptr weak_conn = shared_from_this(); // Note: need to add support for IPv6 too -<<<<<<< HEAD - resolver->async_resolve( query, boost::asio::bind_executor( c->strand, - [weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { - app().post( priority::low, [err, endpoint_itr, weak_conn, this]() { - auto c = weak_conn.lock(); - if( !c ) return; - if( !err ) { - connect( c, endpoint_itr ); - } else { - fc_elog( logger, "Unable to resolve ${add}: ${error}", - ("add", c->peer_name())( "error", err.message()) ); - } - } ); -<<<<<<< HEAD - } ) ); -======= - } ); -======= auto resolver = std::make_shared( *server_ioc ); resolver->async_resolve( query, boost::asio::bind_executor( strand, [resolver, ioc = server_ioc, weak_conn]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { @@ -2210,14 +2045,8 @@ namespace eosio { } else { fc_elog( logger, "Unable to resolve ${add}: ${error}", ("add", c->peer_name())( "error", err.message() ) ); } -<<<<<<< HEAD - } ); ->>>>>>> Make use of resolver thread safe -======= } ) ); ->>>>>>> Work toward making sync_manager and handshake message thread safe. 
   // called from connection strand
@@ -2227,29 +2056,11 @@ namespace eosio {
      }
      auto current_endpoint = *endpoint_itr;
      ++endpoint_itr;
      connecting = true;
      socket.async_connect( current_endpoint, boost::asio::bind_executor( strand,
         [resolver, c = shared_from_this(), endpoint_itr]( const boost::system::error_code& err ) {
            if( !err && c->socket.is_open() ) {
               if( c->start_session() ) {
                  c->send_handshake();
               }
            } else {
@@ -2264,34 +2075,7 @@ namespace eosio {
               c->close();
            }
      } ) );
   }

   void net_plugin_impl::start_listen_loop() {
@@ -2360,24 +2144,9 @@ namespace eosio {
   // only called from strand thread
   void connection::start_read_message() {
      try {
         std::size_t minimum_read =
               std::atomic_exchange<decltype(outstanding_read_bytes.load())>( &outstanding_read_bytes, 0 );
         minimum_read = minimum_read != 0 ? minimum_read : message_header_size;

         if (my_impl->use_socket_read_watermark) {
            const size_t max_socket_read_watermark = 4096;
@@ -2399,36 +2168,13 @@ namespace eosio {
             trx_in_progress_size > def_max_trx_in_progress_size )
         {
            // too much queued up, reschedule
            uint32_t write_queue_size = buffer_queue.write_queue_size();
            uint32_t trx_in_progress_size = this->trx_in_progress_size.load();
            uint32_t reads_in_flight = this->reads_in_flight.load();

            if( write_queue_size > def_max_write_queue_size ) {
               peer_wlog( this, "write_queue full ${s} bytes", ("s", write_queue_size) );
            } else if( reads_in_flight > def_max_reads_in_flight ) {
               peer_wlog( this, "max reads in flight ${s}", ("s", reads_in_flight) );
            } else {
               peer_wlog( this, "max trx in progress ${s} bytes", ("s", trx_in_progress_size) );
            }
@@ -2436,28 +2182,9 @@ namespace eosio {
                reads_in_flight > 2*def_max_reads_in_flight ||
                trx_in_progress_size > 2*def_max_trx_in_progress_size )
            {
               fc_elog( logger, "queues over full, giving up on connection, closing connection to: ${p}",
                        ("p", peer_name()) );
               close();
               return;
            }
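// Sketch of the back-pressure rule applied above: reading is delayed once any
// per-connection queue passes its soft limit, and the connection is dropped
// once a queue passes twice that limit. The limit values below are
// illustrative constants, not the plugin's configured defaults.
#include <cstdint>

enum class read_action { read_now, delay_read, drop_connection };

read_action classify_backpressure( uint32_t write_queue_bytes,
                                   uint32_t reads_in_flight,
                                   uint32_t trx_in_progress_bytes ) {
   constexpr uint32_t max_write_queue_bytes = 1024 * 1024;       // assumed soft limits
   constexpr uint32_t max_reads_in_flight   = 1000;
   constexpr uint32_t max_trx_in_progress   = 90 * 1024 * 1024;

   const bool over  = write_queue_bytes     > max_write_queue_bytes ||
                      reads_in_flight       > max_reads_in_flight   ||
                      trx_in_progress_bytes > max_trx_in_progress;
   const bool fatal = write_queue_bytes     > 2 * max_write_queue_bytes ||
                      reads_in_flight       > 2 * max_reads_in_flight   ||
                      trx_in_progress_bytes > 2 * max_trx_in_progress;

   if( fatal ) return read_action::drop_connection;   // queues over full, give up
   if( over )  return read_action::delay_read;        // reschedule the read via timer
   return read_action::read_now;
}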
         std::lock_guard<std::mutex> g( read_delay_timer_mtx );
@@ -2477,17 +2204,6 @@ namespace eosio {
            boost::asio::bind_executor( strand,
               [conn = shared_from_this()]( boost::system::error_code ec, std::size_t bytes_transferred ) {
            --conn->reads_in_flight;
            // may have closed connection and cleared pending_message_buffer
            if( !conn->socket_is_open() ) return;
@@ -2560,40 +2276,8 @@ namespace eosio {
            }
            if( close_connection ) {
               fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) );
               conn->close();
            }
         }));
      } catch (...) {
@@ -3005,16 +2689,7 @@ namespace eosio {
            auto trace = result.get();
            if (!trace->except) {
               fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) );
               accepted = true;
            }

            if( !accepted ) {
@@ -3035,168 +2710,21 @@ namespace eosio {
      });
   }

   // called from application thread
   void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) {
      controller& cc = chain_plug->chain();
      block_id_type blk_id = msg->id();
      uint32_t blk_num = msg->block_num();
      fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name()));
      c->cancel_wait();

      try {
         if( cc.fetch_block_by_id(blk_id) ) {
            c->strand.post( [sync_master = sync_master.get(), c, blk_id, blk_num]() {
               sync_master->sync_recv_block( c, blk_id, blk_num );
            });
            return;
         }
      } catch( ...) {
         // should this even be caught?
         fc_elog( logger,"Caught an unknown exception trying to recall blockID" );
@@ -3314,31 +2842,10 @@ namespace eosio {
      uint32_t lib = 0;
      std::tie( lib, std::ignore, std::ignore, std::ignore, std::ignore, std::ignore ) = get_chain_info();
      dispatcher->expire_blocks( lib );
      dispatcher->expire_txns( lib );
      fc_dlog( logger, "expire_txns ${n}us", ("n", time_point::now() - now) );

      start_expire_timer();
   }

   // called from any thread

From aa79e4ae91cdc198b3f4fb457ba7989c9184facd Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Thu, 28 Mar 2019 15:39:55 -0500
Subject: [PATCH 0154/1648] Temporarily disable bnet tests and mongo tests to
 speed up testing.
--- tests/CMakeLists.txt | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 0eea67cbce3..97c73c270d6 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,4 +1,5 @@ + find_package( Gperftools QUIET ) if( GPERFTOOLS_FOUND ) message( STATUS "Found gperftools; compiling tests with TCMalloc") @@ -51,24 +52,29 @@ add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_ou add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_sanity_bnet_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_test PROPERTY LABELS nonparallelizable_tests) + +# +# TODO: DO NOT MERGE, temp disable of some tests +# + +# add_test(NAME nodeos_sanity_bnet_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +# set_property(TEST nodeos_sanity_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_run_bnet_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_run_bnet_test PROPERTY LABELS nonparallelizable_tests) +# add_test(NAME nodeos_run_bnet_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +# set_property(TEST nodeos_run_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) if(BUILD_MONGO_DB_PLUGIN) - add_test(NAME nodeos_run_test-mongodb COMMAND tests/nodeos_run_test.py --mongodb -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) - set_property(TEST nodeos_run_test-mongodb PROPERTY LABELS nonparallelizable_tests) +# add_test(NAME nodeos_run_test-mongodb COMMAND tests/nodeos_run_test.py --mongodb -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +# set_property(TEST nodeos_run_test-mongodb PROPERTY LABELS nonparallelizable_tests) endif() add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME distributed-transactions-bnet-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 --p2p-plugin bnet -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST distributed-transactions-bnet-test PROPERTY LABELS nonparallelizable_tests) +# add_test(NAME distributed-transactions-bnet-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 --p2p-plugin bnet -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +# set_property(TEST 
distributed-transactions-bnet-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -85,8 +91,8 @@ add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh WORKING_DIRECTORY ${C # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_sanity_bnet_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_lr_test PROPERTY LABELS long_running_tests) +# add_test(NAME nodeos_sanity_bnet_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +# set_property(TEST nodeos_sanity_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_remote_lr_test COMMAND tests/nodeos_run_remote_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -101,8 +107,8 @@ set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_voting_bnet_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) +# add_test(NAME nodeos_voting_bnet_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +# set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) From abd93b105dc1a69a4d07d2af21b5c62c34f6a872 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 15:41:09 -0500 Subject: [PATCH 0155/1648] Add a send_handshakes since we can't be sure chain has not progressed --- plugins/net_plugin/net_plugin.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index eda58c0f794..79602f5a352 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1421,7 +1421,7 @@ namespace eosio { connection_ptr c = sync_source; g_sync.unlock(); 
c->strand.post( [c, start, end]() { - fc_ilog( logger, "requesting range ${s} to ${e}, from ${n}", ("n", c->peer_address())( "s", start )( "e", end ) ); + fc_ilog( logger, "requesting range ${s} to ${e}, from ${n}", ("n", c->peer_name())( "s", start )( "e", end ) ); c->request_sync_blocks( start, end ); } ); } @@ -1470,7 +1470,7 @@ namespace eosio { } fc_ilog( logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", - ("cc", sync_last_requested_num)( "t", target )( "p", c->peer_address() ) ); + ("cc", sync_last_requested_num)( "t", target )( "p", c->peer_name() ) ); request_next_chunk( std::move( g_sync ), c ); } @@ -1479,7 +1479,7 @@ namespace eosio { void sync_manager::sync_reassign_fetch(const connection_ptr& c, go_away_reason reason) { std::unique_lock g( sync_mtx ); fc_ilog( logger, "reassign_fetch, our last req is ${cc}, next expected is ${ne} peer ${p}", - ("cc", sync_last_requested_num)( "ne", sync_next_expected_num )( "p", c->peer_address() ) ); + ("cc", sync_last_requested_num)( "ne", sync_next_expected_num )( "p", c->peer_name() ) ); if( c == sync_source ) { c->cancel_sync(reason); @@ -1686,6 +1686,7 @@ namespace eosio { g_sync.lock(); set_state( head_catchup ); g_sync.unlock(); + send_handshakes(); } else { send_handshakes(); } From 05231a2d9fbdb05a0686c5a3607910693eaf7302 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 16:14:19 -0500 Subject: [PATCH 0156/1648] Remove add of send_handshakes --- plugins/net_plugin/net_plugin.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 33eabe4bfcb..cceb95dc788 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1479,7 +1479,7 @@ namespace eosio { void sync_manager::sync_reassign_fetch(const connection_ptr& c, go_away_reason reason) { std::unique_lock g( sync_mtx ); fc_ilog( logger, "reassign_fetch, our last req is ${cc}, next expected is ${ne} peer ${p}", - ("cc", sync_last_requested_num)( "ne", sync_next_expected_num )( "p", c->peer_address() ) ); + ("cc", sync_last_requested_num)( "ne", sync_next_expected_num )( "p", c->peer_name() ) ); if( c == sync_source ) { c->cancel_sync(reason); @@ -1686,7 +1686,6 @@ namespace eosio { g_sync.lock(); set_state( head_catchup ); g_sync.unlock(); - send_handshakes(); } else { send_handshakes(); } From f6ee906457d417c05f2746e8834eb3d98bdeb589 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 19:01:36 -0500 Subject: [PATCH 0157/1648] Temp remove duplicate check --- plugins/net_plugin/net_plugin.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index cceb95dc788..b5b046e4734 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2406,6 +2406,7 @@ namespace eosio { std::unique_lock g_conn( c->conn_mtx ); if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { g_conn.unlock(); + /* todo temp fc_dlog(logger, "checking for duplicate" ); boost::shared_lock g( my_impl->connections_mtx ); for(const auto& check : connections) { @@ -2433,6 +2434,7 @@ namespace eosio { return; } } + */ } else { fc_dlog( logger, "skipping duplicate check, addr == ${pa}, id = ${ni}", ("pa", c->peer_address())( "ni", c->last_handshake_recv.node_id ) ); From 2e7b27442fb93316c2bcdb93bbfb2e924285f2d4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Mar 2019 20:20:31 -0500 Subject: [PATCH 
0158/1648] Break out of handle signed block if disconnected --- plugins/net_plugin/net_plugin.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b5b046e4734..add33b26d8e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2720,6 +2720,10 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); + // if we have closed connection then stop processing + if( !c->socket_is_open() ) + return; + try { if( cc.fetch_block_by_id(blk_id) ) { c->strand.post( [sync_master = sync_master.get(), c, blk_id, blk_num]() { From d288bd9d13fdec6f232f9d3cdee72c7be0357679 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 09:29:53 -0500 Subject: [PATCH 0159/1648] Add shutdown to connection close --- plugins/net_plugin/net_plugin.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index add33b26d8e..38561c6d4b2 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -913,6 +913,8 @@ namespace eosio { void connection::_close( connection* self ) { self->socket_open = false; + boost::system::error_code ec; + self->socket.shutdown( tcp::socket::shutdown_both, ec ); self->socket.close(); self->flush_queues(); self->connecting = false; @@ -1699,7 +1701,7 @@ namespace eosio { request_next_chunk( std::move( g_sync) ); } else { g_sync.unlock(); - fc_dlog( logger, "calling sync_wait on connection ${p}", ("p", c->peer_address()) ); + fc_dlog( logger, "calling sync_wait on connection ${p}", ("p", c->peer_name()) ); c->sync_wait(); } } @@ -2113,8 +2115,8 @@ namespace eosio { } else { if( from_addr >= max_nodes_per_host ) { - fc_elog( logger, "Number of connections (${n}) from ${ra} exceeds limit", - ("n", from_addr + 1)( "ra", paddr_str ) ); + fc_elog( logger, "Number of connections (${n}) from ${ra} exceeds limit ${l}", + ("n", from_addr + 1)( "ra", paddr_str )("l", max_nodes_per_host) ); } else { fc_elog( logger, "Error max_client_count ${m} exceeded", ("m", max_client_count) ); } From d09ad42a01865ec22dfbc89dc7a9f240de35d304 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Sat, 30 Mar 2019 20:36:00 -0400 Subject: [PATCH 0160/1648] Buildkite: Temporarily disabled AWS2 steps because "Yum update failed" --- .buildkite/long_running_tests.yml | 78 ++++++++++---------- .buildkite/pipeline.yml | 118 +++++++++++++++--------------- 2 files changed, 98 insertions(+), 98 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 6383f57c392..489259a2f08 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -84,26 +84,26 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":aws: 2 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 + # - command: | + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz 
build/ + # label: ":aws: 2 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + # workdir: /data/job + # timeout: 60 - command: | echo "+++ :hammer: Building" @@ -236,25 +236,25 @@ steps: workdir: /data/job timeout: 90 - - command: | # Amazon AWS-2 Linux Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":aws: 2 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 90 + # - command: | # Amazon AWS-2 Linux Tests + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + # echo "+++ :microscope: Running LR Tests" + # ./scripts/long-running-test.sh + # label: ":aws: 2 LR Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + # workdir: /data/job + # timeout: 90 - command: | # Fedora Tests echo "--- :arrow_down: Downloading Build Directory" diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 57ce31e5a6c..fa1f88fc3ca 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -84,26 +84,26 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":aws: 2 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 + # - command: | + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz build/ + # label: ":aws: 2 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + # workdir: /data/job + # timeout: 60 - command: | echo "+++ :hammer: Building" @@ -321,45 +321,45 @@ steps: timeout: 60 # Amazon AWS-2 Linux Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":aws: 2 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":aws: 2 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/parallel-test.sh + # label: ":aws: 2 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + # workdir: /data/job + # timeout: 60 + + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/serial-test.sh + # label: ":aws: 2 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + # workdir: /data/job + # timeout: 60 # Fedora Tests - command: | From 4e410eed7b4e14538ca70b70a89320476e108f58 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Sat, 30 Mar 2019 21:58:12 -0400 Subject: [PATCH 0161/1648] AWS2 image fixed by hand, revert commit d09ad42 --- .buildkite/long_running_tests.yml | 78 ++++++++++---------- .buildkite/pipeline.yml | 118 +++++++++++++++--------------- 2 files changed, 98 insertions(+), 98 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 489259a2f08..6383f57c392 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -84,26 +84,26 @@ steps: workdir: /data/job timeout: 60 - # - command: | - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build/ - # label: ":aws: 2 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 + - command: | + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build/ 
+ label: ":aws: 2 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + workdir: /data/job + timeout: 60 - command: | echo "+++ :hammer: Building" @@ -236,25 +236,25 @@ steps: workdir: /data/job timeout: 90 - # - command: | # Amazon AWS-2 Linux Tests - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" - # echo "+++ :microscope: Running LR Tests" - # ./scripts/long-running-test.sh - # label: ":aws: 2 LR Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 90 + - command: | # Amazon AWS-2 Linux Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh + label: ":aws: 2 LR Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + workdir: /data/job + timeout: 90 - command: | # Fedora Tests echo "--- :arrow_down: Downloading Build Directory" diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index fa1f88fc3ca..57ce31e5a6c 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -84,26 +84,26 @@ steps: workdir: /data/job timeout: 60 - # - command: | - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build/ - # label: ":aws: 2 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 + - command: | + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build/ + label: ":aws: 2 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + workdir: /data/job + timeout: 60 - command: | echo "+++ :hammer: Building" @@ -321,45 +321,45 @@ steps: timeout: 60 # Amazon AWS-2 Linux Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/parallel-test.sh - # label: ":aws: 2 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 - - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/serial-test.sh - # label: ":aws: 2 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":aws: 2 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + workdir: /data/job + timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":aws: 2 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + workdir: /data/job + timeout: 60 # Fedora Tests - command: | From 6491771813f6e823675d4970e7bc3d8b9d6e3bdd Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 16:45:33 -0400 Subject: [PATCH 0162/1648] Do not reset pending_message_buffer in close since async_read might be in flight --- plugins/net_plugin/net_plugin.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 38561c6d4b2..8d42ee74fe8 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -938,8 +938,6 @@ namespace eosio { std::lock_guard g( self->read_delay_timer_mtx ); self->read_delay_timer.cancel(); - - self->pending_message_buffer.reset(); } void connection::blk_send_branch() { @@ -2059,6 +2057,7 @@ namespace eosio { auto current_endpoint = *endpoint_itr; ++endpoint_itr; connecting = true; + pending_message_buffer.reset(); socket.async_connect( current_endpoint, boost::asio::bind_executor( strand, [resolver, c = shared_from_this(), endpoint_itr]( const boost::system::error_code& err ) { if( !err && c->socket.is_open() ) { From 805a80ed3e2209f9a924c776c2ac35efb7b361f7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 17:38:54 -0400 Subject: [PATCH 0163/1648] Add back duplicate check --- plugins/net_plugin/net_plugin.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8d42ee74fe8..cc59cc5ec93 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2407,7 +2407,6 @@ namespace eosio { std::unique_lock g_conn( c->conn_mtx ); if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { g_conn.unlock(); - /* todo temp fc_dlog(logger, "checking for duplicate" ); boost::shared_lock g( my_impl->connections_mtx ); for(const auto& check : connections) { @@ -2435,7 +2434,6 @@ namespace eosio { return; } } - */ } else { fc_dlog( logger, "skipping duplicate check, addr == ${pa}, id = ${ni}", ("pa", c->peer_address())( "ni", c->last_handshake_recv.node_id ) ); From 108a08b0d7d0af7529838e9d5c5e5aa75685ba1d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 18:25:53 -0400 Subject: [PATCH 0164/1648] Use std::shared_timed_mutex instead of boost::shared_mutex --- plugins/net_plugin/net_plugin.cpp | 39 +++++++++++++++---------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index cc59cc5ec93..df8bc94215d 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -25,10 +25,9 @@ #include #include #include -#include -#include #include +#include using namespace eosio::chain::plugin_interface; @@ -228,7 +227,7 @@ namespace eosio { connection_ptr find_connection(const string& host)const; - mutable boost::shared_mutex connections_mtx; // switch to std::shared_mutex in C++17, also protects connection::last_req + mutable std::shared_timed_mutex connections_mtx; // switch to std::shared_mutex in C++17 std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection bool done = false; unique_ptr< sync_manager > sync_master; @@ -1361,7 +1360,7 @@ namespace eosio { if (conn && conn->current() ) { sync_source = conn; } else { - boost::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); if (my_impl->connections.size() == 1) { if (!sync_source) { sync_source = *my_impl->connections.begin(); @@ -1430,7 +1429,7 @@ namespace eosio { // static, thread safe void sync_manager::send_handshakes() { - boost::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& ci : my_impl->connections ) { if( ci->current() ) { ci->send_handshake(); @@ -1568,7 +1567,7 @@ namespace eosio { void sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { request_message req; req.req_blocks.mode = catch_up; - boost::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for (const auto& cc : my_impl->connections) { std::lock_guard g_conn( cc->conn_mtx ); if( cc->fork_head_num > num || cc->fork_head == id ) { @@ -1663,7 +1662,7 @@ namespace eosio { block_id_type null_id; bool set_state_to_head_catchup = false; - boost::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( const auto& cp : my_impl->connections ) { std::unique_lock g_cp_conn( cp->conn_mtx ); uint32_t fork_head_num = cp->fork_head_num; @@ -1808,7 +1807,7 @@ namespace eosio { if( my_impl->sync_master->syncing_with_peer() ) return; bool have_connection = false; - boost::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { peer_dlog( cp, 
"socket_is_open ${s}, connecting ${c}, syncing ${ss}", @@ -1877,7 +1876,7 @@ namespace eosio { node_transaction_state nts = {id, trx_expiration, 0, 0}; std::shared_ptr> send_buffer; - boost::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; @@ -1977,7 +1976,7 @@ namespace eosio { return; } g_c_conn.unlock(); - boost::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& conn : my_impl->connections ) { if( conn == c ) continue; @@ -2093,7 +2092,7 @@ namespace eosio { fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message()) ); } else { paddr_str = paddr_add.to_string(); - boost::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& conn : connections ) { if( conn->socket_is_open() ) { if( conn->peer_address().empty() ) { @@ -2107,7 +2106,7 @@ namespace eosio { g.unlock(); if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count) ) { if( new_connection->start_session() ) { - boost::unique_lock g_unique( connections_mtx ); + std::unique_lock g_unique( connections_mtx ); connections.insert( new_connection ); g_unique.unlock(); } @@ -2408,7 +2407,7 @@ namespace eosio { if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { g_conn.unlock(); fc_dlog(logger, "checking for duplicate" ); - boost::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for(const auto& check : connections) { if(check == c) continue; @@ -2818,7 +2817,7 @@ namespace eosio { if( ec ) { fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - boost::shared_lock g( connections_mtx ); + std::shared_lock g( connections_mtx ); for( auto& c : connections ) { if( c->socket_is_open() ) { c->strand.post( [c]() { @@ -2858,7 +2857,7 @@ namespace eosio { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); - boost::unique_lock g( connections_mtx ); + std::unique_lock g( connections_mtx ); auto it = (from ? 
connections.find(from) : connections.begin()); if (it == connections.end()) it = connections.begin(); while (it != connections.end()) { @@ -3262,7 +3261,7 @@ namespace eosio { my->done = true; { fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); - boost::unique_lock g( my->connections_mtx ); + std::unique_lock g( my->connections_mtx ); for( auto& con : my->connections ) { fc_dlog( logger, "close: ${p}", ("p", con->peer_name()) ); con->close(); @@ -3290,14 +3289,14 @@ namespace eosio { fc_dlog( logger, "calling active connector" ); if( c->resolve_and_connect() ) { fc_dlog( logger, "adding new connection to the list" ); - boost::unique_lock g( my->connections_mtx ); + std::unique_lock g( my->connections_mtx ); my->connections.insert( c ); } return "added connection"; } string net_plugin::disconnect( const string& host ) { - boost::unique_lock g( my->connections_mtx ); + std::unique_lock g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { if( (*itr)->peer_address() == host ) { fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) ); @@ -3318,7 +3317,7 @@ namespace eosio { vector net_plugin::connections()const { vector result; - boost::shared_lock g( my->connections_mtx ); + std::shared_lock g( my->connections_mtx ); result.reserve( my->connections.size() ); for( const auto& c : my->connections ) { result.push_back( c->get_status() ); @@ -3326,7 +3325,7 @@ namespace eosio { return result; } connection_ptr net_plugin_impl::find_connection(const string& host )const { - boost::shared_lock g( connections_mtx ); + std::shared_lock g( connections_mtx ); for( const auto& c : connections ) if( c->peer_address() == host ) return c; return connection_ptr(); From f84d4f88b3c43bf6eed3bf182fa198b497339a42 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 09:52:52 -0400 Subject: [PATCH 0165/1648] Try to connect after close --- plugins/net_plugin/net_plugin.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index df8bc94215d..123af89d0f2 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -229,7 +229,6 @@ namespace eosio { mutable std::shared_timed_mutex connections_mtx; // switch to std::shared_mutex in C++17 std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection - bool done = false; unique_ptr< sync_manager > sync_master; unique_ptr< dispatch_manager > dispatcher; @@ -935,8 +934,12 @@ namespace eosio { fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); // peer_name(), do not hold conn_mtx self->cancel_wait(); - std::lock_guard g( self->read_delay_timer_mtx ); + std::unique_lock g( self->read_delay_timer_mtx ); self->read_delay_timer.cancel(); + g.unlock(); + + // try to re-connect now + my_impl->connection_monitor( connection_wptr() ); } void connection::blk_send_branch() { @@ -3258,7 +3261,6 @@ namespace eosio { my->keepalive_timer->cancel(); } - my->done = true; { fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); std::unique_lock g( my->connections_mtx ); From 6b1650a8f6c265e2f07116cfc67692ea0da6516a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 17:06:19 -0400 Subject: [PATCH 0166/1648] Fix infinite connection loop --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 123af89d0f2..f0805782ec5 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -939,7 +939,7 @@ namespace eosio { g.unlock(); // try to re-connect now - my_impl->connection_monitor( connection_wptr() ); + my_impl->start_conn_timer( std::chrono::milliseconds( 1 ), connection_wptr()); } void connection::blk_send_branch() { From 276d3c9deeaa5a3df9ed38e6441bbc0c2596c67a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 18:35:40 -0400 Subject: [PATCH 0167/1648] Fix for infinite connection retry loop --- plugins/net_plugin/net_plugin.cpp | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index f0805782ec5..86f05d6141d 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -317,7 +317,7 @@ namespace eosio { void start_monitors(); void expire(); - void connection_monitor(std::weak_ptr from_connection); + void connection_monitor(std::weak_ptr from_connection, bool reschedule); /** \name Peer Timestamps * Time message handling * @{ @@ -939,7 +939,7 @@ namespace eosio { g.unlock(); // try to re-connect now - my_impl->start_conn_timer( std::chrono::milliseconds( 1 ), connection_wptr()); + my_impl->connection_monitor( connection_wptr(), false ); } void connection::blk_send_branch() { @@ -2786,10 +2786,10 @@ namespace eosio { // called from any thread void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { std::lock_guard g( connector_check_timer_mtx ); - connector_check_timer->expires_from_now( du); + connector_check_timer->expires_from_now( du ); connector_check_timer->async_wait( [this, from_connection](boost::system::error_code ec) { - if( !ec) { - connection_monitor(from_connection); + if( !ec ) { + connection_monitor(from_connection, true); } else { fc_elog( logger, "Error from connection check monitor: ${m}",( "m", ec.message())); start_conn_timer( connector_period, std::weak_ptr()); @@ -2856,7 +2856,7 @@ namespace eosio { } // called from any thread - void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { + void net_plugin_impl::connection_monitor(std::weak_ptr from_connection, bool reschedule) { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); @@ -2865,7 +2865,9 @@ namespace eosio { if (it == connections.end()) it = connections.begin(); while (it != connections.end()) { if (fc::time_point::now() >= max_time) { - start_conn_timer(std::chrono::milliseconds(1), *it); // avoid exhausting + if( reschedule ) { + start_conn_timer( std::chrono::milliseconds( 1 ), *it ); // avoid exhausting + } return; } if( !(*it)->socket_is_open() && !(*it)->connecting) { @@ -2882,7 +2884,9 @@ namespace eosio { ++it; } g.unlock(); - start_conn_timer(connector_period, std::weak_ptr()); + if( reschedule ) { + start_conn_timer( connector_period, std::weak_ptr()); + } } // called from application thread From 3254df88997ac44b2422b44ce9714a42f4860d2a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 20:56:52 -0400 Subject: [PATCH 0168/1648] Pause for 5ms before trying to connect again --- plugins/net_plugin/net_plugin.cpp | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp 
index 86f05d6141d..e1d5aa28c7e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -234,6 +234,7 @@ namespace eosio { std::mutex connector_check_timer_mtx; unique_ptr connector_check_timer; + std::atomic connector_check_canceled{false}; std::mutex expire_timer_mtx; unique_ptr expire_timer; std::mutex keepalive_timer_mtx; @@ -312,12 +313,12 @@ namespace eosio { void handle_message(const connection_ptr& c, const packed_transaction& msg) = delete; // packed_transaction_ptr overload used instead void handle_message(const connection_ptr& c, const packed_transaction_ptr& msg); - void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); + void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection, bool cancel_previous); void start_expire_timer(); void start_monitors(); void expire(); - void connection_monitor(std::weak_ptr from_connection, bool reschedule); + void connection_monitor(std::weak_ptr from_connection); /** \name Peer Timestamps * Time message handling * @{ @@ -939,7 +940,7 @@ namespace eosio { g.unlock(); // try to re-connect now - my_impl->connection_monitor( connection_wptr(), false ); + my_impl->start_conn_timer( std::chrono::milliseconds( 5 ), connection_wptr(), true ); } void connection::blk_send_branch() { @@ -2784,15 +2785,18 @@ namespace eosio { } // called from any thread - void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { + void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection, bool cancel_previous) { std::lock_guard g( connector_check_timer_mtx ); + connector_check_canceled = cancel_previous; connector_check_timer->expires_from_now( du ); connector_check_timer->async_wait( [this, from_connection](boost::system::error_code ec) { if( !ec ) { - connection_monitor(from_connection, true); + connection_monitor(from_connection); } else { - fc_elog( logger, "Error from connection check monitor: ${m}",( "m", ec.message())); - start_conn_timer( connector_period, std::weak_ptr()); + if( connector_check_canceled.exchange( false ) ) { + fc_elog( logger, "Error from connection check monitor: ${m}", ("m", ec.message())); + start_conn_timer( connector_period, std::weak_ptr(), false ); + } } }); } @@ -2840,7 +2844,7 @@ namespace eosio { std::lock_guard g( expire_timer_mtx ); expire_timer.reset( new boost::asio::steady_timer( *server_ioc ) ); } - start_conn_timer(connector_period, std::weak_ptr()); + start_conn_timer(connector_period, std::weak_ptr(), false); start_expire_timer(); } @@ -2856,7 +2860,7 @@ namespace eosio { } // called from any thread - void net_plugin_impl::connection_monitor(std::weak_ptr from_connection, bool reschedule) { + void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); @@ -2865,9 +2869,7 @@ namespace eosio { if (it == connections.end()) it = connections.begin(); while (it != connections.end()) { if (fc::time_point::now() >= max_time) { - if( reschedule ) { - start_conn_timer( std::chrono::milliseconds( 1 ), *it ); // avoid exhausting - } + start_conn_timer( std::chrono::milliseconds( 1 ), *it, false ); // avoid exhausting return; } if( !(*it)->socket_is_open() && !(*it)->connecting) { @@ -2884,9 +2886,7 @@ namespace eosio { ++it; } g.unlock(); - if( reschedule ) { - start_conn_timer( connector_period, 
std::weak_ptr()); - } + start_conn_timer( connector_period, std::weak_ptr(), false ); } // called from application thread From 241acaa5fc50558d8ffa144192a284697ca2ed5f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 22:11:09 -0400 Subject: [PATCH 0169/1648] Correct bool check --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index e1d5aa28c7e..4154a8a2b6b 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2793,7 +2793,7 @@ namespace eosio { if( !ec ) { connection_monitor(from_connection); } else { - if( connector_check_canceled.exchange( false ) ) { + if( !connector_check_canceled.exchange( false ) ) { fc_elog( logger, "Error from connection check monitor: ${m}", ("m", ec.message())); start_conn_timer( connector_period, std::weak_ptr(), false ); } From f939ead6edb43c31c98388a6814370aa0d4122cf Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Apr 2019 10:15:53 -0400 Subject: [PATCH 0170/1648] Fix connection monitor --- plugins/net_plugin/net_plugin.cpp | 39 +++++++++++++++++++------------ 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4154a8a2b6b..8c132e3c0bc 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -234,7 +234,7 @@ namespace eosio { std::mutex connector_check_timer_mtx; unique_ptr connector_check_timer; - std::atomic connector_check_canceled{false}; + int connector_checks_in_flight{-1}; std::mutex expire_timer_mtx; unique_ptr expire_timer; std::mutex keepalive_timer_mtx; @@ -313,7 +313,7 @@ namespace eosio { void handle_message(const connection_ptr& c, const packed_transaction& msg) = delete; // packed_transaction_ptr overload used instead void handle_message(const connection_ptr& c, const packed_transaction_ptr& msg); - void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection, bool cancel_previous); + void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); void start_expire_timer(); void start_monitors(); @@ -634,7 +634,7 @@ namespace eosio { bool current(); void close(); private: - static void _close( connection* self ); // for easy capture + static void _close( connection* self, bool reconnect ); // for easy capture public: bool resolve_and_connect(); @@ -906,11 +906,11 @@ namespace eosio { void connection::close() { strand.post( [self = shared_from_this()]() { - connection::_close( self.get() ); + connection::_close( self.get(), true ); }); } - void connection::_close( connection* self ) { + void connection::_close( connection* self, bool reconnect ) { self->socket_open = false; boost::system::error_code ec; self->socket.shutdown( tcp::socket::shutdown_both, ec ); @@ -939,8 +939,9 @@ namespace eosio { self->read_delay_timer.cancel(); g.unlock(); - // try to re-connect now - my_impl->start_conn_timer( std::chrono::milliseconds( 5 ), connection_wptr(), true ); + if( reconnect ) { + my_impl->start_conn_timer( std::chrono::milliseconds( 5 ), connection_wptr() ); + } } void connection::blk_send_branch() { @@ -2069,8 +2070,8 @@ namespace eosio { } } else { if( endpoint_itr != tcp::resolver::iterator() ) { - c->close(); // close posts to strand, so also post connect otherwise connect will happen before close c->strand.post( [resolver, c, endpoint_itr]() { + connection::_close( c.get(), false ); 
// close posts to strand, so also post connect otherwise connect can happen before close c->connect( resolver, endpoint_itr ); } ); } else { @@ -2785,17 +2786,20 @@ namespace eosio { } // called from any thread - void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection, bool cancel_previous) { + void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { std::lock_guard g( connector_check_timer_mtx ); - connector_check_canceled = cancel_previous; + ++connector_checks_in_flight; connector_check_timer->expires_from_now( du ); connector_check_timer->async_wait( [this, from_connection](boost::system::error_code ec) { + std::unique_lock g( connector_check_timer_mtx ); + int num_in_flight = --connector_checks_in_flight; + g.unlock(); if( !ec ) { connection_monitor(from_connection); } else { - if( !connector_check_canceled.exchange( false ) ) { + if( num_in_flight == 0 ) { fc_elog( logger, "Error from connection check monitor: ${m}", ("m", ec.message())); - start_conn_timer( connector_period, std::weak_ptr(), false ); + start_conn_timer( connector_period, std::weak_ptr() ); } } }); @@ -2844,7 +2848,7 @@ namespace eosio { std::lock_guard g( expire_timer_mtx ); expire_timer.reset( new boost::asio::steady_timer( *server_ioc ) ); } - start_conn_timer(connector_period, std::weak_ptr(), false); + start_conn_timer(connector_period, std::weak_ptr()); start_expire_timer(); } @@ -2867,9 +2871,13 @@ namespace eosio { std::unique_lock g( connections_mtx ); auto it = (from ? connections.find(from) : connections.begin()); if (it == connections.end()) it = connections.begin(); + size_t num_rm = 0; while (it != connections.end()) { if (fc::time_point::now() >= max_time) { - start_conn_timer( std::chrono::milliseconds( 1 ), *it, false ); // avoid exhausting + connection_wptr wit = *it; + g.unlock(); + fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) ); + start_conn_timer( std::chrono::milliseconds( 1 ), wit ); // avoid exhausting return; } if( !(*it)->socket_is_open() && !(*it)->connecting) { @@ -2886,7 +2894,8 @@ namespace eosio { ++it; } g.unlock(); - start_conn_timer( connector_period, std::weak_ptr(), false ); + fc_dlog( logger, "connection monitor, removed ${n} connections", ("n", num_rm) ); + start_conn_timer( connector_period, std::weak_ptr() ); } // called from application thread From 2af05d70c413306a690cd1a2015fa095b944042a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Apr 2019 12:55:32 -0400 Subject: [PATCH 0171/1648] Fix for too many re-try connections --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8c132e3c0bc..fe0c6e97330 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -234,7 +234,7 @@ namespace eosio { std::mutex connector_check_timer_mtx; unique_ptr connector_check_timer; - int connector_checks_in_flight{-1}; + int connector_checks_in_flight{0}; std::mutex expire_timer_mtx; unique_ptr expire_timer; std::mutex keepalive_timer_mtx; From 93276047dbfe92a8cd783f7f0d6fc90520c827c5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Apr 2019 15:20:39 -0400 Subject: [PATCH 0172/1648] Fix connect race condition --- plugins/net_plugin/net_plugin.cpp | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git 
a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index fe0c6e97330..49210bcd6d6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -886,6 +886,7 @@ namespace eosio { close(); return false; } else { + fc_dlog( logger, "connected to ${peer}", ("peer", peer_name()) ); socket_open = true; start_read_message(); return true; @@ -3297,14 +3298,14 @@ namespace eosio { * Used to trigger a new connection from RPC API */ string net_plugin::connect( const string& host ) { + std::unique_lock g( my->connections_mtx ); if( my->find_connection( host ) ) return "already connected"; connection_ptr c = std::make_shared( host ); - fc_dlog( logger, "calling active connector" ); + fc_dlog( logger, "calling active connector: ${h}", ("h", host) ); if( c->resolve_and_connect() ) { - fc_dlog( logger, "adding new connection to the list" ); - std::unique_lock g( my->connections_mtx ); + fc_dlog( logger, "adding new connection to the list: ${c}", ("c", c->peer_name()) ); my->connections.insert( c ); } return "added connection"; @@ -3324,6 +3325,7 @@ namespace eosio { } optional net_plugin::status( const string& host )const { + std::shared_lock g( my->connections_mtx ); auto con = my->find_connection( host ); if( con ) return con->get_status(); @@ -3339,8 +3341,9 @@ namespace eosio { } return result; } - connection_ptr net_plugin_impl::find_connection(const string& host )const { - std::shared_lock g( connections_mtx ); + + // call with connections_mtx + connection_ptr net_plugin_impl::find_connection( const string& host )const { for( const auto& c : connections ) if( c->peer_address() == host ) return c; return connection_ptr(); From 1f3503312bd308c5925ab99bd64d1c6731d3252f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Apr 2019 18:03:05 -0400 Subject: [PATCH 0173/1648] Update log message to use peer_name --- plugins/net_plugin/net_plugin.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 49210bcd6d6..c425819ad4c 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1021,13 +1021,13 @@ namespace eosio { std::unique_lock g_conn( c->conn_mtx ); handshake_initializer::populate( c->last_handshake_sent ); c->last_handshake_sent.generation = ++c->sent_handshake_count; - fc_dlog( logger, "Sending handshake generation ${g} to ${ep}, lib ${lib}, head ${head}", - ("g", c->last_handshake_sent.generation)( "ep", c->peer_address() ) - ( "lib", c->last_handshake_sent.last_irreversible_block_num ) - ( "head", c->last_handshake_sent.head_num ) ); - auto cpy = c->last_handshake_sent; + auto last_handshake_sent = c->last_handshake_sent; g_conn.unlock(); - c->enqueue( cpy ); + fc_dlog( logger, "Sending handshake generation ${g} to ${ep}, lib ${lib}, head ${head}", + ("g", last_handshake_sent.generation)( "ep", c->peer_name() ) + ( "lib", last_handshake_sent.last_irreversible_block_num ) + ( "head", last_handshake_sent.head_num ) ); + c->enqueue( last_handshake_sent ); }); } From d1ea5f59951074a520c326ddfa7ec3a01a300060 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Apr 2019 22:47:00 -0400 Subject: [PATCH 0174/1648] Send handshakes on go_away --- plugins/net_plugin/net_plugin.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index c425819ad4c..d7a5fc9a537 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ 
b/plugins/net_plugin/net_plugin.cpp @@ -157,10 +157,10 @@ namespace eosio { void request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn = connection_ptr() ); void start_sync( const connection_ptr& c, uint32_t target ); void verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); - static void send_handshakes(); public: explicit sync_manager( uint32_t span ); + static void send_handshakes(); bool syncing_with_peer() const { return sync_state == lib_catchup; } void sync_reset_lib_num( const connection_ptr& conn ); void sync_reassign_fetch( const connection_ptr& c, go_away_reason reason ); @@ -1353,7 +1353,7 @@ namespace eosio { if( fork_head_block_num < sync_last_requested_num && sync_source && sync_source->current() ) { fc_ilog( logger, "ignoring request, head is ${h} last req = ${r} source is ${p}", - ("h", fork_head_block_num)( "r", sync_last_requested_num )( "p", sync_source->peer_address() ) ); + ("h", fork_head_block_num)( "r", sync_last_requested_num )( "p", sync_source->peer_name() ) ); return; } @@ -2413,7 +2413,7 @@ namespace eosio { if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { g_conn.unlock(); fc_dlog(logger, "checking for duplicate" ); - std::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g_cnts( my_impl->connections_mtx ); for(const auto& check : connections) { if(check == c) continue; @@ -2431,11 +2431,13 @@ namespace eosio { if (msg.time + c_time <= check_time) continue; + g_cnts.unlock(); fc_dlog( logger, "sending go_away duplicate to ${ep}", ("ep",msg.p2p_address) ); go_away_message gam(duplicate); gam.node_id = node_id; c->enqueue(gam); c->no_retry = duplicate; + sync_master->send_handshakes(); return; } } From 5a998094fb9db1a3efc26c2fb3b0615b3d57b7f0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 4 Apr 2019 09:01:28 -0400 Subject: [PATCH 0175/1648] Fix spin on running connection monitor --- plugins/net_plugin/net_plugin.cpp | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d7a5fc9a537..2308b3e6539 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -318,7 +318,7 @@ namespace eosio { void start_monitors(); void expire(); - void connection_monitor(std::weak_ptr from_connection); + void connection_monitor(std::weak_ptr from_connection, bool reschedule); /** \name Peer Timestamps * Time message handling * @{ @@ -2798,7 +2798,7 @@ namespace eosio { int num_in_flight = --connector_checks_in_flight; g.unlock(); if( !ec ) { - connection_monitor(from_connection); + connection_monitor(from_connection, num_in_flight == 0 ); } else { if( num_in_flight == 0 ) { fc_elog( logger, "Error from connection check monitor: ${m}", ("m", ec.message())); @@ -2867,7 +2867,7 @@ namespace eosio { } // called from any thread - void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { + void net_plugin_impl::connection_monitor(std::weak_ptr from_connection, bool reschedule ) { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); @@ -2880,7 +2880,9 @@ namespace eosio { connection_wptr wit = *it; g.unlock(); fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) ); - start_conn_timer( std::chrono::milliseconds( 1 ), wit ); // avoid exhausting + if( reschedule ) { + start_conn_timer( std::chrono::milliseconds( 1 
), wit ); // avoid exhausting + } return; } if( !(*it)->socket_is_open() && !(*it)->connecting) { @@ -2898,7 +2900,9 @@ namespace eosio { } g.unlock(); fc_dlog( logger, "connection monitor, removed ${n} connections", ("n", num_rm) ); - start_conn_timer( connector_period, std::weak_ptr() ); + if( reschedule ) { + start_conn_timer( connector_period, std::weak_ptr()); + } } // called from application thread From 8b5271e9e4b091a24236920b413f8856d1c04965 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 4 Apr 2019 11:03:01 -0400 Subject: [PATCH 0176/1648] Fix spam of reconnect --- plugins/net_plugin/net_plugin.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 2308b3e6539..b8995edca72 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -632,7 +632,7 @@ namespace eosio { bool connected(); bool current(); - void close(); + void close( bool reconnect = false ); private: static void _close( connection* self, bool reconnect ); // for easy capture public: @@ -905,9 +905,9 @@ namespace eosio { buffer_queue.clear_write_queue(); } - void connection::close() { - strand.post( [self = shared_from_this()]() { - connection::_close( self.get(), true ); + void connection::close( bool reconnect ) { + strand.post( [self = shared_from_this(), reconnect]() { + connection::_close( self.get(), reconnect ); }); } @@ -2078,7 +2078,7 @@ namespace eosio { } else { fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; - c->close(); + c->close( false ); } } } ) ); From 518a23d7b06f3e52d4ea416e45897206cd191c92 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 4 Apr 2019 15:23:45 -0400 Subject: [PATCH 0177/1648] Removed deprecated operating systems --- .buildkite/long_running_tests.yml | 118 +------------- .buildkite/pipeline.yml | 262 ++++-------------------------- 2 files changed, 33 insertions(+), 347 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index dd0d6cbee9d..c242d219b0e 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -1,6 +1,5 @@ steps: - - - command: | + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -21,7 +20,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -42,7 +41,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -63,28 +62,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" 
./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -105,28 +83,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" @@ -140,20 +97,6 @@ steps: artifact_paths: "build.tar.gz" timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: "build.tar.gz" - timeout: 60 - - wait - command: | # Ubuntu 16.04 Tests @@ -216,26 +159,6 @@ steps: workdir: /data/job timeout: 90 - - command: | # Amazon AWS-1 Linux Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":aws: 1 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 90 - - command: | # Amazon AWS-2 Linux Tests echo "--- :arrow_down: Downloading Build Directory" buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" @@ -256,37 +179,6 @@ steps: workdir: /data/job timeout: 90 - - command: | # Fedora Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":fedora: 27 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 90 - - - command: | # High Sierra Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running LR Tests" - ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh - label: ":darwin: High Sierra LR Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 90 - - command: | # Mojave Tests echo "--- :arrow_down: Downloading Build Directory" buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index f83249df044..19bbdf114ff 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,6 +1,5 @@ steps: - - - command: | + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -21,7 +20,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -42,7 +41,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -63,28 +62,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -105,28 +83,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" @@ -140,20 +97,6 @@ steps: artifact_paths: "build.tar.gz" timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: "build.tar.gz" - timeout: 60 - - wait # Ubuntu 16.04 Tests @@ -279,47 +222,6 @@ steps: workdir: /data/job timeout: 60 - # Amazon AWS-1 Linux Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 1 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":aws: 1 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":aws: 1 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - # Amazon AWS-2 Linux Tests - command: | echo "--- :arrow_down: Downloading Build Directory" @@ -361,71 +263,6 @@ steps: workdir: /data/job timeout: 60 - # Fedora Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":fedora: 27 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":fedora: 27 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - # High Sierra Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job - ./scripts/parallel-test.sh - label: ":darwin: High Sierra Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job && ./scripts/serial-test.sh - label: ":darwin: High Sierra NP Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 60 - # Mojave Tests - command: | echo "--- :arrow_down: Downloading Build Directory" @@ -452,37 +289,7 @@ steps: - wait - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: High Sierra Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: High Sierra Package Builder" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | + - command: | # Ubuntu 16.04 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" tar -zxf build.tar.gz @@ -508,7 +315,7 @@ steps: PKGTYPE: "deb" timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" tar -zxf build.tar.gz @@ -534,9 +341,9 @@ steps: PKGTYPE: "deb" timeout: 60 - - command: | + - command: | # CentOS 7 Package Builder echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" tar -zxf build.tar.gz echo "+++ :microscope: Starting package build" yum install -y rpm-build @@ -547,7 +354,7 @@ steps: mkdir -p /root/rpmbuild/SPECS mkdir -p /root/rpmbuild/SRPMS cd /data/job/build/packages && bash generate_package.sh rpm - label: ":fedora: 27 Package builder" + label: ":centos: 7 Package builder" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -560,57 +367,44 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job env: - OS: "fc27" + OS: "el7" PKGTYPE: "rpm" timeout: 60 - - command: | + - command: | # macOS Mojave Package Builder echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" tar -zxf build.tar.gz echo "+++ :microscope: Starting package build" - yum install -y rpm-build - mkdir -p /root/rpmbuild/BUILD - mkdir -p /root/rpmbuild/BUILDROOT - mkdir -p /root/rpmbuild/RPMS - mkdir -p /root/rpmbuild/SOURCES - mkdir -p /root/rpmbuild/SPECS - mkdir -p /root/rpmbuild/SRPMS - cd /data/job/build/packages && bash generate_package.sh rpm - label: ":centos: 7 Package builder" + ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew + label: ":darwin: Mojave Package Builder" agents: - queue: "automation-large-builder-fleet" + - "role=builder-v2-1" + - "os=mojave" artifact_paths: - - "build/packages/*.rpm" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" - workdir: /data/job - env: - OS: "el7" - PKGTYPE: "rpm" + - "build/packages/*.tar.gz" + - "build/packages/*.rb" timeout: 60 - wait - command: | echo "--- :arrow_down: Downloading brew files" - buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" - mv build/packages/eosio.rb build/packages/eosio_highsierra.rb buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" label: ":darwin: Brew Updater" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" timeout: 60 + + - command: | + echo "+++ :microscope: Running git submodule regression check" && \ + ./scripts/submodule_check.sh + label: "Git submodule regression check" + agents: + queue: "automation-large-builder-fleet" + timeout: 240 From 4e6aef547f46ba6218e36d81476f7226ab25e7ff Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 4 Apr 2019 18:44:40 -0400 Subject: [PATCH 0178/1648] Add expiration and block time to expired transaction exception log message --- plugins/producer_plugin/producer_plugin.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 6f81308aa83..eaef3336a42 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -407,7 +407,10 @@ class producer_plugin_impl : public std::enable_shared_from_thisid; if( fc::time_point(trx->packed_trx->expiration()) < block_time ) { - send_response(std::static_pointer_cast(std::make_shared(FC_LOG_MESSAGE(error, "expired transaction ${id}", ("id", id)) ))); + send_response(std::static_pointer_cast( + std::make_shared( + FC_LOG_MESSAGE(error, "expired transaction ${id}, expiration ${e}, block time ${bt}", + ("id", id)("e", trx->packed_trx->expiration())("bt", block_time)) ))); return; } From b3905db26e9c7392773b4408be0c0d18518bc5aa Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 4 Apr 2019 20:21:11 -0400 Subject: [PATCH 0179/1648] Do not broadcast blocks when blk branch sync --- plugins/net_plugin/net_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b8995edca72..6f9a319b4de 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -984,7 +984,8 @@ namespace eosio { enqueue_sync_block(); // still want to send transactions along during blk branch sync - syncing = false; + // todo: need diff variable for 
tranasaction sending + // todo: syncing = false; } void connection::blk_send( const block_id_type& blkid ) { From 95d08ca0e9c2a7445fd4dc703e9f11cc7ecad65a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 5 Apr 2019 19:53:30 -0400 Subject: [PATCH 0180/1648] Increase the timeout for nodeos_startup_catchup_lr_test from default of 1500 to 3000 seconds --- tests/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index f2e6958eb15..d57f4d9d81b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -103,6 +103,7 @@ add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_ set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_tests_properties(nodeos_startup_catchup_lr_test PROPERTIES TIMEOUT 3000) set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) if(ENABLE_COVERAGE_TESTING) From f3a38bb8cceef108e4bfde08c9a8893c7e3d2f48 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sun, 7 Apr 2019 00:00:42 -0500 Subject: [PATCH 0181/1648] Protect net_plugin_impl until net_plugin jobs all finished --- .../include/eosio/net_plugin/net_plugin.hpp | 2 +- plugins/net_plugin/net_plugin.cpp | 15 +-------------- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index 3bc594dd313..0fe35dd4277 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -39,7 +39,7 @@ namespace eosio { vector connections()const; private: - std::unique_ptr my; + std::shared_ptr my; }; } diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d60fca3b5e7..2be31bf4caf 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3257,20 +3257,6 @@ namespace eosio { void net_plugin::plugin_shutdown() { try { fc_ilog( logger, "shutdown.." 
); - { - std::lock_guard g( my->connector_check_timer_mtx ); - if( my->connector_check_timer ) - my->connector_check_timer->cancel(); - }{ - std::lock_guard g( my->expire_timer_mtx ); - if( my->expire_timer ) - my->expire_timer->cancel(); - }{ - std::lock_guard g( my->keepalive_timer_mtx ); - if( my->keepalive_timer ) - my->keepalive_timer->cancel(); - } - { fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); std::unique_lock g( my->connections_mtx ); @@ -3284,6 +3270,7 @@ namespace eosio { if( my->thread_pool ) { my->thread_pool->stop(); } + app().post( 0, [me = my](){} ); // keep my pointer alive until queue is drained fc_ilog( logger, "exit shutdown" ); } FC_CAPTURE_AND_RETHROW() From bc9bce6da3dcd4231a1e988985c46df5e751c4b0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 8 Apr 2019 08:43:36 -0500 Subject: [PATCH 0182/1648] nodeos can take a considerable amount of time to shutdown if busy --- tests/Node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 3e31c396d5f..abbb11e48b6 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1243,7 +1243,7 @@ def myFunc(): self.killed=True return True - def interruptAndVerifyExitStatus(self, timeout=15): + def interruptAndVerifyExitStatus(self, timeout=60): if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd)) assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." % (self.cmd) self.popenProc.send_signal(signal.SIGINT) From 243f0b6f90ee94230abbc6e82a94a3d85f3d36f8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 8 Apr 2019 14:57:11 -0500 Subject: [PATCH 0183/1648] Safely shutdown timers --- plugins/net_plugin/net_plugin.cpp | 48 ++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 13 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 2be31bf4caf..3e99bfee1be 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -203,7 +203,7 @@ namespace eosio { void expire_txns( uint32_t lib_num ); }; - class net_plugin_impl { + class net_plugin_impl : public std::enable_shared_from_this { public: unique_ptr acceptor; tcp::endpoint listen_endpoint; @@ -246,6 +246,7 @@ namespace eosio { boost::asio::steady_timer::duration resp_expected_period; boost::asio::steady_timer::duration keepalive_interval{std::chrono::seconds{32}}; int max_cleanup_time_ms = 0; + std::atomic in_shutdown{false}; const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; ///< Peer clock may be no more than 1 second skewed from our clock, including network latency. 
@@ -2791,19 +2792,21 @@ namespace eosio { // called from any thread void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { + if( in_shutdown ) return; std::lock_guard g( connector_check_timer_mtx ); ++connector_checks_in_flight; connector_check_timer->expires_from_now( du ); - connector_check_timer->async_wait( [this, from_connection](boost::system::error_code ec) { - std::unique_lock g( connector_check_timer_mtx ); - int num_in_flight = --connector_checks_in_flight; + connector_check_timer->async_wait( [my = shared_from_this(), from_connection](boost::system::error_code ec) { + std::unique_lock g( my->connector_check_timer_mtx ); + int num_in_flight = --my->connector_checks_in_flight; g.unlock(); if( !ec ) { - connection_monitor(from_connection, num_in_flight == 0 ); + my->connection_monitor(from_connection, num_in_flight == 0 ); } else { if( num_in_flight == 0 ) { + if( my->in_shutdown ) return; fc_elog( logger, "Error from connection check monitor: ${m}", ("m", ec.message())); - start_conn_timer( connector_period, std::weak_ptr() ); + my->start_conn_timer( my->connector_period, std::weak_ptr() ); } } }); @@ -2811,29 +2814,33 @@ namespace eosio { // thread safe void net_plugin_impl::start_expire_timer() { + if( in_shutdown ) return; std::lock_guard g( expire_timer_mtx ); expire_timer->expires_from_now( txn_exp_period); - expire_timer->async_wait( [this]( boost::system::error_code ec ) { + expire_timer->async_wait( [my = shared_from_this()]( boost::system::error_code ec ) { if( !ec ) { - expire(); + my->expire(); } else { + if( my->in_shutdown ) return; fc_elog( logger, "Error from transaction check monitor: ${m}", ("m", ec.message()) ); - start_expire_timer(); + my->start_expire_timer(); } } ); } // thread safe void net_plugin_impl::ticker() { + if( in_shutdown ) return; std::lock_guard g( keepalive_timer_mtx ); keepalive_timer->expires_from_now(keepalive_interval); - keepalive_timer->async_wait( [this]( boost::system::error_code ec ) { - ticker(); + keepalive_timer->async_wait( [my = shared_from_this()]( boost::system::error_code ec ) { + my->ticker(); if( ec ) { + if( my->in_shutdown ) return; fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - std::shared_lock g( connections_mtx ); - for( auto& c : connections ) { + std::shared_lock g( my->connections_mtx ); + for( auto& c : my->connections ) { if( c->socket_is_open() ) { c->strand.post( [c]() { c->send_time(); @@ -3257,6 +3264,21 @@ namespace eosio { void net_plugin::plugin_shutdown() { try { fc_ilog( logger, "shutdown.." ); + my->in_shutdown = true; + { + std::lock_guard g( my->connector_check_timer_mtx ); + if( my->connector_check_timer ) + my->connector_check_timer->cancel(); + }{ + std::lock_guard g( my->expire_timer_mtx ); + if( my->expire_timer ) + my->expire_timer->cancel(); + }{ + std::lock_guard g( my->keepalive_timer_mtx ); + if( my->keepalive_timer ) + my->keepalive_timer->cancel(); + } + { fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); std::unique_lock g( my->connections_mtx ); From af2fa989cecab29aad5e4897b64472ad686b57ef Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 9 Apr 2019 12:21:43 -0500 Subject: [PATCH 0184/1648] Don't always immediately try and reconnect. Do not close connection on first rejected block. 
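For clarity, a minimal standalone sketch of the policy this commit applies, using hypothetical names (rejection_tracker, should_disconnect_on_reject, on_block_accepted); the real logic lives in sync_manager::rejected_block(), sync_manager::sync_recv_block() and connection::close() in the diff below. A peer is no longer dropped on its first rejected block: it is only closed after more than def_max_consecutive_rejected_blocks (3) consecutive rejections, and an accepted block resets the count.

#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the per-connection counter this patch adds.
struct rejection_tracker {
   uint16_t consecutive_rejected = 0;
   uint16_t max_consecutive = 3; // mirrors def_max_consecutive_rejected_blocks

   // Called when a block from the peer is rejected; returns true only once the
   // peer has exceeded the allowed number of consecutive rejections.
   bool should_disconnect_on_reject() {
      return ++consecutive_rejected > max_consecutive;
   }

   // Called when a block from the peer is accepted; one good block forgives
   // earlier rejections.
   void on_block_accepted() {
      consecutive_rejected = 0;
   }
};

int main() {
   rejection_tracker t;
   for( int i = 0; i < 4; ++i ) {
      std::cout << "rejected block, disconnect? "
                << std::boolalpha << t.should_disconnect_on_reject() << "\n"; // false, false, false, true
   }
   t.on_block_accepted(); // an accepted block resets the counter
   std::cout << "after accepted block, disconnect? "
             << std::boolalpha << t.should_disconnect_on_reject() << "\n";    // false
   return 0;
}
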
--- plugins/net_plugin/net_plugin.cpp | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 3e99bfee1be..47663d458c5 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -402,6 +402,7 @@ namespace eosio { constexpr boost::asio::chrono::milliseconds def_read_delay_for_full_write_queue{100}; constexpr auto def_max_reads_in_flight = 1000; constexpr auto def_max_trx_in_progress_size = 100*1024*1024; // 100 MB + constexpr auto def_max_consecutive_rejected_blocks = 3; // num of rejected blocks before disconnect constexpr auto def_max_clients = 25; // 0 for unlimited clients constexpr auto def_max_nodes_per_host = 1; constexpr auto def_conn_retry_wait = 30; @@ -591,6 +592,7 @@ namespace eosio { std::atomic connecting{false}; std::atomic syncing{false}; uint16_t protocol_version = 0; + uint16_t consecutive_rejected_blocks = 0; private: const string peer_addr; string remote_endpoint_ip; // not updated after start @@ -633,7 +635,7 @@ namespace eosio { bool connected(); bool current(); - void close( bool reconnect = false ); + void close( bool reconnect = true ); private: static void _close( connection* self, bool reconnect ); // for easy capture public: @@ -920,7 +922,7 @@ namespace eosio { self->flush_queues(); self->connecting = false; self->syncing = false; - + self->consecutive_rejected_blocks = 0; std::unique_lock g_conn( self->conn_mtx ); bool has_last_req = !!self->last_req; g_conn.unlock(); @@ -1635,19 +1637,22 @@ namespace eosio { void sync_manager::rejected_block( const connection_ptr& c, uint32_t blk_num ) { std::unique_lock g( sync_mtx ); if( sync_state != in_sync ) { - fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_address()) ); - sync_last_requested_num = 0; - sync_source.reset(); - //todo: set_state( in_sync ); - g.unlock(); - c->close(); - send_handshakes(); + if( ++c->consecutive_rejected_blocks > def_max_consecutive_rejected_blocks ) { + fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn", blk_num)( "p", c->peer_name() ) ); + sync_last_requested_num = 0; + sync_source.reset(); + //todo: set_state( in_sync ); + g.unlock(); + c->close(); + send_handshakes(); + } } } // called from connection strand void sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_name() ) ); + c->consecutive_rejected_blocks = 0; std::unique_lock g_sync( sync_mtx ); stages state = sync_state; fc_dlog( logger, "state ${s}", ("s", stage_str( state )) ); @@ -3284,7 +3289,7 @@ namespace eosio { std::unique_lock g( my->connections_mtx ); for( auto& con : my->connections ) { fc_dlog( logger, "close: ${p}", ("p", con->peer_name()) ); - con->close(); + con->close( false ); } my->connections.clear(); } From 217ae1d5d27cb3d70815dd4a537c5d7dd8a15992 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Apr 2019 12:19:20 -0500 Subject: [PATCH 0185/1648] Update to fc with longer application lifetime --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index b6b55f5ff99..641a420d9c8 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit b6b55f5ff993f4be954d2aa556538636fbdaabb4 +Subproject commit 641a420d9c86984d132a1f31f7cd0022ecc370b3 From 
e2a3f8b5a434d800f0935c507cc321b42d984564 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Apr 2019 13:01:38 -0500 Subject: [PATCH 0186/1648] Temporarly disable gelf logging --- programs/eosio-launcher/main.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 7e6bfbaf7b3..08ab8c44fc2 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -1196,12 +1196,12 @@ launcher_def::write_logging_config_file(tn_node_def &node) { auto log_config = fc::logging_config::default_config(); if(gelf_enabled) { - log_config.appenders.push_back( - fc::appender_config( "net", "gelf", - fc::mutable_variant_object() - ( "endpoint", node.gelf_endpoint ) - ( "host", instance.name ) - ) ); +// log_config.appenders.push_back( +// fc::appender_config( "net", "gelf", +// fc::mutable_variant_object() +// ( "endpoint", node.gelf_endpoint ) +// ( "host", instance.name ) +// ) ); log_config.loggers.front().appenders.push_back("net"); fc::logger_config p2p ("net_plugin_impl"); p2p.level=fc::log_level::debug; From c7805f9293ddf4ae4b3020c762d40af5ed08ec83 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Apr 2019 13:32:16 -0500 Subject: [PATCH 0187/1648] Temporarily disable gelf logging --- programs/eosio-launcher/main.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 08ab8c44fc2..ca704d73b48 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -1195,13 +1195,14 @@ launcher_def::write_logging_config_file(tn_node_def &node) { } auto log_config = fc::logging_config::default_config(); + gelf_enabled = false; // todo remove this if(gelf_enabled) { -// log_config.appenders.push_back( -// fc::appender_config( "net", "gelf", -// fc::mutable_variant_object() -// ( "endpoint", node.gelf_endpoint ) -// ( "host", instance.name ) -// ) ); + log_config.appenders.push_back( + fc::appender_config( "net", "gelf", + fc::mutable_variant_object() + ( "endpoint", node.gelf_endpoint ) + ( "host", instance.name ) + ) ); log_config.loggers.front().appenders.push_back("net"); fc::logger_config p2p ("net_plugin_impl"); p2p.level=fc::log_level::debug; From a6b56d2c0d0beefe18c53253e2e0fbfb525b8445 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Apr 2019 15:20:40 -0500 Subject: [PATCH 0188/1648] Cleaner shutdown of thread pool --- libraries/chain/thread_utils.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/libraries/chain/thread_utils.cpp b/libraries/chain/thread_utils.cpp index 1d8a2707c14..83bf249326b 100644 --- a/libraries/chain/thread_utils.cpp +++ b/libraries/chain/thread_utils.cpp @@ -26,11 +26,9 @@ named_thread_pool::named_thread_pool( std::string name_prefix, size_t num_thread } named_thread_pool::~named_thread_pool() { - stop(); } void named_thread_pool::stop() { - _ioc_work.reset(); _ioc.stop(); _thread_pool.join(); _thread_pool.stop(); From 0d181ae368e2efe0126d4926db393bcfb39e0482 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Apr 2019 16:24:31 -0500 Subject: [PATCH 0189/1648] re-enable gelf logging --- programs/eosio-launcher/main.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index ca704d73b48..7e6bfbaf7b3 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -1195,7 +1195,6 @@ 
launcher_def::write_logging_config_file(tn_node_def &node) { } auto log_config = fc::logging_config::default_config(); - gelf_enabled = false; // todo remove this if(gelf_enabled) { log_config.appenders.push_back( fc::appender_config( "net", "gelf", From 0b98f71d6f40088bb52c8393797a2885a93fe9b4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Apr 2019 20:54:51 -0500 Subject: [PATCH 0190/1648] Restore destructor call to stop and ioc_worker reset. Call stop() before join(). --- libraries/chain/thread_utils.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libraries/chain/thread_utils.cpp b/libraries/chain/thread_utils.cpp index 83bf249326b..c17d8cde3e7 100644 --- a/libraries/chain/thread_utils.cpp +++ b/libraries/chain/thread_utils.cpp @@ -26,12 +26,14 @@ named_thread_pool::named_thread_pool( std::string name_prefix, size_t num_thread } named_thread_pool::~named_thread_pool() { + stop(); } void named_thread_pool::stop() { + _ioc_work.reset(); _ioc.stop(); - _thread_pool.join(); _thread_pool.stop(); + _thread_pool.join(); } From e93fff5938d959f99bef30da1ac6afb3983bf5d7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Apr 2019 20:55:28 -0500 Subject: [PATCH 0191/1648] Add close of acceptor and explicit reset of thread_pool --- plugins/net_plugin/net_plugin.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 47663d458c5..6b4579f498a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3284,6 +3284,11 @@ namespace eosio { my->keepalive_timer->cancel(); } + if( my->acceptor ) { + boost::system::error_code ec; + my->acceptor->close( ec ); + } + { fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); std::unique_lock g( my->connections_mtx ); @@ -3296,6 +3301,7 @@ namespace eosio { if( my->thread_pool ) { my->thread_pool->stop(); + my->thread_pool.reset(); } app().post( 0, [me = my](){} ); // keep my pointer alive until queue is drained fc_ilog( logger, "exit shutdown" ); From fe9168764b0508dd735a13c2d86b79019ec6e301 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Apr 2019 07:16:02 -0500 Subject: [PATCH 0192/1648] Revert appbase to master --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 641a420d9c8..b6b55f5ff99 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 641a420d9c86984d132a1f31f7cd0022ecc370b3 +Subproject commit b6b55f5ff993f4be954d2aa556538636fbdaabb4 From 9386e813fd3ae44fe741b6c9face89c127753e76 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Apr 2019 09:42:46 -0500 Subject: [PATCH 0193/1648] Code cleanup. Indicate thread safety of net_plugin_impl attributes. 
--- plugins/net_plugin/net_plugin.cpp | 413 +++++++++++++++--------------- 1 file changed, 212 insertions(+), 201 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 6b4579f498a..8b0a500c3ca 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -206,56 +206,70 @@ namespace eosio { class net_plugin_impl : public std::enable_shared_from_this { public: unique_ptr acceptor; - tcp::endpoint listen_endpoint; - string p2p_address; - string p2p_server_address; - uint32_t max_client_count = 0; - uint32_t max_nodes_per_host = 1; std::atomic current_connection_id{0}; - vector supplied_peers; - vector allowed_peers; ///< peer keys allowed to connect - std::map private_keys; ///< overlapping with producer keys, also authenticating non-producing nodes - // thread safe, only updated in plugin initialize + unique_ptr< sync_manager > sync_master; + unique_ptr< dispatch_manager > dispatcher; + + /** + * Thread safe, only updated in plugin initialize + * @{ + */ + string p2p_address; + string p2p_server_address; + vector supplied_peers; + vector allowed_peers; ///< peer keys allowed to connect + std::map private_keys; ///< overlapping with producer keys, also authenticating non-producing nodes enum possible_connections : char { None = 0, Producers = 1 << 0, Specified = 1 << 1, Any = 1 << 2 }; - possible_connections allowed_connections{None}; + possible_connections allowed_connections{None}; - connection_ptr find_connection(const string& host)const; + boost::asio::steady_timer::duration connector_period; + boost::asio::steady_timer::duration txn_exp_period; + boost::asio::steady_timer::duration resp_expected_period; + boost::asio::steady_timer::duration keepalive_interval{std::chrono::seconds{32}}; - mutable std::shared_timed_mutex connections_mtx; // switch to std::shared_mutex in C++17 - std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection - unique_ptr< sync_manager > sync_master; - unique_ptr< dispatch_manager > dispatcher; + int max_cleanup_time_ms = 0; + uint32_t max_client_count = 0; + uint32_t max_nodes_per_host = 1; + + /// Peer clock may be no more than 1 second skewed from our clock, including network latency. 
+ const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; + + bool network_version_match = false; + chain_id_type chain_id; + fc::sha256 node_id; + string user_agent_name; + + eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; + chain_plugin* chain_plug = nullptr; + producer_plugin* producer_plug = nullptr; + bool use_socket_read_watermark = false; + /** @} */ - std::mutex connector_check_timer_mtx; + mutable std::shared_timed_mutex connections_mtx; // switch to std::shared_mutex in C++17 + std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection + + std::mutex connector_check_timer_mtx; unique_ptr connector_check_timer; int connector_checks_in_flight{0}; + std::mutex expire_timer_mtx; unique_ptr expire_timer; + std::mutex keepalive_timer_mtx; unique_ptr keepalive_timer; - boost::asio::steady_timer::duration connector_period; - boost::asio::steady_timer::duration txn_exp_period; - boost::asio::steady_timer::duration resp_expected_period; - boost::asio::steady_timer::duration keepalive_interval{std::chrono::seconds{32}}; - int max_cleanup_time_ms = 0; - std::atomic in_shutdown{false}; - - const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; ///< Peer clock may be no more than 1 second skewed from our clock, including network latency. - bool network_version_match = false; - chain_id_type chain_id; // thread safe, only updated in plugin_initialize - fc::sha256 node_id; // thread safe, only updated in plugin initialize - string user_agent_name; // thread safe, only updated in plugin initialize + std::atomic in_shutdown{false}; - mutable std::mutex chain_info_mtx; + private: + mutable std::mutex chain_info_mtx; // protects chain_* uint32_t chain_lib_num{0}; uint32_t chain_head_blk_num{0}; uint32_t chain_fork_head_blk_num{0}; @@ -263,13 +277,7 @@ namespace eosio { block_id_type chain_head_blk_id; block_id_type chain_fork_head_blk_id; - eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; - - chain_plugin* chain_plug = nullptr; - producer_plugin* producer_plug = nullptr; - - bool use_socket_read_watermark = false; // thread safe, not modified outside plugin_initialize - + public: compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; @@ -286,34 +294,6 @@ namespace eosio { void transaction_ack(const std::pair&); void on_irreversible_block( const block_state_ptr& blk ); - static bool is_valid( const handshake_message& msg ); - - void handle_message(const connection_ptr& c, const handshake_message& msg); - void handle_message(const connection_ptr& c, const chain_size_message& msg); - void handle_message(const connection_ptr& c, const go_away_message& msg ); - /** \name Peer Timestamps - * Time message handling - * @{ - */ - /** \brief Process time_message - * - * Calculate offset, delay and dispersion. Note carefully the - * implied processing. The first-order difference is done - * directly in 64-bit arithmetic, then the result is converted - * to floating double. All further processing is in - * floating-double arithmetic with rounding done by the hardware. - * This is necessary in order to avoid overflow and preserve precision. 
- */ - void handle_message(const connection_ptr& c, const time_message& msg); - /** @} */ - void handle_message(const connection_ptr& c, const notice_message& msg); - void handle_message(const connection_ptr& c, const request_message& msg); - void handle_message(const connection_ptr& c, const sync_request_message& msg); - void handle_message(const connection_ptr& c, const signed_block& msg) = delete; // signed_block_ptr overload used instead - void handle_message(const connection_ptr& c, const signed_block_ptr& msg); - void handle_message(const connection_ptr& c, const packed_transaction& msg) = delete; // packed_transaction_ptr overload used instead - void handle_message(const connection_ptr& c, const packed_transaction_ptr& msg); - void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); void start_expire_timer(); void start_monitors(); @@ -352,7 +332,9 @@ namespace eosio { */ chain::signature_type sign_compact(const chain::public_key_type& signer, const fc::sha256& digest) const; - uint16_t to_protocol_version(uint16_t v); + constexpr uint16_t to_protocol_version(uint16_t v); + + connection_ptr find_connection(const string& host)const; // must call with held mutex }; const fc::string logger_name("net_plugin_impl"); @@ -672,7 +654,7 @@ namespace eosio { * packet is placed on the send queue. Calls the kernel time of * day routine and converts to a (at least) 64 bit integer. */ - tstamp get_time() + static tstamp get_time() { return std::chrono::system_clock::now().time_since_epoch().count(); } @@ -707,6 +689,34 @@ namespace eosio { bool to_sync_queue = false); void do_queue_write(int priority); + static bool is_valid( const handshake_message& msg ); + + void handle_message( const handshake_message& msg ); + void handle_message( const chain_size_message& msg ); + void handle_message( const go_away_message& msg ); + /** \name Peer Timestamps + * Time message handling + * @{ + */ + /** \brief Process time_message + * + * Calculate offset, delay and dispersion. Note carefully the + * implied processing. The first-order difference is done + * directly in 64-bit arithmetic, then the result is converted + * to floating double. All further processing is in + * floating-double arithmetic with rounding done by the hardware. + * This is necessary in order to avoid overflow and preserve precision. 
+ */ + void handle_message( const time_message& msg ); + /** @} */ + void handle_message( const notice_message& msg ); + void handle_message( const request_message& msg ); + void handle_message( const sync_request_message& msg ); + void handle_message( const signed_block& msg ) = delete; // signed_block_ptr overload used instead + void handle_message( const signed_block_ptr& msg ); + void handle_message( const packed_transaction& msg ) = delete; // packed_transaction_ptr overload used instead + void handle_message( const packed_transaction_ptr& msg ); + fc::optional _logger_variant; const fc::variant_object& get_logger_variant() { if (!_logger_variant) { @@ -728,9 +738,8 @@ namespace eosio { // called from connection strand struct msg_handler : public fc::visitor { - net_plugin_impl& impl; connection_ptr c; - msg_handler( net_plugin_impl& imp, const connection_ptr& conn) : impl(imp), c(conn) {} + explicit msg_handler( const connection_ptr& conn) : c(conn) {} void operator()( const signed_block& msg ) const { EOS_ASSERT( false, plugin_config_exception, "operator()(signed_block&&) should be called" ); @@ -748,9 +757,9 @@ namespace eosio { void operator()( signed_block&& msg ) const { shared_ptr ptr = std::make_shared( std::move( msg ) ); connection_wptr weak = c; - app().post(priority::high, [impl = &impl, ptr{std::move(ptr)}, weak{std::move(weak)}] { + app().post(priority::high, [ptr{std::move(ptr)}, weak{std::move(weak)}] { connection_ptr c = weak.lock(); - if( c ) impl->handle_message( c, ptr ); + if( c ) c->handle_message( ptr ); }); } @@ -758,49 +767,49 @@ namespace eosio { // continue call to handle_message on connection strand fc_dlog( logger, "handle packed_transaction" ); shared_ptr ptr = std::make_shared( std::move( msg ) ); - impl.handle_message( c, ptr ); + c->handle_message( ptr ); } void operator()( const handshake_message& msg ) const { // continue call to handle_message on connection strand fc_dlog( logger, "handle handshake_message" ); - impl.handle_message( c, msg ); + c->handle_message( msg ); } void operator()( const chain_size_message& msg ) const { // continue call to handle_message on connection strand fc_dlog( logger, "handle chain_size_message" ); - impl.handle_message( c, msg ); + c->handle_message( msg ); } void operator()( const go_away_message& msg ) const { // continue call to handle_message on connection strand fc_dlog( logger, "handle go_away_message" ); - impl.handle_message( c, msg ); + c->handle_message( msg ); } void operator()( const time_message& msg ) const { // continue call to handle_message on connection strand fc_dlog( logger, "handle time_message" ); - impl.handle_message( c, msg ); + c->handle_message( msg ); } void operator()( const notice_message& msg ) const { // continue call to handle_message on connection strand fc_dlog( logger, "handle notice_message" ); - impl.handle_message( c, msg ); + c->handle_message( msg ); } void operator()( const request_message& msg ) const { // continue call to handle_message on connection strand fc_dlog( logger, "handle request_message" ); - impl.handle_message( c, msg ); + c->handle_message( msg ); } void operator()( const sync_request_message& msg ) const { // continue call to handle_message on connection strand fc_dlog( logger, "handle sync_request_message" ); - impl.handle_message( c, msg ); + c->handle_message( msg ); } }; @@ -2325,7 +2334,7 @@ namespace eosio { auto ds = pending_message_buffer.create_datastream(); net_message msg; fc::raw::unpack( ds, msg ); - msg_handler m( *my_impl, shared_from_this() ); 
+ msg_handler m( shared_from_this() ); if( msg.contains() ) { m( std::move( msg.get() ) ); } else if( msg.contains() ) { @@ -2365,7 +2374,7 @@ namespace eosio { chain_lib_id, chain_head_blk_id, chain_fork_head_blk_id ); } - bool net_plugin_impl::is_valid( const handshake_message& msg ) { + bool connection::is_valid( const handshake_message& msg ) { // Do some basic validation of an incoming handshake_message, so things // that really aren't handshake messages can be quickly discarded without // affecting state. @@ -2390,39 +2399,38 @@ namespace eosio { return valid; } - void net_plugin_impl::handle_message(const connection_ptr& c, const chain_size_message& msg) { - peer_ilog(c, "received chain_size_message"); + void connection::handle_message( const chain_size_message& msg ) { + peer_ilog(this, "received chain_size_message"); } - // called from connection strand - void net_plugin_impl::handle_message( const connection_ptr& c, const handshake_message& msg ) { - peer_ilog(c, "received handshake_message"); + void connection::handle_message( const handshake_message& msg ) { + peer_ilog( this, "received handshake_message" ); if( !is_valid( msg ) ) { - peer_elog( c, "bad handshake message"); - c->enqueue( go_away_message( fatal_other ) ); + peer_elog( this, "bad handshake message"); + enqueue( go_away_message( fatal_other ) ); return; } fc_dlog( logger, "received handshake gen ${g} from ${ep}, lib ${lib}, head ${head}", - ("g", msg.generation)( "ep", c->peer_name() ) + ("g", msg.generation)( "ep", peer_name() ) ( "lib", msg.last_irreversible_block_num )( "head", msg.head_num ) ); - if( c->connecting ) { - c->connecting = false; + if( connecting ) { + connecting = false; } if (msg.generation == 1) { if( msg.node_id == node_id) { fc_elog( logger, "Self connection detected. 
Closing connection" ); - c->enqueue( go_away_message( self ) ); + enqueue( go_away_message( self ) ); return; } - std::unique_lock g_conn( c->conn_mtx ); - if( c->peer_address().empty() || c->last_handshake_recv.node_id == fc::sha256()) { + std::unique_lock g_conn( conn_mtx ); + if( peer_address().empty() || last_handshake_recv.node_id == fc::sha256()) { g_conn.unlock(); fc_dlog(logger, "checking for duplicate" ); std::shared_lock g_cnts( my_impl->connections_mtx ); - for(const auto& check : connections) { - if(check == c) + for(const auto& check : my_impl->connections) { + if(check.get() == this) continue; if(check->connected() && check->peer_name() == msg.p2p_address) { // It's possible that both peers could arrive here at relatively the same time, so @@ -2433,7 +2441,7 @@ namespace eosio { auto check_time = check->last_handshake_sent.time + check->last_handshake_recv.time; g_check_conn.unlock(); g_conn.lock(); - auto c_time = c->last_handshake_sent.time; + auto c_time = last_handshake_sent.time; g_conn.unlock(); if (msg.time + c_time <= check_time) continue; @@ -2442,48 +2450,49 @@ namespace eosio { fc_dlog( logger, "sending go_away duplicate to ${ep}", ("ep",msg.p2p_address) ); go_away_message gam(duplicate); gam.node_id = node_id; - c->enqueue(gam); - c->no_retry = duplicate; - sync_master->send_handshakes(); + enqueue(gam); + no_retry = duplicate; + my_impl->sync_master->send_handshakes(); return; } } } else { fc_dlog( logger, "skipping duplicate check, addr == ${pa}, id = ${ni}", - ("pa", c->peer_address())( "ni", c->last_handshake_recv.node_id ) ); + ("pa", peer_address())( "ni", last_handshake_recv.node_id ) ); g_conn.unlock(); } - if( msg.chain_id != chain_id) { + if( msg.chain_id != my_impl->chain_id ) { fc_elog( logger, "Peer on a different chain. Closing connection" ); - c->enqueue( go_away_message(go_away_reason::wrong_chain) ); + enqueue( go_away_message(go_away_reason::wrong_chain) ); return; } - c->protocol_version = to_protocol_version(msg.network_version); - if(c->protocol_version != net_version) { - if (network_version_match) { + protocol_version = my_impl->to_protocol_version(msg.network_version); + if( protocol_version != net_version ) { + if( my_impl->network_version_match ) { fc_elog( logger, "Peer network version does not match expected ${nv} but got ${mnv}", - ("nv", net_version)("mnv", c->protocol_version) ); - c->enqueue(go_away_message(wrong_version)); + ("nv", net_version)("mnv", protocol_version) ); + enqueue( go_away_message( wrong_version ) ); return; } else { fc_ilog( logger, "Local network version: ${nv} Remote version: ${mnv}", - ("nv", net_version)("mnv", c->protocol_version)); + ("nv", net_version)("mnv", protocol_version)); } } - if( c->node_id != msg.node_id) { - c->node_id = msg.node_id; + if( node_id != msg.node_id ) { + node_id = msg.node_id; } - if(!authenticate_peer(msg)) { + if( !my_impl->authenticate_peer( msg ) ) { fc_elog( logger, "Peer not authenticated. Closing connection." 
); - c->enqueue(go_away_message(authentication)); + enqueue( go_away_message( authentication ) ); return; } uint32_t peer_lib = msg.last_irreversible_block_num; - app().post( priority::low, [peer_lib, chain_plug = this->chain_plug, c, msg_lib_id = msg.last_irreversible_block_id]() { + app().post( priority::low, [peer_lib, chain_plug = my_impl->chain_plug, c = shared_from_this(), + msg_lib_id = msg.last_irreversible_block_id]() { controller& cc = chain_plug->chain(); uint32_t lib_num = cc.last_irreversible_block_num(); @@ -2510,67 +2519,67 @@ namespace eosio { } }); - if (c->sent_handshake_count == 0) { - c->send_handshake(); + if( sent_handshake_count == 0 ) { + send_handshake(); } } - std::unique_lock g_conn( c->conn_mtx ); - c->last_handshake_recv = msg; + std::unique_lock g_conn( conn_mtx ); + last_handshake_recv = msg; g_conn.unlock(); - c->_logger_variant.reset(); - sync_master->recv_handshake( c, msg ); + _logger_variant.reset(); + my_impl->sync_master->recv_handshake( shared_from_this(), msg ); } - void net_plugin_impl::handle_message(const connection_ptr& c, const go_away_message& msg) { - peer_wlog( c, "received go_away_message, reason = ${r}", ("r", reason_str( msg.reason )) ); - c->no_retry = msg.reason; + void connection::handle_message( const go_away_message& msg ) { + peer_wlog( this, "received go_away_message, reason = ${r}", ("r", reason_str( msg.reason )) ); + no_retry = msg.reason; if( msg.reason == duplicate ) { - c->node_id = msg.node_id; + node_id = msg.node_id; } - c->flush_queues(); - c->close(); + flush_queues(); + close(); } - void net_plugin_impl::handle_message(const connection_ptr& c, const time_message& msg) { - peer_ilog( c, "received time_message" ); + void connection::handle_message( const time_message& msg ) { + peer_ilog( this, "received time_message" ); /* We've already lost however many microseconds it took to dispatch * the message, but it can't be helped. */ - msg.dst = c->get_time(); + msg.dst = get_time(); // If the transmit timestamp is zero, the peer is horribly broken. if(msg.xmt == 0) return; /* invalid timestamp */ - if(msg.xmt == c->xmt) + if(msg.xmt == xmt) return; /* duplicate packet */ - c->xmt = msg.xmt; - c->rec = msg.rec; - c->dst = msg.dst; + xmt = msg.xmt; + rec = msg.rec; + dst = msg.dst; - if(msg.org == 0) - { - c->send_time(msg); - return; // We don't have enough data to perform the calculation yet. - } + if( msg.org == 0 ) { + send_time( msg ); + return; // We don't have enough data to perform the calculation yet. + } - c->offset = (double(c->rec - c->org) + double(msg.xmt - c->dst)) / 2; + offset = (double(rec - org) + double(msg.xmt - dst)) / 2; double NsecPerUsec{1000}; - if(logger.is_enabled(fc::log_level::all)) - logger.log(FC_LOG_MESSAGE(all, "Clock offset is ${o}ns (${us}us)", ("o", c->offset)("us", c->offset/NsecPerUsec))); - c->org = 0; - c->rec = 0; + if( logger.is_enabled( fc::log_level::all ) ) + logger.log( FC_LOG_MESSAGE( all, "Clock offset is ${o}ns (${us}us)", + ("o", offset)( "us", offset / NsecPerUsec ) ) ); + org = 0; + rec = 0; } - void net_plugin_impl::handle_message(const connection_ptr& c, const notice_message& msg) { + void connection::handle_message( const notice_message& msg ) { // peer tells us about one or more blocks or txns. 
When done syncing, forward on // notices of previously unknown blocks or txns, // - peer_ilog( c, "received notice_message" ); - c->connecting = false; + peer_ilog( this, "received notice_message" ); + connecting = false; if( msg.known_trx.mode != none ) { fc_dlog( logger, "this is a ${m} notice with ${n} transactions", ("m", modes_str( msg.known_trx.mode ))( "n", msg.known_trx.pending ) ); @@ -2579,8 +2588,8 @@ namespace eosio { case none: break; case last_irr_catch_up: { - std::unique_lock g_conn( c->conn_mtx ); - c->last_handshake_recv.head_num = msg.known_trx.pending; + std::unique_lock g_conn( conn_mtx ); + last_handshake_recv.head_num = msg.known_trx.pending; g_conn.unlock(); break; } @@ -2588,12 +2597,13 @@ namespace eosio { break; } case normal: { - dispatcher->recv_notice(c, msg, false); + my_impl->dispatcher->recv_notice( shared_from_this(), msg, false ); } } - if (msg.known_blocks.mode != none) { - fc_dlog(logger,"this is a ${m} notice with ${n} blocks", ("m",modes_str(msg.known_blocks.mode))("n",msg.known_blocks.pending)); + if( msg.known_blocks.mode != none ) { + fc_dlog( logger, "this is a ${m} notice with ${n} blocks", + ("m", modes_str( msg.known_blocks.mode ))( "n", msg.known_blocks.pending ) ); } switch (msg.known_blocks.mode) { case none : { @@ -2601,36 +2611,37 @@ namespace eosio { } case last_irr_catch_up: case catch_up: { - sync_master->sync_recv_notice(c,msg); + my_impl->sync_master->sync_recv_notice( shared_from_this(), msg ); break; } case normal : { - dispatcher->recv_notice(c, msg, false); + my_impl->dispatcher->recv_notice( shared_from_this(), msg, false ); break; } default: { - peer_elog(c, "bad notice_message : invalid known_blocks.mode ${m}",("m",static_cast(msg.known_blocks.mode))); + peer_elog( this, "bad notice_message : invalid known_blocks.mode ${m}", + ("m", static_cast(msg.known_blocks.mode)) ); } } } - void net_plugin_impl::handle_message(const connection_ptr& c, const request_message& msg) { + void connection::handle_message( const request_message& msg ) { if( msg.req_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid request_message, req_blocks.ids.size ${s}, closing ${p}", - ("s", msg.req_blocks.ids.size())("p",c->peer_name()) ); - c->close(); + ("s", msg.req_blocks.ids.size())( "p", peer_name() ) ); + close(); return; } switch (msg.req_blocks.mode) { case catch_up : - peer_ilog(c, "received request_message:catch_up"); - c->blk_send_branch(); + peer_ilog( this, "received request_message:catch_up" ); + blk_send_branch(); break; case normal : - peer_ilog(c, "received request_message:normal"); + peer_ilog( this, "received request_message:normal" ); if( !msg.req_blocks.ids.empty() ) { - c->blk_send(msg.req_blocks.ids.back()); + blk_send( msg.req_blocks.ids.back() ); } break; default:; @@ -2641,29 +2652,29 @@ namespace eosio { case catch_up : break; case none : - if(msg.req_blocks.mode == none) - c->stop_send(); + if( msg.req_blocks.mode == none ) { + stop_send(); + } // no break case normal : if( !msg.req_trx.ids.empty() ) { fc_elog( logger, "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); - c->close(); + close(); return; } break; default:; } - } - void net_plugin_impl::handle_message( const connection_ptr& c, const sync_request_message& msg ) { + void connection::handle_message( const sync_request_message& msg ) { fc_dlog( logger, "peer requested ${start} to ${end}", ("start", msg.start_block)("end", msg.end_block) ); if( msg.end_block == 0 ) { - c->peer_requested.reset(); - c->flush_queues(); + 
peer_requested.reset(); + flush_queues(); } else { - c->peer_requested = sync_state( msg.start_block, msg.end_block, msg.start_block-1); - c->enqueue_sync_block(); + peer_requested = sync_state( msg.start_block, msg.end_block, msg.start_block-1); + enqueue_sync_block(); } } @@ -2674,27 +2685,27 @@ namespace eosio { trx->get_signatures().size() * sizeof(signature_type); } - // called from connection strand - void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { - fc_dlog(logger, "got a packed transaction, cancel wait"); - if( db_read_mode == eosio::db_read_mode::READ_ONLY ) { - fc_dlog(logger, "got a txn in read-only mode - dropping"); + void connection::handle_message( const packed_transaction_ptr& trx ) { + fc_dlog( logger, "got a packed transaction, cancel wait" ); + if( my_impl->db_read_mode == eosio::db_read_mode::READ_ONLY ) { + fc_dlog( logger, "got a txn in read-only mode - dropping" ); return; } auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; - peer_ilog(c, "received packed_transaction ${id}", ("id", tid)); + peer_ilog( this, "received packed_transaction ${id}", ("id", tid) ); - bool have_trx = dispatcher->have_txn( tid ); - dispatcher->recv_transaction(c, ptrx); + bool have_trx = my_impl->dispatcher->have_txn( tid ); + my_impl->dispatcher->recv_transaction( shared_from_this(), ptrx ); if( have_trx ) { fc_dlog( logger, "got a duplicate transaction - dropping ${id}", ("id", tid) ); return; } - c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); - chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { + trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); + my_impl->chain_plug->accept_transaction( ptrx, + [c = shared_from_this(), ptrx](const static_variant& result) { // next (this lambda) called from application thread c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); bool accepted = false; @@ -2712,7 +2723,7 @@ namespace eosio { } } - controller& cc = chain_plug->chain(); + controller& cc = my_impl->chain_plug->chain(); uint32_t head_blk_num = cc.head_block_num(); boost::asio::post( my_impl->thread_pool->get_executor(), [accepted, ptrx{std::move(ptrx)}, head_blk_num]() { @@ -2726,11 +2737,13 @@ namespace eosio { } // called from application thread - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { - controller& cc = chain_plug->chain(); + void connection::handle_message( const signed_block_ptr& msg ) { + controller& cc = my_impl->chain_plug->chain(); block_id_type blk_id = msg->id(); uint32_t blk_num = msg->block_num(); - fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); + fc_dlog( logger, "canceling wait on ${p}", ("p", peer_name()) ); + // use c in this method instead of this to highlight that all methods called on c-> must be thread safe + connection_ptr c = shared_from_this(); c->cancel_wait(); // if we have closed connection then stop processing @@ -2739,7 +2752,7 @@ namespace eosio { try { if( cc.fetch_block_by_id(blk_id) ) { - c->strand.post( [sync_master = sync_master.get(), c, blk_id, blk_num]() { + c->strand.post( [sync_master = my_impl->sync_master.get(), c, blk_id, blk_num]() { sync_master->sync_recv_block( c, blk_id, blk_num ); }); return; @@ -2749,16 +2762,13 @@ namespace eosio { fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); } -// c->strand.post( [dispatcher = dispatcher.get(), c, blk_id, blk_num]() { -// dispatcher->recv_block( c, blk_id, blk_num ); -// }); 
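// The block is not already known locally: log its age, hand it to the controller via
// accept_block() on the application thread, then notify the dispatcher and sync manager
// on this connection's strand (or report the block as rejected if the controller throws).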
fc::microseconds age( fc::time_point::now() - msg->timestamp); - peer_ilog(c, "received signed_block : #${n} block age in secs = ${age}", - ("n",blk_num)("age",age.to_seconds())); + peer_ilog( c, "received signed_block : #${n} block age in secs = ${age}", + ("n", blk_num)( "age", age.to_seconds() ) ); go_away_reason reason = fatal_other; try { - chain_plug->accept_block(msg); + my_impl->chain_plug->accept_block(msg); reason = no_reason; } catch( const unlinkable_block_exception &ex) { peer_elog(c, "bad signed_block : ${m}", ("m",ex.what())); @@ -2780,15 +2790,15 @@ namespace eosio { } if( reason == no_reason ) { - boost::asio::post( my_impl->thread_pool->get_executor(), [self = this, msg]() { - self->dispatcher->update_txns_block_num( msg ); + boost::asio::post( my_impl->thread_pool->get_executor(), [dispatcher = my_impl->dispatcher.get(), msg]() { + dispatcher->update_txns_block_num( msg ); }); - c->strand.post( [sync_master = sync_master.get(), dispatcher = dispatcher.get(), c, blk_id, blk_num]() { + c->strand.post( [sync_master = my_impl->sync_master.get(), dispatcher = my_impl->dispatcher.get(), c, blk_id, blk_num]() { dispatcher->recv_block( c, blk_id, blk_num ); sync_master->sync_recv_block( c, blk_id, blk_num ); }); } else { - c->strand.post( [sync_master = sync_master.get(), dispatcher = dispatcher.get(), c, blk_id, blk_num]() { + c->strand.post( [sync_master = my_impl->sync_master.get(), dispatcher = my_impl->dispatcher.get(), c, blk_id, blk_num]() { sync_master->rejected_block( c, blk_num ); dispatcher->rejected_block( blk_id ); }); @@ -3185,6 +3195,7 @@ namespace eosio { // currently thread_pool only used for server_ioc my->thread_pool.emplace( "net", my->thread_pool_size ); + tcp::endpoint listen_endpoint; if( my->p2p_address.size() > 0 ) { auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); @@ -3192,14 +3203,14 @@ namespace eosio { // Note: need to add support for IPv6 too? tcp::resolver resolver( my->thread_pool->get_executor() ); - my->listen_endpoint = *resolver.resolve( query ); + listen_endpoint = *resolver.resolve( query ); my->acceptor.reset( new tcp::acceptor( my_impl->thread_pool->get_executor() ) ); if( !my->p2p_server_address.empty() ) { my->p2p_address = my->p2p_server_address; } else { - if( my->listen_endpoint.address().to_v4() == address_v4::any()) { + if( listen_endpoint.address().to_v4() == address_v4::any()) { boost::system::error_code ec; auto host = host_name( ec ); if( ec.value() != boost::system::errc::success ) { @@ -3221,13 +3232,13 @@ namespace eosio { my->ticker(); if( my->acceptor ) { - my->acceptor->open(my->listen_endpoint.protocol()); + my->acceptor->open(listen_endpoint.protocol()); my->acceptor->set_option(tcp::acceptor::reuse_address(true)); try { - my->acceptor->bind(my->listen_endpoint); + my->acceptor->bind(listen_endpoint); } catch (const std::exception& e) { fc_elog( logger, "net_plugin::plugin_startup failed to bind to port ${port}", - ("port", my->listen_endpoint.port())); + ("port", listen_endpoint.port())); throw e; } my->acceptor->listen(); @@ -3364,7 +3375,7 @@ namespace eosio { return connection_ptr(); } - uint16_t net_plugin_impl::to_protocol_version(uint16_t v) { + constexpr uint16_t net_plugin_impl::to_protocol_version(uint16_t v) { if (v >= net_version_base) { v -= net_version_base; return (v > net_version_range) ? 
0 : v; From b11a6368eab6016e86b2b8b70d89b21bdf61824a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Apr 2019 11:18:04 -0500 Subject: [PATCH 0194/1648] Add explicit cancel of acceptor, reverting to original behavior --- plugins/net_plugin/net_plugin.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8b0a500c3ca..6e2bde31324 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3297,6 +3297,7 @@ namespace eosio { if( my->acceptor ) { boost::system::error_code ec; + my->acceptor->cancel( ec ); my->acceptor->close( ec ); } From 33a2e306d90ea01959f0ab1e8bbafd6ecc96daea Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Apr 2019 14:05:10 -0500 Subject: [PATCH 0195/1648] Fix corner case of no connections when trying to determine who to sync to --- plugins/net_plugin/net_plugin.cpp | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 6e2bde31324..52e96ce3057 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1380,7 +1380,9 @@ namespace eosio { sync_source = conn; } else { std::shared_lock g( my_impl->connections_mtx ); - if (my_impl->connections.size() == 1) { + if( my_impl->connections.size() == 0 ) { + sync_source.reset(); + } else if( my_impl->connections.size() == 1 ) { if (!sync_source) { sync_source = *my_impl->connections.begin(); } @@ -1406,16 +1408,18 @@ namespace eosio { } //scan the list of peers looking for another able to provide sync blocks. - auto cstart_it = cptr; - do { - //select the first one which is current and break out. - if( (*cptr)->current() ) { - sync_source = *cptr; - break; - } - if(++cptr == my_impl->connections.end()) + if( cptr != my_impl->connections.end() ) { + auto cstart_it = cptr; + do { + //select the first one which is current and break out. + if( (*cptr)->current() ) { + sync_source = *cptr; + break; + } + if( ++cptr == my_impl->connections.end() ) cptr = my_impl->connections.begin(); - } while(cptr != cstart_it); + } while( cptr != cstart_it ); + } // no need to check the result, either source advanced or the whole list was checked and the old source is reused. 
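// Two guards close the no-connection corner case: an empty connection set simply clears
// sync_source, and the wrap-around scan above only runs when cptr is a valid (non-end)
// iterator, so there is never an iterator dereference when no peers are connected.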
} } From 1415b61440de3b904f4577e287c2a80ff65d0262 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 12 Apr 2019 07:36:13 -0500 Subject: [PATCH 0196/1648] Code cleanup --- plugins/net_plugin/net_plugin.cpp | 89 ++++++++++--------------------- 1 file changed, 28 insertions(+), 61 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 52e96ce3057..f9afeed611c 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -268,6 +268,12 @@ namespace eosio { std::atomic in_shutdown{false}; + compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; + + uint16_t thread_pool_size = 4; + optional thread_pool; + private: mutable std::mutex chain_info_mtx; // protects chain_* uint32_t chain_lib_num{0}; @@ -278,12 +284,6 @@ namespace eosio { block_id_type chain_fork_head_blk_id; public: - compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; - - uint16_t thread_pool_size = 4; - optional thread_pool; - void update_chain_info(); // lib_num, head_block_num, fork_head_blk_num, lib_id, head_blk_id, fork_head_blk_id std::tuple get_chain_info() const; @@ -535,34 +535,36 @@ namespace eosio { class connection : public std::enable_shared_from_this { public: explicit connection( string endpoint ); - connection(); - ~connection(); bool start_session(); bool socket_is_open() const { return socket_open.load(); } // thread safe, atomic const string& peer_address() const { return peer_addr; } // thread safe, const - const string& remote_address() const { return socket_open.load() ? remote_endpoint_ip : unknown; } // thread safe, not updated after start_session() + // thread safe, not updated after start_session() + const string& remote_address() const { return socket_open.load() ? 
remote_endpoint_ip : unknown; } private: static const string unknown; void update_endpoints(); - public: - optional peer_requested; // this peer is requesting info from us - boost::asio::io_context& server_ioc; - boost::asio::io_context::strand strand; - tcp::socket socket; // only accessed through strand after construction - private: + std::atomic socket_open{false}; + + const string peer_addr; + string remote_endpoint_ip; // not updated after start + string remote_endpoint_port; // not updated after start + string local_endpoint_ip; // not updated after start + string local_endpoint_port; // not updated after start + public: + boost::asio::io_context::strand strand; + tcp::socket socket; // only accessed through strand after construction fc::message_buffer<1024*1024> pending_message_buffer; - std::atomic outstanding_read_bytes{0}; // accessed only from server_ioc threads - + std::atomic outstanding_read_bytes{0}; // accessed only from strand threads queued_buffer buffer_queue; @@ -573,20 +575,15 @@ namespace eosio { int16_t sent_handshake_count = 0; std::atomic connecting{false}; std::atomic syncing{false}; - uint16_t protocol_version = 0; + uint16_t protocol_version = 0; uint16_t consecutive_rejected_blocks = 0; - private: - const string peer_addr; - string remote_endpoint_ip; // not updated after start - string remote_endpoint_port; // not updated after start - string local_endpoint_ip; // not updated after start - string local_endpoint_port; // not updated after start - public: std::mutex response_expected_timer_mtx; boost::asio::steady_timer response_expected_timer; + std::mutex read_delay_timer_mtx; boost::asio::steady_timer read_delay_timer; + std::atomic no_retry{no_reason}; mutable std::mutex conn_mtx; // mtx for last_req, last_handshake_recv, last_handshake_sent, fork_head, fork_head_num @@ -607,12 +604,6 @@ namespace eosio { tstamp rec{0}; //!< receive timestamp tstamp dst{0}; //!< destination timestamp tstamp xmt{0}; //!< transmit timestamp - - // Computed data - double offset{0}; //!< peer offset - - static const size_t ts_buffer_size{32}; - char ts[ts_buffer_size]; //!< working buffer for making human readable timestamps /** @} */ bool connected(); @@ -654,8 +645,7 @@ namespace eosio { * packet is placed on the send queue. Calls the kernel time of * day routine and converts to a (at least) 64 bit integer. 
*/ - static tstamp get_time() - { + static tstamp get_time() { return std::chrono::system_clock::now().time_since_epoch().count(); } /** @} */ @@ -816,21 +806,12 @@ namespace eosio { //--------------------------------------------------------------------------- connection::connection( string endpoint ) - : peer_requested(), - server_ioc( my_impl->thread_pool->get_executor() ), - strand( my_impl->thread_pool->get_executor() ), + : strand( my_impl->thread_pool->get_executor() ), socket( my_impl->thread_pool->get_executor() ), - node_id(), connection_id( ++my_impl->current_connection_id ), - sent_handshake_count(0), - connecting(false), - syncing(false), - protocol_version(0), - peer_addr(endpoint), + peer_addr( endpoint ), response_expected_timer( my_impl->thread_pool->get_executor() ), read_delay_timer( my_impl->thread_pool->get_executor() ), - no_retry(no_reason), - last_req(), last_handshake_recv(), last_handshake_sent() { @@ -839,21 +820,12 @@ namespace eosio { } connection::connection() - : peer_requested(), - server_ioc( my_impl->thread_pool->get_executor() ), - strand( my_impl->thread_pool->get_executor() ), + : strand( my_impl->thread_pool->get_executor() ), socket( my_impl->thread_pool->get_executor() ), - node_id(), connection_id( ++my_impl->current_connection_id ), - sent_handshake_count(0), - connecting(true), - syncing(false), - protocol_version(0), peer_addr(), response_expected_timer( my_impl->thread_pool->get_executor() ), read_delay_timer( my_impl->thread_pool->get_executor() ), - no_retry(no_reason), - last_req(), last_handshake_recv(), last_handshake_sent() { @@ -861,9 +833,6 @@ namespace eosio { node_id.data()[0] = 0; } - connection::~connection() { - } - void connection::update_endpoints() { boost::system::error_code ec; auto rep = socket.remote_endpoint(ec); @@ -1142,7 +1111,7 @@ namespace eosio { fc_dlog( logger, "enqueue sync block ${num}", ("num", peer_requested->last + 1) ); } uint32_t num = ++peer_requested->last; - bool trigger_send = true; // todo: = num == peer_requested->start_block; + bool trigger_send = true; if(num == peer_requested->end_block) { peer_requested.reset(); } @@ -1654,7 +1623,6 @@ namespace eosio { fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn", blk_num)( "p", c->peer_name() ) ); sync_last_requested_num = 0; sync_source.reset(); - //todo: set_state( in_sync ); g.unlock(); c->close(); send_handshakes(); @@ -2568,7 +2536,7 @@ namespace eosio { return; // We don't have enough data to perform the calculation yet. 
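// Past this point (msg.org != 0) all four timestamps are populated and the peer clock
// offset below can be computed. Worked example with hypothetical nanosecond-resolution
// stamps: org=1'000'000'000 (our send), rec=1'000'000'700 (peer receive),
// xmt=1'000'000'900 (peer send), dst=1'000'000'400 (our receive) gives
// ((rec-org)+(xmt-dst))/2 = (700+500)/2 = 600, i.e. the peer clock reads roughly 600ns
// ahead of ours; a symmetric network delay cancels out of the estimate.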
} - offset = (double(rec - org) + double(msg.xmt - dst)) / 2; + double offset = (double(rec - org) + double(msg.xmt - dst)) / 2; double NsecPerUsec{1000}; if( logger.is_enabled( fc::log_level::all ) ) @@ -3196,7 +3164,6 @@ namespace eosio { my->producer_plug = app().find_plugin(); - // currently thread_pool only used for server_ioc my->thread_pool.emplace( "net", my->thread_pool_size ); tcp::endpoint listen_endpoint; From ade69babf35c188b4d003ef69b55b36bfa4a5156 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 12 Apr 2019 07:47:55 -0500 Subject: [PATCH 0197/1648] Better handling syncing flag --- plugins/net_plugin/net_plugin.cpp | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index f9afeed611c..5ad787ac402 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -806,10 +806,10 @@ namespace eosio { //--------------------------------------------------------------------------- connection::connection( string endpoint ) - : strand( my_impl->thread_pool->get_executor() ), + : peer_addr( endpoint ), + strand( my_impl->thread_pool->get_executor() ), socket( my_impl->thread_pool->get_executor() ), connection_id( ++my_impl->current_connection_id ), - peer_addr( endpoint ), response_expected_timer( my_impl->thread_pool->get_executor() ), read_delay_timer( my_impl->thread_pool->get_executor() ), last_handshake_recv(), @@ -820,10 +820,10 @@ namespace eosio { } connection::connection() - : strand( my_impl->thread_pool->get_executor() ), + : peer_addr(), + strand( my_impl->thread_pool->get_executor() ), socket( my_impl->thread_pool->get_executor() ), connection_id( ++my_impl->current_connection_id ), - peer_addr(), response_expected_timer( my_impl->thread_pool->get_executor() ), read_delay_timer( my_impl->thread_pool->get_executor() ), last_handshake_recv(), @@ -963,10 +963,6 @@ namespace eosio { } fc_dlog( logger, "enqueue ${s} - ${e}", ("s", peer_requested->start_block)("e", peer_requested->end_block) ); enqueue_sync_block(); - - // still want to send transactions along during blk branch sync - // todo: need diff variable for tranasaction sending - // todo: syncing = false; } void connection::blk_send( const block_id_type& blkid ) { @@ -1106,6 +1102,7 @@ namespace eosio { bool connection::enqueue_sync_block() { if( !peer_requested ) { + syncing = false; return false; } else { fc_dlog( logger, "enqueue sync block ${num}", ("num", peer_requested->last + 1) ); From 1881d00ee421a905dddf2e8d4aaaf21cf0af8b93 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 12 Apr 2019 10:58:58 -0500 Subject: [PATCH 0198/1648] revert back to join before stop --- libraries/chain/thread_utils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/thread_utils.cpp b/libraries/chain/thread_utils.cpp index c17d8cde3e7..1d8a2707c14 100644 --- a/libraries/chain/thread_utils.cpp +++ b/libraries/chain/thread_utils.cpp @@ -32,8 +32,8 @@ named_thread_pool::~named_thread_pool() { void named_thread_pool::stop() { _ioc_work.reset(); _ioc.stop(); - _thread_pool.stop(); _thread_pool.join(); + _thread_pool.stop(); } From 2772d230031dfaa1cb8571171600d3146ee5785e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 12 Apr 2019 10:59:11 -0500 Subject: [PATCH 0199/1648] clean shutdown --- plugins/net_plugin/net_plugin.cpp | 33 +++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp 
b/plugins/net_plugin/net_plugin.cpp index 5ad787ac402..a6aa0eab404 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -537,6 +537,8 @@ namespace eosio { explicit connection( string endpoint ); connection(); + ~connection() {} + bool start_session(); bool socket_is_open() const { return socket_open.load(); } // thread safe, atomic @@ -966,9 +968,11 @@ namespace eosio { } void connection::blk_send( const block_id_type& blkid ) { - app().post( priority::low, [blkid, c = shared_from_this()]() { - controller& cc = my_impl->chain_plug->chain(); + app().post( priority::low, [blkid, weak = weak_from_this()]() { + connection_ptr c = weak.lock(); + if( !c ) return; try { + controller& cc = my_impl->chain_plug->chain(); signed_block_ptr b = cc.fetch_block_by_id( blkid ); if( b ) { fc_dlog( logger, "found block for id at num ${n}", ("n", b->block_num()) ); @@ -1112,7 +1116,9 @@ namespace eosio { if(num == peer_requested->end_block) { peer_requested.reset(); } - app().post( priority::medium, [num, trigger_send, c = shared_from_this()]() { + app().post( priority::medium, [num, trigger_send, weak = weak_from_this()]() { + connection_ptr c = weak.lock(); + if( !c ) return; controller& cc = my_impl->chain_plug->chain(); signed_block_ptr sb = cc.fetch_block_by_number( num ); if( sb ) { @@ -1918,7 +1924,10 @@ namespace eosio { req.req_blocks.mode = normal; // known_blocks.ids is never > 1 if( !msg.known_blocks.ids.empty() ) { - app().post( priority::low, [this, msg{std::move(msg)}, req{std::move(req)}, c]() mutable { + connection_wptr weak = c; + app().post( priority::low, [this, msg{std::move(msg)}, req{std::move(req)}, weak{std::move(weak)}]() mutable { + connection_ptr c = weak.lock(); + if( !c ) return; const block_id_type& blkid = msg.known_blocks.ids.back(); signed_block_ptr b; try { @@ -2460,8 +2469,10 @@ namespace eosio { } uint32_t peer_lib = msg.last_irreversible_block_num; - app().post( priority::low, [peer_lib, chain_plug = my_impl->chain_plug, c = shared_from_this(), + app().post( priority::low, [peer_lib, chain_plug = my_impl->chain_plug, weak = weak_from_this(), msg_lib_id = msg.last_irreversible_block_id]() { + connection_ptr c = weak.lock(); + if( !c ) return; controller& cc = chain_plug->chain(); uint32_t lib_num = cc.last_irreversible_block_num(); @@ -2674,12 +2685,15 @@ namespace eosio { trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); my_impl->chain_plug->accept_transaction( ptrx, - [c = shared_from_this(), ptrx](const static_variant& result) { + [weak = weak_from_this(), ptrx](const static_variant& result) { // next (this lambda) called from application thread - c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); + connection_ptr conn = weak.lock(); + if( conn ) { + conn->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); + } bool accepted = false; if (result.contains()) { - peer_dlog(c, "bad packed_transaction : ${m}", ("m",result.get()->what())); + fc_dlog( logger, "bad packed_transaction : ${m}", ("m", result.get()->what()) ); } else { auto trace = result.get(); if (!trace->except) { @@ -2688,7 +2702,7 @@ namespace eosio { } if( !accepted ) { - peer_elog( c, "bad packed_transaction : ${m}", ("m", trace->except->what())); + fc_elog( logger, "bad packed_transaction : ${m}", ("m", trace->except->what())); } } @@ -3281,7 +3295,6 @@ namespace eosio { if( my->thread_pool ) { my->thread_pool->stop(); - my->thread_pool.reset(); } app().post( 0, [me = my](){} ); // keep my pointer alive until queue is drained fc_ilog( logger, 
"exit shutdown" ); From 1c74c08acabf24968249e2e1ffd1359368bac3ac Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 12 Apr 2019 12:11:56 -0500 Subject: [PATCH 0200/1648] Some platforms do not have weak_from_this() --- plugins/net_plugin/net_plugin.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a6aa0eab404..385c9f274e6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -968,7 +968,8 @@ namespace eosio { } void connection::blk_send( const block_id_type& blkid ) { - app().post( priority::low, [blkid, weak = weak_from_this()]() { + connection_wptr weak = shared_from_this(); + app().post( priority::low, [blkid, weak{std::move(weak)}]() { connection_ptr c = weak.lock(); if( !c ) return; try { @@ -1116,7 +1117,8 @@ namespace eosio { if(num == peer_requested->end_block) { peer_requested.reset(); } - app().post( priority::medium, [num, trigger_send, weak = weak_from_this()]() { + connection_wptr weak = shared_from_this(); + app().post( priority::medium, [num, trigger_send, weak{std::move(weak)}]() { connection_ptr c = weak.lock(); if( !c ) return; controller& cc = my_impl->chain_plug->chain(); @@ -2469,7 +2471,8 @@ namespace eosio { } uint32_t peer_lib = msg.last_irreversible_block_num; - app().post( priority::low, [peer_lib, chain_plug = my_impl->chain_plug, weak = weak_from_this(), + connection_wptr weak = shared_from_this(); + app().post( priority::low, [peer_lib, chain_plug = my_impl->chain_plug, weak{std::move(weak)}, msg_lib_id = msg.last_irreversible_block_id]() { connection_ptr c = weak.lock(); if( !c ) return; @@ -2684,8 +2687,9 @@ namespace eosio { } trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); + connection_wptr weak = shared_from_this(); my_impl->chain_plug->accept_transaction( ptrx, - [weak = weak_from_this(), ptrx](const static_variant& result) { + [weak{std::move(weak)}, ptrx](const static_variant& result) { // next (this lambda) called from application thread connection_ptr conn = weak.lock(); if( conn ) { From 70362ec7511128ec6558bbe703747280ae5703f7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 12 Apr 2019 13:06:22 -0500 Subject: [PATCH 0201/1648] Cut down on reconnect spam. Faster shutdown while syncing. 
--- plugins/net_plugin/net_plugin.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 385c9f274e6..97602a240fe 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -924,7 +924,7 @@ namespace eosio { g.unlock(); if( reconnect ) { - my_impl->start_conn_timer( std::chrono::milliseconds( 5 ), connection_wptr() ); + my_impl->start_conn_timer( std::chrono::milliseconds( 500 ), connection_wptr() ); } } @@ -1638,6 +1638,10 @@ namespace eosio { // called from connection strand void sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_name() ) ); + if( app().is_quiting() ) { + c->close( false ); + return; + } c->consecutive_rejected_blocks = 0; std::unique_lock g_sync( sync_mtx ); stages state = sync_state; @@ -2211,6 +2215,7 @@ namespace eosio { if( !conn->socket_is_open() ) return; bool close_connection = false; + bool reconnect = true; try { if( !ec ) { if (bytes_transferred > conn->pending_message_buffer.bytes_to_write()) { @@ -2261,6 +2266,7 @@ namespace eosio { } else { fc_ilog( logger, "Peer closed connection" ); } + reconnect = false; close_connection = true; } } @@ -2275,11 +2281,12 @@ namespace eosio { catch (...) { fc_elog( logger, "Undefined exception handling read data" ); close_connection = true; + reconnect = false; } if( close_connection ) { fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - conn->close(); + conn->close( reconnect ); } })); } catch (...) { @@ -2756,6 +2763,7 @@ namespace eosio { go_away_reason reason = fatal_other; try { my_impl->chain_plug->accept_block(msg); + my_impl->update_chain_info(); reason = no_reason; } catch( const unlinkable_block_exception &ex) { peer_elog(c, "bad signed_block : ${m}", ("m",ex.what())); From 4426e8183f66668a50d7291f207ab6c4b66c5662 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 12 Apr 2019 13:52:35 -0500 Subject: [PATCH 0202/1648] Reduce info level logging --- plugins/net_plugin/net_plugin.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 97602a240fe..af7fb809545 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2684,7 +2684,7 @@ namespace eosio { auto ptrx = std::make_shared( trx ); const auto& tid = ptrx->id; - peer_ilog( this, "received packed_transaction ${id}", ("id", tid) ); + peer_dlog( this, "received packed_transaction ${id}", ("id", tid) ); bool have_trx = my_impl->dispatcher->have_txn( tid ); my_impl->dispatcher->recv_transaction( shared_from_this(), ptrx ); @@ -2941,13 +2941,13 @@ namespace eosio { void net_plugin_impl::transaction_ack(const std::pair& results) { const auto& id = results.second->id; if (results.first) { - fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); + fc_dlog( logger, "signaled NACK, trx-id = ${id} : ${why}", ("id", id)( "why", results.first->to_detail_string() ) ); controller& cc = chain_plug->chain(); uint32_t head_blk_num = cc.head_block_num(); dispatcher->rejected_transaction(id, head_blk_num); } else { - fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); + fc_dlog( logger, "signaled ACK, trx-id = ${id}", ("id", id) ); dispatcher->bcast_transaction(results.second); } } From 
f04318f3d885e58f9cf4e667c6f25bbc01d66f50 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 12 Apr 2019 16:52:59 -0500 Subject: [PATCH 0203/1648] Cut down on reconnect spam --- plugins/net_plugin/net_plugin.cpp | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index af7fb809545..9adecd1e9a4 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -385,6 +385,7 @@ namespace eosio { constexpr auto def_max_reads_in_flight = 1000; constexpr auto def_max_trx_in_progress_size = 100*1024*1024; // 100 MB constexpr auto def_max_consecutive_rejected_blocks = 3; // num of rejected blocks before disconnect + constexpr auto def_max_consecutive_immediate_connection_close = 3; // back off if client keeps closing constexpr auto def_max_clients = 25; // 0 for unlimited clients constexpr auto def_max_nodes_per_host = 1; constexpr auto def_conn_retry_wait = 30; @@ -579,6 +580,8 @@ namespace eosio { std::atomic syncing{false}; uint16_t protocol_version = 0; uint16_t consecutive_rejected_blocks = 0; + uint16_t consecutive_immediate_connection_close = 0; + fc::time_point last_close; std::mutex response_expected_timer_mtx; boost::asio::steady_timer response_expected_timer; @@ -903,6 +906,8 @@ namespace eosio { self->connecting = false; self->syncing = false; self->consecutive_rejected_blocks = 0; + ++self->consecutive_immediate_connection_close; + self->last_close = fc::time_point::now(); std::unique_lock g_conn( self->conn_mtx ); bool has_last_req = !!self->last_req; g_conn.unlock(); @@ -1648,11 +1653,13 @@ namespace eosio { fc_dlog( logger, "state ${s}", ("s", stage_str( state )) ); if( state == lib_catchup ) { if (blk_num != sync_next_expected_num) { - auto sync_next_expected = sync_next_expected_num; - g_sync.unlock(); - fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", - ("ne", sync_next_expected)( "bn", blk_num )( "p", c->peer_name() ) ); - c->close(); + if( ++c->consecutive_rejected_blocks > def_max_consecutive_rejected_blocks ) { + auto sync_next_expected = sync_next_expected_num; + g_sync.unlock(); + fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}", + ("ne", sync_next_expected)( "bn", blk_num )( "p", c->peer_name() ) ); + c->close(); + } return; } sync_next_expected_num = blk_num + 1; @@ -2033,6 +2040,13 @@ namespace eosio { return false; } + if( consecutive_immediate_connection_close > def_max_consecutive_immediate_connection_close ) { + auto connector_period_us = std::chrono::duration_cast( my_impl->connector_period ); + if( last_close > fc::time_point::now() - fc::microseconds( connector_period_us.count() ) ) { + return true; // true so doesn't remove from valid connections + } + } + string host = peer_address().substr( 0, colon ); string port = peer_address().substr( colon + 1); idump((host)(port)); @@ -2049,6 +2063,7 @@ namespace eosio { c->connect( resolver, endpoint_itr ); } else { fc_elog( logger, "Unable to resolve ${add}: ${error}", ("add", c->peer_name())( "error", err.message() ) ); + ++c->consecutive_immediate_connection_close; } } ) ); return true; @@ -2215,7 +2230,6 @@ namespace eosio { if( !conn->socket_is_open() ) return; bool close_connection = false; - bool reconnect = true; try { if( !ec ) { if (bytes_transferred > conn->pending_message_buffer.bytes_to_write()) { @@ -2244,6 +2258,7 @@ namespace eosio { if (bytes_in_buffer >= total_message_bytes) { 
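// A full message is buffered: skip past the length header, then process_next_message()
// decodes the payload that follows and dispatches it to this connection's handler.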
conn->pending_message_buffer.advance_read_ptr(message_header_size); + conn->consecutive_immediate_connection_close = 0; if (!conn->process_next_message(message_length)) { return; } @@ -2266,7 +2281,6 @@ namespace eosio { } else { fc_ilog( logger, "Peer closed connection" ); } - reconnect = false; close_connection = true; } } @@ -2281,12 +2295,11 @@ namespace eosio { catch (...) { fc_elog( logger, "Undefined exception handling read data" ); close_connection = true; - reconnect = false; } if( close_connection ) { fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); - conn->close( reconnect ); + conn->close(); } })); } catch (...) { From 5b47b81accc76c39cc1d7aa946542c8809590b0d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 12 Apr 2019 21:14:05 -0500 Subject: [PATCH 0204/1648] Reconnect sooner. Do not cancel wait on transaction recv. --- plugins/net_plugin/net_plugin.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 9adecd1e9a4..2094198dd3f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -385,7 +385,7 @@ namespace eosio { constexpr auto def_max_reads_in_flight = 1000; constexpr auto def_max_trx_in_progress_size = 100*1024*1024; // 100 MB constexpr auto def_max_consecutive_rejected_blocks = 3; // num of rejected blocks before disconnect - constexpr auto def_max_consecutive_immediate_connection_close = 3; // back off if client keeps closing + constexpr auto def_max_consecutive_immediate_connection_close = 9; // back off if client keeps closing constexpr auto def_max_clients = 25; // 0 for unlimited clients constexpr auto def_max_nodes_per_host = 1; constexpr auto def_conn_retry_wait = 30; @@ -929,7 +929,7 @@ namespace eosio { g.unlock(); if( reconnect ) { - my_impl->start_conn_timer( std::chrono::milliseconds( 500 ), connection_wptr() ); + my_impl->start_conn_timer( std::chrono::milliseconds( 100 ), connection_wptr() ); } } @@ -1911,7 +1911,7 @@ namespace eosio { add_peer_txn( nts ); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); - c->cancel_wait(); + //todo c->cancel_wait(); } void dispatch_manager::rejected_transaction(const transaction_id_type& id, uint32_t head_blk_num) { From 59b1ea975bcf58360ff789544cbb9f6e55c252c1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 15 Apr 2019 15:26:08 -0500 Subject: [PATCH 0205/1648] Added print of lib when retrieved --- tests/Node.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/Node.py b/tests/Node.py index abbb11e48b6..0d3d3caaaab 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1199,6 +1199,7 @@ def getIrreversibleBlockNum(self): if not self.enableMongo: info=self.getInfo(exitOnError=True) if info is not None: + Utils.Print("current lib: %d" % (info["last_irreversible_block_num"])) return info["last_irreversible_block_num"] else: # Either this implementation or the one in getHeadBlockNum are likely wrong. 
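Taken together, the last few patches implement a reconnect back-off: close() counts connects that dropped immediately and records when the last close happened, a completed message resets the counter, and connect() skips new attempts while both the count and the recency check are over the limit. A condensed sketch of that gate, with std::chrono standing in for the fc time types and all names simplified:

    #include <chrono>

    struct reconnect_backoff {
       unsigned consecutive_immediate_close = 0;
       std::chrono::steady_clock::time_point last_close{};

       // true -> skip this connect attempt, but keep the connection object around so the
       // connection timer can retry once the period has passed
       bool should_skip_attempt( std::chrono::milliseconds connector_period,
                                 unsigned max_immediate_closes ) const {
          return consecutive_immediate_close > max_immediate_closes &&
                 last_close > std::chrono::steady_clock::now() - connector_period;
       }

       void on_close()            { ++consecutive_immediate_close; last_close = std::chrono::steady_clock::now(); }
       void on_complete_message() { consecutive_immediate_close = 0; }
    };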
From 40c9e6870c989c338ee2fd1b0822c7dcbc01b766 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 15 Apr 2019 21:03:44 -0500 Subject: [PATCH 0206/1648] Fix transition from sync --- plugins/net_plugin/net_plugin.cpp | 35 +++++++++++++++++++------------ 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 2094198dd3f..f12dd353862 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -142,7 +142,7 @@ namespace eosio { in_sync }; - std::mutex sync_mtx; + mutable std::mutex sync_mtx; uint32_t sync_known_lib_num; uint32_t sync_last_requested_num; uint32_t sync_next_expected_num; @@ -164,6 +164,7 @@ namespace eosio { explicit sync_manager( uint32_t span ); static void send_handshakes(); bool syncing_with_peer() const { return sync_state == lib_catchup; } + bool block_while_syncing_with_other_peer( const connection_ptr& c ) const; void sync_reset_lib_num( const connection_ptr& conn ); void sync_reassign_fetch( const connection_ptr& c, go_away_reason reason ); void rejected_block( const connection_ptr& c, uint32_t blk_num ); @@ -921,7 +922,7 @@ namespace eosio { self->last_handshake_sent = handshake_message(); g_conn.unlock(); my_impl->sync_master->sync_reset_lib_num( self->shared_from_this() ); - fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); // peer_name(), do not hold conn_mtx + fc_dlog( logger, "closed, canceling wait on ${p}", ("p", self->peer_name()) ); // peer_name(), do not hold conn_mtx self->cancel_wait(); std::unique_lock g( self->read_delay_timer_mtx ); @@ -1112,7 +1113,6 @@ namespace eosio { bool connection::enqueue_sync_block() { if( !peer_requested ) { - syncing = false; return false; } else { fc_dlog( logger, "enqueue sync block ${num}", ("num", peer_requested->last + 1) ); @@ -1319,6 +1319,14 @@ namespace eosio { sync_state = newstate; } + bool sync_manager::block_while_syncing_with_other_peer( const connection_ptr& c ) const { + if( syncing_with_peer() ) { + std::lock_guard g( sync_mtx ); + return c != sync_source; + } + return false; + } + void sync_manager::sync_reset_lib_num(const connection_ptr& c) { std::unique_lock g( sync_mtx ); if( sync_state == in_sync ) { @@ -1499,7 +1507,6 @@ namespace eosio { std::ignore, std::ignore, head_id ) = my_impl->get_chain_info(); sync_reset_lib_num(c); - c->syncing = false; //-------------------------------- // sync need checks; (lib == last irreversible block) @@ -1515,6 +1522,7 @@ namespace eosio { if (head_id == msg.head_id) { fc_dlog(logger, "sync check state 0"); + c->syncing = false; // notify peer of our pending transactions notice_message note; note.known_blocks.mode = none; @@ -1525,6 +1533,7 @@ namespace eosio { } if (head < peer_lib) { fc_dlog(logger, "sync check state 1"); + c->syncing = false; // wait for receipt of a notice message before initiating sync if (c->protocol_version < proto_explicit_sync) { start_sync( c, peer_lib ); @@ -1547,6 +1556,7 @@ namespace eosio { if (head < msg.head_num ) { fc_dlog(logger, "sync check state 3"); + c->syncing = false; verify_catchup(c, msg.head_num, msg.head_id); return; } @@ -1563,6 +1573,7 @@ namespace eosio { c->syncing = true; return; } + c->syncing = false; fc_elog( logger, "sync check failed to resolve status" ); } @@ -1850,7 +1861,7 @@ namespace eosio { return; } fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)( "p", cp->peer_name() ) ); - cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); + cp->enqueue_buffer( 
send_buffer, true, priority::medium, no_reason ); } }); } @@ -1909,9 +1920,6 @@ namespace eosio { void dispatch_manager::recv_transaction(const connection_ptr& c, const transaction_metadata_ptr& txn) { node_transaction_state nts = {txn->id, txn->packed_trx->expiration(), 0, c->connection_id}; add_peer_txn( nts ); - - fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); - //todo c->cancel_wait(); } void dispatch_manager::rejected_transaction(const transaction_id_type& id, uint32_t head_blk_num) { @@ -2419,7 +2427,7 @@ namespace eosio { } if (msg.generation == 1) { if( msg.node_id == node_id) { - fc_elog( logger, "Self connection detected. Closing connection" ); + fc_elog( logger, "Self connection detected node_id ${id}. Closing connection", ("id", node_id) ); enqueue( go_away_message( self ) ); return; } @@ -2689,7 +2697,6 @@ namespace eosio { } void connection::handle_message( const packed_transaction_ptr& trx ) { - fc_dlog( logger, "got a packed transaction, cancel wait" ); if( my_impl->db_read_mode == eosio::db_read_mode::READ_ONLY ) { fc_dlog( logger, "got a txn in read-only mode - dropping" ); return; @@ -2748,15 +2755,16 @@ namespace eosio { controller& cc = my_impl->chain_plug->chain(); block_id_type blk_id = msg->id(); uint32_t blk_num = msg->block_num(); - fc_dlog( logger, "canceling wait on ${p}", ("p", peer_name()) ); // use c in this method instead of this to highlight that all methods called on c-> must be thread safe connection_ptr c = shared_from_this(); - c->cancel_wait(); // if we have closed connection then stop processing if( !c->socket_is_open() ) return; + if( my_impl->sync_master->block_while_syncing_with_other_peer(c) ) + return; + try { if( cc.fetch_block_by_id(blk_id) ) { c->strand.post( [sync_master = my_impl->sync_master.get(), c, blk_id, blk_num]() { @@ -3190,7 +3198,6 @@ namespace eosio { EOS_ASSERT( my->chain_plug, chain::missing_chain_plugin_exception, "" ); my->chain_id = my->chain_plug->get_chain_id(); fc::rand_pseudo_bytes( my->node_id.data(), my->node_id.data_size()); - fc_ilog( logger, "my node_id is ${id}", ("id", my->node_id )); } FC_LOG_AND_RETHROW() } @@ -3198,6 +3205,8 @@ namespace eosio { void net_plugin::plugin_startup() { handle_sighup(); + fc_ilog( logger, "my node_id is ${id}", ("id", my->node_id )); + my->producer_plug = app().find_plugin(); my->thread_pool.emplace( "net", my->thread_pool_size ); From 2337fc53b219883231a02e76228f454ce16c8bc6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 16 Apr 2019 21:03:18 -0500 Subject: [PATCH 0207/1648] Optimize lib catchup to head catchup --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index f12dd353862..4b86cfc1ccb 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1475,7 +1475,7 @@ namespace eosio { if( sync_state == in_sync ) { set_state( lib_catchup ); - sync_next_expected_num = lib_num + 1; + sync_next_expected_num = std::max( lib_num + 1, sync_next_expected_num ); } fc_ilog( logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", From 571165c1be1fdd3dd80aa6682691110940ecd4d3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 17 Apr 2019 09:07:06 -0500 Subject: [PATCH 0208/1648] Change default thread pool size to 2 --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp 
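The block_while_syncing_with_other_peer() helper added above follows a small pattern: check the sync state first, take the sync mutex only when a lib catchup is actually in progress, then compare the sending connection against the current sync source. A minimal sketch of that pattern; conn, sync_gate and the atomic flag are simplified stand-ins for the plugin's connection type and sync-state check:

    #include <atomic>
    #include <memory>
    #include <mutex>

    struct conn {};
    using conn_ptr = std::shared_ptr<conn>;

    struct sync_gate {
       std::atomic<bool> lib_catchup{false};     // simplified stand-in for the sync state machine
       mutable std::mutex mtx;                   // protects sync_source
       conn_ptr sync_source;

       // true -> ignore blocks from 'c' because we are currently syncing from a different peer
       bool block_while_syncing_with_other_peer( const conn_ptr& c ) const {
          if( !lib_catchup.load() ) return false;
          std::lock_guard<std::mutex> g( mtx );
          return c != sync_source;
       }
    };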
index 4b86cfc1ccb..70b2b4fa198 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -272,7 +272,7 @@ namespace eosio { compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; - uint16_t thread_pool_size = 4; + uint16_t thread_pool_size = 2; optional thread_pool; private: From c6a7f14dffcf8ca2704748075063bab8eaa50b80 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 17 Apr 2019 11:56:31 -0500 Subject: [PATCH 0209/1648] Hook up to lib signal instead of channel to get notificaiton sooner --- plugins/net_plugin/net_plugin.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index ff374715587..8ac40792f25 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3263,15 +3263,16 @@ namespace eosio { } chain::controller&cc = my->chain_plug->chain(); { - cc.accepted_block.connect( boost::bind(&net_plugin_impl::on_accepted_block, my.get(), _1)); + cc.accepted_block.connect( [my = my]( const block_state_ptr& s ) { + my->on_accepted_block( s ); + } ); + cc.irreversible_block.connect( [my = my]( const block_state_ptr& s ) { + my->on_irreversible_block( s ); + } ); } my->incoming_transaction_ack_subscription = app().get_channel().subscribe( boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); - my->incoming_irreversible_block_subscription = app().get_channel().subscribe( - [this]( const block_state_ptr& s ) { - my->on_irreversible_block( s ); - }); my->db_read_mode = cc.get_read_mode(); if( my->db_read_mode == chain::db_read_mode::READ_ONLY ) { From c7ceebb43a6364f7fb7951f93a194a0809fe980f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 18 Apr 2019 08:01:12 -0500 Subject: [PATCH 0210/1648] Fix shutdown of catchup node --- tests/nodeos_startup_catchup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index 6d811fbce67..5934750d3a9 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -165,7 +165,7 @@ def waitForNodeStarted(node): catchupNode.interruptAndVerifyExitStatus(60) Print("Restart catchup node") - catchupNode.relaunch(catchupNodeNum) + catchupNode.relaunch(catchupNodeNum, cachePopen=True) waitForNodeStarted(catchupNode) lastCatchupLibNum=lib(catchupNode) From 130a74f33a0de6a067b8b9b0b0c637d04b84a8ae Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 19 Apr 2019 10:56:59 -0500 Subject: [PATCH 0211/1648] Test with logging disabled --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8ac40792f25..0747a1ce6b0 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -338,7 +338,7 @@ namespace eosio { connection_ptr find_connection(const string& host)const; // must call with held mutex }; - const fc::string logger_name("net_plugin_impl"); + const fc::string logger_name("net_plugin_impl_todo_test"); fc::logger logger; std::string peer_log_format; From 8e3586c13cff6d00423b92b47950254b1c528634 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 19 Apr 2019 13:22:33 -0500 Subject: [PATCH 0212/1648] Restore logging --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
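PATCH 0209 above replaces the boost::bind subscriptions with lambdas that capture the shared_ptr my, and takes the irreversible-block notification from the controller signal instead of the application channel so it arrives sooner. A schematic sketch of the capture pattern, using boost::signals2 and an int argument as stand-ins for the controller's signal and block_state_ptr:

#include <boost/signals2/signal.hpp>
#include <iostream>
#include <memory>

// Stand-in for net_plugin_impl; the real handlers take block_state_ptr.
struct net_impl_sketch {
   void on_irreversible_block( uint32_t block_num ) {
      std::cout << "lib is now " << block_num << "\n";
   }
};

int main() {
   boost::signals2::signal<void(uint32_t)> irreversible_block;   // stand-in for the controller signal
   auto my = std::make_shared<net_impl_sketch>();
   // capturing the shared_ptr keeps the impl alive for the lifetime of the connection
   irreversible_block.connect( [my]( uint32_t n ) { my->on_irreversible_block( n ); } );
   irreversible_block( 1000 );
}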
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0747a1ce6b0..8ac40792f25 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -338,7 +338,7 @@ namespace eosio { connection_ptr find_connection(const string& host)const; // must call with held mutex }; - const fc::string logger_name("net_plugin_impl_todo_test"); + const fc::string logger_name("net_plugin_impl"); fc::logger logger; std::string peer_log_format; From 3f2b658c185b35cdc0b0592a49ce0c200fe8ba11 Mon Sep 17 00:00:00 2001 From: UMU618 Date: Thu, 25 Apr 2019 06:02:37 +0000 Subject: [PATCH 0213/1648] fc::variant to BSON --- .../include/eosio/mongo_db_plugin/bson.hpp | 248 ++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/bson.hpp diff --git a/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/bson.hpp b/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/bson.hpp new file mode 100644 index 00000000000..37e21d332b4 --- /dev/null +++ b/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/bson.hpp @@ -0,0 +1,248 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace eosio { +void to_bson(const fc::variant_object& o, bsoncxx::builder::core& c); +void to_bson(const fc::variants& v, bsoncxx::builder::core& c); +void to_bson(const fc::variant& v, bsoncxx::builder::core& c); +bsoncxx::document::value to_bson(const fc::variant& v); + +void from_bson(const bsoncxx::document::view& view, fc::mutable_variant_object& o); +void from_bson(const bsoncxx::array::view& bson_array, fc::variants& a); +template void from_bson(const T& ele, fc::variant& v); +fc::variant from_bson(const bsoncxx::document::view& view); +} // namespace eosio + +namespace eosio { + +void to_bson(const fc::variant_object& o, bsoncxx::builder::core& c) +{ + auto itr = o.begin(); + while (itr != o.end()) { + c.key_owned(itr->key()); + to_bson(itr->value(), c); + ++itr; + } +} + +void to_bson(const fc::variants& a, bsoncxx::builder::core& c) +{ + auto itr = a.begin(); + while (itr != a.end()) { + to_bson(*itr, c); + ++itr; + } +} + +void to_bson(const fc::variant& v, bsoncxx::builder::core& c) +{ + switch (v.get_type()) { + case fc::variant::null_type: { + c.append(bsoncxx::types::b_null{}); + return; + } + case fc::variant::int64_type: + case fc::variant::uint64_type: { + c.append(v.as_int64()); + return; + } + case fc::variant::double_type: + c.append(v.as_double()); + return; + case fc::variant::bool_type: + c.append(v.as_bool()); + return; + case fc::variant::string_type: { + c.append(v.as_string()); + return; + } + case fc::variant::blob_type: { + bsoncxx::types::b_binary bin; + bin.sub_type = bsoncxx::binary_sub_type::k_binary; + bin.size = v.as_blob().data.size(); + bin.bytes = reinterpret_cast(&(*v.as_blob().data.begin())); + c.append(bin); + return; + } + case fc::variant::array_type: { + const fc::variants& a = v.get_array(); + bsoncxx::builder::core sub(true); + to_bson(a, sub); + c.append(sub.extract_array()); + return; + } + case fc::variant::object_type: { + const fc::variant_object& o = v.get_object(); + if (o.size() == 1) { + const auto value = o.begin()->value(); + if (o.begin()->key().compare("$oid") == 0) { + if (value.get_type() == fc::variant::string_type + && value.as_string().size() == 12 * 2) { + bsoncxx::oid oid(value.as_string()); + c.append(oid); + break; + } + } + else if 
(o.begin()->key().compare("$date") == 0) { + if (value.get_type() == fc::variant::int64_type) { + bsoncxx::types::b_date date(std::chrono::milliseconds(value.as_int64())); + c.append(date); + break; + } + else if (value.get_type() == fc::variant::object_type) { + const fc::variant_object& obj = value.get_object(); + if (obj.size() == 1) { + auto number = obj.begin(); + if (number->key().compare("$numberLong") == 0) { + bsoncxx::types::b_date date(std::chrono::milliseconds(number->value().as_int64())); + c.append(date); + break; + } + } + } + } + else if (o.begin()->key().compare("$timestamp") == 0) { + if (value.get_type() == fc::variant::object_type) { + const fc::variant_object& obj = value.get_object(); + if (obj.size() == 2) { + auto t = obj.begin(); + auto i = t; + ++i; + if (t->key().compare("t") == 0 && i->key().compare("i") == 0) { + bsoncxx::types::b_timestamp ts; + ts.timestamp = static_cast(t->value().as_uint64()); + ts.increment = static_cast(i->value().as_uint64()); + c.append(ts); + break; + } + } + } + } + } + bsoncxx::builder::core sub(false); + to_bson(o, sub); + c.append(sub.extract_document()); + return; + } + default: + FC_THROW_EXCEPTION( + fc::invalid_arg_exception, + "Unsupported fc::variant type: " + std::to_string(v.get_type())); + } +} + +bsoncxx::document::value to_bson(const fc::variant& v) +{ + bsoncxx::builder::core doc(false); + if (v.get_type() == fc::variant::object_type) { + const fc::variant_object& o = v.get_object(); + to_bson(o, doc); + } + else if (v.get_type() != fc::variant::null_type) { + FC_THROW_EXCEPTION( + fc::invalid_arg_exception, + "Unsupported root fc::variant type: " + std::to_string(v.get_type())); + } + return doc.extract_document(); +} + +void from_bson(const bsoncxx::document::view& view, fc::mutable_variant_object& o) +{ + for (bsoncxx::document::element ele : view) { + fc::variant v; + from_bson(ele, v); + o(ele.key().data(), v); + } +} + +void from_bson(const bsoncxx::array::view& bson_array, fc::variants& a) +{ + a.reserve(std::distance(bson_array.cbegin(), bson_array.cend())); + for (bsoncxx::array::element ele : bson_array) { + fc::variant v; + from_bson(ele, v); + a.push_back(v); + } +} + +template +void from_bson(const T& ele, fc::variant& v) +{ + switch (ele.type()) { + case bsoncxx::type::k_double: + v = ele.get_double().value; + return; + case bsoncxx::type::k_utf8: + v = bsoncxx::string::to_string(ele.get_utf8().value); + return; + case bsoncxx::type::k_document: { + fc::mutable_variant_object o; + from_bson(ele.get_document().value, o); + v = o; + return; + } + case bsoncxx::type::k_array: { + bsoncxx::array::view sub_array{ele.get_array().value}; + fc::variants a; + from_bson(sub_array, a); + v = a; + return; + } + case bsoncxx::type::k_binary: { + fc::blob blob; + blob.data.resize(ele.get_binary().size); + std::copy(ele.get_binary().bytes, ele.get_binary().bytes + ele.get_binary().size, blob.data.begin()); + v = blob; + return; + } + case bsoncxx::type::k_undefined: + case bsoncxx::type::k_null: + v = fc::variant(); + return; + case bsoncxx::type::k_oid: + v = fc::variant_object("$oid", ele.get_oid().value.to_string()); + return; + case bsoncxx::type::k_bool: + v = ele.get_bool().value; + return; + case bsoncxx::type::k_date: + v = fc::variant_object("$date", ele.get_date().to_int64()); + return; + case bsoncxx::type::k_int32: + v = ele.get_int32().value; + return; + case bsoncxx::type::k_timestamp: + v = fc::variant_object("$timestamp", fc::mutable_variant_object("t", ele.get_timestamp().timestamp)("i", 
ele.get_timestamp().increment)); + return; + case bsoncxx::type::k_int64: + v = ele.get_int64().value; + return; + default: + FC_THROW_EXCEPTION( + fc::invalid_arg_exception, + "Unsupported BSON type: " + bsoncxx::to_string(ele.type())); + } +} + +fc::variant from_bson(const bsoncxx::document::view& view) +{ + fc::mutable_variant_object o; + from_bson(view, o); + return o; +} + +} // namespace eosio + From f0014e2c46f02fba3949aabc0a567fc2ad663822 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 25 Apr 2019 07:20:01 +0000 Subject: [PATCH 0214/1648] convert fc::variant to BSON directly --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 127 +++++++------------- 1 file changed, 41 insertions(+), 86 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index e05f1a01815..865ad8809a8 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -3,6 +3,7 @@ * @copyright defined in eos/LICENSE */ #include +#include #include #include #include @@ -22,9 +23,6 @@ #include #include -#include -#include -#include #include #include @@ -615,7 +613,7 @@ optional mongo_db_plugin_impl::get_abi_serializer( account_name abi_def abi; if( view.find( "abi" ) != view.end()) { try { - abi = fc::json::from_string( bsoncxx::to_json( view["abi"].get_document())).as(); + abi = from_bson( view["abi"].get_document() ).as(); } catch (...) { ilog( "Unable to convert account abi to abi_def for ${n}", ( "n", n )); return optional(); @@ -763,42 +761,34 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti trans_doc.append( kvp( "trx_id", trx_id_str ) ); auto v = to_variant_with_abi( trx ); - string trx_json = fc::json::to_string( v ); try { - const auto& trx_value = bsoncxx::from_json( trx_json ); + const auto& trx_value = to_bson( v ); trans_doc.append( bsoncxx::builder::concatenate_doc{trx_value.view()} ); - } catch( bsoncxx::exception& ) { - try { - trx_json = fc::prune_invalid_utf8( trx_json ); - const auto& trx_value = bsoncxx::from_json( trx_json ); - trans_doc.append( bsoncxx::builder::concatenate_doc{trx_value.view()} ); - trans_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert transaction JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", trx_json) ); - } + } catch( bsoncxx::exception& e) { + elog( "Unable to convert transaction to BSON: ${e}", ("e", e.what()) ); + elog( " JSON: ${j}", ("j", fc::json::to_string( v )) ); } - string signing_keys_json; + fc::variant signing_keys; if( t->signing_keys_future.valid() ) { - signing_keys_json = fc::json::to_string( std::get<2>( t->signing_keys_future.get() ) ); + signing_keys = std::get<2>( t->signing_keys_future.get() ); } else { flat_set keys; trx.get_signature_keys( *chain_id, fc::time_point::maximum(), keys, false ); if( !keys.empty() ) { - signing_keys_json = fc::json::to_string( keys ); + signing_keys = keys; } } - if( !signing_keys_json.empty() ) { + if( signing_keys.get_type() == fc::variant::array_type && signing_keys.get_array().size() > 0) { try { - const auto& keys_value = bsoncxx::from_json( signing_keys_json ); - trans_doc.append( kvp( "signing_keys", keys_value ) ); + bsoncxx::builder::core keys_value(true); + to_bson( signing_keys.get_array(), keys_value ); + trans_doc.append( kvp( "signing_keys", keys_value.extract_array() ) ); } catch( bsoncxx::exception& e ) { - // should never fail, so don't attempt to remove invalid utf8 - 
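With bson.hpp in place, the plugin can convert fc::variant values straight to BSON instead of going through a JSON string. A usage sketch of the two new entry points; the bson.hpp header path comes from the patch, while the fc helpers used here and the exact printed output are assumptions:

#include <eosio/mongo_db_plugin/bson.hpp>
#include <fc/variant_object.hpp>
#include <fc/io/json.hpp>
#include <iostream>

int main() {
   fc::variant v = fc::mutable_variant_object()
         ( "account", "alice" )
         ( "amount", int64_t(100) );

   bsoncxx::document::value doc = eosio::to_bson( v );        // fc::variant -> BSON
   fc::variant round_trip = eosio::from_bson( doc.view() );   // BSON -> fc::variant

   // expected to print something close to {"account":"alice","amount":100}
   std::cout << fc::json::to_string( round_trip ) << "\n";
}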
elog( "Unable to convert signing keys JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", signing_keys_json) ); + elog( "Unable to convert signing keys to BSON: ${e}", ("e", e.what()) ); + elog( " JSON: ${j}", ("j", fc::json::to_string(signing_keys)) ); } } @@ -844,20 +834,11 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces action_traces_doc.append( kvp( "_id", make_custom_oid() ) ); auto v = to_variant_with_abi( atrace ); - string json = fc::json::to_string( v ); try { - const auto& value = bsoncxx::from_json( json ); - action_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - } catch( bsoncxx::exception& ) { - try { - json = fc::prune_invalid_utf8( json ); - const auto& value = bsoncxx::from_json( json ); - action_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - action_traces_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert action trace JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", json) ); - } + action_traces_doc.append( bsoncxx::builder::concatenate_doc{to_bson( v )} ); + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert action trace to BSON: ${e}", ("e", e.what()) ); + elog( " JSON: ${j}", ("j", fc::json::to_string( v )) ); } if( t->receipt.valid() ) { action_traces_doc.append( kvp( "trx_status", std::string( t->receipt->status ) ) ); @@ -904,20 +885,11 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio if( store_transaction_traces && write_ttrace ) { try { auto v = to_variant_with_abi( *t ); - string json = fc::json::to_string( v ); try { - const auto& value = bsoncxx::from_json( json ); - trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - } catch( bsoncxx::exception& ) { - try { - json = fc::prune_invalid_utf8( json ); - const auto& value = bsoncxx::from_json( json ); - trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - trans_traces_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert transaction JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", json) ); - } + trans_traces_doc.append( bsoncxx::builder::concatenate_doc{to_bson( v )} ); + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert transaction to BSON: ${e}", ("e", e.what()) ); + elog( " JSON: ${j}", ("j", fc::json::to_string( v )) ); } trans_traces_doc.append( kvp( "createdAt", b_date{now} ) ); @@ -926,7 +898,7 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert trans ${id}", ("id", t->id) ); } } catch( ... ) { - handle_mongo_exception( "trans_traces insert: " + json, __LINE__ ); + handle_mongo_exception( "trans_traces insert: " + fc::json::to_string( v ), __LINE__ ); } } catch( ... 
) { handle_mongo_exception( "trans_traces serialization: " + t->id.str(), __LINE__ ); @@ -973,20 +945,11 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr const chain::block_header_state& bhs = *bs; - auto json = fc::json::to_string( bhs ); try { - const auto& value = bsoncxx::from_json( json ); - block_state_doc.append( kvp( "block_header_state", value ) ); - } catch( bsoncxx::exception& ) { - try { - json = fc::prune_invalid_utf8( json ); - const auto& value = bsoncxx::from_json( json ); - block_state_doc.append( kvp( "block_header_state", value ) ); - block_state_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert block_header_state JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", json) ); - } + block_state_doc.append( kvp( "block_header_state", to_bson( fc::variant(bhs) ) ) ); + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert block_header_state to BSON: ${e}", ("e", e.what()) ); + elog( " JSON: ${j}", ("j", fc::json::to_string( bhs )) ); } block_state_doc.append( kvp( "createdAt", b_date{now} ) ); @@ -1003,7 +966,7 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr } } } catch( ... ) { - handle_mongo_exception( "block_states insert: " + json, __LINE__ ); + handle_mongo_exception( "block_states insert: " + fc::json::to_string( bhs ), __LINE__ ); } } @@ -1013,20 +976,11 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr kvp( "block_id", block_id_str ) ); auto v = to_variant_with_abi( *bs->block ); - auto json = fc::json::to_string( v ); try { - const auto& value = bsoncxx::from_json( json ); - block_doc.append( kvp( "block", value ) ); - } catch( bsoncxx::exception& ) { - try { - json = fc::prune_invalid_utf8( json ); - const auto& value = bsoncxx::from_json( json ); - block_doc.append( kvp( "block", value ) ); - block_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert block JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", json) ); - } + block_doc.append( kvp( "block", to_bson( v ) ) ); + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert block to BSON: ${e}", ("e", e.what()) ); + elog( " JSON: ${j}", ("j", fc::json::to_string( v )) ); } block_doc.append( kvp( "createdAt", b_date{now} ) ); @@ -1043,7 +997,7 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr } } } catch( ... 
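The mongo_db_plugin changes above use the converter in two ways: wrap the converted variant under a key (as with block_header_state) or splice its fields into the enclosing document with concatenate_doc, and in both cases fall back to logging the JSON form if bsoncxx throws. A condensed sketch of that pattern; the function name and key are illustrative and the include paths are assumed:

#include <eosio/mongo_db_plugin/bson.hpp>
#include <bsoncxx/builder/basic/document.hpp>
#include <bsoncxx/builder/basic/kvp.hpp>
#include <bsoncxx/builder/concatenate.hpp>
#include <bsoncxx/exception/exception.hpp>
#include <fc/variant.hpp>

void append_variant_sketch( bsoncxx::builder::basic::document& doc, const fc::variant& v ) {
   using bsoncxx::builder::basic::kvp;
   try {
      doc.append( kvp( "block_header_state", eosio::to_bson( v ) ) );          // named sub-document
      doc.append( bsoncxx::builder::concatenate_doc{ eosio::to_bson( v ) } );  // fields spliced in-place
   } catch( const bsoncxx::exception& ) {
      // the real code logs e.what() plus fc::json::to_string( v ) here
   }
}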
) { - handle_mongo_exception( "blocks insert: " + json, __LINE__ ); + handle_mongo_exception( "blocks insert: " + fc::json::to_string( v ), __LINE__ ); } } } @@ -1321,11 +1275,11 @@ void mongo_db_plugin_impl::update_account(const chain::action& act) } if( account ) { abi_def abi_def = fc::raw::unpack( setabi.abi ); - const string json_str = fc::json::to_string( abi_def ); + auto v = fc::variant( abi_def ); - try{ + try { auto update_from = make_document( - kvp( "$set", make_document( kvp( "abi", bsoncxx::from_json( json_str )), + kvp( "$set", make_document( kvp( "abi", to_bson( v )), kvp( "updatedAt", b_date{now} )))); try { @@ -1337,8 +1291,8 @@ void mongo_db_plugin_impl::update_account(const chain::action& act) handle_mongo_exception( "account update", __LINE__ ); } } catch( bsoncxx::exception& e ) { - elog( "Unable to convert abi JSON to MongoDB JSON: ${e}", ("e", e.what())); - elog( " JSON: ${j}", ("j", json_str)); + elog( "Unable to convert abi JSON to BSON: ${e}", ("e", e.what())); + elog( " JSON: ${j}", ("j", fc::json::to_string( v ))); } } } @@ -1439,7 +1393,7 @@ void mongo_db_plugin_impl::init() { auto& mongo_conn = *client; auto accounts = mongo_conn[db_name][accounts_col]; - if( accounts.count( make_document()) == 0 ) { + if( accounts.estimated_document_count() == 0 ) { auto now = std::chrono::duration_cast( std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()} ); @@ -1725,3 +1679,4 @@ void mongo_db_plugin::plugin_shutdown() } } // namespace eosio + From 806338ee903414750aae2773d5ae5d015824e278 Mon Sep 17 00:00:00 2001 From: UMU618 Date: Fri, 26 Apr 2019 17:20:52 +0800 Subject: [PATCH 0215/1648] * fix normalizeJsonObject --- tests/Node.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/Node.py b/tests/Node.py index fc519a41569..0dc753c7de2 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -170,6 +170,7 @@ def normalizeJsonObject(extJStr): tmpStr=re.sub(r'ObjectId\("(\w+)"\)', r'"ObjectId-\1"', tmpStr) tmpStr=re.sub(r'ISODate\("([\w|\-|\:|\.]+)"\)', r'"ISODate-\1"', tmpStr) tmpStr=re.sub(r'NumberLong\("(\w+)"\)', r'"NumberLong-\1"', tmpStr) + tmpStr=re.sub(r'NumberLong\((\w+)\)', r'\1', tmpStr) return tmpStr @staticmethod From e2b77b11524f2ba910214c5dd0237d7a9bb3189e Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Sun, 28 Apr 2019 04:20:43 -0400 Subject: [PATCH 0216/1648] embedding eos-vm --- libraries/CMakeLists.txt | 3 ++ libraries/chain/CMakeLists.txt | 4 +- .../eosio/chain/webassembly/eos-vm.hpp | 32 ++++++++++++ libraries/chain/webassembly/eos-vm.cpp | 51 +++++++++++++++++++ 4 files changed, 89 insertions(+), 1 deletion(-) create mode 100644 libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp create mode 100644 libraries/chain/webassembly/eos-vm.cpp diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 54bb2f80e09..292e7021f46 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -14,6 +14,9 @@ set(RUN_RE2C OFF CACHE BOOL "Run re2c") set(WITH_EXCEPTIONS ON CACHE BOOL "Build with exceptions enabled" FORCE) add_subdirectory( wabt ) +set(USE_EXISTING_SOFTFLOAT 1) +add_subdirectory( eos-vm ) + set(ENABLE_STATIC ON) set(CMAKE_MACOSX_RPATH OFF) set(BUILD_ONLY_LIB ON CACHE BOOL "Library only build") diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 254d462c5ed..2b424855263 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -39,6 +39,7 @@ add_library( eosio_chain webassembly/wavm.cpp webassembly/wabt.cpp + webassembly/eos-vm.cpp # 
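PATCH 0215 above extends Node.py's normalizeJsonObject so that unquoted NumberLong(...) wrappers in mongo shell output are reduced to the bare number. The test itself is Python; purely as an illustration, the same substitution written with C++ std::regex looks like this:

#include <iostream>
#include <regex>
#include <string>

int main() {
   std::string s = R"({"lib_num": NumberLong(1234)})";
   // mirrors re.sub(r'NumberLong\((\w+)\)', r'\1', tmpStr) from the patch
   s = std::regex_replace( s, std::regex( R"(NumberLong\((\w+)\))" ), "$1" );
   std::cout << s << "\n";   // {"lib_num": 1234}
}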
get_config.cpp # @@ -56,11 +57,12 @@ add_library( eosio_chain ) target_link_libraries( eosio_chain fc chainbase Logging IR WAST WASM Runtime - softfloat builtins wabt + softfloat builtins wabt eos-vm ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../wasm-jit/Include" + "${CMAKE_CURRENT_SOURCE_DIR}/libraries/eos-vm/include" "${CMAKE_SOURCE_DIR}/libraries/wabt" "${CMAKE_BINARY_DIR}/libraries/wabt" ) diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp new file mode 100644 index 00000000000..c60eac758a0 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp @@ -0,0 +1,32 @@ +#pragma once + +#include +#include +#include +#include +#include + +//eos-vm includes +#include + +namespace eosio { namespace chain { namespace webassembly { namespace eos_vm_runtime { + +using namespace fc; +using namespace eosio::wasm_backend; +using namespace eosio::chain::webassembly::common; + +class eos_vm_runtime : public eosio::chain::wasm_runtime_interface { + public: + eos_vm_runtime(); + std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector) override; + + void immediately_exit_currently_running_module() override; + + private: + backend* _bkend; // non owning pointer to allow for immediate exit +}; + +#define _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ + eosio::wasm_backend::registered_host_functions::add<&CLS::METHOD, eosio::wasm_backend::backend>(#MOD, #NAME); + +} } } }// eosio::chain::webassembly::wabt_runtime diff --git a/libraries/chain/webassembly/eos-vm.cpp b/libraries/chain/webassembly/eos-vm.cpp new file mode 100644 index 00000000000..04882cb6fb3 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm.cpp @@ -0,0 +1,51 @@ +#include +#include +#include + +//eos-vm includes +#include + +namespace eosio { namespace chain { namespace webassembly { namespace eos_vm_runtime { + +using namespace eosio::wasm_backend; + +namespace wasm_constraints = eosio::chain::wasm_constraints; + +using backend_t = backend>; + +class eos_vm_instantiated_module : public wasm_instantiated_module_interface { + public: + + eos_vm_instantiated_module(std::unique_ptr mod) : + _instantiated_module(std::move(mod)) {} + + void apply(apply_context& context) override { + _instantiated_module->set_wasm_allocator( context.get_wasm_allocator() ); + if (!(const auto& res = _instantiated_module->run_start())) + EOS_ASSERT(false, wasm_execution_error, "eos-vm start function failure (${s})", ("s", res.to_string())); + + if (!(const auto& res = _instantiated_module(context.get_registered_host_functions(), "env", "apply", + (uint64_t)context.get_receiver(), + (uint64_t)context.get_action().account(), + (uint64_t)context.get_action().name)) + EOS_ASSERT(false, wasm_execution_error, "eos-vm execution failure (${s})", ("s", res.to_string())); + } + + private: + std::unique_ptr _instantiated_module; +}; + +eos_vm_runtime::eos_vm_runtime() {} + +std::unique_ptr eos_vm_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector) { + std::vector cb((uint8_t*)code_bytes, (uint8_t*)code_bytes+code_size); + std::unique_ptr bkend = std::make_unique( cb ); + _bkend = bkend.get(); + return std::make_unique(std::move(bkend)); +} + +void eos_vm_runtime::immediately_exit_currently_running_module() { + _bkend->immediate_exit(); +} + +}}}} From 
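The eos-vm embedding in PATCH 0216 above plugs into the existing two-interface shape: a runtime that turns raw code bytes into a module, and an instantiated module whose apply() runs the contract entry point. A compile-only sketch of that shape; these mini-interfaces and toy classes are illustrative, not the real wasm_interface headers, which also pass an apply_context and the per-thread wasm allocator:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct instantiated_module_sketch {
   virtual ~instantiated_module_sketch() = default;
   virtual void apply( uint64_t receiver, uint64_t code, uint64_t action ) = 0;
};

struct runtime_sketch {
   virtual ~runtime_sketch() = default;
   virtual std::unique_ptr<instantiated_module_sketch>
      instantiate_module( const char* code_bytes, std::size_t code_size ) = 0;
};

struct toy_eos_vm_module : instantiated_module_sketch {
   std::vector<uint8_t> wasm;
   void apply( uint64_t receiver, uint64_t code, uint64_t action ) override {
      // the real module attaches the thread's wasm allocator and then calls
      // the contract's exported env:apply(receiver, code, action)
      std::cout << "apply(" << receiver << ", " << code << ", " << action << ")\n";
   }
};

struct toy_eos_vm_runtime : runtime_sketch {
   std::unique_ptr<instantiated_module_sketch>
      instantiate_module( const char* code_bytes, std::size_t code_size ) override {
      auto m = std::make_unique<toy_eos_vm_module>();
      m->wasm.assign( code_bytes, code_bytes + code_size );   // the real backend parses/validates here
      return m;
   }
};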
75b9ca6f22d77f6be70903862b13642475ad47a5 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 29 Apr 2019 01:19:13 -0400 Subject: [PATCH 0217/1648] changes needed for new backend --- .../include/eosio/chain/wasm_interface.hpp | 9 +- .../eosio/chain/wasm_interface_private.hpp | 17 +++- .../eosio/chain/webassembly/common.hpp | 3 + .../eosio/chain/webassembly/eos-vm.hpp | 51 +++++++++- libraries/chain/wasm_interface.cpp | 96 ++++++++++--------- libraries/chain/webassembly/eos-vm.cpp | 31 +++--- .../testing/include/eosio/testing/tester.hpp | 2 + plugins/chain_plugin/chain_plugin.cpp | 2 +- 8 files changed, 139 insertions(+), 72 deletions(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 341331989e5..7c2bfa9eb75 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #include "Runtime/Linker.h" #include "Runtime/Runtime.h" @@ -73,7 +74,8 @@ namespace eosio { namespace chain { public: enum class vm_type { wavm, - wabt + wabt, + eos_vm }; wasm_interface(vm_type vm, const chainbase::database& db); @@ -84,6 +86,9 @@ namespace eosio { namespace chain { //validates code -- does a WASM validation pass and checks the wasm against EOSIO specific constraints static void validate(const controller& control, const bytes& code); + + //get the wasm_allocator used for the linear memory for wasm + static wasm_backend::wasm_allocator* get_wasm_allocator(); //indicate that a particular code probably won't be used after given block_num void code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num); @@ -108,4 +113,4 @@ namespace eosio{ namespace chain { std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime); }} -FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(wabt) ) +FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(wabt)(eos_vm) ) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 45f70460b02..cf8df0afa9e 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -16,10 +17,14 @@ #include "WAST/WAST.h" #include "IR/Validate.h" +#include + using namespace fc; using namespace eosio::chain::webassembly; +using namespace eosio::wasm_backend; using namespace IR; using namespace Runtime; + using boost::multi_index_container; namespace eosio { namespace chain { @@ -42,6 +47,8 @@ namespace eosio { namespace chain { runtime_interface = std::make_unique(); else if(vm == wasm_interface::vm_type::wabt) runtime_interface = std::make_unique(); + else if(vm == wasm_interface::vm_type::eos_vm) + runtime_interface = std::make_unique(); else EOS_THROW(wasm_exception, "wasm_interface_impl fall through"); } @@ -54,6 +61,11 @@ namespace eosio { namespace chain { }); } + static wasm_allocator* get_wasm_allocator() { + thread_local wasm_allocator walloc; + return &walloc; + } + std::vector parse_initial_memory(const Module& module) { std::vector mem_image; @@ -167,8 +179,9 @@ namespace eosio { namespace chain { }; #define _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - _REGISTER_WAVM_INTRINSIC(CLS, MOD, METHOD, 
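wasm_interface_private.hpp above exposes the eos-vm linear-memory allocator through a thread_local getter, so each chain thread reuses one allocator across contract invocations. A stripped-down sketch of that pattern; allocator_sketch is a placeholder for eosio::wasm_backend::wasm_allocator:

#include <cstddef>

// Placeholder for the real wasm_allocator: owns the linear memory handed to
// whichever wasm module is executing on this thread.
struct allocator_sketch {
   void*       base  = nullptr;
   std::size_t pages = 0;
};

static allocator_sketch* get_wasm_allocator_sketch() {
   thread_local allocator_sketch walloc;   // one instance per thread, reused across calls
   return &walloc;
}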
WASM_SIG, NAME, SIG)\ - _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) + _REGISTER_WAVM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) \ + _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) \ + _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) #define _REGISTER_INTRINSIC4(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG ) diff --git a/libraries/chain/include/eosio/chain/webassembly/common.hpp b/libraries/chain/include/eosio/chain/webassembly/common.hpp index 723bd3039b6..542d3c5b37e 100644 --- a/libraries/chain/include/eosio/chain/webassembly/common.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/common.hpp @@ -58,6 +58,8 @@ namespace eosio { namespace chain { */ template struct array_ptr { + using type = T; + array_ptr() = default; explicit array_ptr (T * value) : value(value) {} typename std::add_lvalue_reference::type operator*() const { @@ -80,6 +82,7 @@ namespace eosio { namespace chain { * class to represent an in-wasm-memory char array that must be null terminated */ struct null_terminated_ptr { + null_terminated_ptr() = default; explicit null_terminated_ptr(char* value) : value(value) {} typename std::add_lvalue_reference::type operator*() const { diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp index c60eac758a0..fbd2b996b2d 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp @@ -9,6 +9,45 @@ //eos-vm includes #include +// eosio specific specializations +namespace eosio { namespace wasm_backend { + template <> + struct reduce_type { + typedef uint64_t type; + }; + + template + constexpr auto get_value(Backend& backend, T&& val) -> std::enable_if_t && + std::is_same_v>, S> { + return {(uint64_t)val.data.ui}; + } + // we can clean these up if we go with custom vms + template + struct reduce_type> { + typedef uint32_t type; + }; + + template + constexpr auto get_value(Backend& backend, T&& val) -> std::enable_if_t && + std::is_same_v< eosio::chain::array_ptr, S> && + !std::is_lvalue_reference_v && !std::is_pointer_v, S> { + return eosio::chain::array_ptr((typename S::type*)(backend.get_wasm_allocator()->template get_base_ptr()+val.data.ui)); + } + + template <> + struct reduce_type { + typedef uint32_t type; + }; + + template + constexpr auto get_value(Backend& backend, T&& val) -> std::enable_if_t && + std::is_same_v< eosio::chain::null_terminated_ptr, S> && + !std::is_lvalue_reference_v && !std::is_pointer_v, S> { + return eosio::chain::null_terminated_ptr((char*)(backend.get_wasm_allocator()->template get_base_ptr()+val.data.ui)); + } + +}} // ns eosio::wasm_backend + namespace eosio { namespace chain { namespace webassembly { namespace eos_vm_runtime { using namespace fc; @@ -20,13 +59,17 @@ class eos_vm_runtime : public eosio::chain::wasm_runtime_interface { eos_vm_runtime(); std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector) override; - void immediately_exit_currently_running_module() override; + void immediately_exit_currently_running_module() override { _bkend->immediate_exit(); } private: - backend* _bkend; // non owning pointer to allow for immediate exit + backend* _bkend; // non owning pointer to allow for immediate exit }; +} } } }// eosio::chain::webassembly::wabt_runtime + +#define __EOS_VM_INTRINSIC_NAME(LBL, SUF) LBL##SUF +#define 
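The get_value specializations above are what let host intrinsics receive array_ptr<T> and null_terminated_ptr arguments: the wasm side passes a 32-bit offset into linear memory, and the host adds it to the allocator's base pointer. Conceptually it is just this translation (the real code must also validate length, bounds, and alignment, which this sketch omits):

#include <cstdint>

// Illustrative translation of a wasm i32 offset into a host pointer.
// linear_memory_base is the start of the module's linear memory; offset
// comes off the wasm operand stack.
template<typename T>
T* wasm_offset_to_host_ptr( std::uint8_t* linear_memory_base, std::uint32_t offset ) {
   return reinterpret_cast<T*>( linear_memory_base + offset );
}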
_EOS_VM_INTRINSIC_NAME(LBL, SUF) __INTRINSIC_NAME(LBL, SUF) + #define _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - eosio::wasm_backend::registered_host_functions::add<&CLS::METHOD, eosio::wasm_backend::backend>(#MOD, #NAME); + eosio::wasm_backend::registered_function> _EOS_VM_INTRINSIC_NAME(__eos_vm_intrinsic_fn, __COUNTER__){MOD, NAME}; -} } } }// eosio::chain::webassembly::wabt_runtime diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 079983c89e9..ba616952216 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -25,6 +25,8 @@ #include #include +#include + namespace eosio { namespace chain { using namespace webassembly; using namespace webassembly::common; @@ -33,6 +35,8 @@ namespace eosio { namespace chain { wasm_interface::~wasm_interface() {} + wasm_allocator* wasm_interface::get_wasm_allocator() { return wasm_interface_impl::get_wasm_allocator(); } + void wasm_interface::validate(const controller& control, const bytes& code) { Module module; try { @@ -246,19 +250,19 @@ class softfloat_api : public context_aware_api { #pragma GCC diagnostic ignored "-Wstrict-aliasing" // float binops float _eosio_f32_add( float a, float b ) { - float32_t ret = f32_add( to_softfloat32(a), to_softfloat32(b) ); + float32_t ret = ::f32_add( to_softfloat32(a), to_softfloat32(b) ); return *reinterpret_cast(&ret); } float _eosio_f32_sub( float a, float b ) { - float32_t ret = f32_sub( to_softfloat32(a), to_softfloat32(b) ); + float32_t ret = ::f32_sub( to_softfloat32(a), to_softfloat32(b) ); return *reinterpret_cast(&ret); } float _eosio_f32_div( float a, float b ) { - float32_t ret = f32_div( to_softfloat32(a), to_softfloat32(b) ); + float32_t ret = ::f32_div( to_softfloat32(a), to_softfloat32(b) ); return *reinterpret_cast(&ret); } float _eosio_f32_mul( float a, float b ) { - float32_t ret = f32_mul( to_softfloat32(a), to_softfloat32(b) ); + float32_t ret = ::f32_mul( to_softfloat32(a), to_softfloat32(b) ); return *reinterpret_cast(&ret); } #pragma GCC diagnostic pop @@ -274,7 +278,7 @@ class softfloat_api : public context_aware_api { if ( f32_sign_bit(a) != f32_sign_bit(b) ) { return f32_sign_bit(a) ? af : bf; } - return f32_lt(a,b) ? af : bf; + return ::f32_lt(a,b) ? af : bf; } float _eosio_f32_max( float af, float bf ) { float32_t a = to_softfloat32(af); @@ -288,7 +292,7 @@ class softfloat_api : public context_aware_api { if ( f32_sign_bit(a) != f32_sign_bit(b) ) { return f32_sign_bit(a) ? bf : af; } - return f32_lt( a, b ) ? bf : af; + return ::f32_lt( a, b ) ? bf : af; } float _eosio_f32_copysign( float af, float bf ) { float32_t a = to_softfloat32(af); @@ -313,7 +317,7 @@ class softfloat_api : public context_aware_api { return from_softfloat32(a); } float _eosio_f32_sqrt( float a ) { - float32_t ret = f32_sqrt( to_softfloat32(a) ); + float32_t ret = ::f32_sqrt( to_softfloat32(a) ); return from_softfloat32(ret); } // ceil, floor, trunc and nearest are lifted from libc @@ -382,19 +386,19 @@ class softfloat_api : public context_aware_api { if (e >= 0x7f+23) return af; if (s) - y = f32_add( f32_sub( a, float32_t{inv_float_eps} ), float32_t{inv_float_eps} ); + y = ::f32_add( ::f32_sub( a, float32_t{inv_float_eps} ), float32_t{inv_float_eps} ); else - y = f32_sub( f32_add( a, float32_t{inv_float_eps} ), float32_t{inv_float_eps} ); - if (f32_eq( y, {0} ) ) + y = ::f32_sub( ::f32_add( a, float32_t{inv_float_eps} ), float32_t{inv_float_eps} ); + if (::f32_eq( y, {0} ) ) return s ? 
-0.0f : 0.0f; return from_softfloat32(y); } // float relops - bool _eosio_f32_eq( float a, float b ) { return f32_eq( to_softfloat32(a), to_softfloat32(b) ); } - bool _eosio_f32_ne( float a, float b ) { return !f32_eq( to_softfloat32(a), to_softfloat32(b) ); } - bool _eosio_f32_lt( float a, float b ) { return f32_lt( to_softfloat32(a), to_softfloat32(b) ); } - bool _eosio_f32_le( float a, float b ) { return f32_le( to_softfloat32(a), to_softfloat32(b) ); } + bool _eosio_f32_eq( float a, float b ) { return ::f32_eq( to_softfloat32(a), to_softfloat32(b) ); } + bool _eosio_f32_ne( float a, float b ) { return !::f32_eq( to_softfloat32(a), to_softfloat32(b) ); } + bool _eosio_f32_lt( float a, float b ) { return ::f32_lt( to_softfloat32(a), to_softfloat32(b) ); } + bool _eosio_f32_le( float a, float b ) { return ::f32_le( to_softfloat32(a), to_softfloat32(b) ); } bool _eosio_f32_gt( float af, float bf ) { float32_t a = to_softfloat32(af); float32_t b = to_softfloat32(bf); @@ -402,7 +406,7 @@ class softfloat_api : public context_aware_api { return false; if (is_nan(b)) return false; - return !f32_le( a, b ); + return !::f32_le( a, b ); } bool _eosio_f32_ge( float af, float bf ) { float32_t a = to_softfloat32(af); @@ -411,24 +415,24 @@ class softfloat_api : public context_aware_api { return false; if (is_nan(b)) return false; - return !f32_lt( a, b ); + return !::f32_lt( a, b ); } // double binops double _eosio_f64_add( double a, double b ) { - float64_t ret = f64_add( to_softfloat64(a), to_softfloat64(b) ); + float64_t ret = ::f64_add( to_softfloat64(a), to_softfloat64(b) ); return from_softfloat64(ret); } double _eosio_f64_sub( double a, double b ) { - float64_t ret = f64_sub( to_softfloat64(a), to_softfloat64(b) ); + float64_t ret = ::f64_sub( to_softfloat64(a), to_softfloat64(b) ); return from_softfloat64(ret); } double _eosio_f64_div( double a, double b ) { - float64_t ret = f64_div( to_softfloat64(a), to_softfloat64(b) ); + float64_t ret = ::f64_div( to_softfloat64(a), to_softfloat64(b) ); return from_softfloat64(ret); } double _eosio_f64_mul( double a, double b ) { - float64_t ret = f64_mul( to_softfloat64(a), to_softfloat64(b) ); + float64_t ret = ::f64_mul( to_softfloat64(a), to_softfloat64(b) ); return from_softfloat64(ret); } double _eosio_f64_min( double af, double bf ) { @@ -440,7 +444,7 @@ class softfloat_api : public context_aware_api { return bf; if (f64_sign_bit(a) != f64_sign_bit(b)) return f64_sign_bit(a) ? af : bf; - return f64_lt( a, b ) ? af : bf; + return ::f64_lt( a, b ) ? af : bf; } double _eosio_f64_max( double af, double bf ) { float64_t a = to_softfloat64(af); @@ -451,7 +455,7 @@ class softfloat_api : public context_aware_api { return bf; if (f64_sign_bit(a) != f64_sign_bit(b)) return f64_sign_bit(a) ? bf : af; - return f64_lt( a, b ) ? bf : af; + return ::f64_lt( a, b ) ? 
bf : af; } double _eosio_f64_copysign( double af, double bf ) { float64_t a = to_softfloat64(af); @@ -477,7 +481,7 @@ class softfloat_api : public context_aware_api { return from_softfloat64(a); } double _eosio_f64_sqrt( double a ) { - float64_t ret = f64_sqrt( to_softfloat64(a) ); + float64_t ret = ::f64_sqrt( to_softfloat64(a) ); return from_softfloat64(ret); } // ceil, floor, trunc and nearest are lifted from libc @@ -486,22 +490,22 @@ class softfloat_api : public context_aware_api { float64_t ret; int e = a.v >> 52 & 0x7ff; float64_t y; - if (e >= 0x3ff+52 || f64_eq( a, { 0 } )) + if (e >= 0x3ff+52 || ::f64_eq( a, { 0 } )) return af; /* y = int(x) - x, where int(x) is an integer neighbor of x */ if (a.v >> 63) - y = f64_sub( f64_add( f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); + y = ::f64_sub( ::f64_add( ::f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); else - y = f64_sub( f64_sub( f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); + y = ::f64_sub( ::f64_sub( ::f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); /* special case because of non-nearest rounding modes */ if (e <= 0x3ff-1) { return a.v >> 63 ? -0.0 : 1.0; //float64_t{0x8000000000000000} : float64_t{0xBE99999A3F800000}; //either -0.0 or 1 } - if (f64_lt( y, to_softfloat64(0) )) { - ret = f64_add( f64_add( a, y ), to_softfloat64(1) ); // 0xBE99999A3F800000 } ); // plus 1 + if (::f64_lt( y, to_softfloat64(0) )) { + ret = ::f64_add( ::f64_add( a, y ), to_softfloat64(1) ); // 0xBE99999A3F800000 } ); // plus 1 return from_softfloat64(ret); } - ret = f64_add( a, y ); + ret = ::f64_add( a, y ); return from_softfloat64(ret); } double _eosio_f64_floor( double af ) { @@ -517,17 +521,17 @@ class softfloat_api : public context_aware_api { return af; } if (a.v >> 63) - y = f64_sub( f64_add( f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); + y = ::f64_sub( ::f64_add( ::f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); else - y = f64_sub( f64_sub( f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); + y = ::f64_sub( ::f64_sub( ::f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); if (e <= 0x3FF-1) { return a.v>>63 ? -1.0 : 0.0; //float64_t{0xBFF0000000000000} : float64_t{0}; // -1 or 0 } - if ( !f64_le( y, float64_t{0} ) ) { - ret = f64_sub( f64_add(a,y), to_softfloat64(1.0)); + if ( !::f64_le( y, float64_t{0} ) ) { + ret = ::f64_sub( ::f64_add(a,y), to_softfloat64(1.0)); return from_softfloat64(ret); } - ret = f64_add( a, y ); + ret = ::f64_add( a, y ); return from_softfloat64(ret); } double _eosio_f64_trunc( double af ) { @@ -553,19 +557,19 @@ class softfloat_api : public context_aware_api { if ( e >= 0x3FF+52 ) return af; if ( s ) - y = f64_add( f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ); + y = ::f64_add( ::f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ); else - y = f64_sub( f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ); - if ( f64_eq( y, float64_t{0} ) ) + y = ::f64_sub( ::f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ); + if ( ::f64_eq( y, float64_t{0} ) ) return s ? 
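The softfloat edits in this patch only add a leading :: to calls such as f32_add and f64_lt, presumably to pin them to the global-scope C softfloat functions now that the eos-vm headers make other names with the same spelling visible. A tiny illustration of the lookup problem being avoided:

#include <iostream>

int f32_add( int a, int b ) { return a + b; }     // stands in for the global softfloat function

struct softfloat_api_sketch {
   int f32_add( int, int ) { return -1; }          // a member (or newly visible name) that shadows it
   int call_unqualified( int a, int b ) { return f32_add( a, b ); }   // finds the member: -1
   int call_qualified( int a, int b )   { return ::f32_add( a, b ); } // finds the global: a + b
};

int main() {
   softfloat_api_sketch s;
   std::cout << s.call_unqualified( 1, 2 ) << " vs " << s.call_qualified( 1, 2 ) << "\n"; // -1 vs 3
}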
-0.0 : 0.0; return from_softfloat64(y); } // double relops - bool _eosio_f64_eq( double a, double b ) { return f64_eq( to_softfloat64(a), to_softfloat64(b) ); } - bool _eosio_f64_ne( double a, double b ) { return !f64_eq( to_softfloat64(a), to_softfloat64(b) ); } - bool _eosio_f64_lt( double a, double b ) { return f64_lt( to_softfloat64(a), to_softfloat64(b) ); } - bool _eosio_f64_le( double a, double b ) { return f64_le( to_softfloat64(a), to_softfloat64(b) ); } + bool _eosio_f64_eq( double a, double b ) { return ::f64_eq( to_softfloat64(a), to_softfloat64(b) ); } + bool _eosio_f64_ne( double a, double b ) { return !::f64_eq( to_softfloat64(a), to_softfloat64(b) ); } + bool _eosio_f64_lt( double a, double b ) { return ::f64_lt( to_softfloat64(a), to_softfloat64(b) ); } + bool _eosio_f64_le( double a, double b ) { return ::f64_le( to_softfloat64(a), to_softfloat64(b) ); } bool _eosio_f64_gt( double af, double bf ) { float64_t a = to_softfloat64(af); float64_t b = to_softfloat64(bf); @@ -573,7 +577,7 @@ class softfloat_api : public context_aware_api { return false; if (is_nan(b)) return false; - return !f64_le( a, b ); + return !::f64_le( a, b ); } bool _eosio_f64_ge( double af, double bf ) { float64_t a = to_softfloat64(af); @@ -582,7 +586,7 @@ class softfloat_api : public context_aware_api { return false; if (is_nan(b)) return false; - return !f64_lt( a, b ); + return !::f64_lt( a, b ); } // float and double conversions @@ -906,8 +910,8 @@ class authorization_api : public context_aware_api { return context.has_authorization( account ); } - void require_authorization(const account_name& account, - const permission_name& permission) { + void require_authorization2(const account_name& account, + const permission_name& permission) { context.require_authorization( account, permission ); } @@ -1846,7 +1850,7 @@ REGISTER_INTRINSICS(action_api, REGISTER_INTRINSICS(authorization_api, (require_recipient, void(int64_t) ) (require_authorization, void(int64_t), "require_auth", void(authorization_api::*)(const account_name&) ) - (require_authorization, void(int64_t, int64_t), "require_auth2", void(authorization_api::*)(const account_name&, const permission_name& permission) ) + (require_authorization2, void(int64_t, int64_t), "require_auth2", void(authorization_api::*)(const account_name&, const permission_name& permission) ) (has_authorization, int(int64_t), "has_auth", bool(authorization_api::*)(const account_name&)const ) (is_account, int(int64_t) ) ); diff --git a/libraries/chain/webassembly/eos-vm.cpp b/libraries/chain/webassembly/eos-vm.cpp index 04882cb6fb3..176a1383fa5 100644 --- a/libraries/chain/webassembly/eos-vm.cpp +++ b/libraries/chain/webassembly/eos-vm.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include @@ -11,7 +11,7 @@ using namespace eosio::wasm_backend; namespace wasm_constraints = eosio::chain::wasm_constraints; -using backend_t = backend>; +using backend_t = backend; class eos_vm_instantiated_module : public wasm_instantiated_module_interface { public: @@ -20,15 +20,15 @@ class eos_vm_instantiated_module : public wasm_instantiated_module_interface { _instantiated_module(std::move(mod)) {} void apply(apply_context& context) override { - _instantiated_module->set_wasm_allocator( context.get_wasm_allocator() ); - if (!(const auto& res = _instantiated_module->run_start())) - EOS_ASSERT(false, wasm_execution_error, "eos-vm start function failure (${s})", ("s", res.to_string())); - - if (!(const auto& res = _instantiated_module(context.get_registered_host_functions(), "env", 
"apply", - (uint64_t)context.get_receiver(), - (uint64_t)context.get_action().account(), - (uint64_t)context.get_action().name)) - EOS_ASSERT(false, wasm_execution_error, "eos-vm execution failure (${s})", ("s", res.to_string())); + _instantiated_module->set_wasm_allocator( wasm_interface::get_wasm_allocator() ); + //if (!(const auto& res = _instantiated_module->run_start())) + // EOS_ASSERT(false, wasm_execution_error, "eos-vm start function failure (${s})", ("s", res.to_string())); + + const auto& res = _instantiated_module->call(&context, "env", "apply", + (uint64_t)context.get_receiver(), + (uint64_t)context.get_action().account, + (uint64_t)context.get_action().name); + //EOS_ASSERT(res, wasm_execution_error, "eos-vm execution failure (${s})", ("s", res.to_string())); } private: @@ -39,13 +39,10 @@ eos_vm_runtime::eos_vm_runtime() {} std::unique_ptr eos_vm_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector) { std::vector cb((uint8_t*)code_bytes, (uint8_t*)code_bytes+code_size); - std::unique_ptr bkend = std::make_unique( cb ); + std::unique_ptr bkend = std::make_unique( cb ); + registered_host_functions::resolve(bkend->get_module()); _bkend = bkend.get(); - return std::make_unique(std::move(bkend)); -} - -void eos_vm_runtime::immediately_exit_currently_running_module() { - _bkend->immediate_exit(); + return std::make_unique(std::move(bkend)); } }}}} diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index cda857ff2c2..83140682834 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -387,6 +387,8 @@ namespace eosio { namespace testing { vcfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) vcfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; + else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--eos-vm")) + vcfg.wasm_runtime = chain::wasm_interface::vm_type::eos_vm; } return vcfg; } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 4c0ef6acdb0..27bc99aad9f 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -213,7 +213,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("protocol-features-dir", bpo::value()->default_value("protocol_features"), "the location of the protocol_features directory (absolute path or relative to application config dir)") ("checkpoint", bpo::value>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") - ("wasm-runtime", bpo::value()->value_name("wavm/wabt"), "Override default WASM runtime") + ("wasm-runtime", bpo::value()->value_name("wavm/wabt/eos-vm"), "Override default WASM runtime") ("abi-serializer-max-time-ms", bpo::value()->default_value(config::default_abi_serializer_max_time_ms), "Override default maximum ABI serialization time allowed in ms") ("chain-state-db-size-mb", bpo::value()->default_value(config::default_state_size / (1024 * 1024)), "Maximum size (in MiB) of the chain state database") From 15d31ffb8003c3d06a701f43016ac5a981364dfe Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 29 Apr 2019 11:04:57 -0400 Subject: [PATCH 0218/1648] bump version to 1.9.0-develop --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 
abba4ccde42..8c6753dd7a5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,9 +30,9 @@ set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 8) +set(VERSION_MINOR 9) set(VERSION_PATCH 0) -set(VERSION_SUFFIX rc1) +set(VERSION_SUFFIX develop) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") From d62678921ed624027b3ef01a099df2e457675923 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 29 Apr 2019 13:13:27 -0400 Subject: [PATCH 0219/1648] solved issue with linking --- libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp index fbd2b996b2d..01a3ea19483 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp @@ -70,6 +70,7 @@ class eos_vm_runtime : public eosio::chain::wasm_runtime_interface { #define __EOS_VM_INTRINSIC_NAME(LBL, SUF) LBL##SUF #define _EOS_VM_INTRINSIC_NAME(LBL, SUF) __INTRINSIC_NAME(LBL, SUF) -#define _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - eosio::wasm_backend::registered_function> _EOS_VM_INTRINSIC_NAME(__eos_vm_intrinsic_fn, __COUNTER__){MOD, NAME}; +#define _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) \ + eosio::wasm_backend::registered_function> _EOS_VM_INTRINSIC_NAME(__eos_vm_intrinsic_fn, __COUNTER__)(std::string(MOD), std::string(NAME)); + From a01d6471330d48c7b52891ed83ef4ac165e4a406 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 29 Apr 2019 13:25:05 -0400 Subject: [PATCH 0220/1648] Remove unused cpack bits; these are not being used --- CMakeLists.txt | 4 --- CMakeModules/installer.cmake | 49 ------------------------------ debian/CMakeLists.txt | 1 - debian/postinst | 58 ------------------------------------ 4 files changed, 112 deletions(-) delete mode 100644 CMakeModules/installer.cmake delete mode 100644 debian/CMakeLists.txt delete mode 100755 debian/postinst diff --git a/CMakeLists.txt b/CMakeLists.txt index 8c6753dd7a5..cd7896c6154 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -43,9 +43,6 @@ endif() set( CLI_CLIENT_EXECUTABLE_NAME cleos ) set( NODE_EXECUTABLE_NAME nodeos ) set( KEY_STORE_EXECUTABLE_NAME keosd ) -set( GUI_CLIENT_EXECUTABLE_NAME eosio ) -set( CUSTOM_URL_SCHEME "gcs" ) -set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) # http://stackoverflow.com/a/18369825 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") @@ -221,7 +218,6 @@ add_subdirectory( scripts ) add_subdirectory( unittests ) add_subdirectory( tests ) add_subdirectory( tools ) -add_subdirectory( debian ) install_directory_permissions(DIRECTORY ${CMAKE_INSTALL_FULL_SYSCONFDIR}/eosio) diff --git a/CMakeModules/installer.cmake b/CMakeModules/installer.cmake deleted file mode 100644 index e4ca2b7e01f..00000000000 --- a/CMakeModules/installer.cmake +++ /dev/null @@ -1,49 +0,0 @@ -include(InstallRequiredSystemLibraries) - -#install_directory_permissions( DIRECTORY usr/${CMAKE_INSTALL_INCLUDEDIR}/eosio ) - -set(CPACK_PACKAGE_CONTACT "support@block.one") -set(CPACK_OUTPUT_FILE_PREFIX ${CMAKE_BINARY_DIR}/packages) -if(NOT DEFINED CMAKE_INSTALL_PREFIX) - set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install) -endif() - -SET(CPACK_PACKAGE_DIRECTORY "${CMAKE_BINARY_DIR}/install") 
-set(CPACK_PACKAGE_NAME "EOS.IO") -set(CPACK_PACKAGE_VENDOR "block.one") -set(CPACK_PACKAGE_VERSION_MAJOR "${VERSION_MAJOR}") -set(CPACK_PACKAGE_VERSION_MINOR "${VERSION_MINOR}") -set(CPACK_PACKAGE_VERSION_PATCH "${VERSION_PATCH}") -set(CPACK_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}") -set(CPACK_PACKAGE_DESCRIPTION "Software for the EOS.IO network") -set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Software for the EOS.IO network") -set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") -set(CPACK_PACKAGE_INSTALL_DIRECTORY "EOS.IO ${CPACK_PACKAGE_VERSION}") - -if(WIN32) - set(CPACK_GENERATOR "ZIP;NSIS") - set(CPACK_NSIS_EXECUTABLES_DIRECTORY .) - set(CPACK_NSIS_PACKAGE_NAME "EOS.IO v${CPACK_PACKAGE_VERSION}") - set(CPACK_NSIS_DISPLAY_NAME "${CPACK_NSIS_PACKAGE_NAME}") - set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"EOS.IO\\\"") - # windows zip files usually don't have a single directory inside them, unix tgz usually do - set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY 0) -elseif(APPLE) - set(CPACK_GENERATOR "DragNDrop") -else() - set(CPACK_GENERATOR "DEB") - set(CPACK_DEBIAN_PACKAGE_RELEASE 0) - if(CMAKE_VERSION VERSION_GREATER 3.6.0) # Buggy in 3.5, behaves like VERSION_GREATER_EQUAL - set(CPACK_DEBIAN_FILE_NAME "DEB-DEFAULT") - else() - string(TOLOWER ${CPACK_PACKAGE_NAME} CPACK_DEBIAN_PACKAGE_NAME) - execute_process(COMMAND dpkg --print-architecture OUTPUT_VARIABLE CPACK_DEBIAN_PACKAGE_ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE) - SET(CPACK_PACKAGE_FILE_NAME ${CPACK_DEBIAN_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}-${CPACK_DEBIAN_PACKAGE_RELEASE}_${CPACK_DEBIAN_PACKAGE_ARCHITECTURE}) - endif() - set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) - set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY TRUE) - set(CPACK_DEBIAN_PACKAGE_CONTROL_STRICT_PERMISSION TRUE) - set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/EOSIO/eos") -endif() - -include(CPack) diff --git a/debian/CMakeLists.txt b/debian/CMakeLists.txt deleted file mode 100644 index d56b72cde2a..00000000000 --- a/debian/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_CURRENT_SOURCE_DIR}/postinst" PARENT_SCOPE) diff --git a/debian/postinst b/debian/postinst deleted file mode 100755 index 42123695c90..00000000000 --- a/debian/postinst +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh -# postinst script for eosio - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -set -e - -PACKAGE="eosio" -USER="eosio" -GROUP=${USER} - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see http://www.debian.org/doc/debian-policy/ or -# the debian-policy package - -# source debconf library -. /usr/share/debconf/confmodule - -case "$1" in - - configure) - set +e - getent passwd ${USER} > /dev/null 2>&1 - if [ $? 
-ne 0 ]; then - adduser --no-create-home --group --system ${USER} - fi - set -e - chown ${USER}:${GROUP} /var/log/${PACKAGE} - chown ${USER}:${GROUP} /var/lib/${PACKAGE} - chown ${USER}:${GROUP} /etc/${PACKAGE} - chown ${USER}:${GROUP} /etc/${PACKAGE}/node_00 - chown ${USER} /usr/bin/nodeos - chmod u+s /usr/bin/nodeos - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - exit 0 - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; - -esac - -exit 0 From cc0092bc948d2fab18e5c26f257e6f598d0bce5e Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 29 Apr 2019 19:28:32 -0400 Subject: [PATCH 0221/1648] new problems --- .../eosio/chain/webassembly/eos-vm.hpp | 33 ++++++++++++++----- libraries/chain/webassembly/eos-vm.cpp | 10 ++++-- 2 files changed, 32 insertions(+), 11 deletions(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp index 01a3ea19483..f6d12ca7406 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp @@ -16,8 +16,8 @@ namespace eosio { namespace wasm_backend { typedef uint64_t type; }; - template - constexpr auto get_value(Backend& backend, T&& val) -> std::enable_if_t && + template + constexpr auto get_value(eosio::wasm_backend::operand_stack& op, Cleanups&, Backend& backend, T&& val) -> std::enable_if_t && std::is_same_v>, S> { return {(uint64_t)val.data.ui}; } @@ -27,11 +27,26 @@ namespace eosio { namespace wasm_backend { typedef uint32_t type; }; - template - constexpr auto get_value(Backend& backend, T&& val) -> std::enable_if_t && + template + constexpr auto get_value(eosio::wasm_backend::operand_stack& op, Cleanups& cleanups, Backend& backend, T&& val) -> std::enable_if_t && std::is_same_v< eosio::chain::array_ptr, S> && - !std::is_lvalue_reference_v && !std::is_pointer_v, S> { - return eosio::chain::array_ptr((typename S::type*)(backend.get_wasm_allocator()->template get_base_ptr()+val.data.ui)); + !std::is_lvalue_reference_v && !std::is_pointer_v, S> { + size_t i = std::tuple_size::value-1; + auto* ptr = (*((std::remove_reference_t*)(backend.get_wasm_allocator()->template get_base_ptr()+val.data.ui))); + if constexpr (std::tuple_size::value > I) { + const auto& len = std::get::type>>(op.get_back(i-I)); + if ((uintptr_t)ptr % alignof(S) != 0) { + align_ptr_triple apt; + apt.s = sizeof(S)*len; + std::vector> cpy(len > 0 ? 
len : 1); + apt.o = ptr; + S* ptr = &cpy[0]; + apt.n = ptr; + memcpy(apt.n, apt.o, apt.s); + cleanups.emplace_back(std::move(apt)); + } + } + return eosio::chain::array_ptr(ptr); } template <> @@ -39,9 +54,9 @@ namespace eosio { namespace wasm_backend { typedef uint32_t type; }; - template - constexpr auto get_value(Backend& backend, T&& val) -> std::enable_if_t && - std::is_same_v< eosio::chain::null_terminated_ptr, S> && + template + constexpr auto get_value(eosio::wasm_backend::operand_stack& op, Cleanups&, Backend& backend, T&& val) -> std::enable_if_t && + std::is_same_v< eosio::chain::null_terminated_ptr, S> && !std::is_lvalue_reference_v && !std::is_pointer_v, S> { return eosio::chain::null_terminated_ptr((char*)(backend.get_wasm_allocator()->template get_base_ptr()+val.data.ui)); } diff --git a/libraries/chain/webassembly/eos-vm.cpp b/libraries/chain/webassembly/eos-vm.cpp index 176a1383fa5..8693aaf4931 100644 --- a/libraries/chain/webassembly/eos-vm.cpp +++ b/libraries/chain/webassembly/eos-vm.cpp @@ -1,7 +1,7 @@ #include #include #include - +#include //eos-vm includes #include @@ -38,7 +38,13 @@ class eos_vm_instantiated_module : public wasm_instantiated_module_interface { eos_vm_runtime::eos_vm_runtime() {} std::unique_ptr eos_vm_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector) { - std::vector cb((uint8_t*)code_bytes, (uint8_t*)code_bytes+code_size); + //std::vector cb((uint8_t*)code_bytes, (uint8_t*)code_bytes+code_size); + std::vector cb; + cb.resize(code_size); + memcpy(cb.data(), code_bytes, code_size); + std::ofstream mf("temp.wasm"); + mf.write((char*)cb.data(), cb.size()); + mf.close(); std::unique_ptr bkend = std::make_unique( cb ); registered_host_functions::resolve(bkend->get_module()); _bkend = bkend.get(); From f546b2676021bfcde60f9a158def714c14cc402a Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 25 Apr 2019 17:00:50 -0400 Subject: [PATCH 0222/1648] Created pipeline configuration file --- pipeline.jsonc | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 pipeline.jsonc diff --git a/pipeline.jsonc b/pipeline.jsonc new file mode 100644 index 00000000000..1db80900c51 --- /dev/null +++ b/pipeline.jsonc @@ -0,0 +1,50 @@ +{ + "eosio": + { + "pipeline-branch": "legacy-os", + "environment": + { + "DEBUG": "false", + "PIPELINE_BRANCH" : "legacy-os", + "BUILDKITE_LABEL" : "Ubuntu 18.04 Unit Tests", + } + }, + "eosio-dot-cdt": + { + "pipeline-branch": "master", + "dependencies": // dependencies to pull for cdt integration tests, by branch, tag, or commit hash + { + "eosio": "release/1.7.x" + }, + "environment": + { + + } + }, + "eosio-dot-contracts": + { + "pipeline-branch": "master", + "dependencies": // dependencies to pull for a build of contracts, by branch, tag, or commit hash + { + "eosio": "9dfd8e2e1b2ab9acc9087b5c6a0bb878b4ad1d0f", + "eosio.cdt": "release/1.6.x" + } + }, + "eos-multiversion-tests": + { + "pipeline-branch": "master", + "configuration": + [ + "17=release/1.7.x", + "16=release/1.6.x", + ] + }, + "eosio-sync-tests": + { + "pipeline-branch": "protocol-features-sync-nodes", + "environment": + { + "DEBUG": "false", + } + }, +} \ No newline at end of file From 9e88287fe4d24c739521313fea355a684ba02d76 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 25 Apr 2019 18:32:28 -0400 Subject: [PATCH 0223/1648] Fix commas --- pipeline.jsonc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pipeline.jsonc b/pipeline.jsonc index 
1db80900c51..af12c9570b9 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -6,7 +6,7 @@ { "DEBUG": "false", "PIPELINE_BRANCH" : "legacy-os", - "BUILDKITE_LABEL" : "Ubuntu 18.04 Unit Tests", + "BUILDKITE_LABEL" : "Ubuntu 18.04 Unit Tests" } }, "eosio-dot-cdt": @@ -36,7 +36,7 @@ "configuration": [ "17=release/1.7.x", - "16=release/1.6.x", + "16=release/1.6.x" ] }, "eosio-sync-tests": @@ -44,7 +44,7 @@ "pipeline-branch": "protocol-features-sync-nodes", "environment": { - "DEBUG": "false", + "DEBUG": "false" } - }, + } } \ No newline at end of file From c46cb9ac4a6a143c972357b3ae97437d7cdc7305 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 26 Apr 2019 00:21:41 -0400 Subject: [PATCH 0224/1648] Updated multiversion configuration --- pipeline.jsonc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pipeline.jsonc b/pipeline.jsonc index af12c9570b9..c9f9729263a 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -35,8 +35,7 @@ "pipeline-branch": "master", "configuration": [ - "17=release/1.7.x", - "16=release/1.6.x" + "170=v1.7.0" ] }, "eosio-sync-tests": From c930318ec8cc9b0ef1efac6b04d99bb08706a60b Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Mon, 29 Apr 2019 19:29:55 -0400 Subject: [PATCH 0225/1648] Multiversion tests now point to Areg's sync nodes branch --- pipeline.jsonc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipeline.jsonc b/pipeline.jsonc index c9f9729263a..15449bb271d 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -32,7 +32,7 @@ }, "eos-multiversion-tests": { - "pipeline-branch": "master", + "pipeline-branch": "protocol-features-sync-nodes", "configuration": [ "170=v1.7.0" From 80fcfe28ae26c184a02b352b5139874081ef32cc Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Mon, 29 Apr 2019 19:38:17 -0400 Subject: [PATCH 0226/1648] Pipeline config needs a separate object for my beta testing pipeline --- pipeline.jsonc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pipeline.jsonc b/pipeline.jsonc index 15449bb271d..546e27ff7a0 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -38,6 +38,14 @@ "170=v1.7.0" ] }, + "eos-multiversion-beta-tests": + { + "pipeline-branch": "protocol-features-sync-nodes", + "configuration": + [ + "170=v1.7.0" + ] + }, "eosio-sync-tests": { "pipeline-branch": "protocol-features-sync-nodes", From ca5cd6a74c783d346f2cb3870eaf9fb014fd4a92 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Mon, 29 Apr 2019 20:10:53 -0400 Subject: [PATCH 0227/1648] Pipeline config points to correct multiversion pipeline branch --- .pipelinebranch | 1 - pipeline.jsonc | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 .pipelinebranch diff --git a/.pipelinebranch b/.pipelinebranch deleted file mode 100644 index e763e7d7fdc..00000000000 --- a/.pipelinebranch +++ /dev/null @@ -1 +0,0 @@ -protocol-features-sync-nodes diff --git a/pipeline.jsonc b/pipeline.jsonc index 546e27ff7a0..fb048d07359 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -40,7 +40,7 @@ }, "eos-multiversion-beta-tests": { - "pipeline-branch": "protocol-features-sync-nodes", + "pipeline-branch": "zach-pipeline-config-pfsn", "configuration": [ "170=v1.7.0" From c6dd8c8094925fab689756e0c0fcac369c2d14dd Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 29 Apr 2019 20:41:28 -0400 Subject: [PATCH 0228/1648] fixed issue --- .../include/eosio/chain/webassembly/eos-vm.hpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp 
b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp index f6d12ca7406..e195f9822bc 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp @@ -32,16 +32,17 @@ namespace eosio { namespace wasm_backend { std::is_same_v< eosio::chain::array_ptr, S> && !std::is_lvalue_reference_v && !std::is_pointer_v, S> { size_t i = std::tuple_size::value-1; - auto* ptr = (*((std::remove_reference_t*)(backend.get_wasm_allocator()->template get_base_ptr()+val.data.ui))); + using ptr_ty = typename S::type; + auto* ptr = (ptr_ty*)(backend.get_wasm_allocator()->template get_base_ptr()+val.data.ui); if constexpr (std::tuple_size::value > I) { - const auto& len = std::get::type>>(op.get_back(i-I)); + const auto& len = std::get::type>>(op.get_back(i-I)).data.ui; if ((uintptr_t)ptr % alignof(S) != 0) { align_ptr_triple apt; apt.s = sizeof(S)*len; - std::vector> cpy(len > 0 ? len : 1); - apt.o = ptr; - S* ptr = &cpy[0]; - apt.n = ptr; + std::vector::type> cpy(len > 0 ? len : 1); + apt.o = (void*)ptr; + ptr = &cpy[0]; + apt.n = (void*)ptr; memcpy(apt.n, apt.o, apt.s); cleanups.emplace_back(std::move(apt)); } From 231798f1f2f6cdb2aff432829650ab453a537b01 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Mon, 29 Apr 2019 20:54:08 -0400 Subject: [PATCH 0229/1648] Pipeline configuration file now only has eos stuff --- pipeline.jsonc | 30 +----------------------------- tests/multiversion.conf | 2 -- 2 files changed, 1 insertion(+), 31 deletions(-) delete mode 100644 tests/multiversion.conf diff --git a/pipeline.jsonc b/pipeline.jsonc index fb048d07359..cda5a7ae4d8 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -1,33 +1,9 @@ { "eosio": - { - "pipeline-branch": "legacy-os", - "environment": - { - "DEBUG": "false", - "PIPELINE_BRANCH" : "legacy-os", - "BUILDKITE_LABEL" : "Ubuntu 18.04 Unit Tests" - } - }, - "eosio-dot-cdt": { "pipeline-branch": "master", - "dependencies": // dependencies to pull for cdt integration tests, by branch, tag, or commit hash - { - "eosio": "release/1.7.x" - }, "environment": { - - } - }, - "eosio-dot-contracts": - { - "pipeline-branch": "master", - "dependencies": // dependencies to pull for a build of contracts, by branch, tag, or commit hash - { - "eosio": "9dfd8e2e1b2ab9acc9087b5c6a0bb878b4ad1d0f", - "eosio.cdt": "release/1.6.x" } }, "eos-multiversion-tests": @@ -48,10 +24,6 @@ }, "eosio-sync-tests": { - "pipeline-branch": "protocol-features-sync-nodes", - "environment": - { - "DEBUG": "false" - } + "pipeline-branch": "protocol-features-sync-nodes" } } \ No newline at end of file diff --git a/tests/multiversion.conf b/tests/multiversion.conf deleted file mode 100644 index 544263173a9..00000000000 --- a/tests/multiversion.conf +++ /dev/null @@ -1,2 +0,0 @@ -[eosio] -170=v1.7.0 From 1d3f79fada3d6aedabf4e52cb6d49d14c89a79ec Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Tue, 30 Apr 2019 02:39:33 -0400 Subject: [PATCH 0230/1648] still working on it --- .../include/eosio/chain/webassembly/wabt.hpp | 25 +++++++++++++++++++ .../include/eosio/chain/webassembly/wavm.hpp | 25 +++++++++++++++++++ libraries/chain/wasm_interface.cpp | 1 + libraries/chain/webassembly/eos-vm.cpp | 3 ++- 4 files changed, 53 insertions(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp index 31456dc1dda..040505b014b 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp +++ 
b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp @@ -686,23 +686,48 @@ struct intrinsic_function_invoker { template struct intrinsic_function_invoker_wrapper; +template +struct void_ret_wrapper { + using type = T; +}; + +template<> +struct void_ret_wrapper { + using type = char; +}; + +template +using void_ret_wrapper_t = typename void_ret_wrapper::type; + template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; diff --git a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp index 5bce9db8b40..04c36e8f153 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp @@ -675,26 +675,51 @@ struct intrinsic_function_invoker { } }; +template +struct void_ret_wrapper { + using type = T; +}; + +template<> +struct void_ret_wrapper { + using type = char; +}; + +template +using void_ret_wrapper_t = typename void_ret_wrapper::type; + template struct intrinsic_function_invoker_wrapper; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 
1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index ba616952216..3b7fda46a5a 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -1010,6 +1010,7 @@ class action_api : public context_aware_api { } int action_data_size() { + std::cout << "ADS " << context.get_action().data.size() << "\n"; return context.get_action().data.size(); } diff --git a/libraries/chain/webassembly/eos-vm.cpp b/libraries/chain/webassembly/eos-vm.cpp index 8693aaf4931..1b2deb1bff5 100644 --- a/libraries/chain/webassembly/eos-vm.cpp +++ b/libraries/chain/webassembly/eos-vm.cpp @@ -23,7 +23,8 @@ class eos_vm_instantiated_module : public wasm_instantiated_module_interface { _instantiated_module->set_wasm_allocator( wasm_interface::get_wasm_allocator() ); //if (!(const auto& res = _instantiated_module->run_start())) // EOS_ASSERT(false, wasm_execution_error, "eos-vm start function failure (${s})", ("s", res.to_string())); - + + std::cout << "CTX " << &context << "\n"; const auto& res = _instantiated_module->call(&context, "env", "apply", (uint64_t)context.get_receiver(), (uint64_t)context.get_action().account, From fe508006931aa48ac5547845ea72c62fc315845b Mon Sep 17 00:00:00 2001 From: Kayan Date: Tue, 30 Apr 2019 18:12:50 +0800 Subject: [PATCH 0231/1648] add signal tests --- unittests/signal_tests.cpp | 409 +++++++++++++++++++++++++++++++++++++ 1 file changed, 409 insertions(+) create mode 100644 unittests/signal_tests.cpp diff --git a/unittests/signal_tests.cpp b/unittests/signal_tests.cpp new file mode 100644 index 00000000000..cd078638be1 --- /dev/null +++ b/unittests/signal_tests.cpp @@ -0,0 +1,409 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include "fork_test_utilities.hpp" + +#include + +#define TESTER tester + +using namespace eosio::chain; +using namespace eosio::testing; + +class signal_tester : public base_tester { +public: + void init(const controller::config &config) { + cfg = config; + control.reset( new controller(cfg, make_protocol_feature_set()) ); + control->add_indices(); + } + + void startup() { + control->startup( []() { return false; }, nullptr); + chain_transactions.clear(); + control->accepted_block.connect([this]( const block_state_ptr& block_state ){ + FC_ASSERT( block_state->block ); + for( const auto& receipt : block_state->block->transactions ) { + if( receipt.trx.contains() ) { + auto &pt = receipt.trx.get(); + chain_transactions[pt.get_transaction().id()] = receipt; + } else { + auto& id = receipt.trx.get(); + chain_transactions[id] = receipt; + } + } + }); + } + + signal_tester(controller::config config, int ordinal) { + FC_ASSERT(config.blocks_dir.filename().generic_string() != "." 
+ && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config"); + + controller::config copied_config = config; + copied_config.blocks_dir = + config.blocks_dir.parent_path() / std::to_string(ordinal).append(config.blocks_dir.filename().generic_string()); + copied_config.state_dir = + config.state_dir.parent_path() / std::to_string(ordinal).append(config.state_dir.filename().generic_string()); + + init(copied_config); + } + + signal_tester(controller::config config, int ordinal, int copy_block_log_from_ordinal) { + FC_ASSERT(config.blocks_dir.filename().generic_string() != "." + && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config"); + + controller::config copied_config = config; + copied_config.blocks_dir = + config.blocks_dir.parent_path() / std::to_string(ordinal).append(config.blocks_dir.filename().generic_string()); + copied_config.state_dir = + config.state_dir.parent_path() / std::to_string(ordinal).append(config.state_dir.filename().generic_string()); + + // create a copy of the desired block log and reversible + auto block_log_path = config.blocks_dir.parent_path() / std::to_string(copy_block_log_from_ordinal).append(config.blocks_dir.filename().generic_string()); + fc::create_directories(copied_config.blocks_dir); + fc::copy(block_log_path / "blocks.log", copied_config.blocks_dir / "blocks.log"); + fc::copy(block_log_path / config::reversible_blocks_dir_name, copied_config.blocks_dir / config::reversible_blocks_dir_name ); + + init(copied_config); + } + + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + return _produce_block(skip_time, false); + } + + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + control->abort_block(); + return _produce_block(skip_time, true); + } + + signed_block_ptr finish_block()override { + return _finish_block(); + } + + bool validate() { return true; } +}; + + +BOOST_AUTO_TEST_SUITE(signal_tests) + +BOOST_AUTO_TEST_CASE(no_signal) +{ + TESTER test; + test.produce_block(); +} + +BOOST_AUTO_TEST_CASE(signal_basic_replay) +{ + controller::config conf; + + { + TESTER c; + conf = c.get_config(); + } + + signal_tester test(conf, 1); + test.startup(); + + test.produce_block(); + + { + int irr_count = 0; + uint32_t last_irr_blocknum = 0; + + int pre_acc_blk_count = 0; + uint32_t last_pre_acc_num = 0; + + int acc_blk_hdr_count = 0; + uint32_t last_acc_blk_hdr_num = 0; + + int acc_blk_count = 0; + uint32_t last_acc_blk_num = 0; + + int acc_txn_count = 0; + transaction_id_type last_acc_txn_id; + + int app_txn_count = 0; + transaction_id_type last_app_txn_id; + transaction_trace_ptr last_signal_trace; + + test.control->irreversible_block.connect([&](const block_state_ptr& bsp) { + ++irr_count; + if (last_irr_blocknum) { + BOOST_CHECK_EQUAL(bsp->block_num, last_irr_blocknum + 1); + } + last_irr_blocknum = bsp->block_num; + }); + + test.control->pre_accepted_block.connect([&](const signed_block_ptr& sbp) { // <- never invoked + ++pre_acc_blk_count; + last_pre_acc_num = sbp->block_num(); + }); + + test.control->accepted_block_header.connect([&](const block_state_ptr& sbp) { + ++acc_blk_hdr_count; + last_acc_blk_hdr_num = sbp->block_num; + BOOST_CHECK_EQUAL(acc_blk_hdr_count, acc_blk_count + 1); // ensure ordering + }); + + test.control->accepted_block.connect([&](const block_state_ptr& sbp) { + ++acc_blk_count; + last_acc_blk_num = 
sbp->block_num; + }); + + test.control->accepted_transaction.connect([&](const transaction_metadata_ptr& ptr) { + ++acc_txn_count; + last_acc_txn_id = ptr->id; + }); + + test.control->applied_transaction.connect([&](std::tuple t) { + ++app_txn_count; + last_app_txn_id = std::get<0>(t)->id; + last_signal_trace = std::get<0>(t); + }); + + auto trace = test.create_account(N(abc), N(eosio)); + BOOST_CHECK_EQUAL(trace, last_signal_trace); + BOOST_CHECK_EQUAL(acc_txn_count, 1); + BOOST_CHECK_EQUAL(app_txn_count, 1); + + test.produce_block(); + BOOST_CHECK_EQUAL(irr_count, 1); + BOOST_CHECK_EQUAL(last_irr_blocknum, test.control->last_irreversible_block_num()); + BOOST_CHECK_EQUAL(pre_acc_blk_count, 0); // <-- no pre accepted signal if blocks are produced + BOOST_CHECK_EQUAL(acc_blk_hdr_count, 1); + BOOST_CHECK_EQUAL(last_acc_blk_hdr_num, test.control->head_block_num()); + BOOST_CHECK_EQUAL(acc_blk_count, 1); + BOOST_CHECK_EQUAL(last_acc_blk_num, test.control->head_block_num()); + + test.produce_block(); + BOOST_CHECK_EQUAL(irr_count, 2); + BOOST_CHECK_EQUAL(last_irr_blocknum, test.control->last_irreversible_block_num()); + BOOST_CHECK_EQUAL(pre_acc_blk_count, 0); // <-- no pre accepted signal if blocks are produced + BOOST_CHECK_EQUAL(acc_blk_hdr_count, 2); + BOOST_CHECK_EQUAL(last_acc_blk_hdr_num, test.control->head_block_num()); + BOOST_CHECK_EQUAL(acc_blk_count, 2); + BOOST_CHECK_EQUAL(last_acc_blk_num, test.control->head_block_num()); + } + + signal_tester replaychain(conf, 2, 1); + + // test replay + { + int irr_count = 0; + uint32_t last_irr_blocknum = 0; + + int pre_acc_blk_count = 0; + uint32_t last_pre_acc_num = 0; + + int acc_blk_hdr_count = 0; + uint32_t last_acc_blk_hdr_num = 0; + + int acc_blk_count = 0; + uint32_t last_acc_blk_num = 0; + + int acc_txn_count = 0; + transaction_id_type last_acc_txn_id; + + int app_txn_count = 0; + transaction_id_type last_app_txn_id; + transaction_trace_ptr last_signal_trace; + + replaychain.control->irreversible_block.connect([&](const block_state_ptr& bsp) { + ++irr_count; + if (last_irr_blocknum) { + BOOST_CHECK_EQUAL(bsp->block_num, last_irr_blocknum + 1); + } + last_irr_blocknum = bsp->block_num; + }); + + replaychain.control->pre_accepted_block.connect([&](const signed_block_ptr& sbp) { + ++pre_acc_blk_count; + last_pre_acc_num = sbp->block_num(); + BOOST_CHECK_EQUAL(pre_acc_blk_count, acc_blk_hdr_count + 1); // ensure ordering + }); + + replaychain.control->accepted_block_header.connect([&](const block_state_ptr& sbp) { + ++acc_blk_hdr_count; + last_acc_blk_hdr_num = sbp->block_num; + BOOST_CHECK_EQUAL(acc_blk_hdr_count, acc_blk_count + 1); // ensure ordering + }); + + replaychain.control->accepted_block.connect([&](const block_state_ptr& sbp) { + ++acc_blk_count; + last_acc_blk_num = sbp->block_num; + }); + + replaychain.control->accepted_transaction.connect([&](const transaction_metadata_ptr& ptr) { + ++acc_txn_count; + last_acc_txn_id = ptr->id; + }); + + replaychain.control->applied_transaction.connect([&](std::tuple t) { + ++app_txn_count; + last_app_txn_id = std::get<0>(t)->id; + last_signal_trace = std::get<0>(t); + }); + + replaychain.startup(); + + BOOST_CHECK_EQUAL(acc_txn_count, 3); + BOOST_CHECK_EQUAL(app_txn_count, 3); + + BOOST_CHECK_EQUAL(irr_count, 2); + BOOST_CHECK_EQUAL(last_irr_blocknum, replaychain.control->last_irreversible_block_num()); + BOOST_CHECK_EQUAL(pre_acc_blk_count, 2); // replay has pre_acc signal + BOOST_CHECK_EQUAL(last_pre_acc_num, replaychain.control->head_block_num()); + BOOST_CHECK_EQUAL(acc_blk_hdr_count, 
2); + BOOST_CHECK_EQUAL(last_acc_blk_hdr_num, replaychain.control->head_block_num()); + BOOST_CHECK_EQUAL(acc_blk_count, 2); + BOOST_CHECK_EQUAL(last_acc_blk_num, replaychain.control->head_block_num()); + } +} + +BOOST_AUTO_TEST_CASE(irreversible_block_multi_producers) +{ + int irr_count = 0; + uint32_t last_irr_blocknum = 0; + TESTER test; + test.produce_block(); + test.control->irreversible_block.connect([&](const block_state_ptr& bsp) { + ++irr_count; + if (last_irr_blocknum) { + BOOST_CHECK_EQUAL(bsp->block_num, last_irr_blocknum + 1); + } + last_irr_blocknum = bsp->block_num; + }); + test.produce_block(); + BOOST_CHECK_EQUAL(irr_count, 1); + + test.create_accounts( {N(dan),N(sam),N(pam),N(scott)} ); + test.set_producers( {N(dan),N(sam),N(pam),N(scott)} ); + test.produce_blocks(100); + + BOOST_CHECK_EQUAL(irr_count, 57); + BOOST_CHECK_EQUAL(last_irr_blocknum, test.control->last_irreversible_block_num()); +} + +BOOST_AUTO_TEST_CASE(signal_fork) +{ + int irr_count = 0; + uint32_t last_irr_blocknum = 0; + + int pre_acc_blk_count = 0; + uint32_t last_pre_acc_num = 0; + + int acc_blk_hdr_count = 0; + uint32_t last_acc_blk_hdr_num = 0; + + int acc_blk_count = 0; + uint32_t last_acc_blk_num = 0; + + TESTER c; + c.produce_block(); + c.control->irreversible_block.connect([&](const block_state_ptr& bsp) { + ++irr_count; + if (last_irr_blocknum) { + BOOST_CHECK_EQUAL(bsp->block_num, last_irr_blocknum + 1); + } + last_irr_blocknum = bsp->block_num; + }); + + c.control->pre_accepted_block.connect([&](const signed_block_ptr& sbp) { + ++pre_acc_blk_count; + last_pre_acc_num = sbp->block_num(); + }); + + c.control->accepted_block_header.connect([&](const block_state_ptr& sbp) { + ++acc_blk_hdr_count; + last_acc_blk_hdr_num = sbp->block_num; + BOOST_CHECK_EQUAL(acc_blk_hdr_count, acc_blk_count + 1); // ensure ordering + }); + + c.control->accepted_block.connect([&](const block_state_ptr& sbp) { + ++acc_blk_count; + last_acc_blk_num = sbp->block_num; + }); + + c.produce_block(); + BOOST_CHECK_EQUAL(irr_count, 1); + + c.create_accounts( {N(dan),N(sam),N(pam),N(scott)} ); + c.set_producers( {N(dan),N(sam),N(pam),N(scott)} ); + c.produce_blocks(100); + + BOOST_CHECK_EQUAL(irr_count, 57); + BOOST_CHECK_EQUAL(last_irr_blocknum, c.control->last_irreversible_block_num()); + + BOOST_CHECK_EQUAL(pre_acc_blk_count, 0); + //BOOST_CHECK_EQUAL(last_pre_acc_num, c.control->head_block_num()); + BOOST_CHECK_EQUAL(acc_blk_hdr_count, 101); + BOOST_CHECK_EQUAL(last_acc_blk_hdr_num, c.control->head_block_num()); + BOOST_CHECK_EQUAL(acc_blk_count, 101); + BOOST_CHECK_EQUAL(last_acc_blk_num, c.control->head_block_num()); + + TESTER c2; + push_blocks(c, c2); + + BOOST_REQUIRE_EQUAL(c.control->head_block_num(), c2.control->head_block_num()); + + uint32_t fork_num = c.control->head_block_num(); + + auto nextproducer = [](TESTER &c, int skip_interval) ->account_name { + auto head_time = c.control->head_block_time(); + auto next_time = head_time + fc::milliseconds(config::block_interval_ms * skip_interval); + return c.control->head_block_state()->get_scheduled_producer(next_time).producer_name; + }; + + // fork c: 1 producers: dan + // fork c2: 3 producer: sam, pam, scott + int skip1 = 1, skip2 = 1; + for (int i = 0; i < 200; ++i) { + account_name next1 = nextproducer(c, skip1); + if (next1 == N(dan)) { + c.produce_block(fc::milliseconds(config::block_interval_ms * skip1)); skip1 = 1; + } + else ++skip1; + account_name next2 = nextproducer(c2, skip2); + if (next2 == N(sam) || next2 == N(pam) || next2 == N(scott)) { + 
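// fork c2 advances only when one of its three producers (sam, pam, scott) is the
// scheduled producer; otherwise skip2 widens the time gap to the next slot they own,
// so c (dan only) and c2 diverge into independent forks past fork_num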
c2.produce_block(fc::milliseconds(config::block_interval_ms * skip2)); skip2 = 1; + } + else ++skip2; + } + BOOST_CHECK_EQUAL(last_irr_blocknum, c.control->last_irreversible_block_num()); + BOOST_REQUIRE_EQUAL(last_irr_blocknum <= fork_num, true); + BOOST_CHECK_EQUAL(pre_acc_blk_count, 0); + //BOOST_CHECK_EQUAL(last_pre_acc_num, c.control->head_block_num()); + BOOST_CHECK_EQUAL(acc_blk_hdr_count, 152); + BOOST_CHECK_EQUAL(last_acc_blk_hdr_num, c.control->head_block_num()); + BOOST_CHECK_EQUAL(acc_blk_count, 152); + BOOST_CHECK_EQUAL(last_acc_blk_num, c.control->head_block_num()); + + // push fork from c2 => c + size_t p = fork_num; + size_t count = 0; + while ( p < c2.control->head_block_num()) { + signed_block_ptr fb = c2.control->fetch_block_by_number(++p); + c.push_block(fb); + BOOST_CHECK_EQUAL(pre_acc_blk_count, ++count); + BOOST_CHECK_EQUAL(last_pre_acc_num, fb->block_num()); + } + + BOOST_CHECK_EQUAL(last_irr_blocknum, c.control->last_irreversible_block_num()); + BOOST_REQUIRE_EQUAL(last_irr_blocknum > fork_num, true); +} + +BOOST_AUTO_TEST_SUITE_END() From 06d083e83e3edf3166512c5d44b0e4f60431242b Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 30 Apr 2019 12:38:05 -0400 Subject: [PATCH 0232/1648] Removed debugging code --- pipeline.jsonc | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/pipeline.jsonc b/pipeline.jsonc index cda5a7ae4d8..80edbf13da2 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -1,10 +1,7 @@ { "eosio": { - "pipeline-branch": "master", - "environment": - { - } + "pipeline-branch": "master" }, "eos-multiversion-tests": { @@ -14,14 +11,6 @@ "170=v1.7.0" ] }, - "eos-multiversion-beta-tests": - { - "pipeline-branch": "zach-pipeline-config-pfsn", - "configuration": - [ - "170=v1.7.0" - ] - }, "eosio-sync-tests": { "pipeline-branch": "protocol-features-sync-nodes" From f29e60076f18fac48419f4016f2e388b08152f07 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 30 Apr 2019 12:42:30 -0400 Subject: [PATCH 0233/1648] eos pipeline is not centralized yet --- pipeline.jsonc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pipeline.jsonc b/pipeline.jsonc index 80edbf13da2..ba7e16d68d2 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -1,8 +1,4 @@ { - "eosio": - { - "pipeline-branch": "master" - }, "eos-multiversion-tests": { "pipeline-branch": "protocol-features-sync-nodes", From 5c3a5709211892fc0d02971b78c891566379a64b Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 30 Apr 2019 14:19:20 -0400 Subject: [PATCH 0234/1648] Added eosio-build-unpinned pipeline --- pipeline.jsonc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pipeline.jsonc b/pipeline.jsonc index ba7e16d68d2..cf98935463b 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -1,4 +1,8 @@ { + "eosio-build-unpinned": + { + "pipeline-branch": "protocol-features-sync-nodes" + }, "eos-multiversion-tests": { "pipeline-branch": "protocol-features-sync-nodes", From 2fc682fd89681975f7ce59956659750457ed5668 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 1 May 2019 12:05:03 -0500 Subject: [PATCH 0235/1648] Change default log level from debug to info. 
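The fc submodule bump below raises the default logger threshold from debug to info, so debug-level output is silent unless a logger is explicitly configured back to debug (for example through logging.json, as the later http_plugin/logging.json patches do). A minimal sketch of the call-site pattern this affects; the logger object, the message text, and peer_name are illustrative assumptions, not code from this patch:

   // with the default level now info, this line emits nothing unless the named
   // logger is raised to "debug" in logging.json (peer_name is hypothetical)
   fc_dlog( logger, "closing connection to ${p}", ("p", peer_name) );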
--- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index b06f25475a3..a6f94bae0b2 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit b06f25475a3bb327bfd94b17ea25c2b1864d89e0 +Subproject commit a6f94bae0b2b4a0b68dc1db2677331cafa4716a5 From 37a86e964e4d5d96d01d46f634f2ad0d7750edf9 Mon Sep 17 00:00:00 2001 From: Ben Rush Date: Wed, 1 May 2019 14:35:56 -0400 Subject: [PATCH 0236/1648] Pass env to build script installs --- scripts/eosio_build_amazon.sh | 4 ++-- scripts/eosio_build_ubuntu.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index c379e3ed96a..d5ffb7f2c83 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -46,7 +46,7 @@ printf "Yum installation found at ${YUM}.\\n" if [ $ANSWER != 1 ]; then read -p "Do you wish to update YUM repositories? (y/n) " ANSWER; fi case $ANSWER in 1 | [Yy]* ) - if ! sudo $YUM -y update; then + if ! sudo -E $YUM -y update; then printf " - YUM update failed.\\n" exit 1; else @@ -76,7 +76,7 @@ if [ "${COUNT}" -gt 1 ]; then if [ $ANSWER != 1 ]; then read -p "Do you wish to install these dependencies? (y/n) " ANSWER; fi case $ANSWER in 1 | [Yy]* ) - if ! sudo $YUM -y install ${DEP}; then + if ! sudo -E $YUM -y install ${DEP}; then printf " - YUM dependency installation failed!\\n" exit 1; else diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index a5b1563af8d..3a3d26e8366 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -81,7 +81,7 @@ fi if [ $ANSWER != 1 ]; then read -p "Do you wish to update repositories with apt-get update? (y/n) " ANSWER; fi case $ANSWER in 1 | [Yy]* ) - if ! sudo apt-get update; then + if ! sudo -E apt-get update; then printf " - APT update failed.\\n" exit 1; else @@ -111,7 +111,7 @@ if [ "${COUNT}" -gt 1 ]; then if [ $ANSWER != 1 ]; then read -p "Do you wish to install these packages? (y/n) " ANSWER; fi case $ANSWER in 1 | [Yy]* ) - if ! sudo apt-get -y install ${DEP}; then + if ! 
sudo -E apt-get -y install ${DEP}; then printf " - APT dependency failed.\\n" exit 1 else From 12bc5232a76d2828c97cb523679f624093fb5482 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 1 May 2019 15:10:01 -0500 Subject: [PATCH 0237/1648] Add configurable logging to http_plugin --- plugins/http_plugin/http_plugin.cpp | 90 +++++++++++-------- .../include/eosio/http_plugin/http_plugin.hpp | 1 + 2 files changed, 55 insertions(+), 36 deletions(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 1fe6bd2fac3..ff9c895c711 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -27,6 +27,13 @@ #include #include +namespace fc { + extern std::unordered_map& get_logger_map(); +} + +const fc::string logger_name("http_plugin"); +fc::logger logger; + namespace eosio { static appbase::abstract_plugin& _http_plugin = app().register_plugin(); @@ -208,9 +215,9 @@ namespace eosio { "!DHE:!RSA:!AES128:!RC4:!DES:!3DES:!DSS:!SRP:!PSK:!EXP:!MD5:!LOW:!aNULL:!eNULL") != 1) EOS_THROW(chain::http_exception, "Failed to set HTTPS cipher list"); } catch (const fc::exception& e) { - elog("https server initialization error: ${w}", ("w", e.to_detail_string())); + fc_elog( logger, "https server initialization error: ${w}", ("w", e.to_detail_string()) ); } catch(std::exception& e) { - elog("https server initialization error: ${w}", ("w", e.what())); + fc_elog( logger, "https server initialization error: ${w}", ("w", e.what()) ); } return ctx; @@ -225,13 +232,13 @@ namespace eosio { throw; } catch (const fc::exception& e) { err += e.to_detail_string(); - elog( "${e}", ("e", err)); + fc_elog( logger, "${e}", ("e", err) ); error_results results{websocketpp::http::status_code::internal_server_error, "Internal Service Error", error_results::error_info(e, verbose_http_errors )}; con->set_body( fc::json::to_string( results )); } catch (const std::exception& e) { err += e.what(); - elog( "${e}", ("e", err)); + fc_elog( logger, "${e}", ("e", err) ); error_results results{websocketpp::http::status_code::internal_server_error, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, e.what())), verbose_http_errors )}; con->set_body( fc::json::to_string( results )); @@ -244,7 +251,7 @@ namespace eosio { } } catch (...) 
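// last-resort handler: if the error-reporting path itself throws, fall back to a
// generic "Internal Server Error" body and record the failure through the plugin logger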
{ con->set_body( R"xxx({"message": "Internal Server Error"})xxx" ); - std::cerr << "Exception attempting to handle exception: " << err << std::endl; + fc_elog( logger, "Exception attempting to handle exception: ${e}", ("e", err) ); } } @@ -291,7 +298,7 @@ namespace eosio { con->append_header( "Content-type", "application/json" ); if( bytes_in_flight > max_bytes_in_flight ) { - dlog( "503 - too many bytes in flight: ${bytes}", ("bytes", bytes_in_flight.load()) ); + fc_dlog( logger, "503 - too many bytes in flight: ${bytes}", ("bytes", bytes_in_flight.load()) ); error_results results{websocketpp::http::status_code::too_many_requests, "Busy", error_results::error_info()}; con->set_body( fc::json::to_string( results )); con->set_status( websocketpp::http::status_code::too_many_requests ); @@ -329,7 +336,7 @@ namespace eosio { } ); } else { - dlog( "404 - not found: ${ep}", ("ep", resource)); + fc_dlog( logger, "404 - not found: ${ep}", ("ep", resource) ); error_results results{websocketpp::http::status_code::not_found, "Not Found", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, "Unknown Endpoint" )), verbose_http_errors )}; con->set_body( fc::json::to_string( results )); @@ -352,11 +359,11 @@ namespace eosio { handle_http_request>(ws.get_con_from_hdl(hdl)); }); } catch ( const fc::exception& e ){ - elog( "http: ${e}", ("e",e.to_detail_string())); + fc_elog( logger, "http: ${e}", ("e", e.to_detail_string()) ); } catch ( const std::exception& e ){ - elog( "http: ${e}", ("e",e.what())); + fc_elog( logger, "http: ${e}", ("e", e.what()) ); } catch (...) { - elog("error thrown from http io service"); + fc_elog( logger, "error thrown from http io service" ); } } @@ -409,26 +416,29 @@ namespace eosio { ("access-control-allow-origin", bpo::value()->notifier([this](const string& v) { my->access_control_allow_origin = v; - ilog("configured http with Access-Control-Allow-Origin: ${o}", ("o", my->access_control_allow_origin)); + fc_ilog( logger, "configured http with Access-Control-Allow-Origin: ${o}", + ("o", my->access_control_allow_origin) ); }), "Specify the Access-Control-Allow-Origin to be returned on each request.") ("access-control-allow-headers", bpo::value()->notifier([this](const string& v) { my->access_control_allow_headers = v; - ilog("configured http with Access-Control-Allow-Headers : ${o}", ("o", my->access_control_allow_headers)); + fc_ilog( logger, "configured http with Access-Control-Allow-Headers : ${o}", + ("o", my->access_control_allow_headers) ); }), "Specify the Access-Control-Allow-Headers to be returned on each request.") ("access-control-max-age", bpo::value()->notifier([this](const string& v) { my->access_control_max_age = v; - ilog("configured http with Access-Control-Max-Age : ${o}", ("o", my->access_control_max_age)); + fc_ilog( logger, "configured http with Access-Control-Max-Age : ${o}", + ("o", my->access_control_max_age) ); }), "Specify the Access-Control-Max-Age to be returned on each request.") ("access-control-allow-credentials", bpo::bool_switch()->notifier([this](bool v) { my->access_control_allow_credentials = v; - if (v) ilog("configured http with Access-Control-Allow-Credentials: true"); + if( v ) fc_ilog( logger, "configured http with Access-Control-Allow-Credentials: true" ); })->default_value(false), "Specify if Access-Control-Allow-Credentials: true should be returned on each request.") ("max-body-size", bpo::value()->default_value(1024*1024), @@ -529,23 +539,25 @@ namespace eosio { void http_plugin::plugin_startup() { + handle_sighup(); // 
setup logging + my->thread_pool.emplace( "http", my->thread_pool_size ); if(my->listen_endpoint) { try { my->create_server_for_endpoint(*my->listen_endpoint, my->server); - ilog("start listening for http requests"); + fc_ilog( logger, "start listening for http requests" ); my->server.listen(*my->listen_endpoint); my->server.start_accept(); } catch ( const fc::exception& e ){ - elog( "http service failed to start: ${e}", ("e",e.to_detail_string())); + fc_elog( logger, "http service failed to start: ${e}", ("e", e.to_detail_string()) ); throw; } catch ( const std::exception& e ){ - elog( "http service failed to start: ${e}", ("e",e.what())); + fc_elog( logger, "http service failed to start: ${e}", ("e", e.what()) ); throw; } catch (...) { - elog("error thrown from http io service"); + fc_elog( logger, "error thrown from http io service" ); throw; } } @@ -561,13 +573,13 @@ namespace eosio { }); my->unix_server.start_accept(); } catch ( const fc::exception& e ){ - elog( "unix socket service failed to start: ${e}", ("e",e.to_detail_string())); + fc_elog( logger, "unix socket service failed to start: ${e}", ("e", e.to_detail_string()) ); throw; } catch ( const std::exception& e ){ - elog( "unix socket service failed to start: ${e}", ("e",e.what())); + fc_elog( logger, "unix socket service failed to start: ${e}", ("e", e.what()) ); throw; } catch (...) { - elog("error thrown from unix socket io service"); + fc_elog( logger, "error thrown from unix socket io service" ); throw; } } @@ -579,17 +591,17 @@ namespace eosio { return my->on_tls_init(hdl); }); - ilog("start listening for https requests"); + fc_ilog( logger, "start listening for https requests" ); my->https_server.listen(*my->https_listen_endpoint); my->https_server.start_accept(); } catch ( const fc::exception& e ){ - elog( "https service failed to start: ${e}", ("e",e.to_detail_string())); + fc_elog( logger, "https service failed to start: ${e}", ("e", e.to_detail_string()) ); throw; } catch ( const std::exception& e ){ - elog( "https service failed to start: ${e}", ("e",e.what())); + fc_elog( logger, "https service failed to start: ${e}", ("e", e.what()) ); throw; } catch (...) 
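// unknown failures while bringing up the https listener are logged and rethrown,
// so plugin startup aborts instead of continuing without the configured endpoint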
{ - elog("error thrown from https io service"); + fc_elog( logger, "error thrown from https io service" ); throw; } } @@ -608,6 +620,12 @@ namespace eosio { }}); } + void http_plugin::handle_sighup() { + if( fc::get_logger_map().find( logger_name ) != fc::get_logger_map().end() ) { + logger = fc::get_logger_map()[logger_name]; + } + } + void http_plugin::plugin_shutdown() { if(my->server.is_listening()) my->server.stop_listening(); @@ -622,7 +640,7 @@ namespace eosio { } void http_plugin::add_handler(const string& url, const url_handler& handler) { - ilog( "add api url: ${c}", ("c",url) ); + fc_ilog( logger, "add api url: ${c}", ("c", url) ); my->url_handlers.insert(std::make_pair(url,handler)); } @@ -642,28 +660,28 @@ namespace eosio { } catch (fc::eof_exception& e) { error_results results{422, "Unprocessable Entity", error_results::error_info(e, verbose_http_errors)}; cb( 422, fc::variant( results )); - elog( "Unable to parse arguments to ${api}.${call}", ("api", api_name)( "call", call_name )); - dlog("Bad arguments: ${args}", ("args", body)); + fc_elog( logger, "Unable to parse arguments to ${api}.${call}", ("api", api_name)( "call", call_name ) ); + fc_dlog( logger, "Bad arguments: ${args}", ("args", body) ); } catch (fc::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(e, verbose_http_errors)}; cb( 500, fc::variant( results )); if (e.code() != chain::greylist_net_usage_exceeded::code_value && e.code() != chain::greylist_cpu_usage_exceeded::code_value) { - elog( "FC Exception encountered while processing ${api}.${call}", - ("api", api_name)( "call", call_name )); - dlog( "Exception Details: ${e}", ("e", e.to_detail_string())); + fc_elog( logger, "FC Exception encountered while processing ${api}.${call}", + ("api", api_name)( "call", call_name ) ); + fc_dlog( logger, "Exception Details: ${e}", ("e", e.to_detail_string()) ); } } catch (std::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, e.what())), verbose_http_errors)}; cb( 500, fc::variant( results )); - elog( "STD Exception encountered while processing ${api}.${call}", - ("api", api_name)( "call", call_name )); - dlog( "Exception Details: ${e}", ("e", e.what())); + fc_elog( logger, "STD Exception encountered while processing ${api}.${call}", + ("api", api_name)( "call", call_name ) ); + fc_dlog( logger, "Exception Details: ${e}", ("e", e.what()) ); } catch (...) { error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, "Unknown Exception" )), verbose_http_errors)}; cb( 500, fc::variant( results )); - elog( "Unknown Exception encountered while processing ${api}.${call}", - ("api", api_name)( "call", call_name )); + fc_elog( logger, "Unknown Exception encountered while processing ${api}.${call}", + ("api", api_name)( "call", call_name ) ); } } catch (...) { std::cerr << "Exception attempting to handle exception for " << api_name << "." 
<< call_name << std::endl; diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index eaa132ce0e4..5b84a18f3ac 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -79,6 +79,7 @@ namespace eosio { void plugin_initialize(const variables_map& options); void plugin_startup(); void plugin_shutdown(); + void handle_sighup() override; void add_handler(const string& url, const url_handler&); void add_api(const api_description& api) { From 73708ffe3a5905397c16e5ed9dc52735ec1ffff4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 1 May 2019 15:11:04 -0500 Subject: [PATCH 0238/1648] Add http_plugin and transaction_tracing options --- programs/nodeos/logging.json | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/programs/nodeos/logging.json b/programs/nodeos/logging.json index 07771457d72..54947425291 100644 --- a/programs/nodeos/logging.json +++ b/programs/nodeos/logging.json @@ -64,6 +64,15 @@ "stderr", "net" ] + },{ + "name": "http_plugin", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] },{ "name": "producer_plugin", "level": "debug", @@ -73,6 +82,15 @@ "stderr", "net" ] + },{ + "name": "transaction_tracing", + "level": "info", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] } ] } From fff7bf9508a61b15ea503f0960b6cbbe642af6cf Mon Sep 17 00:00:00 2001 From: Adam Mitz Date: Wed, 1 May 2019 15:38:55 -0500 Subject: [PATCH 0239/1648] wabt: reduce redundant memset --- libraries/chain/webassembly/wabt.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/webassembly/wabt.cpp b/libraries/chain/webassembly/wabt.cpp index a23919e0ec6..23feff65ee5 100644 --- a/libraries/chain/webassembly/wabt.cpp +++ b/libraries/chain/webassembly/wabt.cpp @@ -46,8 +46,8 @@ class wabt_instantiated_module : public wasm_instantiated_module_interface { Memory* memory = this_run_vars.memory = _env->GetMemory(0); memory->page_limits = _initial_memory_configuration; memory->data.resize(_initial_memory_configuration.initial * WABT_PAGE_SIZE); - memset(memory->data.data(), 0, memory->data.size()); memcpy(memory->data.data(), _initial_memory.data(), _initial_memory.size()); + memset(memory->data.data() + _initial_memory.size(), 0, memory->data.size() - _initial_memory.size()); } _params[0].set_i64(uint64_t(context.get_receiver())); From 77d99d5295b978b49f6014fd98cc9bf9f819c9c2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 1 May 2019 16:11:23 -0500 Subject: [PATCH 0240/1648] Add scope lock instead of explicit unlock --- plugins/net_plugin/net_plugin.cpp | 38 ++++++++++++++++--------------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 431f89875db..c0a5d043608 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -902,25 +902,25 @@ namespace eosio { self->consecutive_rejected_blocks = 0; ++self->consecutive_immediate_connection_close; self->last_close = fc::time_point::now(); - std::unique_lock g_conn( self->conn_mtx ); - bool has_last_req = !!self->last_req; - g_conn.unlock(); + bool has_last_req = false; + { + std::lock_guard g_conn( self->conn_mtx ); + has_last_req = !!self->last_req; + self->last_handshake_recv = handshake_message(); + self->last_handshake_sent = 
handshake_message(); + } if( has_last_req ) { my_impl->dispatcher->retry_fetch( self->shared_from_this() ); } self->peer_requested.reset(); self->sent_handshake_count = 0; - g_conn.lock(); - self->last_handshake_recv = handshake_message(); - self->last_handshake_sent = handshake_message(); - g_conn.unlock(); my_impl->sync_master->sync_reset_lib_num( self->shared_from_this() ); fc_dlog( logger, "closed, canceling wait on ${p}", ("p", self->peer_name()) ); // peer_name(), do not hold conn_mtx self->cancel_wait(); - - std::unique_lock g( self->read_delay_timer_mtx ); - self->read_delay_timer.cancel(); - g.unlock(); + { + std::lock_guard g( self->read_delay_timer_mtx ); + self->read_delay_timer.cancel(); + } if( reconnect ) { my_impl->start_conn_timer( std::chrono::milliseconds( 100 ), connection_wptr() ); @@ -1583,10 +1583,11 @@ namespace eosio { } g.unlock(); if( req.req_blocks.mode == catch_up ) { - std::unique_lock g_conn( c->conn_mtx ); - c->fork_head = id; - c->fork_head_num = num; - g_conn.unlock(); + { + std::lock_guard g_conn( c->conn_mtx ); + c->fork_head = id; + c->fork_head_num = num; + } std::lock_guard g( sync_mtx ); fc_ilog( logger, "got a catch_up notice while in ${s}, fork head num = ${fhn} " "target LIB = ${lib} next_expected = ${ne}", @@ -1621,9 +1622,10 @@ namespace eosio { verify_catchup(c, msg.known_blocks.pending, msg.known_blocks.ids.back()); } } else if (msg.known_blocks.mode == last_irr_catch_up) { - std::unique_lock g_conn( c->conn_mtx ); - c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; - g_conn.unlock(); + { + std::lock_guard g_conn( c->conn_mtx ); + c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; + } sync_reset_lib_num(c); start_sync(c, msg.known_trx.pending); } From 6755a94ad0cf9f7c4c08d7147ad01ea29ae295cb Mon Sep 17 00:00:00 2001 From: Kayan Date: Thu, 2 May 2019 18:24:30 +0800 Subject: [PATCH 0241/1648] add test case for replay with/without optimizations --- unittests/signal_tests.cpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/unittests/signal_tests.cpp b/unittests/signal_tests.cpp index cd078638be1..81bffaffebc 100644 --- a/unittests/signal_tests.cpp +++ b/unittests/signal_tests.cpp @@ -120,6 +120,8 @@ BOOST_AUTO_TEST_CASE(signal_basic_replay) test.produce_block(); + transaction_id_type txn_id_to_verify; + { int irr_count = 0; uint32_t last_irr_blocknum = 0; @@ -176,6 +178,8 @@ BOOST_AUTO_TEST_CASE(signal_basic_replay) }); auto trace = test.create_account(N(abc), N(eosio)); + txn_id_to_verify = last_app_txn_id; + BOOST_CHECK_EQUAL(last_app_txn_id != transaction_id_type(), true); BOOST_CHECK_EQUAL(trace, last_signal_trace); BOOST_CHECK_EQUAL(acc_txn_count, 1); BOOST_CHECK_EQUAL(app_txn_count, 1); @@ -199,10 +203,13 @@ BOOST_AUTO_TEST_CASE(signal_basic_replay) BOOST_CHECK_EQUAL(last_acc_blk_num, test.control->head_block_num()); } - signal_tester replaychain(conf, 2, 1); + // test replay with/without optimizations + for (int i = 0; i < 2; ++i) { + if (i == 1) { + conf.disable_replay_opts = !conf.disable_replay_opts; + } + signal_tester replaychain(conf, 2 + i, 1); - // test replay - { int irr_count = 0; uint32_t last_irr_blocknum = 0; @@ -262,6 +269,7 @@ BOOST_AUTO_TEST_CASE(signal_basic_replay) BOOST_CHECK_EQUAL(acc_txn_count, 3); BOOST_CHECK_EQUAL(app_txn_count, 3); + BOOST_CHECK_EQUAL(last_app_txn_id, txn_id_to_verify); BOOST_CHECK_EQUAL(irr_count, 2); BOOST_CHECK_EQUAL(last_irr_blocknum, replaychain.control->last_irreversible_block_num()); From 
806188dc694c9a65052c2b50cbd8f5ee09738cf2 Mon Sep 17 00:00:00 2001 From: Steven Strand Date: Mon, 11 Mar 2019 17:35:27 -0400 Subject: [PATCH 0242/1648] add new functions to eosio-blocklog program #6884 --- .../chain/include/eosio/chain/exceptions.hpp | 4 +- programs/eosio-blocklog/main.cpp | 191 +++++++++++++++++- unittests/misc_tests.cpp | 9 + 3 files changed, 202 insertions(+), 2 deletions(-) diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 590c0e879b6..927675a9585 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -531,8 +531,10 @@ namespace eosio { namespace chain { 3190003, "block log can not be found" ) FC_DECLARE_DERIVED_EXCEPTION( block_log_backup_dir_exist, block_log_exception, 3190004, "block log backup dir already exists" ) + FC_DECLARE_DERIVED_EXCEPTION( block_index_not_found, block_log_exception, + 3190005, "block index can not be found" ) - FC_DECLARE_DERIVED_EXCEPTION( http_exception, chain_exception, + FC_DECLARE_DERIVED_EXCEPTION( http_exception, chain_exception, 3200000, "http exception" ) FC_DECLARE_DERIVED_EXCEPTION( invalid_http_client_root_cert, http_exception, 3200001, "invalid http client root certificate" ) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 31db5b25c70..f90cb44218a 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -36,6 +37,8 @@ struct blocklog { uint32_t last_block; bool no_pretty_print; bool as_json_array; + bool make_index; + uint32_t trim_block; }; void blocklog::read_log() { @@ -44,6 +47,7 @@ void blocklog::read_log() { EOS_ASSERT( end, block_log_exception, "No blocks found in block log" ); EOS_ASSERT( end->block_num() > 1, block_log_exception, "Only one block found in block log" ); + //fix message below, first block might not be 1, first_block_num is not set yet ilog( "existing block log contains block num 1 through block num ${n}", ("n",end->block_num()) ); optional reversible_blocks; @@ -145,9 +149,12 @@ void blocklog::set_program_options(options_description& cli) "Do not pretty print the output. Useful if piping to jq to improve performance.") ("as-json-array", bpo::bool_switch(&as_json_array)->default_value(false), "Print out json blocks wrapped in json array (otherwise the output is free-standing json objects).") + ("make-index", bpo::bool_switch(&make_index)->default_value(false), + "Create blocks.index from blocks.log. Must give 'blocks-dir'. Give 'output-file' relative to blocks-dir (default is blocks.index).") + ("trim-block-log", bpo::bool_switch(&make_index)->default_value(false), + "Trim blocks.log and blocks.index in place. 
Must give 'blocks-dir' and 'last' (else nothing is done).") ("help", "Print this help message and exit.") ; - } void blocklog::initialize(const variables_map& options) { @@ -169,6 +176,176 @@ void blocklog::initialize(const variables_map& options) { } +//struct used by truncBlockLog() and makeIndex() to read first 18 bytes of a block from blocks.log +struct __attribute__((packed)) BlockStart { //first 18 bytes of each block + block_timestamp_type timestamp; + account_name prodname; + uint16_t confirmed; + uint32_t blknum; //low 32 bits of previous blockid, is big endian block number of previous block +} blkStart; + +int truncBlockLog(bfs::path blockDir, uint32_t n) { //n is last block to keep + using namespace std; + cout << "Will truncate blocks.log and blocks.index after block " << n << '\n'; + + //read blocks.log to see if version 1 or 2 and get first block number + filebuf fin0; + string blockFileName= (blockDir/"blocks.log").generic_string(); + fin0.open(blockFileName.c_str(), ios::in|ios::binary); + EOS_ASSERT( fin0.is_open(), block_log_not_found, "cannot read blocks.log" ); + uint32_t version=0, firstBlock; + fin0.sgetn((char*)&version,sizeof(version)); + EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "unsupported version of block log" ); + if (version == 1) + firstBlock= 1; + else + fin0.sgetn((char*)&firstBlock,sizeof(firstBlock)); + cout << "version= " << version << "\nfirst block= " << firstBlock << '\n'; + if (n <= firstBlock) { + cout << n << " is before first block so nothing to do\n"; + return 2; + } + + //open blocks.index and get blocks.log position for block 'n' + filebuf fin1; + string indexFileName= (blockDir/"blocks.index").generic_string(); + fin1.open(indexFileName.c_str(),ios::in|ios::binary); + EOS_ASSERT( fin1.is_open(), block_index_not_found, "cannot read blocks.index" ); + uint64_t indexPos= 8*(n-firstBlock); + uint64_t pos= fin1.pubseekoff(indexPos,ios::beg,ios::in); + EOS_ASSERT( pos==indexPos, block_log_exception, "cannot read blocks.index entry for trim-after-block" ); + uint64_t fpos0, fpos1; //filepos of block n and block n+1, will read from blocks.index + fin1.sgetn((char*)&fpos0,sizeof(fpos0)); + fin1.sgetn((char*)&fpos1,sizeof(fpos1)); + fin1.close(); + cout << "According to blocks.index:\n"; + cout << " block " << n << " starts at position " << fpos0 << '\n'; + cout << " block " << n+1 << " starts at position " << fpos1 << '\n'; + + //read blocks.log and verify block number n is found at file position fpos0 + fin0.pubseekoff(fpos0,ios::beg,ios::in); + fin0.sgetn((char*)&blkStart,sizeof(blkStart)); + fin0.close(); + uint32_t bnum= endian_reverse_u32(blkStart.blknum)+1; //convert from big endian to little endian, add 1 since prior block + EOS_ASSERT( bnum==n, block_log_exception, "blocks.index does not agree with blocks.log" ); + cout << "In blocks.log at position " << fpos0 << " find block " << bnum << " as expected\n"; + EOS_ASSERT( truncate(blockFileName.c_str(),fpos1)==0, block_log_exception, "truncate blocks.log fails"); + indexPos+= sizeof(uint64_t); //advance to after record for block n + EOS_ASSERT( truncate(indexFileName.c_str(),indexPos)==0, block_log_exception, "truncate blocks.index fails"); + cout << "blocks.log has been truncated to " << fpos1 << " bytes\n"; + cout << "blocks.index has been truncated to " << indexPos << " bytes\n"; + return 0; +} + +int makeIndex(bfs::path blockDir, string outFile) { + //this code makes blocks.index much faster than nodeos (in recent test 80 seconds vs. 
90 minutes) + using namespace std; + cout << "Will make blocks.index from blocks.log\n"; + string blockFileName= (blockDir / "blocks.log").generic_string(); + string outFileName= (blockDir / outFile ).generic_string(); + int fin = open(blockFileName.c_str(), O_RDONLY); + EOS_ASSERT( fin>0, block_log_not_found, "cannot read blocks.log" ); + + //will read big chunks of blocks.log into buf, will fill fposList with file positions before write to blocks.index + constexpr uint32_t bufLen{1U<<24}; //bufLen must be power of 2 >= largest possible block == one MByte + char* buf= new char[bufLen+8]; //first 8 bytes of prior buf are put past end of current buf + constexpr uint64_t fposListLen{1U<<22}; //length of fposList[] in bytes + uint64_t* fposList= new uint64_t[fposListLen>>3]; + + //read blocks.log to see if version 1 or 2 and get firstblocknum (implicit 1 if version 1) + uint32_t version=0, firstBlock; + read(fin,(char*)&version,sizeof(version)); + EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "unsupported version of block log" ); + if (version == 1) + firstBlock= 1; + else + read(fin,(char*)&firstBlock,sizeof(firstBlock)); + cout << "block log version= " << version << '\n'; + cout << "first block= " << firstBlock << '\n'; + + uint64_t pos= lseek(fin,0,ios::end); //get blocks.log file length + uint64_t lastBufLen= pos & ((uint64_t)bufLen-1); //bufLen is a power of 2 so -1 creates low bits all 1 + if (!lastBufLen) //will read integral number of bufLen and one time read lastBufLen + lastBufLen= bufLen; + pos= lseek(fin,-(uint64_t)lastBufLen,ios::end); + uint64_t didread= read(fin,buf,lastBufLen); //read tail of file into buf + EOS_ASSERT( didread==lastBufLen, block_log_exception, "blocks.log read fails" ); + + uint32_t indexStart; //buf index for block start + uint32_t indexEnd; //buf index for block end == buf index for block start file position + uint64_t filePos; //file pos of block start + BlockStart* bst; //pointer to BlockStart + + indexStart= lastBufLen; //pretend a block starts just past end of buf then read prior block + indexEnd= indexStart-8; //index in buf where block ends and block file position starts + filePos= *(uint64_t*)(buf+indexEnd); //file pos of block start + indexStart= filePos - pos; //buf index for block start + bst= (BlockStart*)(buf+indexStart); //pointer to BlockStart + uint32_t lastBlock= endian_reverse_u32(bst->blknum); //convert from big endian to little endian + uint32_t bnum= ++lastBlock; //add 1 since took block number from prior block id + cout << "last block= " << lastBlock << '\n'; + cout << '\n'; + cout << "block " << setw(10) << bnum << " filePos " << setw(14) << filePos << '\n'; //first progress indicator + + //we use low level file IO because it is distinctly faster than C++ filebuf or iostream + mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; //if create file permissions will be 644 + int fout = open(outFileName.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); //create if no exists, truncate if does exist + EOS_ASSERT( fout>0, block_index_not_found, "cannot write blocks.index" ); + + uint64_t indFileLen= bnum<<3; //index file holds 8 bytes per block in blocks.log + uint64_t lastIndBufLen= indFileLen & (fposListLen-1); //fposListLen is a power of 2 so -1 creates low bits all 1 + if (!lastIndBufLen) //will write integral number of bufLen and lastIndBufLen one time to index file + lastIndBufLen= bufLen; + uint64_t indPos= lseek(fout,indFileLen-lastIndBufLen,ios::beg); + uint64_t blkBase= (indPos>>3) + firstBlock; //first entry in 
fposList is for block blkBase + //cout << "indPos= " << indPos << " blkBase= " << blkBase << '\n'; + fposList[bnum-blkBase]= filePos; //write filepos for block bnum + + for (;;) { + if (bnum==blkBase) { //if fposList is full + write(fout,(char*)fposList,lastIndBufLen); //write fposList to index file + if (indPos==0) { //if done writing index file + cout << "block " << setw(10) << bnum << " filePos " << setw(14) << filePos << '\n'; //last progress indicator + EOS_ASSERT( bnum == firstBlock, block_log_exception, "blocks.log does not contain consecutive block numbers" ); + break; + } + indPos-= fposListLen; + blkBase-= fposListLen>>3; + didread= lseek(fout,indPos,ios::beg); + EOS_ASSERT( didread==indPos, block_log_exception, "blocks.log seek fails" ); + lastIndBufLen= fposListLen; //from now on all writes to index file write a full fposList[] + } + if (indexStart < 8) { //if block start is split across buf boundary + memcpy(buf+bufLen,buf,8); //copy portion at start of buf to past end of buf + pos-= bufLen; //file position of buf + lseek(fin,pos,ios::beg); + didread= read(fin,buf,bufLen); //read next buf + EOS_ASSERT( didread==bufLen, block_log_exception, "blocks.log read fails" ); + indexStart+= bufLen; + } + indexEnd= indexStart-8; //index in buf where block ends and block file position starts + filePos= *(uint64_t*)(buf+indexEnd); //file pos of block start + if (filePos < pos) { //if block start is in prior buf + pos-= bufLen; //file position of buf + lseek(fin,pos,ios::beg); + didread= read(fin,buf,bufLen); //read next buf + EOS_ASSERT( didread==bufLen, block_log_exception, "blocks.log read fails" ); + indexEnd+= bufLen; + } + indexStart= filePos - pos; //buf index for block start + --bnum; + fposList[bnum-blkBase]= filePos; //write filepos for block bnum + if ((bnum & 0xfffff) == 0) //periodically print a progress indicator + cout << "block " << setw(10) << bnum << " filePos " << setw(14) << filePos << '\n'; + } + + close(fout); + close(fin); + delete[] buf; + delete[] fposList; + cout << "\nwrote " << lastBlock << " file positions to " << outFileName << '\n'; + return 0; +} int main(int argc, char** argv) { @@ -184,6 +361,18 @@ int main(int argc, char** argv) cli.print(std::cerr); return 0; } + if (vmap.at("trim-block-log").as()) { + uint32_t last= vmap.at("last").as(); + if (last == std::numeric_limits::max()) { //if 'last' was not given on command line + std::cout << "'trim-block-log' does nothing unless specify 'last' block."; + return -1; + } + return truncBlockLog(vmap.at("blocks-dir").as(), last); + } + if (vmap.at("make-index").as()) { + string outFile{vmap.count("output-file")==0? 
string("blocks.index"): vmap.at("output-file").as().generic_string()}; + return makeIndex(vmap.at("blocks-dir").as(), outFile); + } blog.initialize(vmap); blog.read_log(); } catch( const fc::exception& e ) { diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 611d9f1f40e..47e101b40f7 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -119,6 +120,14 @@ static constexpr uint64_t name_suffix( uint64_t n ) { BOOST_AUTO_TEST_SUITE(misc_tests) +BOOST_AUTO_TEST_CASE(reverse_endian_tests) +{ + BOOST_CHECK_EQUAL( endian_reverse_u64(0x0123456789abcdef), 0xefcdab8967452301 ); + BOOST_CHECK_EQUAL( endian_reverse_u64(0x0102030405060708), 0x0807060504030201 ); + BOOST_CHECK_EQUAL( endian_reverse_u32(0x01234567), 0x67452301 ); + BOOST_CHECK_EQUAL( endian_reverse_u32(0x01020304), 0x04030201 ); +} + BOOST_AUTO_TEST_CASE(name_suffix_tests) { BOOST_CHECK_EQUAL( name{name_suffix(0)}, name{0} ); From 44b524a32168e7712e23a908f00de946feaad71a Mon Sep 17 00:00:00 2001 From: Steven Strand Date: Tue, 12 Mar 2019 18:36:35 -0400 Subject: [PATCH 0243/1648] add suggested changes #6884 --- .../chain/include/eosio/chain/exceptions.hpp | 2 +- programs/eosio-blocklog/main.cpp | 275 ++++++++++-------- 2 files changed, 148 insertions(+), 129 deletions(-) diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 927675a9585..fa92cf984d8 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -534,7 +534,7 @@ namespace eosio { namespace chain { FC_DECLARE_DERIVED_EXCEPTION( block_index_not_found, block_log_exception, 3190005, "block index can not be found" ) - FC_DECLARE_DERIVED_EXCEPTION( http_exception, chain_exception, + FC_DECLARE_DERIVED_EXCEPTION( http_exception, chain_exception, 3200000, "http exception" ) FC_DECLARE_DERIVED_EXCEPTION( invalid_http_client_root_cert, http_exception, 3200001, "invalid http client root certificate" ) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index f90cb44218a..642417a4ddb 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -2,6 +2,7 @@ * @file * @copyright defined in eosio/LICENSE.txt */ +#include #include #include #include @@ -140,10 +141,10 @@ void blocklog::set_program_options(options_description& cli) ("blocks-dir", bpo::value()->default_value("blocks"), "the location of the blocks directory (absolute path or relative to the current directory)") ("output-file,o", bpo::value(), - "the file to write the block log output to (absolute or relative path). If not specified then output is to stdout.") - ("first", bpo::value(&first_block)->default_value(1), + "the file to write the output to (absolute or relative path). If not specified then output is to stdout.") + ("first,f", bpo::value(&first_block)->default_value(1), "the first block number to log") - ("last", bpo::value(&last_block)->default_value(std::numeric_limits::max()), + ("last,l", bpo::value(&last_block)->default_value(std::numeric_limits::max()), "the last block number (inclusive) to log") ("no-pretty-print", bpo::bool_switch(&no_pretty_print)->default_value(false), "Do not pretty print the output. 
Useful if piping to jq to improve performance.") @@ -151,9 +152,9 @@ void blocklog::set_program_options(options_description& cli) "Print out json blocks wrapped in json array (otherwise the output is free-standing json objects).") ("make-index", bpo::bool_switch(&make_index)->default_value(false), "Create blocks.index from blocks.log. Must give 'blocks-dir'. Give 'output-file' relative to blocks-dir (default is blocks.index).") - ("trim-block-log", bpo::bool_switch(&make_index)->default_value(false), + ("trim-blocklog", bpo::bool_switch(&make_index)->default_value(false), "Trim blocks.log and blocks.index in place. Must give 'blocks-dir' and 'last' (else nothing is done).") - ("help", "Print this help message and exit.") + ("help,h", "Print this help message and exit.") ; } @@ -176,174 +177,192 @@ void blocklog::initialize(const variables_map& options) { } -//struct used by truncBlockLog() and makeIndex() to read first 18 bytes of a block from blocks.log -struct __attribute__((packed)) BlockStart { //first 18 bytes of each block +//struct used by trunc_blocklog() and make_index() to read first 18 bytes of a block from blocks.log +struct __attribute__((packed)) Block_Start { //first 18 bytes of each block (must match block_header) block_timestamp_type timestamp; account_name prodname; uint16_t confirmed; uint32_t blknum; //low 32 bits of previous blockid, is big endian block number of previous block -} blkStart; +} blk_start; +static_assert( sizeof(Block_Start) == 18, "Update block_start if block_header changes"); -int truncBlockLog(bfs::path blockDir, uint32_t n) { //n is last block to keep +int trunc_blocklog(bfs::path block_dir, uint32_t n) { //n is last block to keep using namespace std; - cout << "Will truncate blocks.log and blocks.index after block " << n << '\n'; - //read blocks.log to see if version 1 or 2 and get first block number - filebuf fin0; - string blockFileName= (blockDir/"blocks.log").generic_string(); - fin0.open(blockFileName.c_str(), ios::in|ios::binary); - EOS_ASSERT( fin0.is_open(), block_log_not_found, "cannot read blocks.log" ); - uint32_t version=0, firstBlock; - fin0.sgetn((char*)&version,sizeof(version)); - EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "unsupported version of block log" ); + filebuf fin_blocks; + cout << "In directory " << block_dir << " will truncate blocks.log and blocks.index after block " << n << '\n'; + string block_file_name= (block_dir/"blocks.log").generic_string(); + fin_blocks.open(block_file_name, ios::in|ios::binary); + EOS_ASSERT( fin_blocks.is_open(), block_log_not_found, "cannot read ${file}", ("file",block_file_name) ); + uint32_t version=0, first_block; + fin_blocks.sgetn((char*)&version,sizeof(version)); + cout << "block log version= " << version << '\n'; + EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); if (version == 1) - firstBlock= 1; + first_block= 1; else - fin0.sgetn((char*)&firstBlock,sizeof(firstBlock)); - cout << "version= " << version << "\nfirst block= " << firstBlock << '\n'; - if (n <= firstBlock) { - cout << n << " is before first block so nothing to do\n"; + fin_blocks.sgetn((char*)&first_block,sizeof(first_block)); + cout << "first block= " << first_block << '\n'; + if (n <= first_block) { + cerr << n << " is before first block so nothing to do\n"; return 2; } - //open blocks.index and get blocks.log position for block 'n' - filebuf fin1; - string indexFileName= (blockDir/"blocks.index").generic_string(); - 
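Both the old and the rewritten trim function verify block n the same way: read the 4 big-endian bytes that hold the low 32 bits of the previous block's id and add one. The unit test added in misc_tests pins down what endian_reverse_u32 must return. A hedged, portable equivalent is sketched here for illustration only; the project's own implementation may differ (for example, a compiler builtin):

    #include <cstdint>

    // Byte swap matching what the new misc_tests expect from endian_reverse_u32
    // (0x01234567 -> 0x67452301). Illustrative only.
    inline uint32_t endian_reverse_u32_sketch(uint32_t x) {
       return ((x >> 24) & 0x000000ffu)
            | ((x >>  8) & 0x0000ff00u)
            | ((x <<  8) & 0x00ff0000u)
            | ((x << 24) & 0xff000000u);
    }

    // The 4 bytes read from a stored block at the previous-id offset are the
    // low 32 bits of the *previous* block's id, big endian, so the block's own
    // number is the swapped value plus one.
    inline uint32_t block_num_from_prior_id(uint32_t big_endian_prior) {
       return endian_reverse_u32_sketch(big_endian_prior) + 1;
    }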
fin1.open(indexFileName.c_str(),ios::in|ios::binary); - EOS_ASSERT( fin1.is_open(), block_index_not_found, "cannot read blocks.index" ); - uint64_t indexPos= 8*(n-firstBlock); - uint64_t pos= fin1.pubseekoff(indexPos,ios::beg,ios::in); - EOS_ASSERT( pos==indexPos, block_log_exception, "cannot read blocks.index entry for trim-after-block" ); + //open blocks.index, get last_block and blocks.log position for block 'n' + filebuf fin_index; + string index_file_name= (block_dir/"blocks.index").generic_string(); + fin_index.open(index_file_name,ios::in|ios::binary); + EOS_ASSERT( fin_index.is_open(), block_index_not_found, "cannot read ${file}", ("file",index_file_name) ); + uint64_t file_end= fin_index.pubseekoff(0,ios::end,ios::in); + uint32_t last_block= file_end/sizeof(uint64_t); + cout << "last block= " << last_block << '\n'; + if (n >= last_block) { + cerr << n << " is not before last block so nothing to do\n"; + return 2; + } + uint64_t index_pos= sizeof(uint64_t)*(n-first_block); + uint64_t pos= fin_index.pubseekoff(index_pos,ios::beg,ios::in); + EOS_ASSERT( pos==index_pos, block_log_exception, "cannot read blocks.index entry for block ${b}", ("b",n) ); uint64_t fpos0, fpos1; //filepos of block n and block n+1, will read from blocks.index - fin1.sgetn((char*)&fpos0,sizeof(fpos0)); - fin1.sgetn((char*)&fpos1,sizeof(fpos1)); - fin1.close(); + fin_index.sgetn((char*)&fpos0,sizeof(fpos0)); + fin_index.sgetn((char*)&fpos1,sizeof(fpos1)); + fin_index.close(); cout << "According to blocks.index:\n"; cout << " block " << n << " starts at position " << fpos0 << '\n'; cout << " block " << n+1 << " starts at position " << fpos1 << '\n'; //read blocks.log and verify block number n is found at file position fpos0 - fin0.pubseekoff(fpos0,ios::beg,ios::in); - fin0.sgetn((char*)&blkStart,sizeof(blkStart)); - fin0.close(); - uint32_t bnum= endian_reverse_u32(blkStart.blknum)+1; //convert from big endian to little endian, add 1 since prior block + fin_blocks.pubseekoff(fpos0,ios::beg,ios::in); + fin_blocks.sgetn((char*)&blk_start,sizeof(blk_start)); //read first 18 bytes of block + fin_blocks.close(); + uint32_t bnum= endian_reverse_u32(blk_start.blknum)+1; //convert from big endian to little endian, add 1 since prior block + cout << "At position " << fpos0 << " in blocks.log find block " << bnum << (bnum==n? 
" as expected\n": " - not good!\n"); EOS_ASSERT( bnum==n, block_log_exception, "blocks.index does not agree with blocks.log" ); - cout << "In blocks.log at position " << fpos0 << " find block " << bnum << " as expected\n"; - EOS_ASSERT( truncate(blockFileName.c_str(),fpos1)==0, block_log_exception, "truncate blocks.log fails"); - indexPos+= sizeof(uint64_t); //advance to after record for block n - EOS_ASSERT( truncate(indexFileName.c_str(),indexPos)==0, block_log_exception, "truncate blocks.index fails"); + EOS_ASSERT( truncate(block_file_name.c_str(),fpos1)==0, block_log_exception, "truncate blocks.log fails"); + index_pos+= sizeof(uint64_t); //advance to after record for block n + EOS_ASSERT( truncate(index_file_name.c_str(),index_pos)==0, block_log_exception, "truncate blocks.index fails"); cout << "blocks.log has been truncated to " << fpos1 << " bytes\n"; - cout << "blocks.index has been truncated to " << indexPos << " bytes\n"; + cout << "blocks.index has been truncated to " << index_pos << " bytes\n"; return 0; } -int makeIndex(bfs::path blockDir, string outFile) { +int make_index(bfs::path block_dir, string out_file) { //this code makes blocks.index much faster than nodeos (in recent test 80 seconds vs. 90 minutes) using namespace std; - cout << "Will make blocks.index from blocks.log\n"; - string blockFileName= (blockDir / "blocks.log").generic_string(); - string outFileName= (blockDir / outFile ).generic_string(); - int fin = open(blockFileName.c_str(), O_RDONLY); - EOS_ASSERT( fin>0, block_log_not_found, "cannot read blocks.log" ); - - //will read big chunks of blocks.log into buf, will fill fposList with file positions before write to blocks.index + string block_file_name= (block_dir / "blocks.log").generic_string(); + string out_file_name= (block_dir / out_file ).generic_string(); + cout << "Will read blocks.log file " << block_file_name << '\n'; + cout << "Will write blocks.index file " << out_file_name << '\n'; + int fin = open(block_file_name.c_str(), O_RDONLY); + EOS_ASSERT( fin>0, block_log_not_found, "cannot read block file ${file}", ("file",block_file_name) ); + + //will read big chunks of blocks.log into buf, will fill fpos_list with file positions before write to blocks.index constexpr uint32_t bufLen{1U<<24}; //bufLen must be power of 2 >= largest possible block == one MByte - char* buf= new char[bufLen+8]; //first 8 bytes of prior buf are put past end of current buf - constexpr uint64_t fposListLen{1U<<22}; //length of fposList[] in bytes - uint64_t* fposList= new uint64_t[fposListLen>>3]; - - //read blocks.log to see if version 1 or 2 and get firstblocknum (implicit 1 if version 1) - uint32_t version=0, firstBlock; + auto buffer= make_unique(bufLen+8); //first 8 bytes of prior buf are put past end of current buf + char* buf= buffer.get(); + constexpr uint64_t fpos_list_len{1U<<22}; //length of fpos_list[] in bytes + auto fpos_buffer= make_unique(fpos_list_len>>3); + uint64_t* fpos_list= fpos_buffer.get(); + + //read blocks.log to see if version 1 or 2 and get first_blocknum (implicit 1 if version 1) + uint32_t version=0, first_block; read(fin,(char*)&version,sizeof(version)); - EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "unsupported version of block log" ); + cout << "block log version= " << version << '\n'; + EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); if (version == 1) - firstBlock= 1; + first_block= 1; else - 
read(fin,(char*)&firstBlock,sizeof(firstBlock)); - cout << "block log version= " << version << '\n'; - cout << "first block= " << firstBlock << '\n'; - - uint64_t pos= lseek(fin,0,ios::end); //get blocks.log file length - uint64_t lastBufLen= pos & ((uint64_t)bufLen-1); //bufLen is a power of 2 so -1 creates low bits all 1 - if (!lastBufLen) //will read integral number of bufLen and one time read lastBufLen - lastBufLen= bufLen; - pos= lseek(fin,-(uint64_t)lastBufLen,ios::end); - uint64_t didread= read(fin,buf,lastBufLen); //read tail of file into buf - EOS_ASSERT( didread==lastBufLen, block_log_exception, "blocks.log read fails" ); - - uint32_t indexStart; //buf index for block start - uint32_t indexEnd; //buf index for block end == buf index for block start file position - uint64_t filePos; //file pos of block start - BlockStart* bst; //pointer to BlockStart - - indexStart= lastBufLen; //pretend a block starts just past end of buf then read prior block - indexEnd= indexStart-8; //index in buf where block ends and block file position starts - filePos= *(uint64_t*)(buf+indexEnd); //file pos of block start - indexStart= filePos - pos; //buf index for block start - bst= (BlockStart*)(buf+indexStart); //pointer to BlockStart - uint32_t lastBlock= endian_reverse_u32(bst->blknum); //convert from big endian to little endian - uint32_t bnum= ++lastBlock; //add 1 since took block number from prior block id - cout << "last block= " << lastBlock << '\n'; + read(fin,(char*)&first_block,sizeof(first_block)); + cout << "first block= " << first_block << '\n'; + + uint64_t pos= lseek(fin,0,SEEK_END); //get blocks.log file length + uint64_t last_buf_len= pos & ((uint64_t)bufLen-1); //bufLen is a power of 2 so -1 creates low bits all 1 + if (!last_buf_len) //will read integral number of bufLen and one time read last_buf_len + last_buf_len= bufLen; + pos= lseek(fin,-(uint64_t)last_buf_len,SEEK_END); + uint64_t did_read= read(fin,buf,last_buf_len); //read tail of file into buf + EOS_ASSERT( did_read==last_buf_len, block_log_exception, "blocks.log read fails" ); + + uint32_t index_start; //buf index for block start + uint32_t index_end; //buf index for block end == buf index for block start file position + uint64_t file_pos; //file pos of block start + uint64_t last_file_pos; //used to check that file_pos is strictly decreasing + Block_Start* bst; //pointer to Block_Start + + index_start= last_buf_len; //pretend a block starts just past end of buf then read prior block + index_end= index_start-8; //index in buf where block ends and block file position starts + last_file_pos= file_pos= *(uint64_t*)(buf+index_end); //file pos of block start + index_start= file_pos - pos; //buf index for block start + bst= (Block_Start*)(buf+index_start); //pointer to Block_Start + uint32_t last_block= endian_reverse_u32(bst->blknum); //convert from big endian to little endian + uint32_t bnum= ++last_block; //add 1 since took block number from prior block id + cout << "last block= " << last_block << '\n'; cout << '\n'; - cout << "block " << setw(10) << bnum << " filePos " << setw(14) << filePos << '\n'; //first progress indicator + cout << "block " << setw(10) << bnum << " file_pos " << setw(14) << file_pos << '\n'; //first progress indicator //we use low level file IO because it is distinctly faster than C++ filebuf or iostream mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; //if create file permissions will be 644 - int fout = open(outFileName.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); //create if no exists, truncate if 
does exist + int fout = open(out_file_name.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); //create if no exists, truncate if does exist EOS_ASSERT( fout>0, block_index_not_found, "cannot write blocks.index" ); - uint64_t indFileLen= bnum<<3; //index file holds 8 bytes per block in blocks.log - uint64_t lastIndBufLen= indFileLen & (fposListLen-1); //fposListLen is a power of 2 so -1 creates low bits all 1 - if (!lastIndBufLen) //will write integral number of bufLen and lastIndBufLen one time to index file - lastIndBufLen= bufLen; - uint64_t indPos= lseek(fout,indFileLen-lastIndBufLen,ios::beg); - uint64_t blkBase= (indPos>>3) + firstBlock; //first entry in fposList is for block blkBase - //cout << "indPos= " << indPos << " blkBase= " << blkBase << '\n'; - fposList[bnum-blkBase]= filePos; //write filepos for block bnum + uint64_t ind_file_len= bnum<<3; //index file holds 8 bytes per block in blocks.log + uint64_t last_ind_buf_len= ind_file_len & (fpos_list_len-1); //fpos_list_len is a power of 2 so -1 creates low bits all 1 + if (!last_ind_buf_len) //will write integral number of bufLen and last_ind_buf_len one time to index file + last_ind_buf_len= bufLen; + uint64_t ind_pos= lseek(fout,ind_file_len-last_ind_buf_len,SEEK_SET); + uint64_t blk_base= (ind_pos>>3) + first_block; //first entry in fpos_list is for block blk_base + //cout << "ind_pos= " << ind_pos << " blk_base= " << blk_base << '\n'; + fpos_list[bnum-blk_base]= file_pos; //write filepos for block bnum for (;;) { - if (bnum==blkBase) { //if fposList is full - write(fout,(char*)fposList,lastIndBufLen); //write fposList to index file - if (indPos==0) { //if done writing index file - cout << "block " << setw(10) << bnum << " filePos " << setw(14) << filePos << '\n'; //last progress indicator - EOS_ASSERT( bnum == firstBlock, block_log_exception, "blocks.log does not contain consecutive block numbers" ); + if (bnum==blk_base) { //if fpos_list is full + write(fout,(char*)fpos_list,last_ind_buf_len); //write fpos_list to index file + if (ind_pos==0) { //if done writing index file + cout << "block " << setw(10) << bnum << " file_pos " << setw(14) << file_pos << '\n'; //last progress indicator + EOS_ASSERT( bnum == first_block, block_log_exception, "blocks.log does not contain consecutive block numbers" ); break; } - indPos-= fposListLen; - blkBase-= fposListLen>>3; - didread= lseek(fout,indPos,ios::beg); - EOS_ASSERT( didread==indPos, block_log_exception, "blocks.log seek fails" ); - lastIndBufLen= fposListLen; //from now on all writes to index file write a full fposList[] + ind_pos-= fpos_list_len; + blk_base-= fpos_list_len>>3; + did_read= lseek(fout,ind_pos,SEEK_SET); + EOS_ASSERT( did_read==ind_pos, block_log_exception, "blocks.log seek fails" ); + last_ind_buf_len= fpos_list_len; //from now on all writes to index file write a full fpos_list[] } - if (indexStart < 8) { //if block start is split across buf boundary + if (index_start < 8) { //if block start is split across buf boundary memcpy(buf+bufLen,buf,8); //copy portion at start of buf to past end of buf pos-= bufLen; //file position of buf - lseek(fin,pos,ios::beg); - didread= read(fin,buf,bufLen); //read next buf - EOS_ASSERT( didread==bufLen, block_log_exception, "blocks.log read fails" ); - indexStart+= bufLen; + lseek(fin,pos,SEEK_SET); + did_read= read(fin,buf,bufLen); //read next buf + EOS_ASSERT( did_read==bufLen, block_log_exception, "blocks.log read fails" ); + index_start+= bufLen; + } + --bnum; //now move index_start and index_end to prior block + index_end= index_start-8; 
//index in buf where block ends and block file position starts + file_pos= *(uint64_t*)(buf+index_end); //file pos of block start + if (file_pos >= last_file_pos) { //file_pos will decrease if linked list is not corrupt + cout << '\n'; + cout << "file pos for block " << bnum+1 << " is " << last_file_pos << '\n'; + cout << "file pos for block " << bnum << " is " << file_pos << '\n'; + cout << "The linked list of blocks in blocks.log should run from last block to first block in reverse order\n"; + EOS_ASSERT( file_pos()) { + if (vmap.at("trim-blocklog").as()) { uint32_t last= vmap.at("last").as(); if (last == std::numeric_limits::max()) { //if 'last' was not given on command line - std::cout << "'trim-block-log' does nothing unless specify 'last' block."; + std::cout << "'trim-block-blocklog' does nothing unless specify 'last' block."; return -1; } - return truncBlockLog(vmap.at("blocks-dir").as(), last); + return trunc_blocklog(vmap.at("blocks-dir").as(), last); } if (vmap.at("make-index").as()) { - string outFile{vmap.count("output-file")==0? string("blocks.index"): vmap.at("output-file").as().generic_string()}; - return makeIndex(vmap.at("blocks-dir").as(), outFile); + string out_file{vmap.count("output-file")==0? string("blocks.index"): vmap.at("output-file").as().generic_string()}; + return make_index(vmap.at("blocks-dir").as(), out_file); } blog.initialize(vmap); blog.read_log(); From 3ed180cac5e948b5dd127b3a260809e658634bc9 Mon Sep 17 00:00:00 2001 From: Steven Strand Date: Tue, 12 Mar 2019 19:07:02 -0400 Subject: [PATCH 0244/1648] changed packed attribute --- programs/eosio-blocklog/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 642417a4ddb..060a75c6e01 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -178,7 +178,7 @@ void blocklog::initialize(const variables_map& options) { } //struct used by trunc_blocklog() and make_index() to read first 18 bytes of a block from blocks.log -struct __attribute__((packed)) Block_Start { //first 18 bytes of each block (must match block_header) +struct __attribute__((__packed__)) Block_Start { //first 18 bytes of each block (must match block_header) block_timestamp_type timestamp; account_name prodname; uint16_t confirmed; From 98f156a39eb8a5d3bbcf09e6d023e47ed4ad4ddc Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 29 Mar 2019 00:03:42 -0500 Subject: [PATCH 0245/1648] Remaining changes from Steve's branch. GH #6884 --- programs/eosio-blocklog/main.cpp | 437 +++++++++++++++++++++++-------- 1 file changed, 332 insertions(+), 105 deletions(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 060a75c6e01..5be91a32651 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -39,7 +39,9 @@ struct blocklog { bool no_pretty_print; bool as_json_array; bool make_index; - uint32_t trim_block; + bool trim_log; + bool smoke_test; + bool help; }; void blocklog::read_log() { @@ -142,19 +144,21 @@ void blocklog::set_program_options(options_description& cli) "the location of the blocks directory (absolute path or relative to the current directory)") ("output-file,o", bpo::value(), "the file to write the output to (absolute or relative path). 
If not specified then output is to stdout.") - ("first,f", bpo::value(&first_block)->default_value(1), - "the first block number to log") + ("first,f", bpo::value(&first_block)->default_value(0), + "the first block number to log or the first to keep if trim-blocklog") ("last,l", bpo::value(&last_block)->default_value(std::numeric_limits::max()), - "the last block number (inclusive) to log") + "the last block number to log or the last to keep if trim-blocklog") ("no-pretty-print", bpo::bool_switch(&no_pretty_print)->default_value(false), "Do not pretty print the output. Useful if piping to jq to improve performance.") ("as-json-array", bpo::bool_switch(&as_json_array)->default_value(false), "Print out json blocks wrapped in json array (otherwise the output is free-standing json objects).") ("make-index", bpo::bool_switch(&make_index)->default_value(false), "Create blocks.index from blocks.log. Must give 'blocks-dir'. Give 'output-file' relative to blocks-dir (default is blocks.index).") - ("trim-blocklog", bpo::bool_switch(&make_index)->default_value(false), - "Trim blocks.log and blocks.index in place. Must give 'blocks-dir' and 'last' (else nothing is done).") - ("help,h", "Print this help message and exit.") + ("trim-blocklog", bpo::bool_switch(&trim_log)->default_value(false), + "Trim blocks.log and blocks.index. Must give 'blocks-dir' and 'first and/or 'last'.") + ("smoke-test", bpo::bool_switch(&smoke_test)->default_value(false), + "Quick test that blocks.log and blocks.index are well formed and agree with each other.") + ("help,h", bpo::bool_switch(&help)->default_value(false), "Print this help message and exit.") ; } @@ -177,95 +181,280 @@ void blocklog::initialize(const variables_map& options) { } -//struct used by trunc_blocklog() and make_index() to read first 18 bytes of a block from blocks.log -struct __attribute__((__packed__)) Block_Start { //first 18 bytes of each block (must match block_header) - block_timestamp_type timestamp; - account_name prodname; - uint16_t confirmed; - uint32_t blknum; //low 32 bits of previous blockid, is big endian block number of previous block -} blk_start; -static_assert( sizeof(Block_Start) == 18, "Update block_start if block_header changes"); +constexpr int blknum_offset{14}; //offset from start of block to 4 byte block number +//this offset is valid for version 1 and 2 blocklogs, version is checked before using blknum_offset -int trunc_blocklog(bfs::path block_dir, uint32_t n) { //n is last block to keep +//to derive blknum_offset==14 see block_header.hpp and note on disk struct is packed +// block_timestamp_type timestamp; //bytes 0:3 +// account_name producer; //bytes 4:11 +// uint16_t confirmed; //bytes 12:13 +// block_id_type previous; //bytes 14:45, low 4 bytes is big endian block number of previous block + +struct trim_data { //used by trim_blocklog_front(), trim_blocklog_end(), and smoke_test() + trim_data(bfs::path block_dir); + ~trim_data() { + close(blk_in); + close(ind_in); + } + void find_block_pos(uint32_t n); + std::string block_file_name, index_file_name; //full pathname for blocks.log and blocks.index + uint32_t version; //blocklog version (1 or 2) + uint32_t first_block, last_block; //first and last block in blocks.log + int blk_in, ind_in; //C style file descriptors for reading blocks.log and blocks.index + //we use low level file IO because it is distinctly faster than C++ filebuf or iostream + uint64_t index_pos; //filepos in blocks.index for block n, +8 for block n+1 + uint64_t fpos0, fpos1; //filepos in blocks.log for 
block n and block n+1 +}; + + +trim_data::trim_data(bfs::path block_dir) { using namespace std; - //read blocks.log to see if version 1 or 2 and get first block number - filebuf fin_blocks; - cout << "In directory " << block_dir << " will truncate blocks.log and blocks.index after block " << n << '\n'; - string block_file_name= (block_dir/"blocks.log").generic_string(); - fin_blocks.open(block_file_name, ios::in|ios::binary); - EOS_ASSERT( fin_blocks.is_open(), block_log_not_found, "cannot read ${file}", ("file",block_file_name) ); - uint32_t version=0, first_block; - fin_blocks.sgetn((char*)&version,sizeof(version)); + block_file_name= (block_dir/"blocks.log").generic_string(); + index_file_name= (block_dir/"blocks.index").generic_string(); + blk_in = open(block_file_name.c_str(), O_RDONLY); + EOS_ASSERT( blk_in>0, block_log_not_found, "cannot read file ${file}", ("file",block_file_name) ); + ind_in = open(index_file_name.c_str(), O_RDONLY); + EOS_ASSERT( ind_in>0, block_log_not_found, "cannot read file ${file}", ("file",index_file_name) ); + read(blk_in,(char*)&version,sizeof(version)); cout << "block log version= " << version << '\n'; EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); if (version == 1) first_block= 1; else - fin_blocks.sgetn((char*)&first_block,sizeof(first_block)); + read(blk_in,(char*)&first_block,sizeof(first_block)); cout << "first block= " << first_block << '\n'; - if (n <= first_block) { - cerr << n << " is before first block so nothing to do\n"; - return 2; - } - - //open blocks.index, get last_block and blocks.log position for block 'n' - filebuf fin_index; - string index_file_name= (block_dir/"blocks.index").generic_string(); - fin_index.open(index_file_name,ios::in|ios::binary); - EOS_ASSERT( fin_index.is_open(), block_index_not_found, "cannot read ${file}", ("file",index_file_name) ); - uint64_t file_end= fin_index.pubseekoff(0,ios::end,ios::in); - uint32_t last_block= file_end/sizeof(uint64_t); + uint64_t file_end= lseek(ind_in,0,SEEK_END); //get length of blocks.index (gives number of blocks) + last_block= first_block + file_end/sizeof(uint64_t) - 1; cout << "last block= " << last_block << '\n'; - if (n >= last_block) { - cerr << n << " is not before last block so nothing to do\n"; - return 2; - } - uint64_t index_pos= sizeof(uint64_t)*(n-first_block); - uint64_t pos= fin_index.pubseekoff(index_pos,ios::beg,ios::in); - EOS_ASSERT( pos==index_pos, block_log_exception, "cannot read blocks.index entry for block ${b}", ("b",n) ); - uint64_t fpos0, fpos1; //filepos of block n and block n+1, will read from blocks.index - fin_index.sgetn((char*)&fpos0,sizeof(fpos0)); - fin_index.sgetn((char*)&fpos1,sizeof(fpos1)); - fin_index.close(); +} + +void trim_data::find_block_pos(uint32_t n) { + //get file position of block n from blocks.index then confirm block n is found in blocks.log at that position + //sets fpos0 and fpos1, throws exception if block at fpos0 is not block n + using namespace std; + index_pos= sizeof(uint64_t)*(n-first_block); + uint64_t pos= lseek(ind_in,index_pos,SEEK_SET); + EOS_ASSERT( pos==index_pos, block_log_exception, "cannot seek to blocks.index entry for block ${b}", ("b",n) ); + read(ind_in,(char*)&fpos0,sizeof(fpos0)); //filepos of block n + read(ind_in,(char*)&fpos1,sizeof(fpos1)); //filepos of block n+1 cout << "According to blocks.index:\n"; cout << " block " << n << " starts at position " << fpos0 << '\n'; - cout << " block " << n+1 << " starts at position " << 
fpos1 << '\n'; - + cout << " block " << n+1; + if (n!=last_block) + cout << " starts at position " << fpos1 << '\n'; + else + cout << " is past end\n"; //read blocks.log and verify block number n is found at file position fpos0 - fin_blocks.pubseekoff(fpos0,ios::beg,ios::in); - fin_blocks.sgetn((char*)&blk_start,sizeof(blk_start)); //read first 18 bytes of block - fin_blocks.close(); - uint32_t bnum= endian_reverse_u32(blk_start.blknum)+1; //convert from big endian to little endian, add 1 since prior block + lseek(blk_in,fpos0+blknum_offset,SEEK_SET); + uint32_t prior_blknum; + read(blk_in,(char*)&prior_blknum,sizeof(prior_blknum)); //read bigendian block number of prior block + uint32_t bnum= endian_reverse_u32(prior_blknum)+1; //convert to little endian, add 1 since prior block cout << "At position " << fpos0 << " in blocks.log find block " << bnum << (bnum==n? " as expected\n": " - not good!\n"); EOS_ASSERT( bnum==n, block_log_exception, "blocks.index does not agree with blocks.log" ); - EOS_ASSERT( truncate(block_file_name.c_str(),fpos1)==0, block_log_exception, "truncate blocks.log fails"); - index_pos+= sizeof(uint64_t); //advance to after record for block n - EOS_ASSERT( truncate(index_file_name.c_str(),index_pos)==0, block_log_exception, "truncate blocks.index fails"); - cout << "blocks.log has been truncated to " << fpos1 << " bytes\n"; - cout << "blocks.index has been truncated to " << index_pos << " bytes\n"; +} + +int trim_blocklog_end(bfs::path block_dir, uint32_t n) { //n is last block to keep (remove later blocks) + using namespace std; + cout << "\nIn directory " << block_dir << " will trim all blocks after block " << n << " from blocks.log and blocks.index.\n"; + trim_data td(block_dir); + if (n < td.first_block) { + cerr << "All blocks are after block " << n << " so do nothing (trim_end would delete entire blocks.log)\n"; + return 1; + } + if (n >= td.last_block) { + cerr << "There are no blocks after block " << n << " so do nothing\n"; + return 2; + } + td.find_block_pos(n); + EOS_ASSERT( truncate(td.block_file_name.c_str(),td.fpos1)==0, block_log_exception, "truncate blocks.log fails"); + uint64_t index_end= td.index_pos+sizeof(uint64_t); //advance past record for block n + EOS_ASSERT( truncate(td.index_file_name.c_str(),index_end)==0, block_log_exception, "truncate blocks.index fails"); + cout << "blocks.log has been trimmed to " << td.fpos1 << " bytes\n"; + cout << "blocks.index has been trimmed to " << index_end << " bytes\n"; + return 0; +} + +int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first block to keep (remove prior blocks) + using namespace std; + cout << "\nIn directory " << block_dir << " will trim all blocks before block " << n << " from blocks.log and blocks.index.\n"; + trim_data td(block_dir); + if (n <= td.first_block) { + cerr << "There are no blocks before block " << n << " so do nothing\n"; + return 1; + } + if (n > td.last_block) { + cerr << "All blocks are before block " << n << " so do nothing (trim_front would delete entire blocks.log)\n"; + return 2; + } + td.find_block_pos(n); + + constexpr uint32_t buf_len{1U<<24}; //buf_len must be a power of 2 + auto buffer= make_unique(buf_len); //read big chunks of old blocks.log into this buffer + char* buf= buffer.get(); + + string block_out_filename= (block_dir/"blocks.out").generic_string(); + mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; //if create file the permissions will be 644 + int blk_out = open(block_out_filename.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); + 
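The comments just below spell out the two block log header layouts this rewrite depends on: a version-1 file has only the 4-byte version before the remaining 0x6e header bytes, while a version-2 file adds a 4-byte first block number and an 8-byte totem of 0xff, so its first block starts at offset 0x7e. Purely to make those byte positions concrete (this is not the patch's code, which writes straight to the output descriptor, and the helper name is assumed):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Assemble the 0x7e-byte version-2 prefix written ahead of the first kept block.
    //   0x00-0x03  version (2)
    //   0x04-0x07  first kept block number
    //   0x08-0x75  the 0x6e remaining header bytes copied from the old log
    //   0x76-0x7d  totem: eight 0xff bytes
    std::vector<char> make_v2_prefix_sketch(uint32_t first_kept_block,
                                            const char (&old_header_rest)[0x6e]) {
       std::vector<char> out(0x7e);
       uint32_t version = 2;
       std::memcpy(out.data() + 0x00, &version, sizeof(version));
       std::memcpy(out.data() + 0x04, &first_kept_block, sizeof(first_kept_block));
       std::memcpy(out.data() + 0x08, old_header_rest, 0x6e);
       std::memset(out.data() + 0x76, 0xff, 8);
       return out;
    }

Every file position later copied into the new log is then reduced by fpos0 - 0x7e, the number of bytes removed ahead of this fixed-size prefix.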
EOS_ASSERT( blk_out>0, block_log_not_found, "cannot write ${file}", ("file",block_out_filename) ); + + //in version 1 file: version number, no first block number, rest of header length 0x6e, at 0x72 first block + //in version 2 file: version number, first block number, rest of header length 0x6e, at 0x76 totem, at 0x7e first block + *(uint32_t*)buf= 2; + write(blk_out,buf,sizeof(uint32_t)); //write version number 2 + write(blk_out,(char*)&n,sizeof(n)); //write first block number + + lseek(td.blk_in,td.version==1? 4: 8,SEEK_SET) ; //position past version number and maybe block number + read(td.blk_in,buf,0x6e); //copy rest of header length 0x6e + memset(buf+0x6e,0xff,8); //totem is 8 bytes of 0xff + write(blk_out,buf,0x6e + 8); //write header and totem + + //pos_delta is the amount to subtract from every file position record in blocks.log because the blocks < n are removed + uint64_t pos_delta= td.fpos0 - 0x7e; //bytes removed from the blocklog + //bytes removed is file position of block n minus file position 'where file header ends' + //even if version 1 blocklog 'where file header ends' is where the new version 2 file header of length 0x7e ends + + //read big chunks of blocks.log into buf, update the file position records, then write to blk_out + uint64_t pos= lseek(td.blk_in,0,SEEK_END); //get blocks.log file length + uint32_t last_buf_len= (pos-td.fpos0 >= buf_len)? buf_len: pos-td.fpos0; //bytes to read from blk_in + pos= lseek(td.blk_in,-(uint64_t)last_buf_len,SEEK_END); //pos is where read last buf from blk_in + uint64_t did_read= read(td.blk_in,buf,last_buf_len); //read tail of blocks.log file into buf + cout << "seek blocks.log to " << pos << " read " << last_buf_len << " bytes\n";//debug + EOS_ASSERT( did_read==last_buf_len, block_log_exception, "blocks.log read fails" ); + + //prepare to write index_out_filename + uint64_t total_fpos_len= (td.last_block+1-n)*sizeof(uint64_t); + auto fpos_buffer= make_unique(buf_len); + uint64_t* fpos_list= fpos_buffer.get(); //list of file positions, periodically write to blocks.index + string index_out_filename= (block_dir/"index.out" ).generic_string(); + int ind_out = open(index_out_filename.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); + EOS_ASSERT( ind_out>0, block_log_not_found, "cannot write ${file}", ("file",index_out_filename) ); + uint64_t last_fpos_len= total_fpos_len & ((uint64_t)buf_len-1);//buf_len is a power of 2 so -1 creates low bits all 1 + if (!last_fpos_len) //will write integral number of buf_len and one time write last_fpos_len + last_fpos_len= buf_len; + uint32_t blk_base= td.last_block + 1 - (last_fpos_len>>3); //first entry in fpos_list is for block blk_base + cout << "******filling index buf blk_base " << blk_base << " last_fpos_len " << last_fpos_len << '\n';//debug + + //as we traverse linked list of blocks in buf (from end toward start), for each block we know this: + int32_t index_start; //buf index for block start + int32_t index_end; //buf index for block end == buf index for block start file position + uint64_t file_pos; //file position of block start + uint32_t bnum; //block number + + //some code here is repeated in the loop below but we do it twice so can print a status message before the loop starts + index_end= last_buf_len-8; //buf index where last block ends and the block file position starts + file_pos= *(uint64_t*)(buf+index_end); //file pos of block start + index_start= file_pos - pos; //buf index for block start + bnum= *(uint32_t*)(buf + index_start + blknum_offset); //block number of previous block (is 
big endian) + bnum= endian_reverse_u32(bnum)+1; //convert from big endian to little endian and add 1 + EOS_ASSERT( bnum==td.last_block, block_log_exception, "last block from blocks.index ${ind} != last block from blocks.log ${log}", ("ind", td.last_block) ("log", bnum) ); + + cout << '\n'; //print header for periodic update messages + cout << setw(10) << "block" << setw(16) << "old file pos" << setw(16) << "new file pos" << '\n'; + cout << setw(10) << bnum << setw(16) << file_pos << setw(16) << file_pos-pos_delta << '\n'; + uint64_t last_file_pos= file_pos+1; //used to check that file_pos is strictly decreasing + constexpr uint32_t fk{4096}; //buffer shift that keeps buf disk block aligned + + for (;;) { //invariant: at top of loop index_end and bnum are set and index_end is >= 0 + file_pos = *(uint64_t *) (buf + index_end); //get file pos of block start + if (file_pos >= last_file_pos) { //check that file_pos decreases + cout << '\n'; + cout << "file pos for block " << bnum + 1 << " is " << last_file_pos << '\n'; + cout << "file pos for block " << bnum << " is " << file_pos << '\n'; + EOS_ASSERT(file_pos < last_file_pos, block_log_exception, "blocks.log linked list of blocks is corrupt"); + } + last_file_pos = file_pos; + *(uint64_t *) (buf + index_end) = file_pos - pos_delta; //update file position in buf + fpos_list[bnum - blk_base] = file_pos - pos_delta; //save updated file position for new index file + index_start = file_pos - pos; //will go negative when pass first block in buf + + if ((bnum & 0xfffff) == 0) //periodic progress indicator + cout << setw(10) << bnum << setw(16) << file_pos << setw(16) << file_pos-pos_delta << '\n'; + + if (bnum == blk_base) { //if fpos_list is full write it to file + cout << "****** bnum=" << bnum << " blk_base= " << blk_base << " so write index buf seek to " << (blk_base - n) * sizeof(uint64_t) << " write len " << last_fpos_len << '\n';//debug + lseek(ind_out, (blk_base - n) * sizeof(uint64_t), SEEK_SET); + write(ind_out, (char *) fpos_list, last_fpos_len); //write fpos_list to index file + last_fpos_len = buf_len; + if (bnum == n) { //if done with all blocks >= block n + cout << setw(10) << bnum << setw(16) << file_pos << setw(16) << file_pos-pos_delta << '\n'; + EOS_ASSERT( index_start==0, block_log_exception, "block ${n} found with unexpected index_start ${s}", ("n",n) ("s",index_start)); + EOS_ASSERT( pos==td.fpos0, block_log_exception, "block ${n} found at unexpected file position ${p}", ("n",n) ("p",file_pos)); + lseek(blk_out, pos-pos_delta, SEEK_SET); + write(blk_out, buf, last_buf_len); + break; + } else { + blk_base -= (buf_len>>3); + cout << "******filling index buf blk_base " << blk_base << " fpos_len " << buf_len << '\n';//debug + } + } + + if (index_start <= 0) { //if buf is ready to write (all file pos are updated) + //cout << "index_start = " << index_start << " so write buf len " << last_buf_len << " bnum= " << bnum << '\n';//debug + EOS_ASSERT(file_pos>td.fpos0, block_log_exception, "reverse scan of blocks.log did not halt at block ${n}",("n",n)); + lseek(blk_out, pos-pos_delta, SEEK_SET); + write(blk_out, buf, last_buf_len); + last_buf_len= (pos-td.fpos0 > buf_len)? 
buf_len: pos-td.fpos0; + pos -= last_buf_len; + index_start += last_buf_len; + //cout << "seek blocks to " << pos << " read " << last_buf_len << " bytes\n";//debug + lseek(td.blk_in, pos, SEEK_SET); + did_read = read(td.blk_in, buf, last_buf_len); //read next buf from blocklog + EOS_ASSERT(did_read == last_buf_len, block_log_exception, "blocks.log read fails"); + } + + index_end = index_start - sizeof(uint64_t); //move to prior block in buf + --bnum; + + if (index_end < 0) { //the file pos record straddles buf boundary + cout << "**** index_end= " << index_end << " so save buf-4K len= " << last_buf_len-fk << " bnum= " << bnum << '\n';//debug + lseek(blk_out, pos-pos_delta+fk, SEEK_SET); + write(blk_out, buf + fk, last_buf_len - fk); //skip first 4K of buf write out the rest + last_buf_len= (pos-td.fpos0 >= buf_len-fk)? buf_len-fk: pos-td.fpos0; //bytes to read from blk_in + memcpy(buf + last_buf_len, buf, fk); //move first 4K of buf after zone will read + pos-= last_buf_len; + index_end += last_buf_len;cout << "seek blocks to " << pos << " read " << last_buf_len << " bytes before saved 4k\n";//debug + lseek(td.blk_in, pos, SEEK_SET); + did_read = read(td.blk_in, buf, last_buf_len); + EOS_ASSERT(did_read == last_buf_len, block_log_exception, "blocks.log read fails"); + last_buf_len+= fk; //bytes in buf will eventually write to disk + } + } + + close(blk_out); + close(ind_out); + string old_log= (block_dir/"old.log").generic_string(); + string old_ind= (block_dir/"old.index").generic_string(); + rename(td.block_file_name,old_log); + rename(td.index_file_name,old_ind); + rename(block_out_filename,td.block_file_name); + rename(index_out_filename,td.index_file_name); + cout << "The new blocks.log and blocks.index files contain blocks " << n << " through " << td.last_block << '\n'; + cout << "The original (before trim front) files have been renamed to old.log and old.index.\n"; return 0; } + int make_index(bfs::path block_dir, string out_file) { //this code makes blocks.index much faster than nodeos (in recent test 80 seconds vs. 
90 minutes) using namespace std; string block_file_name= (block_dir / "blocks.log").generic_string(); string out_file_name= (block_dir / out_file ).generic_string(); - cout << "Will read blocks.log file " << block_file_name << '\n'; - cout << "Will write blocks.index file " << out_file_name << '\n'; + cout << '\n'; + cout << "Will read existing blocks.log file " << block_file_name << '\n'; + cout << "Will write new blocks.index file " << out_file_name << '\n'; int fin = open(block_file_name.c_str(), O_RDONLY); EOS_ASSERT( fin>0, block_log_not_found, "cannot read block file ${file}", ("file",block_file_name) ); //will read big chunks of blocks.log into buf, will fill fpos_list with file positions before write to blocks.index - constexpr uint32_t bufLen{1U<<24}; //bufLen must be power of 2 >= largest possible block == one MByte - auto buffer= make_unique(bufLen+8); //first 8 bytes of prior buf are put past end of current buf + constexpr uint32_t buf_len{1U<<24}; //buf_len must be power of 2 >= largest possible block == one MByte + auto buffer= make_unique(buf_len+8); //can write up to 8 bytes past end of buf char* buf= buffer.get(); constexpr uint64_t fpos_list_len{1U<<22}; //length of fpos_list[] in bytes auto fpos_buffer= make_unique(fpos_list_len>>3); uint64_t* fpos_list= fpos_buffer.get(); //read blocks.log to see if version 1 or 2 and get first_blocknum (implicit 1 if version 1) - uint32_t version=0, first_block; + uint32_t version=0, first_block=0; read(fin,(char*)&version,sizeof(version)); cout << "block log version= " << version << '\n'; EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); @@ -276,39 +465,39 @@ int make_index(bfs::path block_dir, string out_file) { cout << "first block= " << first_block << '\n'; uint64_t pos= lseek(fin,0,SEEK_END); //get blocks.log file length - uint64_t last_buf_len= pos & ((uint64_t)bufLen-1); //bufLen is a power of 2 so -1 creates low bits all 1 - if (!last_buf_len) //will read integral number of bufLen and one time read last_buf_len - last_buf_len= bufLen; - pos= lseek(fin,-(uint64_t)last_buf_len,SEEK_END); - uint64_t did_read= read(fin,buf,last_buf_len); //read tail of file into buf + uint64_t last_buf_len= pos & ((uint64_t)buf_len-1); //buf_len is a power of 2 so -1 creates low bits all 1 + if (!last_buf_len) //will read integral number of buf_len and one time read last_buf_len + last_buf_len= buf_len; + pos= lseek(fin,-(uint64_t)last_buf_len,SEEK_END); //one time read last_buf_len + uint64_t did_read= read(fin,buf,last_buf_len); //read tail of blocks.log file into buf EOS_ASSERT( did_read==last_buf_len, block_log_exception, "blocks.log read fails" ); + //we traverse linked list of blocks in buf (from end to start), for each block we know this: uint32_t index_start; //buf index for block start uint32_t index_end; //buf index for block end == buf index for block start file position uint64_t file_pos; //file pos of block start - uint64_t last_file_pos; //used to check that file_pos is strictly decreasing - Block_Start* bst; //pointer to Block_Start + uint32_t bnum; //block number - index_start= last_buf_len; //pretend a block starts just past end of buf then read prior block - index_end= index_start-8; //index in buf where block ends and block file position starts - last_file_pos= file_pos= *(uint64_t*)(buf+index_end); //file pos of block start + index_end= last_buf_len-8; //index in buf where last block ends and block file position starts + file_pos= 
*(uint64_t*)(buf+index_end); //file pos of block start index_start= file_pos - pos; //buf index for block start - bst= (Block_Start*)(buf+index_start); //pointer to Block_Start - uint32_t last_block= endian_reverse_u32(bst->blknum); //convert from big endian to little endian - uint32_t bnum= ++last_block; //add 1 since took block number from prior block id - cout << "last block= " << last_block << '\n'; + bnum= *(uint32_t*)(buf + index_start + blknum_offset); //block number of previous block (is big endian) + bnum= endian_reverse_u32(bnum)+1; //convert from big endian to little endian and add 1 + cout << "last block= " << bnum << '\n'; cout << '\n'; cout << "block " << setw(10) << bnum << " file_pos " << setw(14) << file_pos << '\n'; //first progress indicator + uint64_t last_file_pos= file_pos; //used to check that file_pos is strictly decreasing + uint32_t end_block{bnum}; //save for message at end //we use low level file IO because it is distinctly faster than C++ filebuf or iostream mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; //if create file permissions will be 644 - int fout = open(out_file_name.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); //create if no exists, truncate if does exist + int fout = open(out_file_name.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); EOS_ASSERT( fout>0, block_index_not_found, "cannot write blocks.index" ); - uint64_t ind_file_len= bnum<<3; //index file holds 8 bytes per block in blocks.log + uint64_t ind_file_len= (bnum+1-first_block)<<3; //index file holds 8 bytes for each block in blocks.log uint64_t last_ind_buf_len= ind_file_len & (fpos_list_len-1); //fpos_list_len is a power of 2 so -1 creates low bits all 1 - if (!last_ind_buf_len) //will write integral number of bufLen and last_ind_buf_len one time to index file - last_ind_buf_len= bufLen; + if (!last_ind_buf_len) //will write integral number of buf_len and last_ind_buf_len one time to index file + last_ind_buf_len= buf_len; uint64_t ind_pos= lseek(fout,ind_file_len-last_ind_buf_len,SEEK_SET); uint64_t blk_base= (ind_pos>>3) + first_block; //first entry in fpos_list is for block blk_base //cout << "ind_pos= " << ind_pos << " blk_base= " << blk_base << '\n'; @@ -323,18 +512,18 @@ int make_index(bfs::path block_dir, string out_file) { break; } ind_pos-= fpos_list_len; - blk_base-= fpos_list_len>>3; + blk_base-= (fpos_list_len>>3); did_read= lseek(fout,ind_pos,SEEK_SET); EOS_ASSERT( did_read==ind_pos, block_log_exception, "blocks.log seek fails" ); last_ind_buf_len= fpos_list_len; //from now on all writes to index file write a full fpos_list[] } if (index_start < 8) { //if block start is split across buf boundary - memcpy(buf+bufLen,buf,8); //copy portion at start of buf to past end of buf - pos-= bufLen; //file position of buf + memcpy(buf+buf_len,buf,8); //copy portion at start of buf to past end of buf + pos-= buf_len; //file position of buf lseek(fin,pos,SEEK_SET); - did_read= read(fin,buf,bufLen); //read next buf - EOS_ASSERT( did_read==bufLen, block_log_exception, "blocks.log read fails" ); - index_start+= bufLen; + did_read= read(fin,buf,buf_len); //read next buf + EOS_ASSERT( did_read==buf_len, block_log_exception, "blocks.log read fails" ); + index_start+= buf_len; } --bnum; //now move index_start and index_end to prior block index_end= index_start-8; //index in buf where block ends and block file position starts @@ -348,26 +537,52 @@ int make_index(bfs::path block_dir, string out_file) { } last_file_pos= file_pos; if (file_pos < pos) { //if block start is in prior buf - pos-= bufLen; //file 
position of buf + pos-= buf_len; //file position of buf lseek(fin,pos,SEEK_SET); - did_read= read(fin,buf,bufLen); //read next buf - EOS_ASSERT( did_read==bufLen, block_log_exception, "blocks.log read fails" ); - index_end+= bufLen; + did_read= read(fin,buf,buf_len); //read next buf + EOS_ASSERT( did_read==buf_len, block_log_exception, "blocks.log read fails" ); + index_end+= buf_len; } index_start= file_pos - pos; //buf index for block start - fpos_list[bnum-blk_base]= file_pos; //write filepos for block bnum + fpos_list[bnum-blk_base]= file_pos; //write filepos for block bnum if ((bnum & 0xfffff) == 0) //periodically print a progress indicator cout << "block " << setw(10) << bnum << " file_pos " << setw(14) << file_pos << '\n'; } close(fout); close(fin); - cout << "\nwrote " << last_block << " file positions to " << out_file_name << '\n'; + cout << "\nwrote " << (end_block+1-first_block) << " file positions to " << out_file_name << '\n'; return 0; } -int main(int argc, char** argv) -{ +void smoke_test(bfs::path block_dir) { + using namespace std; + cout << "\nSmoke test of blocks.log and blocks.index in directory " << block_dir << '\n'; + trim_data td(block_dir); + lseek(td.blk_in,-sizeof(uint64_t),SEEK_END); //get last_block from blocks.log, compare to from blocks.index + uint64_t file_pos; + read(td.blk_in,&file_pos,sizeof(uint64_t)); + lseek(td.blk_in,file_pos+blknum_offset,SEEK_SET); + uint32_t bnum; + read(td.blk_in,&bnum,sizeof(uint32_t)); + bnum= endian_reverse_u32(bnum)+1; //convert from big endian to little endian and add 1 + EOS_ASSERT( td.last_block==bnum, block_log_exception, "blocks.log says last block is ${lb} which disagrees with blocks.index", ("lb",bnum) ); + cout << "blocks.log and blocks.index agree on number of blocks\n"; + uint32_t delta= (td.last_block+8-td.first_block)>>3; + if (delta<1) + delta= 1; + for (uint32_t n= td.first_block; ; n+= delta) { + if (n>td.last_block) + n= td.last_block; + cout << '\n'; + td.find_block_pos(n); //check block 'n' is where blocks.index says + if (n==td.last_block) + break; + } + cout << "\nno problems found\n"; //if get here there were no exceptions +} + +int main(int argc, char** argv) { std::ios::sync_with_stdio(false); // for potential performance boost for large block log files options_description cli ("eosio-blocklog command line options"); try { @@ -376,22 +591,34 @@ int main(int argc, char** argv) variables_map vmap; bpo::store(bpo::parse_command_line(argc, argv, cli), vmap); bpo::notify(vmap); - if (vmap.count("help") > 0) { - cli.print(std::cerr); - return 0; + if (blog.help) { + cli.print(std::cerr); + return 0; + } + if (blog.smoke_test) { + smoke_test(vmap.at("blocks-dir").as()); + return 0; } - if (vmap.at("trim-blocklog").as()) { - uint32_t last= vmap.at("last").as(); - if (last == std::numeric_limits::max()) { //if 'last' was not given on command line - std::cout << "'trim-block-blocklog' does nothing unless specify 'last' block."; + if (blog.trim_log) { + if (blog.first_block==0 && blog.last_block==std::numeric_limits::max()) { + std::cerr << "trim-blocklog does nothing unless specify first and/or last block."; return -1; } - return trunc_blocklog(vmap.at("blocks-dir").as(), last); + if (blog.last_block != std::numeric_limits::max()) { + if (trim_blocklog_end(vmap.at("blocks-dir").as(), blog.last_block) != 0) + return -1; + } + if (blog.first_block != 0) { + if (trim_blocklog_front(vmap.at("blocks-dir").as(), blog.first_block) != 0) + return -1; + } + return 0; } - if (vmap.at("make-index").as()) { + if 
(blog.make_index) { string out_file{vmap.count("output-file")==0? string("blocks.index"): vmap.at("output-file").as().generic_string()}; return make_index(vmap.at("blocks-dir").as(), out_file); } + //else print blocks.log as JSON blog.initialize(vmap); blog.read_log(); } catch( const fc::exception& e ) { From dea42ccb4b8f4e0ea6e63e03f5533ed9a7f4363e Mon Sep 17 00:00:00 2001 From: "johnsonb@objectcomputing.com" Date: Thu, 25 Apr 2019 15:34:45 -0500 Subject: [PATCH 0246/1648] Added class to report how long each task takes. --- programs/eosio-blocklog/main.cpp | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 5be91a32651..8232ff9059d 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -18,6 +18,8 @@ #include #include +#include + using namespace eosio::chain; namespace bfs = boost::filesystem; namespace bpo = boost::program_options; @@ -44,7 +46,21 @@ struct blocklog { bool help; }; +struct report_time { + report_time() + : _start(std::chrono::high_resolution_clock::now()) { + } + + void report() { + const auto duration = std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - _start).count() / 1000; + ilog("blocklog action took ${t} msec", ("t",duration)); + } + + const std::chrono::high_resolution_clock::time_point _start; +}; + void blocklog::read_log() { + report_time rt; block_log block_logger(blocks_dir); const auto end = block_logger.read_head(); EOS_ASSERT( end, block_log_exception, "No blocks found in block log" ); @@ -135,6 +151,7 @@ void blocklog::read_log() { } if (as_json_array) *out << "]"; + rt.report(); } void blocklog::set_program_options(options_description& cli) @@ -208,6 +225,7 @@ struct trim_data { //used by trim_blocklog_front(), trim_blocklog_end trim_data::trim_data(bfs::path block_dir) { + report_time rt; using namespace std; block_file_name= (block_dir/"blocks.log").generic_string(); index_file_name= (block_dir/"blocks.index").generic_string(); @@ -226,11 +244,13 @@ trim_data::trim_data(bfs::path block_dir) { uint64_t file_end= lseek(ind_in,0,SEEK_END); //get length of blocks.index (gives number of blocks) last_block= first_block + file_end/sizeof(uint64_t) - 1; cout << "last block= " << last_block << '\n'; + rt.report(); } void trim_data::find_block_pos(uint32_t n) { //get file position of block n from blocks.index then confirm block n is found in blocks.log at that position //sets fpos0 and fpos1, throws exception if block at fpos0 is not block n + report_time rt; using namespace std; index_pos= sizeof(uint64_t)*(n-first_block); uint64_t pos= lseek(ind_in,index_pos,SEEK_SET); @@ -251,9 +271,11 @@ void trim_data::find_block_pos(uint32_t n) { uint32_t bnum= endian_reverse_u32(prior_blknum)+1; //convert to little endian, add 1 since prior block cout << "At position " << fpos0 << " in blocks.log find block " << bnum << (bnum==n? 
" as expected\n": " - not good!\n"); EOS_ASSERT( bnum==n, block_log_exception, "blocks.index does not agree with blocks.log" ); + rt.report(); } int trim_blocklog_end(bfs::path block_dir, uint32_t n) { //n is last block to keep (remove later blocks) + report_time rt; using namespace std; cout << "\nIn directory " << block_dir << " will trim all blocks after block " << n << " from blocks.log and blocks.index.\n"; trim_data td(block_dir); @@ -271,10 +293,12 @@ int trim_blocklog_end(bfs::path block_dir, uint32_t n) { //n is last block EOS_ASSERT( truncate(td.index_file_name.c_str(),index_end)==0, block_log_exception, "truncate blocks.index fails"); cout << "blocks.log has been trimmed to " << td.fpos1 << " bytes\n"; cout << "blocks.index has been trimmed to " << index_end << " bytes\n"; + rt.report(); return 0; } int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first block to keep (remove prior blocks) + report_time rt; using namespace std; cout << "\nIn directory " << block_dir << " will trim all blocks before block " << n << " from blocks.log and blocks.index.\n"; trim_data td(block_dir); @@ -430,11 +454,13 @@ int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first b rename(index_out_filename,td.index_file_name); cout << "The new blocks.log and blocks.index files contain blocks " << n << " through " << td.last_block << '\n'; cout << "The original (before trim front) files have been renamed to old.log and old.index.\n"; + rt.report(); return 0; } int make_index(bfs::path block_dir, string out_file) { + report_time rt; //this code makes blocks.index much faster than nodeos (in recent test 80 seconds vs. 90 minutes) using namespace std; string block_file_name= (block_dir / "blocks.log").generic_string(); @@ -552,6 +578,7 @@ int make_index(bfs::path block_dir, string out_file) { close(fout); close(fin); cout << "\nwrote " << (end_block+1-first_block) << " file positions to " << out_file_name << '\n'; + rt.report(); return 0; } From 367876c2b90282bd1130a23b133a9ec0dd342889 Mon Sep 17 00:00:00 2001 From: "johnsonb@objectcomputing.com" Date: Thu, 25 Apr 2019 21:53:41 -0500 Subject: [PATCH 0247/1648] Converted from posix methods to standard C API methods. 
--- programs/eosio-blocklog/main.cpp | 429 ++++++++++++++++++------------- 1 file changed, 245 insertions(+), 184 deletions(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 8232ff9059d..7d645485ed8 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -210,14 +210,15 @@ constexpr int blknum_offset{14}; //offset from start of blo struct trim_data { //used by trim_blocklog_front(), trim_blocklog_end(), and smoke_test() trim_data(bfs::path block_dir); ~trim_data() { - close(blk_in); - close(ind_in); + fclose(blk_in); + fclose(ind_in); } void find_block_pos(uint32_t n); - std::string block_file_name, index_file_name; //full pathname for blocks.log and blocks.index + bfs::path block_file_name, index_file_name; //full pathname for blocks.log and blocks.index uint32_t version; //blocklog version (1 or 2) uint32_t first_block, last_block; //first and last block in blocks.log - int blk_in, ind_in; //C style file descriptors for reading blocks.log and blocks.index + FILE* blk_in; //C style files for reading blocks.log and blocks.index + FILE* ind_in; //C style files for reading blocks.log and blocks.index //we use low level file IO because it is distinctly faster than C++ filebuf or iostream uint64_t index_pos; //filepos in blocks.index for block n, +8 for block n+1 uint64_t fpos0, fpos1; //filepos in blocks.log for block n and block n+1 @@ -227,22 +228,27 @@ struct trim_data { //used by trim_blocklog_front(), trim_blocklog_end trim_data::trim_data(bfs::path block_dir) { report_time rt; using namespace std; - block_file_name= (block_dir/"blocks.log").generic_string(); - index_file_name= (block_dir/"blocks.index").generic_string(); - blk_in = open(block_file_name.c_str(), O_RDONLY); - EOS_ASSERT( blk_in>0, block_log_not_found, "cannot read file ${file}", ("file",block_file_name) ); - ind_in = open(index_file_name.c_str(), O_RDONLY); - EOS_ASSERT( ind_in>0, block_log_not_found, "cannot read file ${file}", ("file",index_file_name) ); - read(blk_in,(char*)&version,sizeof(version)); + block_file_name = block_dir / "blocks.log"; + blk_in = fopen(block_file_name.c_str(), "r"); + EOS_ASSERT( blk_in != nullptr, block_log_not_found, "cannot read file ${file}", ("file",block_file_name.string()) ); + ind_in = fopen(index_file_name.c_str(), "r"); + EOS_ASSERT( ind_in != nullptr, block_log_not_found, "cannot read file ${file}", ("file",index_file_name.string()) ); + auto size = fread((void*)&version,sizeof(version), 1, blk_in); + EOS_ASSERT( size == 1, block_log_unsupported_version, "invalid format for file ${file}", ("file",block_file_name.string())); cout << "block log version= " << version << '\n'; - EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); + EOS_ASSERT( version == 1 || version == 2, block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); if (version == 1) - first_block= 1; - else - read(blk_in,(char*)&first_block,sizeof(first_block)); + first_block = 1; + else { + size = fread((void *) &first_block, sizeof(first_block), 1, blk_in); + EOS_ASSERT(size == 1, block_log_exception, "invalid format for file ${file}", + ("file", block_file_name.string())); + } cout << "first block= " << first_block << '\n'; - uint64_t file_end= lseek(ind_in,0,SEEK_END); //get length of blocks.index (gives number of blocks) - last_block= first_block + file_end/sizeof(uint64_t) - 1; + const auto status = fseek(ind_in, 0, SEEK_END); //get 
length of blocks.index (gives number of blocks) + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} end", ("file", index_file_name.string()) ); + const uint64_t file_end = ftell(ind_in); //get length of blocks.index (gives number of blocks) + last_block = first_block + file_end/sizeof(uint64_t) - 1; cout << "last block= " << last_block << '\n'; rt.report(); } @@ -252,33 +258,44 @@ void trim_data::find_block_pos(uint32_t n) { //sets fpos0 and fpos1, throws exception if block at fpos0 is not block n report_time rt; using namespace std; - index_pos= sizeof(uint64_t)*(n-first_block); - uint64_t pos= lseek(ind_in,index_pos,SEEK_SET); - EOS_ASSERT( pos==index_pos, block_log_exception, "cannot seek to blocks.index entry for block ${b}", ("b",n) ); - read(ind_in,(char*)&fpos0,sizeof(fpos0)); //filepos of block n - read(ind_in,(char*)&fpos1,sizeof(fpos1)); //filepos of block n+1 + index_pos = sizeof(uint64_t) * (n - first_block); + auto status = fseek(ind_in, index_pos, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file for block ${b}", ("file", index_file_name.string())("pos", index_pos)("b",n) ); + const uint64_t pos = ftell(ind_in); + EOS_ASSERT( pos == index_pos, block_log_exception, "cannot seek to ${file} entry for block ${b}", ("file", index_file_name.string())("b",n) ); + auto size = fread((void*)&fpos0, sizeof(fpos0), 1, ind_in); //filepos of block n + EOS_ASSERT( size == 1, block_log_exception, "cannot read ${file} entry for block ${b}", ("file", index_file_name.string())("b",n) ); + size = fread((void*)&fpos1,sizeof(fpos1), 1, ind_in); //filepos of block n+1 + EOS_ASSERT( size == 1, block_log_exception, "cannot read ${file} entry for block ${b}", ("file", index_file_name.string())("b",n + 1) ); + cout << "According to blocks.index:\n"; cout << " block " << n << " starts at position " << fpos0 << '\n'; - cout << " block " << n+1; + cout << " block " << n + 1; + if (n!=last_block) cout << " starts at position " << fpos1 << '\n'; else cout << " is past end\n"; + //read blocks.log and verify block number n is found at file position fpos0 - lseek(blk_in,fpos0+blknum_offset,SEEK_SET); + status = fseek(blk_in, fpos0 + blknum_offset, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", block_file_name.string())("pos", fpos0 + blknum_offset) ); + const uint64_t block_offset_pos = ftell(blk_in); + EOS_ASSERT( block_offset_pos == fpos0 + blknum_offset, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", block_file_name.string())("pos", fpos0 + blknum_offset) ); uint32_t prior_blknum; - read(blk_in,(char*)&prior_blknum,sizeof(prior_blknum)); //read bigendian block number of prior block - uint32_t bnum= endian_reverse_u32(prior_blknum)+1; //convert to little endian, add 1 since prior block - cout << "At position " << fpos0 << " in blocks.log find block " << bnum << (bnum==n? " as expected\n": " - not good!\n"); - EOS_ASSERT( bnum==n, block_log_exception, "blocks.index does not agree with blocks.log" ); + size = fread((void*)&prior_blknum, sizeof(prior_blknum), 1, blk_in); //read bigendian block number of prior block + EOS_ASSERT( size == 1, block_log_exception, "cannot read prior block"); + const uint32_t bnum = endian_reverse_u32(prior_blknum) + 1; //convert to little endian, add 1 since prior block + cout << "At position " << fpos0 << " in " << index_file_name << " find block " << bnum << (bnum == n ? 
" as expected\n": " - not good!\n"); + EOS_ASSERT( bnum == n, block_log_exception, "${index} does not agree with ${blocks}", ("index", index_file_name.string())("blocks", block_file_name.string()) ); rt.report(); } int trim_blocklog_end(bfs::path block_dir, uint32_t n) { //n is last block to keep (remove later blocks) report_time rt; using namespace std; - cout << "\nIn directory " << block_dir << " will trim all blocks after block " << n << " from blocks.log and blocks.index.\n"; trim_data td(block_dir); + cout << "\nIn directory " << block_dir << " will trim all blocks after block " << n << " from " << td.block_file_name << " and " << td.index_file_name << ".\n"; if (n < td.first_block) { cerr << "All blocks are after block " << n << " so do nothing (trim_end would delete entire blocks.log)\n"; return 1; @@ -288,10 +305,9 @@ int trim_blocklog_end(bfs::path block_dir, uint32_t n) { //n is last block return 2; } td.find_block_pos(n); - EOS_ASSERT( truncate(td.block_file_name.c_str(),td.fpos1)==0, block_log_exception, "truncate blocks.log fails"); - uint64_t index_end= td.index_pos+sizeof(uint64_t); //advance past record for block n - EOS_ASSERT( truncate(td.index_file_name.c_str(),index_end)==0, block_log_exception, "truncate blocks.index fails"); - cout << "blocks.log has been trimmed to " << td.fpos1 << " bytes\n"; + bfs::resize_file(td.block_file_name, td.fpos1); + uint64_t index_end= td.index_pos + sizeof(uint64_t); //advance past record for block n + bfs::resize_file(td.index_file_name, index_end); cout << "blocks.index has been trimmed to " << index_end << " bytes\n"; rt.report(); return 0; @@ -313,49 +329,61 @@ int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first b td.find_block_pos(n); constexpr uint32_t buf_len{1U<<24}; //buf_len must be a power of 2 - auto buffer= make_unique(buf_len); //read big chunks of old blocks.log into this buffer - char* buf= buffer.get(); + auto buffer = make_unique(buf_len); //read big chunks of old blocks.log into this buffer + char* buf = buffer.get(); - string block_out_filename= (block_dir/"blocks.out").generic_string(); - mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; //if create file the permissions will be 644 - int blk_out = open(block_out_filename.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); - EOS_ASSERT( blk_out>0, block_log_not_found, "cannot write ${file}", ("file",block_out_filename) ); + bfs::path block_out_filename = block_dir / "blocks.out"; + FILE* blk_out = fopen(block_out_filename.c_str(), "w"); + EOS_ASSERT( blk_out != nullptr, block_log_not_found, "cannot write ${file}", ("file", block_out_filename.string()) ); //in version 1 file: version number, no first block number, rest of header length 0x6e, at 0x72 first block //in version 2 file: version number, first block number, rest of header length 0x6e, at 0x76 totem, at 0x7e first block - *(uint32_t*)buf= 2; - write(blk_out,buf,sizeof(uint32_t)); //write version number 2 - write(blk_out,(char*)&n,sizeof(n)); //write first block number - - lseek(td.blk_in,td.version==1? 
4: 8,SEEK_SET) ; //position past version number and maybe block number - read(td.blk_in,buf,0x6e); //copy rest of header length 0x6e - memset(buf+0x6e,0xff,8); //totem is 8 bytes of 0xff - write(blk_out,buf,0x6e + 8); //write header and totem + *(uint32_t*)buf = 2; + auto size = fwrite((void*)buf, sizeof(uint32_t), 1, blk_out); //write version number 2 + EOS_ASSERT( size == 1, block_log_exception, "blocks.out write fails" ); + size = fwrite((void*)&n, sizeof(n), 1, blk_out); //write first block number + EOS_ASSERT( size == 1, block_log_exception, "blocks.out write fails" ); + + const auto past_version_offset = (td.version == 1) ? 4 : 8; + auto status = fseek(td.blk_in, past_version_offset, SEEK_SET) ; //position past version number and maybe block number + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", td.block_file_name.string())("pos", past_version_offset) ); + const auto header_len = 0x6e; + size = fread((void*)buf, header_len, 1, td.blk_in); + EOS_ASSERT( size == 1, block_log_exception, "blocks.log read fails" ); + const auto totem_len = 8; //copy rest of header length 0x6e + memset(buf + header_len, 0xff, totem_len); //totem is 8 bytes of 0xff + size = fwrite(buf, header_len + totem_len, 1, blk_out); //write header and totem + EOS_ASSERT( size == 1, block_log_exception, "blocks.out write fails" ); //pos_delta is the amount to subtract from every file position record in blocks.log because the blocks < n are removed - uint64_t pos_delta= td.fpos0 - 0x7e; //bytes removed from the blocklog + uint64_t pos_delta = td.fpos0 - 0x7e; //bytes removed from the blocklog //bytes removed is file position of block n minus file position 'where file header ends' //even if version 1 blocklog 'where file header ends' is where the new version 2 file header of length 0x7e ends //read big chunks of blocks.log into buf, update the file position records, then write to blk_out - uint64_t pos= lseek(td.blk_in,0,SEEK_END); //get blocks.log file length - uint32_t last_buf_len= (pos-td.fpos0 >= buf_len)? buf_len: pos-td.fpos0; //bytes to read from blk_in - pos= lseek(td.blk_in,-(uint64_t)last_buf_len,SEEK_END); //pos is where read last buf from blk_in - uint64_t did_read= read(td.blk_in,buf,last_buf_len); //read tail of blocks.log file into buf - cout << "seek blocks.log to " << pos << " read " << last_buf_len << " bytes\n";//debug - EOS_ASSERT( did_read==last_buf_len, block_log_exception, "blocks.log read fails" ); + status = fseek(td.blk_in, 0, SEEK_END); //get blocks.log file length + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", td.block_file_name.string())("pos", 0) ); + const uint64_t end = ftell(td.blk_in); + uint32_t last_buf_len = (end - td.fpos0 >= buf_len)? 
buf_len : end - td.fpos0; //bytes to read from blk_in + status = fseek(td.blk_in, -(uint64_t)last_buf_len, SEEK_END); //pos is where read last buf from blk_in + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", td.block_file_name.string())("pos", last_buf_len) ); + uint64_t pos = ftell(td.blk_in); + EOS_ASSERT( pos == end - last_buf_len, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", td.block_file_name.string())("pos", last_buf_len) ); + uint64_t did_read = fread((void*)buf, last_buf_len, 1, td.blk_in); //read tail of blocks.log file into buf + cout << "seek " << td.block_file_name << " to " << pos << " read " << last_buf_len << " bytes\n";//debug + EOS_ASSERT( did_read == 1, block_log_exception, "${file} read fails", ("file", td.block_file_name.string()) ); //prepare to write index_out_filename - uint64_t total_fpos_len= (td.last_block+1-n)*sizeof(uint64_t); - auto fpos_buffer= make_unique(buf_len); - uint64_t* fpos_list= fpos_buffer.get(); //list of file positions, periodically write to blocks.index - string index_out_filename= (block_dir/"index.out" ).generic_string(); - int ind_out = open(index_out_filename.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); - EOS_ASSERT( ind_out>0, block_log_not_found, "cannot write ${file}", ("file",index_out_filename) ); - uint64_t last_fpos_len= total_fpos_len & ((uint64_t)buf_len-1);//buf_len is a power of 2 so -1 creates low bits all 1 + uint64_t total_fpos_len = (td.last_block + 1 - n) * sizeof(uint64_t); + auto fpos_buffer = make_unique(buf_len); + uint64_t* fpos_list = fpos_buffer.get(); //list of file positions, periodically write to blocks.index + bfs::path index_out_filename = block_dir / "index.out"; + FILE* ind_out = fopen(index_out_filename.c_str(), "w"); + EOS_ASSERT( ind_out != nullptr, block_log_not_found, "cannot write ${file}", ("file",index_out_filename.string()) ); + uint64_t last_fpos_len = total_fpos_len & ((uint64_t)buf_len - 1);//buf_len is a power of 2 so -1 creates low bits all 1 if (!last_fpos_len) //will write integral number of buf_len and one time write last_fpos_len - last_fpos_len= buf_len; - uint32_t blk_base= td.last_block + 1 - (last_fpos_len>>3); //first entry in fpos_list is for block blk_base + last_fpos_len = buf_len; + uint32_t blk_base = td.last_block + 1 - (last_fpos_len >> 3); //first entry in fpos_list is for block blk_base cout << "******filling index buf blk_base " << blk_base << " last_fpos_len " << last_fpos_len << '\n';//debug //as we traverse linked list of blocks in buf (from end toward start), for each block we know this: @@ -365,17 +393,17 @@ int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first b uint32_t bnum; //block number //some code here is repeated in the loop below but we do it twice so can print a status message before the loop starts - index_end= last_buf_len-8; //buf index where last block ends and the block file position starts - file_pos= *(uint64_t*)(buf+index_end); //file pos of block start - index_start= file_pos - pos; //buf index for block start - bnum= *(uint32_t*)(buf + index_start + blknum_offset); //block number of previous block (is big endian) - bnum= endian_reverse_u32(bnum)+1; //convert from big endian to little endian and add 1 - EOS_ASSERT( bnum==td.last_block, block_log_exception, "last block from blocks.index ${ind} != last block from blocks.log ${log}", ("ind", td.last_block) ("log", bnum) ); + index_end = last_buf_len - 8; //buf index where last block ends and the block 
file position starts + file_pos = *(uint64_t*)(buf + index_end); //file pos of block start + index_start = file_pos - pos; //buf index for block start + bnum = *(uint32_t*)(buf + index_start + blknum_offset); //block number of previous block (is big endian) + bnum = endian_reverse_u32(bnum) + 1; //convert from big endian to little endian and add 1 + EOS_ASSERT( bnum == td.last_block, block_log_exception, "last block from ${index} ${ind} != last block from ${block} ${log}", ("index", td.index_file_name.string())("ind", td.last_block)("block", td.block_file_name.string())("log", bnum) ); cout << '\n'; //print header for periodic update messages cout << setw(10) << "block" << setw(16) << "old file pos" << setw(16) << "new file pos" << '\n'; cout << setw(10) << bnum << setw(16) << file_pos << setw(16) << file_pos-pos_delta << '\n'; - uint64_t last_file_pos= file_pos+1; //used to check that file_pos is strictly decreasing + uint64_t last_file_pos = file_pos + 1; //used to check that file_pos is strictly decreasing constexpr uint32_t fk{4096}; //buffer shift that keeps buf disk block aligned for (;;) { //invariant: at top of loop index_end and bnum are set and index_end is >= 0 @@ -384,7 +412,7 @@ int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first b cout << '\n'; cout << "file pos for block " << bnum + 1 << " is " << last_file_pos << '\n'; cout << "file pos for block " << bnum << " is " << file_pos << '\n'; - EOS_ASSERT(file_pos < last_file_pos, block_log_exception, "blocks.log linked list of blocks is corrupt"); + EOS_ASSERT(file_pos < last_file_pos, block_log_exception, "${file} linked list of blocks is corrupt", ("file", td.index_file_name.string()) ); } last_file_pos = file_pos; *(uint64_t *) (buf + index_end) = file_pos - pos_delta; //update file position in buf @@ -396,15 +424,19 @@ int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first b if (bnum == blk_base) { //if fpos_list is full write it to file cout << "****** bnum=" << bnum << " blk_base= " << blk_base << " so write index buf seek to " << (blk_base - n) * sizeof(uint64_t) << " write len " << last_fpos_len << '\n';//debug - lseek(ind_out, (blk_base - n) * sizeof(uint64_t), SEEK_SET); - write(ind_out, (char *) fpos_list, last_fpos_len); //write fpos_list to index file + status = fseek(ind_out, (blk_base - n) * sizeof(uint64_t), SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", index_out_filename.string())("pos", last_buf_len) ); + size = fwrite((void *) fpos_list, last_fpos_len, 1, ind_out); //write fpos_list to index file + EOS_ASSERT( size == 1, block_log_exception, "${file} write fails", ("file", index_out_filename.string()) ); last_fpos_len = buf_len; if (bnum == n) { //if done with all blocks >= block n cout << setw(10) << bnum << setw(16) << file_pos << setw(16) << file_pos-pos_delta << '\n'; EOS_ASSERT( index_start==0, block_log_exception, "block ${n} found with unexpected index_start ${s}", ("n",n) ("s",index_start)); EOS_ASSERT( pos==td.fpos0, block_log_exception, "block ${n} found at unexpected file position ${p}", ("n",n) ("p",file_pos)); - lseek(blk_out, pos-pos_delta, SEEK_SET); - write(blk_out, buf, last_buf_len); + status = fseek(blk_out, pos - pos_delta, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", index_out_filename.string())("pos", pos - pos_delta) ); + size = fwrite((void*)buf, last_buf_len, 1, blk_out); + EOS_ASSERT( size == 1, 
block_log_exception, "${file} write fails", ("file", block_out_filename.string()) ); break; } else { blk_base -= (buf_len>>3); @@ -414,16 +446,19 @@ int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first b if (index_start <= 0) { //if buf is ready to write (all file pos are updated) //cout << "index_start = " << index_start << " so write buf len " << last_buf_len << " bnum= " << bnum << '\n';//debug - EOS_ASSERT(file_pos>td.fpos0, block_log_exception, "reverse scan of blocks.log did not halt at block ${n}",("n",n)); - lseek(blk_out, pos-pos_delta, SEEK_SET); - write(blk_out, buf, last_buf_len); - last_buf_len= (pos-td.fpos0 > buf_len)? buf_len: pos-td.fpos0; + EOS_ASSERT(file_pos > td.fpos0, block_log_exception, "reverse scan of ${file} did not halt at block ${n}", ("file", td.block_file_name.string())("n",n)); + status = fseek(blk_out, pos - pos_delta, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", block_out_filename.string())("pos", pos - pos_delta) ); + size = fwrite((void*)buf, last_buf_len, 1, blk_out); + EOS_ASSERT( size == 1, block_log_exception, "${file} write fails", ("file", block_out_filename.string()) ); + last_buf_len = (pos - td.fpos0 > buf_len) ? buf_len : pos - td.fpos0; pos -= last_buf_len; index_start += last_buf_len; //cout << "seek blocks to " << pos << " read " << last_buf_len << " bytes\n";//debug - lseek(td.blk_in, pos, SEEK_SET); - did_read = read(td.blk_in, buf, last_buf_len); //read next buf from blocklog - EOS_ASSERT(did_read == last_buf_len, block_log_exception, "blocks.log read fails"); + status = fseek(td.blk_in, pos, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", td.block_file_name.string())("pos", pos) ); + did_read = fread((void*)buf, last_buf_len, 1, td.blk_in); //read next buf from blocklog + EOS_ASSERT(did_read == 1, block_log_exception, "${file} read fails", ("file", td.index_file_name.string())); } index_end = index_start - sizeof(uint64_t); //move to prior block in buf @@ -431,72 +466,83 @@ int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first b if (index_end < 0) { //the file pos record straddles buf boundary cout << "**** index_end= " << index_end << " so save buf-4K len= " << last_buf_len-fk << " bnum= " << bnum << '\n';//debug - lseek(blk_out, pos-pos_delta+fk, SEEK_SET); - write(blk_out, buf + fk, last_buf_len - fk); //skip first 4K of buf write out the rest - last_buf_len= (pos-td.fpos0 >= buf_len-fk)? buf_len-fk: pos-td.fpos0; //bytes to read from blk_in + status = fseek(blk_out, pos - pos_delta + fk, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", index_out_filename.string())("pos", pos - pos_delta + fk) ); + size = fwrite((void*)(buf + fk), last_buf_len - fk, 1, blk_out); //skip first 4K of buf write out the rest + EOS_ASSERT( size == 1, block_log_exception, "${file} write fails", ("file", index_out_filename.string()) ); + last_buf_len = (pos - td.fpos0 >= buf_len - fk) ? 
buf_len - fk : pos - td.fpos0; //bytes to read from blk_in memcpy(buf + last_buf_len, buf, fk); //move first 4K of buf after zone will read pos-= last_buf_len; - index_end += last_buf_len;cout << "seek blocks to " << pos << " read " << last_buf_len << " bytes before saved 4k\n";//debug - lseek(td.blk_in, pos, SEEK_SET); - did_read = read(td.blk_in, buf, last_buf_len); - EOS_ASSERT(did_read == last_buf_len, block_log_exception, "blocks.log read fails"); - last_buf_len+= fk; //bytes in buf will eventually write to disk + index_end += last_buf_len; + cout << "seek blocks to " << pos << " read " << last_buf_len << " bytes before saved 4k\n";//debug + status = fseek(td.blk_in, pos, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", td.block_file_name.string())("pos", pos) ); + did_read = fread(buf, last_buf_len, 1, td.blk_in); + EOS_ASSERT(did_read == 1, block_log_exception, "${file} read fails", ("file", td.block_file_name.string()) ); + last_buf_len += fk; //bytes in buf will eventually write to disk } } - close(blk_out); - close(ind_out); - string old_log= (block_dir/"old.log").generic_string(); - string old_ind= (block_dir/"old.index").generic_string(); - rename(td.block_file_name,old_log); - rename(td.index_file_name,old_ind); - rename(block_out_filename,td.block_file_name); - rename(index_out_filename,td.index_file_name); - cout << "The new blocks.log and blocks.index files contain blocks " << n << " through " << td.last_block << '\n'; - cout << "The original (before trim front) files have been renamed to old.log and old.index.\n"; + fclose(blk_out); + fclose(ind_out); + bfs::path old_log = block_dir / "old.log"; + bfs::path old_ind = block_dir / "old.index"; + rename(td.block_file_name.c_str(), old_log.c_str()); + rename(td.index_file_name.c_str(), old_ind.c_str()); + rename(block_out_filename.c_str(), td.block_file_name.c_str()); + rename(index_out_filename.c_str(), td.index_file_name.c_str()); + cout << "The new " << td.block_file_name << " and " << td.index_file_name << " files contain blocks " << n << " through " << td.last_block << '\n'; + cout << "The original (before trim front) files have been renamed to " << old_log << " and " << old_ind << ".\n"; rt.report(); return 0; } -int make_index(bfs::path block_dir, string out_file) { +int make_index(const bfs::path& block_dir, const bfs::path& out_file) { report_time rt; //this code makes blocks.index much faster than nodeos (in recent test 80 seconds vs. 
90 minutes) using namespace std; - string block_file_name= (block_dir / "blocks.log").generic_string(); - string out_file_name= (block_dir / out_file ).generic_string(); + bfs::path block_file_name = block_dir / "blocks.log"; + bfs::path out_file_name = block_dir / out_file; cout << '\n'; cout << "Will read existing blocks.log file " << block_file_name << '\n'; cout << "Will write new blocks.index file " << out_file_name << '\n'; - int fin = open(block_file_name.c_str(), O_RDONLY); - EOS_ASSERT( fin>0, block_log_not_found, "cannot read block file ${file}", ("file",block_file_name) ); + FILE* fin = fopen(block_file_name.c_str(), "r"); + EOS_ASSERT( fin != nullptr, block_log_not_found, "cannot read block file ${file}", ("file", block_file_name.string()) ); //will read big chunks of blocks.log into buf, will fill fpos_list with file positions before write to blocks.index - constexpr uint32_t buf_len{1U<<24}; //buf_len must be power of 2 >= largest possible block == one MByte - auto buffer= make_unique(buf_len+8); //can write up to 8 bytes past end of buf - char* buf= buffer.get(); - constexpr uint64_t fpos_list_len{1U<<22}; //length of fpos_list[] in bytes - auto fpos_buffer= make_unique(fpos_list_len>>3); - uint64_t* fpos_list= fpos_buffer.get(); + constexpr uint32_t buf_len{1U << 24}; //buf_len must be power of 2 >= largest possible block == one MByte + auto buffer = make_unique(buf_len + 8); //can write up to 8 bytes past end of buf + char* buf = buffer.get(); + constexpr uint64_t fpos_list_len{1U << 22}; //length of fpos_list[] in bytes + auto fpos_buffer = make_unique(fpos_list_len >> 3); + uint64_t* fpos_list = fpos_buffer.get(); //read blocks.log to see if version 1 or 2 and get first_blocknum (implicit 1 if version 1) - uint32_t version=0, first_block=0; - read(fin,(char*)&version,sizeof(version)); + uint32_t version = 0, first_block = 0; + auto size = fread((char*)&version, sizeof(version), 1, fin); + EOS_ASSERT( size == 1, block_log_exception, "${file} read fails", ("file", block_file_name.string()) ); cout << "block log version= " << version << '\n'; - EOS_ASSERT( version==1 || version==2, block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); + EOS_ASSERT( version == 1 || version == 2, block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); if (version == 1) - first_block= 1; - else - read(fin,(char*)&first_block,sizeof(first_block)); + first_block = 1; + else { + size = fread((void*)&first_block, sizeof(first_block), 1, fin); + EOS_ASSERT( size == 1, block_log_exception, "${file} read fails", ("file", block_file_name.string()) ); + } cout << "first block= " << first_block << '\n'; - uint64_t pos= lseek(fin,0,SEEK_END); //get blocks.log file length - uint64_t last_buf_len= pos & ((uint64_t)buf_len-1); //buf_len is a power of 2 so -1 creates low bits all 1 + auto status = fseek(fin, 0, SEEK_END); //get blocks.log file length + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", block_file_name.string())("pos", 0) ); + uint64_t pos = ftell(fin); + uint64_t last_buf_len = pos & ((uint64_t)buf_len-1); //buf_len is a power of 2 so -1 creates low bits all 1 if (!last_buf_len) //will read integral number of buf_len and one time read last_buf_len - last_buf_len= buf_len; - pos= lseek(fin,-(uint64_t)last_buf_len,SEEK_END); //one time read last_buf_len - uint64_t did_read= read(fin,buf,last_buf_len); //read tail of blocks.log file into buf - EOS_ASSERT( 
did_read==last_buf_len, block_log_exception, "blocks.log read fails" ); + last_buf_len = buf_len; + status = fseek(fin, -(uint64_t)last_buf_len, SEEK_END); //one time read last_buf_len + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from end of file", ("file", block_file_name.string())("pos", last_buf_len) ); + pos = ftell(fin); + uint64_t did_read = fread((void*)buf, last_buf_len, 1, fin);//read tail of blocks.log file into buf + EOS_ASSERT( did_read == 1, block_log_exception, "blocks.log read fails" ); //we traverse linked list of blocks in buf (from end to start), for each block we know this: uint32_t index_start; //buf index for block start @@ -504,79 +550,88 @@ int make_index(bfs::path block_dir, string out_file) { uint64_t file_pos; //file pos of block start uint32_t bnum; //block number - index_end= last_buf_len-8; //index in buf where last block ends and block file position starts - file_pos= *(uint64_t*)(buf+index_end); //file pos of block start - index_start= file_pos - pos; //buf index for block start - bnum= *(uint32_t*)(buf + index_start + blknum_offset); //block number of previous block (is big endian) - bnum= endian_reverse_u32(bnum)+1; //convert from big endian to little endian and add 1 + index_end = last_buf_len - 8; //index in buf where last block ends and block file position starts + cout << "last_buf_len=" << last_buf_len << " index_end=" << index_end << '\n'; + file_pos = *(uint64_t*)(buf + index_end); //file pos of block start + cout << "file_pos=" << file_pos << " buf=" << (uint64_t)buf << '\n'; + index_start = file_pos - pos; //buf index for block start + bnum = *(uint32_t*)(buf + index_start + blknum_offset); //block number of previous block (is big endian) + bnum = endian_reverse_u32(bnum) + 1; //convert from big endian to little endian and add 1 cout << "last block= " << bnum << '\n'; cout << '\n'; cout << "block " << setw(10) << bnum << " file_pos " << setw(14) << file_pos << '\n'; //first progress indicator - uint64_t last_file_pos= file_pos; //used to check that file_pos is strictly decreasing + uint64_t last_file_pos = file_pos; //used to check that file_pos is strictly decreasing uint32_t end_block{bnum}; //save for message at end //we use low level file IO because it is distinctly faster than C++ filebuf or iostream - mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; //if create file permissions will be 644 - int fout = open(out_file_name.c_str(), O_WRONLY|O_CREAT|O_TRUNC, mode); - EOS_ASSERT( fout>0, block_index_not_found, "cannot write blocks.index" ); - - uint64_t ind_file_len= (bnum+1-first_block)<<3; //index file holds 8 bytes for each block in blocks.log - uint64_t last_ind_buf_len= ind_file_len & (fpos_list_len-1); //fpos_list_len is a power of 2 so -1 creates low bits all 1 - if (!last_ind_buf_len) //will write integral number of buf_len and last_ind_buf_len one time to index file - last_ind_buf_len= buf_len; - uint64_t ind_pos= lseek(fout,ind_file_len-last_ind_buf_len,SEEK_SET); - uint64_t blk_base= (ind_pos>>3) + first_block; //first entry in fpos_list is for block blk_base - //cout << "ind_pos= " << ind_pos << " blk_base= " << blk_base << '\n'; - fpos_list[bnum-blk_base]= file_pos; //write filepos for block bnum + FILE* fout = fopen(out_file_name.c_str(), "w"); + EOS_ASSERT( fout != nullptr, block_index_not_found, "cannot write blocks.index" ); + + uint64_t ind_file_len = (bnum + 1 - first_block)<<3; //index file holds 8 bytes for each block in blocks.log + uint64_t last_ind_buf_len = ind_file_len & 
(fpos_list_len - 1); //fpos_list_len is a power of 2 so -1 creates low bits all 1 + if (!last_ind_buf_len) //will write integral number of buf_len and last_ind_buf_len one time to index file + last_ind_buf_len = buf_len; + status = fseek(fout, ind_file_len - last_ind_buf_len, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", out_file_name.string())("pos", ind_file_len - last_ind_buf_len) ); + uint64_t ind_pos = ftell(fout); + EOS_ASSERT( ind_pos == ind_file_len - last_ind_buf_len, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", out_file_name.string())("pos", ind_file_len - last_ind_buf_len) ); + uint64_t blk_base = (ind_pos >> 3) + first_block; //first entry in fpos_list is for block blk_base + cout << "ind_pos= " << ind_pos << " blk_base= " << blk_base << '\n'; + fpos_list[bnum - blk_base] = file_pos; //write filepos for block bnum for (;;) { - if (bnum==blk_base) { //if fpos_list is full - write(fout,(char*)fpos_list,last_ind_buf_len); //write fpos_list to index file - if (ind_pos==0) { //if done writing index file + if (bnum == blk_base) { //if fpos_list is full + size = fwrite((void*)fpos_list, last_ind_buf_len, 1, fout); //write fpos_list to index file + EOS_ASSERT( size == 1, block_log_exception, "${file} read fails", ("file", out_file_name.string()) ); + if (ind_pos == 0) { //if done writing index file cout << "block " << setw(10) << bnum << " file_pos " << setw(14) << file_pos << '\n'; //last progress indicator EOS_ASSERT( bnum == first_block, block_log_exception, "blocks.log does not contain consecutive block numbers" ); break; } - ind_pos-= fpos_list_len; - blk_base-= (fpos_list_len>>3); - did_read= lseek(fout,ind_pos,SEEK_SET); - EOS_ASSERT( did_read==ind_pos, block_log_exception, "blocks.log seek fails" ); - last_ind_buf_len= fpos_list_len; //from now on all writes to index file write a full fpos_list[] + ind_pos -= fpos_list_len; + blk_base -= (fpos_list_len>>3); + status = fseek(fout, ind_pos, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", out_file_name.string())("pos", ind_pos) ); + did_read = ftell(fout); + EOS_ASSERT( did_read == ind_pos, block_log_exception, "${file} seek fails", ("file", out_file_name.string()) ); + last_ind_buf_len = fpos_list_len; //from now on all writes to index file write a full fpos_list[] } if (index_start < 8) { //if block start is split across buf boundary - memcpy(buf+buf_len,buf,8); //copy portion at start of buf to past end of buf - pos-= buf_len; //file position of buf - lseek(fin,pos,SEEK_SET); - did_read= read(fin,buf,buf_len); //read next buf - EOS_ASSERT( did_read==buf_len, block_log_exception, "blocks.log read fails" ); - index_start+= buf_len; + memcpy(buf + buf_len, buf, 8); //copy portion at start of buf to past end of buf + pos -= buf_len; //file position of buf + status = fseek(fin, pos, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", block_file_name.string())("pos", pos) ); + did_read = fread((void*)buf, buf_len, 1, fin); //read next buf + EOS_ASSERT( did_read == 1, block_log_exception, "blocks.log read fails" ); + index_start += buf_len; } --bnum; //now move index_start and index_end to prior block - index_end= index_start-8; //index in buf where block ends and block file position starts - file_pos= *(uint64_t*)(buf+index_end); //file pos of block start + index_end = 
index_start - 8; //index in buf where block ends and block file position starts + file_pos = *(uint64_t*)(buf + index_end); //file pos of block start if (file_pos >= last_file_pos) { //file_pos will decrease if linked list is not corrupt cout << '\n'; cout << "file pos for block " << bnum+1 << " is " << last_file_pos << '\n'; cout << "file pos for block " << bnum << " is " << file_pos << '\n'; cout << "The linked list of blocks in blocks.log should run from last block to first block in reverse order\n"; - EOS_ASSERT( file_pos>3; - if (delta<1) - delta= 1; - for (uint32_t n= td.first_block; ; n+= delta) { - if (n>td.last_block) - n= td.last_block; + uint32_t delta = (td.last_block + 8 - td.first_block) >> 3; + if (delta < 1) + delta = 1; + for (uint32_t n = td.first_block; ; n += delta) { + if (n > td.last_block) + n = td.last_block; cout << '\n'; td.find_block_pos(n); //check block 'n' is where blocks.index says - if (n==td.last_block) + if (n == td.last_block) break; } cout << "\nno problems found\n"; //if get here there were no exceptions @@ -627,7 +686,7 @@ int main(int argc, char** argv) { return 0; } if (blog.trim_log) { - if (blog.first_block==0 && blog.last_block==std::numeric_limits::max()) { + if (blog.first_block == 0 && blog.last_block == std::numeric_limits::max()) { std::cerr << "trim-blocklog does nothing unless specify first and/or last block."; return -1; } @@ -642,7 +701,9 @@ int main(int argc, char** argv) { return 0; } if (blog.make_index) { - string out_file{vmap.count("output-file")==0? string("blocks.index"): vmap.at("output-file").as().generic_string()}; + bfs::path out_file = "blocks.index"; + if (vmap.count("output-file") == 0) + out_file = vmap.at("output-file").as(); return make_index(vmap.at("blocks-dir").as(), out_file); } //else print blocks.log as JSON From c2c8e997dd04bb5d063b2c5655fd1cd2c805f9d5 Mon Sep 17 00:00:00 2001 From: "johnsonb@objectcomputing.com" Date: Sat, 27 Apr 2019 15:38:07 -0500 Subject: [PATCH 0248/1648] Minor cleanup. 
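Not part of the diff: the shift expressions being re-spaced here carry the sizing math of make_index, so a small self-contained illustration may help reviewers. The file name, block numbers, and early-return error handling below are assumptions for the example; the real code reads blocks.log and raises exceptions via EOS_ASSERT.

    #include <cstdint>
    #include <cstdio>

    int main() {
       // Probe the log length the same way the converted code does (fseek to the end, then ftell).
       std::FILE* fin = std::fopen("blocks.log", "rb");
       if (fin == nullptr) return 1;
       if (std::fseek(fin, 0, SEEK_END) != 0) { std::fclose(fin); return 1; }
       const uint64_t file_len = static_cast<uint64_t>(std::ftell(fin));
       std::fclose(fin);

       // buf_len is a power of two, so "& (buf_len - 1)" is the size of the final partial read.
       constexpr uint64_t buf_len = 1ULL << 24;
       uint64_t last_buf_len = file_len & (buf_len - 1);
       if (last_buf_len == 0) last_buf_len = buf_len;      // otherwise only whole buffers are read

       // Each block costs 8 index bytes, hence the shifts by 3 between block counts and byte offsets.
       const uint64_t first_block = 1, last_block = 1000;  // assumed values for the example
       const uint64_t ind_file_len = (last_block + 1 - first_block) << 3;

       std::printf("tail read %llu bytes, index file %llu bytes\n",
                   static_cast<unsigned long long>(last_buf_len),
                   static_cast<unsigned long long>(ind_file_len));
       return 0;
    }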
--- programs/eosio-blocklog/main.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 7d645485ed8..2df6495aef2 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -439,7 +439,7 @@ int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first b EOS_ASSERT( size == 1, block_log_exception, "${file} write fails", ("file", block_out_filename.string()) ); break; } else { - blk_base -= (buf_len>>3); + blk_base -= (buf_len >> 3); cout << "******filling index buf blk_base " << blk_base << " fpos_len " << buf_len << '\n';//debug } } @@ -567,7 +567,7 @@ int make_index(const bfs::path& block_dir, const bfs::path& out_file) { FILE* fout = fopen(out_file_name.c_str(), "w"); EOS_ASSERT( fout != nullptr, block_index_not_found, "cannot write blocks.index" ); - uint64_t ind_file_len = (bnum + 1 - first_block)<<3; //index file holds 8 bytes for each block in blocks.log + uint64_t ind_file_len = (bnum + 1 - first_block) << 3; //index file holds 8 bytes for each block in blocks.log uint64_t last_ind_buf_len = ind_file_len & (fpos_list_len - 1); //fpos_list_len is a power of 2 so -1 creates low bits all 1 if (!last_ind_buf_len) //will write integral number of buf_len and last_ind_buf_len one time to index file last_ind_buf_len = buf_len; @@ -589,7 +589,7 @@ int make_index(const bfs::path& block_dir, const bfs::path& out_file) { break; } ind_pos -= fpos_list_len; - blk_base -= (fpos_list_len>>3); + blk_base -= (fpos_list_len >> 3); status = fseek(fout, ind_pos, SEEK_SET); EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", out_file_name.string())("pos", ind_pos) ); did_read = ftell(fout); From 38812455f4e66d6d66bfe06caadd462c99bbe38c Mon Sep 17 00:00:00 2001 From: "johnsonb@objectcomputing.com" Date: Tue, 30 Apr 2019 17:48:49 -0500 Subject: [PATCH 0249/1648] Initializing variables for PR comment. 
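A sketch of what the added initializers buy (the struct name is illustrative; the members mirror blocklog and trim_data): with in-class defaults every field has a defined value even when boost::program_options or the constructor never assigns it, so later checks such as last_block == max or blk_in != nullptr never read indeterminate memory.

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    struct blocklog_defaults_sketch {
       uint32_t   first_block = 0;                                      // "not given" markers are explicit
       uint32_t   last_block  = std::numeric_limits<uint32_t>::max();
       bool       make_index  = false;
       bool       trim_log    = false;
       std::FILE* blk_in      = nullptr;                                // handles start null, checked before use
    };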
--- programs/eosio-blocklog/main.cpp | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 2df6495aef2..df9b0d984c9 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -36,14 +36,14 @@ struct blocklog { bfs::path blocks_dir; bfs::path output_file; - uint32_t first_block; - uint32_t last_block; - bool no_pretty_print; - bool as_json_array; - bool make_index; - bool trim_log; - bool smoke_test; - bool help; + uint32_t first_block = 0; + uint32_t last_block = std::numeric_limits::max(); + bool no_pretty_print = false; + bool as_json_array = false; + bool make_index = false; + bool trim_log = false; + bool smoke_test = false; + bool help = false; }; struct report_time { @@ -215,13 +215,15 @@ struct trim_data { //used by trim_blocklog_front(), trim_blocklog_end } void find_block_pos(uint32_t n); bfs::path block_file_name, index_file_name; //full pathname for blocks.log and blocks.index - uint32_t version; //blocklog version (1 or 2) - uint32_t first_block, last_block; //first and last block in blocks.log - FILE* blk_in; //C style files for reading blocks.log and blocks.index - FILE* ind_in; //C style files for reading blocks.log and blocks.index + uint32_t version = 0; //blocklog version (1 or 2) + uint32_t first_block = 0; //first block in blocks.log + uint32_t last_block = 0; //last block in blocks.log + FILE* blk_in = nullptr; //C style files for reading blocks.log and blocks.index + FILE* ind_in = nullptr; //C style files for reading blocks.log and blocks.index //we use low level file IO because it is distinctly faster than C++ filebuf or iostream - uint64_t index_pos; //filepos in blocks.index for block n, +8 for block n+1 - uint64_t fpos0, fpos1; //filepos in blocks.log for block n and block n+1 + uint64_t index_pos = 0; //filepos in blocks.index for block n, +8 for block n+1 + uint64_t fpos0 = 0; //filepos in blocks.log for block n and block n+1 + uint64_t fpos1 = 0; //filepos in blocks.log for block n and block n+1 }; From 1d3bd09e48e1499e690eebd11bed02d19253d1d5 Mon Sep 17 00:00:00 2001 From: "johnsonb@objectcomputing.com" Date: Tue, 30 Apr 2019 17:49:46 -0500 Subject: [PATCH 0250/1648] Added description to report_time struct. 
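For readers outside the tree, a compilable stand-in for the timer this patch extends (printf in place of ilog, and the name is changed to make the substitution obvious). Taking the description by value is what the follow-up "Fixed compile error" patch settles on, since a string literal cannot bind to a non-const std::string&.

    #include <chrono>
    #include <cstdio>
    #include <string>
    #include <utility>

    struct report_time_sketch {
       explicit report_time_sketch(std::string desc)
          : _start(std::chrono::high_resolution_clock::now()), _desc(std::move(desc)) {}

       void report() const {                               // prints elapsed wall-clock time in msec
          const auto usec = std::chrono::duration_cast<std::chrono::microseconds>(
                               std::chrono::high_resolution_clock::now() - _start).count();
          std::printf("eosio-blocklog - %s took %lld msec\n",
                      _desc.c_str(), static_cast<long long>(usec / 1000));
       }

       const std::chrono::high_resolution_clock::time_point _start;
       const std::string _desc;
    };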
--- programs/eosio-blocklog/main.cpp | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index df9b0d984c9..0c36cb32a94 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -47,20 +47,22 @@ struct blocklog { }; struct report_time { - report_time() - : _start(std::chrono::high_resolution_clock::now()) { + report_time(std::string& desc) + : _start(std::chrono::high_resolution_clock::now()) + , _desc(desc) { } void report() { const auto duration = std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - _start).count() / 1000; - ilog("blocklog action took ${t} msec", ("t",duration)); + ilog("eosio-blocklog - ${desc} took ${t} msec", ("desc", _desc)("t", duration)); } const std::chrono::high_resolution_clock::time_point _start; + const std::string _desc; }; void blocklog::read_log() { - report_time rt; + report_time rt("reading log"); block_log block_logger(blocks_dir); const auto end = block_logger.read_head(); EOS_ASSERT( end, block_log_exception, "No blocks found in block log" ); @@ -228,7 +230,7 @@ struct trim_data { //used by trim_blocklog_front(), trim_blocklog_end trim_data::trim_data(bfs::path block_dir) { - report_time rt; + report_time rt("trimming log"); using namespace std; block_file_name = block_dir / "blocks.log"; blk_in = fopen(block_file_name.c_str(), "r"); @@ -239,8 +241,9 @@ trim_data::trim_data(bfs::path block_dir) { EOS_ASSERT( size == 1, block_log_unsupported_version, "invalid format for file ${file}", ("file",block_file_name.string())); cout << "block log version= " << version << '\n'; EOS_ASSERT( version == 1 || version == 2, block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); - if (version == 1) + if (version == 1) { first_block = 1; + } else { size = fread((void *) &first_block, sizeof(first_block), 1, blk_in); EOS_ASSERT(size == 1, block_log_exception, "invalid format for file ${file}", @@ -258,7 +261,7 @@ trim_data::trim_data(bfs::path block_dir) { void trim_data::find_block_pos(uint32_t n) { //get file position of block n from blocks.index then confirm block n is found in blocks.log at that position //sets fpos0 and fpos1, throws exception if block at fpos0 is not block n - report_time rt; + report_time rt("finding block position"); using namespace std; index_pos = sizeof(uint64_t) * (n - first_block); auto status = fseek(ind_in, index_pos, SEEK_SET); @@ -294,7 +297,7 @@ void trim_data::find_block_pos(uint32_t n) { } int trim_blocklog_end(bfs::path block_dir, uint32_t n) { //n is last block to keep (remove later blocks) - report_time rt; + report_time rt("trimming blocklog end"); using namespace std; trim_data td(block_dir); cout << "\nIn directory " << block_dir << " will trim all blocks after block " << n << " from " << td.block_file_name << " and " << td.index_file_name << ".\n"; @@ -316,7 +319,7 @@ int trim_blocklog_end(bfs::path block_dir, uint32_t n) { //n is last block } int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first block to keep (remove prior blocks) - report_time rt; + report_time rt("trimming blocklog start"); using namespace std; cout << "\nIn directory " << block_dir << " will trim all blocks before block " << n << " from blocks.log and blocks.index.\n"; trim_data td(block_dir); @@ -501,7 +504,7 @@ int trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first b int make_index(const bfs::path& block_dir, const bfs::path& 
out_file) { - report_time rt; + report_time rt("making index"); //this code makes blocks.index much faster than nodeos (in recent test 80 seconds vs. 90 minutes) using namespace std; bfs::path block_file_name = block_dir / "blocks.log"; @@ -662,7 +665,6 @@ void smoke_test(bfs::path block_dir) { for (uint32_t n = td.first_block; ; n += delta) { if (n > td.last_block) n = td.last_block; - cout << '\n'; td.find_block_pos(n); //check block 'n' is where blocks.index says if (n == td.last_block) break; From 0c1551556c8ecbc816e1723710446d290bdc3d19 Mon Sep 17 00:00:00 2001 From: "johnsonb@objectcomputing.com" Date: Tue, 30 Apr 2019 18:06:06 -0500 Subject: [PATCH 0251/1648] Fixed compile error. --- programs/eosio-blocklog/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 0c36cb32a94..d7139dffc72 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -47,7 +47,7 @@ struct blocklog { }; struct report_time { - report_time(std::string& desc) + report_time(std::string desc) : _start(std::chrono::high_resolution_clock::now()) , _desc(desc) { } From 34c2ea033d178cf2c7b408ad2b165051e9291d06 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 2 May 2019 09:20:11 -0500 Subject: [PATCH 0252/1648] Use shared_mutex now that C++17 is available --- plugins/net_plugin/net_plugin.cpp | 36 +++++++++++++++---------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index c0a5d043608..3cc221af4c1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -247,7 +247,7 @@ namespace eosio { bool use_socket_read_watermark = false; /** @} */ - mutable std::shared_timed_mutex connections_mtx; // switch to std::shared_mutex in C++17 + mutable std::shared_mutex connections_mtx; std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection std::mutex connector_check_timer_mtx; @@ -1359,7 +1359,7 @@ namespace eosio { if (conn && conn->current() ) { sync_source = conn; } else { - std::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); if( my_impl->connections.size() == 0 ) { sync_source.reset(); } else if( my_impl->connections.size() == 1 ) { @@ -1432,7 +1432,7 @@ namespace eosio { // static, thread safe void sync_manager::send_handshakes() { - std::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& ci : my_impl->connections ) { if( ci->current() ) { ci->send_handshake(); @@ -1573,7 +1573,7 @@ namespace eosio { void sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { request_message req; req.req_blocks.mode = catch_up; - std::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for (const auto& cc : my_impl->connections) { std::lock_guard g_conn( cc->conn_mtx ); if( cc->fork_head_num > num || cc->fork_head == id ) { @@ -1678,7 +1678,7 @@ namespace eosio { block_id_type null_id; bool set_state_to_head_catchup = false; - std::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( const auto& cp : my_impl->connections ) { std::unique_lock g_cp_conn( cp->conn_mtx ); uint32_t fork_head_num = cp->fork_head_num; @@ -1823,7 +1823,7 @@ namespace eosio { if( my_impl->sync_master->syncing_with_peer() ) 
return; bool have_connection = false; - std::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { peer_dlog( cp, "socket_is_open ${s}, connecting ${c}, syncing ${ss}", @@ -1892,7 +1892,7 @@ namespace eosio { node_transaction_state nts = {id, trx_expiration, 0, 0}; std::shared_ptr> send_buffer; - std::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& cp : my_impl->connections ) { if( !cp->current() ) { continue; @@ -1992,7 +1992,7 @@ namespace eosio { return; } g_c_conn.unlock(); - std::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& conn : my_impl->connections ) { if( conn == c ) continue; @@ -2116,7 +2116,7 @@ namespace eosio { fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message()) ); } else { paddr_str = paddr_add.to_string(); - std::shared_lock g( my_impl->connections_mtx ); + std::shared_lock g( my_impl->connections_mtx ); for( auto& conn : connections ) { if( conn->socket_is_open() ) { if( conn->peer_address().empty() ) { @@ -2130,7 +2130,7 @@ namespace eosio { g.unlock(); if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count) ) { if( new_connection->start_session() ) { - std::unique_lock g_unique( connections_mtx ); + std::unique_lock g_unique( connections_mtx ); connections.insert( new_connection ); g_unique.unlock(); } @@ -2431,7 +2431,7 @@ namespace eosio { if( peer_address().empty() || last_handshake_recv.node_id == fc::sha256()) { g_conn.unlock(); fc_dlog(logger, "checking for duplicate" ); - std::shared_lock g_cnts( my_impl->connections_mtx ); + std::shared_lock g_cnts( my_impl->connections_mtx ); for(const auto& check : my_impl->connections) { if(check.get() == this) continue; @@ -2865,7 +2865,7 @@ namespace eosio { if( my->in_shutdown ) return; fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - std::shared_lock g( my->connections_mtx ); + std::shared_lock g( my->connections_mtx ); for( auto& c : my->connections ) { if( c->socket_is_open() ) { c->strand.post( [c]() { @@ -2905,7 +2905,7 @@ namespace eosio { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); - std::unique_lock g( connections_mtx ); + std::unique_lock g( connections_mtx ); auto it = (from ? 
connections.find(from) : connections.begin()); if (it == connections.end()) it = connections.begin(); size_t num_rm = 0; @@ -3315,7 +3315,7 @@ namespace eosio { { fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); - std::unique_lock g( my->connections_mtx ); + std::unique_lock g( my->connections_mtx ); for( auto& con : my->connections ) { fc_dlog( logger, "close: ${p}", ("p", con->peer_name()) ); con->close( false ); @@ -3336,7 +3336,7 @@ namespace eosio { * Used to trigger a new connection from RPC API */ string net_plugin::connect( const string& host ) { - std::unique_lock g( my->connections_mtx ); + std::unique_lock g( my->connections_mtx ); if( my->find_connection( host ) ) return "already connected"; @@ -3350,7 +3350,7 @@ namespace eosio { } string net_plugin::disconnect( const string& host ) { - std::unique_lock g( my->connections_mtx ); + std::unique_lock g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { if( (*itr)->peer_address() == host ) { fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) ); @@ -3363,7 +3363,7 @@ namespace eosio { } optional net_plugin::status( const string& host )const { - std::shared_lock g( my->connections_mtx ); + std::shared_lock g( my->connections_mtx ); auto con = my->find_connection( host ); if( con ) return con->get_status(); @@ -3372,7 +3372,7 @@ namespace eosio { vector net_plugin::connections()const { vector result; - std::shared_lock g( my->connections_mtx ); + std::shared_lock g( my->connections_mtx ); result.reserve( my->connections.size() ); for( const auto& c : my->connections ) { result.push_back( c->get_status() ); From aa8ca1646a66aac44d3607c75d6e5c5ea501ab39 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 2 May 2019 13:10:15 -0500 Subject: [PATCH 0253/1648] Add for_each_connection method to simplify some use cases --- plugins/net_plugin/net_plugin.cpp | 142 +++++++++++++++--------------- 1 file changed, 70 insertions(+), 72 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 3cc221af4c1..4f85e730bfd 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -250,6 +250,14 @@ namespace eosio { mutable std::shared_mutex connections_mtx; std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection + template + void for_each_connection( Function f ) { + std::shared_lock g( connections_mtx ); + for( auto& c : connections ) { + if ( !f( c ) ) return; + } + } + std::mutex connector_check_timer_mtx; unique_ptr connector_check_timer; int connector_checks_in_flight{0}; @@ -1432,12 +1440,12 @@ namespace eosio { // static, thread safe void sync_manager::send_handshakes() { - std::shared_lock g( my_impl->connections_mtx ); - for( auto& ci : my_impl->connections ) { + my_impl->for_each_connection( []( auto& ci ) { if( ci->current() ) { ci->send_handshake(); } - } + return true; + } ); } bool sync_manager::is_sync_required( uint32_t fork_head_block_num ) { @@ -1573,15 +1581,14 @@ namespace eosio { void sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { request_message req; req.req_blocks.mode = catch_up; - std::shared_lock g( my_impl->connections_mtx ); - for (const auto& cc : my_impl->connections) { + my_impl->for_each_connection( [num, &id, &req]( const auto& cc ) { std::lock_guard g_conn( cc->conn_mtx ); if( cc->fork_head_num > num || cc->fork_head == id ) { 
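The `for_each_connection` helper added in PATCH 0253 centralizes the `connections_mtx` shared lock and gives call sites an early-exit channel: the callback returns `true` to keep iterating (the old `continue`) and `false` to stop (the old `break`). A compilable sketch of that shape follows; the `connection` type and its `current()`/`send_handshake()` methods are placeholders for the real net_plugin interfaces.

```cpp
// Sketch of the for_each_connection pattern; connection and its methods are
// stand-ins, only the iteration/early-exit shape mirrors the patch.
#include <iostream>
#include <memory>
#include <mutex>
#include <set>
#include <shared_mutex>

struct connection {
   bool current() const { return true; }
   void send_handshake() { std::cout << "handshake\n"; }
};
using connection_ptr = std::shared_ptr<connection>;

class net_plugin_like {
   mutable std::shared_mutex connections_mtx;
   std::set<connection_ptr>  connections;

public:
   // Callback returns true to keep iterating, false to stop early
   // (the equivalents of `continue` and `break` in the old loops).
   template <typename Function>
   void for_each_connection( Function f ) const {
      std::shared_lock g( connections_mtx );
      for( auto& c : connections ) {
         if( !f( c ) ) return;
      }
   }

   void add( connection_ptr c ) {
      std::lock_guard g( connections_mtx );
      connections.insert( std::move( c ) );
   }

   void send_handshakes() {
      for_each_connection( []( auto& ci ) {
         if( ci->current() ) ci->send_handshake();
         return true;                         // visit every connection
      } );
   }
};

int main() {
   net_plugin_like np;
   np.add( std::make_shared<connection>() );
   np.send_handshakes();
}
```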
req.req_blocks.mode = none; - break; + return false; } - } - g.unlock(); + return true; + } ); if( req.req_blocks.mode == catch_up ) { { std::lock_guard g_conn( c->conn_mtx ); @@ -1678,24 +1685,22 @@ namespace eosio { block_id_type null_id; bool set_state_to_head_catchup = false; - std::shared_lock g( my_impl->connections_mtx ); - for( const auto& cp : my_impl->connections ) { + my_impl->for_each_connection( [&null_id, blk_num, &blk_id, &c, &set_state_to_head_catchup]( const auto& cp ) { std::unique_lock g_cp_conn( cp->conn_mtx ); uint32_t fork_head_num = cp->fork_head_num; block_id_type fork_head_id = cp->fork_head; g_cp_conn.unlock(); if( fork_head_id == null_id ) { - continue; - } - if( fork_head_num < blk_num || fork_head_id == blk_id ) { + // continue + } else if( fork_head_num < blk_num || fork_head_id == blk_id ) { std::lock_guard g_conn( c->conn_mtx ); c->fork_head = null_id; c->fork_head_num = 0; } else { set_state_to_head_catchup = true; } - } - g.unlock(); + return true; + } ); if( set_state_to_head_catchup ) { g_sync.lock(); @@ -1823,27 +1828,23 @@ namespace eosio { if( my_impl->sync_master->syncing_with_peer() ) return; bool have_connection = false; - std::shared_lock g( my_impl->connections_mtx ); - for( auto& cp : my_impl->connections ) { - + my_impl->for_each_connection( [&have_connection]( auto& cp ) { peer_dlog( cp, "socket_is_open ${s}, connecting ${c}, syncing ${ss}", ("s", cp->socket_is_open())("c", cp->connecting.load())("ss", cp->syncing.load()) ); if( !cp->current() ) { - continue; + return true; } have_connection = true; - break; - } - g.unlock(); + return false; + } ); if( !have_connection ) return; std::shared_ptr> send_buffer = create_send_buffer( bs->block ); - g.lock(); - for( auto& cp : my_impl->connections ) { + my_impl->for_each_connection( [this, bs, send_buffer]( auto& cp ) { if( !cp->current() ) { - continue; + return true; } cp->strand.post( [this, cp, bs, send_buffer]() { uint32_t bnum = bs->block_num; @@ -1859,7 +1860,8 @@ namespace eosio { cp->enqueue_buffer( send_buffer, true, priority::medium, no_reason ); } }); - } + return true; + } ); } // called from connection strand @@ -1892,14 +1894,13 @@ namespace eosio { node_transaction_state nts = {id, trx_expiration, 0, 0}; std::shared_ptr> send_buffer; - std::shared_lock g( my_impl->connections_mtx ); - for( auto& cp : my_impl->connections ) { + my_impl->for_each_connection( [this, &trx, &nts, &send_buffer]( auto& cp ) { if( !cp->current() ) { - continue; + return true; } nts.connection_id = cp->connection_id; if( !add_peer_txn(nts) ) { - continue; + return true; } if( !send_buffer ) { send_buffer = create_send_buffer( trx ); @@ -1909,7 +1910,8 @@ namespace eosio { fc_dlog( logger, "sending trx to ${n}", ("n", cp->peer_name()) ); cp->enqueue_buffer( send_buffer, true, priority::low, no_reason ); } ); - } + return true; + } ); } void dispatch_manager::recv_transaction(const connection_ptr& c, const transaction_metadata_ptr& txn) { @@ -1978,51 +1980,49 @@ namespace eosio { void dispatch_manager::retry_fetch(const connection_ptr& c) { fc_dlog( logger, "retry fetch" ); - std::unique_lock g_c_conn( c->conn_mtx ); - if( !c->last_req ) { - return; - } - fc_wlog( logger, "failed to fetch from ${p}", ("p", c->peer_address()) ); + request_message last_req; block_id_type bid; - if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { - bid = c->last_req->req_blocks.ids.back(); - } else { - fc_wlog( logger, "no retry, block mpde = ${b} trx mode = ${t}", - ("b", modes_str( 
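In both `bcast_block` and `bcast_transaction` above, the serialized message is built at most once and then shared across every receiving peer through a `shared_ptr` to the buffer; in `bcast_transaction` the buffer is even created lazily, only after the loop finds the first connection that should receive it. A small sketch of that lazy, shared serialization pattern follows; `create_send_buffer()`, the `peer` type, and the `std::vector<char>` element type are illustrative stand-ins.

```cpp
// Lazy shared send-buffer sketch; create_send_buffer() and peer are
// stand-ins for the net_plugin serialization and connection types.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct peer {
   bool wants_trx;
   void enqueue( const std::shared_ptr<std::vector<char>>& buf ) {
      std::cout << "sending " << buf->size() << " bytes\n";
   }
};

static std::shared_ptr<std::vector<char>> create_send_buffer( const std::string& trx ) {
   return std::make_shared<std::vector<char>>( trx.begin(), trx.end() );  // pretend serialization
}

int main() {
   std::vector<peer> peers{ { false }, { true }, { true } };
   std::string trx = "signed transaction bytes";

   std::shared_ptr<std::vector<char>> send_buffer;                 // not built yet
   for( auto& p : peers ) {
      if( !p.wants_trx ) continue;                                 // skip peers that should not get it
      if( !send_buffer ) send_buffer = create_send_buffer( trx );  // serialize at most once
      p.enqueue( send_buffer );                                    // every peer shares the same buffer
   }
}
```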
c->last_req->req_blocks.mode ))( "t", modes_str( c->last_req->req_trx.mode ) ) ); - return; + { + std::lock_guard g_c_conn( c->conn_mtx ); + if( !c->last_req ) { + return; + } + fc_wlog( logger, "failed to fetch from ${p}", ("p", c->peer_address()) ); + if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { + bid = c->last_req->req_blocks.ids.back(); + } else { + fc_wlog( logger, "no retry, block mpde = ${b} trx mode = ${t}", + ("b", modes_str( c->last_req->req_blocks.mode ))( "t", modes_str( c->last_req->req_trx.mode ) ) ); + return; + } + last_req = *c->last_req; } - g_c_conn.unlock(); - std::shared_lock g( my_impl->connections_mtx ); - for( auto& conn : my_impl->connections ) { - if( conn == c ) continue; - - std::unique_lock g_conn_conn( conn->conn_mtx ); - if( conn->last_req ) { - continue; + my_impl->for_each_connection( [this, &c, &last_req, &bid]( auto& conn ) { + if( conn == c ) + return true; + { + std::lock_guard guard( conn->conn_mtx ); + if( conn->last_req ) { + return true; + } } - g_conn_conn.unlock(); + bool sendit = peer_has_block( bid, conn->connection_id ); if( sendit ) { - g.unlock(); - g_c_conn.lock(); - auto last_req = *c->last_req; - g_c_conn.unlock(); conn->strand.post( [conn, last_req{std::move(last_req)}]() { conn->enqueue( last_req ); conn->fetch_wait(); std::lock_guard g_conn_conn( conn->conn_mtx ); conn->last_req = last_req; } ); - return; + return false; } - } + return true; + } ); // at this point no other peer has it, re-request or do nothing? fc_wlog( logger, "no peer has last_req" ); if( c->connected() ) { - g_c_conn.lock(); - auto last_req = *c->last_req; - g_c_conn.unlock(); c->enqueue( last_req ); c->fetch_wait(); } @@ -2116,8 +2116,7 @@ namespace eosio { fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message()) ); } else { paddr_str = paddr_add.to_string(); - std::shared_lock g( my_impl->connections_mtx ); - for( auto& conn : connections ) { + my_impl->for_each_connection( [&visitors, &from_addr, &paddr_str]( auto& conn ) { if( conn->socket_is_open() ) { if( conn->peer_address().empty() ) { ++visitors; @@ -2126,13 +2125,12 @@ namespace eosio { } } } - } - g.unlock(); + return true; + } ); if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count) ) { if( new_connection->start_session() ) { - std::unique_lock g_unique( connections_mtx ); + std::lock_guard g_unique( connections_mtx ); connections.insert( new_connection ); - g_unique.unlock(); } } else { @@ -2865,14 +2863,14 @@ namespace eosio { if( my->in_shutdown ) return; fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - std::shared_lock g( my->connections_mtx ); - for( auto& c : my->connections ) { + my->for_each_connection( []( auto& c ) { if( c->socket_is_open() ) { c->strand.post( [c]() { c->send_time(); } ); } - } + return true; + } ); } ); } @@ -3315,7 +3313,7 @@ namespace eosio { { fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); - std::unique_lock g( my->connections_mtx ); + std::lock_guard g( my->connections_mtx ); for( auto& con : my->connections ) { fc_dlog( logger, "close: ${p}", ("p", con->peer_name()) ); con->close( false ); @@ -3336,7 +3334,7 @@ namespace eosio { * Used to trigger a new connection from RPC API */ string net_plugin::connect( const string& host ) { - std::unique_lock g( my->connections_mtx ); + std::lock_guard g( my->connections_mtx ); if( my->find_connection( host ) ) return "already connected"; @@ -3350,7 +3348,7 @@ 
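The `retry_fetch` rewrite above also narrows how the connection's mutex is held: instead of locking `c->conn_mtx`, unlocking it, and re-locking it from inside the loop over other peers, the new code copies `*c->last_req` into a local inside a small scoped block guarded by `std::lock_guard`, and all later work uses that copy. That avoids holding one connection's mutex while iterating and locking other connections' mutexes. A stripped-down sketch of the copy-under-a-short-lock idiom; `request`, `connection`, and the helper are placeholders, not the real definitions.

```cpp
// Copy-under-a-short-lock sketch; request/connection/retry_from_another_peer
// are placeholders for the net_plugin types and logic.
#include <mutex>
#include <optional>
#include <string>

struct request { std::string block_id; };

struct connection {
   std::mutex             conn_mtx;
   std::optional<request> last_req;        // guarded by conn_mtx
};

static bool retry_from_another_peer( const request& r ) {
   // would scan other connections here, without holding the first peer's mutex
   return !r.block_id.empty();
}

static bool retry_fetch( connection& c ) {
   request last_req;
   {
      std::lock_guard g( c.conn_mtx );     // hold the mutex just long enough to copy
      if( !c.last_req ) return false;
      last_req = *c.last_req;
   }                                       // released here, before touching other peers
   return retry_from_another_peer( last_req );
}

int main() {
   connection c;
   c.last_req = request{ "0000abcd" };     // illustrative block id
   return retry_fetch( c ) ? 0 : 1;
}
```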
namespace eosio { } string net_plugin::disconnect( const string& host ) { - std::unique_lock g( my->connections_mtx ); + std::lock_guard g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { if( (*itr)->peer_address() == host ) { fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) ); From e9c1e5eb0360985449f80079ff84ce1de3ba76cf Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 2 May 2019 16:40:29 -0400 Subject: [PATCH 0254/1648] Removed commented-out code --- .buildkite/pipeline.yml | 83 +---------------------------------------- 1 file changed, 1 insertion(+), 82 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index bf0db2dc12e..81432fe58f6 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,46 +1,4 @@ -# env: -# ANKA_WORKDIR: "/data/job" -# ANKA_MOJAVE_TEMPLATE: "10.14.4_6C_14G_40G" -# ANKA_TEMPLATE_TAG: "clean::cicd::git-ssh::nas::brew" -# CHECKSUMABLE: "scripts/eosio_build*" -# MAC_TAG: "eosio_2-3" - steps: - - # - trigger: "mac-anka-fleet" - # label: ":anka: Ensure Mojave Anka Template Tag Exists" - # branches: "*" - # async: false - # build: - # branch: "master" - # env: - # REPO: "${BUILDKITE_REPO}" - # REPO_BRANCH: "${BUILDKITE_BRANCH}" - # CHECKSUMABLE: "${CHECKSUMABLE}" - # TEMPLATE: "${ANKA_MOJAVE_TEMPLATE}" - # TEMPLATE_TAG: "${ANKA_TEMPLATE_TAG}" - # TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y -P -f" # CLONED_REPO_DIR IS REQUIRED and is where the repo is always cloned into - # PROJECT_TAG: "${MAC_TAG}" - - # - wait - - # - label: ":darwin: [Darwin] Mojave Build" - # command: - # - "./scripts/eosio_build.sh -y -P" - # - "tar -pczf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz build" - # plugins: - # chef/anka#v0.4.3: - # vm-name: $ANKA_MOJAVE_TEMPLATE - # vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" - # workdir: $ANKA_WORKDIR - # always-pull: true - # debug: true - # wait-network: true - # agents: - # - "queue=mac-anka-node-fleet" - # timeout: 120 - - - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 @@ -324,44 +282,6 @@ steps: workdir: /data/job timeout: 60 - # - label: ":darwin: [Darwin] Mojave Tests" - # command: - # - "./scripts/parallel-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - # artifact_paths: - # - "build/mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # agents: - # - "queue=mac-anka-node-fleet" - # plugins: - # chef/anka#v0.4.3: - # vm-name: $ANKA_MOJAVE_TEMPLATE - # vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" - # workdir: $ANKA_WORKDIR - # always-pull: true - # debug: true - # wait-network: true - # timeout: 120 - - # - label: ":darwin: [Darwin] Mojave Tests" - # command: - # - "./scripts/serial-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - # artifact_paths: - # - "build/mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # agents: - # - "queue=mac-anka-node-fleet" - # plugins: - # chef/anka#v0.4.3: - # vm-name: $ANKA_MOJAVE_TEMPLATE - # vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" - # workdir: $ANKA_WORKDIR - # always-pull: true - # debug: true - # wait-network: true - # timeout: 120 - # Mojave Tests - command: | echo "--- :arrow_down: Downloading Build Directory" @@ -386,7 
+306,6 @@ steps: - "os=mojave" timeout: 60 - - wait - command: | # CentOS 7 Package Builder @@ -515,4 +434,4 @@ steps: label: "Git Submodule Regression Check" agents: queue: "automation-large-builder-fleet" - timeout: 5 + timeout: 5 \ No newline at end of file From df6b4c99d4205a8beb6e18535915daf507d2a5f2 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 2 May 2019 16:52:49 -0400 Subject: [PATCH 0255/1648] Added test metrics step to Buildkite --- .buildkite/pipeline.yml | 11 + .../node_modules/node-fetch/CHANGELOG.md | 260 +++ .../node_modules/node-fetch/LICENSE.md | 22 + .../metrics/node_modules/node-fetch/README.md | 538 ++++++ .../node_modules/node-fetch/browser.js | 23 + .../node_modules/node-fetch/lib/index.es.js | 1631 ++++++++++++++++ .../node_modules/node-fetch/lib/index.js | 1640 +++++++++++++++++ .../node_modules/node-fetch/lib/index.mjs | 1629 ++++++++++++++++ .../node_modules/node-fetch/package.json | 94 + scripts/metrics/node_modules/sax/LICENSE | 41 + scripts/metrics/node_modules/sax/README.md | 225 +++ scripts/metrics/node_modules/sax/lib/sax.js | 1565 ++++++++++++++++ scripts/metrics/node_modules/sax/package.json | 61 + scripts/metrics/node_modules/xml2js/LICENSE | 19 + scripts/metrics/node_modules/xml2js/README.md | 406 ++++ .../metrics/node_modules/xml2js/lib/bom.js | 12 + .../node_modules/xml2js/lib/builder.js | 127 ++ .../node_modules/xml2js/lib/defaults.js | 72 + .../metrics/node_modules/xml2js/lib/parser.js | 357 ++++ .../node_modules/xml2js/lib/processors.js | 34 + .../metrics/node_modules/xml2js/lib/xml2js.js | 37 + .../metrics/node_modules/xml2js/package.json | 280 +++ .../node_modules/xmlbuilder/.npmignore | 5 + .../node_modules/xmlbuilder/CHANGELOG.md | 423 +++++ .../metrics/node_modules/xmlbuilder/LICENSE | 21 + .../metrics/node_modules/xmlbuilder/README.md | 85 + .../node_modules/xmlbuilder/lib/Utility.js | 73 + .../xmlbuilder/lib/XMLAttribute.js | 31 + .../node_modules/xmlbuilder/lib/XMLCData.js | 32 + .../node_modules/xmlbuilder/lib/XMLComment.js | 32 + .../xmlbuilder/lib/XMLDTDAttList.js | 50 + .../xmlbuilder/lib/XMLDTDElement.js | 35 + .../xmlbuilder/lib/XMLDTDEntity.js | 56 + .../xmlbuilder/lib/XMLDTDNotation.js | 37 + .../xmlbuilder/lib/XMLDeclaration.js | 40 + .../node_modules/xmlbuilder/lib/XMLDocType.js | 107 ++ .../xmlbuilder/lib/XMLDocument.js | 48 + .../xmlbuilder/lib/XMLDocumentCB.js | 402 ++++ .../node_modules/xmlbuilder/lib/XMLElement.js | 111 ++ .../node_modules/xmlbuilder/lib/XMLNode.js | 432 +++++ .../lib/XMLProcessingInstruction.js | 35 + .../node_modules/xmlbuilder/lib/XMLRaw.js | 32 + .../xmlbuilder/lib/XMLStreamWriter.js | 279 +++ .../xmlbuilder/lib/XMLStringWriter.js | 334 ++++ .../xmlbuilder/lib/XMLStringifier.js | 163 ++ .../node_modules/xmlbuilder/lib/XMLText.js | 32 + .../xmlbuilder/lib/XMLWriterBase.js | 90 + .../node_modules/xmlbuilder/lib/index.js | 53 + .../node_modules/xmlbuilder/package.json | 65 + scripts/metrics/package-lock.json | 30 + scripts/metrics/test-metrics.js | 415 +++++ 51 files changed, 12632 insertions(+) create mode 100644 scripts/metrics/node_modules/node-fetch/CHANGELOG.md create mode 100644 scripts/metrics/node_modules/node-fetch/LICENSE.md create mode 100644 scripts/metrics/node_modules/node-fetch/README.md create mode 100644 scripts/metrics/node_modules/node-fetch/browser.js create mode 100644 scripts/metrics/node_modules/node-fetch/lib/index.es.js create mode 100644 scripts/metrics/node_modules/node-fetch/lib/index.js create mode 100644 scripts/metrics/node_modules/node-fetch/lib/index.mjs create mode 
100644 scripts/metrics/node_modules/node-fetch/package.json create mode 100644 scripts/metrics/node_modules/sax/LICENSE create mode 100644 scripts/metrics/node_modules/sax/README.md create mode 100644 scripts/metrics/node_modules/sax/lib/sax.js create mode 100644 scripts/metrics/node_modules/sax/package.json create mode 100644 scripts/metrics/node_modules/xml2js/LICENSE create mode 100644 scripts/metrics/node_modules/xml2js/README.md create mode 100644 scripts/metrics/node_modules/xml2js/lib/bom.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/builder.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/defaults.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/parser.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/processors.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/xml2js.js create mode 100644 scripts/metrics/node_modules/xml2js/package.json create mode 100644 scripts/metrics/node_modules/xmlbuilder/.npmignore create mode 100644 scripts/metrics/node_modules/xmlbuilder/CHANGELOG.md create mode 100644 scripts/metrics/node_modules/xmlbuilder/LICENSE create mode 100644 scripts/metrics/node_modules/xmlbuilder/README.md create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/Utility.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLAttribute.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLCData.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLComment.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDAttList.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDElement.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDEntity.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDNotation.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDeclaration.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDocType.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDocument.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDocumentCB.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLElement.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLNode.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLProcessingInstruction.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLRaw.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLStreamWriter.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLStringWriter.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLStringifier.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLText.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLWriterBase.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/index.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/package.json create mode 100644 scripts/metrics/package-lock.json create mode 100755 scripts/metrics/test-metrics.js diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 81432fe58f6..379d02015cb 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -306,6 +306,17 @@ steps: - "os=mojave" timeout: 60 + - wait: + continue_on_failure: true + + - command: | + cd scripts/metrics + node --max-old-space-size=4096 test-metrics.js + label: ":bar_chart: Test Metrics" + agents: + queue: "automation-apps-builder-fleet" + timeout: 10 + - wait 
- command: | # CentOS 7 Package Builder diff --git a/scripts/metrics/node_modules/node-fetch/CHANGELOG.md b/scripts/metrics/node_modules/node-fetch/CHANGELOG.md new file mode 100644 index 00000000000..941b6a8d8b7 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/CHANGELOG.md @@ -0,0 +1,260 @@ + +Changelog +========= + + +# 2.x release + +## v2.5.0 + +- Enhance: `Response` object now includes `redirected` property. +- Enhance: `fetch()` now accepts third-party `Blob` implementation as body. +- Other: disable `package-lock.json` generation as we never commit them. +- Other: dev dependency update. +- Other: readme update. + +## v2.4.1 + +- Fix: `Blob` import rule for node < 10, as `Readable` isn't a named export. + +## v2.4.0 + +- Enhance: added `Brotli` compression support (using node's zlib). +- Enhance: updated `Blob` implementation per spec. +- Fix: set content type automatically for `URLSearchParams`. +- Fix: `Headers` now reject empty header names. +- Fix: test cases, as node 12+ no longer accepts invalid header response. + +## v2.3.0 + +- Enhance: added `AbortSignal` support, with README example. +- Enhance: handle invalid `Location` header during redirect by rejecting them explicitly with `FetchError`. +- Fix: update `browser.js` to support react-native environment, where `self` isn't available globally. + +## v2.2.1 + +- Fix: `compress` flag shouldn't overwrite existing `Accept-Encoding` header. +- Fix: multiple `import` rules, where `PassThrough` etc. doesn't have a named export when using node <10 and `--exerimental-modules` flag. +- Other: Better README. + +## v2.2.0 + +- Enhance: Support all `ArrayBuffer` view types +- Enhance: Support Web Workers +- Enhance: Support Node.js' `--experimental-modules` mode; deprecate `.es.js` file +- Fix: Add `__esModule` property to the exports object +- Other: Better example in README for writing response to a file +- Other: More tests for Agent + +## v2.1.2 + +- Fix: allow `Body` methods to work on `ArrayBuffer`-backed `Body` objects +- Fix: reject promise returned by `Body` methods when the accumulated `Buffer` exceeds the maximum size +- Fix: support custom `Host` headers with any casing +- Fix: support importing `fetch()` from TypeScript in `browser.js` +- Fix: handle the redirect response body properly + +## v2.1.1 + +Fix packaging errors in v2.1.0. + +## v2.1.0 + +- Enhance: allow using ArrayBuffer as the `body` of a `fetch()` or `Request` +- Fix: store HTTP headers of a `Headers` object internally with the given case, for compatibility with older servers that incorrectly treated header names in a case-sensitive manner +- Fix: silently ignore invalid HTTP headers +- Fix: handle HTTP redirect responses without a `Location` header just like non-redirect responses +- Fix: include bodies when following a redirection when appropriate + +## v2.0.0 + +This is a major release. Check [our upgrade guide](https://github.com/bitinn/node-fetch/blob/master/UPGRADE-GUIDE.md) for an overview on some key differences between v1 and v2. + +### General changes + +- Major: Node.js 0.10.x and 0.12.x support is dropped +- Major: `require('node-fetch/lib/response')` etc. 
is now unsupported; use `require('node-fetch').Response` or ES6 module imports +- Enhance: start testing on Node.js v4.x, v6.x, v8.x LTS, as well as v9.x stable +- Enhance: use Rollup to produce a distributed bundle (less memory overhead and faster startup) +- Enhance: make `Object.prototype.toString()` on Headers, Requests, and Responses return correct class strings +- Other: rewrite in ES2015 using Babel +- Other: use Codecov for code coverage tracking +- Other: update package.json script for npm 5 +- Other: `encoding` module is now optional (alpha.7) +- Other: expose browser.js through package.json, avoid bundling mishaps (alpha.9) +- Other: allow TypeScript to `import` node-fetch by exposing default (alpha.9) + +### HTTP requests + +- Major: overwrite user's `Content-Length` if we can be sure our information is correct (per spec) +- Fix: errors in a response are caught before the body is accessed +- Fix: support WHATWG URL objects, created by `whatwg-url` package or `require('url').URL` in Node.js 7+ + +### Response and Request classes + +- Major: `response.text()` no longer attempts to detect encoding, instead always opting for UTF-8 (per spec); use `response.textConverted()` for the v1 behavior +- Major: make `response.json()` throw error instead of returning an empty object on 204 no-content respose (per spec; reverts behavior changed in v1.6.2) +- Major: internal methods are no longer exposed +- Major: throw error when a `GET` or `HEAD` Request is constructed with a non-null body (per spec) +- Enhance: add `response.arrayBuffer()` (also applies to Requests) +- Enhance: add experimental `response.blob()` (also applies to Requests) +- Enhance: `URLSearchParams` is now accepted as a body +- Enhance: wrap `response.json()` json parsing error as `FetchError` +- Fix: fix Request and Response with `null` body + +### Headers class + +- Major: remove `headers.getAll()`; make `get()` return all headers delimited by commas (per spec) +- Enhance: make Headers iterable +- Enhance: make Headers constructor accept an array of tuples +- Enhance: make sure header names and values are valid in HTTP +- Fix: coerce Headers prototype function parameters to strings, where applicable + +### Documentation + +- Enhance: more comprehensive API docs +- Enhance: add a list of default headers in README + + +# 1.x release + +## backport releases (v1.7.0 and beyond) + +See [changelog on 1.x branch](https://github.com/bitinn/node-fetch/blob/1.x/CHANGELOG.md) for details. 
+ +## v1.6.3 + +- Enhance: error handling document to explain `FetchError` design +- Fix: support `form-data` 2.x releases (requires `form-data` >= 2.1.0) + +## v1.6.2 + +- Enhance: minor document update +- Fix: response.json() returns empty object on 204 no-content response instead of throwing a syntax error + +## v1.6.1 + +- Fix: if `res.body` is a non-stream non-formdata object, we will call `body.toString` and send it as a string +- Fix: `counter` value is incorrectly set to `follow` value when wrapping Request instance +- Fix: documentation update + +## v1.6.0 + +- Enhance: added `res.buffer()` api for convenience, it returns body as a Node.js buffer +- Enhance: better old server support by handling raw deflate response +- Enhance: skip encoding detection for non-HTML/XML response +- Enhance: minor document update +- Fix: HEAD request doesn't need decompression, as body is empty +- Fix: `req.body` now accepts a Node.js buffer + +## v1.5.3 + +- Fix: handle 204 and 304 responses when body is empty but content-encoding is gzip/deflate +- Fix: allow resolving response and cloned response in any order +- Fix: avoid setting `content-length` when `form-data` body use streams +- Fix: send DELETE request with content-length when body is present +- Fix: allow any url when calling new Request, but still reject non-http(s) url in fetch + +## v1.5.2 + +- Fix: allow node.js core to handle keep-alive connection pool when passing a custom agent + +## v1.5.1 + +- Fix: redirect mode `manual` should work even when there is no redirection or broken redirection + +## v1.5.0 + +- Enhance: rejected promise now use custom `Error` (thx to @pekeler) +- Enhance: `FetchError` contains `err.type` and `err.code`, allows for better error handling (thx to @pekeler) +- Enhance: basic support for redirect mode `manual` and `error`, allows for location header extraction (thx to @jimmywarting for the initial PR) + +## v1.4.1 + +- Fix: wrapping Request instance with FormData body again should preserve the body as-is + +## v1.4.0 + +- Enhance: Request and Response now have `clone` method (thx to @kirill-konshin for the initial PR) +- Enhance: Request and Response now have proper string and buffer body support (thx to @kirill-konshin) +- Enhance: Body constructor has been refactored out (thx to @kirill-konshin) +- Enhance: Headers now has `forEach` method (thx to @tricoder42) +- Enhance: back to 100% code coverage +- Fix: better form-data support (thx to @item4) +- Fix: better character encoding detection under chunked encoding (thx to @dsuket for the initial PR) + +## v1.3.3 + +- Fix: make sure `Content-Length` header is set when body is string for POST/PUT/PATCH requests +- Fix: handle body stream error, for cases such as incorrect `Content-Encoding` header +- Fix: when following certain redirects, use `GET` on subsequent request per Fetch Spec +- Fix: `Request` and `Response` constructors now parse headers input using `Headers` + +## v1.3.2 + +- Enhance: allow auto detect of form-data input (no `FormData` spec on node.js, this is form-data specific feature) + +## v1.3.1 + +- Enhance: allow custom host header to be set (server-side only feature, as it's a forbidden header on client-side) + +## v1.3.0 + +- Enhance: now `fetch.Request` is exposed as well + +## v1.2.1 + +- Enhance: `Headers` now normalized `Number` value to `String`, prevent common mistakes + +## v1.2.0 + +- Enhance: now fetch.Headers and fetch.Response are exposed, making testing easier + +## v1.1.2 + +- Fix: `Headers` should only support `String` and 
`Array` properties, and ignore others + +## v1.1.1 + +- Enhance: now req.headers accept both plain object and `Headers` instance + +## v1.1.0 + +- Enhance: timeout now also applies to response body (in case of slow response) +- Fix: timeout is now cleared properly when fetch is done/has failed + +## v1.0.6 + +- Fix: less greedy content-type charset matching + +## v1.0.5 + +- Fix: when `follow = 0`, fetch should not follow redirect +- Enhance: update tests for better coverage +- Enhance: code formatting +- Enhance: clean up doc + +## v1.0.4 + +- Enhance: test iojs support +- Enhance: timeout attached to socket event only fire once per redirect + +## v1.0.3 + +- Fix: response size limit should reject large chunk +- Enhance: added character encoding detection for xml, such as rss/atom feed (encoding in DTD) + +## v1.0.2 + +- Fix: added res.ok per spec change + +## v1.0.0 + +- Enhance: better test coverage and doc + + +# 0.x release + +## v0.1 + +- Major: initial public release diff --git a/scripts/metrics/node_modules/node-fetch/LICENSE.md b/scripts/metrics/node_modules/node-fetch/LICENSE.md new file mode 100644 index 00000000000..660ffecb58b --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/LICENSE.md @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 David Frank + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/scripts/metrics/node_modules/node-fetch/README.md b/scripts/metrics/node_modules/node-fetch/README.md new file mode 100644 index 00000000000..48f4215e4e7 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/README.md @@ -0,0 +1,538 @@ +node-fetch +========== + +[![npm version][npm-image]][npm-url] +[![build status][travis-image]][travis-url] +[![coverage status][codecov-image]][codecov-url] +[![install size][install-size-image]][install-size-url] + +A light-weight module that brings `window.fetch` to Node.js + +(We are looking for [v2 maintainers and collaborators](https://github.com/bitinn/node-fetch/issues/567)) + + + +- [Motivation](#motivation) +- [Features](#features) +- [Difference from client-side fetch](#difference-from-client-side-fetch) +- [Installation](#installation) +- [Loading and configuring the module](#loading-and-configuring-the-module) +- [Common Usage](#common-usage) + - [Plain text or HTML](#plain-text-or-html) + - [JSON](#json) + - [Simple Post](#simple-post) + - [Post with JSON](#post-with-json) + - [Post with form parameters](#post-with-form-parameters) + - [Handling exceptions](#handling-exceptions) + - [Handling client and server errors](#handling-client-and-server-errors) +- [Advanced Usage](#advanced-usage) + - [Streams](#streams) + - [Buffer](#buffer) + - [Accessing Headers and other Meta data](#accessing-headers-and-other-meta-data) + - [Post data using a file stream](#post-data-using-a-file-stream) + - [Post with form-data (detect multipart)](#post-with-form-data-detect-multipart) + - [Request cancellation with AbortSignal](#request-cancellation-with-abortsignal) +- [API](#api) + - [fetch(url[, options])](#fetchurl-options) + - [Options](#options) + - [Class: Request](#class-request) + - [Class: Response](#class-response) + - [Class: Headers](#class-headers) + - [Interface: Body](#interface-body) + - [Class: FetchError](#class-fetcherror) +- [License](#license) +- [Acknowledgement](#acknowledgement) + + + +## Motivation + +Instead of implementing `XMLHttpRequest` in Node.js to run browser-specific [Fetch polyfill](https://github.com/github/fetch), why not go from native `http` to `fetch` API directly? Hence `node-fetch`, minimal code for a `window.fetch` compatible API on Node.js runtime. + +See Matt Andrews' [isomorphic-fetch](https://github.com/matthew-andrews/isomorphic-fetch) or Leonardo Quixada's [cross-fetch](https://github.com/lquixada/cross-fetch) for isomorphic usage (exports `node-fetch` for server-side, `whatwg-fetch` for client-side). + +## Features + +- Stay consistent with `window.fetch` API. +- Make conscious trade-off when following [WHATWG fetch spec][whatwg-fetch] and [stream spec](https://streams.spec.whatwg.org/) implementation details, document known differences. +- Use native promise, but allow substituting it with [insert your favorite promise library]. +- Use native Node streams for body, on both request and response. +- Decode content encoding (gzip/deflate) properly, and convert string output (such as `res.text()` and `res.json()`) to UTF-8 automatically. +- Useful extensions such as timeout, redirect limit, response size limit, [explicit errors](ERROR-HANDLING.md) for troubleshooting. + +## Difference from client-side fetch + +- See [Known Differences](LIMITS.md) for details. +- If you happen to use a missing feature that `window.fetch` offers, feel free to open an issue. +- Pull requests are welcomed too! 
+ +## Installation + +Current stable release (`2.x`) + +```sh +$ npm install node-fetch --save +``` + +## Loading and configuring the module +We suggest you load the module via `require`, pending the stabalizing of es modules in node: +```js +const fetch = require('node-fetch'); +``` + +If you are using a Promise library other than native, set it through fetch.Promise: +```js +const Bluebird = require('bluebird'); + +fetch.Promise = Bluebird; +``` + +## Common Usage + +NOTE: The documentation below is up-to-date with `2.x` releases, [see `1.x` readme](https://github.com/bitinn/node-fetch/blob/1.x/README.md), [changelog](https://github.com/bitinn/node-fetch/blob/1.x/CHANGELOG.md) and [2.x upgrade guide](UPGRADE-GUIDE.md) for the differences. + +#### Plain text or HTML +```js +fetch('https://github.com/') + .then(res => res.text()) + .then(body => console.log(body)); +``` + +#### JSON + +```js + +fetch('https://api.github.com/users/github') + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Simple Post +```js +fetch('https://httpbin.org/post', { method: 'POST', body: 'a=1' }) + .then(res => res.json()) // expecting a json response + .then(json => console.log(json)); +``` + +#### Post with JSON + +```js +const body = { a: 1 }; + +fetch('https://httpbin.org/post', { + method: 'post', + body: JSON.stringify(body), + headers: { 'Content-Type': 'application/json' }, + }) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Post with form parameters +`URLSearchParams` is available in Node.js as of v7.5.0. See [official documentation](https://nodejs.org/api/url.html#url_class_urlsearchparams) for more usage methods. + +NOTE: The `Content-Type` header is only set automatically to `x-www-form-urlencoded` when an instance of `URLSearchParams` is given as such: + +```js +const { URLSearchParams } = require('url'); + +const params = new URLSearchParams(); +params.append('a', 1); + +fetch('https://httpbin.org/post', { method: 'POST', body: params }) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Handling exceptions +NOTE: 3xx-5xx responses are *NOT* exceptions, and should be handled in `then()`, see the next section. + +Adding a catch to the fetch promise chain will catch *all* exceptions, such as errors originating from node core libraries, like network errors, and operational errors which are instances of FetchError. See the [error handling document](ERROR-HANDLING.md) for more details. + +```js +fetch('https://domain.invalid/') + .catch(err => console.error(err)); +``` + +#### Handling client and server errors +It is common to create a helper function to check that the response contains no client (4xx) or server (5xx) error responses: + +```js +function checkStatus(res) { + if (res.ok) { // res.status >= 200 && res.status < 300 + return res; + } else { + throw MyCustomError(res.statusText); + } +} + +fetch('https://httpbin.org/status/400') + .then(checkStatus) + .then(res => console.log('will not get here...')) +``` + +## Advanced Usage + +#### Streams +The "Node.js way" is to use streams when possible: + +```js +fetch('https://assets-cdn.github.com/images/modules/logos_page/Octocat.png') + .then(res => { + const dest = fs.createWriteStream('./octocat.png'); + res.body.pipe(dest); + }); +``` + +#### Buffer +If you prefer to cache binary data in full, use buffer(). 
(NOTE: buffer() is a `node-fetch` only API) + +```js +const fileType = require('file-type'); + +fetch('https://assets-cdn.github.com/images/modules/logos_page/Octocat.png') + .then(res => res.buffer()) + .then(buffer => fileType(buffer)) + .then(type => { /* ... */ }); +``` + +#### Accessing Headers and other Meta data +```js +fetch('https://github.com/') + .then(res => { + console.log(res.ok); + console.log(res.status); + console.log(res.statusText); + console.log(res.headers.raw()); + console.log(res.headers.get('content-type')); + }); +``` + +#### Post data using a file stream + +```js +const { createReadStream } = require('fs'); + +const stream = createReadStream('input.txt'); + +fetch('https://httpbin.org/post', { method: 'POST', body: stream }) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Post with form-data (detect multipart) + +```js +const FormData = require('form-data'); + +const form = new FormData(); +form.append('a', 1); + +fetch('https://httpbin.org/post', { method: 'POST', body: form }) + .then(res => res.json()) + .then(json => console.log(json)); + +// OR, using custom headers +// NOTE: getHeaders() is non-standard API + +const form = new FormData(); +form.append('a', 1); + +const options = { + method: 'POST', + body: form, + headers: form.getHeaders() +} + +fetch('https://httpbin.org/post', options) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Request cancellation with AbortSignal + +> NOTE: You may only cancel streamed requests on Node >= v8.0.0 + +You may cancel requests with `AbortController`. A suggested implementation is [`abort-controller`](https://www.npmjs.com/package/abort-controller). + +An example of timing out a request after 150ms could be achieved as follows: + +```js +import AbortController from 'abort-controller'; + +const controller = new AbortController(); +const timeout = setTimeout( + () => { controller.abort(); }, + 150, +); + +fetch(url, { signal: controller.signal }) + .then(res => res.json()) + .then( + data => { + useData(data) + }, + err => { + if (err.name === 'AbortError') { + // request was aborted + } + }, + ) + .finally(() => { + clearTimeout(timeout); + }); +``` + +See [test cases](https://github.com/bitinn/node-fetch/blob/master/test/test.js) for more examples. + + +## API + +### fetch(url[, options]) + +- `url` A string representing the URL for fetching +- `options` [Options](#fetch-options) for the HTTP(S) request +- Returns: Promise<[Response](#class-response)> + +Perform an HTTP(S) fetch. + +`url` should be an absolute url, such as `https://example.com/`. A path-relative URL (`/file/under/root`) or protocol-relative URL (`//can-be-http-or-https.com/`) will result in a rejected promise. + + +### Options + +The default values are shown after each option key. + +```js +{ + // These properties are part of the Fetch Standard + method: 'GET', + headers: {}, // request headers. format is the identical to that accepted by the Headers constructor (see below) + body: null, // request body. can be null, a string, a Buffer, a Blob, or a Node.js Readable stream + redirect: 'follow', // set to `manual` to extract redirect headers, `error` to reject redirect + signal: null, // pass an instance of AbortSignal to optionally abort requests + + // The following properties are node-fetch extensions + follow: 20, // maximum redirect count. 0 to not follow redirect + timeout: 0, // req/res timeout in ms, it resets on redirect. 0 to disable (OS limit applies). Signal is recommended instead. 
+ compress: true, // support gzip/deflate content encoding. false to disable + size: 0, // maximum response body size in bytes. 0 to disable + agent: null // http(s).Agent instance, allows custom proxy, certificate, dns lookup etc. +} +``` + +##### Default Headers + +If no values are set, the following request headers will be sent automatically: + +Header | Value +------------------- | -------------------------------------------------------- +`Accept-Encoding` | `gzip,deflate` _(when `options.compress === true`)_ +`Accept` | `*/*` +`Connection` | `close` _(when no `options.agent` is present)_ +`Content-Length` | _(automatically calculated, if possible)_ +`Transfer-Encoding` | `chunked` _(when `req.body` is a stream)_ +`User-Agent` | `node-fetch/1.0 (+https://github.com/bitinn/node-fetch)` + + +### Class: Request + +An HTTP(S) request containing information about URL, method, headers, and the body. This class implements the [Body](#iface-body) interface. + +Due to the nature of Node.js, the following properties are not implemented at this moment: + +- `type` +- `destination` +- `referrer` +- `referrerPolicy` +- `mode` +- `credentials` +- `cache` +- `integrity` +- `keepalive` + +The following node-fetch extension properties are provided: + +- `follow` +- `compress` +- `counter` +- `agent` + +See [options](#fetch-options) for exact meaning of these extensions. + +#### new Request(input[, options]) + +*(spec-compliant)* + +- `input` A string representing a URL, or another `Request` (which will be cloned) +- `options` [Options][#fetch-options] for the HTTP(S) request + +Constructs a new `Request` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Request/Request). + +In most cases, directly `fetch(url, options)` is simpler than creating a `Request` object. + + +### Class: Response + +An HTTP(S) response. This class implements the [Body](#iface-body) interface. + +The following properties are not implemented in node-fetch at this moment: + +- `Response.error()` +- `Response.redirect()` +- `type` +- `trailer` + +#### new Response([body[, options]]) + +*(spec-compliant)* + +- `body` A string or [Readable stream][node-readable] +- `options` A [`ResponseInit`][response-init] options dictionary + +Constructs a new `Response` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Response/Response). + +Because Node.js does not implement service workers (for which this class was designed), one rarely has to construct a `Response` directly. + +#### response.ok + +*(spec-compliant)* + +Convenience property representing if the request ended normally. Will evaluate to true if the response status was greater than or equal to 200 but smaller than 300. + +#### response.redirected + +*(spec-compliant)* + +Convenience property representing if the request has been redirected at least once. Will evaluate to true if the internal redirect counter is greater than 0. + + +### Class: Headers + +This class allows manipulating and iterating over a set of HTTP headers. All methods specified in the [Fetch Standard][whatwg-fetch] are implemented. + +#### new Headers([init]) + +*(spec-compliant)* + +- `init` Optional argument to pre-fill the `Headers` object + +Construct a new `Headers` object. `init` can be either `null`, a `Headers` object, an key-value map object, or any iterable object. 
+ +```js +// Example adapted from https://fetch.spec.whatwg.org/#example-headers-class + +const meta = { + 'Content-Type': 'text/xml', + 'Breaking-Bad': '<3' +}; +const headers = new Headers(meta); + +// The above is equivalent to +const meta = [ + [ 'Content-Type', 'text/xml' ], + [ 'Breaking-Bad', '<3' ] +]; +const headers = new Headers(meta); + +// You can in fact use any iterable objects, like a Map or even another Headers +const meta = new Map(); +meta.set('Content-Type', 'text/xml'); +meta.set('Breaking-Bad', '<3'); +const headers = new Headers(meta); +const copyOfHeaders = new Headers(headers); +``` + + +### Interface: Body + +`Body` is an abstract interface with methods that are applicable to both `Request` and `Response` classes. + +The following methods are not yet implemented in node-fetch at this moment: + +- `formData()` + +#### body.body + +*(deviation from spec)* + +* Node.js [`Readable` stream][node-readable] + +The data encapsulated in the `Body` object. Note that while the [Fetch Standard][whatwg-fetch] requires the property to always be a WHATWG `ReadableStream`, in node-fetch it is a Node.js [`Readable` stream][node-readable]. + +#### body.bodyUsed + +*(spec-compliant)* + +* `Boolean` + +A boolean property for if this body has been consumed. Per spec, a consumed body cannot be used again. + +#### body.arrayBuffer() +#### body.blob() +#### body.json() +#### body.text() + +*(spec-compliant)* + +* Returns: Promise + +Consume the body and return a promise that will resolve to one of these formats. + +#### body.buffer() + +*(node-fetch extension)* + +* Returns: Promise<Buffer> + +Consume the body and return a promise that will resolve to a Buffer. + +#### body.textConverted() + +*(node-fetch extension)* + +* Returns: Promise<String> + +Identical to `body.text()`, except instead of always converting to UTF-8, encoding sniffing will be performed and text converted to UTF-8, if possible. + +(This API requires an optional dependency on npm package [encoding](https://www.npmjs.com/package/encoding), which you need to install manually. `webpack` users may see [a warning message](https://github.com/bitinn/node-fetch/issues/412#issuecomment-379007792) due to this optional dependency.) + + +### Class: FetchError + +*(node-fetch extension)* + +An operational error in the fetching process. See [ERROR-HANDLING.md][] for more info. + + +### Class: AbortError + +*(node-fetch extension)* + +An Error thrown when the request is aborted in response to an `AbortSignal`'s `abort` event. It has a `name` property of `AbortError`. See [ERROR-HANDLING.MD][] for more info. + +## Acknowledgement + +Thanks to [github/fetch](https://github.com/github/fetch) for providing a solid implementation reference. + +`node-fetch` v1 was maintained by [@bitinn](https://github.com/bitinn); v2 was maintained by [@TimothyGu](https://github.com/timothygu), [@bitinn](https://github.com/bitinn) and [@jimmywarting](https://github.com/jimmywarting); v2 readme is written by [@jkantr](https://github.com/jkantr). 
+ +## License + +MIT + +[npm-image]: https://flat.badgen.net/npm/v/node-fetch +[npm-url]: https://www.npmjs.com/package/node-fetch +[travis-image]: https://flat.badgen.net/travis/bitinn/node-fetch +[travis-url]: https://travis-ci.org/bitinn/node-fetch +[codecov-image]: https://flat.badgen.net/codecov/c/github/bitinn/node-fetch/master +[codecov-url]: https://codecov.io/gh/bitinn/node-fetch +[install-size-image]: https://flat.badgen.net/packagephobia/install/node-fetch +[install-size-url]: https://packagephobia.now.sh/result?p=node-fetch +[whatwg-fetch]: https://fetch.spec.whatwg.org/ +[response-init]: https://fetch.spec.whatwg.org/#responseinit +[node-readable]: https://nodejs.org/api/stream.html#stream_readable_streams +[mdn-headers]: https://developer.mozilla.org/en-US/docs/Web/API/Headers +[LIMITS.md]: https://github.com/bitinn/node-fetch/blob/master/LIMITS.md +[ERROR-HANDLING.md]: https://github.com/bitinn/node-fetch/blob/master/ERROR-HANDLING.md +[UPGRADE-GUIDE.md]: https://github.com/bitinn/node-fetch/blob/master/UPGRADE-GUIDE.md diff --git a/scripts/metrics/node_modules/node-fetch/browser.js b/scripts/metrics/node_modules/node-fetch/browser.js new file mode 100644 index 00000000000..0ad5de004c4 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/browser.js @@ -0,0 +1,23 @@ +"use strict"; + +// ref: https://github.com/tc39/proposal-global +var getGlobal = function () { + // the only reliable means to get the global object is + // `Function('return this')()` + // However, this causes CSP violations in Chrome apps. + if (typeof self !== 'undefined') { return self; } + if (typeof window !== 'undefined') { return window; } + if (typeof global !== 'undefined') { return global; } + throw new Error('unable to locate global object'); +} + +var global = getGlobal(); + +module.exports = exports = global.fetch; + +// Needed for TypeScript and Webpack. +exports.default = global.fetch.bind(global); + +exports.Headers = global.Headers; +exports.Request = global.Request; +exports.Response = global.Response; \ No newline at end of file diff --git a/scripts/metrics/node_modules/node-fetch/lib/index.es.js b/scripts/metrics/node_modules/node-fetch/lib/index.es.js new file mode 100644 index 00000000000..20ab807872f --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/lib/index.es.js @@ -0,0 +1,1631 @@ +process.emitWarning("The .es.js file is deprecated. Use .mjs instead."); + +import Stream from 'stream'; +import http from 'http'; +import Url from 'url'; +import https from 'https'; +import zlib from 'zlib'; + +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js + +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; + +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); + +class Blob { + constructor() { + this[TYPE] = ''; + + const blobParts = arguments[0]; + const options = arguments[1]; + + const buffers = []; + let size = 0; + + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } + + this[BUFFER] = Buffer.concat(buffers); + + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; + + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); + + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} + +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); + +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ + +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); + + this.message = message; + this.type = type; + + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; + } + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; + +let convert; +try { + convert = require('encoding').convert; +} catch (e) {} + +const INTERNALS = Symbol('Body internals'); + +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; + +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; + + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; + + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 
0 : _ref$timeout; + + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; + + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} + +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, + + get bodyUsed() { + return this[INTERNALS].disturbed; + }, + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, + + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, + + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; + + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, + + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, + + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; + + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; + +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); + +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; + +/** + * Consume and convert an entire Body to a Buffer. 
+ * + * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body + * + * @return Promise + */ +function consumeBody() { + var _this4 = this; + + if (this[INTERNALS].disturbed) { + return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`)); + } + + this[INTERNALS].disturbed = true; + + if (this[INTERNALS].error) { + return Body.Promise.reject(this[INTERNALS].error); + } + + let body = this.body; + + // body is null + if (body === null) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is blob + if (isBlob(body)) { + body = body.stream(); + } + + // body is buffer + if (Buffer.isBuffer(body)) { + return Body.Promise.resolve(body); + } + + // istanbul ignore if: should never happen + if (!(body instanceof Stream)) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is stream + // get ready to actually consume the body + let accum = []; + let accumBytes = 0; + let abort = false; + + return new Body.Promise(function (resolve, reject) { + let resTimeout; + + // allow timeout on slow response body + if (_this4.timeout) { + resTimeout = setTimeout(function () { + abort = true; + reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout')); + }, _this4.timeout); + } + + // handle stream errors + body.on('error', function (err) { + if (err.name === 'AbortError') { + // if the request was aborted, reject with this Error + abort = true; + reject(err); + } else { + // other errors, such as incorrect content-encoding + reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + + body.on('data', function (chunk) { + if (abort || chunk === null) { + return; + } + + if (_this4.size && accumBytes + chunk.length > _this4.size) { + abort = true; + reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size')); + return; + } + + accumBytes += chunk.length; + accum.push(chunk); + }); + + body.on('end', function () { + if (abort) { + return; + } + + clearTimeout(resTimeout); + + try { + resolve(Buffer.concat(accum, accumBytes)); + } catch (err) { + // handle streams that have accumulated too much data (issue #414) + reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + }); +} + +/** + * Detect buffer encoding and convert to target encoding + * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding + * + * @param Buffer buffer Incoming buffer + * @param String encoding Target encoding + * @return String + */ +function convertBody(buffer, headers) { + if (typeof convert !== 'function') { + throw new Error('The package `encoding` must be installed to use the textConverted() function'); + } + + const ct = headers.get('content-type'); + let charset = 'utf-8'; + let res, str; + + // header + if (ct) { + res = /charset=([^;]*)/i.exec(ct); + } + + // no charset in content type, peek at response body for at most 1024 bytes + str = buffer.slice(0, 1024).toString(); + + // html5 + if (!res && str) { + res = / 0 && arguments[0] !== undefined ? 
arguments[0] : undefined; + + this[MAP] = Object.create(null); + + if (init instanceof Headers) { + const rawHeaders = init.raw(); + const headerNames = Object.keys(rawHeaders); + + for (const headerName of headerNames) { + for (const value of rawHeaders[headerName]) { + this.append(headerName, value); + } + } + + return; + } + + // We don't worry about converting prop to ByteString here as append() + // will handle it. + if (init == null) ; else if (typeof init === 'object') { + const method = init[Symbol.iterator]; + if (method != null) { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + + // sequence> + // Note: per spec we have to first exhaust the lists then process them + const pairs = []; + for (const pair of init) { + if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { + throw new TypeError('Each header pair must be iterable'); + } + pairs.push(Array.from(pair)); + } + + for (const pair of pairs) { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + this.append(pair[0], pair[1]); + } + } else { + // record + for (const key of Object.keys(init)) { + const value = init[key]; + this.append(key, value); + } + } + } else { + throw new TypeError('Provided initializer must be an object'); + } + } + + /** + * Return combined header value given name + * + * @param String name Header name + * @return Mixed + */ + get(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key === undefined) { + return null; + } + + return this[MAP][key].join(', '); + } + + /** + * Iterate over all headers + * + * @param Function callback Executed for each item with parameters (value, name, thisArg) + * @param Boolean thisArg `this` context for callback function + * @return Void + */ + forEach(callback) { + let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; + + let pairs = getHeaders(this); + let i = 0; + while (i < pairs.length) { + var _pairs$i = pairs[i]; + const name = _pairs$i[0], + value = _pairs$i[1]; + + callback.call(thisArg, value, name, this); + pairs = getHeaders(this); + i++; + } + } + + /** + * Overwrite header values given name + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + set(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + this[MAP][key !== undefined ? 
key : name] = [value]; + } + + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } + + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } + + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } + + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } + + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } + + /** + * Get an iterator on values. + * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } + + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } +} +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); + +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? 
function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} + +const INTERNAL = Symbol('internal'); + +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; +} + +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } + + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; + + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } + + this[INTERNAL].index = index + 1; + + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * Export the Headers object in a form that Node.js can consume. + * + * @param Headers headers + * @return Object + */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); + + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } + + return obj; +} + +/** + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers + */ +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; +} + +const INTERNALS$1 = Symbol('Response internals'); + +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; + +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + Body.call(this, body, opts); + + const status = opts.status || 200; + const headers = new Headers(opts.headers); + + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } + + get url() { + return this[INTERNALS$1].url; + } + + get status() { + return this[INTERNALS$1].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } + + get redirected() { + return this[INTERNALS$1].counter > 0; + } + + get statusText() { + return this[INTERNALS$1].statusText; + } + + get headers() { + return this[INTERNALS$1].headers; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } +} + +Body.mixIn(Response.prototype); + +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); + +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); + +const INTERNALS$2 = Symbol('Request internals'); + +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; + +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; + +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; +} + +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); +} + +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + let parsedURL; + + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parse_url(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parse_url(`${input}`); + } + input = {}; + } else { + parsedURL = parse_url(input.url); + } + + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); + + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; + + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; + + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } + + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; + + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } + + get method() { + return this[INTERNALS$2].method; + } + + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } + + get headers() { + return this[INTERNALS$2].headers; + } + + get redirect() { + return this[INTERNALS$2].redirect; + } + + get signal() { + return this[INTERNALS$2].signal; + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} + +Body.mixIn(Request.prototype); + +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); + +/** + * Convert a Request to Node.js http request options. 
+ * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); + + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } + + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } + + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } + + if (!headers.has('Connection') && !request.agent) { + headers.set('Connection', 'close'); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent: request.agent + }); +} + +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ + +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); + + this.type = 'aborted'; + this.message = message; + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; + +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; +const resolve_url = Url.resolve; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { + + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } + + Body.Promise = fetch.Promise; + + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); + + const send = (options.protocol === 'https:' ? 
https : http).request; + const signal = request.signal; + + let response = null; + + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; + + // send request + const req = send(options); + let reqTimeout; + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } + + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } + + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); + + req.on('response', function (res) { + clearTimeout(reqTimeout); + + const headers = createHeadersLenient(res.headers); + + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + const locationURL = location === null ? null : resolve_url(request.url, location); + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. + if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
+ const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout + }; + + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } + + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); + + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. 
+ const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } + + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } + + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } + + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); + + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; + +// expose Promise +fetch.Promise = global.Promise; + +export default fetch; +export { Headers, Request, Response, FetchError }; diff --git a/scripts/metrics/node_modules/node-fetch/lib/index.js b/scripts/metrics/node_modules/node-fetch/lib/index.js new file mode 100644 index 00000000000..86c7c031229 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/lib/index.js @@ -0,0 +1,1640 @@ +'use strict'; + +Object.defineProperty(exports, '__esModule', { value: true }); + +function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } + +var Stream = _interopDefault(require('stream')); +var http = _interopDefault(require('http')); +var Url = _interopDefault(require('url')); +var https = _interopDefault(require('https')); +var zlib = _interopDefault(require('zlib')); + +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js + +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; + +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); + +class Blob { + constructor() { + this[TYPE] = ''; + + const blobParts = arguments[0]; + const options = arguments[1]; + + const buffers = []; + let size = 0; + + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } + + this[BUFFER] = Buffer.concat(buffers); + + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; + + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); + + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} + +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); + +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ + +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); + + this.message = message; + this.type = type; + + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; + } + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; + +let convert; +try { + convert = require('encoding').convert; +} catch (e) {} + +const INTERNALS = Symbol('Body internals'); + +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; + +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; + + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; + + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 
0 : _ref$timeout; + + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; + + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} + +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, + + get bodyUsed() { + return this[INTERNALS].disturbed; + }, + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, + + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, + + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; + + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, + + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, + + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; + + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; + +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); + +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; + +/** + * Consume and convert an entire Body to a Buffer. 
+ * + * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body + * + * @return Promise + */ +function consumeBody() { + var _this4 = this; + + if (this[INTERNALS].disturbed) { + return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`)); + } + + this[INTERNALS].disturbed = true; + + if (this[INTERNALS].error) { + return Body.Promise.reject(this[INTERNALS].error); + } + + let body = this.body; + + // body is null + if (body === null) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is blob + if (isBlob(body)) { + body = body.stream(); + } + + // body is buffer + if (Buffer.isBuffer(body)) { + return Body.Promise.resolve(body); + } + + // istanbul ignore if: should never happen + if (!(body instanceof Stream)) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is stream + // get ready to actually consume the body + let accum = []; + let accumBytes = 0; + let abort = false; + + return new Body.Promise(function (resolve, reject) { + let resTimeout; + + // allow timeout on slow response body + if (_this4.timeout) { + resTimeout = setTimeout(function () { + abort = true; + reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout')); + }, _this4.timeout); + } + + // handle stream errors + body.on('error', function (err) { + if (err.name === 'AbortError') { + // if the request was aborted, reject with this Error + abort = true; + reject(err); + } else { + // other errors, such as incorrect content-encoding + reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + + body.on('data', function (chunk) { + if (abort || chunk === null) { + return; + } + + if (_this4.size && accumBytes + chunk.length > _this4.size) { + abort = true; + reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size')); + return; + } + + accumBytes += chunk.length; + accum.push(chunk); + }); + + body.on('end', function () { + if (abort) { + return; + } + + clearTimeout(resTimeout); + + try { + resolve(Buffer.concat(accum, accumBytes)); + } catch (err) { + // handle streams that have accumulated too much data (issue #414) + reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + }); +} + +/** + * Detect buffer encoding and convert to target encoding + * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding + * + * @param Buffer buffer Incoming buffer + * @param String encoding Target encoding + * @return String + */ +function convertBody(buffer, headers) { + if (typeof convert !== 'function') { + throw new Error('The package `encoding` must be installed to use the textConverted() function'); + } + + const ct = headers.get('content-type'); + let charset = 'utf-8'; + let res, str; + + // header + if (ct) { + res = /charset=([^;]*)/i.exec(ct); + } + + // no charset in content type, peek at response body for at most 1024 bytes + str = buffer.slice(0, 1024).toString(); + + // html5 + if (!res && str) { + res = / 0 && arguments[0] !== undefined ? 
arguments[0] : undefined; + + this[MAP] = Object.create(null); + + if (init instanceof Headers) { + const rawHeaders = init.raw(); + const headerNames = Object.keys(rawHeaders); + + for (const headerName of headerNames) { + for (const value of rawHeaders[headerName]) { + this.append(headerName, value); + } + } + + return; + } + + // We don't worry about converting prop to ByteString here as append() + // will handle it. + if (init == null) ; else if (typeof init === 'object') { + const method = init[Symbol.iterator]; + if (method != null) { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + + // sequence> + // Note: per spec we have to first exhaust the lists then process them + const pairs = []; + for (const pair of init) { + if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { + throw new TypeError('Each header pair must be iterable'); + } + pairs.push(Array.from(pair)); + } + + for (const pair of pairs) { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + this.append(pair[0], pair[1]); + } + } else { + // record + for (const key of Object.keys(init)) { + const value = init[key]; + this.append(key, value); + } + } + } else { + throw new TypeError('Provided initializer must be an object'); + } + } + + /** + * Return combined header value given name + * + * @param String name Header name + * @return Mixed + */ + get(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key === undefined) { + return null; + } + + return this[MAP][key].join(', '); + } + + /** + * Iterate over all headers + * + * @param Function callback Executed for each item with parameters (value, name, thisArg) + * @param Boolean thisArg `this` context for callback function + * @return Void + */ + forEach(callback) { + let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; + + let pairs = getHeaders(this); + let i = 0; + while (i < pairs.length) { + var _pairs$i = pairs[i]; + const name = _pairs$i[0], + value = _pairs$i[1]; + + callback.call(thisArg, value, name, this); + pairs = getHeaders(this); + i++; + } + } + + /** + * Overwrite header values given name + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + set(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + this[MAP][key !== undefined ? 
key : name] = [value]; + } + + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } + + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } + + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } + + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } + + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } + + /** + * Get an iterator on values. + * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } + + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } +} +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); + +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? 
function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} + +const INTERNAL = Symbol('internal'); + +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; +} + +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } + + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; + + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } + + this[INTERNAL].index = index + 1; + + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * Export the Headers object in a form that Node.js can consume. + * + * @param Headers headers + * @return Object + */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); + + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } + + return obj; +} + +/** + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers + */ +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; +} + +const INTERNALS$1 = Symbol('Response internals'); + +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; + +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + Body.call(this, body, opts); + + const status = opts.status || 200; + const headers = new Headers(opts.headers); + + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } + + get url() { + return this[INTERNALS$1].url; + } + + get status() { + return this[INTERNALS$1].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } + + get redirected() { + return this[INTERNALS$1].counter > 0; + } + + get statusText() { + return this[INTERNALS$1].statusText; + } + + get headers() { + return this[INTERNALS$1].headers; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } +} + +Body.mixIn(Response.prototype); + +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); + +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); + +const INTERNALS$2 = Symbol('Request internals'); + +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; + +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; + +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; +} + +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); +} + +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + let parsedURL; + + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parse_url(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parse_url(`${input}`); + } + input = {}; + } else { + parsedURL = parse_url(input.url); + } + + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); + + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; + + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; + + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } + + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; + + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } + + get method() { + return this[INTERNALS$2].method; + } + + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } + + get headers() { + return this[INTERNALS$2].headers; + } + + get redirect() { + return this[INTERNALS$2].redirect; + } + + get signal() { + return this[INTERNALS$2].signal; + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} + +Body.mixIn(Request.prototype); + +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); + +/** + * Convert a Request to Node.js http request options. 
+ * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); + + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } + + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } + + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } + + if (!headers.has('Connection') && !request.agent) { + headers.set('Connection', 'close'); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent: request.agent + }); +} + +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ + +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); + + this.type = 'aborted'; + this.message = message; + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; + +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; +const resolve_url = Url.resolve; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { + + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } + + Body.Promise = fetch.Promise; + + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); + + const send = (options.protocol === 'https:' ? 
https : http).request; + const signal = request.signal; + + let response = null; + + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; + + // send request + const req = send(options); + let reqTimeout; + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } + + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } + + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); + + req.on('response', function (res) { + clearTimeout(reqTimeout); + + const headers = createHeadersLenient(res.headers); + + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + const locationURL = location === null ? null : resolve_url(request.url, location); + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. + if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
+ const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout + }; + + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } + + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); + + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. 
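+      // Using Z_SYNC_FLUSH for finishFlush lets zlib return whatever it has
+      // decoded when a response ends before the final deflate block, rather
+      // than treating the truncated stream as an error.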
+ const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } + + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } + + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } + + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); + + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; + +// expose Promise +fetch.Promise = global.Promise; + +module.exports = exports = fetch; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.default = exports; +exports.Headers = Headers; +exports.Request = Request; +exports.Response = Response; +exports.FetchError = FetchError; diff --git a/scripts/metrics/node_modules/node-fetch/lib/index.mjs b/scripts/metrics/node_modules/node-fetch/lib/index.mjs new file mode 100644 index 00000000000..dca525658b4 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/lib/index.mjs @@ -0,0 +1,1629 @@ +import Stream from 'stream'; +import http from 'http'; +import Url from 'url'; +import https from 'https'; +import zlib from 'zlib'; + +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js + +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; + +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); + +class Blob { + constructor() { + this[TYPE] = ''; + + const blobParts = arguments[0]; + const options = arguments[1]; + + const buffers = []; + let size = 0; + + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } + + this[BUFFER] = Buffer.concat(buffers); + + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; + + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); + + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} + +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); + +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ + +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); + + this.message = message; + this.type = type; + + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; + } + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; + +let convert; +try { + convert = require('encoding').convert; +} catch (e) {} + +const INTERNALS = Symbol('Body internals'); + +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; + +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; + + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; + + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 
0 : _ref$timeout; + + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; + + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} + +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, + + get bodyUsed() { + return this[INTERNALS].disturbed; + }, + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, + + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, + + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; + + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, + + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, + + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; + + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; + +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); + +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; + +/** + * Consume and convert an entire Body to a Buffer. 
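+ * Rejects if the body was already consumed, if it grows past `this.size`,
+ * or if reading takes longer than `this.timeout`.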
+ * + * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body + * + * @return Promise + */ +function consumeBody() { + var _this4 = this; + + if (this[INTERNALS].disturbed) { + return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`)); + } + + this[INTERNALS].disturbed = true; + + if (this[INTERNALS].error) { + return Body.Promise.reject(this[INTERNALS].error); + } + + let body = this.body; + + // body is null + if (body === null) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is blob + if (isBlob(body)) { + body = body.stream(); + } + + // body is buffer + if (Buffer.isBuffer(body)) { + return Body.Promise.resolve(body); + } + + // istanbul ignore if: should never happen + if (!(body instanceof Stream)) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is stream + // get ready to actually consume the body + let accum = []; + let accumBytes = 0; + let abort = false; + + return new Body.Promise(function (resolve, reject) { + let resTimeout; + + // allow timeout on slow response body + if (_this4.timeout) { + resTimeout = setTimeout(function () { + abort = true; + reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout')); + }, _this4.timeout); + } + + // handle stream errors + body.on('error', function (err) { + if (err.name === 'AbortError') { + // if the request was aborted, reject with this Error + abort = true; + reject(err); + } else { + // other errors, such as incorrect content-encoding + reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + + body.on('data', function (chunk) { + if (abort || chunk === null) { + return; + } + + if (_this4.size && accumBytes + chunk.length > _this4.size) { + abort = true; + reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size')); + return; + } + + accumBytes += chunk.length; + accum.push(chunk); + }); + + body.on('end', function () { + if (abort) { + return; + } + + clearTimeout(resTimeout); + + try { + resolve(Buffer.concat(accum, accumBytes)); + } catch (err) { + // handle streams that have accumulated too much data (issue #414) + reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + }); +} + +/** + * Detect buffer encoding and convert to target encoding + * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding + * + * @param Buffer buffer Incoming buffer + * @param String encoding Target encoding + * @return String + */ +function convertBody(buffer, headers) { + if (typeof convert !== 'function') { + throw new Error('The package `encoding` must be installed to use the textConverted() function'); + } + + const ct = headers.get('content-type'); + let charset = 'utf-8'; + let res, str; + + // header + if (ct) { + res = /charset=([^;]*)/i.exec(ct); + } + + // no charset in content type, peek at response body for at most 1024 bytes + str = buffer.slice(0, 1024).toString(); + + // html5 + if (!res && str) { + res = / 0 && arguments[0] !== undefined ? 
arguments[0] : undefined; + + this[MAP] = Object.create(null); + + if (init instanceof Headers) { + const rawHeaders = init.raw(); + const headerNames = Object.keys(rawHeaders); + + for (const headerName of headerNames) { + for (const value of rawHeaders[headerName]) { + this.append(headerName, value); + } + } + + return; + } + + // We don't worry about converting prop to ByteString here as append() + // will handle it. + if (init == null) ; else if (typeof init === 'object') { + const method = init[Symbol.iterator]; + if (method != null) { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + + // sequence> + // Note: per spec we have to first exhaust the lists then process them + const pairs = []; + for (const pair of init) { + if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { + throw new TypeError('Each header pair must be iterable'); + } + pairs.push(Array.from(pair)); + } + + for (const pair of pairs) { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + this.append(pair[0], pair[1]); + } + } else { + // record + for (const key of Object.keys(init)) { + const value = init[key]; + this.append(key, value); + } + } + } else { + throw new TypeError('Provided initializer must be an object'); + } + } + + /** + * Return combined header value given name + * + * @param String name Header name + * @return Mixed + */ + get(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key === undefined) { + return null; + } + + return this[MAP][key].join(', '); + } + + /** + * Iterate over all headers + * + * @param Function callback Executed for each item with parameters (value, name, thisArg) + * @param Boolean thisArg `this` context for callback function + * @return Void + */ + forEach(callback) { + let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; + + let pairs = getHeaders(this); + let i = 0; + while (i < pairs.length) { + var _pairs$i = pairs[i]; + const name = _pairs$i[0], + value = _pairs$i[1]; + + callback.call(thisArg, value, name, this); + pairs = getHeaders(this); + i++; + } + } + + /** + * Overwrite header values given name + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + set(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + this[MAP][key !== undefined ? 
key : name] = [value]; + } + + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } + + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } + + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } + + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } + + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } + + /** + * Get an iterator on values. + * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } + + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } +} +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); + +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? 
function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} + +const INTERNAL = Symbol('internal'); + +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; +} + +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } + + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; + + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } + + this[INTERNAL].index = index + 1; + + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * Export the Headers object in a form that Node.js can consume. + * + * @param Headers headers + * @return Object + */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); + + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } + + return obj; +} + +/** + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers + */ +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; +} + +const INTERNALS$1 = Symbol('Response internals'); + +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; + +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + Body.call(this, body, opts); + + const status = opts.status || 200; + const headers = new Headers(opts.headers); + + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } + + get url() { + return this[INTERNALS$1].url; + } + + get status() { + return this[INTERNALS$1].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } + + get redirected() { + return this[INTERNALS$1].counter > 0; + } + + get statusText() { + return this[INTERNALS$1].statusText; + } + + get headers() { + return this[INTERNALS$1].headers; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } +} + +Body.mixIn(Response.prototype); + +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); + +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); + +const INTERNALS$2 = Symbol('Request internals'); + +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; + +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; + +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; +} + +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); +} + +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + let parsedURL; + + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parse_url(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parse_url(`${input}`); + } + input = {}; + } else { + parsedURL = parse_url(input.url); + } + + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); + + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; + + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; + + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } + + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; + + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } + + get method() { + return this[INTERNALS$2].method; + } + + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } + + get headers() { + return this[INTERNALS$2].headers; + } + + get redirect() { + return this[INTERNALS$2].redirect; + } + + get signal() { + return this[INTERNALS$2].signal; + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} + +Body.mixIn(Request.prototype); + +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); + +/** + * Convert a Request to Node.js http request options. 
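+ * Fills in default Accept, Content-Length, User-Agent, Accept-Encoding and
+ * Connection headers, and throws for non-absolute or non-HTTP(S) URLs.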
+ * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); + + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } + + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } + + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } + + if (!headers.has('Connection') && !request.agent) { + headers.set('Connection', 'close'); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent: request.agent + }); +} + +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ + +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); + + this.type = 'aborted'; + this.message = message; + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; + +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; +const resolve_url = Url.resolve; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { + + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } + + Body.Promise = fetch.Promise; + + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); + + const send = (options.protocol === 'https:' ? 
https : http).request; + const signal = request.signal; + + let response = null; + + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; + + // send request + const req = send(options); + let reqTimeout; + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } + + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } + + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); + + req.on('response', function (res) { + clearTimeout(reqTimeout); + + const headers = createHeadersLenient(res.headers); + + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + const locationURL = location === null ? null : resolve_url(request.url, location); + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. + if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
+ const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout + }; + + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } + + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); + + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. 
+ const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } + + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } + + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } + + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); + + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; + +// expose Promise +fetch.Promise = global.Promise; + +export default fetch; +export { Headers, Request, Response, FetchError }; diff --git a/scripts/metrics/node_modules/node-fetch/package.json b/scripts/metrics/node_modules/node-fetch/package.json new file mode 100644 index 00000000000..e93129c801f --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/package.json @@ -0,0 +1,94 @@ +{ + "_from": "node-fetch", + "_id": "node-fetch@2.5.0", + "_inBundle": false, + "_integrity": "sha512-YuZKluhWGJwCcUu4RlZstdAxr8bFfOVHakc1mplwHkk8J+tqM1Y5yraYvIUpeX8aY7+crCwiELJq7Vl0o0LWXw==", + "_location": "/node-fetch", + "_phantomChildren": {}, + "_requested": { + "type": "tag", + "registry": true, + "raw": "node-fetch", + "name": "node-fetch", + "escapedName": "node-fetch", + "rawSpec": "", + "saveSpec": null, + "fetchSpec": "latest" + }, + "_requiredBy": [ + "#USER", + "/" + ], + "_resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.5.0.tgz", + "_shasum": "8028c49fc1191bba56a07adc6e2a954644a48501", + "_spec": "node-fetch", + "_where": "/Users/zachary.butler/Work/auto-buildkite-pipelines/scripts/metrics", + "author": { + "name": "David Frank" + }, + "browser": "./browser.js", + "bugs": { + "url": "https://github.com/bitinn/node-fetch/issues" + }, + "bundleDependencies": false, + "dependencies": {}, + "deprecated": false, + "description": "A light-weight module that brings window.fetch to node.js", + "devDependencies": { + "@ungap/url-search-params": "^0.1.2", + "abort-controller": "^1.1.0", + "abortcontroller-polyfill": "^1.3.0", + "babel-core": "^6.26.3", + "babel-plugin-istanbul": "^4.1.6", + "babel-preset-env": "^1.6.1", + "babel-register": "^6.16.3", + "chai": "^3.5.0", + "chai-as-promised": "^7.1.1", + "chai-iterator": "^1.1.1", + "chai-string": "~1.3.0", + "codecov": "^3.3.0", + "cross-env": "^5.2.0", + "form-data": "^2.3.3", + "is-builtin-module": "^1.0.0", + "mocha": "^5.0.0", + "nyc": "11.9.0", + "parted": "^0.1.1", + "promise": "^8.0.3", + "resumer": "0.0.0", + "rollup": "^0.63.4", + "rollup-plugin-babel": "^3.0.7", + "string-to-arraybuffer": 
"^1.0.2", + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "files": [ + "lib/index.js", + "lib/index.mjs", + "lib/index.es.js", + "browser.js" + ], + "homepage": "https://github.com/bitinn/node-fetch", + "keywords": [ + "fetch", + "http", + "promise" + ], + "license": "MIT", + "main": "lib/index", + "module": "lib/index.mjs", + "name": "node-fetch", + "repository": { + "type": "git", + "url": "git+https://github.com/bitinn/node-fetch.git" + }, + "scripts": { + "build": "cross-env BABEL_ENV=rollup rollup -c", + "coverage": "cross-env BABEL_ENV=coverage nyc --reporter json --reporter text mocha -R spec test/test.js && codecov -f coverage/coverage-final.json", + "prepare": "npm run build", + "report": "cross-env BABEL_ENV=coverage nyc --reporter lcov --reporter text mocha -R spec test/test.js", + "test": "cross-env BABEL_ENV=test mocha --require babel-register --throw-deprecation test/test.js" + }, + "version": "2.5.0" +} diff --git a/scripts/metrics/node_modules/sax/LICENSE b/scripts/metrics/node_modules/sax/LICENSE new file mode 100644 index 00000000000..ccffa082c99 --- /dev/null +++ b/scripts/metrics/node_modules/sax/LICENSE @@ -0,0 +1,41 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +==== + +`String.fromCodePoint` by Mathias Bynens used according to terms of MIT +License, as follows: + + Copyright Mathias Bynens + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/scripts/metrics/node_modules/sax/README.md b/scripts/metrics/node_modules/sax/README.md new file mode 100644 index 00000000000..afcd3f3dd65 --- /dev/null +++ b/scripts/metrics/node_modules/sax/README.md @@ -0,0 +1,225 @@ +# sax js + +A sax-style parser for XML and HTML. 
+ +Designed with [node](http://nodejs.org/) in mind, but should work fine in +the browser or other CommonJS implementations. + +## What This Is + +* A very simple tool to parse through an XML string. +* A stepping stone to a streaming HTML parser. +* A handy way to deal with RSS and other mostly-ok-but-kinda-broken XML + docs. + +## What This Is (probably) Not + +* An HTML Parser - That's a fine goal, but this isn't it. It's just + XML. +* A DOM Builder - You can use it to build an object model out of XML, + but it doesn't do that out of the box. +* XSLT - No DOM = no querying. +* 100% Compliant with (some other SAX implementation) - Most SAX + implementations are in Java and do a lot more than this does. +* An XML Validator - It does a little validation when in strict mode, but + not much. +* A Schema-Aware XSD Thing - Schemas are an exercise in fetishistic + masochism. +* A DTD-aware Thing - Fetching DTDs is a much bigger job. + +## Regarding `Hello, world!').close(); + +// stream usage +// takes the same options as the parser +var saxStream = require("sax").createStream(strict, options) +saxStream.on("error", function (e) { + // unhandled errors will throw, since this is a proper node + // event emitter. + console.error("error!", e) + // clear the error + this._parser.error = null + this._parser.resume() +}) +saxStream.on("opentag", function (node) { + // same object as above +}) +// pipe is supported, and it's readable/writable +// same chunks coming in also go out. +fs.createReadStream("file.xml") + .pipe(saxStream) + .pipe(fs.createWriteStream("file-copy.xml")) +``` + + +## Arguments + +Pass the following arguments to the parser function. All are optional. + +`strict` - Boolean. Whether or not to be a jerk. Default: `false`. + +`opt` - Object bag of settings regarding string formatting. All default to `false`. + +Settings supported: + +* `trim` - Boolean. Whether or not to trim text and comment nodes. +* `normalize` - Boolean. If true, then turn any whitespace into a single + space. +* `lowercase` - Boolean. If true, then lowercase tag names and attribute names + in loose mode, rather than uppercasing them. +* `xmlns` - Boolean. If true, then namespaces are supported. +* `position` - Boolean. If false, then don't track line/col/position. +* `strictEntities` - Boolean. If true, only parse [predefined XML + entities](http://www.w3.org/TR/REC-xml/#sec-predefined-ent) + (`&`, `'`, `>`, `<`, and `"`) + +## Methods + +`write` - Write bytes onto the stream. You don't have to do this all at +once. You can keep writing as much as you want. + +`close` - Close the stream. Once closed, no more data may be written until +it is done processing the buffer, which is signaled by the `end` event. + +`resume` - To gracefully handle errors, assign a listener to the `error` +event. Then, when the error is taken care of, you can call `resume` to +continue parsing. Otherwise, the parser will not continue while in an error +state. + +## Members + +At all times, the parser object will have the following members: + +`line`, `column`, `position` - Indications of the position in the XML +document where the parser currently is looking. + +`startTagPosition` - Indicates the position where the current tag starts. + +`closed` - Boolean indicating whether or not the parser can be written to. +If it's `true`, then wait for the `ready` event to write again. + +`strict` - Boolean indicating whether or not the parser is a jerk. + +`opt` - Any options passed into the constructor. 
+ +`tag` - The current tag being dealt with. + +And a bunch of other stuff that you probably shouldn't touch. + +## Events + +All events emit with a single argument. To listen to an event, assign a +function to `on`. Functions get executed in the this-context of +the parser object. The list of supported events are also in the exported +`EVENTS` array. + +When using the stream interface, assign handlers using the EventEmitter +`on` function in the normal fashion. + +`error` - Indication that something bad happened. The error will be hanging +out on `parser.error`, and must be deleted before parsing can continue. By +listening to this event, you can keep an eye on that kind of stuff. Note: +this happens *much* more in strict mode. Argument: instance of `Error`. + +`text` - Text node. Argument: string of text. + +`doctype` - The ``. Argument: +object with `name` and `body` members. Attributes are not parsed, as +processing instructions have implementation dependent semantics. + +`sgmldeclaration` - Random SGML declarations. Stuff like `` +would trigger this kind of event. This is a weird thing to support, so it +might go away at some point. SAX isn't intended to be used to parse SGML, +after all. + +`opentagstart` - Emitted immediately when the tag name is available, +but before any attributes are encountered. Argument: object with a +`name` field and an empty `attributes` set. Note that this is the +same object that will later be emitted in the `opentag` event. + +`opentag` - An opening tag. Argument: object with `name` and `attributes`. +In non-strict mode, tag names are uppercased, unless the `lowercase` +option is set. If the `xmlns` option is set, then it will contain +namespace binding information on the `ns` member, and will have a +`local`, `prefix`, and `uri` member. + +`closetag` - A closing tag. In loose mode, tags are auto-closed if their +parent closes. In strict mode, well-formedness is enforced. Note that +self-closing tags will have `closeTag` emitted immediately after `openTag`. +Argument: tag name. + +`attribute` - An attribute node. Argument: object with `name` and `value`. +In non-strict mode, attribute names are uppercased, unless the `lowercase` +option is set. If the `xmlns` option is set, it will also contains namespace +information. + +`comment` - A comment node. Argument: the string of the comment. + +`opencdata` - The opening tag of a ``) of a `` tags trigger a `"script"` +event, and their contents are not checked for special xml characters. +If you pass `noscript: true`, then this behavior is suppressed. + +## Reporting Problems + +It's best to write a failing test if you find an issue. I will always +accept pull requests with failing tests if they demonstrate intended +behavior, but it is very hard to figure out what issue you're describing +without a test. Writing a test is also the best way for you yourself +to figure out if you really understand the issue you think you have with +sax-js. diff --git a/scripts/metrics/node_modules/sax/lib/sax.js b/scripts/metrics/node_modules/sax/lib/sax.js new file mode 100644 index 00000000000..795d607ef63 --- /dev/null +++ b/scripts/metrics/node_modules/sax/lib/sax.js @@ -0,0 +1,1565 @@ +;(function (sax) { // wrapper for non-node envs + sax.parser = function (strict, opt) { return new SAXParser(strict, opt) } + sax.SAXParser = SAXParser + sax.SAXStream = SAXStream + sax.createStream = createStream + + // When we pass the MAX_BUFFER_LENGTH position, start checking for buffer overruns. 
+ // When we check, schedule the next check for MAX_BUFFER_LENGTH - (max(buffer lengths)), + // since that's the earliest that a buffer overrun could occur. This way, checks are + // as rare as required, but as often as necessary to ensure never crossing this bound. + // Furthermore, buffers are only tested at most once per write(), so passing a very + // large string into write() might have undesirable effects, but this is manageable by + // the caller, so it is assumed to be safe. Thus, a call to write() may, in the extreme + // edge case, result in creating at most one complete copy of the string passed in. + // Set to Infinity to have unlimited buffers. + sax.MAX_BUFFER_LENGTH = 64 * 1024 + + var buffers = [ + 'comment', 'sgmlDecl', 'textNode', 'tagName', 'doctype', + 'procInstName', 'procInstBody', 'entity', 'attribName', + 'attribValue', 'cdata', 'script' + ] + + sax.EVENTS = [ + 'text', + 'processinginstruction', + 'sgmldeclaration', + 'doctype', + 'comment', + 'opentagstart', + 'attribute', + 'opentag', + 'closetag', + 'opencdata', + 'cdata', + 'closecdata', + 'error', + 'end', + 'ready', + 'script', + 'opennamespace', + 'closenamespace' + ] + + function SAXParser (strict, opt) { + if (!(this instanceof SAXParser)) { + return new SAXParser(strict, opt) + } + + var parser = this + clearBuffers(parser) + parser.q = parser.c = '' + parser.bufferCheckPosition = sax.MAX_BUFFER_LENGTH + parser.opt = opt || {} + parser.opt.lowercase = parser.opt.lowercase || parser.opt.lowercasetags + parser.looseCase = parser.opt.lowercase ? 'toLowerCase' : 'toUpperCase' + parser.tags = [] + parser.closed = parser.closedRoot = parser.sawRoot = false + parser.tag = parser.error = null + parser.strict = !!strict + parser.noscript = !!(strict || parser.opt.noscript) + parser.state = S.BEGIN + parser.strictEntities = parser.opt.strictEntities + parser.ENTITIES = parser.strictEntities ? Object.create(sax.XML_ENTITIES) : Object.create(sax.ENTITIES) + parser.attribList = [] + + // namespaces form a prototype chain. + // it always points at the current tag, + // which protos to its parent tag. + if (parser.opt.xmlns) { + parser.ns = Object.create(rootNS) + } + + // mostly just for error reporting + parser.trackPosition = parser.opt.position !== false + if (parser.trackPosition) { + parser.position = parser.line = parser.column = 0 + } + emit(parser, 'onready') + } + + if (!Object.create) { + Object.create = function (o) { + function F () {} + F.prototype = o + var newf = new F() + return newf + } + } + + if (!Object.keys) { + Object.keys = function (o) { + var a = [] + for (var i in o) if (o.hasOwnProperty(i)) a.push(i) + return a + } + } + + function checkBufferLength (parser) { + var maxAllowed = Math.max(sax.MAX_BUFFER_LENGTH, 10) + var maxActual = 0 + for (var i = 0, l = buffers.length; i < l; i++) { + var len = parser[buffers[i]].length + if (len > maxAllowed) { + // Text/cdata nodes can get big, and since they're buffered, + // we can get here under normal conditions. + // Avoid issues by emitting the text node now, + // so at least it won't get any bigger. + switch (buffers[i]) { + case 'textNode': + closeText(parser) + break + + case 'cdata': + emitNode(parser, 'oncdata', parser.cdata) + parser.cdata = '' + break + + case 'script': + emitNode(parser, 'onscript', parser.script) + parser.script = '' + break + + default: + error(parser, 'Max buffer length exceeded: ' + buffers[i]) + } + } + maxActual = Math.max(maxActual, len) + } + // schedule the next check for the earliest possible buffer overrun. 
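+    // e.g. with the default 64k MAX_BUFFER_LENGTH and a largest buffer of
+    // 10k, the next check is scheduled 54k characters further into the input.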
+ var m = sax.MAX_BUFFER_LENGTH - maxActual + parser.bufferCheckPosition = m + parser.position + } + + function clearBuffers (parser) { + for (var i = 0, l = buffers.length; i < l; i++) { + parser[buffers[i]] = '' + } + } + + function flushBuffers (parser) { + closeText(parser) + if (parser.cdata !== '') { + emitNode(parser, 'oncdata', parser.cdata) + parser.cdata = '' + } + if (parser.script !== '') { + emitNode(parser, 'onscript', parser.script) + parser.script = '' + } + } + + SAXParser.prototype = { + end: function () { end(this) }, + write: write, + resume: function () { this.error = null; return this }, + close: function () { return this.write(null) }, + flush: function () { flushBuffers(this) } + } + + var Stream + try { + Stream = require('stream').Stream + } catch (ex) { + Stream = function () {} + } + + var streamWraps = sax.EVENTS.filter(function (ev) { + return ev !== 'error' && ev !== 'end' + }) + + function createStream (strict, opt) { + return new SAXStream(strict, opt) + } + + function SAXStream (strict, opt) { + if (!(this instanceof SAXStream)) { + return new SAXStream(strict, opt) + } + + Stream.apply(this) + + this._parser = new SAXParser(strict, opt) + this.writable = true + this.readable = true + + var me = this + + this._parser.onend = function () { + me.emit('end') + } + + this._parser.onerror = function (er) { + me.emit('error', er) + + // if didn't throw, then means error was handled. + // go ahead and clear error, so we can write again. + me._parser.error = null + } + + this._decoder = null + + streamWraps.forEach(function (ev) { + Object.defineProperty(me, 'on' + ev, { + get: function () { + return me._parser['on' + ev] + }, + set: function (h) { + if (!h) { + me.removeAllListeners(ev) + me._parser['on' + ev] = h + return h + } + me.on(ev, h) + }, + enumerable: true, + configurable: false + }) + }) + } + + SAXStream.prototype = Object.create(Stream.prototype, { + constructor: { + value: SAXStream + } + }) + + SAXStream.prototype.write = function (data) { + if (typeof Buffer === 'function' && + typeof Buffer.isBuffer === 'function' && + Buffer.isBuffer(data)) { + if (!this._decoder) { + var SD = require('string_decoder').StringDecoder + this._decoder = new SD('utf8') + } + data = this._decoder.write(data) + } + + this._parser.write(data.toString()) + this.emit('data', data) + return true + } + + SAXStream.prototype.end = function (chunk) { + if (chunk && chunk.length) { + this.write(chunk) + } + this._parser.end() + return true + } + + SAXStream.prototype.on = function (ev, handler) { + var me = this + if (!me._parser['on' + ev] && streamWraps.indexOf(ev) !== -1) { + me._parser['on' + ev] = function () { + var args = arguments.length === 1 ? [arguments[0]] : Array.apply(null, arguments) + args.splice(0, 0, ev) + me.emit.apply(me, args) + } + } + + return Stream.prototype.on.call(me, ev, handler) + } + + // this really needs to be replaced with character classes. + // XML allows all manner of ridiculous numbers and digits. + var CDATA = '[CDATA[' + var DOCTYPE = 'DOCTYPE' + var XML_NAMESPACE = 'http://www.w3.org/XML/1998/namespace' + var XMLNS_NAMESPACE = 'http://www.w3.org/2000/xmlns/' + var rootNS = { xml: XML_NAMESPACE, xmlns: XMLNS_NAMESPACE } + + // http://www.w3.org/TR/REC-xml/#NT-NameStartChar + // This implementation works on strings, a single character at a time + // as such, it cannot ever support astral-plane characters (10000-EFFFF) + // without a significant breaking change to either this parser, or the + // JavaScript language. 
Implementation of an emoji-capable xml parser + // is left as an exercise for the reader. + var nameStart = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/ + + var nameBody = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/ + + var entityStart = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/ + var entityBody = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/ + + function isWhitespace (c) { + return c === ' ' || c === '\n' || c === '\r' || c === '\t' + } + + function isQuote (c) { + return c === '"' || c === '\'' + } + + function isAttribEnd (c) { + return c === '>' || isWhitespace(c) + } + + function isMatch (regex, c) { + return regex.test(c) + } + + function notMatch (regex, c) { + return !isMatch(regex, c) + } + + var S = 0 + sax.STATE = { + BEGIN: S++, // leading byte order mark or whitespace + BEGIN_WHITESPACE: S++, // leading whitespace + TEXT: S++, // general stuff + TEXT_ENTITY: S++, // & and such. + OPEN_WAKA: S++, // < + SGML_DECL: S++, // + SCRIPT: S++, //