diff --git a/CMakeLists.txt b/CMakeLists.txt index 06f6a77f0..4f1704f0d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -446,7 +446,7 @@ add_library( ${LIB_SOURCES_SHARES} ${LIB_SOURCES_CHAINS}) -file(GLOB_RECURSE TEST_SOURCES test/*.cpp) +file(GLOB_RECURSE TEST_SOURCES test/*.cc) add_executable(unittest ${TEST_SOURCES} 3rdparty/gmock-gtest-all.cc) target_link_libraries(unittest btcpool ${THIRD_LIBRARIES}) diff --git a/docker/dcrd/v1.4.0-rc1/Dockerfile b/docker/dcrd/v1.4.0/Dockerfile similarity index 69% rename from docker/dcrd/v1.4.0-rc1/Dockerfile rename to docker/dcrd/v1.4.0/Dockerfile index 7b28921db..21f4c58dd 100644 --- a/docker/dcrd/v1.4.0-rc1/Dockerfile +++ b/docker/dcrd/v1.4.0/Dockerfile @@ -20,10 +20,10 @@ RUN apt-get update && apt-get install -y net-tools wget # download dcrd RUN mkdir ~/source -RUN cd ~/source && wget https://github.com/decred/decred-binaries/releases/download/v1.4.0-rc1/decred-linux-amd64-v1.4.0-rc1.tar.gz -RUN cd ~/source && [ $(sha256sum decred-linux-amd64-v1.4.0-rc1.tar.gz | cut -d " " -f 1) = "db26781c877a40762bbcaf7eb17c9c8a2da803682612e63b5f798131d766d635" ] && \ - tar zxvf decred-linux-amd64-v1.4.0-rc1.tar.gz && \ - cp decred-linux-amd64-v1.4.0-rc1/dcr* /usr/local/bin +RUN cd ~/source && wget https://github.com/decred/decred-binaries/releases/download/v1.4.0/decred-linux-amd64-v1.4.0.tar.gz +RUN cd ~/source && [ $(sha256sum decred-linux-amd64-v1.4.0.tar.gz | cut -d " " -f 1) = "36375985df1ba9a45bc11b4f6cdaed4f14ff6e5e9c46e17ef6e4f70a3349aba2" ] && \ + tar zxvf decred-linux-amd64-v1.4.0.tar.gz && \ + cp decred-linux-amd64-v1.4.0/dcr* /usr/local/bin # mkdir dcrd data dir RUN mkdir -p /root/.dcrd diff --git a/docker/dcrd/v1.4.0-rc1/README.md b/docker/dcrd/v1.4.0/README.md similarity index 85% rename from docker/dcrd/v1.4.0-rc1/README.md rename to docker/dcrd/v1.4.0/README.md index 4d114c77b..fa834e9ed 100644 --- a/docker/dcrd/v1.4.0-rc1/README.md +++ b/docker/dcrd/v1.4.0/README.md @@ -1,9 +1,9 @@ -Docker for Dcrd v1.4.0-rc1 
+Docker for Dcrd v1.4.0 ============================ * OS: `Ubuntu 14.04 LTS` * Docker Image OS: `Ubuntu 16.04 LTS` -* Dcrd: `v1.4.0-rc1` +* Dcrd: `v1.4.0` ## Install Docker @@ -22,13 +22,13 @@ service docker status cd /work git clone https://github.com/btccom/btcpool.git -cd btcpool/docker/dcrd/v1.4.0-rc1 +cd btcpool/docker/dcrd/v1.4.0 # If your server is in China, please check "Dockerfile" and uncomment some lines. # build -docker build -t dcrd:1.4.0-rc1 . -# docker build --no-cache -t dcrd:1.4.0-rc1 . +docker build -t dcrd:1.4.0 . +# docker build --no-cache -t dcrd:1.4.0 . # mkdir for dcrd mkdir -p /work/dcrd @@ -62,7 +62,7 @@ listen=address:port ``` # start docker (mainnet, assuming listening port 9108 & RPC listening port 9109) -docker run -it -v /work/dcrd:/root/.dcrd --name dcrd -p 9108:9108 -p 9109:9109 --restart always -d dcrd:1.4.0-rc1 +docker run -it -v /work/dcrd:/root/.dcrd --name dcrd -p 9108:9108 -p 9109:9109 --restart always -d dcrd:1.4.0 # login docker exec -it dcrd /bin/bash diff --git a/docker/dcrd/v1.4.0-rc1/run b/docker/dcrd/v1.4.0/run similarity index 100% rename from docker/dcrd/v1.4.0-rc1/run rename to docker/dcrd/v1.4.0/run diff --git a/docker/dcrd/v1.4.0-rc1/sources-aliyun.com.list b/docker/dcrd/v1.4.0/sources-aliyun.com.list similarity index 100% rename from docker/dcrd/v1.4.0-rc1/sources-aliyun.com.list rename to docker/dcrd/v1.4.0/sources-aliyun.com.list diff --git a/run-clang-format.sh b/run-clang-format.sh index 48aa5cbbb..1a824c521 100755 --- a/run-clang-format.sh +++ b/run-clang-format.sh @@ -1,6 +1,6 @@ #!/bin/sh for SRC_DIR in src test tools do - find $SRC_DIR -name '*.h' -or -name '*.cc' | xargs clang-format -i -style file + find $SRC_DIR -name '*.h' -or -name '*.inl' -or -name '*.cc' | xargs clang-format -i -style file done diff --git a/src/BlockMaker.cc b/src/BlockMaker.cc index 3307d23f5..140786a13 100644 --- a/src/BlockMaker.cc +++ b/src/BlockMaker.cc @@ -24,12 +24,15 @@ #include "BlockMaker.h" 
////////////////////////////////// BlockMaker ////////////////////////////////// -BlockMaker::BlockMaker(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB) +BlockMaker::BlockMaker( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB) : def_(def) , running_(true) - , kafkaConsumerSolvedShare_(kafkaBrokers, def_->solvedShareTopic_.c_str(), 0/* patition */) - , poolDB_(poolDB) -{ + , kafkaConsumerSolvedShare_( + kafkaBrokers, def_->solvedShareTopic_.c_str(), 0 /* patition */) + , poolDB_(poolDB) { } BlockMaker::~BlockMaker() { @@ -67,16 +70,18 @@ void BlockMaker::consumeSolvedShare(rd_kafka_message_t *rkmessage) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. // Not really an error. - // LOG(INFO) << "consumer reached end of " << rd_kafka_topic_name(rkmessage->rkt) + // LOG(INFO) << "consumer reached end of " << + // rd_kafka_topic_name(rkmessage->rkt) // << "[" << rkmessage->partition << "] " // << " message queue at offset " << rkmessage->offset; // acturlly return; } - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { diff --git a/src/BlockMaker.h b/src/BlockMaker.h index 2bf325570..2b4c978c2 100644 --- a/src/BlockMaker.h +++ b/src/BlockMaker.h @@ -31,25 +31,22 @@ #include -struct NodeDefinition -{ +struct NodeDefinition { string rpcAddr_; string rpcUserPwd_; }; -struct BlockMakerDefinition -{ +struct BlockMakerDefinition { string chainType_; bool enabled_; - vector nodes; + vector 
nodes; string solvedShareTopic_; string foundAuxBlockTable_; virtual ~BlockMakerDefinition() {} }; -struct BlockMakerDefinitionBitcoin : public BlockMakerDefinition -{ +struct BlockMakerDefinitionBitcoin : public BlockMakerDefinition { string rawGbtTopic_; string stratumJobTopic_; string auxPowSolvedShareTopic_; // merged mining solved share topic @@ -59,8 +56,7 @@ struct BlockMakerDefinitionBitcoin : public BlockMakerDefinition }; ////////////////////////////////// BlockMaker ////////////////////////////////// -class BlockMaker -{ +class BlockMaker { protected: shared_ptr def_; atomic running_; @@ -74,9 +70,12 @@ class BlockMaker virtual void processSolvedShare(rd_kafka_message_t *rkmessage) = 0; public: - BlockMaker(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB); + BlockMaker( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB); virtual ~BlockMaker(); - + // read-only definition inline shared_ptr def() { return def_; } @@ -85,5 +84,4 @@ class BlockMaker virtual void run(); }; - #endif diff --git a/src/Common.cc b/src/Common.cc index ef40eaa9f..be433ef48 100644 --- a/src/Common.cc +++ b/src/Common.cc @@ -23,11 +23,10 @@ */ #include "Common.h" -uint32_t djb2(const char *s) -{ +uint32_t djb2(const char *s) { uint32_t hash = 5381; int c; - uint8_t* str = (uint8_t*) s; + uint8_t *str = (uint8_t *)s; while ((c = *str++)) hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ @@ -51,4 +50,3 @@ uint64_t formatDifficulty(const uint64_t diff) { assert(i <= 63); return 1ULL << i; } - diff --git a/src/Common.h b/src/Common.h index 032c6bdfd..b4dc391bf 100644 --- a/src/Common.h +++ b/src/Common.h @@ -1,4 +1,4 @@ -/* +/* The MIT License (MIT) Copyright (c) [2016] [BTC.COM] @@ -69,7 +69,6 @@ typedef lock_guard ScopeLock; typedef unique_lock UniqueLock; typedef condition_variable Condition; - /** * byte order conversion utils */ @@ -78,20 +77,15 @@ inline uint16_t HToBe(uint16_t v) { return (v >> 8) | (v << 8); } inline 
uint32_t HToBe(uint32_t v) { - return ((v & 0xff000000) >> 24) | - ((v & 0x00ff0000) >> 8) | - ((v & 0x0000ff00) << 8) | - ((v & 0x000000ff) << 24); + return ((v & 0xff000000) >> 24) | ((v & 0x00ff0000) >> 8) | + ((v & 0x0000ff00) << 8) | ((v & 0x000000ff) << 24); } inline uint64_t HToBe(uint64_t v) { return ((v & 0xff00000000000000ULL) >> 56) | - ((v & 0x00ff000000000000ULL) >> 40) | - ((v & 0x0000ff0000000000ULL) >> 24) | - ((v & 0x000000ff00000000ULL) >> 8) | - ((v & 0x00000000ff000000ULL) << 8) | - ((v & 0x0000000000ff0000ULL) << 24) | - ((v & 0x000000000000ff00ULL) << 40) | - ((v & 0x00000000000000ffULL) << 56); + ((v & 0x00ff000000000000ULL) >> 40) | + ((v & 0x0000ff0000000000ULL) >> 24) | ((v & 0x000000ff00000000ULL) >> 8) | + ((v & 0x00000000ff000000ULL) << 8) | ((v & 0x0000000000ff0000ULL) << 24) | + ((v & 0x000000000000ff00ULL) << 40) | ((v & 0x00000000000000ffULL) << 56); } #else inline uint16_t HToBe(uint16_t v) { diff --git a/src/CreateStratumServerTemp.cc b/src/CreateStratumServerTemp.cc index d0a9e1737..8ea9e4d5f 100644 --- a/src/CreateStratumServerTemp.cc +++ b/src/CreateStratumServerTemp.cc @@ -7,11 +7,15 @@ #include "sia/StratumServerSia.h" #include "decred/StratumServerDecred.h" -Server* createStratumServer(const std::string &type, const int32_t shareAvgSeconds, const libconfig::Config& config) { - LOG(INFO) << "createServer type: " << type << ", shareAvgSeconds: " << shareAvgSeconds; +Server *createStratumServer( + const std::string &type, + const int32_t shareAvgSeconds, + const libconfig::Config &config) { + LOG(INFO) << "createServer type: " << type + << ", shareAvgSeconds: " << shareAvgSeconds; #if defined(CHAIN_TYPE_STR) if (CHAIN_TYPE_STR == type) -#else +#else if (false) #endif return new ServerBitcoin(shareAvgSeconds, config); @@ -19,10 +23,9 @@ Server* createStratumServer(const std::string &type, const int32_t shareAvgSecon return new ServerEth(shareAvgSeconds); else if ("SIA" == type) return new ServerSia(shareAvgSeconds); - else if 
("BTM" == type) - return new ServerBytom (shareAvgSeconds); + else if ("BTM" == type) + return new ServerBytom(shareAvgSeconds); else if ("DCR" == type) return new ServerDecred(shareAvgSeconds, config); return nullptr; } - diff --git a/src/CreateStratumServerTemp.h b/src/CreateStratumServerTemp.h index d5722d899..a2b55204a 100644 --- a/src/CreateStratumServerTemp.h +++ b/src/CreateStratumServerTemp.h @@ -9,6 +9,9 @@ class Config; class Server; -Server* createStratumServer(const std::string &type, const int32_t shareAvgSeconds, const libconfig::Config &config); +Server *createStratumServer( + const std::string &type, + const int32_t shareAvgSeconds, + const libconfig::Config &config); #endif \ No newline at end of file diff --git a/src/DiffController.cc b/src/DiffController.cc index d69ad2a5a..4c0e6876b 100644 --- a/src/DiffController.cc +++ b/src/DiffController.cc @@ -30,7 +30,7 @@ void DiffController::setMinDiff(uint64_t minDiff) { } else if (minDiff > kMaxDiff_) { minDiff = kMaxDiff_; } - + minDiff_ = minDiff; } @@ -40,7 +40,7 @@ void DiffController::setCurDiff(uint64_t curDiff) { } else if (curDiff > kMaxDiff_) { curDiff = kMaxDiff_; } - + curDiff_ = curDiff; } @@ -58,7 +58,6 @@ void DiffController::addAcceptedShare(const uint64_t share) { shares_.insert(k, share); } - // // level: min ~ max, coefficient // @@ -74,7 +73,7 @@ void DiffController::addAcceptedShare(const uint64_t share) { // static int __hashRateDown(int level) { - const int levels[] = {0, 4, 8, 16, 32, 64, 128, 256}; + const int levels[] = {0, 4, 8, 16, 32, 64, 128, 256}; if (level >= 8) { return 512; } @@ -83,10 +82,10 @@ static int __hashRateDown(int level) { } static int __hashRateUp(int level) { - const int levels[] = {4, 8, 16, 32, 64, 128, 256, 512}; + const int levels[] = {4, 8, 16, 32, 64, 128, 256, 512}; assert(level >= 0 && level <= 7); if (level >= 8) { - return 0x7fffffffL; // INT32_MAX + return 0x7fffffffL; // INT32_MAX } return levels[level]; } @@ -96,7 +95,7 @@ int 
DiffController::adjustHashRateLevel(const double hashRateT) { // hashrate is always danceing, // so need to use rate high and low to check it's level const double rateHigh = 1.50; - const double rateLow = 0.75; + const double rateLow = 0.75; // reduce level if (curHashRateLevel_ > 0 && hashRateT < __hashRateDown(curHashRateLevel_)) { @@ -123,14 +122,14 @@ double DiffController::minerCoefficient(const time_t now, const int64_t idx) { if (now <= startTime_) { return 1.0; } - uint64_t shares = shares_.sum(idx); + uint64_t shares = shares_.sum(idx); time_t shareWindow = isFullWindow(now) ? kDiffWindow_ : (now - startTime_); - double hashRateT = (double)shares * pow(2, 32) / shareWindow / pow(10, 12); + double hashRateT = (double)shares * pow(2, 32) / shareWindow / pow(10, 12); adjustHashRateLevel(hashRateT); assert(curHashRateLevel_ >= 0 && curHashRateLevel_ <= 8); const double c[] = {1.0, 1.0, 1.0, 1.2, 1.5, 2.0, 3.0, 4.0, 6.0}; - assert(sizeof(c)/sizeof(c[0]) == 9); + assert(sizeof(c) / sizeof(c[0]) == 9); return c[curHashRateLevel_]; } @@ -146,12 +145,12 @@ uint64_t DiffController::_calcCurDiff() { const time_t now = time(nullptr); const int64_t k = now / kRecordSeconds_; const double sharesCount = (double)sharesNum_.sum(k); - if (startTime_ == 0) { // first time, we set the start time + if (startTime_ == 0) { // first time, we set the start time startTime_ = time(nullptr); } const double kRateHigh = 1.40; - const double kRateLow = 0.40; + const double kRateLow = 0.40; double expectedCount = round(kDiffWindow_ / (double)kRecordSeconds_); if (isFullWindow(now)) { /* have a full window now */ @@ -159,14 +158,14 @@ uint64_t DiffController::_calcCurDiff() { expectedCount *= minerCoefficient(now, k); } if (expectedCount > kDiffWindow_) { - expectedCount = kDiffWindow_; // one second per share is enough + expectedCount = kDiffWindow_; // one second per share is enough } // this is for very low hashrate miner, eg. 
USB miners // should received at least one share every 60 seconds if (!isFullWindow(now) && now >= startTime_ + 60 && - sharesCount <= (int32_t)((now - startTime_)/60.0) && - curDiff_ >= minDiff_*2) { + sharesCount <= (int32_t)((now - startTime_) / 60.0) && + curDiff_ >= minDiff_ * 2) { setCurDiff(curDiff_ / 2); sharesNum_.mapMultiply(2.0); return curDiff_; @@ -174,8 +173,7 @@ uint64_t DiffController::_calcCurDiff() { // too fast if (sharesCount > expectedCount * kRateHigh) { - while (sharesNum_.sum(k) > expectedCount && - curDiff_ < kMaxDiff_) { + while (sharesNum_.sum(k) > expectedCount && curDiff_ < kMaxDiff_) { setCurDiff(curDiff_ * 2); sharesNum_.mapDivide(2.0); } @@ -183,15 +181,15 @@ uint64_t DiffController::_calcCurDiff() { } // too slow - if (isFullWindow(now) && curDiff_ >= minDiff_*2) { + if (isFullWindow(now) && curDiff_ >= minDiff_ * 2) { while (sharesNum_.sum(k) < expectedCount * kRateLow && - curDiff_ >= minDiff_*2) { + curDiff_ >= minDiff_ * 2) { setCurDiff(curDiff_ / 2); sharesNum_.mapMultiply(2.0); } assert(curDiff_ >= minDiff_); return curDiff_; } - + return curDiff_; } diff --git a/src/DiffController.h b/src/DiffController.h index df94ca954..81781ff23 100644 --- a/src/DiffController.h +++ b/src/DiffController.h @@ -27,17 +27,15 @@ #include "Common.h" #include "Statistics.h" - //////////////////////////////// DiffController //////////////////////////////// -class DiffController -{ +class DiffController { public: // max diff, cannot large than 2^62. 
const uint64_t kMaxDiff_; // min diff const uint64_t kMinDiff_; - const time_t kDiffWindow_; // time window, seconds, 60*N + const time_t kDiffWindow_; // time window, seconds, 60*N const time_t kRecordSeconds_; // every N seconds as a record time_t startTime_; // first job send time @@ -45,40 +43,42 @@ class DiffController uint64_t curDiff_; int32_t curHashRateLevel_; StatsWindow sharesNum_; // share count - StatsWindow shares_; // share + StatsWindow shares_; // share void setCurDiff(uint64_t curDiff); // set current diff with bounds checking virtual uint64_t _calcCurDiff(); int adjustHashRateLevel(const double hashRateT); - inline bool isFullWindow(const time_t now) - { + inline bool isFullWindow(const time_t now) { return now >= startTime_ + kDiffWindow_; } + private: double minerCoefficient(const time_t now, const int64_t idx); public: - DiffController(const uint64_t defaultDifficulty, - const uint64_t maxDifficulty, - const uint64_t minDifficulty, - const uint32_t shareAvgSeconds, - const uint32_t diffAdjustPeriod) : - kMaxDiff_(maxDifficulty), - kMinDiff_(minDifficulty), - kDiffWindow_(diffAdjustPeriod), - kRecordSeconds_(shareAvgSeconds), - startTime_(0), - curHashRateLevel_(0), - sharesNum_(kDiffWindow_ / kRecordSeconds_), /* every N seconds as a record */ - shares_(kDiffWindow_ / kRecordSeconds_) - { + DiffController( + const uint64_t defaultDifficulty, + const uint64_t maxDifficulty, + const uint64_t minDifficulty, + const uint32_t shareAvgSeconds, + const uint32_t diffAdjustPeriod) + : kMaxDiff_(maxDifficulty) + , kMinDiff_(minDifficulty) + , kDiffWindow_(diffAdjustPeriod) + , kRecordSeconds_(shareAvgSeconds) + , startTime_(0) + , curHashRateLevel_(0) + , sharesNum_(kDiffWindow_ / kRecordSeconds_) + , /* every N seconds as a record */ + shares_(kDiffWindow_ / kRecordSeconds_) { // Cannot large than 2^62. 
// If `kMaxDiff_` be 2^63, user can set `kMinDiff_` equals 2^63, // then `kMinDiff_*2` will be zero when next difficulty decrease and // DiffController::_calcCurDiff() will infinite loop. if (kMaxDiff_ > 0x4000000000000000ull) { - LOG(FATAL) << "too large max_difficulty, it should <= 0x4000000000000000."; + LOG(FATAL) + << "too large max_difficulty, it should <= 0x4000000000000000."; } if (kMinDiff_ < 1) { @@ -97,7 +97,7 @@ class DiffController resetCurDiff(defaultDifficulty); } - DiffController(const DiffController& other) + DiffController(const DiffController &other) : kMaxDiff_(other.kMaxDiff_) , kMinDiff_(other.kMinDiff_) , kDiffWindow_(other.kDiffWindow_) @@ -106,9 +106,10 @@ class DiffController , minDiff_(other.minDiff_) , curDiff_(other.curDiff_) , curHashRateLevel_(other.curHashRateLevel_) - , sharesNum_(other.kDiffWindow_ / other.kRecordSeconds_) /* every N seconds as a record */ - , shares_(other.kDiffWindow_ / other.kRecordSeconds_) { - } + , sharesNum_( + other.kDiffWindow_ / + other.kRecordSeconds_) /* every N seconds as a record */ + , shares_(other.kDiffWindow_ / other.kRecordSeconds_) {} virtual ~DiffController() {} diff --git a/src/GwMaker.cc b/src/GwMaker.cc index 484fc1404..dd1495795 100644 --- a/src/GwMaker.cc +++ b/src/GwMaker.cc @@ -35,16 +35,17 @@ #include ///////////////////////////////GwMaker//////////////////////////////////// -GwMaker::GwMaker(shared_ptr handler, - const string &kafkaBrokers) : handler_(handler), - running_(true), - kafkaProducer_(kafkaBrokers.c_str(), - handler->def().rawGwTopic_.c_str(), - 0 /* partition */) -{ +GwMaker::GwMaker(shared_ptr handler, const string &kafkaBrokers) + : handler_(handler) + , running_(true) + , kafkaProducer_( + kafkaBrokers.c_str(), + handler->def().rawGwTopic_.c_str(), + 0 /* partition */) { } -GwMaker::~GwMaker() {} +GwMaker::~GwMaker() { +} bool GwMaker::init() { map options; @@ -62,10 +63,9 @@ bool GwMaker::init() { } if (handler_->def().notifyHost_.length() > 0) { - auto callback = [&]() 
-> void { - submitRawGwMsg(); - }; - notification_ = make_shared(callback, handler_->def().notifyHost_, handler_->def().notifyPort_); + auto callback = [&]() -> void { submitRawGwMsg(); }; + notification_ = make_shared( + callback, handler_->def().notifyHost_, handler_->def().notifyPort_); notification_->setupHttpd(); } @@ -79,7 +79,8 @@ void GwMaker::stop() { return; } running_ = false; - LOG(INFO) << "stop GwMaker " << handler_->def().chainType_ << ", topic: " << handler_->def().rawGwTopic_; + LOG(INFO) << "stop GwMaker " << handler_->def().chainType_ + << ", topic: " << handler_->def().rawGwTopic_; } void GwMaker::kafkaProduceMsg(const void *payload, size_t len) { @@ -110,7 +111,8 @@ void GwMaker::run() { submitRawGwMsg(); } - LOG(INFO) << "GwMaker " << handler_->def().chainType_ << ", topic: " << handler_->def().rawGwTopic_ << " stopped"; + LOG(INFO) << "GwMaker " << handler_->def().chainType_ + << ", topic: " << handler_->def().rawGwTopic_ << " stopped"; } ///////////////////////////////GwNotification//////////////////////////////////// @@ -118,65 +120,69 @@ void GwMaker::run() { * https://wiki.parity.io/Mining.html * Parity HTTP Notification */ -GwNotification::GwNotification(std::function callback, const string &httpdHost, unsigned short httpdPort) -:callback_(callback), base_(nullptr), httpdHost_(httpdHost), httpdPort_(httpdPort) -{ +GwNotification::GwNotification( + std::function callback, + const string &httpdHost, + unsigned short httpdPort) + : callback_(callback) + , base_(nullptr) + , httpdHost_(httpdHost) + , httpdPort_(httpdPort) { } -GwNotification::~GwNotification() -{ - stop(); +GwNotification::~GwNotification() { + stop(); } -void GwNotification::httpdNotification(struct evhttp_request *req, void *arg) -{ +void GwNotification::httpdNotification(struct evhttp_request *req, void *arg) { struct evbuffer *evb = evbuffer_new(); evbuffer_add_printf(evb, "{\"err_no\":0,\"err_msg\":\"notify success\"}"); evhttp_send_reply(req, HTTP_OK, "OK", evb); 
evbuffer_free(evb); - string postData = string((char *)EVBUFFER_DATA(req->input_buffer), EVBUFFER_LENGTH(req->input_buffer)); + string postData = string( + (char *)EVBUFFER_DATA(req->input_buffer), + EVBUFFER_LENGTH(req->input_buffer)); LOG(INFO) << "GwNotification: makeRawGwMsg for notify " << postData; GwNotification *notification = (GwNotification *)arg; notification->callback_(); } -void GwNotification::setupHttpd() -{ +void GwNotification::setupHttpd() { boost::thread t(boost::bind(&GwNotification::runHttpd, this)); t.detach(); } -void GwNotification::runHttpd() -{ +void GwNotification::runHttpd() { struct evhttp_bound_socket *handle; struct evhttp *httpd; base_ = event_base_new(); httpd = evhttp_new(base_); - evhttp_set_allowed_methods(httpd, EVHTTP_REQ_GET | EVHTTP_REQ_POST | EVHTTP_REQ_HEAD); + evhttp_set_allowed_methods( + httpd, EVHTTP_REQ_GET | EVHTTP_REQ_POST | EVHTTP_REQ_HEAD); evhttp_set_timeout(httpd, 5 /* timeout in seconds */); - evhttp_set_cb(httpd, "/notify", GwNotification::httpdNotification, this); + evhttp_set_cb(httpd, "/notify", GwNotification::httpdNotification, this); - handle = evhttp_bind_socket_with_handle(httpd, httpdHost_.c_str(), httpdPort_); + handle = + evhttp_bind_socket_with_handle(httpd, httpdHost_.c_str(), httpdPort_); if (!handle) { - LOG(ERROR) << "couldn't bind to port: " << httpdPort_ << ", host: " << httpdHost_ << ", exiting."; + LOG(ERROR) << "couldn't bind to port: " << httpdPort_ + << ", host: " << httpdHost_ << ", exiting."; return; } event_base_dispatch(base_); } -void GwNotification::stop() -{ +void GwNotification::stop() { LOG(INFO) << "stop Notification ..."; event_base_loopexit(base_, NULL); } - ///////////////////////////////GwMakerHandler//////////////////////////////////// GwMakerHandler::~GwMakerHandler() { } @@ -186,24 +192,24 @@ string GwMakerHandler::makeRawGwMsg() { if (!callRpcGw(gw)) { return ""; } - LOG(INFO) << "getwork len=" << gw.length() << ", msg: " << gw.substr(0, 500) << (gw.size() > 500 ? "..." 
: ""); + LOG(INFO) << "getwork len=" << gw.length() << ", msg: " << gw.substr(0, 500) + << (gw.size() > 500 ? "..." : ""); return processRawGw(gw); } -bool GwMakerHandler::callRpcGw(string &response) -{ +bool GwMakerHandler::callRpcGw(string &response) { string request = getRequestData(); string userAgent = getUserAgent(); - bool res = rpcCall(def_.rpcAddr_.c_str(), - def_.rpcUserPwd_.c_str(), - request.empty() ? nullptr : request.c_str(), - request.length(), - response, - userAgent.c_str()); + bool res = rpcCall( + def_.rpcAddr_.c_str(), + def_.rpcUserPwd_.c_str(), + request.empty() ? nullptr : request.c_str(), + request.length(), + response, + userAgent.c_str()); - if (!res) - { + if (!res) { LOG(ERROR) << "call RPC failure"; return false; } @@ -211,8 +217,7 @@ bool GwMakerHandler::callRpcGw(string &response) } ///////////////////////////////GwMakerHandlerJson/////////////////////////////////// -string GwMakerHandlerJson::processRawGw(const string& msg) -{ +string GwMakerHandlerJson::processRawGw(const string &msg) { JsonNode r; if (!JsonNode::parse(msg.c_str(), msg.c_str() + msg.length(), r)) { LOG(ERROR) << "decode gw failure: " << msg; diff --git a/src/GwMaker.h b/src/GwMaker.h index bb1d279a6..715a7ab18 100644 --- a/src/GwMaker.h +++ b/src/GwMaker.h @@ -28,7 +28,7 @@ @author Martin Medina @copyright RSK Labs Ltd. - @version 1.0 30/03/17 + @version 1.0 30/03/17 maintained by HaoLi (fatrat1117) and YihaoPeng since Feb 20, 2018 */ @@ -41,8 +41,7 @@ #include "utilities_js.hpp" #include -struct GwMakerDefinition -{ +struct GwMakerDefinition { string chainType_; bool enabled_; @@ -57,56 +56,58 @@ struct GwMakerDefinition }; class GwMakerHandler { - public: - virtual ~GwMakerHandler() = 0; // mark it's an abstract class - virtual void init(const GwMakerDefinition &def) { def_ = def; } - - // read-only definition - virtual const GwMakerDefinition& def() { return def_; } - - // Interface with the GwMaker. 
- // There is a default implementation that use virtual functions below. - // If the implementation does not meet the requirements, you can overload it - // and ignore all the following virtual functions. - virtual string makeRawGwMsg(); - - protected: - - // These virtual functions make it easier to implement the makeRawGwMsg() interface. - // In most cases, you just need to override getRequestData() and processRawGw(). - // If you have overloaded makeRawGwMsg() above, you can ignore all the following functions. - - // Receive rpc response and generate RawGw message for the pool. - virtual string processRawGw(const string &gw) { return ""; } - - // Call RPC `getwork` and get the response. - virtual bool callRpcGw(string &resp); - - // Body of HTTP POST used by callRpcGw(). - // return "" if use HTTP GET. - virtual string getRequestData() { return ""; } - // HTTP header `User-Agent` used by callRpcGw(). - virtual string getUserAgent() { return "curl"; } - - // blockchain and RPC-server definitions - GwMakerDefinition def_; +public: + virtual ~GwMakerHandler() = 0; // mark it's an abstract class + virtual void init(const GwMakerDefinition &def) { def_ = def; } + + // read-only definition + virtual const GwMakerDefinition &def() { return def_; } + + // Interface with the GwMaker. + // There is a default implementation that use virtual functions below. + // If the implementation does not meet the requirements, you can overload it + // and ignore all the following virtual functions. + virtual string makeRawGwMsg(); + +protected: + // These virtual functions make it easier to implement the makeRawGwMsg() + // interface. In most cases, you just need to override getRequestData() and + // processRawGw(). If you have overloaded makeRawGwMsg() above, you can ignore + // all the following functions. + + // Receive rpc response and generate RawGw message for the pool. + virtual string processRawGw(const string &gw) { return ""; } + + // Call RPC `getwork` and get the response. 
+ virtual bool callRpcGw(string &resp); + + // Body of HTTP POST used by callRpcGw(). + // return "" if use HTTP GET. + virtual string getRequestData() { return ""; } + // HTTP header `User-Agent` used by callRpcGw(). + virtual string getUserAgent() { return "curl"; } + + // blockchain and RPC-server definitions + GwMakerDefinition def_; }; -class GwMakerHandlerJson : public GwMakerHandler -{ +class GwMakerHandlerJson : public GwMakerHandler { virtual bool checkFields(JsonNode &r) = 0; virtual string constructRawMsg(JsonNode &r) = 0; string processRawGw(const string &gw) override; }; class GwNotification { - std::function callback_; + std::function callback_; public: - GwNotification(std::function callback, const string &httpdHost, unsigned short httpdPort); + GwNotification( + std::function callback, + const string &httpdHost, + unsigned short httpdPort); ~GwNotification(); - //httpd + // httpd struct event_base *base_; string httpdHost_; unsigned short httpdPort_; @@ -118,7 +119,6 @@ class GwNotification { void stop(); }; - class GwMaker { shared_ptr handler_; atomic running_; diff --git a/src/JobMaker.cc b/src/JobMaker.cc index 7fc79b476..7be95a77c 100644 --- a/src/JobMaker.cc +++ b/src/JobMaker.cc @@ -24,16 +24,19 @@ #include "JobMaker.h" #include "Utils.h" - /////////////////////////////////// JobMaker ///////////////////////////////// -JobMaker::JobMaker(shared_ptr handler, - const string &kafkaBrokers, - const string& zookeeperBrokers) : handler_(handler), - running_(true), - zkLocker_(zookeeperBrokers.c_str()), - kafkaBrokers_(kafkaBrokers), - kafkaProducer_(kafkaBrokers.c_str(), handler->def()->jobTopic_.c_str(), RD_KAFKA_PARTITION_UA) -{ +JobMaker::JobMaker( + shared_ptr handler, + const string &kafkaBrokers, + const string &zookeeperBrokers) + : handler_(handler) + , running_(true) + , zkLocker_(zookeeperBrokers.c_str()) + , kafkaBrokers_(kafkaBrokers) + , kafkaProducer_( + kafkaBrokers.c_str(), + handler->def()->jobTopic_.c_str(), + 
RD_KAFKA_PARTITION_UA) { } JobMaker::~JobMaker() { @@ -47,8 +50,7 @@ void JobMaker::stop() { LOG(INFO) << "stop jobmaker"; } -bool JobMaker::setupKafkaProducer() -{ +bool JobMaker::setupKafkaProducer() { map options; // set to 1 (0 is an illegal value here), deliver msg as soon as possible. options["queue.buffering.max.ms"] = "1"; @@ -73,15 +75,17 @@ bool JobMaker::init() { } // Lock the path + server id so that we can start job makers with different // server id (up to 8 bits as only 8 bits are embedded in job id) - zkLocker_.getLock(Strings::Format("%s/%" PRIu8, - handler_->def()->zookeeperLockPath_.c_str(), - static_cast(handler_->def()->serverId_)).c_str()); - } catch(const ZookeeperException &zooex) { + zkLocker_.getLock(Strings::Format( + "%s/%" PRIu8, + handler_->def()->zookeeperLockPath_.c_str(), + static_cast(handler_->def()->serverId_)) + .c_str()); + } catch (const ZookeeperException &zooex) { LOG(ERROR) << zooex.what(); return false; } - if(!setupKafkaProducer()) + if (!setupKafkaProducer()) return false; /* setup kafka consumers */ @@ -92,25 +96,22 @@ bool JobMaker::init() { return true; } -bool JobMaker::consumeKafkaMsg(rd_kafka_message_t *rkmessage, JobMakerConsumerHandler &consumerHandler) -{ +bool JobMaker::consumeKafkaMsg( + rd_kafka_message_t *rkmessage, JobMakerConsumerHandler &consumerHandler) { // check error string topic = rd_kafka_topic_name(rkmessage->rkt); - if (rkmessage->err) - { - if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) - { + if (rkmessage->err) { + if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. 
return false; } - LOG(ERROR) << "consume error for topic " << topic.c_str() - << "[" << rkmessage->partition << "] offset " << rkmessage->offset + LOG(ERROR) << "consume error for topic " << topic.c_str() << "[" + << rkmessage->partition << "] offset " << rkmessage->offset << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || - rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) - { + rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { LOG(FATAL) << "consume fatal"; stop(); } @@ -118,7 +119,8 @@ bool JobMaker::consumeKafkaMsg(rd_kafka_message_t *rkmessage, JobMakerConsumerHa } // set json string - LOG(INFO) << "received " << topic.c_str() << " message len: " << rkmessage->len; + LOG(INFO) << "received " << topic.c_str() + << " message len: " << rkmessage->len; string msg((const char *)rkmessage->payload, rkmessage->len); @@ -140,11 +142,12 @@ void JobMaker::produceStratumJob() { } lastJobTime_ = time(nullptr); - + // save send timestamp to file, for monitor system if (!handler_->def()->fileLastJobTime_.empty()) { // TODO: fix Y2K38 issue - writeTime2File(handler_->def()->fileLastJobTime_.c_str(), (uint32_t)lastJobTime_); + writeTime2File( + handler_->def()->fileLastJobTime_.c_str(), (uint32_t)lastJobTime_); } } @@ -153,7 +156,8 @@ void JobMaker::runThreadKafkaConsume(JobMakerConsumerHandler &consumerHandler) { bool jobUpdated = false; while (running_) { - rd_kafka_message_t *rkmessage = consumerHandler.kafkaConsumer_->consumer(timeoutMs); + rd_kafka_message_t *rkmessage = + consumerHandler.kafkaConsumer_->consumer(timeoutMs); if (rkmessage) { jobUpdated = consumeKafkaMsg(rkmessage, consumerHandler); @@ -164,30 +168,41 @@ void JobMaker::runThreadKafkaConsume(JobMakerConsumerHandler &consumerHandler) { // Kafka will not skip any message during your sleep(), you will received // all messages from your beginning offset to the latest in any case. 
// So sleep() will cause unexpected delay before consumer a new message. - // If the producer's speed is faster than the sleep() here, the consumption - // will be delayed permanently and the latest message will never be received. + // If the producer's speed is faster than the sleep() here, the + // consumption will be delayed permanently and the latest message will + // never be received. // At the same time, there is not a busy waiting. - // KafkaConsumer::consumer(timeoutMs) will return after `timeoutMs` millisecond - // if no new messages. You can increase `timeoutMs` if you want. + // KafkaConsumer::consumer(timeoutMs) will return after `timeoutMs` + // millisecond if no new messages. You can increase `timeoutMs` if you + // want. + } + + // don't trigger timeout if jobKeepAlive_ be false. + if (!consumerHandler.jobKeepAlive_) { + continue; } uint32_t timeDiff; - if (rkmessage == nullptr || (!jobUpdated && (timeDiff = time(nullptr) - lastJobTime_) > handler_->def()->jobInterval_)) { + if (rkmessage == nullptr || + (!jobUpdated && + (timeDiff = time(nullptr) - lastJobTime_) > + handler_->def()->jobInterval_)) { produceStratumJob(); jobUpdated = true; } - timeoutMs = (handler_->def()->jobInterval_ - (jobUpdated ? 0 : timeDiff)) * 1000; + timeoutMs = + (handler_->def()->jobInterval_ - (jobUpdated ? 
0 : timeDiff)) * 1000; } } void JobMaker::run() { // running consumer threads - for (JobMakerConsumerHandler &consumerhandler : kafkaConsumerHandlers_) - { - kafkaConsumerWorkers_.push_back(std::make_shared(std::bind(&JobMaker::runThreadKafkaConsume, this, consumerhandler))); + for (JobMakerConsumerHandler &consumerhandler : kafkaConsumerHandlers_) { + kafkaConsumerWorkers_.push_back(std::make_shared( + std::bind(&JobMaker::runThreadKafkaConsume, this, consumerhandler))); } // wait consumer threads exit @@ -200,50 +215,54 @@ void JobMaker::run() { } } - -JobMakerConsumerHandler JobMakerHandler::createConsumerHandler(const string &kafkaBrokers, const string &topic, int64_t offset - , vector> consumerOptions, JobMakerMessageProcessor messageProcessor) -{ +JobMakerConsumerHandler JobMakerHandler::createConsumerHandler( + const string &kafkaBrokers, + const string &topic, + int64_t offset, + vector> consumerOptions, + JobMakerMessageProcessor messageProcessor, + bool jobKeepAlive) { std::map usedConsumerOptions; // default usedConsumerOptions["fetch.wait.max.ms"] = "5"; // passed settings - for(auto& option : consumerOptions) - { + for (auto &option : consumerOptions) { usedConsumerOptions[option.first] = option.second; } JobMakerConsumerHandler result; - auto consumer = std::make_shared(kafkaBrokers.c_str(), topic.c_str(), 0); + auto consumer = + std::make_shared(kafkaBrokers.c_str(), topic.c_str(), 0); if (!consumer->setup(RD_KAFKA_OFFSET_TAIL(offset), &usedConsumerOptions)) { LOG(ERROR) << "kafka consumer " << topic << " setup failure"; - } - else if (!consumer->checkAlive()) { + } else if (!consumer->checkAlive()) { LOG(FATAL) << "kafka consumer " << topic << " is NOT alive"; - } - else - { + } else { + result.kafkaTopic_ = topic; result.kafkaConsumer_ = consumer; result.messageProcessor_ = messageProcessor; + result.jobKeepAlive_ = jobKeepAlive; } return result; } -uint64_t JobMakerHandler::generateJobId(uint32_t hash) const -{ - return (static_cast(time(nullptr)) 
<< 32) | (hash & 0xFFFFFF00) | (def_->serverId_ & 0xFF); +uint64_t JobMakerHandler::generateJobId(uint32_t hash) const { + return (static_cast(time(nullptr)) << 32) | (hash & 0xFFFFFF00) | + (def_->serverId_ & 0xFF); } ////////////////////////////////GwJobMakerHandler////////////////////////////////// -bool GwJobMakerHandler::initConsumerHandlers(const string &kafkaBrokers, vector &handlers) -{ +bool GwJobMakerHandler::initConsumerHandlers( + const string &kafkaBrokers, vector &handlers) { { - auto messageProcessor = std::bind(&GwJobMakerHandler::processMsg, this, std::placeholders::_1); - auto handler = createConsumerHandler(kafkaBrokers, def()->rawGwTopic_, 1, {}, messageProcessor); - if(handler.kafkaConsumer_ == nullptr) + auto messageProcessor = + std::bind(&GwJobMakerHandler::processMsg, this, std::placeholders::_1); + auto handler = createConsumerHandler( + kafkaBrokers, def()->rawGwTopic_, 1, {}, messageProcessor); + if (handler.kafkaConsumer_ == nullptr) return false; handlers.push_back(handler); } - return true; + return true; } diff --git a/src/JobMaker.h b/src/JobMaker.h index 6a2eee793..a48e895d3 100644 --- a/src/JobMaker.h +++ b/src/JobMaker.h @@ -47,10 +47,13 @@ struct JobMakerConsumerHandler { string kafkaTopic_; shared_ptr kafkaConsumer_; JobMakerMessageProcessor messageProcessor_; + + // When be false, jobmaker will not trigger a timeout when the consumer + // has not received the new message for a long time. 
+ bool jobKeepAlive_; }; -struct JobMakerDefinition -{ +struct JobMakerDefinition { virtual ~JobMakerDefinition() {} string chainType_; @@ -64,8 +67,7 @@ struct JobMakerDefinition string fileLastJobTime_; }; -struct GwJobMakerDefinition : public JobMakerDefinition -{ +struct GwJobMakerDefinition : public JobMakerDefinition { virtual ~GwJobMakerDefinition() {} string rawGwTopic_; @@ -73,16 +75,15 @@ struct GwJobMakerDefinition : public JobMakerDefinition uint32_t workLifeTime_; }; -struct GbtJobMakerDefinition : public JobMakerDefinition -{ +struct GbtJobMakerDefinition : public JobMakerDefinition { virtual ~GbtJobMakerDefinition() {} bool testnet_; - + string payoutAddr_; string coinbaseInfo_; uint32_t blockVersion_; - + string rawGbtTopic_; string auxPowGwTopic_; string rskRawGwTopic_; @@ -94,20 +95,29 @@ struct GbtJobMakerDefinition : public JobMakerDefinition uint32_t mergedMiningNotifyPolicy_; }; -class JobMakerHandler -{ +class JobMakerHandler { public: virtual ~JobMakerHandler() {} - virtual bool init(shared_ptr def) { def_ = def; return true; } + virtual bool init(shared_ptr def) { + def_ = def; + return true; + } - virtual bool initConsumerHandlers(const string &kafkaBrokers, vector &handlers) = 0; + virtual bool initConsumerHandlers( + const string &kafkaBrokers, + vector &handlers) = 0; virtual string makeStratumJobMsg() = 0; // read-only definition inline shared_ptr def() { return def_; } - JobMakerConsumerHandler createConsumerHandler(const string &kafkaBrokers, const string &topic, int64_t offset - , vector> consumerOptions, JobMakerMessageProcessor messageProcessor); + JobMakerConsumerHandler createConsumerHandler( + const string &kafkaBrokers, + const string &topic, + int64_t offset, + vector> consumerOptions, + JobMakerMessageProcessor messageProcessor, + bool jobKeepAlive = true); uint64_t generateJobId(uint32_t hash) const; @@ -115,18 +125,21 @@ class JobMakerHandler shared_ptr def_; }; -class GwJobMakerHandler : public JobMakerHandler -{ +class 
GwJobMakerHandler : public JobMakerHandler { public: virtual ~GwJobMakerHandler() {} - virtual bool initConsumerHandlers(const string &kafkaBrokers, vector &handlers) override; + virtual bool initConsumerHandlers( + const string &kafkaBrokers, + vector &handlers) override; - //return true if need to produce stratum job + // return true if need to produce stratum job virtual bool processMsg(const string &msg) = 0; // read-only definition - inline shared_ptr def() { return std::dynamic_pointer_cast(def_); } + inline shared_ptr def() { + return std::dynamic_pointer_cast(def_); + } }; class JobMaker { @@ -140,26 +153,31 @@ class JobMaker { string kafkaBrokers_; KafkaProducer kafkaProducer_; - + vector kafkaConsumerHandlers_; vector> kafkaConsumerWorkers_; time_t lastJobTime_; - + protected: - bool consumeKafkaMsg(rd_kafka_message_t *rkmessage, JobMakerConsumerHandler &consumerHandler); + bool consumeKafkaMsg( + rd_kafka_message_t *rkmessage, JobMakerConsumerHandler &consumerHandler); public: void produceStratumJob(); void runThreadKafkaConsume(JobMakerConsumerHandler &consumerHandler); public: - JobMaker(shared_ptr handle, const string& kafkaBrokers, const string& zookeeperBrokers); + JobMaker( + shared_ptr handle, + const string &kafkaBrokers, + const string &zookeeperBrokers); virtual ~JobMaker(); bool init(); void stop(); void run(); + private: bool setupKafkaProducer(); }; diff --git a/src/Kafka.cc b/src/Kafka.cc index 642a402b3..54a8d92d5 100644 --- a/src/Kafka.cc +++ b/src/Kafka.cc @@ -26,59 +26,58 @@ #include "Common.h" #include -static -void kafkaLogger(const rd_kafka_t *rk, int level, - const char *fac, const char *buf) { +static void +kafkaLogger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) { LOG(INFO) << "RDKAFKA-" << level << "-" << fac << ": " - << (rk ? rd_kafka_name(rk) : NULL) << buf; + << (rk ? 
rd_kafka_name(rk) : NULL) << buf; } -static -void print_partition_list(const rd_kafka_topic_partition_list_t *partitions) { +static void +print_partition_list(const rd_kafka_topic_partition_list_t *partitions) { int i; - for (i = 0 ; i < partitions->cnt ; i++) { - LOG(ERROR) << i << " " << partitions->elems[i].topic<< " [" - << partitions->elems[i].partition << "] offset " << partitions->elems[i].offset; + for (i = 0; i < partitions->cnt; i++) { + LOG(ERROR) << i << " " << partitions->elems[i].topic << " [" + << partitions->elems[i].partition << "] offset " + << partitions->elems[i].offset; } } -static -void rebalance_cb(rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque) { +static void rebalance_cb( + rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque) { LOG(ERROR) << "consumer group rebalanced: "; - switch (err) - { - case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: - LOG(ERROR) << "assigned:"; - print_partition_list(partitions); - rd_kafka_assign(rk, partitions); - break; - - case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: - LOG(ERROR) << "revoked:"; - print_partition_list(partitions); - rd_kafka_assign(rk, NULL); - break; - - default: - fprintf(stderr, "failed: %s\n", rd_kafka_err2str(err)); - rd_kafka_assign(rk, NULL); - break; + switch (err) { + case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS: + LOG(ERROR) << "assigned:"; + print_partition_list(partitions); + rd_kafka_assign(rk, partitions); + break; + + case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS: + LOG(ERROR) << "revoked:"; + print_partition_list(partitions); + rd_kafka_assign(rk, NULL); + break; + + default: + fprintf(stderr, "failed: %s\n", rd_kafka_err2str(err)); + rd_kafka_assign(rk, NULL); + break; } } - ///////////////////////////////// KafkaConsumer //////////////////////////////// -KafkaConsumer::KafkaConsumer(const char *brokers, const char *topic, - int partition): -brokers_(brokers), topicStr_(topic), 
-partition_(partition), conf_(rd_kafka_conf_new()), -consumer_(nullptr), -topic_(nullptr) -{ - rd_kafka_conf_set_log_cb(conf_, kafkaLogger); // set logger +KafkaConsumer::KafkaConsumer( + const char *brokers, const char *topic, int partition) + : brokers_(brokers) + , topicStr_(topic) + , partition_(partition) + , conf_(rd_kafka_conf_new()) + , consumer_(nullptr) + , topic_(nullptr) { + rd_kafka_conf_set_log_cb(conf_, kafkaLogger); // set logger LOG(INFO) << "consumer librdkafka version: " << rd_kafka_version_str(); // Maximum transmit message size. @@ -88,7 +87,8 @@ topic_(nullptr) // Maximum number of kilobytes per topic+partition in the local consumer // queue. This value may be overshot by fetch.message.max.bytes. - defaultOptions_["queued.max.messages.kbytes"] = RDKAFKA_QUEUED_MAX_MESSAGES_KBYTES; + defaultOptions_["queued.max.messages.kbytes"] = + RDKAFKA_QUEUED_MAX_MESSAGES_KBYTES; // Maximum number of bytes per topic+partition to request when // fetching messages from the broker @@ -108,11 +108,10 @@ KafkaConsumer::~KafkaConsumer() { while (rd_kafka_outq_len(consumer_) > 0) { rd_kafka_poll(consumer_, 10); } - rd_kafka_topic_destroy(topic_); // Destroy topic - rd_kafka_destroy(consumer_); // Destroy the handle + rd_kafka_topic_destroy(topic_); // Destroy topic + rd_kafka_destroy(consumer_); // Destroy the handle } - // // offset: // RD_KAFKA_OFFSET_BEGINNING @@ -120,7 +119,8 @@ KafkaConsumer::~KafkaConsumer() { // RD_KAFKA_OFFSET_STORED // RD_KAFKA_OFFSET_TAIL(CNT) // -bool KafkaConsumer::setup(int64_t offset, const std::map *options) { +bool KafkaConsumer::setup( + int64_t offset, const std::map *options) { char errstr[1024]; // rdkafka options: @@ -132,11 +132,14 @@ bool KafkaConsumer::setup(int64_t offset, const std::map *option } for (const auto &itr : defaultOptions_) { - if (rd_kafka_conf_set(conf_, - itr.first.c_str(), itr.second.c_str(), - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set( + conf_, + itr.first.c_str(), + 
itr.second.c_str(), + errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { LOG(ERROR) << "kafka set conf failure: " << errstr - << ", key: " << itr.first << ", val: " << itr.second; + << ", key: " << itr.first << ", val: " << itr.second; return false; } } @@ -144,8 +147,8 @@ bool KafkaConsumer::setup(int64_t offset, const std::map *option rd_kafka_topic_conf_t *topicConf = rd_kafka_topic_conf_new(); /* create consumer_ */ - if (!(consumer_ = rd_kafka_new(RD_KAFKA_CONSUMER, conf_, - errstr, sizeof(errstr)))) { + if (!(consumer_ = + rd_kafka_new(RD_KAFKA_CONSUMER, conf_, errstr, sizeof(errstr)))) { LOG(ERROR) << "kafka create consumer failure: " << errstr; return false; } @@ -186,13 +189,13 @@ bool KafkaConsumer::checkAlive() { rd_kafka_resp_err_t err; const struct rd_kafka_metadata *metadata; /* Fetch metadata */ - err = rd_kafka_metadata(consumer_, topic_ ? 0 : 1, - topic_, &metadata, 3000/* timeout_ms */); + err = rd_kafka_metadata( + consumer_, topic_ ? 0 : 1, topic_, &metadata, 3000 /* timeout_ms */); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { LOG(FATAL) << "Failed to acquire metadata: " << rd_kafka_err2str(err); return false; } - rd_kafka_metadata_destroy(metadata); // no need to print out meta data + rd_kafka_metadata_destroy(metadata); // no need to print out meta data return true; } @@ -204,16 +207,20 @@ rd_kafka_message_t *KafkaConsumer::consumer(int timeout_ms) { return rd_kafka_consume(topic_, partition_, timeout_ms); } - - //////////////////////////// KafkaHighLevelConsumer //////////////////////////// -KafkaHighLevelConsumer::KafkaHighLevelConsumer(const char *brokers, const char *topic, - int partition, const string &groupStr): -brokers_(brokers), topicStr_(topic), -groupStr_(groupStr), partition_(partition), -conf_(rd_kafka_conf_new()), consumer_(nullptr), topics_(nullptr) -{ - rd_kafka_conf_set_log_cb(conf_, kafkaLogger); // set logger +KafkaHighLevelConsumer::KafkaHighLevelConsumer( + const char *brokers, + const char *topic, + int partition, + const 
string &groupStr) + : brokers_(brokers) + , topicStr_(topic) + , groupStr_(groupStr) + , partition_(partition) + , conf_(rd_kafka_conf_new()) + , consumer_(nullptr) + , topics_(nullptr) { + rd_kafka_conf_set_log_cb(conf_, kafkaLogger); // set logger LOG(INFO) << "consumer librdkafka version: " << rd_kafka_version_str(); } @@ -248,21 +255,31 @@ bool KafkaHighLevelConsumer::setup() { // // rdkafka options // - const vector conKeys = {"message.max.bytes", "compression.codec", - "queued.max.messages.kbytes","fetch.message.max.bytes","fetch.wait.max.ms", - "group.id" /* Consumer groups require a group id */ + const vector conKeys = { + "message.max.bytes", + "compression.codec", + "queued.max.messages.kbytes", + "fetch.message.max.bytes", + "fetch.wait.max.ms", + "group.id" /* Consumer groups require a group id */ }; - const vector conVals = {RDKAFKA_MESSAGE_MAX_BYTES, RDKAFKA_COMPRESSION_CODEC, - RDKAFKA_QUEUED_MAX_MESSAGES_KBYTES,RDKAFKA_FETCH_MESSAGE_MAX_BYTES, - RDKAFKA_HIGH_LEVEL_CONSUMER_FETCH_WAIT_MAX_MS, groupStr_.c_str()}; + const vector conVals = {RDKAFKA_MESSAGE_MAX_BYTES, + RDKAFKA_COMPRESSION_CODEC, + RDKAFKA_QUEUED_MAX_MESSAGES_KBYTES, + RDKAFKA_FETCH_MESSAGE_MAX_BYTES, + RDKAFKA_HIGH_LEVEL_CONSUMER_FETCH_WAIT_MAX_MS, + groupStr_.c_str()}; assert(conKeys.size() == conVals.size()); for (size_t i = 0; i < conKeys.size(); i++) { - if (rd_kafka_conf_set(conf_, - conKeys[i].c_str(), conVals[i].c_str(), - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set( + conf_, + conKeys[i].c_str(), + conVals[i].c_str(), + errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { LOG(ERROR) << "kafka set conf failure: " << errstr - << ", key: " << conKeys[i] << ", val: " << conVals[i]; + << ", key: " << conKeys[i] << ", val: " << conVals[i]; return false; } } @@ -272,14 +289,16 @@ bool KafkaHighLevelConsumer::setup() { /* Consumer groups always use broker based offset storage */ // offset.store.method - if (rd_kafka_topic_conf_set(topicConf, "offset.store.method", 
"broker", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_topic_conf_set( + topicConf, "offset.store.method", "broker", errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) { LOG(ERROR) << "kafka set 'offset.store.method' failure: " << errstr; return false; } // auto.offset.reset - if (rd_kafka_topic_conf_set(topicConf, "auto.offset.reset", "smallest", - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_topic_conf_set( + topicConf, "auto.offset.reset", "smallest", errstr, sizeof(errstr)) != + RD_KAFKA_CONF_OK) { LOG(ERROR) << "kafka set 'auto.offset.reset' failure: " << errstr; return false; } @@ -294,8 +313,8 @@ bool KafkaHighLevelConsumer::setup() { rd_kafka_conf_set_rebalance_cb(conf_, rebalance_cb); /* create consumer_ */ - if (!(consumer_ = rd_kafka_new(RD_KAFKA_CONSUMER, conf_, - errstr, sizeof(errstr)))) { + if (!(consumer_ = + rd_kafka_new(RD_KAFKA_CONSUMER, conf_, errstr, sizeof(errstr)))) { LOG(ERROR) << "kafka create consumer failure: " << errstr; return false; } @@ -317,7 +336,7 @@ bool KafkaHighLevelConsumer::setup() { rd_kafka_poll_set_consumer(consumer_); /* Create a new list/vector Topic+Partition container */ - int size = 1; // only 1 container + int size = 1; // only 1 container topics_ = rd_kafka_topic_partition_list_new(size); rd_kafka_topic_partition_list_add(topics_, topicStr_.c_str(), partition_); @@ -336,14 +355,16 @@ rd_kafka_message_t *KafkaHighLevelConsumer::consumer(int timeout_ms) { return rd_kafka_consumer_poll(consumer_, timeout_ms); } - - ///////////////////////////////// KafkaProducer //////////////////////////////// -KafkaProducer::KafkaProducer(const char *brokers, const char *topic, int partition): -brokers_(brokers), topicStr_(topic), partition_(partition), conf_(rd_kafka_conf_new()), -producer_(nullptr), topic_(nullptr) -{ - rd_kafka_conf_set_log_cb(conf_, kafkaLogger); // set logger +KafkaProducer::KafkaProducer( + const char *brokers, const char *topic, int partition) + : brokers_(brokers) + , 
topicStr_(topic) + , partition_(partition) + , conf_(rd_kafka_conf_new()) + , producer_(nullptr) + , topic_(nullptr) { + rd_kafka_conf_set_log_cb(conf_, kafkaLogger); // set logger LOG(INFO) << "producer librdkafka version: " << rd_kafka_version_str(); // @@ -357,7 +378,8 @@ producer_(nullptr), topic_(nullptr) defaultOptions_["compression.codec"] = RDKAFKA_COMPRESSION_CODEC; // Maximum number of messages allowed on the producer queue. - defaultOptions_["queue.buffering.max.messages"] = RDKAFKA_QUEUE_BUFFERING_MAX_MESSAGES; + defaultOptions_["queue.buffering.max.messages"] = + RDKAFKA_QUEUE_BUFFERING_MAX_MESSAGES; // Maximum time, in milliseconds, for buffering data on the producer queue. // set to 1 (0 is an illegal value here), deliver msg as soon as possible. @@ -375,8 +397,8 @@ KafkaProducer::~KafkaProducer() { while (rd_kafka_outq_len(producer_) > 0) { rd_kafka_poll(producer_, 100); } - rd_kafka_topic_destroy(topic_); // Destroy topic - rd_kafka_destroy(producer_); // Destroy the handle + rd_kafka_topic_destroy(topic_); // Destroy topic + rd_kafka_destroy(producer_); // Destroy the handle } bool KafkaProducer::setup(const std::map *options) { @@ -391,18 +413,21 @@ bool KafkaProducer::setup(const std::map *options) { } for (const auto &itr : defaultOptions_) { - if (rd_kafka_conf_set(conf_, - itr.first.c_str(), itr.second.c_str(), - errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) { + if (rd_kafka_conf_set( + conf_, + itr.first.c_str(), + itr.second.c_str(), + errstr, + sizeof(errstr)) != RD_KAFKA_CONF_OK) { LOG(ERROR) << "kafka set conf failure: " << errstr - << ", key: " << itr.first << ", val: " << itr.second; + << ", key: " << itr.first << ", val: " << itr.second; return false; } } /* create producer */ - if (!(producer_ = rd_kafka_new(RD_KAFKA_PRODUCER, conf_, - errstr, sizeof(errstr)))) { + if (!(producer_ = + rd_kafka_new(RD_KAFKA_PRODUCER, conf_, errstr, sizeof(errstr)))) { LOG(ERROR) << "kafka create producer failure: " << errstr; return false; } @@ -438,47 
+463,57 @@ bool KafkaProducer::checkAlive() { rd_kafka_resp_err_t err; const struct rd_kafka_metadata *metadata; /* Fetch metadata */ - err = rd_kafka_metadata(producer_, topic_ ? 0 : 1, - topic_, &metadata, 3000/* timeout_ms */); + err = rd_kafka_metadata( + producer_, topic_ ? 0 : 1, topic_, &metadata, 3000 /* timeout_ms */); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { LOG(FATAL) << "Failed to acquire metadata: " << rd_kafka_err2str(err); return false; } - rd_kafka_metadata_destroy(metadata); // no need to print out meta data + rd_kafka_metadata_destroy(metadata); // no need to print out meta data return true; } - void KafkaProducer::produce(const void *payload, size_t len) { // rd_kafka_produce() is non-blocking // Returns 0 on success or -1 on error - int res = rd_kafka_produce(topic_, partition_, RD_KAFKA_MSG_F_COPY, - (void *)payload, len, - NULL, 0, /* Optional key and its length */ - /* Message opaque, provided in delivery report - * callback as msg_opaque. */ - NULL); + int res = rd_kafka_produce( + topic_, + partition_, + RD_KAFKA_MSG_F_COPY, + (void *)payload, + len, + NULL, + 0, /* Optional key and its length */ + /* Message opaque, provided in delivery report + * callback as msg_opaque. */ + NULL); if (res == -1) { LOG(ERROR) << "produce to topic [ " << rd_kafka_topic_name(topic_) - << "]: " << rd_kafka_err2str(rd_kafka_last_error()); + << "]: " << rd_kafka_err2str(rd_kafka_last_error()); } } -// Although the kafka producer is non-blocking, it will fail immediately in some cases, -// such as the local queue is full. In this case, the sender can choose to try again later. +// Although the kafka producer is non-blocking, it will fail immediately in some +// cases, such as the local queue is full. In this case, the sender can choose +// to try again later. 
bool KafkaProducer::tryProduce(const void *payload, size_t len) { // rd_kafka_produce() is non-blocking // Returns 0 on success or -1 on error - int res = rd_kafka_produce(topic_, partition_, RD_KAFKA_MSG_F_COPY, - (void *)payload, len, - NULL, 0, /* Optional key and its length */ - /* Message opaque, provided in delivery report - * callback as msg_opaque. */ - NULL); + int res = rd_kafka_produce( + topic_, + partition_, + RD_KAFKA_MSG_F_COPY, + (void *)payload, + len, + NULL, + 0, /* Optional key and its length */ + /* Message opaque, provided in delivery report + * callback as msg_opaque. */ + NULL); if (res == -1) { LOG(ERROR) << "produce to topic [ " << rd_kafka_topic_name(topic_) - << "]: " << rd_kafka_err2str(rd_kafka_last_error()); + << "]: " << rd_kafka_err2str(rd_kafka_last_error()); } return res == 0; diff --git a/src/Kafka.h b/src/Kafka.h index 2200499de..3f9b61ed2 100644 --- a/src/Kafka.h +++ b/src/Kafka.h @@ -36,32 +36,32 @@ // Maximum transmit message size. // The RawGbt message may large than 30MB while the block size reach 8MB. // So allow the message to reach 60MB. -#define RDKAFKA_MESSAGE_MAX_BYTES "60000000" +#define RDKAFKA_MESSAGE_MAX_BYTES "60000000" // Maximum number of bytes per topic+partition to request when // fetching messages from the broker -#define RDKAFKA_FETCH_MESSAGE_MAX_BYTES "60000000" +#define RDKAFKA_FETCH_MESSAGE_MAX_BYTES "60000000" // Maximum number of kilobytes per topic+partition in the local consumer // queue. This value may be overshot by fetch.message.max.bytes. // Tips: the unit is **kBytes**, not Bytes. (60000 means 60 MB) -#define RDKAFKA_QUEUED_MAX_MESSAGES_KBYTES "60000" +#define RDKAFKA_QUEUED_MAX_MESSAGES_KBYTES "60000" // compression codec to use for compressing message sets -#define RDKAFKA_COMPRESSION_CODEC "snappy" +#define RDKAFKA_COMPRESSION_CODEC "snappy" // Maximum number of messages allowed on the producer queue. 
#define RDKAFKA_QUEUE_BUFFERING_MAX_MESSAGES "100000" // Maximum time, in milliseconds, for buffering data on the producer queue. // set to 1 (0 is an illegal value here), deliver msg as soon as possible. -#define RDKAFKA_QUEUE_BUFFERING_MAX_MS "1000" +#define RDKAFKA_QUEUE_BUFFERING_MAX_MS "1000" // Maximum number of messages batched in one MessageSet. -#define RDKAFKA_BATCH_NUM_MESSAGES "1000" +#define RDKAFKA_BATCH_NUM_MESSAGES "1000" // Maximum time the broker may wait to fill the response with fetch.min.bytes -#define RDKAFKA_CONSUMER_FETCH_WAIT_MAX_MS "10" +#define RDKAFKA_CONSUMER_FETCH_WAIT_MAX_MS "10" #define RDKAFKA_HIGH_LEVEL_CONSUMER_FETCH_WAIT_MAX_MS "50" ///////////////////////////////// KafkaConsumer //////////////////////////////// @@ -69,11 +69,11 @@ class KafkaConsumer { string brokers_; string topicStr_; - int partition_; + int partition_; map defaultOptions_; - rd_kafka_conf_t *conf_; - rd_kafka_t *consumer_; + rd_kafka_conf_t *conf_; + rd_kafka_t *consumer_; rd_kafka_topic_t *topic_; public: @@ -89,32 +89,34 @@ class KafkaConsumer { // RD_KAFKA_OFFSET_STORED // RD_KAFKA_OFFSET_TAIL(CNT) // - bool setup(int64_t offset, const std::map *options=nullptr); + bool setup(int64_t offset, const std::map *options = nullptr); // // don't forget to call rd_kafka_message_destroy() after consumer() // rd_kafka_message_t *consumer(int timeout_ms); }; - //////////////////////////// KafkaHighLevelConsumer //////////////////////////// // High Level Consumer class KafkaHighLevelConsumer { string brokers_; string topicStr_; string groupStr_; - int partition_; + int partition_; - rd_kafka_conf_t *conf_; - rd_kafka_t *consumer_; + rd_kafka_conf_t *conf_; + rd_kafka_t *consumer_; rd_kafka_topic_partition_list_t *topics_; public: - KafkaHighLevelConsumer(const char *brokers, const char *topic, int partition, - const string &groupStr); + KafkaHighLevelConsumer( + const char *brokers, + const char *topic, + int partition, + const string &groupStr); 
~KafkaHighLevelConsumer(); -// bool checkAlive(); // I don't know which function should be used to check + // bool checkAlive(); // I don't know which function should be used to check bool setup(); // @@ -123,27 +125,27 @@ class KafkaHighLevelConsumer { rd_kafka_message_t *consumer(int timeout_ms); }; - ///////////////////////////////// KafkaProducer //////////////////////////////// class KafkaProducer { string brokers_; string topicStr_; - int partition_; + int partition_; map defaultOptions_; - rd_kafka_conf_t *conf_; - rd_kafka_t *producer_; + rd_kafka_conf_t *conf_; + rd_kafka_t *producer_; rd_kafka_topic_t *topic_; public: KafkaProducer(const char *brokers, const char *topic, int partition); ~KafkaProducer(); - bool setup(const std::map *options=nullptr); + bool setup(const std::map *options = nullptr); bool checkAlive(); void produce(const void *payload, size_t len); - // Although the kafka producer is non-blocking, it will fail immediately in some cases, - // such as the local queue is full. In this case, the sender can choose to try again later. + // Although the kafka producer is non-blocking, it will fail immediately in + // some cases, such as the local queue is full. In this case, the sender can + // choose to try again later. 
bool tryProduce(const void *payload, size_t len); }; diff --git a/src/MySQLConnection.cc b/src/MySQLConnection.cc index 9f0bfb8bc..81fdbfa0b 100644 --- a/src/MySQLConnection.cc +++ b/src/MySQLConnection.cc @@ -21,15 +21,15 @@ #include #include -MySQLResult::MySQLResult() : - result(nullptr) { +MySQLResult::MySQLResult() + : result(nullptr) { } -MySQLResult::MySQLResult(MYSQL_RES * result) : - result(result) { +MySQLResult::MySQLResult(MYSQL_RES *result) + : result(result) { } -void MySQLResult::reset(MYSQL_RES * result) { +void MySQLResult::reset(MYSQL_RES *result) { if (this->result) { mysql_free_result(this->result); } @@ -44,7 +44,7 @@ MySQLResult::~MySQLResult() { uint64_t MySQLResult::numRows() { if (result) { - return mysql_num_rows(result); + return mysql_num_rows(result); } return 0; } @@ -53,17 +53,17 @@ uint32_t MySQLResult::fields() { return mysql_num_fields(result); } -char ** MySQLResult::nextRow() { +char **MySQLResult::nextRow() { return mysql_fetch_row(result); } -MySQLConnection::MySQLConnection(const MysqlConnectInfo &connectInfo): -host_(connectInfo.host_.c_str()), -port_(connectInfo.port_), username_(connectInfo.username_.c_str()), -password_(connectInfo.password_.c_str()), -dbName_(connectInfo.dbName_.c_str()), -conn(nullptr) -{ +MySQLConnection::MySQLConnection(const MysqlConnectInfo &connectInfo) + : host_(connectInfo.host_.c_str()) + , port_(connectInfo.port_) + , username_(connectInfo.username_.c_str()) + , password_(connectInfo.password_.c_str()) + , dbName_(connectInfo.dbName_.c_str()) + , conn(nullptr) { } MySQLConnection::~MySQLConnection() { @@ -76,14 +76,21 @@ bool MySQLConnection::open() { if (!conn) { LOG(ERROR) << "create MYSQL failed"; } - if (mysql_real_connect(conn, host_.c_str(), username_.c_str(), password_.c_str(), - dbName_.c_str(), port_, nullptr, 0) == nullptr) { + if (mysql_real_connect( + conn, + host_.c_str(), + username_.c_str(), + password_.c_str(), + dbName_.c_str(), + port_, + nullptr, + 0) == nullptr) { LOG(ERROR) 
<< "mysql_real_connect failed: " << mysql_error(conn); close(); return false; } - //set charaseter + // set charaseter mysql_set_character_set(conn, "utf8"); // set timezone @@ -109,17 +116,20 @@ bool MySQLConnection::ping() { // has gone down and auto-reconnect is enabled an attempt to reconnect is // made. If the connection is down and auto-reconnect is disabled, // mysql_ping() returns an error. - // Zero if the connection to the server is active. Nonzero if an error occurred. + // Zero if the connection to the server is active. Nonzero if an error + // occurred. // - if (!conn) { open(); } + if (!conn) { + open(); + } // ping if (mysql_ping(conn) == 0) { return true; } LOG(ERROR) << "mysql_ping() failure, error_no: " << mysql_errno(conn) - << ", error_info: " << mysql_error(conn); + << ", error_info: " << mysql_error(conn); // re-connect LOG(INFO) << "reconnect to mysql DB"; @@ -131,7 +141,7 @@ bool MySQLConnection::ping() { return true; } LOG(ERROR) << "mysql_ping() failure, error_no: " << mysql_errno(conn) - << ", error_info: " << mysql_error(conn); + << ", error_info: " << mysql_error(conn); return false; } @@ -143,33 +153,35 @@ bool MySQLConnection::reconnect() { return ping(); } -bool MySQLConnection::execute(const char * sql) { +bool MySQLConnection::execute(const char *sql) { uint32_t error_no; int queryTimes = 0; DLOG(INFO) << "[MySQLConnection::execute] SQL: " << sql; query: - if (!conn) { open(); } + if (!conn) { + open(); + } queryTimes++; if (mysql_query(conn, sql) == 0) { - return true; // exec sql success + return true; // exec sql success } // get mysql error error_no = mysql_errno(conn); - LOG(ERROR) << "exec sql failure, error_no: " << error_no << ", error_info: " - << mysql_error(conn) << " , sql: " << sql; + LOG(ERROR) << "exec sql failure, error_no: " << error_no + << ", error_info: " << mysql_error(conn) << " , sql: " << sql; // 2006: MySQL server has gone away // 2013: Lost connection to MySQL server if (!(error_no == 2006 || error_no == 
2013)) { - return false; // not a network error + return false; // not a network error } // use mysql_ping() to reconnnect if (queryTimes <= 3 && (error_no == 2006 || error_no == 2013)) { - sleep(10); // rds switch master-slave usually take about 20 seconds + sleep(10); // rds switch master-slave usually take about 20 seconds if (mysql_ping(conn) == 0) { LOG(ERROR) << "reconnect success"; } else { @@ -182,14 +194,14 @@ bool MySQLConnection::execute(const char * sql) { return false; } -bool MySQLConnection::query(const char * sql, MySQLResult & result) { +bool MySQLConnection::query(const char *sql, MySQLResult &result) { bool res = execute(sql); if (res) result.reset(mysql_store_result(conn)); return res; } -int64_t MySQLConnection::update(const char * sql) { +int64_t MySQLConnection::update(const char *sql) { if (execute(sql)) return mysql_affected_rows(conn); else @@ -217,14 +229,18 @@ string MySQLConnection::getVariable(const char *name) { return ""; } char **row = result.nextRow(); - DLOG(INFO) << "msyql get variable: \"" << row[0] << "\" = \"" << row[1] << "\""; + DLOG(INFO) << "msyql get variable: \"" << row[0] << "\" = \"" << row[1] + << "\""; return string(row[1]); } -bool multiInsert(MySQLConnection &db, const string &table, - const string &fields, const vector &values) { - string sqlPrefix = Strings::Format("INSERT INTO `%s`(%s) VALUES ", - table.c_str(), fields.c_str()); +bool multiInsert( + MySQLConnection &db, + const string &table, + const string &fields, + const vector &values) { + string sqlPrefix = Strings::Format( + "INSERT INTO `%s`(%s) VALUES ", table.c_str(), fields.c_str()); if (values.size() == 0 || fields.length() == 0 || table.length() == 0) { return false; @@ -235,7 +251,7 @@ bool multiInsert(MySQLConnection &db, const string &table, sql += Strings::Format("(%s),", it.c_str()); // overthan 16MB // notice: you need to make sure mysql.max_allowed_packet is over than 16MB - if (sql.length() >= 16*1024*1024) { + if (sql.length() >= 16 * 1024 * 
1024) { sql.resize(sql.length() - 1); if (!db.execute(sql.c_str())) { return false; diff --git a/src/MySQLConnection.h b/src/MySQLConnection.h index 1e12eafac..ec6bdac28 100644 --- a/src/MySQLConnection.h +++ b/src/MySQLConnection.h @@ -42,44 +42,50 @@ typedef struct st_mysql_res MYSQL_RES; * auto free */ struct MySQLResult { - struct st_mysql_res * result; + struct st_mysql_res *result; MySQLResult(); - MySQLResult(MYSQL_RES * result); - void reset(MYSQL_RES * result); + MySQLResult(MYSQL_RES *result); + void reset(MYSQL_RES *result); ~MySQLResult(); uint64_t numRows(); uint32_t fields(); - char ** nextRow(); + char **nextRow(); }; class MysqlConnectInfo { public: - string host_; + string host_; int32_t port_; - string username_; - string password_; - string dbName_; - - MysqlConnectInfo(const string &host, int32_t port, const string &userName, - const string &password, const string &dbName): - host_(host), port_(port), username_(userName), password_(password), dbName_(dbName) - { - } + string username_; + string password_; + string dbName_; + + MysqlConnectInfo( + const string &host, + int32_t port, + const string &userName, + const string &password, + const string &dbName) + : host_(host) + , port_(port) + , username_(userName) + , password_(password) + , dbName_(dbName) {} MysqlConnectInfo(const MysqlConnectInfo &r) { - host_ = r.host_; - port_ = r.port_; + host_ = r.host_; + port_ = r.port_; username_ = r.username_; password_ = r.password_; - dbName_ = r.dbName_; + dbName_ = r.dbName_; } - MysqlConnectInfo& operator=(const MysqlConnectInfo &r) { - host_ = r.host_; - port_ = r.port_; + MysqlConnectInfo &operator=(const MysqlConnectInfo &r) { + host_ = r.host_; + port_ = r.port_; username_ = r.username_; password_ = r.password_; - dbName_ = r.dbName_; + dbName_ = r.dbName_; return *this; } }; @@ -98,7 +104,7 @@ class MySQLConnection { string password_; string dbName_; - struct st_mysql * conn; + struct st_mysql *conn; public: MySQLConnection(const 
MysqlConnectInfo &connectInfo); @@ -109,28 +115,27 @@ class MySQLConnection { bool ping(); bool reconnect(); - bool execute(const char * sql); - bool execute(const string &sql) { - return execute(sql.c_str()); - } + bool execute(const char *sql); + bool execute(const string &sql) { return execute(sql.c_str()); } - bool query(const char * sql, MySQLResult & result); - bool query(const string & sql, MySQLResult & result) { + bool query(const char *sql, MySQLResult &result); + bool query(const string &sql, MySQLResult &result) { return query(sql.c_str(), result); } // return -1 on failure - int64_t update(const char * sql); - int64_t update(const string & sql) { - return update(sql.c_str()); - } + int64_t update(const char *sql); + int64_t update(const string &sql) { return update(sql.c_str()); } uint64_t affectedRows(); uint64_t getInsertId(); string getVariable(const char *name); }; -bool multiInsert(MySQLConnection &db, const string &table, - const string &fields, const vector &values); +bool multiInsert( + MySQLConnection &db, + const string &table, + const string &fields, + const vector &values); #endif diff --git a/src/Network.h b/src/Network.h index 26120c33a..050169380 100644 --- a/src/Network.h +++ b/src/Network.h @@ -28,11 +28,11 @@ #include #include - -///////////////////////////////////// IPv4/IPv6 compatible address structure //////////////////////////////////// +///////////////////////////////////// IPv4/IPv6 compatible address structure +/////////////////////////////////////// union IpAddress { // all datas are big endian - uint8_t addrUint8[16]; + uint8_t addrUint8[16]; uint16_t addrUint16[8]; uint32_t addrUint32[4]; uint64_t addrUint64[2]; @@ -65,8 +65,7 @@ union IpAddress { addrUint32[1] = 0; addrUint32[2] = 0; return inet_pton(AF_INET, ipStr.c_str(), (void *)&addrIpv4[3]); - } - else { + } else { return inet_pton(AF_INET, ipStr.c_str(), (void *)&addrIpv6); } } @@ -75,11 +74,10 @@ union IpAddress { const char *pStr; if (isIpv4()) { - char 
str[INET_ADDRSTRLEN]; + char str[INET_ADDRSTRLEN]; pStr = inet_ntop(AF_INET, (void *)&(addrIpv4[3]), str, sizeof(str)); - } - else { - char str[INET6_ADDRSTRLEN]; + } else { + char str[INET6_ADDRSTRLEN]; pStr = inet_ntop(AF_INET6, &addrIpv6, str, sizeof(str)); } @@ -93,9 +91,7 @@ union IpAddress { addrIpv4[3] = inAddr; } - void fromInAddr(const struct in6_addr &inAddr) { - addrIpv6 = inAddr; - } + void fromInAddr(const struct in6_addr &inAddr) { addrIpv6 = inAddr; } void fromIpv4Int(const uint32_t ipv4Int) { addrUint32[0] = 0; @@ -104,9 +100,7 @@ union IpAddress { addrUint32[3] = ipv4Int; } - uint32_t toIpv4Int() const { - return addrUint32[3]; - } + uint32_t toIpv4Int() const { return addrUint32[3]; } bool isIpv4() const { if (addrUint32[0] == 0 && addrUint32[1] == 0) { @@ -133,6 +127,7 @@ union IpAddress { }; // IpAddress should be 16 bytes -static_assert(sizeof(IpAddress) == 16, "union IpAddress should not large than 16 bytes"); +static_assert( + sizeof(IpAddress) == 16, "union IpAddress should not large than 16 bytes"); #endif // BPOOL_NETWORK_H_ diff --git a/src/RedisConnection.cc b/src/RedisConnection.cc index ba7f37e17..1cfe37022 100644 --- a/src/RedisConnection.cc +++ b/src/RedisConnection.cc @@ -26,15 +26,14 @@ #include "RedisConnection.h" - /////////////////////////////// RedisResult /////////////////////////////// -RedisResult::RedisResult() : - reply_(nullptr) { +RedisResult::RedisResult() + : reply_(nullptr) { } -RedisResult::RedisResult(redisReply *reply) : - reply_(reply) { +RedisResult::RedisResult(redisReply *reply) + : reply_(reply) { } RedisResult::RedisResult(RedisResult &&other) { @@ -85,8 +84,9 @@ long long RedisResult::integer() { /////////////////////////////// RedisConnection /////////////////////////////// -RedisConnection::RedisConnection(const RedisConnectInfo &connInfo) : - connInfo_(connInfo), conn_(nullptr) { +RedisConnection::RedisConnection(const RedisConnectInfo &connInfo) + : connInfo_(connInfo) + , conn_(nullptr) { } bool 
RedisConnection::open() { @@ -96,7 +96,7 @@ bool RedisConnection::open() { LOG(ERROR) << "Connect to redis failed: conn_ is nullptr"; return false; } - + if (conn_->err) { LOG(ERROR) << "Connect to redis failed: " << conn_->errstr; close(); @@ -115,7 +115,8 @@ bool RedisConnection::open() { } if (result.type() != REDIS_REPLY_STATUS || result.str() != "OK") { - LOG(ERROR) << "redis auth failed: result is " << result.type() << " (" << result.str() << "), " + LOG(ERROR) << "redis auth failed: result is " << result.type() << " (" + << result.str() << "), " << "expected: " << REDIS_REPLY_STATUS << " (OK)."; close(); return false; @@ -143,7 +144,8 @@ bool RedisConnection::_ping() { } if (result.type() != REDIS_REPLY_STATUS || result.str() != "PONG") { - LOG(ERROR) << "ping redis failed: result is " << result.type() << " (" << result.str() << "), " + LOG(ERROR) << "ping redis failed: result is " << result.type() << " (" + << result.str() << "), " << "expected: " << REDIS_REPLY_STATUS << " (PONG)."; return false; } @@ -174,7 +176,7 @@ bool RedisConnection::ping() { } RedisResult RedisConnection::execute(const string &command) { - return RedisResult((redisReply*)redisCommand(conn_, command.c_str())); + return RedisResult((redisReply *)redisCommand(conn_, command.c_str())); } RedisResult RedisConnection::execute(initializer_list args) { @@ -185,19 +187,20 @@ RedisResult RedisConnection::execute(initializer_list args) { auto arg = args.begin(); size_t i = 0; - while (arg != args.end()){ + while (arg != args.end()) { argv[i] = arg->c_str(); argvlen[i] = arg->size(); - + arg++; i++; } - auto result = RedisResult((redisReply*)redisCommandArgv(conn_, argc, argv, argvlen)); + auto result = + RedisResult((redisReply *)redisCommandArgv(conn_, argc, argv, argvlen)); + + delete[] argv; + delete[] argvlen; - delete []argv; - delete []argvlen; - return result; } @@ -209,19 +212,20 @@ RedisResult RedisConnection::execute(const vector &args) { auto arg = args.begin(); size_t i = 0; - 
while (arg != args.end()){ + while (arg != args.end()) { argv[i] = arg->c_str(); argvlen[i] = arg->size(); - + arg++; i++; } - auto result = RedisResult((redisReply*)redisCommandArgv(conn_, argc, argv, argvlen)); + auto result = + RedisResult((redisReply *)redisCommandArgv(conn_, argc, argv, argvlen)); + + delete[] argv; + delete[] argvlen; - delete []argv; - delete []argvlen; - return result; } @@ -237,18 +241,18 @@ void RedisConnection::prepare(initializer_list args) { auto arg = args.begin(); size_t i = 0; - while (arg != args.end()){ + while (arg != args.end()) { argv[i] = arg->c_str(); argvlen[i] = arg->size(); - + arg++; i++; } redisAppendCommandArgv(conn_, argc, argv, argvlen); - - delete []argv; - delete []argvlen; + + delete[] argv; + delete[] argvlen; } void RedisConnection::prepare(const vector &args) { @@ -259,22 +263,22 @@ void RedisConnection::prepare(const vector &args) { auto arg = args.begin(); size_t i = 0; - while (arg != args.end()){ + while (arg != args.end()) { argv[i] = arg->c_str(); argvlen[i] = arg->size(); - + arg++; i++; } redisAppendCommandArgv(conn_, argc, argv, argvlen); - - delete []argv; - delete []argvlen; + + delete[] argv; + delete[] argvlen; } RedisResult RedisConnection::execute() { void *reply; redisGetReply(conn_, &reply); - return RedisResult((redisReply*)reply); + return RedisResult((redisReply *)reply); } diff --git a/src/RedisConnection.h b/src/RedisConnection.h index 14bd5bbb1..46e0eefea 100644 --- a/src/RedisConnection.h +++ b/src/RedisConnection.h @@ -30,16 +30,16 @@ using namespace std; -/* -* Possible values of `redisReply->type`: -* REDIS_REPLY_STRING 1 -* REDIS_REPLY_ARRAY 2 -* REDIS_REPLY_INTEGER 3 -* REDIS_REPLY_NIL 4 -* REDIS_REPLY_STATUS 5 -* REDIS_REPLY_ERROR 6 -* From: -*/ +/* + * Possible values of `redisReply->type`: + * REDIS_REPLY_STRING 1 + * REDIS_REPLY_ARRAY 2 + * REDIS_REPLY_INTEGER 3 + * REDIS_REPLY_NIL 4 + * REDIS_REPLY_STATUS 5 + * REDIS_REPLY_ERROR 6 + * From: + */ /////////////////////////////// 
RedisResult /////////////////////////////// class RedisResult { @@ -48,9 +48,9 @@ class RedisResult { public: RedisResult(); RedisResult(redisReply *reply); - RedisResult(RedisResult &&other); // move constructor - RedisResult(const RedisResult &other) = delete; // copy constructor - RedisResult& operator=(const RedisResult& str) = delete; // assign operation + RedisResult(RedisResult &&other); // move constructor + RedisResult(const RedisResult &other) = delete; // copy constructor + RedisResult &operator=(const RedisResult &str) = delete; // assign operation ~RedisResult(); void reset(redisReply *reply); @@ -65,25 +65,25 @@ class RedisResult { /////////////////////////////// RedisConnectInfo /////////////////////////////// class RedisConnectInfo { public: - string host_; + string host_; int32_t port_; string passwd_; - RedisConnectInfo(const string &host, int32_t port, const string &passwd) : - host_(host), port_(port), passwd_(passwd) - { - } + RedisConnectInfo(const string &host, int32_t port, const string &passwd) + : host_(host) + , port_(port) + , passwd_(passwd) {} RedisConnectInfo(const RedisConnectInfo &r) { - host_ = r.host_; - port_ = r.port_; - passwd_ = r.passwd_; + host_ = r.host_; + port_ = r.port_; + passwd_ = r.passwd_; } - RedisConnectInfo& operator=(const RedisConnectInfo &r) { - host_ = r.host_; - port_ = r.port_; - passwd_ = r.passwd_; + RedisConnectInfo &operator=(const RedisConnectInfo &r) { + host_ = r.host_; + port_ = r.port_; + passwd_ = r.passwd_; return *this; } }; diff --git a/src/ShareLogParser.h b/src/ShareLogParser.h index 55dda82d3..a015bfa08 100644 --- a/src/ShareLogParser.h +++ b/src/ShareLogParser.h @@ -24,39 +24,40 @@ #ifndef SHARELOGPARSER_H_ #define SHARELOGPARSER_H_ - #include "MySQLConnection.h" #include "Statistics.h" #include "zlibstream/zstr.hpp" - #include #include #include #include - /////////////////////////////// ShareLogDumper /////////////////////////////// // Interface, used as a pointer type. 
class ShareLogDumper { public: - virtual ~ShareLogDumper() {}; + virtual ~ShareLogDumper(){}; virtual void dump2stdout() = 0; }; -/////////////////////////////// ShareLogDumperT /////////////////////////////// +/////////////////////////////// ShareLogDumperT /////////////////////////////// // print share.toString() to stdout template class ShareLogDumperT : public ShareLogDumper { - string filePath_; // sharelog data file path - std::set uids_; // if empty dump all user's shares + string filePath_; // sharelog data file path + std::set uids_; // if empty dump all user's shares bool isDumpAll_; void parseShareLog(const uint8_t *buf, size_t len); void parseShare(const SHARE *share); public: - ShareLogDumperT(const char *chainType, const string &dataDir, time_t timestamp, const std::set &uids); + ShareLogDumperT( + const char *chainType, + const string &dataDir, + time_t timestamp, + const std::set &uids); ~ShareLogDumperT(); void dump2stdout(); @@ -71,7 +72,7 @@ class ShareLogParser { virtual bool flushToDB() = 0; virtual bool processUnchangedShareLog() = 0; }; -/////////////////////////////// ShareLogParserT /////////////////////////////// +/////////////////////////////// ShareLogParserT /////////////////////////////// // // 1. read sharelog data files // 2. 
calculate share & score @@ -81,25 +82,31 @@ template class ShareLogParserT : public ShareLogParser { pthread_rwlock_t rwlock_; // key: WorkerKey, value: share stats - std::unordered_map>> workersStats_; + std::unordered_map< + WorkerKey /* userID + workerID */, + shared_ptr>> + workersStats_; - time_t date_; // date_ % 86400 == 0 - string filePath_; // sharelog data file path + time_t date_; // date_ % 86400 == 0 + string filePath_; // sharelog data file path const string chainType_; // // for processGrowingShareLog() // - zstr::ifstream *f_; // file handler - uint8_t *buf_; // fread buffer + zstr::ifstream *f_; // file handler + uint8_t *buf_; // fread buffer // 48 * 1000000 = 48,000,000 ~ 48 MB - static const size_t kMaxElementsNum_ = 1000000; // num of shares + static const size_t kMaxElementsNum_ = 1000000; // num of shares size_t incompleteShareSize_; - uint32_t bufferlength_ ; + uint32_t bufferlength_; + + MySQLConnection poolDB_; // save stats data - MySQLConnection poolDB_; // save stats data - - shared_ptr> dupShareChecker_; // Used to detect duplicate share attacks. + shared_ptr> + dupShareChecker_; // Used to detect duplicate share attacks. 
+ + bool acceptStale_; // Whether stale shares are accepted inline int32_t getHourIdx(uint32_t ts) { // %H Hour in 24h format (00-23) @@ -109,25 +116,34 @@ class ShareLogParserT : public ShareLogParser { void parseShareLog(const uint8_t *buf, size_t len); void parseShare(SHARE &share); - void generateDailyData(shared_ptr> stats, - const int32_t userId, const int64_t workerId, - vector *valuesWorkersDay, - vector *valuesUsersDay, - vector *valuesPoolDay); - void generateHoursData(shared_ptr> stats, - const int32_t userId, const int64_t workerId, - vector *valuesWorkersHour, - vector *valuesUsersHour, - vector *valuesPoolHour); - void flushHourOrDailyData(const vector values, - const string &tableName, - const string &extraFields); + void generateDailyData( + shared_ptr> stats, + const int32_t userId, + const int64_t workerId, + vector *valuesWorkersDay, + vector *valuesUsersDay, + vector *valuesPoolDay); + void generateHoursData( + shared_ptr> stats, + const int32_t userId, + const int64_t workerId, + vector *valuesWorkersHour, + vector *valuesUsersHour, + vector *valuesPoolHour); + void flushHourOrDailyData( + const vector values, + const string &tableName, + const string &extraFields); void removeExpiredDataFromDB(); public: - ShareLogParserT(const char *chainType, const string &dataDir, - time_t timestamp, const MysqlConnectInfo &poolDBInfo, - shared_ptr> dupShareChecker); + ShareLogParserT( + const char *chainType, + const string &dataDir, + time_t timestamp, + const MysqlConnectInfo &poolDBInfo, + shared_ptr> dupShareChecker, + bool acceptStale); ~ShareLogParserT(); bool init(); @@ -136,7 +152,8 @@ class ShareLogParserT : public ShareLogParser { bool flushToDB(); // get share stats day handler - shared_ptr> getShareStatsDayHandler(const WorkerKey &key); + shared_ptr> + getShareStatsDayHandler(const WorkerKey &key); // read unchanged share data bin file, for example yestoday's file. it will // use mmap() to get high performance. 
call only once will process @@ -145,20 +162,19 @@ class ShareLogParserT : public ShareLogParser { // today's file is still growing, return processed shares number. int64_t processGrowingShareLog(); - bool isReachEOF(); // only for growing file + bool isReachEOF(); // only for growing file }; - //////////////////////////// ShareLogParserServer //////////////////////////// // Interface, used as a pointer type. class ShareLogParserServer { public: - virtual ~ShareLogParserServer() {}; + virtual ~ShareLogParserServer(){}; virtual void stop() = 0; virtual void run() = 0; }; -//////////////////////////// ShareLogParserServerT //////////////////////////// +//////////////////////////// ShareLogParserServerT //////////////////////////// // // read share binlog, parse shares, calc stats data than save them to database // table.stats_xxxx. meanwhile hold there stats data in memory so it could @@ -170,8 +186,8 @@ class ShareLogParserServerT : public ShareLogParserServer { uint32_t uptime_; uint64_t requestCount_; uint64_t responseBytes_; - uint32_t date_; // Y-m-d - vector stats; // first is today and latest 3 hours + uint32_t date_; // Y-m-d + vector stats; // first is today and latest 3 hours }; //----------------- @@ -179,13 +195,16 @@ class ShareLogParserServerT : public ShareLogParserServer { pthread_rwlock_t rwlock_; time_t uptime_; // share log daily - time_t date_; // date_ % 86400 == 0 + time_t date_; // date_ % 86400 == 0 shared_ptr> shareLogParser_; const string chainType_; string dataDir_; - MysqlConnectInfo poolDBInfo_; // save stats data + MysqlConnectInfo poolDBInfo_; // save stats data time_t kFlushDBInterval_; - shared_ptr> dupShareChecker_; // Used to detect duplicate share attacks. + shared_ptr> + dupShareChecker_; // Used to detect duplicate share attacks. 
+ + bool acceptStale_; // Whether stale shares are accepted // httpd struct event_base *base_; @@ -195,10 +214,15 @@ class ShareLogParserServerT : public ShareLogParserServer { thread threadShareLogParser_; void getServerStatus(ServerStatus &s); - void getShareStats(struct evbuffer *evb, const char *pUserId, - const char *pWorkerId, const char *pHour); - void _getShareStats(const vector &keys, const vector &hours, - vector &shareStats); + void getShareStats( + struct evbuffer *evb, + const char *pUserId, + const char *pWorkerId, + const char *pHour); + void _getShareStats( + const vector &keys, + const vector &hours, + vector &shareStats); void runThreadShareLogParser(); bool initShareLogParser(time_t datets); @@ -211,23 +235,26 @@ class ShareLogParserServerT : public ShareLogParserServer { atomic responseBytes_; public: - ShareLogParserServerT(const char *chainType, const string dataDir, - const string &httpdHost, unsigned short httpdPort, - const MysqlConnectInfo &poolDBInfo, - const uint32_t kFlushDBInterval, - shared_ptr> dupShareChecker); + ShareLogParserServerT( + const char *chainType, + const string dataDir, + const string &httpdHost, + unsigned short httpdPort, + const MysqlConnectInfo &poolDBInfo, + const uint32_t kFlushDBInterval, + shared_ptr> dupShareChecker, + bool acceptStale); ~ShareLogParserServerT(); void stop(); void run(); static void httpdServerStatus(struct evhttp_request *req, void *arg); - static void httpdShareStats (struct evhttp_request *req, void *arg); + static void httpdShareStats(struct evhttp_request *req, void *arg); }; #include "ShareLogParser.inl" /////////////////////////////// Alias /////////////////////////////// - #endif // SHARELOGPARSER_H_ diff --git a/src/ShareLogParser.inl b/src/ShareLogParser.inl index beae58e9d..3960308d0 100644 --- a/src/ShareLogParser.inl +++ b/src/ShareLogParser.inl @@ -26,19 +26,23 @@ #include #include -/////////////////////////////// ShareLogDumperT /////////////////////////////// 
+/////////////////////////////// ShareLogDumperT /////////////////////////////// template -ShareLogDumperT::ShareLogDumperT(const char *chainType, const string &dataDir, - time_t timestamp, const std::set &uids) -: uids_(uids), isDumpAll_(false) -{ +ShareLogDumperT::ShareLogDumperT( + const char *chainType, + const string &dataDir, + time_t timestamp, + const std::set &uids) + : uids_(uids) + , isDumpAll_(false) { filePath_ = getStatsFilePath(chainType, dataDir, timestamp); if (uids_.empty()) isDumpAll_ = true; } -template ShareLogDumperT::~ShareLogDumperT() { +template +ShareLogDumperT::~ShareLogDumperT() { } template @@ -59,16 +63,20 @@ void ShareLogDumperT::dump2stdout() { uint32_t incompleteShareSize = 0; while (f.peek() != EOF) { - f.read((char *)buf.data() + incompleteShareSize, buf.size() - incompleteShareSize); + f.read( + (char *)buf.data() + incompleteShareSize, + buf.size() - incompleteShareSize); uint32_t readNum = f.gcount() + incompleteShareSize; uint32_t currentpos = 0; while (currentpos + sizeof(uint32_t) < readNum) { - uint32_t sharelength = *(uint32_t*)(buf.data() + currentpos); + uint32_t sharelength = *(uint32_t *)(buf.data() + currentpos); - if (readNum >= currentpos + sizeof(uint32_t) + sharelength) { + if (readNum >= currentpos + sizeof(uint32_t) + sharelength) { - parseShareLog((const uint8_t *)(buf.data() + currentpos + sizeof(uint32_t)), sharelength); + parseShareLog( + (const uint8_t *)(buf.data() + currentpos + sizeof(uint32_t)), + sharelength); currentpos = currentpos + sizeof(uint32_t) + sharelength; } else { LOG(INFO) << "not read enough length " << sharelength << std::endl; @@ -77,11 +85,15 @@ void ShareLogDumperT::dump2stdout() { } incompleteShareSize = readNum - currentpos; if (incompleteShareSize > 0) { - LOG(INFO) << "incompleteShareSize_ " << incompleteShareSize << std::endl; - memcpy((char *)buf.data(), (char *)buf.data() + currentpos, incompleteShareSize); - } + LOG(INFO) << "incompleteShareSize_ " << incompleteShareSize + << 
std::endl; + memcpy( + (char *)buf.data(), + (char *)buf.data() + currentpos, + incompleteShareSize); + } } - + } catch (...) { LOG(ERROR) << "open file fail: " << filePath_; } @@ -110,15 +122,23 @@ void ShareLogDumperT::parseShare(const SHARE *share) { } } -/////////////////////////////// ShareLogParserT /////////////////////////////// +/////////////////////////////// ShareLogParserT /////////////////////////////// template -ShareLogParserT::ShareLogParserT(const char *chainType, const string &dataDir, - time_t timestamp, const MysqlConnectInfo &poolDBInfo, - shared_ptr> dupShareChecker) -: date_(timestamp), chainType_(chainType), f_(nullptr), buf_(nullptr) -, incompleteShareSize_(0), poolDB_(poolDBInfo) -, dupShareChecker_(dupShareChecker) -{ +ShareLogParserT::ShareLogParserT( + const char *chainType, + const string &dataDir, + time_t timestamp, + const MysqlConnectInfo &poolDBInfo, + shared_ptr> dupShareChecker, + bool acceptStale) + : date_(timestamp) + , chainType_(chainType) + , f_(nullptr) + , buf_(nullptr) + , incompleteShareSize_(0) + , poolDB_(poolDBInfo) + , dupShareChecker_(dupShareChecker) + , acceptStale_(acceptStale) { pthread_rwlock_init(&rwlock_, nullptr); { @@ -159,7 +179,7 @@ template void ShareLogParserT::parseShareLog(const uint8_t *buf, size_t len) { SHARE share; if (!share.ParseFromArray(buf, len)) { - LOG(INFO) << "parse share from base message failed! " ; + LOG(INFO) << "parse share from base message failed! 
"; return; } parseShare(share); @@ -190,9 +210,9 @@ void ShareLogParserT::parseShare(SHARE &share) { pthread_rwlock_unlock(&rwlock_); const uint32_t hourIdx = getHourIdx(share.timestamp()); - workersStats_[wkey]->processShare(hourIdx, share); - workersStats_[ukey]->processShare(hourIdx, share); - workersStats_[pkey]->processShare(hourIdx, share); + workersStats_[wkey]->processShare(hourIdx, share, acceptStale_); + workersStats_[ukey]->processShare(hourIdx, share, acceptStale_); + workersStats_[pkey]->processShare(hourIdx, share, acceptStale_); } template @@ -213,17 +233,21 @@ bool ShareLogParserT::processUnchangedShareLog() { uint32_t incompleteShareSize = 0; while (f.peek() != EOF) { - f.read((char *)buf.data()+ incompleteShareSize, buf.size() - incompleteShareSize); + f.read( + (char *)buf.data() + incompleteShareSize, + buf.size() - incompleteShareSize); uint32_t readNum = f.gcount() + incompleteShareSize; uint32_t currentpos = 0; while (currentpos + sizeof(uint32_t) < readNum) { - uint32_t sharelength = *(uint32_t*)(buf.data()+ currentpos);//get shareLength + uint32_t sharelength = + *(uint32_t *)(buf.data() + currentpos); // get shareLength DLOG(INFO) << "sharelength = " << sharelength << std::endl; - if (readNum >= currentpos + sizeof(uint32_t) + sharelength) { + if (readNum >= currentpos + sizeof(uint32_t) + sharelength) { - parseShareLog((const uint8_t *)(buf.data() + - currentpos + sizeof(uint32_t)), sharelength); + parseShareLog( + (const uint8_t *)(buf.data() + currentpos + sizeof(uint32_t)), + sharelength); currentpos = currentpos + sizeof(uint32_t) + sharelength; } else { @@ -233,8 +257,12 @@ bool ShareLogParserT::processUnchangedShareLog() { } incompleteShareSize = readNum - currentpos; if (incompleteShareSize > 0) { - LOG(INFO) << "incompleteShareSize_ " << incompleteShareSize << std::endl; - memcpy((char *)buf.data(), (char *)buf.data() + currentpos, incompleteShareSize); + LOG(INFO) << "incompleteShareSize_ " << incompleteShareSize + << std::endl; 
+ memcpy( + (char *)buf.data(), + (char *)buf.data() + currentpos, + incompleteShareSize); } } return true; @@ -246,30 +274,26 @@ bool ShareLogParserT::processUnchangedShareLog() { template int64_t ShareLogParserT::processGrowingShareLog() { - if(f_ == nullptr) - { + if (f_ == nullptr) { bool fileOpened = true; - try - { + try { f_ = new zstr::ifstream(filePath_, std::ios::binary); if (f_ == nullptr) { LOG(WARNING) << "open file fail. Filename: " << filePath_; fileOpened = false; } - } - catch(...) - { - // just log warning instead of error because it's a usual scenario the file not exist and it throw an exception + } catch (...) { + // just log warning instead of error because it's a usual scenario the + // file not exist and it throw an exception LOG(WARNING) << "open file fail with exception. Filename: " << filePath_; fileOpened = false; } - if(!fileOpened) - { + if (!fileOpened) { delete f_; f_ = nullptr; - return -1; + return -1; } } @@ -294,9 +318,12 @@ int64_t ShareLogParserT::processGrowingShareLog() { // Now zlib/gzip file stream is used. // - // If an incomplete share was found at last read, only reading the rest part of it. + // If an incomplete share was found at last read, only reading the rest part + // of it. 
- f_->read((char *)buf_ + incompleteShareSize_, bufferlength_ - incompleteShareSize_); + f_->read( + (char *)buf_ + incompleteShareSize_, + bufferlength_ - incompleteShareSize_); if (f_->gcount() == 0) { return 0; @@ -306,12 +333,13 @@ int64_t ShareLogParserT::processGrowingShareLog() { uint32_t currentpos = 0, parsedsharenum = 0; while (currentpos + sizeof(uint32_t) < readNum) { - uint32_t sharelength = *(uint32_t*)(buf_ + currentpos);//get shareLength - if (readNum >= currentpos + sizeof(uint32_t) + sharelength) { + uint32_t sharelength = *(uint32_t *)(buf_ + currentpos); // get + // shareLength + if (readNum >= currentpos + sizeof(uint32_t) + sharelength) { parseShareLog(buf_ + currentpos + sizeof(uint32_t), sharelength); currentpos = currentpos + sizeof(uint32_t) + sharelength; - parsedsharenum ++; + parsedsharenum++; } else { break; } @@ -326,7 +354,7 @@ int64_t ShareLogParserT::processGrowingShareLog() { DLOG(INFO) << "processGrowingShareLog share count: " << parsedsharenum; return parsedsharenum; - } catch (...) { + } catch (...) 
{ LOG(ERROR) << "reading file fail with exception: " << filePath_; return -1; } @@ -343,20 +371,23 @@ bool ShareLogParserT::isReachEOF() { } template -void ShareLogParserT::generateHoursData(shared_ptr> stats, - const int32_t userId, - const int64_t workerId, - vector *valuesWorkersHour, - vector *valuesUsersHour, - vector *valuesPoolHour) { - assert(sizeof(stats->shareAccept1h_) / sizeof(stats->shareAccept1h_[0]) == 24); - assert(sizeof(stats->shareReject1h_) / sizeof(stats->shareReject1h_[0]) == 24); - assert(sizeof(stats->score1h_) / sizeof(stats->score1h_[0]) == 24); +void ShareLogParserT::generateHoursData( + shared_ptr> stats, + const int32_t userId, + const int64_t workerId, + vector *valuesWorkersHour, + vector *valuesUsersHour, + vector *valuesPoolHour) { + assert( + sizeof(stats->shareAccept1h_) / sizeof(stats->shareAccept1h_[0]) == 24); + assert( + sizeof(stats->shareReject1h_) / sizeof(stats->shareReject1h_[0]) == 24); + assert(sizeof(stats->score1h_) / sizeof(stats->score1h_[0]) == 24); string table, extraValues; // worker if (userId != 0 && workerId != 0) { - extraValues = Strings::Format("% " PRId64",%d,", workerId, userId); + extraValues = Strings::Format("% " PRId64 ",%d,", workerId, userId); table = "stats_workers_hour"; } // user @@ -367,8 +398,7 @@ void ShareLogParserT::generateHoursData(shared_ptr> // pool else if (userId == 0 && workerId == 0) { table = "stats_pool_hour"; - } - else { + } else { LOG(ERROR) << "unknown stats type"; return; } @@ -382,24 +412,33 @@ void ShareLogParserT::generateHoursData(shared_ptr> if ((stats->modifyHoursFlag_ & flag) == 0x0u) { continue; } - const string hourStr = Strings::Format("%s%02d", date("%Y%m%d", date_).c_str(), i); + const string hourStr = + Strings::Format("%s%02d", date("%Y%m%d", date_).c_str(), i); const int32_t hour = atoi(hourStr.c_str()); - const uint64_t accept = stats->shareAccept1h_[i]; // alias - const uint64_t reject = stats->shareReject1h_[i]; + const uint64_t accept = 
stats->shareAccept1h_[i]; // alias + const uint64_t reject = stats->shareReject1h_[i]; double rejectRate = 0.0; if (reject) - rejectRate = (double)reject / (accept + reject); - const string nowStr = date("%F %T"); + rejectRate = (double)reject / (accept + reject); + const string nowStr = date("%F %T"); const string scoreStr = score2Str(stats->score1h_[i]); - const double earn = stats->earn1h_[i]; - - valuesStr = Strings::Format("%s %d,%" PRIu64",%" PRIu64"," - " %lf,'%s',%0.0lf,'%s','%s'", - extraValues.c_str(), - hour, accept, reject, rejectRate, scoreStr.c_str(), - earn, nowStr.c_str(), nowStr.c_str()); - } // for scope lock + const double earn = stats->earn1h_[i]; + + valuesStr = Strings::Format( + "%s %d,%" PRIu64 ",%" PRIu64 + "," + " %lf,'%s',%0.0lf,'%s','%s'", + extraValues.c_str(), + hour, + accept, + reject, + rejectRate, + scoreStr.c_str(), + earn, + nowStr.c_str(), + nowStr.c_str()); + } // for scope lock if (table == "stats_workers_hour") { valuesWorkersHour->push_back(valuesStr); @@ -412,16 +451,17 @@ void ShareLogParserT::generateHoursData(shared_ptr> } template -void ShareLogParserT::flushHourOrDailyData(const vector values, - const string &tableName, - const string &extraFields) { +void ShareLogParserT::flushHourOrDailyData( + const vector values, + const string &tableName, + const string &extraFields) { string mergeSQL; string fields; // in case two process use the same tmp table name, we add process id into // tmp table name. 
- const string tmpTableName = Strings::Format("%s_tmp_%d", - tableName.c_str(), getpid()); + const string tmpTableName = + Strings::Format("%s_tmp_%d", tableName.c_str(), getpid()); if (!poolDB_.ping()) { LOG(ERROR) << "can't connect to pool DB"; @@ -434,11 +474,13 @@ void ShareLogParserT::flushHourOrDailyData(const vector values, } // drop tmp table - const string sqlDropTmpTable = Strings::Format("DROP TEMPORARY TABLE IF EXISTS `%s`;", - tmpTableName.c_str()); + const string sqlDropTmpTable = Strings::Format( + "DROP TEMPORARY TABLE IF EXISTS `%s`;", tmpTableName.c_str()); // create tmp table - const string createTmpTable = Strings::Format("CREATE TEMPORARY TABLE `%s` like `%s`;", - tmpTableName.c_str(), tableName.c_str()); + const string createTmpTable = Strings::Format( + "CREATE TEMPORARY TABLE `%s` like `%s`;", + tmpTableName.c_str(), + tableName.c_str()); if (!poolDB_.execute(sqlDropTmpTable)) { LOG(ERROR) << "DROP TEMPORARY TABLE `" << tmpTableName << "` failure"; @@ -452,8 +494,10 @@ void ShareLogParserT::flushHourOrDailyData(const vector values, } // fields for table.stats_xxxxx_hour - fields = Strings::Format("%s `share_accept`,`share_reject`,`reject_rate`," - "`score`,`earn`,`created_at`,`updated_at`", extraFields.c_str()); + fields = Strings::Format( + "%s `share_accept`,`share_reject`,`reject_rate`," + "`score`,`earn`,`created_at`,`updated_at`", + extraFields.c_str()); if (!multiInsert(poolDB_, tmpTableName, fields, values)) { LOG(ERROR) << "multi-insert table." 
<< tmpTableName << " failure"; @@ -461,17 +505,19 @@ void ShareLogParserT::flushHourOrDailyData(const vector values, } // merge two table items - mergeSQL = Strings::Format("INSERT INTO `%s` " - " SELECT * FROM `%s` AS `t2` " - " ON DUPLICATE KEY " - " UPDATE " - " `share_accept` = `t2`.`share_accept`, " - " `share_reject` = `t2`.`share_reject`, " - " `reject_rate` = `t2`.`reject_rate`, " - " `score` = `t2`.`score`, " - " `earn` = `t2`.`earn`, " - " `updated_at` = `t2`.`updated_at` ", - tableName.c_str(), tmpTableName.c_str()); + mergeSQL = Strings::Format( + "INSERT INTO `%s` " + " SELECT * FROM `%s` AS `t2` " + " ON DUPLICATE KEY " + " UPDATE " + " `share_accept` = `t2`.`share_accept`, " + " `share_reject` = `t2`.`share_reject`, " + " `reject_rate` = `t2`.`reject_rate`, " + " `score` = `t2`.`score`, " + " `earn` = `t2`.`earn`, " + " `updated_at` = `t2`.`updated_at` ", + tableName.c_str(), + tmpTableName.c_str()); if (!poolDB_.update(mergeSQL)) { LOG(ERROR) << "merge mining_workers failure"; return; @@ -484,16 +530,17 @@ void ShareLogParserT::flushHourOrDailyData(const vector values, } template -void ShareLogParserT::generateDailyData(shared_ptr> stats, - const int32_t userId, - const int64_t workerId, - vector *valuesWorkersDay, - vector *valuesUsersDay, - vector *valuesPoolDay) { +void ShareLogParserT::generateDailyData( + shared_ptr> stats, + const int32_t userId, + const int64_t workerId, + vector *valuesWorkersDay, + vector *valuesUsersDay, + vector *valuesPoolDay) { string table, extraValues; // worker if (userId != 0 && workerId != 0) { - extraValues = Strings::Format("% " PRId64",%d,", workerId, userId); + extraValues = Strings::Format("% " PRId64 ",%d,", workerId, userId); table = "stats_workers_day"; } // user @@ -504,8 +551,7 @@ void ShareLogParserT::generateDailyData(shared_ptr> // pool else if (userId == 0 && workerId == 0) { table = "stats_pool_day"; - } - else { + } else { LOG(ERROR) << "unknown stats type"; return; } @@ -515,21 +561,29 @@ void 
ShareLogParserT::generateDailyData(shared_ptr> ScopeLock sl(stats->lock_); const int32_t day = atoi(date("%Y%m%d", date_).c_str()); - const uint64_t accept = stats->shareAccept1d_; // alias - const uint64_t reject = stats->shareReject1d_; + const uint64_t accept = stats->shareAccept1d_; // alias + const uint64_t reject = stats->shareReject1d_; double rejectRate = 0.0; if (reject) rejectRate = (double)reject / (accept + reject); - const string nowStr = date("%F %T"); + const string nowStr = date("%F %T"); const string scoreStr = score2Str(stats->score1d_); - const double earn = stats->earn1d_; - - valuesStr = Strings::Format("%s %d,%" PRIu64",%" PRIu64"," - " %lf,'%s',%0.0lf,'%s','%s'", - extraValues.c_str(), - day, accept, reject, rejectRate, scoreStr.c_str(), - earn, nowStr.c_str(), nowStr.c_str()); - } // for scope lock + const double earn = stats->earn1d_; + + valuesStr = Strings::Format( + "%s %d,%" PRIu64 ",%" PRIu64 + "," + " %lf,'%s',%0.0lf,'%s','%s'", + extraValues.c_str(), + day, + accept, + reject, + rejectRate, + scoreStr.c_str(), + earn, + nowStr.c_str(), + nowStr.c_str()); + } // for scope lock if (table == "stats_workers_day") { valuesWorkersDay->push_back(valuesStr); @@ -541,7 +595,8 @@ void ShareLogParserT::generateDailyData(shared_ptr> } template -shared_ptr> ShareLogParserT::getShareStatsDayHandler(const WorkerKey &key) { +shared_ptr> +ShareLogParserT::getShareStatsDayHandler(const WorkerKey &key) { pthread_rwlock_rdlock(&rwlock_); auto itr = workersStats_.find(key); pthread_rwlock_unlock(&rwlock_); @@ -570,13 +625,13 @@ void ShareLogParserT::removeExpiredDataFromDB() { // { const int32_t kDailyDataKeepDays_workers = 90; // 3 months - const string dayStr = date("%Y%m%d", - time(nullptr) - 86400 * kDailyDataKeepDays_workers); - sql = Strings::Format("DELETE FROM `stats_workers_day` WHERE `day` < '%s'", - dayStr.c_str()); + const string dayStr = + date("%Y%m%d", time(nullptr) - 86400 * kDailyDataKeepDays_workers); + sql = Strings::Format( + "DELETE 
FROM `stats_workers_day` WHERE `day` < '%s'", dayStr.c_str()); if (poolDB_.execute(sql)) { - LOG(INFO) << "delete expired workers daily data before '"<< dayStr - << "', count: " << poolDB_.affectedRows(); + LOG(INFO) << "delete expired workers daily data before '" << dayStr + << "', count: " << poolDB_.affectedRows(); } } @@ -584,14 +639,15 @@ void ShareLogParserT::removeExpiredDataFromDB() { // table.stats_workers_hour // { - const int32_t kHourDataKeepDays_workers = 24*3; // 3 days - const string hourStr = date("%Y%m%d%H", - time(nullptr) - 3600 * kHourDataKeepDays_workers); - sql = Strings::Format("DELETE FROM `stats_workers_hour` WHERE `hour` < '%s'", - hourStr.c_str()); + const int32_t kHourDataKeepDays_workers = 24 * 3; // 3 days + const string hourStr = + date("%Y%m%d%H", time(nullptr) - 3600 * kHourDataKeepDays_workers); + sql = Strings::Format( + "DELETE FROM `stats_workers_hour` WHERE `hour` < '%s'", + hourStr.c_str()); if (poolDB_.execute(sql)) { - LOG(INFO) << "delete expired workers hour data before '"<< hourStr - << "', count: " << poolDB_.affectedRows(); + LOG(INFO) << "delete expired workers hour data before '" << hourStr + << "', count: " << poolDB_.affectedRows(); } } @@ -599,14 +655,14 @@ void ShareLogParserT::removeExpiredDataFromDB() { // table.stats_users_hour // { - const int32_t kHourDataKeepDays_users = 24*30; // 30 days - const string hourStr = date("%Y%m%d%H", - time(nullptr) - 3600 * kHourDataKeepDays_users); - sql = Strings::Format("DELETE FROM `stats_users_hour` WHERE `hour` < '%s'", - hourStr.c_str()); + const int32_t kHourDataKeepDays_users = 24 * 30; // 30 days + const string hourStr = + date("%Y%m%d%H", time(nullptr) - 3600 * kHourDataKeepDays_users); + sql = Strings::Format( + "DELETE FROM `stats_users_hour` WHERE `hour` < '%s'", hourStr.c_str()); if (poolDB_.execute(sql)) { - LOG(INFO) << "delete expired users hour data before '"<< hourStr - << "', count: " << poolDB_.affectedRows(); + LOG(INFO) << "delete expired users hour data 
before '" << hourStr + << "', count: " << poolDB_.affectedRows(); } } } @@ -629,10 +685,10 @@ bool ShareLogParserT::flushToDB() { pthread_rwlock_rdlock(&rwlock_); for (const auto &itr : workersStats_) { if (itr.second->modifyHoursFlag_ == 0x0u) { - continue; // no new data, ignore + continue; // no new data, ignore } keys.push_back(itr.first); - stats.push_back(itr.second); // shared_ptr increase ref here + stats.push_back(itr.second); // shared_ptr increase ref here } pthread_rwlock_unlock(&rwlock_); @@ -650,30 +706,45 @@ bool ShareLogParserT::flushToDB() { // // the lock is in flushDailyData() & flushHoursData(), so maybe we lost // some data between func gaps, but it's not important. we will exec - // processUnchangedShareLog() after the day has been past, no data will lost by than. + // processUnchangedShareLog() after the day has been past, no data will lost + // by than. // - generateHoursData(stats[i], keys[i].userId_, keys[i].workerId_, - &valuesWorkersHour, &valuesUsersHour, &valuesPoolHour); - generateDailyData(stats[i], keys[i].userId_, keys[i].workerId_, - &valuesWorkersDay, &valuesUsersDay, &valuesPoolDay); - - stats[i]->modifyHoursFlag_ = 0x0u; // reset flag + generateHoursData( + stats[i], + keys[i].userId_, + keys[i].workerId_, + &valuesWorkersHour, + &valuesUsersHour, + &valuesPoolHour); + generateDailyData( + stats[i], + keys[i].userId_, + keys[i].workerId_, + &valuesWorkersDay, + &valuesUsersDay, + &valuesPoolDay); + + stats[i]->modifyHoursFlag_ = 0x0u; // reset flag } LOG(INFO) << "generated sql values"; size_t counter = 0; // flush hours data - flushHourOrDailyData(valuesWorkersHour, "stats_workers_hour", "`worker_id`,`puid`,`hour`,"); - flushHourOrDailyData(valuesUsersHour, "stats_users_hour" , "`puid`,`hour`,"); - flushHourOrDailyData(valuesPoolHour, "stats_pool_hour" , "`hour`,"); - counter += valuesWorkersHour.size() + valuesUsersHour.size() + valuesPoolHour.size(); + flushHourOrDailyData( + valuesWorkersHour, "stats_workers_hour", 
"`worker_id`,`puid`,`hour`,"); + flushHourOrDailyData(valuesUsersHour, "stats_users_hour", "`puid`,`hour`,"); + flushHourOrDailyData(valuesPoolHour, "stats_pool_hour", "`hour`,"); + counter += + valuesWorkersHour.size() + valuesUsersHour.size() + valuesPoolHour.size(); // flush daily data - flushHourOrDailyData(valuesWorkersDay, "stats_workers_day", "`worker_id`,`puid`,`day`,"); - flushHourOrDailyData(valuesUsersDay, "stats_users_day" , "`puid`,`day`,"); - flushHourOrDailyData(valuesPoolDay, "stats_pool_day" , "`day`,"); - counter += valuesWorkersDay.size() + valuesUsersDay.size() + valuesPoolDay.size(); + flushHourOrDailyData( + valuesWorkersDay, "stats_workers_day", "`worker_id`,`puid`,`day`,"); + flushHourOrDailyData(valuesUsersDay, "stats_users_day", "`puid`,`day`,"); + flushHourOrDailyData(valuesPoolDay, "stats_pool_day", "`day`,"); + counter += + valuesWorkersDay.size() + valuesUsersDay.size() + valuesPoolDay.size(); // done: daily data and hour data LOG(INFO) << "flush to DB... done, items: " << counter; @@ -684,26 +755,34 @@ bool ShareLogParserT::flushToDB() { return true; } - -//////////////////////////// ShareLogParserServerT //////////////////////////// +//////////////////////////// ShareLogParserServerT +/////////////////////////////// template -ShareLogParserServerT::ShareLogParserServerT(const char *chainType, - const string dataDir, - const string &httpdHost, - unsigned short httpdPort, - const MysqlConnectInfo &poolDBInfo, - const uint32_t kFlushDBInterval, - shared_ptr> dupShareChecker): -running_(true), chainType_(chainType), dataDir_(dataDir), -poolDBInfo_(poolDBInfo), kFlushDBInterval_(kFlushDBInterval), -dupShareChecker_(dupShareChecker), -base_(nullptr), httpdHost_(httpdHost), httpdPort_(httpdPort), -requestCount_(0), responseBytes_(0) -{ +ShareLogParserServerT::ShareLogParserServerT( + const char *chainType, + const string dataDir, + const string &httpdHost, + unsigned short httpdPort, + const MysqlConnectInfo &poolDBInfo, + const uint32_t 
kFlushDBInterval, + shared_ptr> dupShareChecker, + bool acceptStale) + : running_(true) + , chainType_(chainType) + , dataDir_(dataDir) + , poolDBInfo_(poolDBInfo) + , kFlushDBInterval_(kFlushDBInterval) + , dupShareChecker_(dupShareChecker) + , acceptStale_(acceptStale) + , base_(nullptr) + , httpdHost_(httpdHost) + , httpdPort_(httpdPort) + , requestCount_(0) + , responseBytes_(0) { const time_t now = time(nullptr); uptime_ = now; - date_ = now - (now % 86400); + date_ = now - (now % 86400); pthread_rwlock_init(&rwlock_, nullptr); } @@ -738,9 +817,15 @@ bool ShareLogParserServerT::initShareLogParser(time_t datets) { shareLogParser_ = nullptr; // set new obj - shared_ptr> parser = std::make_shared>( - chainType_.c_str(), dataDir_, date_, poolDBInfo_, dupShareChecker_); - + shared_ptr> parser = + std::make_shared>( + chainType_.c_str(), + dataDir_, + date_, + poolDBInfo_, + dupShareChecker_, + acceptStale_); + if (!parser->init()) { LOG(ERROR) << "parser check failure, date: " << date("%F", date_); pthread_rwlock_unlock(&rwlock_); @@ -753,12 +838,15 @@ bool ShareLogParserServerT::initShareLogParser(time_t datets) { } template -void ShareLogParserServerT::getShareStats(struct evbuffer *evb, const char *pUserId, - const char *pWorkerId, const char *pHour) { +void ShareLogParserServerT::getShareStats( + struct evbuffer *evb, + const char *pUserId, + const char *pWorkerId, + const char *pHour) { vector vHoursStr; vector vWorkerIdsStr; vector keys; - vector hours; // range: -23, -22, ..., 0, 24 + vector hours; // range: -23, -22, ..., 0, 24 const int32_t userId = atoi(pUserId); // split by ',' @@ -789,7 +877,8 @@ void ShareLogParserServerT::getShareStats(struct evbuffer *evb, const cha // output json string for (size_t i = 0; i < keys.size(); i++) { - evbuffer_add_printf(evb, "%s\"%" PRId64"\":[", (i == 0 ? "" : ","), keys[i].workerId_); + evbuffer_add_printf( + evb, "%s\"%" PRId64 "\":[", (i == 0 ? 
"" : ","), keys[i].workerId_); for (size_t j = 0; j < hours.size(); j++) { ShareStats *s = &shareStats[i * hours.size() + j]; @@ -797,22 +886,30 @@ void ShareLogParserServerT::getShareStats(struct evbuffer *evb, const cha double rejectRate = 0.0; if (s->shareReject_ != 0) - rejectRate = 1.0 * s->shareReject_ / (s->shareAccept_ + s->shareReject_); - - evbuffer_add_printf(evb, - "%s{\"hour\":%d,\"accept\":%" PRIu64",\"reject\":%" PRIu64"," - "\"reject_rate\":%lf,\"earn\":%0.0lf}", - (j == 0 ? "" : ","), hour, - s->shareAccept_, s->shareReject_, rejectRate, s->earn_); + rejectRate = + 1.0 * s->shareReject_ / (s->shareAccept_ + s->shareReject_); + + evbuffer_add_printf( + evb, + "%s{\"hour\":%d,\"accept\":%" PRIu64 ",\"reject\":%" PRIu64 + "," + "\"reject_rate\":%lf,\"earn\":%0.0lf}", + (j == 0 ? "" : ","), + hour, + s->shareAccept_, + s->shareReject_, + rejectRate, + s->earn_); } evbuffer_add_printf(evb, "]"); } } template -void ShareLogParserServerT::_getShareStats(const vector &keys, - const vector &hours, - vector &shareStats) { +void ShareLogParserServerT::_getShareStats( + const vector &keys, + const vector &hours, + vector &shareStats) { pthread_rwlock_rdlock(&rwlock_); shared_ptr> shareLogParser = shareLogParser_; pthread_rwlock_unlock(&rwlock_); @@ -821,7 +918,8 @@ void ShareLogParserServerT::_getShareStats(const vector &keys, return; for (size_t i = 0; i < keys.size(); i++) { - shared_ptr> statsDay = shareLogParser->getShareStatsDayHandler(keys[i]); + shared_ptr> statsDay = + shareLogParser->getShareStatsDayHandler(keys[i]); if (statsDay == nullptr) continue; @@ -840,15 +938,15 @@ void ShareLogParserServerT::_getShareStats(const vector &keys, } template -void ShareLogParserServerT::httpdShareStats(struct evhttp_request *req, - void *arg) { - evhttp_add_header(evhttp_request_get_output_headers(req), - "Content-Type", "text/json"); +void ShareLogParserServerT::httpdShareStats( + struct evhttp_request *req, void *arg) { + evhttp_add_header( + 
evhttp_request_get_output_headers(req), "Content-Type", "text/json"); ShareLogParserServerT *server = (ShareLogParserServerT *)arg; server->requestCount_++; evhttp_cmd_type rMethod = evhttp_request_get_command(req); - char *query = nullptr; // remember free it + char *query = nullptr; // remember free it if (rMethod == EVHTTP_REQ_GET) { // GET @@ -858,15 +956,14 @@ void ShareLogParserServerT::httpdShareStats(struct evhttp_request *req, query = strdup(uriQuery); evhttp_uri_free(uri); } - } - else if (rMethod == EVHTTP_REQ_POST) { + } else if (rMethod == EVHTTP_REQ_POST) { // POST struct evbuffer *evbIn = evhttp_request_get_input_buffer(req); size_t len = 0; if (evbIn != nullptr && (len = evbuffer_get_length(evbIn)) > 0) { query = (char *)malloc(len + 1); evbuffer_copyout(evbIn, query, len); - query[len] = '\0'; // evbuffer is not include '\0' + query[len] = '\0'; // evbuffer is not include '\0' } } @@ -885,9 +982,9 @@ void ShareLogParserServerT::httpdShareStats(struct evhttp_request *req, // parse query struct evkeyvalq params; evhttp_parse_query_str(query, ¶ms); - const char *pUserId = evhttp_find_header(¶ms, "user_id"); + const char *pUserId = evhttp_find_header(¶ms, "user_id"); const char *pWorkerId = evhttp_find_header(¶ms, "worker_id"); - const char *pHour = evhttp_find_header(¶ms, "hour"); + const char *pHour = evhttp_find_header(¶ms, "hour"); if (pUserId == nullptr || pWorkerId == nullptr || pHour == nullptr) { evbuffer_add_printf(evb, "{\"err_no\":1,\"err_msg\":\"invalid args\"}"); @@ -910,10 +1007,11 @@ finish: } template -void ShareLogParserServerT::getServerStatus(ShareLogParserServerT::ServerStatus &s) { - s.date_ = date_; - s.uptime_ = (uint32_t)(time(nullptr) - uptime_); - s.requestCount_ = requestCount_; +void ShareLogParserServerT::getServerStatus( + ShareLogParserServerT::ServerStatus &s) { + s.date_ = date_; + s.uptime_ = (uint32_t)(time(nullptr) - uptime_); + s.requestCount_ = requestCount_; s.responseBytes_ = responseBytes_; 
pthread_rwlock_rdlock(&rwlock_); @@ -921,7 +1019,8 @@ void ShareLogParserServerT::getServerStatus(ShareLogParserServerT: pthread_rwlock_unlock(&rwlock_); WorkerKey pkey(0, 0); - shared_ptr> statsDayPtr = shareLogParser->getShareStatsDayHandler(pkey); + shared_ptr> statsDayPtr = + shareLogParser->getShareStatsDayHandler(pkey); s.stats.resize(2); statsDayPtr->getShareStatsDay(&(s.stats[0])); @@ -929,9 +1028,10 @@ void ShareLogParserServerT::getServerStatus(ShareLogParserServerT: } template -void ShareLogParserServerT::httpdServerStatus(struct evhttp_request *req, void *arg) { - evhttp_add_header(evhttp_request_get_output_headers(req), - "Content-Type", "text/json"); +void ShareLogParserServerT::httpdServerStatus( + struct evhttp_request *req, void *arg) { + evhttp_add_header( + evhttp_request_get_output_headers(req), "Content-Type", "text/json"); ShareLogParserServerT *server = (ShareLogParserServerT *)arg; server->requestCount_++; @@ -942,34 +1042,50 @@ void ShareLogParserServerT::httpdServerStatus(struct evhttp_request *req, double rejectRate0 = 0.0, rejectRate1 = 0.0; if (s.stats[0].shareReject_) - rejectRate0 = s.stats[0].shareReject_ / (s.stats[0].shareAccept_ + s.stats[0].shareReject_); + rejectRate0 = s.stats[0].shareReject_ / + (s.stats[0].shareAccept_ + s.stats[0].shareReject_); if (s.stats[1].shareReject_) - rejectRate1 = s.stats[1].shareReject_ / (s.stats[1].shareAccept_ + s.stats[1].shareReject_); + rejectRate1 = s.stats[1].shareReject_ / + (s.stats[1].shareAccept_ + s.stats[1].shareReject_); time_t now = time(nullptr); if (now % 3600 == 0) - now += 2; // just in case the denominator is zero - - evbuffer_add_printf(evb, "{\"err_no\":0,\"err_msg\":\"\"," - "\"data\":{\"uptime\":\"%04u d %02u h %02u m %02u s\"," - "\"request\":%" PRIu64",\"repbytes\":%" PRIu64"," - "\"pool\":{\"today\":{" - "\"hashrate_t\":%lf,\"accept\":%" PRIu64"," - "\"reject\":%" PRIu64",\"reject_rate\":%lf,\"earn\":%0.0lf}," - "\"curr_hour\":{\"hashrate_t\":%lf,\"accept\":%" PRIu64"," 
- "\"reject\":%" PRIu64",\"reject_rate\":%lf,\"earn\":%0.0lf}}" - "}}", - s.uptime_/86400, (s.uptime_%86400)/3600, - (s.uptime_%3600)/60, s.uptime_%60, - s.requestCount_, s.responseBytes_, - // pool today - share2HashrateT(s.stats[0].shareAccept_, now % 86400), - s.stats[0].shareAccept_, - s.stats[0].shareReject_, rejectRate0, s.stats[0].earn_, - // pool current hour - share2HashrateT(s.stats[1].shareAccept_, now % 3600), - s.stats[1].shareAccept_, - s.stats[1].shareReject_, rejectRate1, s.stats[1].earn_); + now += 2; // just in case the denominator is zero + + evbuffer_add_printf( + evb, + "{\"err_no\":0,\"err_msg\":\"\"," + "\"data\":{\"uptime\":\"%04u d %02u h %02u m %02u s\"," + "\"request\":%" PRIu64 ",\"repbytes\":%" PRIu64 + "," + "\"pool\":{\"today\":{" + "\"hashrate_t\":%lf,\"accept\":%" PRIu64 + "," + "\"reject\":%" PRIu64 + ",\"reject_rate\":%lf,\"earn\":%0.0lf}," + "\"curr_hour\":{\"hashrate_t\":%lf,\"accept\":%" PRIu64 + "," + "\"reject\":%" PRIu64 + ",\"reject_rate\":%lf,\"earn\":%0.0lf}}" + "}}", + s.uptime_ / 86400, + (s.uptime_ % 86400) / 3600, + (s.uptime_ % 3600) / 60, + s.uptime_ % 60, + s.requestCount_, + s.responseBytes_, + // pool today + share2HashrateT(s.stats[0].shareAccept_, now % 86400), + s.stats[0].shareAccept_, + s.stats[0].shareReject_, + rejectRate0, + s.stats[0].earn_, + // pool current hour + share2HashrateT(s.stats[1].shareAccept_, now % 3600), + s.stats[1].shareAccept_, + s.stats[1].shareReject_, + rejectRate1, + s.stats[1].earn_); server->responseBytes_ += evbuffer_get_length(evb); evhttp_send_reply(req, HTTP_OK, "OK", evb); @@ -984,16 +1100,28 @@ void ShareLogParserServerT::runHttpd() { base_ = event_base_new(); httpd = evhttp_new(base_); - evhttp_set_allowed_methods(httpd, EVHTTP_REQ_GET | EVHTTP_REQ_POST | EVHTTP_REQ_HEAD); + evhttp_set_allowed_methods( + httpd, EVHTTP_REQ_GET | EVHTTP_REQ_POST | EVHTTP_REQ_HEAD); evhttp_set_timeout(httpd, 5 /* timeout in seconds */); - evhttp_set_cb(httpd, "/", 
ShareLogParserServerT::httpdServerStatus, this); - evhttp_set_cb(httpd, "/share_stats", ShareLogParserServerT::httpdShareStats, this); - evhttp_set_cb(httpd, "/share_stats/", ShareLogParserServerT::httpdShareStats, this); - - handle = evhttp_bind_socket_with_handle(httpd, httpdHost_.c_str(), httpdPort_); + evhttp_set_cb( + httpd, "/", ShareLogParserServerT::httpdServerStatus, this); + evhttp_set_cb( + httpd, + "/share_stats", + ShareLogParserServerT::httpdShareStats, + this); + evhttp_set_cb( + httpd, + "/share_stats/", + ShareLogParserServerT::httpdShareStats, + this); + + handle = + evhttp_bind_socket_with_handle(httpd, httpdHost_.c_str(), httpdPort_); if (!handle) { - LOG(ERROR) << "couldn't bind to port: " << httpdPort_ << ", host: " << httpdHost_ << ", exiting."; + LOG(ERROR) << "couldn't bind to port: " << httpdPort_ + << ", host: " << httpdHost_ << ", exiting."; return; } event_base_dispatch(base_); @@ -1001,7 +1129,8 @@ void ShareLogParserServerT::runHttpd() { template bool ShareLogParserServerT::setupThreadShareLogParser() { - threadShareLogParser_ = thread(&ShareLogParserServerT::runThreadShareLogParser, this); + threadShareLogParser_ = + thread(&ShareLogParserServerT::runThreadShareLogParser, this); return true; } @@ -1044,7 +1173,7 @@ void ShareLogParserServerT::runThreadShareLogParser() { // flush data to db if (time(nullptr) > lastFlushDBTime + kFlushDBInterval_) { - shareLogParser->flushToDB(); // will wait util all data flush to DB + shareLogParser->flushToDB(); // will wait util all data flush to DB lastFlushDBTime = time(nullptr); } @@ -1060,18 +1189,19 @@ void ShareLogParserServerT::runThreadShareLogParser() { LOG(INFO) << "thread sharelog parser stop"; - stop(); // if thread exit, we must call server to stop + stop(); // if thread exit, we must call server to stop } template -void ShareLogParserServerT::trySwitchBinFile(shared_ptr> shareLogParser) { +void ShareLogParserServerT::trySwitchBinFile( + shared_ptr> shareLogParser) { 
assert(shareLogParser != nullptr); const time_t now = time(nullptr); const time_t beginTs = now - (now % 86400); if (beginTs == date_) - return; // still today + return; // still today // // switch file when: @@ -1080,11 +1210,9 @@ void ShareLogParserServerT::trySwitchBinFile(shared_ptr beginTs + 5 && - shareLogParser->isReachEOF() && - fileNonEmpty(filePath.c_str())) - { - shareLogParser->flushToDB(); // flush data + if (now > beginTs + 5 && shareLogParser->isReachEOF() && + fileNonEmpty(filePath.c_str())) { + shareLogParser->flushToDB(); // flush data bool res = initShareLogParser(now); if (!res) { @@ -1106,4 +1234,3 @@ void ShareLogParserServerT::run() { runHttpd(); } - diff --git a/src/ShareLogger.h b/src/ShareLogger.h index c0c124567..aa91b1958 100644 --- a/src/ShareLogger.h +++ b/src/ShareLogger.h @@ -30,25 +30,25 @@ #include "zlibstream/zstr.hpp" - ////////////////////////////// ShareLogWriter /////////////////////////////// // Interface, used as a pointer type. class ShareLogWriter { public: - virtual ~ShareLogWriter() {}; + virtual ~ShareLogWriter(){}; virtual void stop() = 0; virtual void run() = 0; }; -////////////////////////////// ShareLogWriterT ///////////////////////////////// +////////////////////////////// ShareLogWriterT +//////////////////////////////////// // 1. consume topic 'ShareLog' // 2. write sharelog to Disk // -template +template class ShareLogWriterT : public ShareLogWriter { atomic running_; - string dataDir_; // where to put sharelog data files - + string dataDir_; // where to put sharelog data files + // zlib/gzip compression level: -1 to 9. // -1: defaule level, 0: non-compression, 1: best speed, 9: best size. 
int compressionLevel_; @@ -59,17 +59,21 @@ class ShareLogWriterT : public ShareLogWriter { std::vector shares_; const string chainType_; - KafkaHighLevelConsumer hlConsumer_; // consume topic: shareLogTopic + KafkaHighLevelConsumer hlConsumer_; // consume topic: shareLogTopic - zstr::ofstream* getFileHandler(uint32_t ts); + zstr::ofstream *getFileHandler(uint32_t ts); void consumeShareLog(rd_kafka_message_t *rkmessage); bool flushToDisk(); void tryCloseOldHanders(); public: - ShareLogWriterT(const char *chainType, const char *kafkaBrokers, const string &dataDir, - const string &kafkaGroupID, const char *shareLogTopic, - const int compressionLevel = Z_DEFAULT_COMPRESSION); + ShareLogWriterT( + const char *chainType, + const char *kafkaBrokers, + const string &dataDir, + const string &kafkaGroupID, + const char *shareLogTopic, + const int compressionLevel = Z_DEFAULT_COMPRESSION); ~ShareLogWriterT(); void stop(); diff --git a/src/ShareLogger.inl b/src/ShareLogger.inl index e41a28e58..773cad665 100644 --- a/src/ShareLogger.inl +++ b/src/ShareLogger.inl @@ -25,30 +25,32 @@ #include ////////////////////////////// ShareLogWriterT /////////////////////////////// -template -ShareLogWriterT::ShareLogWriterT(const char *chainType, - const char *kafkaBrokers, - const string &dataDir, - const string &kafkaGroupID, - const char *shareLogTopic, - const int compressionLevel) -:running_(true), dataDir_(dataDir), -compressionLevel_(compressionLevel), chainType_(chainType), -hlConsumer_(kafkaBrokers, shareLogTopic, 0/* patition */, kafkaGroupID) -{ +template +ShareLogWriterT::ShareLogWriterT( + const char *chainType, + const char *kafkaBrokers, + const string &dataDir, + const string &kafkaGroupID, + const char *shareLogTopic, + const int compressionLevel) + : running_(true) + , dataDir_(dataDir) + , compressionLevel_(compressionLevel) + , chainType_(chainType) + , hlConsumer_(kafkaBrokers, shareLogTopic, 0 /* patition */, kafkaGroupID) { } -template +template 
ShareLogWriterT::~ShareLogWriterT() { // close file handlers - for (auto & itr : fileHandlers_) { + for (auto &itr : fileHandlers_) { LOG(INFO) << "fclose file handler, date: " << date("%F", itr.first); delete itr.second; } fileHandlers_.clear(); } -template +template void ShareLogWriterT::stop() { if (!running_) return; @@ -56,8 +58,8 @@ void ShareLogWriterT::stop() { running_ = false; } -template -zstr::ofstream * ShareLogWriterT::getFileHandler(uint32_t ts) { +template +zstr::ofstream *ShareLogWriterT::getFileHandler(uint32_t ts) { string filePath; try { @@ -68,7 +70,10 @@ zstr::ofstream * ShareLogWriterT::getFileHandler(uint32_t ts) { filePath = getStatsFilePath(chainType_.c_str(), dataDir_, ts); LOG(INFO) << "fopen: " << filePath; - zstr::ofstream *f = new zstr::ofstream(filePath, std::ios::app | std::ios::binary, compressionLevel_); // append mode, bin file + zstr::ofstream *f = new zstr::ofstream( + filePath, + std::ios::app | std::ios::binary, + compressionLevel_); // append mode, bin file if (!*f) { LOG(FATAL) << "fopen file fail: " << filePath; return nullptr; @@ -83,27 +88,29 @@ zstr::ofstream * ShareLogWriterT::getFileHandler(uint32_t ts) { } } -template +template void ShareLogWriterT::consumeShareLog(rd_kafka_message_t *rkmessage) { // check error if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. // Not really an error. 
- // LOG(INFO) << "consumer reached end of " << rd_kafka_topic_name(rkmessage->rkt) + // LOG(INFO) << "consumer reached end of " << + // rd_kafka_topic_name(rkmessage->rkt) // << "[" << rkmessage->partition << "] " // << " message queue at offset " << rkmessage->offset; // acturlly return; } - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { - LOG(FATAL) << "consume fatal"; + LOG(FATAL) << "consume fatal"; } return; } @@ -119,18 +126,22 @@ void ShareLogWriterT::consumeShareLog(rd_kafka_message_t *rkmessage) { // uint32_t headlength = *((uint32_t*)payload); // if (rkmessage->len < sizeof(uint32_t) + headlength) { - // LOG(ERROR) << "invalid share , kafka message size : "<< rkmessage->len << " < complete share size " << + // LOG(ERROR) << "invalid share , kafka message size : "<< rkmessage->len << + // " < complete share size " << // headlength + sizeof(uint32_t); // return; // } - // if (!share.ParseFromArray((const uint8_t *)(payload + sizeof(uint32_t)), headlength)) { - // LOG(ERROR) << "parse share from kafka message failed rkmessage->len = "<< rkmessage->len ; - // return; + // if (!share.ParseFromArray((const uint8_t *)(payload + sizeof(uint32_t)), + // headlength)) { + // LOG(ERROR) << "parse share from kafka message failed rkmessage->len = "<< + // rkmessage->len ; return; // } - if (!share.UnserializeWithVersion((const uint8_t *)(rkmessage->payload), rkmessage->len)) { - LOG(ERROR) << "parse share from kafka message failed rkmessage->len = "<< rkmessage->len ; + if (!share.UnserializeWithVersion( + 
(const uint8_t *)(rkmessage->payload), rkmessage->len)) { + LOG(ERROR) << "parse share from kafka message failed rkmessage->len = " + << rkmessage->len; return; } @@ -145,7 +156,7 @@ void ShareLogWriterT::consumeShareLog(rd_kafka_message_t *rkmessage) { } } -template +template void ShareLogWriterT::tryCloseOldHanders() { while (fileHandlers_.size() > 3) { // Maps (and sets) are sorted, so the first element is the smallest, @@ -159,17 +170,17 @@ void ShareLogWriterT::tryCloseOldHanders() { } } -template +template bool ShareLogWriterT::flushToDisk() { - if(shares_.empty()) + if (shares_.empty()) return true; try { - std::set usedHandlers; + std::set usedHandlers; DLOG(INFO) << "flushToDisk shares count: " << shares_.size(); - for (const auto& share : shares_) { - const uint32_t ts = share.timestamp() - (share.timestamp()% 86400); + for (const auto &share : shares_) { + const uint32_t ts = share.timestamp() - (share.timestamp() % 86400); zstr::ofstream *f = getFileHandler(ts); if (f == nullptr) return false; @@ -178,17 +189,17 @@ bool ShareLogWriterT::flushToDisk() { string message; uint32_t size = 0; - if(!share.SerializeToBuffer(message, size)) { + if (!share.SerializeToBuffer(message, size)) { DLOG(INFO) << "base.SerializeToArray failed!" << std::endl; continue; } - f->write((char*)&size, sizeof(uint32_t)); - f->write((char*)message.data(), size); + f->write((char *)&size, sizeof(uint32_t)); + f->write((char *)message.data(), size); } shares_.clear(); - for (auto & f : usedHandlers) { + for (auto &f : usedHandlers) { DLOG(INFO) << "fflush() file to disk"; f->flush(); } @@ -197,14 +208,14 @@ bool ShareLogWriterT::flushToDisk() { tryCloseOldHanders(); return true; - + } catch (...) 
{ LOG(ERROR) << "write file fail"; return false; } } -template +template void ShareLogWriterT::run() { time_t lastFlushTime = time(nullptr); const int32_t kFlushDiskInterval = 2; @@ -240,12 +251,12 @@ void ShareLogWriterT::run() { if (rkmessage == nullptr) { continue; } - + DLOG(INFO) << "a new message, size: " << rkmessage->len; // consume share log consumeShareLog(rkmessage); - rd_kafka_message_destroy(rkmessage); /* Return message to rdkafka */ + rd_kafka_message_destroy(rkmessage); /* Return message to rdkafka */ } // flush left shares diff --git a/src/Statistics.h b/src/Statistics.h index 7a1bec0fb..eb4928f05 100644 --- a/src/Statistics.h +++ b/src/Statistics.h @@ -32,15 +32,15 @@ // none thread safe template class StatsWindow { - int64_t maxRingIdx_; // max ring idx + int64_t maxRingIdx_; // max ring idx int32_t windowSize_; std::vector elements_; public: StatsWindow(const int windowSize); // TODO -// bool unserialize(const ...); -// void serialize(...); + // bool unserialize(const ...); + // void serialize(...); void clear(); @@ -50,7 +50,7 @@ class StatsWindow { T sum(int64_t beginRingIdx); void mapMultiply(const T val); - void mapDivide (const T val); + void mapDivide(const T val); int32_t getWindowSize() const { return windowSize_; } }; @@ -61,11 +61,12 @@ class WorkerKey { int32_t userId_; int64_t workerId_; - WorkerKey(const int32_t userId, const int64_t workerId): - userId_(userId), workerId_(workerId) {} + WorkerKey(const int32_t userId, const int64_t workerId) + : userId_(userId) + , workerId_(workerId) {} - WorkerKey& operator=(const WorkerKey &r) { - userId_ = r.userId_; + WorkerKey &operator=(const WorkerKey &r) { + userId_ = r.userId_; workerId_ = r.workerId_; return *this; } @@ -80,31 +81,32 @@ class WorkerKey { // we use WorkerKey in std::unordered_map, so need to write it's hash func namespace std { -template<> +template <> struct hash { public: - size_t operator()(const WorkerKey &k) const - { + size_t operator()(const WorkerKey &k) const { 
size_t h1 = std::hash()(k.userId_); size_t h2 = std::hash()(k.workerId_); - return h1 ^ ( h2 << 1 ); + return h1 ^ (h2 << 1); } }; -} - +} // namespace std ///////////////////////////////// ShareStats ///////////////////////////////// class ShareStats { public: uint64_t shareAccept_; uint64_t shareReject_; - double rejectRate_; - double earn_; - - ShareStats(): shareAccept_(0U), shareReject_(0U), rejectRate_(0.0), earn_(0.0) {} + double rejectRate_; + double earn_; + + ShareStats() + : shareAccept_(0U) + , shareReject_(0U) + , rejectRate_(0.0) + , earn_(0.0) {} }; - /////////////////////////////// ShareStatsDay //////////////////////////////// // thread-safe template @@ -113,14 +115,16 @@ class ShareStatsDay { // hours uint64_t shareAccept1h_[24] = {0}; uint64_t shareReject1h_[24] = {0}; - double score1h_[24] = {0.0}; // For reference only, it is no longer the basis for earnings calculation - double earn1h_[24] = {0.0}; + double score1h_[24] = {0.0}; // For reference only, it is no longer the basis + // for earnings calculation + double earn1h_[24] = {0.0}; // daily uint64_t shareAccept1d_ = 0; uint64_t shareReject1d_ = 0; - double score1d_ = 0; // For reference only, it is no longer the basis for earnings calculation - double earn1d_ = 0; + double score1d_ = 0; // For reference only, it is no longer the basis for + // earnings calculation + double earn1d_ = 0; // mark which hour data has been modified: 23, 22, ...., 0 uint32_t modifyHoursFlag_; @@ -130,12 +134,14 @@ class ShareStatsDay { ShareStatsDay(const ShareStatsDay &r) = default; ShareStatsDay &operator=(const ShareStatsDay &r) = default; - void processShare(uint32_t hourIdx, const SHARE &share); + void processShare(uint32_t hourIdx, const SHARE &share, bool acceptStale); + double getShareReward(const SHARE &share); void getShareStatsHour(uint32_t hourIdx, ShareStats *stats); void getShareStatsDay(ShareStats *stats); }; -/////////////////////////////// DuplicateShareCheckerT //////////////////////////////// 
+/////////////////////////////// DuplicateShareCheckerT +/////////////////////////////////// // Used to detect duplicate share attacks. // Interface template @@ -145,7 +151,8 @@ class DuplicateShareChecker { virtual bool addShare(const SHARE &share) = 0; }; -/////////////////////////////// DuplicateShareCheckerT //////////////////////////////// +/////////////////////////////// DuplicateShareCheckerT +/////////////////////////////////// // Used to detect duplicate share attacks on ETH mining. template class DuplicateShareCheckerT : public DuplicateShareChecker { @@ -153,10 +160,10 @@ class DuplicateShareCheckerT : public DuplicateShareChecker { using GShareSet = std::set; DuplicateShareCheckerT(uint32_t trackingHeightNumber) - : trackingHeightNumber_(trackingHeightNumber) - { + : trackingHeightNumber_(trackingHeightNumber) { if (trackingHeightNumber == 0) { - LOG(FATAL) << "DuplicateShareChecker: trackingHeightNumber should not be 0."; + LOG(FATAL) + << "DuplicateShareChecker: trackingHeightNumber should not be 0."; } } @@ -165,7 +172,7 @@ class DuplicateShareCheckerT : public DuplicateShareChecker { auto itr = gset.find(gshare); if (itr != gset.end()) { - return false; // already exist + return false; // already exist } gset.insert(gshare); @@ -181,21 +188,19 @@ class DuplicateShareCheckerT : public DuplicateShareChecker { return addGShare(share.height(), GSHARE(share)); } - size_t gshareSetMapSize() { - return gshareSetMap_.size(); - } + size_t gshareSetMapSize() { return gshareSetMap_.size(); } private: inline void clearExcessGShareSet() { - for ( - auto itr = gshareSetMap_.begin(); - gshareSetMap_.size() > trackingHeightNumber_; - itr = gshareSetMap_.erase(itr) - ); + for (auto itr = gshareSetMap_.begin(); + gshareSetMap_.size() > trackingHeightNumber_; + itr = gshareSetMap_.erase(itr)) + ; } std::map gshareSetMap_; - const uint32_t trackingHeightNumber_; // if set to 3, max(gshareSetMap_.size()) == 3 + const uint32_t + trackingHeightNumber_; // if set to 3, 
max(gshareSetMap_.size()) == 3 }; #include "Statistics.inl" diff --git a/src/Statistics.inl b/src/Statistics.inl index b8032f329..833e37835 100644 --- a/src/Statistics.inl +++ b/src/Statistics.inl @@ -27,7 +27,9 @@ ////////////////////////////////// StatsWindow ///////////////////////////////// template StatsWindow::StatsWindow(const int windowSize) -:maxRingIdx_(-1), windowSize_(windowSize), elements_(windowSize) { + : maxRingIdx_(-1) + , windowSize_(windowSize) + , elements_(windowSize) { } template @@ -53,19 +55,19 @@ void StatsWindow::clear() { template bool StatsWindow::insert(const int64_t curRingIdx, const T val) { - if (maxRingIdx_ > curRingIdx + windowSize_) { // too small index, drop it + if (maxRingIdx_ > curRingIdx + windowSize_) { // too small index, drop it return false; } - if (maxRingIdx_ == -1/* first insert */ || - curRingIdx - maxRingIdx_ > windowSize_/* all data expired */) { + if (maxRingIdx_ == -1 /* first insert */ || + curRingIdx - maxRingIdx_ > windowSize_ /* all data expired */) { clear(); maxRingIdx_ = curRingIdx; } while (maxRingIdx_ < curRingIdx) { maxRingIdx_++; - elements_[maxRingIdx_ % windowSize_] = 0; // reset + elements_[maxRingIdx_ % windowSize_] = 0; // reset } elements_[curRingIdx % windowSize_] += val; @@ -97,17 +99,50 @@ T StatsWindow::sum(int64_t beginRingIdx) { /////////////////////////////// ShareStatsDay //////////////////////////////// template -void ShareStatsDay::getShareStatsHour(uint32_t hourIdx, ShareStats *stats) { +void ShareStatsDay::processShare( + uint32_t hourIdx, const SHARE &share, bool acceptStale) { + ScopeLock sl(lock_); + + if (StratumStatus::isAccepted(share.status()) && + (acceptStale || !StratumStatus::isStale(share.status()))) { + shareAccept1h_[hourIdx] += share.sharediff(); + shareAccept1d_ += share.sharediff(); + + double score = share.score(); + double reward = getShareReward(share); + /* + double minernumber = share.networkdiff()/(12.5*28*1000000); + double reward = 59*minernumber/(4.5*60*24); + 
reward *= pow(10.0,18.0); + */ + double earn = score * reward; + score1h_[hourIdx] += score; + score1d_ += score; + earn1h_[hourIdx] += earn; + earn1d_ += earn; + + } else { + shareReject1h_[hourIdx] += share.sharediff(); + shareReject1d_ += share.sharediff(); + } + modifyHoursFlag_ |= (0x01u << hourIdx); +} + +template +void ShareStatsDay::getShareStatsHour( + uint32_t hourIdx, ShareStats *stats) { ScopeLock sl(lock_); if (hourIdx > 23) return; stats->shareAccept_ = shareAccept1h_[hourIdx]; stats->shareReject_ = shareReject1h_[hourIdx]; - stats->earn_ = earn1h_[hourIdx]; + stats->earn_ = earn1h_[hourIdx]; if (stats->shareReject_) - stats->rejectRate_ = (stats->shareReject_ * 1.0 / (stats->shareAccept_ + stats->shareReject_)); + stats->rejectRate_ = + (stats->shareReject_ * 1.0 / + (stats->shareAccept_ + stats->shareReject_)); else stats->rejectRate_ = 0.0; } @@ -117,10 +152,12 @@ void ShareStatsDay::getShareStatsDay(ShareStats *stats) { ScopeLock sl(lock_); stats->shareAccept_ = shareAccept1d_; stats->shareReject_ = shareReject1d_; - stats->earn_ = earn1d_; + stats->earn_ = earn1d_; if (stats->shareReject_) - stats->rejectRate_ = (stats->shareReject_ * 1.0 / (stats->shareAccept_ + stats->shareReject_)); + stats->rejectRate_ = + (stats->shareReject_ * 1.0 / + (stats->shareAccept_ + stats->shareReject_)); else stats->rejectRate_ = 0.0; } diff --git a/src/StatsHttpd.h b/src/StatsHttpd.h index f3c3df06b..72d70236a 100644 --- a/src/StatsHttpd.h +++ b/src/StatsHttpd.h @@ -35,8 +35,6 @@ #define STATS_SLIDING_WINDOW_SECONDS 3600 - - /////////////////////////////// WorkerStatus ///////////////////////////////// // some miners use the same userName & workerName in different meachines, they // will be the same StatsWorkerItem, the unique key is (userId_ + workId_) @@ -62,7 +60,6 @@ class WorkerStatus { WorkerStatus &operator=(const WorkerStatus &r) = default; }; - //////////////////////////////// WorkerShares //////////////////////////////// // thread safe template @@ -82,21 
+79,20 @@ class WorkerShares { public: WorkerShares(const int64_t workerId, const int32_t userId); -// void serialize(...); -// bool unserialize(const ...); + // void serialize(...); + // bool unserialize(const ...); - void processShare(const SHARE &share); + void processShare(const SHARE &share, bool acceptStale); WorkerStatus getWorkerStatus(); void getWorkerStatus(WorkerStatus &status); bool isExpired(); }; - //////////////////////////////// StatsServer //////////////////////////////// // Interface, used as a pointer type. class StatsServer { public: - virtual ~StatsServer() {}; + virtual ~StatsServer(){}; virtual bool init() = 0; virtual void stop() = 0; virtual void run() = 0; @@ -120,23 +116,23 @@ class StatsServerT : public StatsServer { }; enum RedisPublishPolicy { - REDIS_PUBLISH_USER_UPDATE = 1, + REDIS_PUBLISH_USER_UPDATE = 1, REDIS_PUBLISH_WORKER_UPDATE = 2 }; enum RedisIndexPolicy { - REDIS_INDEX_NONE = 0, - REDIS_INDEX_ACCEPT_1M = 1, - REDIS_INDEX_ACCEPT_5M = 2, - REDIS_INDEX_ACCEPT_15M = 4, - REDIS_INDEX_REJECT_15M = 8, - REDIS_INDEX_ACCEPT_1H = 16, - REDIS_INDEX_REJECT_1H = 32, - REDIS_INDEX_ACCEPT_COUNT = 64, - REDIS_INDEX_LAST_SHARE_IP = 128, + REDIS_INDEX_NONE = 0, + REDIS_INDEX_ACCEPT_1M = 1, + REDIS_INDEX_ACCEPT_5M = 2, + REDIS_INDEX_ACCEPT_15M = 4, + REDIS_INDEX_REJECT_15M = 8, + REDIS_INDEX_ACCEPT_1H = 16, + REDIS_INDEX_REJECT_1H = 32, + REDIS_INDEX_ACCEPT_COUNT = 64, + REDIS_INDEX_LAST_SHARE_IP = 128, REDIS_INDEX_LAST_SHARE_TIME = 256, - REDIS_INDEX_WORKER_NAME = 512, - REDIS_INDEX_MINER_AGENT = 1024 + REDIS_INDEX_WORKER_NAME = 512, + REDIS_INDEX_MINER_AGENT = 1024 }; struct WorkerIndexBuffer { @@ -158,40 +154,52 @@ class StatsServerT : public StatsServer { atomic totalUserCount_; time_t uptime_; - pthread_rwlock_t rwlock_; // for workerSet_ - std::unordered_map> > workerSet_; - std::unordered_map> > userSet_; - std::unordered_map userWorkerCount_; - WorkerShares poolWorker_; // worker status for the pool - - KafkaConsumer kafkaConsumer_; // 
consume topic: 'ShareLog' + pthread_rwlock_t rwlock_; // for workerSet_ + std::unordered_map< + WorkerKey /* userId + workerId */, + shared_ptr>> + workerSet_; + std::unordered_map>> + userSet_; + std::unordered_map + userWorkerCount_; + WorkerShares poolWorker_; // worker status for the pool + + KafkaConsumer kafkaConsumer_; // consume topic: 'ShareLog' thread threadConsume_; - KafkaConsumer kafkaConsumerCommonEvents_; // consume topic: 'CommonEvents' + KafkaConsumer kafkaConsumerCommonEvents_; // consume topic: 'CommonEvents' thread threadConsumeCommonEvents_; - MySQLConnection *poolDB_; // flush workers to table.mining_workers - MySQLConnection *poolDBCommonEvents_; // insert or update workers from table.mining_workers - + MySQLConnection *poolDB_; // flush workers to table.mining_workers + MySQLConnection * + poolDBCommonEvents_; // insert or update workers from table.mining_workers + RedisConnection *redisCommonEvents_; // writing workers' meta infomations std::vector redisGroup_; // flush hashrate to this group - uint32_t redisConcurrency_; // how many threads are writing to Redis at the same time + uint32_t redisConcurrency_; // how many threads are writing to Redis at the + // same time string redisKeyPrefix_; int redisKeyExpire_; uint32_t redisPublishPolicy_; // @see statshttpd.cfg - uint32_t redisIndexPolicy_; // @see statshttpd.cfg + uint32_t redisIndexPolicy_; // @see statshttpd.cfg time_t kFlushDBInterval_; - atomic isInserting_; // flag mark if we are flushing db - atomic isUpdateRedis_; // flag mark if we are flushing redis + atomic isInserting_; // flag mark if we are flushing db + atomic isUpdateRedis_; // flag mark if we are flushing redis + + atomic + lastShareTime_; // the generating time of the last consumed share + atomic isInitializing_; // if true, the database will not be flushed and + // the HTTP API will return an error - atomic lastShareTime_; // the generating time of the last consumed share - atomic isInitializing_; // if true, the 
database will not be flushed and the HTTP API will return an error - atomic lastFlushTime_; // the last db flush time - string fileLastFlushTime_; // write last db flush time to the file + string fileLastFlushTime_; // write last db flush time to the file + + shared_ptr> + dupShareChecker_; // Used to detect duplicate share attacks. - shared_ptr> dupShareChecker_; // Used to detect duplicate share attacks. + bool acceptStale_; // Whether stale shares are accepted // httpd struct event_base *base_; @@ -203,22 +211,31 @@ class StatsServerT : public StatsServer { void runThreadConsumeCommonEvents(); void consumeCommonEvents(rd_kafka_message_t *rkmessage); - bool updateWorkerStatusToDB(const int32_t userId, const int64_t workerId, - const char *workerName, const char *minerAgent); - bool updateWorkerStatusToRedis(const int32_t userId, const int64_t workerId, - const char *workerName, const char *minerAgent); - void updateWorkerStatusIndexToRedis(const int32_t userId, const string &key, - const string &score, const string &value); + bool updateWorkerStatusToDB( + const int32_t userId, + const int64_t workerId, + const char *workerName, + const char *minerAgent); + bool updateWorkerStatusToRedis( + const int32_t userId, + const int64_t workerId, + const char *workerName, + const char *minerAgent); + void updateWorkerStatusIndexToRedis( + const int32_t userId, + const string &key, + const string &score, + const string &value); void _processShare(WorkerKey &key, const SHARE &share); void processShare(const SHARE &share); - void getWorkerStatusBatch(const vector &keys, - vector &workerStatus); + void getWorkerStatusBatch( + const vector &keys, vector &workerStatus); WorkerStatus mergeWorkerStatus(const vector &workerStatus); void flushWorkersAndUsersToDB(); void _flushWorkersAndUsersToDBThread(); - + void flushWorkersAndUsersToRedis(); void _flushWorkersAndUsersToRedisThread(); void _flushWorkersAndUsersToRedisThread(uint32_t threadStep); @@ -229,10 +246,18 @@ class 
StatsServerT : public StatsServer { // and the second thread is responsible for the next 3. void flushWorkersToRedis(uint32_t threadStep); void flushUsersToRedis(uint32_t threadStep); - void addIndexToBuffer(WorkerIndexBuffer &buffer, const int64_t workerId, const WorkerStatus &status); - void flushIndexToRedis(RedisConnection *redis, std::unordered_map &indexBufferMap); - void flushIndexToRedis(RedisConnection *redis, WorkerIndexBuffer &buffer, const int32_t userId); - void flushIndexToRedis(RedisConnection *redis, const std::vector &commandVector); + void addIndexToBuffer( + WorkerIndexBuffer &buffer, + const int64_t workerId, + const WorkerStatus &status); + void flushIndexToRedis( + RedisConnection *redis, + std::unordered_map + &indexBufferMap); + void flushIndexToRedis( + RedisConnection *redis, WorkerIndexBuffer &buffer, const int32_t userId); + void flushIndexToRedis( + RedisConnection *redis, const std::vector &commandVector); void removeExpiredWorkers(); bool setupThreadConsume(); @@ -247,28 +272,40 @@ class StatsServerT : public StatsServer { atomic responseBytes_; public: - StatsServerT(const char *kafkaBrokers, const char *kafkaShareTopic, const char *kafkaCommonEventsTopic, - const string &httpdHost, unsigned short httpdPort, - const MysqlConnectInfo *poolDBInfo, const RedisConnectInfo *redisInfo, - const uint32_t redisConcurrency, const string &redisKeyPrefix, const int redisKeyExpire, - const int redisPublishPolicy, const int redisIndexPolicy, - const time_t kFlushDBInterval, const string &fileLastFlushTime, - shared_ptr> dupShareChecker); + StatsServerT( + const char *kafkaBrokers, + const char *kafkaShareTopic, + const char *kafkaCommonEventsTopic, + const string &httpdHost, + unsigned short httpdPort, + const MysqlConnectInfo *poolDBInfo, + const RedisConnectInfo *redisInfo, + const uint32_t redisConcurrency, + const string &redisKeyPrefix, + const int redisKeyExpire, + const int redisPublishPolicy, + const int redisIndexPolicy, + const time_t 
kFlushDBInterval, + const string &fileLastFlushTime, + shared_ptr> dupShareChecker, + bool acceptStale); ~StatsServerT(); bool init(); void stop(); void run(); - ServerStatus getServerStatus(); - static void httpdServerStatus (struct evhttp_request *req, void *arg); + static void httpdServerStatus(struct evhttp_request *req, void *arg); static void httpdGetWorkerStatus(struct evhttp_request *req, void *arg); - static void httpdGetFlushDBTime (struct evhttp_request *req, void *arg); + static void httpdGetFlushDBTime(struct evhttp_request *req, void *arg); - void getWorkerStatus(struct evbuffer *evb, const char *pUserId, - const char *pWorkerId, const char *pIsMerge); + void getWorkerStatus( + struct evbuffer *evb, + const char *pUserId, + const char *pWorkerId, + const char *pIsMerge); }; #include "StatsHttpd.inl" diff --git a/src/StatsHttpd.inl b/src/StatsHttpd.inl index 311cbc5df..5675bd8f8 100644 --- a/src/StatsHttpd.inl +++ b/src/StatsHttpd.inl @@ -33,28 +33,33 @@ //////////////////////////////// WorkerShares //////////////////////////////// template -WorkerShares::WorkerShares(const int64_t workerId, const int32_t userId): -workerId_(workerId), userId_(userId), acceptCount_(0), -lastShareIP_(0), lastShareTime_(0), -acceptShareSec_(STATS_SLIDING_WINDOW_SECONDS), -rejectShareMin_(STATS_SLIDING_WINDOW_SECONDS/60) -{ +WorkerShares::WorkerShares(const int64_t workerId, const int32_t userId) + : workerId_(workerId) + , userId_(userId) + , acceptCount_(0) + , lastShareIP_(0) + , lastShareTime_(0) + , acceptShareSec_(STATS_SLIDING_WINDOW_SECONDS) + , rejectShareMin_(STATS_SLIDING_WINDOW_SECONDS / 60) { assert(STATS_SLIDING_WINDOW_SECONDS >= 3600); } template -void WorkerShares::processShare(const SHARE &share) { +void WorkerShares::processShare(const SHARE &share, bool acceptStale) { ScopeLock sl(lock_); const time_t now = time(nullptr); if (now > share.timestamp() + STATS_SLIDING_WINDOW_SECONDS) { return; } - if (StratumStatus::isAccepted(share.status())) { + if 
(StratumStatus::isAccepted(share.status()) && + (acceptStale || !StratumStatus::isStale(share.status()))) { acceptCount_++; - acceptShareSec_.insert(share.timestamp(), share.sharediff()); + DLOG(INFO) << "收到id"<::getWorkerStatus() { const time_t now = time(nullptr); WorkerStatus s; - s.accept1m_ = acceptShareSec_.sum(now, 60); - s.accept5m_ = acceptShareSec_.sum(now, 300); + s.accept1m_ = acceptShareSec_.sum(now, 60); + s.accept5m_ = acceptShareSec_.sum(now, 300); s.accept15m_ = acceptShareSec_.sum(now, 900); - s.reject15m_ = rejectShareMin_.sum(now/60, 15); + s.reject15m_ = rejectShareMin_.sum(now / 60, 15); s.accept1h_ = acceptShareSec_.sum(now, 3600); - s.reject1h_ = rejectShareMin_.sum(now/60, 60); + s.reject1h_ = rejectShareMin_.sum(now / 60, 60); - s.acceptCount_ = acceptCount_; - s.lastShareIP_ = lastShareIP_; + s.acceptCount_ = acceptCount_; + s.lastShareIP_ = lastShareIP_; s.lastShareTime_ = lastShareTime_; return s; @@ -87,50 +92,75 @@ void WorkerShares::getWorkerStatus(WorkerStatus &s) { ScopeLock sl(lock_); const time_t now = time(nullptr); - s.accept1m_ = acceptShareSec_.sum(now, 60); - s.accept5m_ = acceptShareSec_.sum(now, 300); + s.accept1m_ = acceptShareSec_.sum(now, 60); + s.accept5m_ = acceptShareSec_.sum(now, 300); s.accept15m_ = acceptShareSec_.sum(now, 900); - s.reject15m_ = rejectShareMin_.sum(now/60, 15); + s.reject15m_ = rejectShareMin_.sum(now / 60, 15); s.accept1h_ = acceptShareSec_.sum(now, 3600); - s.reject1h_ = rejectShareMin_.sum(now/60, 60); + s.reject1h_ = rejectShareMin_.sum(now / 60, 60); - s.acceptCount_ = acceptCount_; - s.lastShareIP_ = lastShareIP_; + s.acceptCount_ = acceptCount_; + s.lastShareIP_ = lastShareIP_; s.lastShareTime_ = lastShareTime_; } template bool WorkerShares::isExpired() { ScopeLock sl(lock_); - return (lastShareTime_ + STATS_SLIDING_WINDOW_SECONDS) < (uint32_t)time(nullptr); + return (lastShareTime_ + STATS_SLIDING_WINDOW_SECONDS) < + (uint32_t)time(nullptr); } - //////////////////////////////// 
StatsServerT //////////////////////////////// template -StatsServerT::StatsServerT(const char *kafkaBrokers, const char *kafkaShareTopic, const char *kafkaCommonEventsTopic, - const string &httpdHost, unsigned short httpdPort, - const MysqlConnectInfo *poolDBInfo, const RedisConnectInfo *redisInfo, - const uint32_t redisConcurrency, const string &redisKeyPrefix, - const int redisKeyExpire, const int redisPublishPolicy, const int redisIndexPolicy, - const time_t kFlushDBInterval, const string &fileLastFlushTime, - shared_ptr> dupShareChecker): -running_(true), totalWorkerCount_(0), totalUserCount_(0), uptime_(time(nullptr)), -poolWorker_(0u/* worker id */, 0/* user id */), -kafkaConsumer_(kafkaBrokers, kafkaShareTopic, 0/* patition */), -kafkaConsumerCommonEvents_(kafkaBrokers, kafkaCommonEventsTopic, 0/* patition */), -poolDB_(nullptr), poolDBCommonEvents_(nullptr), -redisCommonEvents_(nullptr), redisConcurrency_(redisConcurrency), -redisKeyPrefix_(redisKeyPrefix), redisKeyExpire_(redisKeyExpire), -redisPublishPolicy_(redisPublishPolicy), redisIndexPolicy_(redisIndexPolicy), -kFlushDBInterval_(kFlushDBInterval), -isInserting_(false), isUpdateRedis_(false), -lastShareTime_(0), isInitializing_(true), lastFlushTime_(0), -fileLastFlushTime_(fileLastFlushTime), dupShareChecker_(dupShareChecker), -base_(nullptr), httpdHost_(httpdHost), httpdPort_(httpdPort), -requestCount_(0), responseBytes_(0) -{ +StatsServerT::StatsServerT( + const char *kafkaBrokers, + const char *kafkaShareTopic, + const char *kafkaCommonEventsTopic, + const string &httpdHost, + unsigned short httpdPort, + const MysqlConnectInfo *poolDBInfo, + const RedisConnectInfo *redisInfo, + const uint32_t redisConcurrency, + const string &redisKeyPrefix, + const int redisKeyExpire, + const int redisPublishPolicy, + const int redisIndexPolicy, + const time_t kFlushDBInterval, + const string &fileLastFlushTime, + shared_ptr> dupShareChecker, + bool acceptStale) + : running_(true) + , totalWorkerCount_(0) + , 
totalUserCount_(0) + , uptime_(time(nullptr)) + , poolWorker_(0u /* worker id */, 0 /* user id */) + , kafkaConsumer_(kafkaBrokers, kafkaShareTopic, 0 /* patition */) + , kafkaConsumerCommonEvents_( + kafkaBrokers, kafkaCommonEventsTopic, 0 /* patition */) + , poolDB_(nullptr) + , poolDBCommonEvents_(nullptr) + , redisCommonEvents_(nullptr) + , redisConcurrency_(redisConcurrency) + , redisKeyPrefix_(redisKeyPrefix) + , redisKeyExpire_(redisKeyExpire) + , redisPublishPolicy_(redisPublishPolicy) + , redisIndexPolicy_(redisIndexPolicy) + , kFlushDBInterval_(kFlushDBInterval) + , isInserting_(false) + , isUpdateRedis_(false) + , lastShareTime_(0) + , isInitializing_(true) + , lastFlushTime_(0) + , fileLastFlushTime_(fileLastFlushTime) + , dupShareChecker_(dupShareChecker) + , acceptStale_(acceptStale) + , base_(nullptr) + , httpdHost_(httpdHost) + , httpdPort_(httpdPort) + , requestCount_(0) + , responseBytes_(0) { if (poolDBInfo != nullptr) { poolDB_ = new MySQLConnection(*poolDBInfo); poolDBCommonEvents_ = new MySQLConnection(*poolDBInfo); @@ -138,8 +168,8 @@ requestCount_(0), responseBytes_(0) if (redisInfo != nullptr) { redisCommonEvents_ = new RedisConnection(*redisInfo); - - for (uint32_t i=0; i::~StatsServerT() { if (threadConsume_.joinable()) threadConsume_.join(); - + if (threadConsumeCommonEvents_.joinable()) threadConsumeCommonEvents_.join(); @@ -189,32 +219,34 @@ StatsServerT::~StatsServerT() { } template -string StatsServerT::getRedisKeyMiningWorker(const int32_t userId, const int64_t workerId) { - string key = redisKeyPrefix_; - key += "mining_workers/pu/"; - key += std::to_string(userId); - key += "/wk/"; - key += std::to_string(workerId); - return key; +string StatsServerT::getRedisKeyMiningWorker( + const int32_t userId, const int64_t workerId) { + string key = redisKeyPrefix_; + key += "mining_workers/pu/"; + key += std::to_string(userId); + key += "/wk/"; + key += std::to_string(workerId); + return key; } template string 
StatsServerT::getRedisKeyMiningWorker(const int32_t userId) { - string key = redisKeyPrefix_; - key += "mining_workers/pu/"; - key += std::to_string(userId); - key += "/all"; - return key; + string key = redisKeyPrefix_; + key += "mining_workers/pu/"; + key += std::to_string(userId); + key += "/all"; + return key; } template -string StatsServerT::getRedisKeyIndex(const int32_t userId, const string &indexName) { - string key = redisKeyPrefix_; - key += "mining_workers/pu/"; - key += std::to_string(userId); - key += "/sort/"; - key += indexName; - return key; +string StatsServerT::getRedisKeyIndex( + const int32_t userId, const string &indexName) { + string key = redisKeyPrefix_; + key += "mining_workers/pu/"; + key += std::to_string(userId); + key += "/sort/"; + key += indexName; + return key; } template @@ -225,9 +257,9 @@ bool StatsServerT::init() { return false; } - // check db conf (only poolDB_ needs) - string value = poolDB_->getVariable("max_allowed_packet"); - if (atoi(value.c_str()) < 16 * 1024 *1024) { + // check db conf (only poolDB_ needs) + string value = poolDB_->getVariable("max_allowed_packet"); + if (atoi(value.c_str()) < 16 * 1024 * 1024) { LOG(INFO) << "db conf 'max_allowed_packet' is less than 16*1024*1024"; return false; } @@ -243,7 +275,7 @@ bool StatsServerT::init() { return false; } - for (size_t i=0; iping()) { LOG(INFO) << "redis " << i << " in redisGroup ping failure"; return false; @@ -274,15 +306,15 @@ void StatsServerT::processShare(const SHARE &share) { if (now > share.timestamp() + STATS_SLIDING_WINDOW_SECONDS) { return; } - poolWorker_.processShare(share); + poolWorker_.processShare(share, acceptStale_); //矿池总情况 WorkerKey key(share.userid(), share.workerhashid()); - _processShare(key, share); + _processShare(key, share); //用户矿机 } template void StatsServerT::_processShare(WorkerKey &key, const SHARE &share) { - const int32_t userId = key.userId_; + const int32_t userId = key.userId_; pthread_rwlock_rdlock(&rwlock_); auto workerItr = 
workerSet_.find(key); @@ -292,21 +324,23 @@ void StatsServerT::_processShare(WorkerKey &key, const SHARE &share) { shared_ptr> workerShare = nullptr, userShare = nullptr; if (workerItr != workerSet_.end()) { - workerItr->second->processShare(share); + workerItr->second->processShare(share, acceptStale_); } else { - workerShare = make_shared>(share.workerhashid(), share.userid()); - workerShare->processShare(share); + workerShare = + make_shared>(share.workerhashid(), share.userid()); + workerShare->processShare(share, acceptStale_); } if (userItr != userSet_.end()) { - userItr->second->processShare(share); + userItr->second->processShare(share, acceptStale_); } else { - userShare = make_shared>(share.workerhashid(), share.userid()); - userShare->processShare(share); + userShare = + make_shared>(share.workerhashid(), share.userid()); + userShare->processShare(share, acceptStale_); } if (workerShare != nullptr || userShare != nullptr) { - pthread_rwlock_wrlock(&rwlock_); // write lock + pthread_rwlock_wrlock(&rwlock_); // write lock if (workerShare != nullptr) { workerSet_[key] = workerShare; totalWorkerCount_++; @@ -329,7 +363,8 @@ void StatsServerT::flushWorkersAndUsersToRedis() { } isUpdateRedis_ = true; - boost::thread t(boost::bind(&StatsServerT::_flushWorkersAndUsersToRedisThread, this)); + boost::thread t(boost::bind( + &StatsServerT::_flushWorkersAndUsersToRedisThread, this)); } template @@ -337,10 +372,9 @@ void StatsServerT::_flushWorkersAndUsersToRedisThread() { std::vector threadPool; assert(redisGroup_.size() == redisConcurrency_); - for (uint32_t i=0; i::_flushWorkersAndUsersToRedisThread, this, i)) - ); + for (uint32_t i = 0; i < redisConcurrency_; i++) { + threadPool.push_back(boost::thread(boost::bind( + &StatsServerT::_flushWorkersAndUsersToRedisThread, this, i))); } for (auto &t : threadPool) { @@ -350,14 +384,16 @@ void StatsServerT::_flushWorkersAndUsersToRedisThread() { } pthread_rwlock_rdlock(&rwlock_); - LOG(INFO) << "flush to redis... 
done, " << workerSet_.size() << " workers, " << userSet_.size() << " users"; + LOG(INFO) << "flush to redis... done, " << workerSet_.size() << " workers, " + << userSet_.size() << " users"; pthread_rwlock_unlock(&rwlock_); isUpdateRedis_ = false; } template -void StatsServerT::_flushWorkersAndUsersToRedisThread(uint32_t threadStep) { +void StatsServerT::_flushWorkersAndUsersToRedisThread( + uint32_t threadStep) { if (!checkRedis(threadStep)) { return; } @@ -369,7 +405,8 @@ template bool StatsServerT::checkRedis(uint32_t threadStep) { if (threadStep > redisGroup_.size() - 1) { LOG(ERROR) << "checkRedis(" << threadStep << "): " - << "threadStep out of range, should less than " << threadStep << "!"; + << "threadStep out of range, should less than " << threadStep + << "!"; return false; } @@ -391,13 +428,14 @@ void StatsServerT::flushWorkersToRedis(uint32_t threadStep) { size_t workerCounter = 0; std::unordered_map indexBufferMap; - pthread_rwlock_rdlock(&rwlock_); // read lock + pthread_rwlock_rdlock(&rwlock_); // read lock LOG(INFO) << "redis (thread " << threadStep << "): flush workers, rd locked"; - + size_t stepSize = workerSet_.size() / redisConcurrency_; if (workerSet_.size() % redisConcurrency_ != 0) { // +1 to avoid missing the last few items. - // Example: 5 / 2 = 2. Each thread handles 2 items and the fifth was missing. + // Example: 5 / 2 = 2. Each thread handles 2 items and the fifth was + // missing. 
stepSize++; } @@ -406,13 +444,14 @@ void StatsServerT::flushWorkersToRedis(uint32_t threadStep) { auto itr = workerSet_.begin(); // move to the beginning position - for (size_t i=0; ifirst.userId_; + const int32_t userId = itr->first.userId_; const int64_t workerId = itr->first.workerId_; shared_ptr> workerShare = itr->second; const WorkerStatus status = workerShare->getWorkerStatus(); @@ -420,18 +459,17 @@ void StatsServerT::flushWorkersToRedis(uint32_t threadStep) { string key = getRedisKeyMiningWorker(userId, workerId); // update info - redis->prepare({"HMSET", key, - "accept_1m", std::to_string(status.accept1m_), - "accept_5m", std::to_string(status.accept5m_), - "accept_15m", std::to_string(status.accept15m_), - "reject_15m", std::to_string(status.reject15m_), - "accept_1h", std::to_string(status.accept1h_), - "reject_1h", std::to_string(status.reject1h_), - "accept_count", std::to_string(status.acceptCount_), - "last_share_ip", status.lastShareIP_.toString(), - "last_share_time", std::to_string(status.lastShareTime_), - "updated_at", std::to_string(time(nullptr)) - }); + redis->prepare({"HMSET", key, + "accept_1m", std::to_string(status.accept1m_), + "accept_5m", std::to_string(status.accept5m_), + "accept_15m", std::to_string(status.accept15m_), + "reject_15m", std::to_string(status.reject15m_), + "accept_1h", std::to_string(status.accept1h_), + "reject_1h", std::to_string(status.reject1h_), + "accept_count", std::to_string(status.acceptCount_), + "last_share_ip", status.lastShareIP_.toString(), + "last_share_time", std::to_string(status.lastShareTime_), + "updated_at", std::to_string(time(nullptr))}); // set key expire if (redisKeyExpire_ > 0) { redis->prepare({"EXPIRE", key, std::to_string(redisKeyExpire_)}); @@ -455,15 +493,15 @@ void StatsServerT::flushWorkersToRedis(uint32_t threadStep) { return; } - for (size_t i=0; iexecute(); if (r.type() != REDIS_REPLY_STATUS || r.str() != "OK") { LOG(INFO) << "redis (thread " << threadStep << ") HMSET failed, " - 
<< "item index: " << i << ", " - << "reply type: " << r.type() << ", " - << "reply str: " << r.str(); + << "item index: " << i << ", " + << "reply type: " << r.type() << ", " + << "reply str: " << r.str(); } } // set key expire @@ -471,10 +509,10 @@ void StatsServerT::flushWorkersToRedis(uint32_t threadStep) { RedisResult r = redis->execute(); if (r.type() != REDIS_REPLY_INTEGER || r.integer() != 1) { LOG(INFO) << "redis (thread " << threadStep << ") EXPIRE failed, " - << "item index: " << i << ", " - << "reply type: " << r.type() << ", " - << "reply integer: " << r.integer() << "," - << "reply str: " << r.str(); + << "item index: " << i << ", " + << "reply type: " << r.type() << ", " + << "reply integer: " << r.integer() << "," + << "reply str: " << r.str(); } } // notification @@ -482,9 +520,9 @@ void StatsServerT::flushWorkersToRedis(uint32_t threadStep) { RedisResult r = redis->execute(); if (r.type() != REDIS_REPLY_INTEGER) { LOG(INFO) << "redis (thread " << threadStep << ") PUBLISH failed, " - << "item index: " << i << ", " - << "reply type: " << r.type() << ", " - << "reply str: " << r.str(); + << "item index: " << i << ", " + << "reply type: " << r.type() << ", " + << "reply str: " << r.str(); } } } @@ -494,71 +532,94 @@ void StatsServerT::flushWorkersToRedis(uint32_t threadStep) { flushIndexToRedis(redis, indexBufferMap); } - LOG(INFO) << "flush workers to redis (thread " << threadStep << ") done, workers: " << workerCounter; + LOG(INFO) << "flush workers to redis (thread " << threadStep + << ") done, workers: " << workerCounter; return; } template -void StatsServerT::flushIndexToRedis(RedisConnection *redis, - std::unordered_map &indexBufferMap) { +void StatsServerT::flushIndexToRedis( + RedisConnection *redis, + std::unordered_map &indexBufferMap) { for (auto itr = indexBufferMap.begin(); itr != indexBufferMap.end(); itr++) { flushIndexToRedis(redis, itr->second, itr->first); } - } template -void StatsServerT::flushIndexToRedis(RedisConnection *redis, 
WorkerIndexBuffer &buffer, const int32_t userId) { +void StatsServerT::flushIndexToRedis( + RedisConnection *redis, WorkerIndexBuffer &buffer, const int32_t userId) { // accept_1m if (redisIndexPolicy_ & REDIS_INDEX_ACCEPT_1M) { - buffer.accept1m_.insert(buffer.accept1m_.begin(), {"ZADD", getRedisKeyIndex(userId, "accept_1m")}); + buffer.accept1m_.insert( + buffer.accept1m_.begin(), + {"ZADD", getRedisKeyIndex(userId, "accept_1m")}); flushIndexToRedis(redis, buffer.accept1m_); } // accept_5m if (redisIndexPolicy_ & REDIS_INDEX_ACCEPT_5M) { - buffer.accept5m_.insert(buffer.accept5m_.begin(), {"ZADD", getRedisKeyIndex(userId, "accept_5m")}); + buffer.accept5m_.insert( + buffer.accept5m_.begin(), + {"ZADD", getRedisKeyIndex(userId, "accept_5m")}); flushIndexToRedis(redis, buffer.accept5m_); } // accept_15m if (redisIndexPolicy_ & REDIS_INDEX_ACCEPT_15M) { - buffer.accept15m_.insert(buffer.accept15m_.begin(), {"ZADD", getRedisKeyIndex(userId, "accept_15m")}); + buffer.accept15m_.insert( + buffer.accept15m_.begin(), + {"ZADD", getRedisKeyIndex(userId, "accept_15m")}); flushIndexToRedis(redis, buffer.accept15m_); } // reject_15m if (redisIndexPolicy_ & REDIS_INDEX_REJECT_15M) { - buffer.reject15m_.insert(buffer.reject15m_.begin(), {"ZADD", getRedisKeyIndex(userId, "reject_15m")}); + buffer.reject15m_.insert( + buffer.reject15m_.begin(), + {"ZADD", getRedisKeyIndex(userId, "reject_15m")}); flushIndexToRedis(redis, buffer.reject15m_); } // accept_1h if (redisIndexPolicy_ & REDIS_INDEX_ACCEPT_1H) { - buffer.accept1h_.insert(buffer.accept1h_.begin(), {"ZADD", getRedisKeyIndex(userId, "accept_1h")}); + buffer.accept1h_.insert( + buffer.accept1h_.begin(), + {"ZADD", getRedisKeyIndex(userId, "accept_1h")}); flushIndexToRedis(redis, buffer.accept1h_); } // reject_1h if (redisIndexPolicy_ & REDIS_INDEX_REJECT_1H) { - buffer.reject1h_.insert(buffer.reject1h_.begin(), {"ZADD", getRedisKeyIndex(userId, "reject_1h")}); + buffer.reject1h_.insert( + buffer.reject1h_.begin(), + {"ZADD", 
getRedisKeyIndex(userId, "reject_1h")}); flushIndexToRedis(redis, buffer.reject1h_); } // accept_count if (redisIndexPolicy_ & REDIS_INDEX_ACCEPT_COUNT) { - buffer.acceptCount_.insert(buffer.acceptCount_.begin(), {"ZADD", getRedisKeyIndex(userId, "accept_count")}); + buffer.acceptCount_.insert( + buffer.acceptCount_.begin(), + {"ZADD", getRedisKeyIndex(userId, "accept_count")}); flushIndexToRedis(redis, buffer.acceptCount_); } // last_share_ip if (redisIndexPolicy_ & REDIS_INDEX_LAST_SHARE_IP) { - buffer.lastShareIP_.insert(buffer.lastShareIP_.begin(), {"ZADD", getRedisKeyIndex(userId, "last_share_ip")}); + buffer.lastShareIP_.insert( + buffer.lastShareIP_.begin(), + {"ZADD", getRedisKeyIndex(userId, "last_share_ip")}); flushIndexToRedis(redis, buffer.lastShareIP_); } // last_share_time if (redisIndexPolicy_ & REDIS_INDEX_LAST_SHARE_TIME) { - buffer.lastShareTime_.insert(buffer.lastShareTime_.begin(), {"ZADD", getRedisKeyIndex(userId, "last_share_time")}); + buffer.lastShareTime_.insert( + buffer.lastShareTime_.begin(), + {"ZADD", getRedisKeyIndex(userId, "last_share_time")}); flushIndexToRedis(redis, buffer.lastShareTime_); } } template -void StatsServerT::addIndexToBuffer(WorkerIndexBuffer &buffer, const int64_t workerId, const WorkerStatus &status) { +void StatsServerT::addIndexToBuffer( + WorkerIndexBuffer &buffer, + const int64_t workerId, + const WorkerStatus &status) { // accept_1m if (redisIndexPolicy_ & REDIS_INDEX_ACCEPT_1M) { buffer.accept1m_.push_back(std::to_string(status.accept1m_)); @@ -596,7 +657,8 @@ void StatsServerT::addIndexToBuffer(WorkerIndexBuffer &buffer, const int6 } // last_share_ip if (redisIndexPolicy_ & REDIS_INDEX_LAST_SHARE_IP) { - buffer.lastShareIP_.push_back(std::to_string(status.lastShareIP_.addrUint64[1])); + buffer.lastShareIP_.push_back( + std::to_string(status.lastShareIP_.addrUint64[1])); buffer.lastShareIP_.push_back(std::to_string(workerId)); } // last_share_time @@ -605,11 +667,12 @@ void 
StatsServerT::addIndexToBuffer(WorkerIndexBuffer &buffer, const int6 buffer.lastShareTime_.push_back(std::to_string(workerId)); } - buffer.size_ ++; + buffer.size_++; } template -void StatsServerT::flushIndexToRedis(RedisConnection *redis, const std::vector &commandVector) { +void StatsServerT::flushIndexToRedis( + RedisConnection *redis, const std::vector &commandVector) { redis->prepare(commandVector); RedisResult r = redis->execute(); if (r.type() != REDIS_REPLY_INTEGER) { @@ -625,13 +688,14 @@ void StatsServerT::flushUsersToRedis(uint32_t threadStep) { RedisConnection *redis = redisGroup_[threadStep]; size_t userCounter = 0; - pthread_rwlock_rdlock(&rwlock_); // read lock + pthread_rwlock_rdlock(&rwlock_); // read lock LOG(INFO) << "redis (thread " << threadStep << "): flush users, rd locked"; size_t stepSize = userSet_.size() / redisConcurrency_; if (userSet_.size() % redisConcurrency_ != 0) { // +1 to avoid missing the last few items. - // Example: 5 / 2 = 2. Each thread handles 2 items and the fifth was missing. + // Example: 5 / 2 = 2. Each thread handles 2 items and the fifth was + // missing. 
stepSize++; } @@ -640,13 +704,14 @@ void StatsServerT::flushUsersToRedis(uint32_t threadStep) { auto itr = userSet_.begin(); // move to the beginning position - for (size_t i=0; ifirst; + const int32_t userId = itr->first; shared_ptr> workerShare = itr->second; const WorkerStatus status = workerShare->getWorkerStatus(); const int32_t workerCount = userWorkerCount_[userId]; @@ -654,19 +719,18 @@ void StatsServerT::flushUsersToRedis(uint32_t threadStep) { string key = getRedisKeyMiningWorker(userId); // update info - redis->prepare({"HMSET", key, - "worker_count", std::to_string(workerCount), - "accept_1m", std::to_string(status.accept1m_), - "accept_5m", std::to_string(status.accept5m_), - "accept_15m", std::to_string(status.accept15m_), - "reject_15m", std::to_string(status.reject15m_), - "accept_1h", std::to_string(status.accept1h_), - "reject_1h", std::to_string(status.reject1h_), - "accept_count", std::to_string(status.acceptCount_), - "last_share_ip", status.lastShareIP_.toString(), - "last_share_time", std::to_string(status.lastShareTime_), - "updated_at", std::to_string(time(nullptr)) - }); + redis->prepare({"HMSET", key, + "worker_count", std::to_string(workerCount), + "accept_1m", std::to_string(status.accept1m_), + "accept_5m", std::to_string(status.accept5m_), + "accept_15m", std::to_string(status.accept15m_), + "reject_15m", std::to_string(status.reject15m_), + "accept_1h", std::to_string(status.accept1h_), + "reject_1h", std::to_string(status.reject1h_), + "accept_count", std::to_string(status.acceptCount_), + "last_share_ip", status.lastShareIP_.toString(), + "last_share_time", std::to_string(status.lastShareTime_), + "updated_at", std::to_string(time(nullptr))}); // set key expire if (redisKeyExpire_ > 0) { redis->prepare({"EXPIRE", key, std::to_string(redisKeyExpire_)}); @@ -685,15 +749,15 @@ void StatsServerT::flushUsersToRedis(uint32_t threadStep) { return; } - for (size_t i=0; iexecute(); if (r.type() != REDIS_REPLY_STATUS || r.str() != "OK") { 
LOG(INFO) << "redis (thread " << threadStep << ") HMSET failed, " - << "item index: " << i << ", " - << "reply type: " << r.type() << ", " - << "reply str: " << r.str(); + << "item index: " << i << ", " + << "reply type: " << r.type() << ", " + << "reply str: " << r.str(); } } // set key expire @@ -701,10 +765,10 @@ void StatsServerT::flushUsersToRedis(uint32_t threadStep) { RedisResult r = redis->execute(); if (r.type() != REDIS_REPLY_INTEGER || r.integer() != 1) { LOG(INFO) << "redis (thread " << threadStep << ") EXPIRE failed, " - << "item index: " << i << ", " - << "reply type: " << r.type() << ", " - << "reply integer: " << r.integer() << "," - << "reply str: " << r.str(); + << "item index: " << i << ", " + << "reply type: " << r.type() << ", " + << "reply integer: " << r.integer() << "," + << "reply str: " << r.str(); } } // publish notification @@ -712,14 +776,15 @@ void StatsServerT::flushUsersToRedis(uint32_t threadStep) { RedisResult r = redis->execute(); if (r.type() != REDIS_REPLY_INTEGER) { LOG(INFO) << "redis (thread " << threadStep << ") PUBLISH failed, " - << "item index: " << i << ", " - << "reply type: " << r.type() << ", " - << "reply str: " << r.str(); + << "item index: " << i << ", " + << "reply type: " << r.type() << ", " + << "reply str: " << r.str(); } } } - LOG(INFO) << "flush users to redis (thread " << threadStep << ") done, users: " << userCounter; + LOG(INFO) << "flush users to redis (thread " << threadStep + << ") done, users: " << userCounter; return; } @@ -732,7 +797,8 @@ void StatsServerT::flushWorkersAndUsersToDB() { } isInserting_ = true; - boost::thread t(boost::bind(&StatsServerT::_flushWorkersAndUsersToDBThread, this)); + boost::thread t( + boost::bind(&StatsServerT::_flushWorkersAndUsersToDBThread, this)); } template @@ -741,24 +807,31 @@ void StatsServerT::_flushWorkersAndUsersToDBThread() { // merge two table items // table.`mining_workers` unique index: `puid` + `worker_id` // - const string mergeSQL = "INSERT INTO 
`mining_workers` " - " SELECT * FROM `mining_workers_tmp` " - " ON DUPLICATE KEY " - " UPDATE " - " `mining_workers`.`accept_1m` =`mining_workers_tmp`.`accept_1m`, " - " `mining_workers`.`accept_5m` =`mining_workers_tmp`.`accept_5m`, " - " `mining_workers`.`accept_15m` =`mining_workers_tmp`.`accept_15m`, " - " `mining_workers`.`reject_15m` =`mining_workers_tmp`.`reject_15m`, " - " `mining_workers`.`accept_1h` =`mining_workers_tmp`.`accept_1h`, " - " `mining_workers`.`reject_1h` =`mining_workers_tmp`.`reject_1h`, " - " `mining_workers`.`accept_count` =`mining_workers_tmp`.`accept_count`," - " `mining_workers`.`last_share_ip` =`mining_workers_tmp`.`last_share_ip`," - " `mining_workers`.`last_share_time`=`mining_workers_tmp`.`last_share_time`," - " `mining_workers`.`updated_at` =`mining_workers_tmp`.`updated_at` "; + const string mergeSQL = + "INSERT INTO `mining_workers` " + " SELECT * FROM `mining_workers_tmp` " + " ON DUPLICATE KEY " + " UPDATE " + " `mining_workers`.`accept_1m` =`mining_workers_tmp`.`accept_1m`, " + " `mining_workers`.`accept_5m` =`mining_workers_tmp`.`accept_5m`, " + " `mining_workers`.`accept_15m` =`mining_workers_tmp`.`accept_15m`, " + " `mining_workers`.`reject_15m` =`mining_workers_tmp`.`reject_15m`, " + " `mining_workers`.`accept_1h` =`mining_workers_tmp`.`accept_1h`, " + " `mining_workers`.`reject_1h` =`mining_workers_tmp`.`reject_1h`, " + " `mining_workers`.`accept_count` " + "=`mining_workers_tmp`.`accept_count`," + " `mining_workers`.`last_share_ip` " + "=`mining_workers_tmp`.`last_share_ip`," + " " + "`mining_workers`.`last_share_time`=`mining_workers_tmp`.`last_share_" + "time`," + " `mining_workers`.`updated_at` =`mining_workers_tmp`.`updated_at` "; // fields for table.mining_workers - const string fields = "`worker_id`,`puid`,`group_id`,`accept_1m`, `accept_5m`," - "`accept_15m`, `reject_15m`, `accept_1h`,`reject_1h`, `accept_count`, `last_share_ip`," - " `last_share_time`, `created_at`, `updated_at`"; + const string fields = + 
"`worker_id`,`puid`,`group_id`,`accept_1m`, `accept_5m`," + "`accept_15m`, `reject_15m`, `accept_1h`,`reject_1h`, `accept_count`, " + "`last_share_ip`," + " `last_share_time`, `created_at`, `updated_at`"; // values for multi-insert sql vector values; size_t workerCounter = 0; @@ -769,59 +842,77 @@ void StatsServerT::_flushWorkersAndUsersToDBThread() { goto finish; } - pthread_rwlock_rdlock(&rwlock_); // read lock + pthread_rwlock_rdlock(&rwlock_); // read lock LOG(INFO) << "flush DB: rd locked"; // get all workes status for (auto itr = workerSet_.begin(); itr != workerSet_.end(); itr++) { workerCounter++; - const int32_t userId = itr->first.userId_; + const int32_t userId = itr->first.userId_; const int64_t workerId = itr->first.workerId_; shared_ptr> workerShare = itr->second; const WorkerStatus status = workerShare->getWorkerStatus(); const string nowStr = date("%F %T", time(nullptr)); - values.push_back(Strings::Format("%" PRId64",%d,%d,%" PRIu64",%" PRIu64"," - "%" PRIu64",%" PRIu64"," // accept_15m, reject_15m - "%" PRIu64",%" PRIu64"," // accept_1h, reject_1h - "%d,\"%s\"," - "\"%s\",\"%s\",\"%s\"", - workerId, userId, - -1 * userId, /* default group id */ - status.accept1m_, status.accept5m_, - status.accept15m_, status.reject15m_, - status.accept1h_, status.reject1h_, - status.acceptCount_, status.lastShareIP_.toString().c_str(), - date("%F %T", status.lastShareTime_).c_str(), - nowStr.c_str(), nowStr.c_str())); + values.push_back( + Strings::Format( + "%" PRId64 ",%d,%d,%" PRIu64 ",%" PRIu64 "," + "%" PRIu64 ",%" PRIu64 "," // accept_15m, reject_15m + "%" PRIu64 ",%" PRIu64 "," // accept_1h, reject_1h + "%d,\"%s\"," + "\"%s\",\"%s\",\"%s\"", + workerId, + userId, + -1 * userId, /* default group id */ + status.accept1m_, + status.accept5m_, + status.accept15m_, + status.reject15m_, + status.accept1h_, + status.reject1h_, + status.acceptCount_, + status.lastShareIP_.toString().c_str(), + date("%F %T", status.lastShareTime_).c_str(), + nowStr.c_str(), + 
nowStr.c_str())); + DLOG(INFO) << "当前workesid:"<first; + const int32_t userId = itr->first; const int64_t workerId = 0; shared_ptr> workerShare = itr->second; const WorkerStatus status = workerShare->getWorkerStatus(); const string nowStr = date("%F %T", time(nullptr)); - values.push_back(Strings::Format("%" PRId64",%d,%d,%" PRIu64",%" PRIu64"," - "%" PRIu64",%" PRIu64"," // accept_15m, reject_15m - "%" PRIu64",%" PRIu64"," // accept_1h, reject_1h - "%d,\"%s\"," - "\"%s\",\"%s\",\"%s\"", - workerId, userId, - -1 * userId, /* default group id */ - status.accept1m_, status.accept5m_, - status.accept15m_, status.reject15m_, - status.accept1h_, status.reject1h_, - status.acceptCount_, status.lastShareIP_.toString().c_str(), - date("%F %T", status.lastShareTime_).c_str(), - nowStr.c_str(), nowStr.c_str())); + values.push_back( + Strings::Format( + "%" PRId64 ",%d,%d,%" PRIu64 ",%" PRIu64 "," + "%" PRIu64 ",%" PRIu64 "," // accept_15m, reject_15m + "%" PRIu64 ",%" PRIu64 "," // accept_1h, reject_1h + "%d,\"%s\"," + "\"%s\",\"%s\",\"%s\"", + workerId, + userId, + -1 * userId, /* default group id */ + status.accept1m_, + status.accept5m_, + status.accept15m_, + status.reject15m_, + status.accept1h_, + status.reject1h_, + status.acceptCount_, + status.lastShareIP_.toString().c_str(), + date("%F %T", status.lastShareTime_).c_str(), + nowStr.c_str(), + nowStr.c_str())); + DLOG(INFO) << "当前usersid:"<::_flushWorkersAndUsersToDBThread() { goto finish; } - if (!poolDB_->execute("DROP TEMPORARY TABLE IF EXISTS `mining_workers_tmp`;")) { + if (!poolDB_->execute( + "DROP TEMPORARY TABLE IF EXISTS `mining_workers_tmp`;")) { LOG(ERROR) << "DROP TEMPORARY TABLE `mining_workers_tmp` failure"; goto finish; } - if (!poolDB_->execute("CREATE TEMPORARY TABLE `mining_workers_tmp` like `mining_workers`;")) { + if (!poolDB_->execute("CREATE TEMPORARY TABLE `mining_workers_tmp` like " + "`mining_workers`;")) { LOG(ERROR) << "CREATE TEMPORARY TABLE `mining_workers_tmp` failure"; // something 
went wrong with the current mysql connection, try to reconnect. poolDB_->reconnect(); @@ -853,12 +946,13 @@ void StatsServerT::_flushWorkersAndUsersToDBThread() { LOG(ERROR) << "merge mining_workers failure"; goto finish; } - LOG(INFO) << "flush to DB... done, workers: " << workerCounter << ", users: " << userCounter; + LOG(INFO) << "flush to DB... done, workers: " << workerCounter + << ", users: " << userCounter; lastFlushTime_ = time(nullptr); // save flush timestamp to file, for monitor system if (!fileLastFlushTime_.empty()) - writeTime2File(fileLastFlushTime_.c_str(), lastFlushTime_); + writeTime2File(fileLastFlushTime_.c_str(), lastFlushTime_); finish: isInserting_ = false; @@ -869,11 +963,11 @@ void StatsServerT::removeExpiredWorkers() { size_t expiredWorkerCount = 0; size_t expiredUserCount = 0; - pthread_rwlock_wrlock(&rwlock_); // write lock + pthread_rwlock_wrlock(&rwlock_); // write lock // delete all expired workers - for (auto itr = workerSet_.begin(); itr != workerSet_.end(); ) { - const int32_t userId = itr->first.userId_; + for (auto itr = workerSet_.begin(); itr != workerSet_.end();) { + const int32_t userId = itr->first.userId_; shared_ptr> workerShare = itr->second; if (workerShare->isExpired()) { @@ -882,7 +976,7 @@ void StatsServerT::removeExpiredWorkers() { expiredWorkerCount++; totalWorkerCount_--; userWorkerCount_[userId]--; - + if (userWorkerCount_[userId] <= 0) { userWorkerCount_.erase(userId); } @@ -892,7 +986,7 @@ void StatsServerT::removeExpiredWorkers() { } // delete all expired users - for (auto itr = userSet_.begin(); itr != userSet_.end(); ) { + for (auto itr = userSet_.begin(); itr != userSet_.end();) { shared_ptr> workerShare = itr->second; if (workerShare->isExpired()) { @@ -907,15 +1001,16 @@ void StatsServerT::removeExpiredWorkers() { pthread_rwlock_unlock(&rwlock_); - LOG(INFO) << "removed expired workers: " << expiredWorkerCount << ", users: " << expiredUserCount; + LOG(INFO) << "removed expired workers: " << 
expiredWorkerCount + << ", users: " << expiredUserCount; } template -void StatsServerT::getWorkerStatusBatch(const vector &keys, - vector &workerStatus) { +void StatsServerT::getWorkerStatusBatch( + const vector &keys, vector &workerStatus) { workerStatus.resize(keys.size()); - vector> > ptrs; + vector>> ptrs; ptrs.resize(keys.size()); // find all shared pointer @@ -949,24 +1044,25 @@ void StatsServerT::getWorkerStatusBatch(const vector &keys, } template -WorkerStatus StatsServerT::mergeWorkerStatus(const vector &workerStatus) { +WorkerStatus StatsServerT::mergeWorkerStatus( + const vector &workerStatus) { WorkerStatus s; if (workerStatus.size() == 0) return s; for (size_t i = 0; i < workerStatus.size(); i++) { - s.accept1m_ += workerStatus[i].accept1m_; - s.accept5m_ += workerStatus[i].accept5m_; - s.accept15m_ += workerStatus[i].accept15m_; - s.reject15m_ += workerStatus[i].reject15m_; - s.accept1h_ += workerStatus[i].accept1h_; - s.reject1h_ += workerStatus[i].reject1h_; + s.accept1m_ += workerStatus[i].accept1m_; + s.accept5m_ += workerStatus[i].accept5m_; + s.accept15m_ += workerStatus[i].accept15m_; + s.reject15m_ += workerStatus[i].reject15m_; + s.accept1h_ += workerStatus[i].accept1h_; + s.reject1h_ += workerStatus[i].reject1h_; s.acceptCount_ += workerStatus[i].acceptCount_; if (workerStatus[i].lastShareTime_ > s.lastShareTime_) { s.lastShareTime_ = workerStatus[i].lastShareTime_; - s.lastShareIP_ = workerStatus[i].lastShareIP_; + s.lastShareIP_ = workerStatus[i].lastShareIP_; } } return s; @@ -979,16 +1075,18 @@ void StatsServerT::consumeShareLog(rd_kafka_message_t *rkmessage) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. // Not really an error. 
- // LOG(INFO) << "consumer reached end of " << rd_kafka_topic_name(rkmessage->rkt) + // LOG(INFO) << "consumer reached end of " << + // rd_kafka_topic_name(rkmessage->rkt) // << "[" << rkmessage->partition << "] " // << " message queue at offset " << rkmessage->offset; // acturlly return; } - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { @@ -999,8 +1097,10 @@ void StatsServerT::consumeShareLog(rd_kafka_message_t *rkmessage) { SHARE share; - if (!share.UnserializeWithVersion((const uint8_t *)(rkmessage->payload), rkmessage->len)) { - LOG(ERROR) << "parse share from kafka message failed rkmessage->len = "<< rkmessage->len ; + if (!share.UnserializeWithVersion( + (const uint8_t *)(rkmessage->payload), rkmessage->len)) { + LOG(ERROR) << "parse share from kafka message failed rkmessage->len = " + << rkmessage->len; return; } @@ -1025,15 +1125,16 @@ bool StatsServerT::setupThreadConsume() { // so in 60 mins there will be 100000/10*3600 = 36,000,000 shares. // data size will be 36,000,000 * sizeof(SHARE) = 1,728,000,000 Bytes. // - const int32_t kConsumeLatestN = 100000/10*3600; // 36,000,000 + const int32_t kConsumeLatestN = 100000 / 10 * 3600; // 36,000,000 map consumerOptions; // fetch.wait.max.ms: - // Maximum time the broker may wait to fill the response with fetch.min.bytes. + // Maximum time the broker may wait to fill the response with + // fetch.min.bytes. 
consumerOptions["fetch.wait.max.ms"] = "200"; - if (kafkaConsumer_.setup(RD_KAFKA_OFFSET_TAIL(kConsumeLatestN), - &consumerOptions) == false) { + if (kafkaConsumer_.setup( + RD_KAFKA_OFFSET_TAIL(kConsumeLatestN), &consumerOptions) == false) { LOG(INFO) << "setup consumer fail"; return false; } @@ -1051,15 +1152,16 @@ bool StatsServerT::setupThreadConsume() { map consumerOptions; // fetch.wait.max.ms: - // Maximum time the broker may wait to fill the response with fetch.min.bytes. + // Maximum time the broker may wait to fill the response with + // fetch.min.bytes. consumerOptions["fetch.wait.max.ms"] = "600"; - if (kafkaConsumerCommonEvents_.setup(RD_KAFKA_OFFSET_TAIL(kConsumeLatestN), - &consumerOptions) == false) { + if (kafkaConsumerCommonEvents_.setup( + RD_KAFKA_OFFSET_TAIL(kConsumeLatestN), &consumerOptions) == false) { LOG(INFO) << "setup common events consumer fail"; return false; } - + if (!kafkaConsumerCommonEvents_.checkAlive()) { LOG(ERROR) << "common events kafka brokers is not alive"; return false; @@ -1068,19 +1170,21 @@ bool StatsServerT::setupThreadConsume() { // run threads threadConsume_ = thread(&StatsServerT::runThreadConsume, this); - threadConsumeCommonEvents_ = thread(&StatsServerT::runThreadConsumeCommonEvents, this); - + threadConsumeCommonEvents_ = + thread(&StatsServerT::runThreadConsumeCommonEvents, this); + return true; } template void StatsServerT::runThreadConsume() { LOG(INFO) << "start sharelog consume thread"; - time_t lastCleanTime = time(nullptr); - time_t lastFlushDBTime = 0; // Set to 0 to log lastShareTime_ of the first share + time_t lastCleanTime = time(nullptr); + time_t lastFlushDBTime = + 0; // Set to 0 to log lastShareTime_ of the first share - const time_t kExpiredCleanInterval = 60*30; - const int32_t kTimeoutMs = 1000; // consumer timeout + const time_t kExpiredCleanInterval = 60 * 30; + const int32_t kTimeoutMs = 1000; // consumer timeout // consuming history shares while (running_) { @@ -1093,25 +1197,29 @@ void 
StatsServerT::runThreadConsume() { // consume share log (lastShareTime_ will be updated) consumeShareLog(rkmessage); - rd_kafka_message_destroy(rkmessage); /* Return message to rdkafka */ + rd_kafka_message_destroy(rkmessage); /* Return message to rdkafka */ } if (lastFlushDBTime + kFlushDBInterval_ < time(nullptr)) { - LOG(INFO) << "consuming history shares: " << date("%F %T", lastShareTime_); + LOG(INFO) << "consuming history shares: " + << date("%F %T", lastShareTime_); lastFlushDBTime = time(nullptr); } // don't flush database while consuming history shares. - // otherwise, users' hashrate will be updated to 0 when statshttpd restarted. + // otherwise, users' hashrate will be updated to 0 when statshttpd + // restarted. - // the initialization state ends after consuming a share that generated in the last minute. + // the initialization state ends after consuming a share that generated in + // the last minute. if (lastShareTime_ + 60 >= time(nullptr)) { isInitializing_ = false; break; } // the initialization state ends after no shares in 5 minutes - // LastCleanTime is used here because it records the latest time that got a non-empty message + // LastCleanTime is used here because it records the latest time that got a + // non-empty message if (rkmessage == nullptr && lastCleanTime + 300 < time(nullptr)) { isInitializing_ = false; break; @@ -1126,7 +1234,7 @@ void StatsServerT::runThreadConsume() { if (rkmessage != nullptr) { // consume share log (lastShareTime_ will be updated) consumeShareLog(rkmessage); - rd_kafka_message_destroy(rkmessage); /* Return message to rdkafka */ + rd_kafka_message_destroy(rkmessage); /* Return message to rdkafka */ } // @@ -1155,14 +1263,14 @@ void StatsServerT::runThreadConsume() { } LOG(INFO) << "stop sharelog consume thread"; - stop(); // if thread exit, we must call server to stop + stop(); // if thread exit, we must call server to stop } template void StatsServerT::runThreadConsumeCommonEvents() { LOG(INFO) << "start common 
events consume thread"; - const int32_t kTimeoutMs = 3000; // consumer timeout + const int32_t kTimeoutMs = 3000; // consumer timeout while (running_) { // @@ -1179,7 +1287,7 @@ void StatsServerT::runThreadConsumeCommonEvents() { // consume share log consumeCommonEvents(rkmessage); - rd_kafka_message_destroy(rkmessage); /* Return message to rdkafka */ + rd_kafka_message_destroy(rkmessage); /* Return message to rdkafka */ } LOG(INFO) << "stop common events consume thread"; @@ -1192,16 +1300,18 @@ void StatsServerT::consumeCommonEvents(rd_kafka_message_t *rkmessage) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. // Not really an error. - // LOG(INFO) << "consumer reached end of " << rd_kafka_topic_name(rkmessage->rkt) + // LOG(INFO) << "consumer reached end of " << + // rd_kafka_topic_name(rkmessage->rkt) // << "[" << rkmessage->partition << "] " // << " message queue at offset " << rkmessage->offset; // acturlly return; } - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { @@ -1210,7 +1320,7 @@ void StatsServerT::consumeCommonEvents(rd_kafka_message_t *rkmessage) { return; } - const char *message = (const char*)rkmessage->payload; + const char *message = (const char *)rkmessage->payload; DLOG(INFO) << "A New Common Event: " << string(message, rkmessage->len); JsonNode r; @@ -1220,7 +1330,7 @@ void StatsServerT::consumeCommonEvents(rd_kafka_message_t *rkmessage) { } // check fields - if (r["type"].type() != Utilities::JS::type::Str || 
+ if (r["type"].type() != Utilities::JS::type::Str || r["content"].type() != Utilities::JS::type::Obj) { LOG(ERROR) << "common event missing some fields"; return; @@ -1229,47 +1339,54 @@ void StatsServerT::consumeCommonEvents(rd_kafka_message_t *rkmessage) { // update worker status if (r["type"].str() == "worker_update") { // check fields - if (r["content"]["user_id"].type() != Utilities::JS::type::Int || - r["content"]["worker_id"].type() != Utilities::JS::type::Int || + if (r["content"]["user_id"].type() != Utilities::JS::type::Int || + r["content"]["worker_id"].type() != Utilities::JS::type::Int || r["content"]["worker_name"].type() != Utilities::JS::type::Str || r["content"]["miner_agent"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "common event `worker_update` missing some fields"; return; } - int32_t userId = r["content"]["user_id"].int32(); - int64_t workerId = r["content"]["worker_id"].int64(); + int32_t userId = r["content"]["user_id"].int32(); + int64_t workerId = r["content"]["worker_id"].int64(); string workerName = filterWorkerName(r["content"]["worker_name"].str()); string minerAgent = filterWorkerName(r["content"]["miner_agent"].str()); if (poolDBCommonEvents_ != nullptr) { - updateWorkerStatusToDB(userId, workerId, workerName.c_str(), minerAgent.c_str()); + updateWorkerStatusToDB( + userId, workerId, workerName.c_str(), minerAgent.c_str()); } if (redisCommonEvents_ != nullptr) { - updateWorkerStatusToRedis(userId, workerId, workerName.c_str(), minerAgent.c_str()); + updateWorkerStatusToRedis( + userId, workerId, workerName.c_str(), minerAgent.c_str()); } } - } template -bool StatsServerT::updateWorkerStatusToRedis(const int32_t userId, const int64_t workerId, - const char *workerName, const char *minerAgent) { +bool StatsServerT::updateWorkerStatusToRedis( + const int32_t userId, + const int64_t workerId, + const char *workerName, + const char *minerAgent) { string key = getRedisKeyMiningWorker(userId, workerId); // update info { - 
redisCommonEvents_->prepare({"HMSET", key, - "worker_name", workerName, - "miner_agent", minerAgent, - "updated_at", std::to_string(time(nullptr)) - }); + redisCommonEvents_->prepare({"HMSET", + key, + "worker_name", + workerName, + "miner_agent", + minerAgent, + "updated_at", + std::to_string(time(nullptr))}); RedisResult r = redisCommonEvents_->execute(); if (r.type() != REDIS_REPLY_STATUS || r.str() != "OK") { LOG(INFO) << "redis HMSET failed, item key: " << key << ", " - << "reply type: " << r.type() << ", " - << "reply str: " << r.str(); + << "reply type: " << r.type() << ", " + << "reply str: " << r.str(); // try ping & reconnect redis, so last update may success if (!redisCommonEvents_->ping()) { @@ -1282,14 +1399,15 @@ bool StatsServerT::updateWorkerStatusToRedis(const int32_t userId, const // set key expire if (redisKeyExpire_ > 0) { - redisCommonEvents_->prepare({"EXPIRE", key, std::to_string(redisKeyExpire_)}); + redisCommonEvents_->prepare( + {"EXPIRE", key, std::to_string(redisKeyExpire_)}); RedisResult r = redisCommonEvents_->execute(); if (r.type() != REDIS_REPLY_INTEGER || r.integer() != 1) { LOG(INFO) << "redis EXPIRE failed, item key: " << key << ", " - << "reply type: " << r.type() << ", " - << "reply integer: " << r.integer() << "," - << "reply str: " << r.str(); + << "reply type: " << r.type() << ", " + << "reply integer: " << r.integer() << "," + << "reply str: " << r.str(); // try ping & reconnect redis, so last update may success if (!redisCommonEvents_->ping()) { @@ -1302,10 +1420,12 @@ bool StatsServerT::updateWorkerStatusToRedis(const int32_t userId, const // update index if (redisIndexPolicy_ & REDIS_INDEX_WORKER_NAME) { - updateWorkerStatusIndexToRedis(userId, "worker_name", workerName, std::to_string(workerId)); + updateWorkerStatusIndexToRedis( + userId, "worker_name", workerName, std::to_string(workerId)); } if (redisIndexPolicy_ & REDIS_INDEX_MINER_AGENT) { - updateWorkerStatusIndexToRedis(userId, "miner_agent", minerAgent, 
std::to_string(workerId)); + updateWorkerStatusIndexToRedis( + userId, "miner_agent", minerAgent, std::to_string(workerId)); } // publish notification @@ -1315,8 +1435,8 @@ bool StatsServerT::updateWorkerStatusToRedis(const int32_t userId, const if (r.type() != REDIS_REPLY_INTEGER) { LOG(INFO) << "redis PUBLISH failed, item key: " << key << ", " - << "reply type: " << r.type() << ", " - << "reply str: " << r.str(); + << "reply type: " << r.type() << ", " + << "reply str: " << r.str(); // try ping & reconnect redis, so last update may success if (!redisCommonEvents_->ping()) { @@ -1331,13 +1451,19 @@ bool StatsServerT::updateWorkerStatusToRedis(const int32_t userId, const } template -void StatsServerT::updateWorkerStatusIndexToRedis(const int32_t userId, const string &key, - const string &score, const string &value) { - +void StatsServerT::updateWorkerStatusIndexToRedis( + const int32_t userId, + const string &key, + const string &score, + const string &value) { + // convert string to number uint64_t scoreRank = getAlphaNumRank(score); - redisCommonEvents_->prepare({"ZADD", getRedisKeyIndex(userId, key), std::to_string(scoreRank), value}); + redisCommonEvents_->prepare({"ZADD", + getRedisKeyIndex(userId, key), + std::to_string(scoreRank), + value}); RedisResult r = redisCommonEvents_->execute(); if (r.type() != REDIS_REPLY_INTEGER) { @@ -1348,17 +1474,22 @@ void StatsServerT::updateWorkerStatusIndexToRedis(const int32_t userId, c } template -bool StatsServerT::updateWorkerStatusToDB(const int32_t userId, const int64_t workerId, - const char *workerName, const char *minerAgent) { +bool StatsServerT::updateWorkerStatusToDB( + const int32_t userId, + const int64_t workerId, + const char *workerName, + const char *minerAgent) { string sql; char **row = nullptr; MySQLResult res; const string nowStr = date("%F %T"); // find the miner - sql = Strings::Format("SELECT `group_id` FROM `mining_workers` " - " WHERE `puid`=%d AND `worker_id`= %" PRId64"", - userId, workerId); + 
sql = Strings::Format( + "SELECT `group_id` FROM `mining_workers` " + " WHERE `puid`=%d AND `worker_id`= %" PRId64 "", + userId, + workerId); poolDBCommonEvents_->query(sql, res); if (res.numRows() != 0 && (row = res.nextRow()) != nullptr) { @@ -1366,31 +1497,39 @@ bool StatsServerT::updateWorkerStatusToDB(const int32_t userId, const int // group Id == 0: means the miner's status is 'deleted' // we need to move from 'deleted' group to 'default' group. - sql = Strings::Format("UPDATE `mining_workers` SET `group_id`=%d, " - " `worker_name`=\"%s\", `miner_agent`=\"%s\", " - " `updated_at`=\"%s\" " - " WHERE `puid`=%d AND `worker_id`= %" PRId64"", - groupId == 0 ? userId * -1 : groupId, - workerName, minerAgent, - nowStr.c_str(), - userId, workerId); - } - else { + sql = Strings::Format( + "UPDATE `mining_workers` SET `group_id`=%d, " + " `worker_name`=\"%s\", `miner_agent`=\"%s\", " + " `updated_at`=\"%s\" " + " WHERE `puid`=%d AND `worker_id`= %" PRId64 "", + groupId == 0 ? userId * -1 : groupId, + workerName, + minerAgent, + nowStr.c_str(), + userId, + workerId); + } else { // we have to use 'ON DUPLICATE KEY UPDATE', because 'statshttpd' may insert // items to table.mining_workers between we 'select' and 'insert' gap. // 'statshttpd' will always set an empty 'worker_name'. 
- sql = Strings::Format("INSERT INTO `mining_workers`(`puid`,`worker_id`," - " `group_id`,`worker_name`,`miner_agent`," - " `created_at`,`updated_at`) " - " VALUES(%d,%" PRId64",%d,\"%s\",\"%s\",\"%s\",\"%s\")" - " ON DUPLICATE KEY UPDATE " - " `worker_name`= \"%s\",`miner_agent`=\"%s\",`updated_at`=\"%s\" ", - userId, workerId, - userId * -1, // default group id - workerName, minerAgent, - nowStr.c_str(), nowStr.c_str(), - workerName, minerAgent, - nowStr.c_str()); + sql = Strings::Format( + "INSERT INTO `mining_workers`(`puid`,`worker_id`," + " `group_id`,`worker_name`,`miner_agent`," + " `created_at`,`updated_at`) " + " VALUES(%d,%" PRId64 + ",%d,\"%s\",\"%s\",\"%s\",\"%s\")" + " ON DUPLICATE KEY UPDATE " + " `worker_name`= \"%s\",`miner_agent`=\"%s\",`updated_at`=\"%s\" ", + userId, + workerId, + userId * -1, // default group id + workerName, + minerAgent, + nowStr.c_str(), + nowStr.c_str(), + workerName, + minerAgent, + nowStr.c_str()); } if (poolDBCommonEvents_->execute(sql) == false) { @@ -1408,23 +1547,25 @@ bool StatsServerT::updateWorkerStatusToDB(const int32_t userId, const int } template -typename StatsServerT::ServerStatus StatsServerT::getServerStatus() { +typename StatsServerT::ServerStatus +StatsServerT::getServerStatus() { ServerStatus s; - s.uptime_ = (uint32_t)(time(nullptr) - uptime_); - s.requestCount_ = requestCount_; - s.workerCount_ = totalWorkerCount_; - s.userCount_ = totalUserCount_; + s.uptime_ = (uint32_t)(time(nullptr) - uptime_); + s.requestCount_ = requestCount_; + s.workerCount_ = totalWorkerCount_; + s.userCount_ = totalUserCount_; s.responseBytes_ = responseBytes_; - s.poolStatus_ = poolWorker_.getWorkerStatus(); + s.poolStatus_ = poolWorker_.getWorkerStatus(); return s; } template -void StatsServerT::httpdServerStatus(struct evhttp_request *req, void *arg) { - evhttp_add_header(evhttp_request_get_output_headers(req), - "Content-Type", "text/json"); +void StatsServerT::httpdServerStatus( + struct evhttp_request *req, void *arg) { 
+ evhttp_add_header( + evhttp_request_get_output_headers(req), "Content-Type", "text/json"); StatsServerT *server = (StatsServerT *)arg; server->requestCount_++; @@ -1432,32 +1573,46 @@ void StatsServerT::httpdServerStatus(struct evhttp_request *req, void *ar // service is initializing, return a error if (server->isInitializing_) { - evbuffer_add_printf(evb, "{\"err_no\":2,\"err_msg\":\"service is initializing...\"}"); + evbuffer_add_printf( + evb, "{\"err_no\":2,\"err_msg\":\"service is initializing...\"}"); evhttp_send_reply(req, HTTP_OK, "OK", evb); evbuffer_free(evb); return; } - + StatsServerT::ServerStatus s = server->getServerStatus(); - evbuffer_add_printf(evb, "{\"err_no\":0,\"err_msg\":\"\"," - "\"data\":{\"uptime\":\"%04u d %02u h %02u m %02u s\"," - "\"request\":%" PRIu64",\"repbytes\":%" PRIu64"," - "\"pool\":{\"accept\":[%" PRIu64",%" PRIu64",%" PRIu64",%" PRIu64"]," - "\"reject\":[0,0,%" PRIu64",%" PRIu64"],\"accept_count\":%" PRIu32"," - "\"workers\":%" PRIu64",\"users\":%" PRIu64"" - "}}}", - s.uptime_/86400, (s.uptime_%86400)/3600, - (s.uptime_%3600)/60, s.uptime_%60, - s.requestCount_, s.responseBytes_, - // accept - s.poolStatus_.accept1m_, s.poolStatus_.accept5m_, - s.poolStatus_.accept15m_, s.poolStatus_.accept1h_, - // reject - s.poolStatus_.reject15m_, s.poolStatus_.reject1h_, - s.poolStatus_.acceptCount_, - s.workerCount_, s.userCount_); + evbuffer_add_printf( + evb, + "{\"err_no\":0,\"err_msg\":\"\"," + "\"data\":{\"uptime\":\"%04u d %02u h %02u m %02u s\"," + "\"request\":%" PRIu64 ",\"repbytes\":%" PRIu64 + "," + "\"pool\":{\"accept\":[%" PRIu64 ",%" PRIu64 ",%" PRIu64 ",%" PRIu64 + "]," + "\"reject\":[0,0,%" PRIu64 ",%" PRIu64 "],\"accept_count\":%" PRIu32 + "," + "\"workers\":%" PRIu64 ",\"users\":%" PRIu64 + "" + "}}}", + s.uptime_ / 86400, + (s.uptime_ % 86400) / 3600, + (s.uptime_ % 3600) / 60, + s.uptime_ % 60, + s.requestCount_, + s.responseBytes_, + // accept + s.poolStatus_.accept1m_, + s.poolStatus_.accept5m_, + 
s.poolStatus_.accept15m_, + s.poolStatus_.accept1h_, + // reject + s.poolStatus_.reject15m_, + s.poolStatus_.reject1h_, + s.poolStatus_.acceptCount_, + s.workerCount_, + s.userCount_); server->responseBytes_ += evbuffer_get_length(evb); evhttp_send_reply(req, HTTP_OK, "OK", evb); @@ -1465,14 +1620,15 @@ void StatsServerT::httpdServerStatus(struct evhttp_request *req, void *ar } template -void StatsServerT::httpdGetWorkerStatus(struct evhttp_request *req, void *arg) { - evhttp_add_header(evhttp_request_get_output_headers(req), - "Content-Type", "text/json"); +void StatsServerT::httpdGetWorkerStatus( + struct evhttp_request *req, void *arg) { + evhttp_add_header( + evhttp_request_get_output_headers(req), "Content-Type", "text/json"); StatsServerT *server = (StatsServerT *)arg; server->requestCount_++; evhttp_cmd_type rMethod = evhttp_request_get_command(req); - char *query = nullptr; // remember free it + char *query = nullptr; // remember free it if (rMethod == EVHTTP_REQ_GET) { // GET @@ -1482,15 +1638,14 @@ void StatsServerT::httpdGetWorkerStatus(struct evhttp_request *req, void query = strdup(uriQuery); evhttp_uri_free(uri); } - } - else if (rMethod == EVHTTP_REQ_POST) { + } else if (rMethod == EVHTTP_REQ_POST) { // POST struct evbuffer *evbIn = evhttp_request_get_input_buffer(req); size_t len = 0; if (evbIn != nullptr && (len = evbuffer_get_length(evbIn)) > 0) { query = (char *)malloc(len + 1); evbuffer_copyout(evbIn, query, len); - query[len] = '\0'; // evbuffer is not include '\0' + query[len] = '\0'; // evbuffer is not include '\0' } } @@ -1499,7 +1654,8 @@ void StatsServerT::httpdGetWorkerStatus(struct evhttp_request *req, void // service is initializing, return if (server->isInitializing_) { - evbuffer_add_printf(evb, "{\"err_no\":2,\"err_msg\":\"service is initializing...\"}"); + evbuffer_add_printf( + evb, "{\"err_no\":2,\"err_msg\":\"service is initializing...\"}"); evhttp_send_reply(req, HTTP_OK, "OK", evb); evbuffer_free(evb); @@ -1518,9 +1674,9 @@ 
void StatsServerT::httpdGetWorkerStatus(struct evhttp_request *req, void // parse query struct evkeyvalq params; evhttp_parse_query_str(query, ¶ms); - const char *pUserId = evhttp_find_header(¶ms, "user_id"); + const char *pUserId = evhttp_find_header(¶ms, "user_id"); const char *pWorkerId = evhttp_find_header(¶ms, "worker_id"); - const char *pIsMerge = evhttp_find_header(¶ms, "is_merge"); + const char *pIsMerge = evhttp_find_header(¶ms, "is_merge"); if (pUserId == nullptr || pWorkerId == nullptr) { evbuffer_add_printf(evb, "{\"err_no\":1,\"err_msg\":\"invalid args\"}"); @@ -1543,14 +1699,17 @@ finish: } template -void StatsServerT::getWorkerStatus(struct evbuffer *evb, const char *pUserId, - const char *pWorkerId, const char *pIsMerge) { +void StatsServerT::getWorkerStatus( + struct evbuffer *evb, + const char *pUserId, + const char *pWorkerId, + const char *pIsMerge) { assert(pWorkerId != nullptr); const int32_t userId = atoi(pUserId); bool isMerge = false; if (pIsMerge != nullptr && (*pIsMerge == 'T' || *pIsMerge == 't')) { - isMerge = true; + isMerge = true; } vector vWorkerIdsStr; @@ -1577,32 +1736,41 @@ void StatsServerT::getWorkerStatus(struct evbuffer *evb, const char *pUse for (const auto &status : workerStatus) { // extra infomations string extraInfo; - if (!isMerge && keys[i].workerId_ == 0) { // all workers of this user + if (!isMerge && keys[i].workerId_ == 0) { // all workers of this user pthread_rwlock_rdlock(&rwlock_); extraInfo = Strings::Format(",\"workers\":%d", userWorkerCount_[userId]); pthread_rwlock_unlock(&rwlock_); } - evbuffer_add_printf(evb, - "%s\"%" PRId64"\":{\"accept\":[%" PRIu64",%" PRIu64",%" PRIu64",%" PRIu64"]" - ",\"reject\":[0,0,%" PRIu64",%" PRIu64"],\"accept_count\":%" PRIu32"" - ",\"last_share_ip\":\"%s\",\"last_share_time\":%" PRIu64 - "%s}", - (i == 0 ? "" : ","), - (isMerge ? 
0 : keys[i].workerId_), - status.accept1m_, status.accept5m_, status.accept15m_, status.accept1h_, - status.reject15m_, status.reject1h_, - status.acceptCount_, - status.lastShareIP_.toString().c_str(), status.lastShareTime_, - extraInfo.length() ? extraInfo.c_str() : ""); + evbuffer_add_printf( + evb, + "%s\"%" PRId64 "\":{\"accept\":[%" PRIu64 ",%" PRIu64 ",%" PRIu64 + ",%" PRIu64 + "]" + ",\"reject\":[0,0,%" PRIu64 ",%" PRIu64 "],\"accept_count\":%" PRIu32 + "" + ",\"last_share_ip\":\"%s\",\"last_share_time\":%" PRIu64 "%s}", + (i == 0 ? "" : ","), + (isMerge ? 0 : keys[i].workerId_), + status.accept1m_, + status.accept5m_, + status.accept15m_, + status.accept1h_, + status.reject15m_, + status.reject1h_, + status.acceptCount_, + status.lastShareIP_.toString().c_str(), + status.lastShareTime_, + extraInfo.length() ? extraInfo.c_str() : ""); i++; } } template -void StatsServerT::httpdGetFlushDBTime(struct evhttp_request *req, void *arg) { - evhttp_add_header(evhttp_request_get_output_headers(req), - "Content-Type", "text/json"); +void StatsServerT::httpdGetFlushDBTime( + struct evhttp_request *req, void *arg) { + evhttp_add_header( + evhttp_request_get_output_headers(req), "Content-Type", "text/json"); StatsServerT *server = (StatsServerT *)arg; server->requestCount_++; @@ -1610,14 +1778,19 @@ void StatsServerT::httpdGetFlushDBTime(struct evhttp_request *req, void * // service is initializing, return if (server->isInitializing_) { - evbuffer_add_printf(evb, "{\"err_no\":2,\"err_msg\":\"service is initializing...\"}"); + evbuffer_add_printf( + evb, "{\"err_no\":2,\"err_msg\":\"service is initializing...\"}"); evhttp_send_reply(req, HTTP_OK, "OK", evb); evbuffer_free(evb); return; } - - evbuffer_add_printf(evb, "{\"err_no\":0,\"err_msg\":\"\",\"data\":{\"flush_db_time\":%" PRId64 "}}", (int64_t)server->lastFlushTime_); + + evbuffer_add_printf( + evb, + "{\"err_no\":0,\"err_msg\":\"\",\"data\":{\"flush_db_time\":%" PRId64 + "}}", + (int64_t)server->lastFlushTime_); 
server->responseBytes_ += evbuffer_get_length(evb); evhttp_send_reply(req, HTTP_OK, "OK", evb); @@ -1632,17 +1805,26 @@ void StatsServerT::runHttpd() { base_ = event_base_new(); httpd = evhttp_new(base_); - evhttp_set_allowed_methods(httpd, EVHTTP_REQ_GET | EVHTTP_REQ_POST | EVHTTP_REQ_HEAD); + evhttp_set_allowed_methods( + httpd, EVHTTP_REQ_GET | EVHTTP_REQ_POST | EVHTTP_REQ_HEAD); evhttp_set_timeout(httpd, 5 /* timeout in seconds */); - evhttp_set_cb(httpd, "/", StatsServerT::httpdServerStatus, this); - evhttp_set_cb(httpd, "/worker_status", StatsServerT::httpdGetWorkerStatus, this); - evhttp_set_cb(httpd, "/worker_status/", StatsServerT::httpdGetWorkerStatus, this); - evhttp_set_cb(httpd, "/flush_db_time", StatsServerT::httpdGetFlushDBTime, this); - - handle = evhttp_bind_socket_with_handle(httpd, httpdHost_.c_str(), httpdPort_); + evhttp_set_cb(httpd, "/", StatsServerT::httpdServerStatus, this); + evhttp_set_cb( + httpd, "/worker_status", StatsServerT::httpdGetWorkerStatus, this); + evhttp_set_cb( + httpd, + "/worker_status/", + StatsServerT::httpdGetWorkerStatus, + this); + evhttp_set_cb( + httpd, "/flush_db_time", StatsServerT::httpdGetFlushDBTime, this); + + handle = + evhttp_bind_socket_with_handle(httpd, httpdHost_.c_str(), httpdPort_); if (!handle) { - LOG(ERROR) << "couldn't bind to port: " << httpdPort_ << ", host: " << httpdHost_ << ", exiting."; + LOG(ERROR) << "couldn't bind to port: " << httpdPort_ + << ", host: " << httpdHost_ << ", exiting."; return; } event_base_dispatch(base_); diff --git a/src/Stratum.cc b/src/Stratum.cc index 09426d2d4..ade9c8687 100644 --- a/src/Stratum.cc +++ b/src/Stratum.cc @@ -37,11 +37,9 @@ string filterWorkerName(const string &workerName) { s.reserve(workerName.size()); for (const auto &c : workerName) { - if (('a' <= c && c <= 'z') || - ('A' <= c && c <= 'Z') || - ('0' <= c && c <= '9') || - c == '-' || c == '.' 
|| c == '_' || c == ':' || - c == '|' || c == '^' || c == '/') { + if (('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9') || c == '-' || c == '.' || c == '_' || + c == ':' || c == '|' || c == '^' || c == '/') { s += c; } } @@ -49,61 +47,64 @@ string filterWorkerName(const string &workerName) { return s; } - //////////////////////////////// StratumStatus //////////////////////////////// -const char * StratumStatus::toString(int err) { +const char *StratumStatus::toString(int err) { switch (err) { - case ACCEPT: - return "Share accepted"; - case ACCEPT_STALE: - return "Share accepted (stale)"; - case SOLVED: - return "Share accepted and solved"; - case SOLVED_STALE: - return "Share accepted and solved (stale)"; - case REJECT_NO_REASON: - return "Share rejected"; - - case JOB_NOT_FOUND: - return "Job not found (=stale)"; - case DUPLICATE_SHARE: - return "Duplicate share"; - case LOW_DIFFICULTY: - return "Low difficulty"; - case UNAUTHORIZED: - return "Unauthorized worker"; - case NOT_SUBSCRIBED: - return "Not subscribed"; - - case ILLEGAL_METHOD: - return "Illegal method"; - case ILLEGAL_PARARMS: - return "Illegal params"; - case IP_BANNED: - return "Ip banned"; - case INVALID_USERNAME: - return "Invalid username"; - case INTERNAL_ERROR: - return "Internal error"; - case TIME_TOO_OLD: - return "Time too old"; - case TIME_TOO_NEW: - return "Time too new"; - case ILLEGAL_VERMASK: - return "Invalid version mask"; + case ACCEPT: + return "Share accepted"; + case ACCEPT_STALE: + return "Share accepted (stale)"; + case SOLVED: + return "Share accepted and solved"; + case SOLVED_STALE: + return "Share accepted and solved (stale)"; + case REJECT_NO_REASON: + return "Share rejected"; + + case JOB_NOT_FOUND: + return "Job not found (=stale)"; + case DUPLICATE_SHARE: + return "Duplicate share"; + case LOW_DIFFICULTY: + return "Low difficulty"; + case UNAUTHORIZED: + return "Unauthorized worker"; + case NOT_SUBSCRIBED: + return "Not subscribed"; + + case 
ILLEGAL_METHOD: + return "Illegal method"; + case ILLEGAL_PARARMS: + return "Illegal params"; + case IP_BANNED: + return "Ip banned"; + case INVALID_USERNAME: + return "Invalid username"; + case INTERNAL_ERROR: + return "Internal error"; + case TIME_TOO_OLD: + return "Time too old"; + case TIME_TOO_NEW: + return "Time too new"; + case ILLEGAL_VERMASK: + return "Invalid version mask"; #ifdef WORK_WITH_STRATUM_SWITCHER - case CLIENT_IS_NOT_SWITCHER: - return "Client is not a stratum switcher"; + case CLIENT_IS_NOT_SWITCHER: + return "Client is not a stratum switcher"; #endif - case UNKNOWN: default: - return "Unknown"; + case UNKNOWN: + default: + return "Unknown"; } } //////////////////////////////// StratumWorker //////////////////////////////// -StratumWorker::StratumWorker(): userId_(0), workerHashId_(0) {} +StratumWorker::StratumWorker() + : userId_(0) + , workerHashId_(0) { +} void StratumWorker::reset() { userId_ = 0; @@ -122,16 +123,17 @@ string StratumWorker::getUserName(const string &fullName) const { return fullName.substr(0, pos); } -void StratumWorker::setUserIDAndNames(const int32_t userId, const string &fullName) { +void StratumWorker::setUserIDAndNames( + const int32_t userId, const string &fullName) { reset(); userId_ = userId; auto pos = fullName.find("."); if (pos == fullName.npos) { - userName_ = fullName; + userName_ = fullName; } else { - userName_ = fullName.substr(0, pos); - workerName_ = fullName.substr(pos+1); + userName_ = fullName.substr(0, pos); + workerName_ = fullName.substr(pos + 1); } // the worker name will insert to DB, so must be filter @@ -158,11 +160,11 @@ int64_t StratumWorker::calcWorkerId(const string &workerName) { const uint256 workerNameHash = Hash(workerName.begin(), workerName.end()); // need to convert to uint64_t first than copy memory - const uint64_t tmpId = strtoull(workerNameHash.ToString().substr(0, 16).c_str(), - nullptr, 16); + const uint64_t tmpId = + strtoull(workerNameHash.ToString().substr(0, 16).c_str(), 
nullptr, 16); memcpy((uint8_t *)&workerHashId, (uint8_t *)&tmpId, 8); - if (workerHashId == 0) { // zero is kept + if (workerHashId == 0) { // zero is kept workerHashId++; } @@ -171,11 +173,8 @@ int64_t StratumWorker::calcWorkerId(const string &workerName) { ////////////////////////////////// StratumJob //////////////////////////////// StratumJob::StratumJob() - : jobId_(0) -{ + : jobId_(0) { } -StratumJob::~StratumJob() -{ - +StratumJob::~StratumJob() { } diff --git a/src/Stratum.h b/src/Stratum.h index 3c4aad5c8..41eb51275 100644 --- a/src/Stratum.h +++ b/src/Stratum.h @@ -31,26 +31,20 @@ // default worker name #define DEFAULT_WORKER_NAME "__default__" -inline uint32_t jobId2Time(uint64_t jobId) -{ +inline uint32_t jobId2Time(uint64_t jobId) { return (uint32_t)((jobId >> 32) & 0x00000000FFFFFFFFULL); } string filterWorkerName(const string &workerName); -inline string filterWorkerName(const char *workerName) -{ +inline string filterWorkerName(const char *workerName) { return filterWorkerName(std::string(workerName)); } - - //////////////////////////////// StratumError //////////////////////////////// -class StratumStatus -{ +class StratumStatus { public: - enum - { + enum { // make ACCEPT and SOLVED be two singular value, // so code bug is unlikely to make false ACCEPT shares @@ -58,14 +52,16 @@ class StratumStatus ACCEPT = 1798084231, // bin(01101011 00101100 10010110 10000111) // share reached the job target but the job is stale - // if uncle block is allowed in the chain, share can be accept as this status + // if uncle block is allowed in the chain, share can be accept as this + // status ACCEPT_STALE = 950395421, // bin(00111000 10100101 11100010 00011101) // share reached the network target SOLVED = 1422486894, // bin(‭01010100 11001001 01101101 01101110‬) // share reached the network target but the job is stale - // if uncle block is allowed in the chain, share can be accept as this status + // if uncle block is allowed in the chain, share can be accept as 
this + // status SOLVED_STALE = 1713984938, // bin(01100110 00101001 01010101 10101010) REJECT_NO_REASON = 0, @@ -93,10 +89,10 @@ class StratumStatus }; static const char *toString(int err); - + inline static bool isAccepted(int status) { return (status == ACCEPT) || (status == ACCEPT_STALE) || - (status == SOLVED) || (status == SOLVED_STALE); + (status == SOLVED) || (status == SOLVED_STALE); } inline static bool isStale(int status) { @@ -109,8 +105,7 @@ class StratumStatus }; //////////////////////////////// StratumWorker //////////////////////////////// -class StratumWorker -{ +class StratumWorker { public: int32_t userId_; int64_t workerHashId_; // substr(0, 8, HASH(wokerName)) @@ -154,12 +149,12 @@ class StratumWorker // so job_ids can be eventually rotated. // // -class StratumJob -{ +class StratumJob { public: // jobId: timestamp + gbtHash, hex string, we need to make sure jobId is // unique in a some time, jobId can convert to uint64_t uint64_t jobId_; + protected: StratumJob(); // protected so cannot create it. 
public: @@ -168,36 +163,43 @@ class StratumJob virtual string serializeToJson() const = 0; virtual bool unserializeFromJson(const char *s, size_t len) = 0; virtual uint32_t jobTime() const { return jobId2Time(jobId_); } - }; // shares submitted by this session, for duplicate share check // TODO: Move bitcoin-specific fields to the subclass struct LocalShare { - uint64_t exNonce2_; // extra nonce2 fixed 8 bytes - uint32_t nonce_; // nonce in block header - uint32_t time_; // nTime in block header - uint32_t versionMask_; // block version mask - - LocalShare(uint64_t exNonce2, uint32_t nonce, uint32_t time, uint32_t versionMask): - exNonce2_(exNonce2), nonce_(nonce), time_(time), versionMask_(versionMask) {} - - LocalShare(uint64_t exNonce2, uint32_t nonce, uint32_t time): - exNonce2_(exNonce2), nonce_(nonce), time_(time), versionMask_(0) {} - - LocalShare & operator=(const LocalShare &other) { + uint64_t exNonce2_; // extra nonce2 fixed 8 bytes + uint32_t nonce_; // nonce in block header + uint32_t time_; // nTime in block header + uint32_t versionMask_; // block version mask + + LocalShare( + uint64_t exNonce2, uint32_t nonce, uint32_t time, uint32_t versionMask) + : exNonce2_(exNonce2) + , nonce_(nonce) + , time_(time) + , versionMask_(versionMask) {} + + LocalShare(uint64_t exNonce2, uint32_t nonce, uint32_t time) + : exNonce2_(exNonce2) + , nonce_(nonce) + , time_(time) + , versionMask_(0) {} + + LocalShare &operator=(const LocalShare &other) { exNonce2_ = other.exNonce2_; - nonce_ = other.nonce_; - time_ = other.time_; + nonce_ = other.nonce_; + time_ = other.time_; versionMask_ = other.versionMask_; return *this; } bool operator<(const LocalShare &r) const { - if (exNonce2_ < r.exNonce2_ || - (exNonce2_ == r.exNonce2_ && nonce_ < r.nonce_) || - (exNonce2_ == r.exNonce2_ && nonce_ == r.nonce_ && time_ < r.time_) || - (exNonce2_ == r.exNonce2_ && nonce_ == r.nonce_ && time_ == r.time_ && versionMask_ < r.versionMask_)) { + if (exNonce2_ < r.exNonce2_ || + 
(exNonce2_ == r.exNonce2_ && nonce_ < r.nonce_) || + (exNonce2_ == r.exNonce2_ && nonce_ == r.nonce_ && time_ < r.time_) || + (exNonce2_ == r.exNonce2_ && nonce_ == r.nonce_ && time_ == r.time_ && + versionMask_ < r.versionMask_)) { return true; } return false; @@ -209,9 +211,7 @@ struct LocalJob { std::set submitShares_; LocalJob(uint64_t jobId) - : jobId_(jobId) - { - } + : jobId_(jobId) {} bool addLocalShare(const LocalShare &localShare) { return submitShares_.insert(localShare).second; diff --git a/src/StratumClient.cc b/src/StratumClient.cc index 69a48ec4c..64635f1ab 100644 --- a/src/StratumClient.cc +++ b/src/StratumClient.cc @@ -32,25 +32,30 @@ #include static map gStratumClientFactories; -bool StratumClient::registerFactory(const string &chainType, Factory factory) -{ +bool StratumClient::registerFactory(const string &chainType, Factory factory) { return gStratumClientFactories.emplace(chainType, move(factory)).second; } ///////////////////////////////// StratumClient //////////////////////////////// -StratumClient::StratumClient(struct event_base* base, - const string &workerFullName, - const string &workerPasswd) -: workerFullName_(workerFullName), workerPasswd_(workerPasswd), isMining_(false) -{ +StratumClient::StratumClient( + struct event_base *base, + const string &workerFullName, + const string &workerPasswd) + : workerFullName_(workerFullName) + , workerPasswd_(workerPasswd) + , isMining_(false) { inBuf_ = evbuffer_new(); - bev_ = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE|BEV_OPT_THREADSAFE); + bev_ = bufferevent_socket_new( + base, -1, BEV_OPT_CLOSE_ON_FREE | BEV_OPT_THREADSAFE); assert(bev_ != nullptr); - bufferevent_setcb(bev_, - StratumClientWrapper::readCallback, nullptr, - StratumClientWrapper::eventCallback, this); - bufferevent_enable(bev_, EV_READ|EV_WRITE); + bufferevent_setcb( + bev_, + StratumClientWrapper::readCallback, + nullptr, + StratumClientWrapper::eventCallback, + this); + bufferevent_enable(bev_, EV_READ | 
EV_WRITE); state_ = INIT; latestDiff_ = 1; @@ -74,7 +79,8 @@ StratumClient::~StratumClient() { bool StratumClient::connect(struct sockaddr_in &sin) { // bufferevent_socket_connect(): This function returns 0 if the connect // was successfully launched, and -1 if an error occurred. - int res = bufferevent_socket_connect(bev_, (struct sockaddr *)&sin, sizeof(sin)); + int res = + bufferevent_socket_connect(bev_, (struct sockaddr *)&sin, sizeof(sin)); if (res == 0) { return true; } @@ -93,17 +99,17 @@ void StratumClient::readBuf(struct evbuffer *buf) { bool StratumClient::tryReadLine(string &line) { line.clear(); - + // find eol struct evbuffer_ptr loc; loc = evbuffer_search_eol(inBuf_, nullptr, nullptr, EVBUFFER_EOL_LF); if (loc.pos < 0) { - return false; // not found + return false; // not found } // copies and removes the first datlen bytes from the front of buf // into the memory at data - line.resize(loc.pos + 1); // containing "\n" + line.resize(loc.pos + 1); // containing "\n" evbuffer_remove(inBuf_, (void *)line.data(), line.size()); return true; } @@ -116,24 +122,21 @@ void StratumClient::handleLine(const string &line) { LOG(ERROR) << "decode line fail, not a json string"; return; } - JsonNode jresult = jnode["result"]; - JsonNode jerror = jnode["error"]; - JsonNode jmethod = jnode["method"]; + JsonNode jresult = jnode["result"]; + JsonNode jerror = jnode["error"]; + JsonNode jmethod = jnode["method"]; if (jmethod.type() == Utilities::JS::type::Str) { - JsonNode jparams = jnode["params"]; + JsonNode jparams = jnode["params"]; auto jparamsArr = jparams.array(); if (jmethod.str() == "mining.notify") { latestJobId_ = jparamsArr[0].str(); DLOG(INFO) << "latestJobId_: " << latestJobId_; - } - else if (jmethod.str() == "mining.set_difficulty") { + } else if (jmethod.str() == "mining.set_difficulty") { latestDiff_ = jparamsArr[0].uint64(); DLOG(INFO) << "latestDiff_: " << latestDiff_; - } - else - { + } else { LOG(ERROR) << "unknown method: " << line; } return; @@ 
-143,10 +146,11 @@ void StratumClient::handleLine(const string &line) { // // {"error": null, "id": 2, "result": true} // - if (jerror.type() != Utilities::JS::type::Null || + if (jerror.type() != Utilities::JS::type::Null || jresult.type() != Utilities::JS::type::Bool || jresult.boolean() != true) { -// LOG(ERROR) << "json result is null, err: " << jerror.str() << ", line: " << line; + // LOG(ERROR) << "json result is null, err: " << jerror.str() << ", + // line: " << line; } return; } @@ -166,15 +170,18 @@ void StratumClient::handleLine(const string &line) { return; } - extraNonce1_ = resArr[1].uint32_hex(); + extraNonce1_ = resArr[1].uint32_hex(); extraNonce2Size_ = resArr[2].int32(); - DLOG(INFO) << "extraNonce1_: " << extraNonce1_ << ", extraNonce2Size_: " << extraNonce2Size_; + DLOG(INFO) << "extraNonce1_: " << extraNonce1_ + << ", extraNonce2Size_: " << extraNonce2Size_; // mining.authorize state_ = SUBSCRIBED; - string s = Strings::Format("{\"id\": 1, \"method\": \"mining.authorize\"," - "\"params\": [\"\%s\", \"%s\"]}\n", - workerFullName_.c_str(), workerPasswd_.c_str()); + string s = Strings::Format( + "{\"id\": 1, \"method\": \"mining.authorize\"," + "\"params\": [\"\%s\", \"%s\"]}\n", + workerFullName_.c_str(), + workerPasswd_.c_str()); sendData(s); return; } @@ -185,26 +192,25 @@ void StratumClient::handleLine(const string &line) { } } -string StratumClient::constructShare() -{ +string StratumClient::constructShare() { extraNonce2_++; string extraNonce2Str; // little-endian Bin2Hex((uint8_t *)&extraNonce2_, extraNonce2Size_, extraNonce2Str); // simulate miner - string s = Strings::Format("{\"params\": [\"%s\",\"%s\",\"%s\",\"%08x\",\"%08x\"]" - ",\"id\":4,\"method\": \"mining.submit\"}\n", - workerFullName_.c_str(), - latestJobId_.c_str(), - extraNonce2Str.c_str(), - (uint32_t)time(nullptr) /* ntime */, - (uint32_t)time(nullptr) /* nonce */); + string s = Strings::Format( + "{\"params\": [\"%s\",\"%s\",\"%s\",\"%08x\",\"%08x\"]" + 
",\"id\":4,\"method\": \"mining.submit\"}\n", + workerFullName_.c_str(), + latestJobId_.c_str(), + extraNonce2Str.c_str(), + (uint32_t)time(nullptr) /* ntime */, + (uint32_t)time(nullptr) /* nonce */); return s; } -void StratumClient::submitShare() -{ +void StratumClient::submitShare() { if (state_ != AUTHENTICATED) return; @@ -217,18 +223,22 @@ void StratumClient::sendData(const char *data, size_t len) { DLOG(INFO) << "send(" << len << "): " << data; } - ////////////////////////////// StratumClientWrapper //////////////////////////// -StratumClientWrapper::StratumClientWrapper(const char *host, - const uint32_t port, - const uint32_t numConnections, - const string &userName, - const string &minerNamePrefix, - const string &passwd, - const string &type) - : running_(true), base_(event_base_new()), numConnections_(numConnections), - userName_(userName), minerNamePrefix_(minerNamePrefix), passwd_(passwd), type_(type) -{ +StratumClientWrapper::StratumClientWrapper( + const char *host, + const uint32_t port, + const uint32_t numConnections, + const string &userName, + const string &minerNamePrefix, + const string &passwd, + const string &type) + : running_(true) + , base_(event_base_new()) + , numConnections_(numConnections) + , userName_(userName) + , minerNamePrefix_(minerNamePrefix) + , passwd_(passwd) + , type_(type) { memset(&sin_, 0, sizeof(sin_)); sin_.sin_family = AF_INET; inet_pton(AF_INET, host, &(sin_.sin_addr)); @@ -261,33 +271,38 @@ void StratumClientWrapper::stop() { LOG(INFO) << "StratumClientWrapper::stop..."; } -void StratumClientWrapper::eventCallback(struct bufferevent *bev, - short events, void *ptr) { +void StratumClientWrapper::eventCallback( + struct bufferevent *bev, short events, void *ptr) { StratumClient *client = static_cast(ptr); if (events & BEV_EVENT_CONNECTED) { client->state_ = StratumClient::State::CONNECTED; // subscribe - client->sendData("{\"id\":1,\"method\":\"mining.subscribe\",\"params\":[\"__simulator__/0.1\"]}\n"); - } - else 
if (events & BEV_EVENT_ERROR) { + client->sendData( + "{\"id\":1,\"method\":\"mining.subscribe\",\"params\":[\"__simulator__/" + "0.1\"]}\n"); + } else if (events & BEV_EVENT_ERROR) { /* An error occured while connecting. */ // TODO - LOG(ERROR) << "event error: " << evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()); + LOG(ERROR) << "event error: " + << evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()); } } -void StratumClientWrapper::readCallback(struct bufferevent* bev, void *connection) { +void StratumClientWrapper::readCallback( + struct bufferevent *bev, void *connection) { StratumClient *client = static_cast(connection); client->readBuf(bufferevent_get_input(bev)); } -void StratumClientWrapper::timerCallback(evutil_socket_t fd, short event, void *ptr) { +void StratumClientWrapper::timerCallback( + evutil_socket_t fd, short event, void *ptr) { auto wrapper = static_cast(ptr); wrapper->submitShares(); } -void StratumClientWrapper::signalCallback(evutil_socket_t fd, short event, void *ptr) { +void StratumClientWrapper::signalCallback( + evutil_socket_t fd, short event, void *ptr) { auto wrapper = static_cast(ptr); wrapper->stop(); } @@ -297,10 +312,8 @@ void StratumClientWrapper::run() { // create clients // for (size_t i = 0; i < numConnections_; i++) { - const string workerFullName = Strings::Format("%s.%s-%05d", - userName_.c_str(), - minerNamePrefix_.c_str(), - i); + const string workerFullName = Strings::Format( + "%s.%s-%05d", userName_.c_str(), minerNamePrefix_.c_str(), i); auto client = createClient(base_, workerFullName, passwd_); if (!client->connect(sin_)) { @@ -311,17 +324,31 @@ void StratumClientWrapper::run() { } // create timer - timer_ = event_new(base_, -1, EV_PERSIST, StratumClientWrapper::timerCallback, this); + timer_ = event_new( + base_, -1, EV_PERSIST, StratumClientWrapper::timerCallback, this); // Submit a share every 15 seconds (in probability) for each connection. 
- // After the timer is triggered, a connection will be randomly selected to submit a share. + // After the timer is triggered, a connection will be randomly selected to + // submit a share. int sleepTime = 15000000 / connections_.size(); - struct timeval interval{sleepTime / 1000000, sleepTime % 1000000}; + struct timeval interval { + sleepTime / 1000000, sleepTime % 1000000 + }; event_add(timer_, &interval); // create signals - sigterm_ = event_new(base_, SIGTERM, EV_SIGNAL | EV_PERSIST, StratumClientWrapper::signalCallback, this); + sigterm_ = event_new( + base_, + SIGTERM, + EV_SIGNAL | EV_PERSIST, + StratumClientWrapper::signalCallback, + this); event_add(sigterm_, nullptr); - sigint_ = event_new(base_, SIGINT, EV_SIGNAL | EV_PERSIST, StratumClientWrapper::signalCallback, this); + sigint_ = event_new( + base_, + SIGINT, + EV_SIGNAL | EV_PERSIST, + StratumClientWrapper::signalCallback, + this); event_add(sigint_, nullptr); // event loop @@ -334,14 +361,16 @@ void StratumClientWrapper::submitShares() { // randomly select a connection to submit a share. 
static std::random_device rd; static std::mt19937 gen(rd()); - static std::uniform_int_distribution dis(0, connections_.size()-1); + static std::uniform_int_distribution dis(0, connections_.size() - 1); size_t i = dis(gen); connections_[i]->submitShare(); } -unique_ptr StratumClientWrapper::createClient(struct event_base *base, const string &workerFullName, const string &workerPasswd) -{ +unique_ptr StratumClientWrapper::createClient( + struct event_base *base, + const string &workerFullName, + const string &workerPasswd) { auto iter = gStratumClientFactories.find(type_); if (iter != gStratumClientFactories.end() && iter->second) { return iter->second(base, workerFullName, workerPasswd); @@ -381,12 +410,12 @@ bool TCPClientWrapper::connect(const char *host, const int port) { void TCPClientWrapper::send(const char *data, const size_t len) { ::send(sockfd_, data, len, 0); -// DLOG(INFO) << "send: " << data; + // DLOG(INFO) << "send: " << data; } void TCPClientWrapper::recv() { string buf; - buf.resize(4096); // we assume 4096 is big enough + buf.resize(4096); // we assume 4096 is big enough ssize_t bytes = ::recv(sockfd_, (void *)buf.data(), buf.size(), 0); if (bytes == -1) { @@ -401,26 +430,25 @@ void TCPClientWrapper::recv() { // put data to evbuffer evbuffer_add(inBuf_, buf.data(), buf.size()); -// DLOG(INFO) << "recv: " << buf; + // DLOG(INFO) << "recv: " << buf; } void TCPClientWrapper::getLine(string &line) { line.clear(); if (evbuffer_get_length(inBuf_) == 0) - recv(); + recv(); // find eol struct evbuffer_ptr loc; loc = evbuffer_search_eol(inBuf_, nullptr, nullptr, EVBUFFER_EOL_LF); if (loc.pos < 0) { - return; // not found + return; // not found } // copies and removes the first datlen bytes from the front of buf // into the memory at data - line.resize(loc.pos + 1); // containing "\n" + line.resize(loc.pos + 1); // containing "\n" evbuffer_remove(inBuf_, (void *)line.data(), line.size()); LOG(INFO) << "line: " << line; } - diff --git a/src/StratumClient.h 
b/src/StratumClient.h index 3a6c9ebae..033b968e0 100644 --- a/src/StratumClient.h +++ b/src/StratumClient.h @@ -45,17 +45,17 @@ ///////////////////////////////// StratumClient //////////////////////////////// class StratumClient { - protected: +protected: struct bufferevent *bev_; struct evbuffer *inBuf_; - uint32_t extraNonce1_; // session ID - int32_t extraNonce2Size_; + uint32_t extraNonce1_; // session ID + int32_t extraNonce2Size_; uint64_t extraNonce2_; string workerFullName_; string workerPasswd_; bool isMining_; - string latestJobId_; + string latestJobId_; uint64_t latestDiff_; bool tryReadLine(string &line); @@ -63,34 +63,37 @@ class StratumClient { public: // mining state - enum State { - INIT = 0, - CONNECTED = 1, - SUBSCRIBED = 2, - AUTHENTICATED = 3 - }; + enum State { INIT = 0, CONNECTED = 1, SUBSCRIBED = 2, AUTHENTICATED = 3 }; atomic state_; - using Factory = function (struct event_base *, const string &, const string &)>; + using Factory = function( + struct event_base *, const string &, const string &)>; static bool registerFactory(const string &chainType, Factory factory); - template + template static bool registerFactory(const string &chainType) { - static_assert(std::is_base_of::value, "Factory is not constructing the correct type"); - return registerFactory(chainType, [](struct event_base *base, const string &workerFullName, const string &workerPasswd) { - return boost::make_unique(base, workerFullName, workerPasswd); - }); + static_assert( + std::is_base_of::value, + "Factory is not constructing the correct type"); + return registerFactory( + chainType, + [](struct event_base *base, + const string &workerFullName, + const string &workerPasswd) { + return boost::make_unique(base, workerFullName, workerPasswd); + }); } public: - StratumClient(struct event_base *base, const string &workerFullName, const string &workerPasswd); + StratumClient( + struct event_base *base, + const string &workerFullName, + const string &workerPasswd); virtual 
~StratumClient(); bool connect(struct sockaddr_in &sin); void sendData(const char *data, size_t len); - inline void sendData(const string &str) { - sendData(str.data(), str.size()); - } + inline void sendData(const string &str) { sendData(str.data(), str.size()); } void readBuf(struct evbuffer *buf); void submitShare(); @@ -106,7 +109,7 @@ class StratumClientWrapper { struct event *sigterm_; struct event *sigint_; uint32_t numConnections_; - string userName_; // miner usename + string userName_; // miner usename string minerNamePrefix_; string passwd_; // miner password, used to set difficulty string type_; @@ -115,14 +118,17 @@ class StratumClientWrapper { void submitShares(); public: - StratumClientWrapper(const char *host, const uint32_t port, - const uint32_t numConnections, - const string &userName, const string &minerNamePrefix, - const string &passwd, - const string &type); + StratumClientWrapper( + const char *host, + const uint32_t port, + const uint32_t numConnections, + const string &userName, + const string &minerNamePrefix, + const string &passwd, + const string &type); ~StratumClientWrapper(); - static void readCallback (struct bufferevent* bev, void *connection); + static void readCallback(struct bufferevent *bev, void *connection); static void eventCallback(struct bufferevent *bev, short events, void *ptr); static void timerCallback(evutil_socket_t fd, short event, void *ptr); static void signalCallback(evutil_socket_t fd, short event, void *ptr); @@ -130,11 +136,12 @@ class StratumClientWrapper { void stop(); void run(); - unique_ptr createClient(struct event_base *base, const string &workerFullName, const string &workerPasswd); + unique_ptr createClient( + struct event_base *base, + const string &workerFullName, + const string &workerPasswd); }; - - //////////////////////////////// TCPClientWrapper ////////////////////////////// // simple tcp wrapper, use for test class TCPClientWrapper { @@ -149,9 +156,7 @@ class TCPClientWrapper { bool 
connect(const char *host, const int port); void send(const char *data, const size_t len); - inline void send(const string &s) { - send(s.data(), s.size()); - } + inline void send(const string &s) { send(s.data(), s.size()); } void getLine(string &line); }; diff --git a/src/StratumMessageDispatcher.cc b/src/StratumMessageDispatcher.cc index 654e4a764..cfdb56a1a 100644 --- a/src/StratumMessageDispatcher.cc +++ b/src/StratumMessageDispatcher.cc @@ -34,14 +34,59 @@ using namespace std; -StratumMessageMinerDispatcher::StratumMessageMinerDispatcher(IStratumSession &session, unique_ptr miner) - : session_(session), miner_(move(miner)) { +#define NULL_DISPATCHER_LOG \ + LOG(ERROR) << "Null message dispatcher shall not be called" + +void StratumMessageNullDispatcher::handleRequest( + const string &idStr, + const string &method, + const JsonNode &jparams, + const JsonNode &jroot) { + NULL_DISPATCHER_LOG; } -void StratumMessageMinerDispatcher::handleRequest(const string &idStr, - const string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumMessageNullDispatcher::handleExMessage(const string &exMessage) { + NULL_DISPATCHER_LOG; +} + +void StratumMessageNullDispatcher::responseShareAccepted(const string &idStr) { + NULL_DISPATCHER_LOG; +} + +void StratumMessageNullDispatcher::responseShareError( + const string &idStr, int32_t status) { + NULL_DISPATCHER_LOG; +} + +void StratumMessageNullDispatcher::setMinDiff(uint64_t minDiff) { + NULL_DISPATCHER_LOG; +} + +void StratumMessageNullDispatcher::resetCurDiff(uint64_t curDiff) { + NULL_DISPATCHER_LOG; +} + +void StratumMessageNullDispatcher::addLocalJob(LocalJob &localJob) { + NULL_DISPATCHER_LOG; +} + +void StratumMessageNullDispatcher::removeLocalJob(LocalJob &localJob) { + NULL_DISPATCHER_LOG; +} + +#undef NULL_DISPATCHER_LOG + +StratumMessageMinerDispatcher::StratumMessageMinerDispatcher( + IStratumSession &session, unique_ptr miner) + : session_(session) + , miner_(move(miner)) { +} + +void 
StratumMessageMinerDispatcher::handleRequest( + const string &idStr, + const string &method, + const JsonNode &jparams, + const JsonNode &jroot) { miner_->handleRequest(idStr, method, jparams, jroot); } @@ -53,7 +98,8 @@ void StratumMessageMinerDispatcher::responseShareAccepted(const string &idStr) { session_.responseTrue(idStr); } -void StratumMessageMinerDispatcher::responseShareError(const string &idStr, int32_t status) { +void StratumMessageMinerDispatcher::responseShareError( + const string &idStr, int32_t status) { session_.responseError(idStr, status); } @@ -92,9 +138,11 @@ struct StratumMessageExMiningSetDiff { boost::endian::little_uint16_buf_t count; }; -StratumMessageAgentDispatcher::StratumMessageAgentDispatcher(IStratumSession &session, - const DiffController &diffController) - : session_(session), diffController_(new DiffController(diffController)), curDiff_(0) { +StratumMessageAgentDispatcher::StratumMessageAgentDispatcher( + IStratumSession &session, const DiffController &diffController) + : session_(session) + , diffController_(new DiffController(diffController)) + , curDiff_(0) { } StratumMessageAgentDispatcher::~StratumMessageAgentDispatcher() { @@ -103,10 +151,11 @@ StratumMessageAgentDispatcher::~StratumMessageAgentDispatcher() { } } -void StratumMessageAgentDispatcher::handleRequest(const string &idStr, - const string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumMessageAgentDispatcher::handleRequest( + const string &idStr, + const string &method, + const JsonNode &jparams, + const JsonNode &jroot) { LOG(ERROR) << "Miner message shall not reach here"; } @@ -164,7 +213,8 @@ void StratumMessageAgentDispatcher::addLocalJob(LocalJob &localJob) { if (!newDiffs.empty()) { // // CMD_MINING_SET_DIFF: - // | magic_number(1) | cmd(1) | len (2) | diff_2_exp(1) | count(2) | session_id (2) ... | + // | magic_number(1) | cmd(1) | len (2) | diff_2_exp(1) | count(2) | + // session_id (2) ... 
| // // // max session id count is 32,764, each message's max length is UINT16_MAX. @@ -183,15 +233,19 @@ void StratumMessageAgentDispatcher::removeLocalJob(LocalJob &localJob) { } } -void StratumMessageAgentDispatcher::handleExMessage_RegisterWorker(const string &exMessage) { +void StratumMessageAgentDispatcher::handleExMessage_RegisterWorker( + const string &exMessage) { // // REGISTER_WORKER: - // | magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | worker_name | + // | magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | + // worker_name | // - if (exMessage.size() < 8 || exMessage.size() > 100 /* 100 bytes is big enough */) + if (exMessage.size() < 8 || + exMessage.size() > 100 /* 100 bytes is big enough */) return; - auto header = reinterpret_cast(exMessage.data()); + auto header = reinterpret_cast( + exMessage.data()); auto sessionId = header->sessionId.value(); if (sessionId > StratumMessageEx::AGENT_MAX_SESSION_ID) return; @@ -218,18 +272,22 @@ void StratumMessageAgentDispatcher::handleExMessage_RegisterWorker(const string registerWorker(sessionId, clientAgent, workerName, workerId); } -void StratumMessageAgentDispatcher::handleExMessage_UnregisterWorker(const string &exMessage) { +void StratumMessageAgentDispatcher::handleExMessage_UnregisterWorker( + const string &exMessage) { // // UNREGISTER_WORKER: // | magic_number(1) | cmd(1) | len (2) | session_id(2) | // - if (exMessage.size() != 6) return; - auto header = reinterpret_cast(exMessage.data()); + if (exMessage.size() != 6) + return; + auto header = reinterpret_cast( + exMessage.data()); auto sessionId = header->sessionId.value(); unregisterWorker(sessionId); } -void StratumMessageAgentDispatcher::handleExMessage_SessionSpecific(const string &exMessage) { +void StratumMessageAgentDispatcher::handleExMessage_SessionSpecific( + const string &exMessage) { // // Session specific messages // | magic_number(1) | cmd(1) | len (2) | ... | session_id(2) | ... 
@@ -241,11 +299,16 @@ void StratumMessageAgentDispatcher::handleExMessage_SessionSpecific(const string } } -void StratumMessageAgentDispatcher::registerWorker(uint32_t sessionId,const std::string &clientAgent, const std::string &workerName, int64_t workerId) { +void StratumMessageAgentDispatcher::registerWorker( + uint32_t sessionId, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) { DLOG(INFO) << "[agent] clientAgent: " << clientAgent - << ", workerName: " << workerName << ", workerId: " - << workerId << ", session id:" << sessionId; - miners_.emplace(sessionId, session_.createMiner(clientAgent, workerName, workerId)); + << ", workerName: " << workerName << ", workerId: " << workerId + << ", session id:" << sessionId; + miners_.emplace( + sessionId, session_.createMiner(clientAgent, workerName, workerId)); session_.addWorker(clientAgent, workerName, workerId); } @@ -253,10 +316,13 @@ void StratumMessageAgentDispatcher::unregisterWorker(uint32_t sessionId) { miners_.erase(sessionId); } -void StratumMessageAgentDispatcher::getSetDiffCommand(std::map> &diffSessionIds, std::string &exMessage) { +void StratumMessageAgentDispatcher::getSetDiffCommand( + std::map> &diffSessionIds, + std::string &exMessage) { // // CMD_MINING_SET_DIFF: - // | magic_number(1) | cmd(1) | len (2) | diff_2_exp(1) | count(2) | session_id (2) ... | + // | magic_number(1) | cmd(1) | len (2) | diff_2_exp(1) | count(2) | + // session_id (2) ... | // // // max session id count is 32,764, each message's max length is UINT16_MAX. 
@@ -271,7 +337,8 @@ void StratumMessageAgentDispatcher::getSetDiffCommand(std::map kMaxCount) count = kMaxCount; + if (count > kMaxCount) + count = kMaxCount; string buf; uint16_t len = 1 + 1 + 2 + 1 + 2 + count * 2; @@ -291,7 +358,8 @@ void StratumMessageAgentDispatcher::getSetDiffCommand(std::mapcount = count; - auto p = reinterpret_cast(start + 1 + 1 + 2 + 1 + 2); + auto p = reinterpret_cast( + start + 1 + 1 + 2 + 1 + 2); // session ids for (size_t j = 0; j < count; j++) { diff --git a/src/StratumMessageDispatcher.h b/src/StratumMessageDispatcher.h index 782fc7e13..91b695b91 100644 --- a/src/StratumMessageDispatcher.h +++ b/src/StratumMessageDispatcher.h @@ -43,7 +43,11 @@ class StratumMessageDispatcher { public: virtual ~StratumMessageDispatcher() = default; - virtual void handleRequest(const std::string &idStr, const std::string &method, const JsonNode &jparams, const JsonNode &jroot) = 0; + virtual void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) = 0; virtual void handleExMessage(const std::string &exMessage) = 0; virtual void responseShareAccepted(const std::string &idStr) = 0; virtual void responseShareError(const std::string &idStr, int32_t status) = 0; @@ -53,11 +57,32 @@ class StratumMessageDispatcher { virtual void removeLocalJob(LocalJob &localJob) = 0; }; -class StratumMessageMinerDispatcher : public StratumMessageDispatcher { +class StratumMessageNullDispatcher : public StratumMessageDispatcher { public: - StratumMessageMinerDispatcher(IStratumSession &session, std::unique_ptr miner); + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; + void handleExMessage(const std::string &exMessage) override; + void responseShareAccepted(const std::string &idStr) override; + void responseShareError(const std::string &idStr, int32_t status) override; + void setMinDiff(uint64_t minDiff) override; + 
void resetCurDiff(uint64_t curDiff) override; + void addLocalJob(LocalJob &localJob) override; + void removeLocalJob(LocalJob &localJob) override; +}; - void handleRequest(const std::string &idStr, const std::string &method, const JsonNode &jparams, const JsonNode &jroot) override; +class StratumMessageMinerDispatcher : public StratumMessageDispatcher { +public: + StratumMessageMinerDispatcher( + IStratumSession &session, std::unique_ptr miner); + + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; void handleExMessage(const std::string &exMessage) override; void responseShareAccepted(const std::string &idStr) override; void responseShareError(const std::string &idStr, int32_t status) override; @@ -73,10 +98,15 @@ class StratumMessageMinerDispatcher : public StratumMessageDispatcher { class StratumMessageAgentDispatcher : public StratumMessageDispatcher { public: - explicit StratumMessageAgentDispatcher(IStratumSession &session, const DiffController &diffController); + explicit StratumMessageAgentDispatcher( + IStratumSession &session, const DiffController &diffController); ~StratumMessageAgentDispatcher(); - void handleRequest(const std::string &idStr, const std::string &method, const JsonNode &jparams, const JsonNode &jroot) override; + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; void handleExMessage(const std::string &exMessage) override; void responseShareAccepted(const std::string &idStr) override {} void responseShareError(const std::string &idStr, int32_t status) override {} @@ -92,9 +122,15 @@ class StratumMessageAgentDispatcher : public StratumMessageDispatcher { public: // These are public for unittests... 
- void registerWorker(uint32_t sessionId, const std::string &clientAgent, const std::string &workerName, int64_t workerId); + void registerWorker( + uint32_t sessionId, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId); void unregisterWorker(uint32_t sessionId); - static void getSetDiffCommand(std::map> &diffSessionIds, std::string &exMessage); + static void getSetDiffCommand( + std::map> &diffSessionIds, + std::string &exMessage); protected: IStratumSession &session_; diff --git a/src/StratumMiner.cc b/src/StratumMiner.cc index ab57c0ce3..dfe8d55c3 100644 --- a/src/StratumMiner.cc +++ b/src/StratumMiner.cc @@ -33,14 +33,20 @@ #include -StratumMiner::StratumMiner(IStratumSession &session, - const DiffController &diffController, - const string &clientAgent, - const string &workerName, - int64_t workerId) - : session_(session), diffController_(new DiffController(diffController)), curDiff_(0), clientAgent_(clientAgent) - , isNiceHashClient_(isNiceHashAgent(clientAgent)), workerName_(workerName), workerId_(workerId) - , invalidSharesCounter_(INVALID_SHARE_SLIDING_WINDOWS_SIZE) { +StratumMiner::StratumMiner( + IStratumSession &session, + const DiffController &diffController, + const string &clientAgent, + const string &workerName, + int64_t workerId) + : session_(session) + , diffController_(new DiffController(diffController)) + , curDiff_(0) + , clientAgent_(clientAgent) + , isNiceHashClient_(isNiceHashAgent(clientAgent)) + , workerName_(workerName) + , workerId_(workerId) + , invalidSharesCounter_(INVALID_SHARE_SLIDING_WINDOWS_SIZE) { } void StratumMiner::setMinDiff(uint64_t minDiff) { @@ -56,7 +62,8 @@ uint64_t StratumMiner::calcCurDiff() { return curDiff_; } -bool StratumMiner::handleShare(const std::string &idStr, int32_t status, uint64_t shareDiff) { +bool StratumMiner::handleShare( + const std::string &idStr, int32_t status, uint64_t shareDiff) { auto &dispatcher = session_.getDispatcher(); if 
(StratumStatus::isAccepted(status)) { diffController_->addAcceptedShare(shareDiff); diff --git a/src/StratumMiner.h b/src/StratumMiner.h index cfcb61a65..48c1a75af 100644 --- a/src/StratumMiner.h +++ b/src/StratumMiner.h @@ -38,30 +38,36 @@ class IStratumSession; //////////////////////////////// StratumMiner //////////////////////////////// class StratumMiner { protected: - static const int INVALID_SHARE_SLIDING_WINDOWS_SIZE = 60; // unit: seconds - static const int64_t INVALID_SHARE_SLIDING_WINDOWS_MAX_LIMIT = 20; // max number - StratumMiner(IStratumSession &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId); + static const int INVALID_SHARE_SLIDING_WINDOWS_SIZE = 60; // unit: seconds + static const int64_t INVALID_SHARE_SLIDING_WINDOWS_MAX_LIMIT = + 20; // max number + StratumMiner( + IStratumSession &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId); + public: static const size_t kExtraNonce2Size_ = 8; virtual ~StratumMiner() = default; - virtual void handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) = 0; - virtual void handleExMessage(const std::string &exMessage) {}; // No agent support by default + virtual void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) = 0; + virtual void handleExMessage( + const std::string &exMessage){}; // No agent support by default void setMinDiff(uint64_t minDiff); void resetCurDiff(uint64_t curDiff); uint64_t getCurDiff() const { return curDiff_; }; uint64_t calcCurDiff(); virtual uint64_t addLocalJob(LocalJob &localJob) = 0; - virtual void removeLocalJob( LocalJob &localJob) = 0; + virtual void removeLocalJob(LocalJob &localJob) = 0; protected: - bool handleShare(const std::string &idStr, int32_t status, uint64_t 
shareDiff); + bool + handleShare(const std::string &idStr, int32_t status, uint64_t shareDiff); IStratumSession &session_; std::unique_ptr diffController_; @@ -74,17 +80,19 @@ class StratumMiner { StatsWindow invalidSharesCounter_; }; -template +template class StratumMinerBase : public StratumMiner { using SessionType = typename StratumTraits::SessionType; using JobDiffType = typename StratumTraits::JobDiffType; + protected: - StratumMinerBase(SessionType &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) - : StratumMiner(session, diffController, clientAgent, workerName, workerId) { + StratumMinerBase( + SessionType &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) + : StratumMiner(session, diffController, clientAgent, workerName, workerId) { for (auto &localJob : session.getLocalJobs()) { addLocalJob(localJob); } diff --git a/src/StratumServer.cc b/src/StratumServer.cc index 530dcbeba..499420d99 100644 --- a/src/StratumServer.cc +++ b/src/StratumServer.cc @@ -32,11 +32,14 @@ using namespace std; #ifndef WORK_WITH_STRATUM_SWITCHER -//////////////////////////////// SessionIDManagerT ////////////////////////////// +//////////////////////////////// SessionIDManagerT +///////////////////////////////// template -SessionIDManagerT::SessionIDManagerT(const uint8_t serverId) : -serverId_(serverId), count_(0), allocIdx_(0), allocInterval_(0) -{ +SessionIDManagerT::SessionIDManagerT(const uint8_t serverId) + : serverId_(serverId) + , count_(0) + , allocIdx_(0) + , allocInterval_(0) { static_assert(IBITS <= 24, "IBITS cannot large than 24"); sessionIds_.reset(); } @@ -97,16 +100,19 @@ template class SessionIDManagerT<24>; #endif // #ifndef WORK_WITH_STRATUM_SWITCHER - ////////////////////////////////// JobRepository /////////////////////////////// -JobRepository::JobRepository(const char *kafkaBrokers, 
const char *consumerTopic, const string &fileLastNotifyTime, Server *server) +JobRepository::JobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + Server *server) : running_(true) - , kafkaConsumer_(kafkaBrokers, consumerTopic, 0/*patition*/) - , server_(server), fileLastNotifyTime_(fileLastNotifyTime) + , kafkaConsumer_(kafkaBrokers, consumerTopic, 0 /*patition*/) + , server_(server) + , fileLastNotifyTime_(fileLastNotifyTime) , kMaxJobsLifeTime_(300) - , kMiningNotifyInterval_(30) // TODO: make as config arg - , lastJobSendTime_(0) -{ + , kMiningNotifyInterval_(30) // TODO: make as config arg + , lastJobSendTime_(0) { assert(kMiningNotifyInterval_ < kMaxJobsLifeTime_); } @@ -115,13 +121,12 @@ JobRepository::~JobRepository() { threadConsume_.join(); } -void JobRepository::setMaxJobDelay (const time_t maxJobDelay) { +void JobRepository::setMaxJobDelay(const time_t maxJobDelay) { LOG(INFO) << "set max job delay to " << maxJobDelay << "s"; kMaxJobsLifeTime_ = maxJobDelay; } shared_ptr JobRepository::getStratumJobEx(const uint64_t jobId) { - ScopeLock sl(lock_); auto itr = exJobs_.find(jobId); if (itr != exJobs_.end()) { return itr->second; @@ -130,7 +135,6 @@ shared_ptr JobRepository::getStratumJobEx(const uint64_t jobId) { } shared_ptr JobRepository::getLatestStratumJobEx() { - ScopeLock sl(lock_); if (exJobs_.size()) { return exJobs_.rbegin()->second; } @@ -152,8 +156,8 @@ bool JobRepository::setupThreadConsume() { // we need to consume the latest one map consumerOptions; consumerOptions["fetch.wait.max.ms"] = "10"; - if (kafkaConsumer_.setup(RD_KAFKA_OFFSET_TAIL(kConsumeLatestN), - &consumerOptions) == false) { + if (kafkaConsumer_.setup( + RD_KAFKA_OFFSET_TAIL(kConsumeLatestN), &consumerOptions) == false) { LOG(INFO) << "setup consumer fail"; return false; } @@ -180,46 +184,52 @@ void JobRepository::runThreadConsume() { if (rkmessage != nullptr) { // consume stratum job // - // It will create a StratumJob 
and try to broadcast it immediately with broadcastStratumJob(shared_ptr). - // A derived class needs to implement the abstract method broadcastStratumJob(shared_ptr) to decide - // whether to add the StratumJob to the map exJobs_ and whether to send the job to miners immediately. - // Derived classes do not need to implement a scheduled sending mechanism, checkAndSendMiningNotify() will - // provide a default implementation. + // It will create a StratumJob and try to broadcast it immediately with + // broadcastStratumJob(shared_ptr). A derived class needs to + // implement the abstract method + // broadcastStratumJob(shared_ptr) to decide whether to add + // the StratumJob to the map exJobs_ and whether to send the job to miners + // immediately. Derived classes do not need to implement a scheduled + // sending mechanism, checkAndSendMiningNotify() will provide a default + // implementation. consumeStratumJob(rkmessage); - + // Return message to rdkafka rd_kafka_message_destroy(rkmessage); } - // check if we need to send mining notify - // It's a default implementation of scheduled sending / regular updating of stratum jobs. - // If no job is sent for a long time via broadcastStratumJob(), a job will be sent via this method. - checkAndSendMiningNotify(); + server_->dispatch([this]() { + // check if we need to send mining notify + // It's a default implementation of scheduled sending / regular updating + // of stratum jobs. If no job is sent for a long time via + // broadcastStratumJob(), a job will be sent via this method. + checkAndSendMiningNotify(); - tryCleanExpiredJobs(); + tryCleanExpiredJobs(); + }); } LOG(INFO) << "stop job repository consume thread"; } - - void JobRepository::consumeStratumJob(rd_kafka_message_t *rkmessage) { // check error if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. // Not really an error. 
- // LOG(INFO) << "consumer reached end of " << rd_kafka_topic_name(rkmessage->rkt) + // LOG(INFO) << "consumer reached end of " << + // rd_kafka_topic_name(rkmessage->rkt) // << "[" << rkmessage->partition << "] " // << " message queue at offset " << rkmessage->offset; // acturlly return; } - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { @@ -229,8 +239,8 @@ void JobRepository::consumeStratumJob(rd_kafka_message_t *rkmessage) { } shared_ptr sjob = createStratumJob(); - bool res = sjob->unserializeFromJson((const char *)rkmessage->payload, - rkmessage->len); + bool res = sjob->unserializeFromJson( + (const char *)rkmessage->payload, rkmessage->len); if (res == false) { LOG(ERROR) << "unserialize stratum job fail"; return; @@ -238,27 +248,32 @@ void JobRepository::consumeStratumJob(rd_kafka_message_t *rkmessage) { // make sure the job is not expired. 
time_t now = time(nullptr); if (sjob->jobTime() + kMaxJobsLifeTime_ < now) { - LOG(ERROR) << "too large delay from kafka to receive topic 'StratumJob' job time=" << sjob->jobTime() << ", max delay=" << kMaxJobsLifeTime_ << ", now=" << now; - return; - } - // here you could use Map.find() without lock, it's sure - // that everyone is using this Map readonly now - auto existingJob = getStratumJobEx(sjob->jobId_); - if(existingJob != nullptr) - { - LOG(ERROR) << "jobId already existed"; + LOG(ERROR) + << "too large delay from kafka to receive topic 'StratumJob' job time=" + << sjob->jobTime() << ", max delay=" << kMaxJobsLifeTime_ + << ", now=" << now; return; } - broadcastStratumJob(sjob); + server_->dispatch([this, sjob]() { + // here you could use Map.find() without lock, it's sure + // that everyone is using this Map readonly now + auto existingJob = getStratumJobEx(sjob->jobId_); + if (existingJob != nullptr) { + LOG(ERROR) << "jobId already existed"; + return; + } + + broadcastStratumJob(sjob); + }); } -shared_ptr JobRepository::createStratumJobEx(shared_ptr sjob, bool isClean){ +shared_ptr +JobRepository::createStratumJobEx(shared_ptr sjob, bool isClean) { return std::make_shared(sjob, isClean); } void JobRepository::markAllJobsAsStale() { - ScopeLock sl(lock_); for (auto it : exJobs_) { it.second->markStale(); } @@ -267,8 +282,7 @@ void JobRepository::markAllJobsAsStale() { void JobRepository::checkAndSendMiningNotify() { // last job is 'expried', send a new one if (exJobs_.size() && - lastJobSendTime_ + kMiningNotifyInterval_ <= time(nullptr)) - { + lastJobSendTime_ + kMiningNotifyInterval_ <= time(nullptr)) { shared_ptr exJob = exJobs_.rbegin()->second; sendMiningNotify(exJob); } @@ -292,8 +306,6 @@ void JobRepository::sendMiningNotify(shared_ptr exJob) { } void JobRepository::tryCleanExpiredJobs() { - ScopeLock sl(lock_); - const uint32_t nowTs = (uint32_t)time(nullptr); // Keep at least one job to keep normal mining when the jobmaker fails while 
(exJobs_.size() > 1) { @@ -303,26 +315,23 @@ void JobRepository::tryCleanExpiredJobs() { const time_t jobTime = (time_t)(itr->first >> 32); if (nowTs < jobTime + kMaxJobsLifeTime_) { - break; // not expired + break; // not expired } + LOG(INFO) << "remove expired stratum job, id: " << itr->first + << ", time: " << date("%F %T", jobTime); + // remove expired job exJobs_.erase(itr); - - LOG(INFO) << "remove expired stratum job, id: " << itr->first - << ", time: " << date("%F %T", jobTime); } } - - - - //////////////////////////////////// UserInfo ///////////////////////////////// -UserInfo::UserInfo(const string &apiUrl, Server *server): -running_(true), apiUrl_(apiUrl), lastMaxUserId_(0), -server_(server) -{ +UserInfo::UserInfo(const string &apiUrl, Server *server) + : running_(true) + , apiUrl_(apiUrl) + , lastMaxUserId_(0) + , server_(server) { pthread_rwlock_init(&rwlock_, nullptr); } @@ -353,7 +362,7 @@ int32_t UserInfo::getUserId(const string userName) { if (itr != nameIds_.end()) { return itr->second; } - return 0; // not found + return 0; // not found } #ifdef USER_DEFINED_COINBASE @@ -368,7 +377,7 @@ string UserInfo::getCoinbaseInfo(int32_t userId) { if (itr != idCoinbaseInfos_.end()) { return itr->second; } - return ""; // not found + return ""; // not found } int32_t UserInfo::incrementalUpdateUsers() { @@ -376,9 +385,13 @@ int32_t UserInfo::incrementalUpdateUsers() { // WARNING: The API is incremental update, we use `?last_id=` to make sure // always get the new data. Make sure you have use `last_id` in API. 
// - const string url = Strings::Format("%s?last_id=%d&last_time=%" PRId64, apiUrl_.c_str(), lastMaxUserId_, lastTime_); + const string url = Strings::Format( + "%s?last_id=%d&last_time=%" PRId64, + apiUrl_.c_str(), + lastMaxUserId_, + lastTime_); string resp; - if (!httpGET(url.c_str(), resp, 10000/* timeout ms */)) { + if (!httpGET(url.c_str(), resp, 10000 /* timeout ms */)) { LOG(ERROR) << "http get request user list fail, url: " << url; return -1; } @@ -389,7 +402,8 @@ int32_t UserInfo::incrementalUpdateUsers() { return -1; } if (r["data"].type() == Utilities::JS::type::Undefined) { - LOG(ERROR) << "invalid data, should key->value, type: " << (int)r["data"].type(); + LOG(ERROR) << "invalid data, should key->value, type: " + << (int)r["data"].type(); return -1; } JsonNode data = r["data"]; @@ -403,7 +417,7 @@ int32_t UserInfo::incrementalUpdateUsers() { pthread_rwlock_wrlock(&rwlock_); for (JsonNode &itr : *vUser) { - const string userName(itr.key_start(), itr.key_end() - itr.key_start()); + const string userName(itr.key_start(), itr.key_end() - itr.key_start()); if (itr.type() != Utilities::JS::type::Obj) { LOG(ERROR) << "invalid data, should key - value" << std::endl; @@ -418,7 +432,8 @@ int32_t UserInfo::incrementalUpdateUsers() { coinbaseInfo.resize(USER_DEFINED_COINBASE_SIZE); } else { // padding '\x20' at both beginning and ending of coinbaseInfo - int beginPaddingLen = (USER_DEFINED_COINBASE_SIZE - coinbaseInfo.size()) / 2; + int beginPaddingLen = + (USER_DEFINED_COINBASE_SIZE - coinbaseInfo.size()) / 2; coinbaseInfo.insert(0, beginPaddingLen, '\x20'); coinbaseInfo.resize(USER_DEFINED_COINBASE_SIZE, '\x20'); } @@ -431,7 +446,6 @@ int32_t UserInfo::incrementalUpdateUsers() { // get user's coinbase info LOG(INFO) << "user id: " << userId << ", coinbase info: " << coinbaseInfo; idCoinbaseInfos_[userId] = coinbaseInfo; - } pthread_rwlock_unlock(&rwlock_); @@ -447,9 +461,10 @@ int32_t UserInfo::incrementalUpdateUsers() { // WARNING: The API is incremental 
update, we use `?last_id=` to make sure // always get the new data. Make sure you have use `last_id` in API. // - const string url = Strings::Format("%s?last_id=%d", apiUrl_.c_str(), lastMaxUserId_); + const string url = + Strings::Format("%s?last_id=%d", apiUrl_.c_str(), lastMaxUserId_); string resp; - if (!httpGET(url.c_str(), resp, 10000/* timeout ms */)) { + if (!httpGET(url.c_str(), resp, 10000 /* timeout ms */)) { LOG(ERROR) << "http get request user list fail, url: " << url; return -1; } @@ -460,7 +475,8 @@ int32_t UserInfo::incrementalUpdateUsers() { return -1; } if (r["data"].type() == Utilities::JS::type::Undefined) { - LOG(ERROR) << "invalid data, should key->value, type: " << (int)r["data"].type(); + LOG(ERROR) << "invalid data, should key->value, type: " + << (int)r["data"].type(); return -1; } auto vUser = r["data"].children(); @@ -470,8 +486,8 @@ int32_t UserInfo::incrementalUpdateUsers() { pthread_rwlock_wrlock(&rwlock_); for (const auto &itr : *vUser) { - const string userName(itr.key_start(), itr.key_end() - itr.key_start()); - const int32_t userId = itr.int32(); + const string userName(itr.key_start(), itr.key_end() - itr.key_start()); + const int32_t userId = itr.int32(); if (userId > lastMaxUserId_) { lastMaxUserId_ = userId; } @@ -486,12 +502,12 @@ int32_t UserInfo::incrementalUpdateUsers() { #endif void UserInfo::runThreadUpdate() { - const time_t updateInterval = 10; // seconds + const time_t updateInterval = 10; // seconds time_t lastUpdateTime = time(nullptr); while (running_) { if (lastUpdateTime + updateInterval > time(nullptr)) { - usleep(500000); // 500ms + usleep(500000); // 500ms continue; } @@ -529,23 +545,30 @@ bool UserInfo::setupThreads() { return true; } -void UserInfo::addWorker(const int32_t userId, const int64_t workerId, - const string &workerName, const string &minerAgent) { +void UserInfo::addWorker( + const int32_t userId, + const int64_t workerId, + const string &workerName, + const string &minerAgent) { ScopeLock 
sl(workerNameLock_); // insert to Q workerNameQ_.push_back(WorkerName()); - workerNameQ_.rbegin()->userId_ = userId; + workerNameQ_.rbegin()->userId_ = userId; workerNameQ_.rbegin()->workerId_ = workerId; // worker name - snprintf(workerNameQ_.rbegin()->workerName_, - sizeof(workerNameQ_.rbegin()->workerName_), - "%s", workerName.c_str()); + snprintf( + workerNameQ_.rbegin()->workerName_, + sizeof(workerNameQ_.rbegin()->workerName_), + "%s", + workerName.c_str()); // miner agent - snprintf(workerNameQ_.rbegin()->minerAgent_, - sizeof(workerNameQ_.rbegin()->minerAgent_), - "%s", minerAgent.c_str()); + snprintf( + workerNameQ_.rbegin()->minerAgent_, + sizeof(workerNameQ_.rbegin()->minerAgent_), + "%s", + minerAgent.c_str()); } void UserInfo::runThreadInsertWorkerName() { @@ -569,27 +592,27 @@ int32_t UserInfo::insertWorkerName() { if (itr == workerNameQ_.end()) return 0; - // sent events to kafka: worker_update { string eventJson; - eventJson = Strings::Format("{\"created_at\":\"%s\"," - "\"type\":\"worker_update\"," - "\"content\":{" - "\"user_id\":%d," - "\"worker_id\":%" PRId64 "," - "\"worker_name\":\"%s\"," - "\"miner_agent\":\"%s\"" - "}}", - date("%F %T").c_str(), - itr->userId_, - itr->workerId_, - itr->workerName_, - itr->minerAgent_); + eventJson = Strings::Format( + "{\"created_at\":\"%s\"," + "\"type\":\"worker_update\"," + "\"content\":{" + "\"user_id\":%d," + "\"worker_id\":%" PRId64 + "," + "\"worker_name\":\"%s\"," + "\"miner_agent\":\"%s\"" + "}}", + date("%F %T").c_str(), + itr->userId_, + itr->workerId_, + itr->workerName_, + itr->minerAgent_); server_->sendCommonEvents2Kafka(eventJson); } - { ScopeLock sl(workerNameLock_); workerNameQ_.pop_front(); @@ -597,14 +620,11 @@ int32_t UserInfo::insertWorkerName() { return 1; } - - ////////////////////////////////// StratumJobEx //////////////////////////////// StratumJobEx::StratumJobEx(shared_ptr sjob, bool isClean) : state_(0) , isClean_(isClean) - , sjob_(sjob) -{ + , sjob_(sjob) { assert(sjob); } @@ 
-622,44 +642,56 @@ bool StratumJobEx::isStale() { } ////////////////////////////////// StratumServer /////////////////////////////// -StratumServer::StratumServer(const char *ip, const unsigned short port, - const char *kafkaBrokers, const string &userAPIUrl, - const uint8_t serverId, const string &fileLastNotifyTime, - bool isEnableSimulator, bool isSubmitInvalidBlock, - bool isDevModeEnable, float devFixedDifficulty, - const string &consumerTopic, - uint32_t maxJobDelay, - shared_ptr defaultDifficultyController, - const string& solvedShareTopic, - const string& shareTopic, - const string& commonEventsTopic) - : running_(true), - ip_(ip), port_(port), serverId_(serverId), - fileLastNotifyTime_(fileLastNotifyTime), - kafkaBrokers_(kafkaBrokers), userAPIUrl_(userAPIUrl), - isEnableSimulator_(isEnableSimulator), isSubmitInvalidBlock_(isSubmitInvalidBlock), - isDevModeEnable_(isDevModeEnable), devFixedDifficulty_(devFixedDifficulty), - consumerTopic_(consumerTopic), - maxJobDelay_(maxJobDelay), - defaultDifficultyController_(defaultDifficultyController), - solvedShareTopic_(solvedShareTopic), - shareTopic_(shareTopic), - commonEventsTopic_(commonEventsTopic) -{ +StratumServer::StratumServer( + const char *ip, + const unsigned short port, + const char *kafkaBrokers, + const string &userAPIUrl, + const uint8_t serverId, + const string &fileLastNotifyTime, + bool isEnableSimulator, + bool isSubmitInvalidBlock, + bool isDevModeEnable, + float devFixedDifficulty, + const string &consumerTopic, + uint32_t maxJobDelay, + shared_ptr defaultDifficultyController, + const string &solvedShareTopic, + const string &shareTopic, + const string &commonEventsTopic) + : running_(true) + , ip_(ip) + , port_(port) + , serverId_(serverId) + , fileLastNotifyTime_(fileLastNotifyTime) + , kafkaBrokers_(kafkaBrokers) + , userAPIUrl_(userAPIUrl) + , isEnableSimulator_(isEnableSimulator) + , isSubmitInvalidBlock_(isSubmitInvalidBlock) + , isDevModeEnable_(isDevModeEnable) + , 
devFixedDifficulty_(devFixedDifficulty) + , consumerTopic_(consumerTopic) + , maxJobDelay_(maxJobDelay) + , defaultDifficultyController_(defaultDifficultyController) + , solvedShareTopic_(solvedShareTopic) + , shareTopic_(shareTopic) + , commonEventsTopic_(commonEventsTopic) { } StratumServer::~StratumServer() { } -bool StratumServer::createServer(const string &type, const int32_t shareAvgSeconds, const libconfig::Config &config) { - server_ = std::shared_ptr(createStratumServer(type, shareAvgSeconds, config)); +bool StratumServer::createServer( + const string &type, + const int32_t shareAvgSeconds, + const libconfig::Config &config) { + server_ = std::shared_ptr( + createStratumServer(type, shareAvgSeconds, config)); return server_ != nullptr; } -bool StratumServer::init() -{ - if (!server_->setup(this)) - { +bool StratumServer::init() { + if (!server_->setup(this)) { LOG(ERROR) << "fail to setup server"; return false; } @@ -681,7 +713,9 @@ void StratumServer::run() { ///////////////////////////////////// Server /////////////////////////////////// Server::Server(const int32_t shareAvgSeconds) - : base_(nullptr), signal_event_(nullptr), listener_(nullptr) + : base_(nullptr) + , signal_event_(nullptr) + , listener_(nullptr) , kafkaProducerShareLog_(nullptr) , kafkaProducerSolvedShare_(nullptr) , kafkaProducerCommonEvents_(nullptr) @@ -695,8 +729,7 @@ Server::Server(const int32_t shareAvgSeconds) , kShareAvgSeconds_(shareAvgSeconds) , jobRepository_(nullptr) , userInfo_(nullptr) - , serverId_(0) -{ + , serverId_(0) { } Server::~Server() { @@ -732,46 +765,56 @@ Server::~Server() { #endif } - -bool Server::setup(StratumServer* sserver) { +bool Server::setup(StratumServer *sserver) { #ifdef WORK_WITH_STRATUM_SWITCHER - LOG(INFO) << "WORK_WITH_STRATUM_SWITCHER enabled, miners can only connect to the sserver via a stratum switcher."; + LOG(INFO) << "WORK_WITH_STRATUM_SWITCHER enabled, miners can only connect to " + "the sserver via a stratum switcher."; #endif if 
(sserver->isEnableSimulator_) { isEnableSimulator_ = true; - LOG(WARNING) << "Simulator is enabled, all share will be accepted. " - << "This option should not be enabled in a production environment!"; + LOG(WARNING) + << "Simulator is enabled, all share will be accepted. " + << "This option should not be enabled in a production environment!"; } if (sserver->isSubmitInvalidBlock_) { isSubmitInvalidBlock_ = true; - LOG(WARNING) << "Submit invalid block is enabled, all shares will become solved shares. " - << "This option should not be enabled in a production environment!"; + LOG(WARNING) + << "Submit invalid block is enabled, all shares will become solved " + "shares. " + << "This option should not be enabled in a production environment!"; } if (sserver->isDevModeEnable_) { isDevModeEnable_ = true; devFixedDifficulty_ = sserver->devFixedDifficulty_; - LOG(WARNING) << "Development mode is enabled with fixed difficulty: " << devFixedDifficulty_ - << ". This option should not be enabled in a production environment!"; + LOG(WARNING) + << "Development mode is enabled with fixed difficulty: " + << devFixedDifficulty_ + << ". 
This option should not be enabled in a production environment!"; } defaultDifficultyController_ = sserver->defaultDifficultyController_; - kafkaProducerSolvedShare_ = new KafkaProducer(sserver->kafkaBrokers_.c_str(), - sserver->solvedShareTopic_.c_str(), - RD_KAFKA_PARTITION_UA); - kafkaProducerShareLog_ = new KafkaProducer(sserver->kafkaBrokers_.c_str(), - sserver->shareTopic_.c_str(), - RD_KAFKA_PARTITION_UA); - kafkaProducerCommonEvents_ = new KafkaProducer(sserver->kafkaBrokers_.c_str(), - sserver->commonEventsTopic_.c_str(), - RD_KAFKA_PARTITION_UA); + kafkaProducerSolvedShare_ = new KafkaProducer( + sserver->kafkaBrokers_.c_str(), + sserver->solvedShareTopic_.c_str(), + RD_KAFKA_PARTITION_UA); + kafkaProducerShareLog_ = new KafkaProducer( + sserver->kafkaBrokers_.c_str(), + sserver->shareTopic_.c_str(), + RD_KAFKA_PARTITION_UA); + kafkaProducerCommonEvents_ = new KafkaProducer( + sserver->kafkaBrokers_.c_str(), + sserver->commonEventsTopic_.c_str(), + RD_KAFKA_PARTITION_UA); // job repository - jobRepository_ = createJobRepository(sserver->kafkaBrokers_.c_str(), sserver->consumerTopic_.c_str(), \ - sserver->fileLastNotifyTime_); + jobRepository_ = createJobRepository( + sserver->kafkaBrokers_.c_str(), + sserver->consumerTopic_.c_str(), + sserver->fileLastNotifyTime_); jobRepository_->setMaxJobDelay(sserver->maxJobDelay_); if (!jobRepository_->setupThreadConsume()) { return false; @@ -827,8 +870,8 @@ bool Server::setup(StratumServer* sserver) { { map options; options["queue.buffering.max.messages"] = "500000"; - options["queue.buffering.max.ms"] = "1000"; // send every second - options["batch.num.messages"] = "10000"; + options["queue.buffering.max.ms"] = "1000"; // send every second + options["batch.num.messages"] = "10000"; if (!kafkaProducerCommonEvents_->setup(&options)) { LOG(ERROR) << "kafka kafkaProducerCommonEvents_ setup failure"; @@ -841,27 +884,30 @@ bool Server::setup(StratumServer* sserver) { } base_ = event_base_new(); - if(!base_) { + if 
(!base_) { LOG(ERROR) << "server: cannot create base"; return false; } memset(&sin_, 0, sizeof(sin_)); sin_.sin_family = AF_INET; - sin_.sin_port = htons(sserver->port_); + sin_.sin_port = htons(sserver->port_); sin_.sin_addr.s_addr = htonl(INADDR_ANY); - const char* ip = sserver->ip_.c_str(); + const char *ip = sserver->ip_.c_str(); if (ip && inet_pton(AF_INET, ip, &sin_.sin_addr) == 0) { LOG(ERROR) << "invalid ip: " << ip; return false; } - listener_ = evconnlistener_new_bind(base_, - Server::listenerCallback, - (void*)this, - LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_FREE, - -1, (struct sockaddr*)&sin_, sizeof(sin_)); - if(!listener_) { + listener_ = evconnlistener_new_bind( + base_, + Server::listenerCallback, + (void *)this, + LEV_OPT_REUSEABLE | LEV_OPT_CLOSE_ON_FREE, + -1, + (struct sockaddr *)&sin_, + sizeof(sin_)); + if (!listener_) { LOG(ERROR) << "cannot create listener: " << ip << ":" << sserver->port_; return false; } @@ -869,7 +915,7 @@ bool Server::setup(StratumServer* sserver) { } void Server::run() { - if(base_ != NULL) { + if (base_ != NULL) { // event_base_loop(base_, EVLOOP_NONBLOCK); event_base_dispatch(base_); } @@ -883,6 +929,37 @@ void Server::stop() { userInfo_->stop(); } +namespace { + +class StratumServerTask { +public: + StratumServerTask(event_base *base, std::function task) + : task_{move(task)} + , event_{event_new(base, -1, 0, &StratumServerTask::execute, this)} { + event_add(event_, nullptr); + event_active(event_, EV_TIMEOUT, 0); + } + + static void execute(evutil_socket_t, short, void *context) { + delete static_cast(context); + } + +private: + ~StratumServerTask() { + task_(); + event_free(event_); + } + + std::function task_; + struct event *event_; +}; + +} // namespace + +void Server::dispatch(std::function task) { + new StratumServerTask{base_, move(task)}; +} + void Server::sendMiningNotifyToAll(shared_ptr exJobPtr) { // // http://www.sgi.com/tech/stl/Map.html @@ -893,8 +970,6 @@ void Server::sendMiningNotifyToAll(shared_ptr 
exJobPtr) { // of course, for iterators that actually point to the element that is // being erased. // - - ScopeLock sl(connsLock_); auto itr = connections_.begin(); while (itr != connections_.end()) { auto &conn = *itr; @@ -911,7 +986,6 @@ void Server::sendMiningNotifyToAll(shared_ptr exJobPtr) { } void Server::addConnection(unique_ptr connection) { - ScopeLock sl(connsLock_); connections_.insert(move(connection)); } @@ -923,13 +997,14 @@ void Server::removeConnection(StratumSession &connection) { connection.markAsDead(); } -void Server::listenerCallback(struct evconnlistener* listener, - evutil_socket_t fd, - struct sockaddr *saddr, - int socklen, void* data) -{ +void Server::listenerCallback( + struct evconnlistener *listener, + evutil_socket_t fd, + struct sockaddr *saddr, + int socklen, + void *data) { Server *server = static_cast(data); - struct event_base *base = (struct event_base*)server->base_; + struct event_base *base = (struct event_base *)server->base_; struct bufferevent *bev; uint32_t sessionID = 0u; @@ -941,8 +1016,9 @@ void Server::listenerCallback(struct evconnlistener* listener, } #endif - bev = bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE|BEV_OPT_THREADSAFE); - if(bev == nullptr) { + bev = bufferevent_socket_new( + base, fd, BEV_OPT_CLOSE_ON_FREE | BEV_OPT_THREADSAFE); + if (bev == nullptr) { LOG(ERROR) << "error constructing bufferevent!"; server->stop(); return; @@ -950,27 +1026,25 @@ void Server::listenerCallback(struct evconnlistener* listener, // create stratum session auto conn = server->createConnection(bev, saddr, sessionID); - if (!conn->initialize()) - { + if (!conn->initialize()) { return; } // set callback functions - bufferevent_setcb(bev, - Server::readCallback, nullptr, - Server::eventCallback, conn.get()); + bufferevent_setcb( + bev, Server::readCallback, nullptr, Server::eventCallback, conn.get()); // By default, a newly created bufferevent has writing enabled. 
- bufferevent_enable(bev, EV_READ|EV_WRITE); + bufferevent_enable(bev, EV_READ | EV_WRITE); server->addConnection(move(conn)); } -void Server::readCallback(struct bufferevent* bev, void *connection) { +void Server::readCallback(struct bufferevent *bev, void *connection) { auto conn = static_cast(connection); conn->readBuf(bufferevent_get_input(bev)); } -void Server::eventCallback(struct bufferevent* bev, short events, - void *connection) { +void Server::eventCallback( + struct bufferevent *bev, short events, void *connection) { auto conn = static_cast(connection); // should not be 'BEV_EVENT_CONNECTED' @@ -978,27 +1052,21 @@ void Server::eventCallback(struct bufferevent* bev, short events, if (events & BEV_EVENT_EOF) { LOG(INFO) << "socket closed"; - } - else if (events & BEV_EVENT_ERROR) { + } else if (events & BEV_EVENT_ERROR) { LOG(INFO) << "got an error on the socket: " - << evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()); - } - else if (events & BEV_EVENT_TIMEOUT) { + << evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()); + } else if (events & BEV_EVENT_TIMEOUT) { LOG(INFO) << "socket read/write timeout, events: " << events; - } - else { + } else { LOG(ERROR) << "unhandled socket events: " << events; } conn->getServer().removeConnection(*conn); } - - void Server::sendShare2Kafka(const uint8_t *data, size_t len) { kafkaProducerShareLog_->produce(data, len); } - void Server::sendCommonEvents2Kafka(const string &message) { kafkaProducerCommonEvents_->produce(message.data(), message.size()); } diff --git a/src/StratumServer.h b/src/StratumServer.h index 38b5eca0b..6a095a44c 100644 --- a/src/StratumServer.h +++ b/src/StratumServer.h @@ -51,21 +51,17 @@ class DiffController; //////////////////////////////// SessionIDManager ////////////////////////////// -enum StratumServerType -{ - BTC = 1, - ETH -}; +enum StratumServerType { BTC = 1, ETH }; class SessionIDManager { public: virtual ~SessionIDManager() {} virtual bool ifFull() = 0; - // The default value is 
0: no interval, the session id will be allocated continuously. - // If the value is N, then id2 = id1 + N. - // Skipped ids are not assigned to other sessions unless the allocator reaches - // the maximum and rolls back to the beginning. - // This setting can be used to reserve more mining space for workers and there is no DoS risk. + // The default value is 0: no interval, the session id will be allocated + // continuously. If the value is N, then id2 = id1 + N. Skipped ids are not + // assigned to other sessions unless the allocator reaches the maximum and + // rolls back to the beginning. This setting can be used to reserve more + // mining space for workers and there is no DoS risk. virtual void setAllocInterval(uint32_t interval) = 0; virtual bool allocSessionId(uint32_t *sessionID) = 0; virtual void freeSessionId(uint32_t sessionId) = 0; @@ -84,12 +80,13 @@ class SessionIDManagerT : public SessionIDManager { // [000...] [1, 255] range: [0, kSessionIdMask] // - const static uint32_t kSessionIdMask = (1 << IBITS) - 1; // example: 0x00FFFFFF; + const static uint32_t kSessionIdMask = + (1 << IBITS) - 1; // example: 0x00FFFFFF; uint8_t serverId_; std::bitset sessionIds_; - uint32_t count_; // how many ids are used now + uint32_t count_; // how many ids are used now uint32_t allocIdx_; uint32_t allocInterval_; mutex lock_; @@ -107,17 +104,14 @@ class SessionIDManagerT : public SessionIDManager { #endif // #ifndef WORK_WITH_STRATUM_SWITCHER - ////////////////////////////////// JobRepository /////////////////////////////// -class JobRepository -{ +class JobRepository { protected: atomic running_; - mutex lock_; std::map> exJobs_; KafkaConsumer kafkaConsumer_; // consume topic: 'StratumJob' - Server *server_; // call server to send new job + Server *server_; // call server to send new job string fileLastNotifyTime_; @@ -135,46 +129,52 @@ class JobRepository void checkAndSendMiningNotify(); protected: - JobRepository(const char *kafkaBrokers, const char *consumerTopic, 
const string &fileLastNotifyTime, Server *server); + JobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + Server *server); + public: virtual ~JobRepository(); void stop(); bool setupThreadConsume(); void markAllJobsAsStale(); - - void setMaxJobDelay (const time_t maxJobDelay); + + void setMaxJobDelay(const time_t maxJobDelay); void sendMiningNotify(shared_ptr exJob); shared_ptr getStratumJobEx(const uint64_t jobId); shared_ptr getLatestStratumJobEx(); virtual shared_ptr createStratumJob() = 0; - virtual shared_ptr createStratumJobEx(shared_ptr sjob, bool isClean); + virtual shared_ptr + createStratumJobEx(shared_ptr sjob, bool isClean); virtual void broadcastStratumJob(shared_ptr sjob) = 0; }; -// This base class is to help type safety of accessing server_ member variable. Avoid manual casting. -// And by templating a minimum class declaration, we avoid bloating the code too much. -template -class JobRepositoryBase : public JobRepository -{ +// This base class is to help type safety of accessing server_ member variable. +// Avoid manual casting. And by templating a minimum class declaration, we +// avoid bloating the code too much. 
+template +class JobRepositoryBase : public JobRepository { protected: - JobRepositoryBase(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerType *server) - : JobRepository(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) - { + JobRepositoryBase( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerType *server) + : JobRepository(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) {} - } protected: - inline ServerType* GetServer() const - { - return static_cast(server_); + inline ServerType *GetServer() const { + return static_cast(server_); } + private: using JobRepository::server_; // hide the server_ member variable }; - - ///////////////////////////////////// UserInfo ///////////////////////////////// // 1. update userName->userId by interval // 2. insert worker name to db @@ -182,10 +182,12 @@ class UserInfo { struct WorkerName { int32_t userId_; int64_t workerId_; - char workerName_[21]; - char minerAgent_[31]; + char workerName_[21]; + char minerAgent_[31]; - WorkerName(): userId_(0), workerId_(0) { + WorkerName() + : userId_(0) + , workerId_(0) { memset(workerName_, 0, sizeof(workerName_)); memset(minerAgent_, 0, sizeof(minerAgent_)); } @@ -199,7 +201,7 @@ class UserInfo { // username -> userId std::unordered_map nameIds_; int32_t lastMaxUserId_; - + #ifdef USER_DEFINED_COINBASE // userId -> userCoinbaseInfo std::unordered_map idCoinbaseInfos_; @@ -229,11 +231,14 @@ class UserInfo { int32_t getUserId(const string userName); #ifdef USER_DEFINED_COINBASE - string getCoinbaseInfo(int32_t userId); + string getCoinbaseInfo(int32_t userId); #endif - void addWorker(const int32_t userId, const int64_t workerId, - const string &workerName, const string &minerAgent); + void addWorker( + const int32_t userId, + const int64_t workerId, + const string &workerName, + const string &minerAgent); }; ////////////////////////////////// StratumJobEx 
//////////////////////////////// @@ -254,19 +259,16 @@ class StratumJobEx { void markStale(); bool isStale(); - }; - ///////////////////////////////////// Server /////////////////////////////////// class Server { // NetIO struct sockaddr_in sin_; - struct event_base* base_; - struct event* signal_event_; - struct evconnlistener* listener_; + struct event_base *base_; + struct event *signal_event_; + struct evconnlistener *listener_; std::set> connections_; - mutex connsLock_; public: // kafka producers @@ -290,7 +292,8 @@ class Server { #endif // - // WARNING: if enable, difficulty sent to miners is always devFixedDifficulty_. + // WARNING: if enable, difficulty sent to miners is always + // devFixedDifficulty_. // for development // bool isDevModeEnable_; @@ -307,46 +310,55 @@ class Server { protected: Server(const int32_t shareAvgSeconds); - virtual bool setupInternal(StratumServer* sserver){ return true; }; + virtual bool setupInternal(StratumServer *sserver) { return true; }; public: virtual ~Server(); - bool setup(StratumServer* sserver); + bool setup(StratumServer *sserver); void run(); void stop(); + // Dispatch the task to the libevent loop + void dispatch(std::function task); + void sendMiningNotifyToAll(shared_ptr exJobPtr); void addConnection(unique_ptr connection); void removeConnection(StratumSession &connection); - static void listenerCallback(struct evconnlistener* listener, - evutil_socket_t socket, - struct sockaddr* saddr, - int socklen, void* server); - static void readCallback (struct bufferevent *, void *connection); + static void listenerCallback( + struct evconnlistener *listener, + evutil_socket_t socket, + struct sockaddr *saddr, + int socklen, + void *server); + static void readCallback(struct bufferevent *, void *connection); static void eventCallback(struct bufferevent *, short, void *connection); - void sendShare2Kafka (const uint8_t *data, size_t len); + void sendShare2Kafka(const uint8_t *data, size_t len); void 
sendCommonEvents2Kafka(const string &message); - virtual unique_ptr createConnection(struct bufferevent *bev, struct sockaddr *saddr, uint32_t sessionID) = 0; + virtual unique_ptr createConnection( + struct bufferevent *bev, struct sockaddr *saddr, uint32_t sessionID) = 0; protected: - virtual JobRepository* createJobRepository(const char *kafkaBrokers, - const char *consumerTopic, - const string &fileLastNotifyTime) = 0; - + virtual JobRepository *createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) = 0; }; -template -class ServerBase : public Server -{ +template +class ServerBase : public Server { public: - TJobRepository* GetJobRepository(){ return static_cast(jobRepository_); } + TJobRepository *GetJobRepository() { + return static_cast(jobRepository_); + } + protected: - ServerBase(const int32_t shareAvgSeconds) : Server(shareAvgSeconds) { } + ServerBase(const int32_t shareAvgSeconds) + : Server(shareAvgSeconds) {} private: using Server::jobRepository_; @@ -361,7 +373,7 @@ class StratumServer { shared_ptr server_; string ip_; unsigned short port_; - uint8_t serverId_; // global unique, range: [1, 255] + uint8_t serverId_; // global unique, range: [1, 255] string fileLastNotifyTime_; @@ -373,13 +385,13 @@ class StratumServer { // if enable it, will make block and submit bool isSubmitInvalidBlock_; - + // if enable, difficulty sent to miners is always devFixedDifficulty_ bool isDevModeEnable_; // difficulty to send to miners. 
for development float devFixedDifficulty_; - + string consumerTopic_; uint32_t maxJobDelay_; shared_ptr defaultDifficultyController_; @@ -387,26 +399,31 @@ class StratumServer { string shareTopic_; string commonEventsTopic_; - StratumServer(const char *ip, const unsigned short port, - const char *kafkaBrokers, - const string &userAPIUrl, - const uint8_t serverId, const string &fileLastNotifyTime, - bool isEnableSimulator, - bool isSubmitInvalidBlock, - bool isDevModeEnable, - float devFixedDifficulty, - const string &consumerTopic, - uint32_t maxJobDelay, - shared_ptr defaultDifficultyController, - const string& solvedShareTopic, - const string& shareTopic, - const string& commonEventsTopic); + StratumServer( + const char *ip, + const unsigned short port, + const char *kafkaBrokers, + const string &userAPIUrl, + const uint8_t serverId, + const string &fileLastNotifyTime, + bool isEnableSimulator, + bool isSubmitInvalidBlock, + bool isDevModeEnable, + float devFixedDifficulty, + const string &consumerTopic, + uint32_t maxJobDelay, + shared_ptr defaultDifficultyController, + const string &solvedShareTopic, + const string &shareTopic, + const string &commonEventsTopic); ~StratumServer(); - bool createServer(const string &type, const int32_t shareAvgSeconds, const libconfig::Config &config); + bool createServer( + const string &type, + const int32_t shareAvgSeconds, + const libconfig::Config &config); bool init(); void stop(); void run(); }; - #endif diff --git a/src/StratumSession.cc b/src/StratumSession.cc index dbb7fe221..c20a1aa3b 100644 --- a/src/StratumSession.cc +++ b/src/StratumSession.cc @@ -44,24 +44,40 @@ static const uint32_t WriteTimeout = 120; static const string PoolWatcherAgent = "__PoolWatcher__"; static const string BtccomAgentPrefix = "btccom-agent/"; -StratumSession::StratumSession(Server &server, struct bufferevent *bev, struct sockaddr *saddr, uint32_t extraNonce1) - : server_(server), bev_(bev), extraNonce1_(extraNonce1), buffer_(evbuffer_new()), 
clientAgent_("unknown") - , isAgentClient_(false), isNiceHashClient_(false), state_(CONNECTED), isDead_(false), isLongTimeout_(false) { +StratumSession::StratumSession( + Server &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1) + : server_(server) + , bev_(bev) + , extraNonce1_(extraNonce1) + , buffer_(evbuffer_new()) + , clientAgent_("unknown") + , isAgentClient_(false) + , isNiceHashClient_(false) + , state_(CONNECTED) + , isDead_(false) + , isLongTimeout_(false) { assert(saddr->sa_family == AF_INET); auto ipv4 = reinterpret_cast(saddr); clientIpInt_ = ipv4->sin_addr.s_addr; clientIp_.resize(INET_ADDRSTRLEN); - evutil_inet_ntop(AF_INET, &ipv4->sin_addr, &clientIp_.front(), INET_ADDRSTRLEN); + evutil_inet_ntop( + AF_INET, &ipv4->sin_addr, &clientIp_.front(), INET_ADDRSTRLEN); // remove the padding bytes clientIp_ = clientIp_.c_str(); + // make a null dispatcher here to guard against invalid access + dispatcher_ = boost::make_unique(); + setup(); LOG(INFO) << "client connect, ip: " << clientIp_; } StratumSession::~StratumSession() { - LOG(INFO) << "close stratum session, ip: " << clientIp_ - << ", name: \"" << worker_.fullName_ << "\"" + LOG(INFO) << "close stratum session, ip: " << clientIp_ << ", name: \"" + << worker_.fullName_ << "\"" << ", agent: \"" << clientAgent_ << "\""; evbuffer_free(buffer_); bufferevent_free(bev_); @@ -108,11 +124,12 @@ bool StratumSession::handleMessage() { // and it will fall into infinite loop with handleMessage() calling. 
// if (len < 4) { - LOG(ERROR) << "received invalid ex-message, type: " << std::hex << cmd << ", len: " << len; + LOG(ERROR) << "received invalid ex-message, type: " << std::hex << cmd + << ", len: " << len; return false; } - if (evBufLen < len) // didn't received the whole message yet + if (evBufLen < len) // didn't received the whole message yet return false; // copies and removes the first datlen bytes from the front of buf @@ -120,10 +137,8 @@ bool StratumSession::handleMessage() { string exMessage; exMessage.resize(len); evbuffer_remove(buffer_, &exMessage.front(), exMessage.size()); - if (dispatcher_) { - dispatcher_->handleExMessage(exMessage); - } - return true; // read message success, return true + dispatcher_->handleExMessage(exMessage); + return true; // read message success, return true } // @@ -135,7 +150,7 @@ bool StratumSession::handleMessage() { return true; } - return false; // read message failure + return false; // read message failure } bool StratumSession::tryReadLine(std::string &line) { @@ -145,12 +160,12 @@ bool StratumSession::tryReadLine(std::string &line) { struct evbuffer_ptr loc; loc = evbuffer_search_eol(buffer_, nullptr, nullptr, EVBUFFER_EOL_LF); if (loc.pos < 0) { - return false; // not found + return false; // not found } // copies and removes the first datlen bytes from the front of buf // into the memory at data - line.resize(loc.pos + 1); // containing "\n" + line.resize(loc.pos + 1); // containing "\n" evbuffer_remove(buffer_, &line.front(), line.size()); return true; } @@ -160,7 +175,8 @@ void StratumSession::handleLine(const std::string &line) { JsonNode jnode; if (!JsonNode::parse(line.data(), line.data() + line.size(), jnode)) { - LOG(ERROR) << "decode line fail, not a json string. string value: \"" << line.c_str() << "\""; + LOG(ERROR) << "decode line fail, not a json string. 
string value: \"" + << line.c_str() << "\""; return; } JsonNode jid = jnode["id"]; @@ -189,8 +205,7 @@ void StratumSession::logAuthorizeResult(bool success) { << ", workerName:" << worker_.fullName_ << ", clientAgent: " << clientAgent_ << ", clientIp: " << clientIp_; - } - else { + } else { LOG(WARNING) << "authorize failed, workerName:" << worker_.fullName_ << ", clientAgent: " << clientAgent_ << ", clientIp: " << clientIp_; @@ -198,28 +213,32 @@ void StratumSession::logAuthorizeResult(bool success) { } string StratumSession::getMinerInfoJson(const string &type) { - return Strings::Format("{\"created_at\":\"%s\"," - "\"type\":\"%s\"," - "\"content\":{" - "\"user_id\":%d,\"user_name\":\"%s\"," - "\"worker_id\":%" PRId64 ",\"worker_name\":\"%s\"," - "\"client_agent\":\"%s\",\"ip\":\"%s\"," - "\"session_id\":\"%08x\"" - "}}", - date("%F %T").c_str(), - type.c_str(), - worker_.userId_, worker_.userName_.c_str(), - worker_.workerHashId_, worker_.workerName_.c_str(), - clientAgent_.c_str(), clientIp_.c_str(), - extraNonce1_); + return Strings::Format( + "{\"created_at\":\"%s\"," + "\"type\":\"%s\"," + "\"content\":{" + "\"user_id\":%d,\"user_name\":\"%s\"," + "\"worker_id\":%" PRId64 + ",\"worker_name\":\"%s\"," + "\"client_agent\":\"%s\",\"ip\":\"%s\"," + "\"session_id\":\"%08x\"" + "}}", + date("%F %T").c_str(), + type.c_str(), + worker_.userId_, + worker_.userName_.c_str(), + worker_.workerHashId_, + worker_.workerName_.c_str(), + clientAgent_.c_str(), + clientIp_.c_str(), + extraNonce1_); } -void StratumSession::checkUserAndPwd(const string &idStr, const string &fullName, const string &password) -{ +void StratumSession::checkUserAndPwd( + const string &idStr, const string &fullName, const string &password) { const string userName = worker_.getUserName(fullName); const int32_t userId = server_.userInfo_->getUserId(userName); - if (userId <= 0) - { + if (userId <= 0) { logAuthorizeResult(false); responseError(idStr, StratumStatus::INVALID_USERNAME); return; @@ -231,12 
+250,15 @@ void StratumSession::checkUserAndPwd(const string &idStr, const string &fullName // set id & names, will filter workername in this func worker_.setUserIDAndNames(userId, fullName); - server_.userInfo_->addWorker(worker_.userId_, worker_.workerHashId_, worker_.workerName_, clientAgent_); + server_.userInfo_->addWorker( + worker_.userId_, + worker_.workerHashId_, + worker_.workerName_, + clientAgent_); dispatcher_ = createDispatcher(); logAuthorizeResult(true); - if (!password.empty()) - { + if (!password.empty()) { setDefaultDifficultyFromPassword(password); } @@ -245,7 +267,8 @@ void StratumSession::checkUserAndPwd(const string &idStr, const string &fullName setReadTimeout(isLongTimeout_ ? 86400 * 7 : 60 * 10); // send latest stratum job - sendMiningNotify(server_.jobRepository_->getLatestStratumJobEx(), true /* is first job */); + sendMiningNotify( + server_.jobRepository_->getLatestStratumJobEx(), true /* is first job */); // sent events to kafka: miner_connect server_.sendCommonEvents2Kafka(getMinerInfoJson("miner_connect")); @@ -255,20 +278,14 @@ void StratumSession::setDefaultDifficultyFromPassword(const string &password) { // testcase: TEST(StratumSession, SetDiff) using namespace boost::algorithm; - if (!dispatcher_) { - LOG(ERROR) << "StratumSession::setDefaultDifficultyFromPassword: ignore password " - << password << ", dispatcher_ is empty!"; - return; - } - uint64_t d = 0u, md = 0u; - vector arr; // key=value,key=value + vector arr; // key=value,key=value split(arr, password, is_any_of(",")); if (arr.size() == 0) return; for (auto it = arr.begin(); it != arr.end(); it++) { - vector arr2; // key,value + vector arr2; // key,value split(arr2, *it, is_any_of("=")); if (arr2.size() != 2 || arr2[1].empty()) { continue; @@ -277,8 +294,7 @@ void StratumSession::setDefaultDifficultyFromPassword(const string &password) { if (arr2[0] == "d") { // 'd' : start difficulty d = strtoull(arr2[1].c_str(), nullptr, 10); - } - else if (arr2[0] == "md") { + } else 
if (arr2[0] == "md") { // 'md' : minimum difficulty md = strtoull(arr2[1].c_str(), nullptr, 10); } @@ -300,15 +316,16 @@ void StratumSession::setDefaultDifficultyFromPassword(const string &password) { void StratumSession::setClientAgent(const string &clientAgent) { clientAgent_ = filterWorkerName(clientAgent); isNiceHashClient_ = isNiceHashAgent(clientAgent_); - isAgentClient_ = (0 == clientAgent_.compare(0, BtccomAgentPrefix.size(), BtccomAgentPrefix)); + isAgentClient_ = + (0 == + clientAgent_.compare(0, BtccomAgentPrefix.size(), BtccomAgentPrefix)); isLongTimeout_ = (isAgentClient_ || clientAgent_ == PoolWatcherAgent); } -bool StratumSession::validate(const JsonNode &jmethod, const JsonNode &jparams) { - if (jmethod.type() == Utilities::JS::type::Str && - jmethod.size() != 0 && - jparams.type() == Utilities::JS::type::Array) - { +bool StratumSession::validate( + const JsonNode &jmethod, const JsonNode &jparams) { + if (jmethod.type() == Utilities::JS::type::Str && jmethod.size() != 0 && + jparams.type() == Utilities::JS::type::Array) { return true; } @@ -317,22 +334,25 @@ bool StratumSession::validate(const JsonNode &jmethod, const JsonNode &jparams) unique_ptr StratumSession::createDispatcher() { // By default there is no agent support - return boost::make_unique(*this, - createMiner(clientAgent_, - worker_.workerName_, - worker_.workerHashId_)); + return boost::make_unique( + *this, + createMiner(clientAgent_, worker_.workerName_, worker_.workerHashId_)); } bool StratumSession::isDead() const { return isDead_.load(); } -void StratumSession::addWorker(const std::string &clientAgent, const std::string &workerName, int64_t workerId) { +void StratumSession::addWorker( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) { if (state_ != AUTHENTICATED) { LOG(ERROR) << "curr stratum session has NOT auth yet"; return; } - server_.userInfo_->addWorker(worker_.userId_, workerId, workerName, clientAgent); + server_.userInfo_->addWorker( 
+ worker_.userId_, workerId, workerName, clientAgent); } void StratumSession::markAsDead() { @@ -386,10 +406,13 @@ void StratumSession::responseError(const string &idStr, int errCode) { // {"id": 10, "result": null, "error":[21, "Job not found", null]} // char buf[1024]; - int len = snprintf(buf, sizeof(buf), - "{\"id\":%s,\"result\":null,\"error\":[%d,\"%s\",null]}\n", - idStr.empty() ? "null" : idStr.c_str(), - errCode, StratumStatus::toString(errCode)); + int len = snprintf( + buf, + sizeof(buf), + "{\"id\":%s,\"result\":null,\"error\":[%d,\"%s\",null]}\n", + idStr.empty() ? "null" : idStr.c_str(), + errCode, + StratumStatus::toString(errCode)); sendData(buf, len); } @@ -398,28 +421,26 @@ void StratumSession::responseError(const string &idStr, int errCode) { * * * 5 Response object - * When a rpc call is made, the Server MUST reply with a Response, except for in the case of Notifications. - * The Response is expressed as a single JSON Object, with the following members: - * jsonrpc - * A String specifying the version of the JSON-RPC protocol. MUST be exactly "2.0". - * result - * This member is REQUIRED on success. - * This member MUST NOT exist if there was an error invoking the method. - * The value of this member is determined by the method invoked on the Server. - * error - * This member is REQUIRED on error. - * This member MUST NOT exist if there was no error triggered during invocation. - * The value for this member MUST be an Object as defined in section 5.1. - * id - * This member is REQUIRED. - * It MUST be the same as the value of the id member in the Request Object. - * If there was an error in detecting the id in the Request object (e.g. Parse error/Invalid Request), it MUST be Null. + * When a rpc call is made, the Server MUST reply with a Response, except for in + * the case of Notifications. The Response is expressed as a single JSON Object, + * with the following members: jsonrpc A String specifying the version of the + * JSON-RPC protocol. 
MUST be exactly "2.0". result This member is REQUIRED on + * success. This member MUST NOT exist if there was an error invoking the + * method. The value of this member is determined by the method invoked on the + * Server. error This member is REQUIRED on error. This member MUST NOT exist if + * there was no error triggered during invocation. The value for this member + * MUST be an Object as defined in section 5.1. id This member is REQUIRED. It + * MUST be the same as the value of the id member in the Request Object. If + * there was an error in detecting the id in the Request object (e.g. Parse + * error/Invalid Request), it MUST be Null. * - * Either the result member or error member MUST be included, but both members MUST NOT be included. + * Either the result member or error member MUST be included, but both members + * MUST NOT be included. */ void StratumSession::rpc2ResponseTrue(const string &idStr) { - const string s = Strings::Format("{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":true}\n", idStr.c_str()); + const string s = Strings::Format( + "{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":true}\n", idStr.c_str()); sendData(s); } @@ -429,8 +450,8 @@ void StratumSession::rpc2ResponseTrue(const string &idStr) { * * 5.1 Error object * - * When a rpc call encounters an error, the Response Object MUST contain the error member - * with a value that is a Object with the following members: + * When a rpc call encounters an error, the Response Object MUST contain the + * error member with a value that is a Object with the following members: * * code * A Number that indicates the error type that occurred. @@ -439,17 +460,21 @@ void StratumSession::rpc2ResponseTrue(const string &idStr) { * A String providing a short description of the error. * The message SHOULD be limited to a concise single sentence. * data - * A Primitive or Structured value that contains additional information about the error. - * This may be omitted. 
- * The value of this member is defined by the Server (e.g. detailed error information, nested errors etc.). + * A Primitive or Structured value that contains additional information + * about the error. This may be omitted. The value of this member is defined by + * the Server (e.g. detailed error information, nested errors etc.). */ void StratumSession::rpc2ResponseError(const string &idStr, int errCode) { char buf[1024]; - int len = snprintf(buf, sizeof(buf), - "{\"id\":%s,\"jsonrpc\":\"2.0\",\"error\":{\"code\":%d,\"message\":\"%s\"}}\n", - idStr.empty() ? "null" : idStr.c_str(), - errCode, StratumStatus::toString(errCode)); + int len = snprintf( + buf, + sizeof(buf), + "{\"id\":%s,\"jsonrpc\":\"2.0\",\"error\":{\"code\":%d,\"message\":\"%" + "s\"}}\n", + idStr.empty() ? "null" : idStr.c_str(), + errCode, + StratumStatus::toString(errCode)); sendData(buf, len); } @@ -457,16 +482,19 @@ void StratumSession::responseAuthorized(const std::string &idStr) { responseTrue(idStr); } -void StratumSession::sendSetDifficulty(LocalJob &localJob, uint64_t difficulty) { +void StratumSession::sendSetDifficulty( + LocalJob &localJob, uint64_t difficulty) { string s; if (!server_.isDevModeEnable_) { - s = Strings::Format("{\"id\":null,\"method\":\"mining.set_difficulty\"" - ",\"params\":[%" PRIu64"]}\n", - difficulty); + s = Strings::Format( + "{\"id\":null,\"method\":\"mining.set_difficulty\"" + ",\"params\":[%" PRIu64 "]}\n", + difficulty); } else { - s = Strings::Format("{\"id\":null,\"method\":\"mining.set_difficulty\"" - ",\"params\":[%.3f]}\n", - server_.devFixedDifficulty_); + s = Strings::Format( + "{\"id\":null,\"method\":\"mining.set_difficulty\"" + ",\"params\":[%.3f]}\n", + server_.devFixedDifficulty_); } sendData(s); diff --git a/src/StratumSession.h b/src/StratumSession.h index 11b0b6b80..5927c7257 100644 --- a/src/StratumSession.h +++ b/src/StratumSession.h @@ -44,19 +44,22 @@ class Server; class StratumJobEx; // Supported BTCAgent features / capabilities, a JSON 
array. -// Sent within the request / response of agent.get_capabilities for protocol negotiation. -// Known capabilities: -// verrol: version rolling (shares with a version mask can be submitted through a BTCAgent session). +// Sent within the request / response of agent.get_capabilities for protocol +// negotiation. Known capabilities: +// verrol: version rolling (shares with a version mask can be submitted +// through a BTCAgent session). #define BTCAGENT_PROTOCOL_CAPABILITIES "[\"verrol\"]" enum class StratumCommandEx : uint8_t { - REGISTER_WORKER = 0x01u, // Agent -> Pool - SUBMIT_SHARE = 0x02u, // Agent -> Pool, mining.submit(...) - SUBMIT_SHARE_WITH_TIME = 0x03u, // Agent -> Pool, mining.submit(..., nTime) - UNREGISTER_WORKER = 0x04u, // Agent -> Pool - MINING_SET_DIFF = 0x05u, // Pool -> Agent, mining.set_difficulty(diff) - SUBMIT_SHARE_WITH_VER = 0x12u, // Agent -> Pool, mining.submit(..., nVersionMask) - SUBMIT_SHARE_WITH_TIME_VER = 0x13u, // Agent -> Pool, mining.submit(..., nTime, nVersionMask) + REGISTER_WORKER = 0x01u, // Agent -> Pool + SUBMIT_SHARE = 0x02u, // Agent -> Pool, mining.submit(...) 
+ SUBMIT_SHARE_WITH_TIME = 0x03u, // Agent -> Pool, mining.submit(..., nTime) + UNREGISTER_WORKER = 0x04u, // Agent -> Pool + MINING_SET_DIFF = 0x05u, // Pool -> Agent, mining.set_difficulty(diff) + SUBMIT_SHARE_WITH_VER = + 0x12u, // Agent -> Pool, mining.submit(..., nVersionMask) + SUBMIT_SHARE_WITH_TIME_VER = + 0x13u, // Agent -> Pool, mining.submit(..., nTime, nVersionMask) }; struct StratumMessageEx { @@ -74,10 +77,14 @@ struct StratumMessageEx { class IStratumSession { public: virtual ~IStratumSession() = default; - virtual void addWorker(const std::string &clientAgent, const std::string &workerName, int64_t workerId) = 0; - virtual std::unique_ptr createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) = 0; + virtual void addWorker( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) = 0; + virtual std::unique_ptr createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) = 0; virtual uint16_t decodeSessionId(const std::string &exMessage) const = 0; virtual StratumMessageDispatcher &getDispatcher() = 0; virtual void responseTrue(const std::string &idStr) = 0; @@ -90,11 +97,8 @@ class IStratumSession { class StratumSession : public IStratumSession { public: // mining state - enum State { - CONNECTED = 0, - SUBSCRIBED = 1, - AUTHENTICATED = 2 - }; + enum State { CONNECTED = 0, SUBSCRIBED = 1, AUTHENTICATED = 2 }; + protected: Server &server_; struct bufferevent *bev_; @@ -104,7 +108,7 @@ class StratumSession : public IStratumSession { uint32_t clientIpInt_; std::string clientIp_; - std::string clientAgent_; // eg. bfgminer/4.4.0-32-gac4e9b3 + std::string clientAgent_; // eg. 
bfgminer/4.4.0-32-gac4e9b3 bool isAgentClient_; bool isNiceHashClient_; std::unique_ptr dispatcher_; @@ -117,11 +121,16 @@ class StratumSession : public IStratumSession { void setup(); void setReadTimeout(int32_t readTimeout); - bool handleMessage(); // handle all messages: ex-message and stratum message + bool handleMessage(); // handle all messages: ex-message and stratum message bool tryReadLine(std::string &line); void handleLine(const std::string &line); - virtual void handleRequest(const std::string &idStr, const std::string &method, const JsonNode &jparams, const JsonNode &jroot) = 0; - void checkUserAndPwd(const string &idStr, const string &fullName, const string &password); + virtual void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) = 0; + void checkUserAndPwd( + const string &idStr, const string &fullName, const string &password); void setDefaultDifficultyFromPassword(const string &password); void setClientAgent(const string &clientAgent); @@ -131,12 +140,18 @@ class StratumSession : public IStratumSession { virtual bool validate(const JsonNode &jmethod, const JsonNode &jparams); virtual std::unique_ptr createDispatcher(); - StratumSession(Server &server, struct bufferevent *bev, struct sockaddr *saddr, uint32_t extraNonce1); + StratumSession( + Server &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1); public: virtual ~StratumSession(); virtual bool initialize() { return true; } - uint16_t decodeSessionId(const std::string &exMessage) const override { return StratumMessageEx::AGENT_MAX_SESSION_ID; }; + uint16_t decodeSessionId(const std::string &exMessage) const override { + return StratumMessageEx::AGENT_MAX_SESSION_ID; + }; Server &getServer() { return server_; } StratumWorker &getWorker() { return worker_; } @@ -146,10 +161,15 @@ class StratumSession : public IStratumSession { State getState() const { return state_; } bool isDead() const; 
void markAsDead(); - void addWorker(const std::string &clientAgent, const std::string &workerName, int64_t workerId) override; + void addWorker( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) override; void sendData(const char *data, size_t len) override; - void sendData(const std::string &str) override { sendData(str.data(), str.size()); } + void sendData(const std::string &str) override { + sendData(str.data(), str.size()); + } void readBuf(struct evbuffer *buf); void responseTrue(const std::string &idStr) override; @@ -158,42 +178,49 @@ class StratumSession : public IStratumSession { void rpc2ResponseTrue(const string &idStr); void rpc2ResponseError(const string &idStr, int errCode); void sendSetDifficulty(LocalJob &localJob, uint64_t difficulty) override; - virtual void sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob=false) = 0; + virtual void sendMiningNotify( + shared_ptr exJobPtr, bool isFirstJob = false) = 0; }; -// This base class is to help type safety of accessing server_ member variable. Avoid manual casting. -// And by templating a minimum class declaration, we avoid bloating the code too much. -template -class StratumSessionBase : public StratumSession -{ +// This base class is to help type safety of accessing server_ member variable. +// Avoid manual casting. And by templating a minimum class declaration, we +// avoid bloating the code too much. +template +class StratumSessionBase : public StratumSession { protected: using ServerType = typename StratumTraits::ServerType; - StratumSessionBase(ServerType &server, struct bufferevent *bev, struct sockaddr *saddr, uint32_t extraNonce1) - : StratumSession(server, bev, saddr, extraNonce1) - , kMaxNumLocalJobs_(10) - { - // usually stratum job interval is 30~60 seconds, 10 is enough for miners - // should <= 10, we use short_job_id, range: [0 ~ 9]. do NOT change it. 
- assert(kMaxNumLocalJobs_ <= 10); - } + StratumSessionBase( + ServerType &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1) + : StratumSession(server, bev, saddr, extraNonce1) + , kMaxNumLocalJobs_(256) {} using LocalJobType = typename StratumTraits::LocalJobType; - static_assert(std::is_base_of::value, "Local job type is not derived from LocalJob"); + static_assert( + std::is_base_of::value, + "Local job type is not derived from LocalJob"); std::deque localJobs_; size_t kMaxNumLocalJobs_; public: - template - LocalJobType *findLocalJob(const Key& key) - { - for (auto &localJob : localJobs_) { - if (localJob == key) return &localJob; + size_t maxNumLocalJobs() const { return kMaxNumLocalJobs_; } + + template + LocalJobType *findLocalJob(const Key &key) { + auto iter = localJobs_.rbegin(); + auto iend = localJobs_.rend(); + for (; iter != iend; ++iter) { + if (*iter == key) { + return &(*iter); + } } return nullptr; } - template - LocalJobType &addLocalJob(uint64_t jobId, Args&&... args) { + template + LocalJobType &addLocalJob(uint64_t jobId, Args &&... 
args) { localJobs_.emplace_back(jobId, std::forward(args)...); auto &localJob = localJobs_.back(); dispatcher_->addLocalJob(localJob); @@ -209,8 +236,7 @@ class StratumSessionBase : public StratumSession std::deque &getLocalJobs() { return localJobs_; } - inline ServerType &getServer() const - { + inline ServerType &getServer() const { return static_cast(server_); } }; diff --git a/src/Utils.cc b/src/Utils.cc index 1c8ec3bba..4a9098d68 100644 --- a/src/Utils.cc +++ b/src/Utils.cc @@ -47,7 +47,7 @@ static inline int _hex2bin_char(const char c) { bool Hex2BinReverse(const char *in, size_t size, vector &out) { out.clear(); - out.reserve(size/2); + out.reserve(size / 2); uint8_t h, l; // skip space, 0x @@ -57,20 +57,19 @@ bool Hex2BinReverse(const char *in, size_t size, vector &out) { // convert while (psz > in) { - if(*psz == 'x') + if (*psz == 'x') break; l = _hex2bin_char(*psz--); h = _hex2bin_char(*psz--); - + out.push_back((h << 4) | l); } return true; - } bool Hex2Bin(const char *in, size_t size, vector &out) { out.clear(); - out.reserve(size/2); + out.reserve(size / 2); uint8_t h, l; // skip space, 0x @@ -87,12 +86,11 @@ bool Hex2Bin(const char *in, size_t size, vector &out) { out.push_back((h << 4) | l); } return true; - } bool Hex2Bin(const char *in, vector &out) { out.clear(); - out.reserve(strlen(in)/2); + out.reserve(strlen(in) / 2); uint8_t h, l; // skip space, 0x @@ -102,7 +100,9 @@ bool Hex2Bin(const char *in, vector &out) { if (psz[0] == '0' && tolower(psz[1]) == 'x') psz += 2; - if (strlen(psz) % 2 == 1) { return false; } + if (strlen(psz) % 2 == 1) { + return false; + } // convert while (*psz != '\0' && *(psz + 1) != '\0') { @@ -130,27 +130,26 @@ void Bin2Hex(const vector &in, string &str) { void Bin2HexR(const uint8_t *in, size_t len, string &str) { vector r; r.resize(len); - for(size_t i = 0; i < len; ++i) - { + for (size_t i = 0; i < len; ++i) { r[i] = in[len - 1 - i]; } Bin2Hex(r, str); } void Bin2HexR(const vector &in, string &str) { - 
Bin2HexR((const uint8_t*)in.data(), in.size(), str); + Bin2HexR((const uint8_t *)in.data(), in.size(), str); } // Receive 0MQ string from socket and convert into string -std::string s_recv (zmq::socket_t & socket) { +std::string s_recv(zmq::socket_t &socket) { zmq::message_t message; socket.recv(&message); - return std::string(static_cast(message.data()), message.size()); + return std::string(static_cast(message.data()), message.size()); } // Convert string to 0MQ string and send to socket -bool s_send(zmq::socket_t & socket, const std::string & string) { +bool s_send(zmq::socket_t &socket, const std::string &string) { zmq::message_t message(string.size()); memcpy(message.data(), string.data(), string.size()); @@ -159,7 +158,7 @@ bool s_send(zmq::socket_t & socket, const std::string & string) { } // Sends string as 0MQ string, as multipart non-terminal -bool s_sendmore (zmq::socket_t & socket, const std::string & string) { +bool s_sendmore(zmq::socket_t &socket, const std::string &string) { zmq::message_t message(string.size()); memcpy(message.data(), string.data(), string.size()); @@ -167,21 +166,18 @@ bool s_sendmore (zmq::socket_t & socket, const std::string & string) { return (rc); } - - struct CurlChunk { char *memory; size_t size; }; static size_t -CurlWriteChunkCallback(void *contents, size_t size, size_t nmemb, void *userp) -{ +CurlWriteChunkCallback(void *contents, size_t size, size_t nmemb, void *userp) { size_t realsize = size * nmemb; struct CurlChunk *mem = (struct CurlChunk *)userp; mem->memory = (char *)realloc(mem->memory, mem->size + realsize + 1); - if(mem->memory == NULL) { + if (mem->memory == NULL) { /* out of memory! */ printf("not enough memory (realloc returned NULL)\n"); return 0; @@ -194,7 +190,8 @@ CurlWriteChunkCallback(void *contents, size_t size, size_t nmemb, void *userp) return realsize; } -// This may be ugly but I really do not want to modify every places calling HTTP methods... 
+// This may be ugly but I really do not want to modify every places calling HTTP +// methods... static bool sslVerifyPeer = true; void setSslVerifyPeer(bool verifyPeer) { @@ -205,13 +202,20 @@ bool httpGET(const char *url, string &response, long timeoutMs) { return httpPOST(url, nullptr, nullptr, response, timeoutMs, nullptr); } -bool httpGET(const char *url, const char *userpwd, - string &response, long timeoutMs) { +bool httpGET( + const char *url, const char *userpwd, string &response, long timeoutMs) { return httpPOST(url, userpwd, nullptr, response, timeoutMs, nullptr); } -bool httpPOSTImpl(const char *url, const char *userpwd, const char *postData, int len, - string &response, long timeoutMs, const char *mineType, const char *agent) { +bool httpPOSTImpl( + const char *url, + const char *userpwd, + const char *postData, + int len, + string &response, + long timeoutMs, + const char *mineType, + const char *agent) { struct curl_slist *headers = NULL; CURLcode status; long code; @@ -221,8 +225,9 @@ bool httpPOSTImpl(const char *url, const char *userpwd, const char *postData, in return false; } - chunk.memory = (char *)malloc(1); /* will be grown as needed by the realloc above */ - chunk.size = 0; /* no data at this point */ + chunk.memory = + (char *)malloc(1); /* will be grown as needed by the realloc above */ + chunk.size = 0; /* no data at this point */ // RSK doesn't support 'Expect: 100-Continue' in 'HTTP/1.1'. // So switch to 'HTTP/1.0'. 
@@ -238,24 +243,25 @@ bool httpPOSTImpl(const char *url, const char *userpwd, const char *postData, in if (postData != nullptr) { curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, len); - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, postData); + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, postData); } - + if (userpwd != nullptr) curl_easy_setopt(curl, CURLOPT_USERPWD, userpwd); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, sslVerifyPeer ? 2L : 0L); curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, sslVerifyPeer); curl_easy_setopt(curl, CURLOPT_USERAGENT, agent); curl_easy_setopt(curl, CURLOPT_TIMEOUT_MS, timeoutMs); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, CurlWriteChunkCallback); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&chunk); status = curl_easy_perform(curl); if (status != 0) { - LOG(ERROR) << "unable to request data from: " << url << ", error: " << curl_easy_strerror(status); + LOG(ERROR) << "unable to request data from: " << url + << ", error: " << curl_easy_strerror(status); goto error; } @@ -263,8 +269,8 @@ bool httpPOSTImpl(const char *url, const char *userpwd, const char *postData, in response.assign(chunk.memory, chunk.size); curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code); - //status code 200 - 208 indicates ok - //sia returns 204 as success + // status code 200 - 208 indicates ok + // sia returns 204 as success if (code < 200 || code > 208) { LOG(ERROR) << "server responded with code: " << code; goto error; } @@ -275,7 +281,6 @@ bool httpPOSTImpl(const char *url, const char *userpwd, const char *postData, in free(chunk.memory); return true; - error: if (curl) curl_easy_cleanup(curl); @@ -286,26 +291,59 @@ bool httpPOSTImpl(const char *url, const char *userpwd, const char *postData, in return false; } -bool httpPOST(const char *url, const char *userpwd, const char *postData, - string
&response, long timeoutMs, const char *mineType, const char *agent) -{ - return httpPOSTImpl(url, userpwd, postData, postData != nullptr ? strlen(postData) : 0, response, timeoutMs, mineType, agent); +bool httpPOST( + const char *url, + const char *userpwd, + const char *postData, + string &response, + long timeoutMs, + const char *mineType, + const char *agent) { + return httpPOSTImpl( + url, + userpwd, + postData, + postData != nullptr ? strlen(postData) : 0, + response, + timeoutMs, + mineType, + agent); } -bool httpPOST(const char *url, const char *userpwd, const char *postData, - string &response, long timeoutMs, const char *mineType) -{ - return httpPOST(url, userpwd, postData, response, timeoutMs, mineType, "curl"); +bool httpPOST( + const char *url, + const char *userpwd, + const char *postData, + string &response, + long timeoutMs, + const char *mineType) { + return httpPOST( + url, userpwd, postData, response, timeoutMs, mineType, "curl"); } -bool blockchainNodeRpcCall(const char *url, const char *userpwd, const char *reqData, - string &response) { - return httpPOST(url, userpwd, reqData, response, 5000/* timeout ms */, "application/json"); +bool blockchainNodeRpcCall( + const char *url, + const char *userpwd, + const char *reqData, + string &response) { + return httpPOST( + url, + userpwd, + reqData, + response, + 5000 /* timeout ms */, + "application/json"); } -bool rpcCall(const char *url, const char *userpwd, const char *reqData, int len, string &response, const char *agent) -{ - return httpPOSTImpl(url, userpwd, reqData, len, response, 5000, "application/json", agent); +bool rpcCall( + const char *url, + const char *userpwd, + const char *reqData, + int len, + string &response, + const char *agent) { + return httpPOSTImpl( + url, userpwd, reqData, len, response, 5000, "application/json", agent); } // @@ -339,22 +378,24 @@ time_t str2time(const char *str, const char *format) { void writeTime2File(const char *filename, uint32_t t) { FILE *fp = 
fopen(filename, "w"); - if (!fp) { return; } + if (!fp) { + return; + } fprintf(fp, "%u", t); fclose(fp); } -string Strings::Format(const char * fmt, ...) { +string Strings::Format(const char *fmt, ...) { char tmp[512]; string dest; va_list al; va_start(al, fmt); int len = vsnprintf(tmp, 512, fmt, al); va_end(al); - if (len>511) { - char * destbuff = new char[len+1]; + if (len > 511) { + char *destbuff = new char[len + 1]; va_start(al, fmt); - len = vsnprintf(destbuff, len+1, fmt, al); + len = vsnprintf(destbuff, len + 1, fmt, al); va_end(al); dest.append(destbuff, len); delete[] destbuff; @@ -364,16 +405,16 @@ string Strings::Format(const char * fmt, ...) { return dest; } -void Strings::Append(string & dest, const char * fmt, ...) { +void Strings::Append(string &dest, const char *fmt, ...) { char tmp[512]; va_list al; va_start(al, fmt); int len = vsnprintf(tmp, 512, fmt, al); va_end(al); - if (len>511) { - char * destbuff = new char[len+1]; + if (len > 511) { + char *destbuff = new char[len + 1]; va_start(al, fmt); - len = vsnprintf(destbuff, len+1, fmt, al); + len = vsnprintf(destbuff, len + 1, fmt, al); va_end(al); dest.append(destbuff, len); delete[] destbuff; @@ -400,26 +441,29 @@ string score2Str(double s) { return Strings::Format(f.c_str(), s); } -bool fileExists(const char* file) { +bool fileExists(const char *file) { struct stat buf; return (stat(file, &buf) == 0); } -bool fileNonEmpty(const char* file) { +bool fileNonEmpty(const char *file) { struct stat buf; return (stat(file, &buf) == 0) && (buf.st_size > 0); } -string getStatsFilePath(const char *chainType, const string &dataDir, time_t ts) { +string +getStatsFilePath(const char *chainType, const string &dataDir, time_t ts) { bool needSlash = false; if (dataDir.length() > 0 && *dataDir.rbegin() != '/') { needSlash = true; } // filename: sharelog-2016-07-12.bin - return Strings::Format("%s%ssharelog%s-%s.bin", - dataDir.c_str(), needSlash ? 
"/" : "", - chainType, - date("%F", ts).c_str()); + return Strings::Format( + "%s%ssharelog%s-%s.bin", + dataDir.c_str(), + needSlash ? "/" : "", + chainType, + date("%F", ts).c_str()); } // A 37-character character set. @@ -428,83 +472,83 @@ string getStatsFilePath(const char *chainType, const string &dataDir, time_t ts) // others: 36 static const uint8_t kAlphaNumRankBase = 37; static const uint8_t kAlphaNumRankTable[256] = { - 36 /* 0 */, 36 /* 1 */, 36 /* 2 */, 36 /* 3 */, - 36 /* 4 */, 36 /* 5 */, 36 /* 6 */, 36 /* 7 */, - 36 /* 8 */, 36 /* 9 */, 36 /* 10 */, 36 /* 11 */, - 36 /* 12 */, 36 /* 13 */, 36 /* 14 */, 36 /* 15 */, - 36 /* 16 */, 36 /* 17 */, 36 /* 18 */, 36 /* 19 */, - 36 /* 20 */, 36 /* 21 */, 36 /* 22 */, 36 /* 23 */, - 36 /* 24 */, 36 /* 25 */, 36 /* 26 */, 36 /* 27 */, - 36 /* 28 */, 36 /* 29 */, 36 /* 30 */, 36 /* 31 */, - 36 /* ' ' */, 36 /* '!' */, 36 /* '"' */, 36 /* '#' */, - 36 /* '$' */, 36 /* '%' */, 36 /* '&' */, 36 /* '\'' */, - 36 /* '(' */, 36 /* ')' */, 36 /* '*' */, 36 /* '+' */, - 36 /* ',' */, 36 /* '-' */, 36 /* '.' */, 36 /* '/' */, - 26 /* '0' */, 27 /* '1' */, 28 /* '2' */, 29 /* '3' */, - 30 /* '4' */, 31 /* '5' */, 32 /* '6' */, 33 /* '7' */, - 34 /* '8' */, 35 /* '9' */, 36 /* ':' */, 36 /* ';' */, - 36 /* '<' */, 36 /* '=' */, 36 /* '>' */, 36 /* '?' 
*/, - 36 /* '@' */, 0 /* 'A' */, 1 /* 'B' */, 2 /* 'C' */, - 3 /* 'D' */, 4 /* 'E' */, 5 /* 'F' */, 6 /* 'G' */, - 7 /* 'H' */, 8 /* 'I' */, 9 /* 'J' */, 10 /* 'K' */, - 11 /* 'L' */, 12 /* 'M' */, 13 /* 'N' */, 14 /* 'O' */, - 15 /* 'P' */, 16 /* 'Q' */, 17 /* 'R' */, 18 /* 'S' */, - 19 /* 'T' */, 20 /* 'U' */, 21 /* 'V' */, 22 /* 'W' */, - 23 /* 'X' */, 24 /* 'Y' */, 25 /* 'Z' */, 36 /* '[' */, - 36 /* '\' */, 36 /* ']' */, 36 /* '^' */, 36 /* '_' */, - 36 /* '`' */, 0 /* 'a' */, 1 /* 'b' */, 2 /* 'c' */, - 3 /* 'd' */, 4 /* 'e' */, 5 /* 'f' */, 6 /* 'g' */, - 7 /* 'h' */, 8 /* 'i' */, 9 /* 'j' */, 10 /* 'k' */, - 11 /* 'l' */, 12 /* 'm' */, 13 /* 'n' */, 14 /* 'o' */, - 15 /* 'p' */, 16 /* 'q' */, 17 /* 'r' */, 18 /* 's' */, - 19 /* 't' */, 20 /* 'u' */, 21 /* 'v' */, 22 /* 'w' */, - 23 /* 'x' */, 24 /* 'y' */, 25 /* 'z' */, 36 /* '{' */, - 36 /* '|' */, 36 /* '}' */, 36 /* '~' */, 36 /* 127 */, - 36 /* 128 */, 36 /* 129 */, 36 /* 130 */, 36 /* 131 */, - 36 /* 132 */, 36 /* 133 */, 36 /* 134 */, 36 /* 135 */, - 36 /* 136 */, 36 /* 137 */, 36 /* 138 */, 36 /* 139 */, - 36 /* 140 */, 36 /* 141 */, 36 /* 142 */, 36 /* 143 */, - 36 /* 144 */, 36 /* 145 */, 36 /* 146 */, 36 /* 147 */, - 36 /* 148 */, 36 /* 149 */, 36 /* 150 */, 36 /* 151 */, - 36 /* 152 */, 36 /* 153 */, 36 /* 154 */, 36 /* 155 */, - 36 /* 156 */, 36 /* 157 */, 36 /* 158 */, 36 /* 159 */, - 36 /* 160 */, 36 /* 161 */, 36 /* 162 */, 36 /* 163 */, - 36 /* 164 */, 36 /* 165 */, 36 /* 166 */, 36 /* 167 */, - 36 /* 168 */, 36 /* 169 */, 36 /* 170 */, 36 /* 171 */, - 36 /* 172 */, 36 /* 173 */, 36 /* 174 */, 36 /* 175 */, - 36 /* 176 */, 36 /* 177 */, 36 /* 178 */, 36 /* 179 */, - 36 /* 180 */, 36 /* 181 */, 36 /* 182 */, 36 /* 183 */, - 36 /* 184 */, 36 /* 185 */, 36 /* 186 */, 36 /* 187 */, - 36 /* 188 */, 36 /* 189 */, 36 /* 190 */, 36 /* 191 */, - 36 /* 192 */, 36 /* 193 */, 36 /* 194 */, 36 /* 195 */, - 36 /* 196 */, 36 /* 197 */, 36 /* 198 */, 36 /* 199 */, - 36 /* 200 */, 36 /* 201 */, 36 /* 202 */, 
36 /* 203 */, - 36 /* 204 */, 36 /* 205 */, 36 /* 206 */, 36 /* 207 */, - 36 /* 208 */, 36 /* 209 */, 36 /* 210 */, 36 /* 211 */, - 36 /* 212 */, 36 /* 213 */, 36 /* 214 */, 36 /* 215 */, - 36 /* 216 */, 36 /* 217 */, 36 /* 218 */, 36 /* 219 */, - 36 /* 220 */, 36 /* 221 */, 36 /* 222 */, 36 /* 223 */, - 36 /* 224 */, 36 /* 225 */, 36 /* 226 */, 36 /* 227 */, - 36 /* 228 */, 36 /* 229 */, 36 /* 230 */, 36 /* 231 */, - 36 /* 232 */, 36 /* 233 */, 36 /* 234 */, 36 /* 235 */, - 36 /* 236 */, 36 /* 237 */, 36 /* 238 */, 36 /* 239 */, - 36 /* 240 */, 36 /* 241 */, 36 /* 242 */, 36 /* 243 */, - 36 /* 244 */, 36 /* 245 */, 36 /* 246 */, 36 /* 247 */, - 36 /* 248 */, 36 /* 249 */, 36 /* 250 */, 36 /* 251 */, - 36 /* 252 */, 36 /* 253 */, 36 /* 254 */, 36 /* 255 */ + 36 /* 0 */, 36 /* 1 */, 36 /* 2 */, 36 /* 3 */, + 36 /* 4 */, 36 /* 5 */, 36 /* 6 */, 36 /* 7 */, + 36 /* 8 */, 36 /* 9 */, 36 /* 10 */, 36 /* 11 */, + 36 /* 12 */, 36 /* 13 */, 36 /* 14 */, 36 /* 15 */, + 36 /* 16 */, 36 /* 17 */, 36 /* 18 */, 36 /* 19 */, + 36 /* 20 */, 36 /* 21 */, 36 /* 22 */, 36 /* 23 */, + 36 /* 24 */, 36 /* 25 */, 36 /* 26 */, 36 /* 27 */, + 36 /* 28 */, 36 /* 29 */, 36 /* 30 */, 36 /* 31 */, + 36 /* ' ' */, 36 /* '!' */, 36 /* '"' */, 36 /* '#' */, + 36 /* '$' */, 36 /* '%' */, 36 /* '&' */, 36 /* '\'' */, + 36 /* '(' */, 36 /* ')' */, 36 /* '*' */, 36 /* '+' */, + 36 /* ',' */, 36 /* '-' */, 36 /* '.' */, 36 /* '/' */, + 26 /* '0' */, 27 /* '1' */, 28 /* '2' */, 29 /* '3' */, + 30 /* '4' */, 31 /* '5' */, 32 /* '6' */, 33 /* '7' */, + 34 /* '8' */, 35 /* '9' */, 36 /* ':' */, 36 /* ';' */, + 36 /* '<' */, 36 /* '=' */, 36 /* '>' */, 36 /* '?' 
*/, + 36 /* '@' */, 0 /* 'A' */, 1 /* 'B' */, 2 /* 'C' */, + 3 /* 'D' */, 4 /* 'E' */, 5 /* 'F' */, 6 /* 'G' */, + 7 /* 'H' */, 8 /* 'I' */, 9 /* 'J' */, 10 /* 'K' */, + 11 /* 'L' */, 12 /* 'M' */, 13 /* 'N' */, 14 /* 'O' */, + 15 /* 'P' */, 16 /* 'Q' */, 17 /* 'R' */, 18 /* 'S' */, + 19 /* 'T' */, 20 /* 'U' */, 21 /* 'V' */, 22 /* 'W' */, + 23 /* 'X' */, 24 /* 'Y' */, 25 /* 'Z' */, 36 /* '[' */, + 36 /* '\' */, 36 /* ']' */, 36 /* '^' */, 36 /* '_' */, + 36 /* '`' */, 0 /* 'a' */, 1 /* 'b' */, 2 /* 'c' */, + 3 /* 'd' */, 4 /* 'e' */, 5 /* 'f' */, 6 /* 'g' */, + 7 /* 'h' */, 8 /* 'i' */, 9 /* 'j' */, 10 /* 'k' */, + 11 /* 'l' */, 12 /* 'm' */, 13 /* 'n' */, 14 /* 'o' */, + 15 /* 'p' */, 16 /* 'q' */, 17 /* 'r' */, 18 /* 's' */, + 19 /* 't' */, 20 /* 'u' */, 21 /* 'v' */, 22 /* 'w' */, + 23 /* 'x' */, 24 /* 'y' */, 25 /* 'z' */, 36 /* '{' */, + 36 /* '|' */, 36 /* '}' */, 36 /* '~' */, 36 /* 127 */, + 36 /* 128 */, 36 /* 129 */, 36 /* 130 */, 36 /* 131 */, + 36 /* 132 */, 36 /* 133 */, 36 /* 134 */, 36 /* 135 */, + 36 /* 136 */, 36 /* 137 */, 36 /* 138 */, 36 /* 139 */, + 36 /* 140 */, 36 /* 141 */, 36 /* 142 */, 36 /* 143 */, + 36 /* 144 */, 36 /* 145 */, 36 /* 146 */, 36 /* 147 */, + 36 /* 148 */, 36 /* 149 */, 36 /* 150 */, 36 /* 151 */, + 36 /* 152 */, 36 /* 153 */, 36 /* 154 */, 36 /* 155 */, + 36 /* 156 */, 36 /* 157 */, 36 /* 158 */, 36 /* 159 */, + 36 /* 160 */, 36 /* 161 */, 36 /* 162 */, 36 /* 163 */, + 36 /* 164 */, 36 /* 165 */, 36 /* 166 */, 36 /* 167 */, + 36 /* 168 */, 36 /* 169 */, 36 /* 170 */, 36 /* 171 */, + 36 /* 172 */, 36 /* 173 */, 36 /* 174 */, 36 /* 175 */, + 36 /* 176 */, 36 /* 177 */, 36 /* 178 */, 36 /* 179 */, + 36 /* 180 */, 36 /* 181 */, 36 /* 182 */, 36 /* 183 */, + 36 /* 184 */, 36 /* 185 */, 36 /* 186 */, 36 /* 187 */, + 36 /* 188 */, 36 /* 189 */, 36 /* 190 */, 36 /* 191 */, + 36 /* 192 */, 36 /* 193 */, 36 /* 194 */, 36 /* 195 */, + 36 /* 196 */, 36 /* 197 */, 36 /* 198 */, 36 /* 199 */, + 36 /* 200 */, 36 /* 201 */, 36 /* 202 */, 
36 /* 203 */, + 36 /* 204 */, 36 /* 205 */, 36 /* 206 */, 36 /* 207 */, + 36 /* 208 */, 36 /* 209 */, 36 /* 210 */, 36 /* 211 */, + 36 /* 212 */, 36 /* 213 */, 36 /* 214 */, 36 /* 215 */, + 36 /* 216 */, 36 /* 217 */, 36 /* 218 */, 36 /* 219 */, + 36 /* 220 */, 36 /* 221 */, 36 /* 222 */, 36 /* 223 */, + 36 /* 224 */, 36 /* 225 */, 36 /* 226 */, 36 /* 227 */, + 36 /* 228 */, 36 /* 229 */, 36 /* 230 */, 36 /* 231 */, + 36 /* 232 */, 36 /* 233 */, 36 /* 234 */, 36 /* 235 */, + 36 /* 236 */, 36 /* 237 */, 36 /* 238 */, 36 /* 239 */, + 36 /* 240 */, 36 /* 241 */, 36 /* 242 */, 36 /* 243 */, + 36 /* 244 */, 36 /* 245 */, 36 /* 246 */, 36 /* 247 */, + 36 /* 248 */, 36 /* 249 */, 36 /* 250 */, 36 /* 251 */, + 36 /* 252 */, 36 /* 253 */, 36 /* 254 */, 36 /* 255 */ }; uint64_t getAlphaNumRank(const string &str, size_t significand) { - uint64_t r=0; - size_t i=0; + uint64_t r = 0; + size_t i = 0; size_t strSize = (str.size() > significand) ? significand : str.size(); - for (; i -void readFromSetting(const S &setting, - const string &key, - V &value, - bool optional = false) -{ +template +void readFromSetting( + const S &setting, const string &key, V &value, bool optional = false) { if (!setting.lookupValue(key, value) && !optional) { LOG(FATAL) << "config section missing key: " << key; } } -string getStatsFilePath(const char *chainType, const string &dataDir, time_t ts); +string +getStatsFilePath(const char *chainType, const string &dataDir, time_t ts); // redis sorted-set uses double as its rank. 
// 37^9 = 1.299617398e+14 < 2^52 = 4.503599627e+15 diff --git a/src/Watcher.cc b/src/Watcher.cc index f824f85ff..bf9a56da9 100644 --- a/src/Watcher.cc +++ b/src/Watcher.cc @@ -32,9 +32,7 @@ #include #include - -static -bool tryReadLine(string &line, struct bufferevent *bufev) { +static bool tryReadLine(string &line, struct bufferevent *bufev) { line.clear(); struct evbuffer *inBuf = bufferevent_get_input(bufev); @@ -42,33 +40,33 @@ bool tryReadLine(string &line, struct bufferevent *bufev) { struct evbuffer_ptr loc; loc = evbuffer_search_eol(inBuf, NULL, NULL, EVBUFFER_EOL_LF); if (loc.pos < 0) { - return false; // not found + return false; // not found } // copies and removes the first datlen bytes from the front of buf // into the memory at data - line.resize(loc.pos + 1); // containing "\n" + line.resize(loc.pos + 1); // containing "\n" evbuffer_remove(inBuf, (void *)line.data(), line.size()); return true; } -static -bool resolve(const string &host, struct in_addr *sin_addr) { +static bool resolve(const string &host, struct in_addr *sin_addr) { LOG(INFO) << "resolve " << host.c_str(); struct evutil_addrinfo *ai = NULL; struct evutil_addrinfo hints_in; memset(&hints_in, 0, sizeof(evutil_addrinfo)); // AF_INET, v4; AF_INT6, v6; AF_UNSPEC, both v4 & v6 - hints_in.ai_family = AF_UNSPEC; + hints_in.ai_family = AF_UNSPEC; hints_in.ai_socktype = SOCK_STREAM; hints_in.ai_protocol = IPPROTO_TCP; - hints_in.ai_flags = EVUTIL_AI_ADDRCONFIG; + hints_in.ai_flags = EVUTIL_AI_ADDRCONFIG; // TODO: use non-blocking to resolve hostname int err = evutil_getaddrinfo(host.c_str(), NULL, &hints_in, &ai); if (err != 0) { - LOG(ERROR) << "[" << host.c_str() << "] evutil_getaddrinfo err: " << err << ", " << evutil_gai_strerror(err); + LOG(ERROR) << "[" << host.c_str() << "] evutil_getaddrinfo err: " << err + << ", " << evutil_gai_strerror(err); return false; } if (ai == NULL) { @@ -78,7 +76,7 @@ bool resolve(const string &host, struct in_addr *sin_addr) { // only get the first record, 
ignore ai = ai->ai_next if (ai->ai_family == AF_INET) { - struct sockaddr_in *sin = (struct sockaddr_in*)ai->ai_addr; + struct sockaddr_in *sin = (struct sockaddr_in *)ai->ai_addr; *sin_addr = sin->sin_addr; char ipStr[INET_ADDRSTRLEN]; @@ -94,12 +92,18 @@ bool resolve(const string &host, struct in_addr *sin_addr) { } ///////////////////////////////// ClientContainer ////////////////////////////// -ClientContainer::ClientContainer(const string &kafkaBrokers, const string &consumerTopic, const string &producerTopic, - bool disableChecking) - : running_(true), disableChecking_(disableChecking), kafkaBrokers_(kafkaBrokers) - , kafkaProducer_(kafkaBrokers_.c_str(), producerTopic.c_str(), 0/* partition */) - , kafkaStratumJobConsumer_(kafkaBrokers_.c_str(), consumerTopic.c_str(), 0/*patition*/) -{ +ClientContainer::ClientContainer( + const string &kafkaBrokers, + const string &consumerTopic, + const string &producerTopic, + bool disableChecking) + : running_(true) + , disableChecking_(disableChecking) + , kafkaBrokers_(kafkaBrokers) + , kafkaProducer_( + kafkaBrokers_.c_str(), producerTopic.c_str(), 0 /* partition */) + , kafkaStratumJobConsumer_( + kafkaBrokers_.c_str(), consumerTopic.c_str(), 0 /*patition*/) { base_ = event_base_new(); assert(base_ != nullptr); } @@ -110,7 +114,7 @@ ClientContainer::~ClientContainer() { void ClientContainer::runThreadStratumJobConsume() { LOG(INFO) << "start stratum job consume thread"; - + const int32_t kTimeoutMs = 1000; while (running_) { @@ -138,17 +142,19 @@ void ClientContainer::consumeStratumJob(rd_kafka_message_t *rkmessage) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. // Not really an error. 
- // LOG(INFO) << "consumer reached end of " << rd_kafka_topic_name(rkmessage->rkt) + // LOG(INFO) << "consumer reached end of " << + // rd_kafka_topic_name(rkmessage->rkt) // << "[" << rkmessage->partition << "] " // << " message queue at offset " << rkmessage->offset; // acturlly return; } - - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); - + + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); + if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { LOG(FATAL) << "consume fatal"; @@ -156,7 +162,7 @@ void ClientContainer::consumeStratumJob(rd_kafka_message_t *rkmessage) { return; } - string str((const char*)rkmessage->payload, rkmessage->len); + string str((const char *)rkmessage->payload, rkmessage->len); consumeStratumJobInternal(str); } @@ -198,23 +204,24 @@ bool ClientContainer::init() { /* setup threadStratumJobConsume */ { const int32_t kConsumeLatestN = 1; - + // we need to consume the latest one map consumerOptions; consumerOptions["fetch.wait.max.ms"] = "10"; - if (kafkaStratumJobConsumer_.setup(RD_KAFKA_OFFSET_TAIL(kConsumeLatestN), - &consumerOptions) == false) { + if (kafkaStratumJobConsumer_.setup( + RD_KAFKA_OFFSET_TAIL(kConsumeLatestN), &consumerOptions) == false) { LOG(INFO) << "setup stratumJobConsume fail"; return false; } - + if (!kafkaStratumJobConsumer_.checkAlive()) { LOG(ERROR) << "kafka brokers is not alive"; return false; } if (!disableChecking_) { - threadStratumJobConsume_ = thread(&ClientContainer::runThreadStratumJobConsume, this); + threadStratumJobConsume_ = + thread(&ClientContainer::runThreadStratumJobConsume, this); } } @@ -222,9 +229,13 @@ bool ClientContainer::init() { } 
// do add pools before init() -bool ClientContainer::addPools(const string &poolName, const string &poolHost, - const int16_t poolPort, const string &workerName) { - auto ptr = createPoolWatchClient(base_, poolName, poolHost, poolPort, workerName); +bool ClientContainer::addPools( + const string &poolName, + const string &poolHost, + const int16_t poolPort, + const string &workerName) { + auto ptr = + createPoolWatchClient(base_, poolName, poolHost, poolPort, workerName); if (!ptr->connect()) { return false; } @@ -236,8 +247,12 @@ bool ClientContainer::addPools(const string &poolName, const string &poolHost, void ClientContainer::removeAndCreateClient(PoolWatchClient *client) { for (size_t i = 0; i < clients_.size(); i++) { if (clients_[i] == client) { - auto ptr = createPoolWatchClient(base_, client->poolName_, client->poolHost_, - client->poolPort_, client->workerName_); + auto ptr = createPoolWatchClient( + base_, + client->poolName_, + client->poolHost_, + client->poolPort_, + client->workerName_); ptr->connect(); LOG(INFO) << "reconnect " << ptr->poolName_; @@ -256,8 +271,8 @@ void ClientContainer::readCallback(struct bufferevent *bev, void *ptr) { } // static func -void ClientContainer::eventCallback(struct bufferevent *bev, - short events, void *ptr) { +void ClientContainer::eventCallback( + struct bufferevent *bev, short events, void *ptr) { PoolWatchClient *client = static_cast(ptr); ClientContainer *container = client->container_; @@ -272,15 +287,12 @@ void ClientContainer::eventCallback(struct bufferevent *bev, if (events & BEV_EVENT_EOF) { LOG(INFO) << "upsession closed"; - } - else if (events & BEV_EVENT_ERROR) { + } else if (events & BEV_EVENT_ERROR) { LOG(ERROR) << "got an error on the upsession: " - << evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()); - } - else if (events & BEV_EVENT_TIMEOUT) { + << evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()); + } else if (events & BEV_EVENT_TIMEOUT) { LOG(INFO) << "upsession read/write timeout, 
events: " << events; - } - else { + } else { LOG(ERROR) << "unhandled upsession events: " << events; } @@ -288,28 +300,31 @@ void ClientContainer::eventCallback(struct bufferevent *bev, container->removeAndCreateClient(client); } - ///////////////////////////////// PoolWatchClient ////////////////////////////// -PoolWatchClient::PoolWatchClient(struct event_base *base, ClientContainer *container, - bool disableChecking, - const string &poolName, - const string &poolHost, const int16_t poolPort, - const string &workerName) +PoolWatchClient::PoolWatchClient( + struct event_base *base, + ClientContainer *container, + bool disableChecking, + const string &poolName, + const string &poolHost, + const int16_t poolPort, + const string &workerName) : disableChecking_(disableChecking) , container_(container) , poolName_(poolName) , poolHost_(poolHost) , poolPort_(poolPort) - , workerName_(workerName) -{ + , workerName_(workerName) { bev_ = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE); assert(bev_ != nullptr); - bufferevent_setcb(bev_, - ClientContainer::readCallback, NULL, - ClientContainer::eventCallback, this); - bufferevent_enable(bev_, EV_READ|EV_WRITE); - + bufferevent_setcb( + bev_, + ClientContainer::readCallback, + NULL, + ClientContainer::eventCallback, + this); + bufferevent_enable(bev_, EV_READ | EV_WRITE); state_ = INIT; @@ -326,7 +341,7 @@ bool PoolWatchClient::connect() { struct sockaddr_in sin; memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; - sin.sin_port = htons(poolPort_); + sin.sin_port = htons(poolPort_); if (!resolve(poolHost_, &sin.sin_addr)) { return false; } @@ -335,7 +350,8 @@ bool PoolWatchClient::connect() { // bufferevent_socket_connect(): This function returns 0 if the connect // was successfully launched, and -1 if an error occurred. 
- int res = bufferevent_socket_connect(bev_, (struct sockaddr *)&sin, sizeof(sin)); + int res = + bufferevent_socket_connect(bev_, (struct sockaddr *)&sin, sizeof(sin)); if (res == 0) { state_ = CONNECTED; return true; @@ -364,4 +380,3 @@ bool PoolWatchClient::handleMessage() { return false; } - diff --git a/src/Watcher.h b/src/Watcher.h index c54d26014..4b5b424a8 100644 --- a/src/Watcher.h +++ b/src/Watcher.h @@ -38,13 +38,11 @@ #include #include -#define BTCCOM_WATCHER_AGENT "btc.com-watcher/0.2" - +#define BTCCOM_WATCHER_AGENT "btc.com-watcher/0.2" class PoolWatchClient; class ClientContainer; - ///////////////////////////////// ClientContainer ////////////////////////////// class ClientContainer { protected: @@ -61,37 +59,42 @@ class ClientContainer { void runThreadStratumJobConsume(); void consumeStratumJob(rd_kafka_message_t *rkmessage); - KafkaProducer kafkaProducer_; // produce GBT message - KafkaConsumer kafkaStratumJobConsumer_; // consume topic: 'StratumJob' + KafkaProducer kafkaProducer_; // produce GBT message + KafkaConsumer kafkaStratumJobConsumer_; // consume topic: 'StratumJob' - virtual void consumeStratumJobInternal(const string& str) = 0; + virtual void consumeStratumJobInternal(const string &str) = 0; virtual string createOnConnectedReplyString() const = 0; - virtual PoolWatchClient* createPoolWatchClient( - struct event_base *base, - const string &poolName, const string &poolHost, - const int16_t poolPort, const string &workerName) = 0; - - ClientContainer(const string &kafkaBrokers, const string &consumerTopic, const string &producerTopic, - bool disableChecking); + virtual PoolWatchClient *createPoolWatchClient( + struct event_base *base, + const string &poolName, + const string &poolHost, + const int16_t poolPort, + const string &workerName) = 0; + + ClientContainer( + const string &kafkaBrokers, + const string &consumerTopic, + const string &producerTopic, + bool disableChecking); public: virtual ~ClientContainer(); - bool addPools(const 
string &poolName, const string &poolHost, - const int16_t poolPort, const string &workerName); + bool addPools( + const string &poolName, + const string &poolHost, + const int16_t poolPort, + const string &workerName); virtual bool init(); void run(); void stop(); void removeAndCreateClient(PoolWatchClient *client); - - static void readCallback (struct bufferevent *bev, void *ptr); + static void readCallback(struct bufferevent *bev, void *ptr); static void eventCallback(struct bufferevent *bev, short events, void *ptr); - }; - ///////////////////////////////// PoolWatchClient ////////////////////////////// class PoolWatchClient { protected: @@ -102,26 +105,24 @@ class PoolWatchClient { virtual void handleStratumMessage(const string &line) = 0; public: - enum State { - INIT = 0, - CONNECTED = 1, - SUBSCRIBED = 2, - AUTHENTICATED = 3 - }; + enum State { INIT = 0, CONNECTED = 1, SUBSCRIBED = 2, AUTHENTICATED = 3 }; State state_; ClientContainer *container_; - string poolName_; - string poolHost_; + string poolName_; + string poolHost_; int16_t poolPort_; - string workerName_; + string workerName_; protected: - PoolWatchClient(struct event_base *base, ClientContainer *container, - bool disableChecking, - const string &poolName, - const string &poolHost, const int16_t poolPort, - const string &workerName); + PoolWatchClient( + struct event_base *base, + ClientContainer *container, + bool disableChecking, + const string &poolName, + const string &poolHost, + const int16_t poolPort, + const string &workerName); public: virtual ~PoolWatchClient(); @@ -130,9 +131,7 @@ class PoolWatchClient { void recvData(); void sendData(const char *data, size_t len); - inline void sendData(const string &str) { - sendData(str.data(), str.size()); - } + inline void sendData(const string &str) { sendData(str.data(), str.size()); } }; #endif diff --git a/src/Zookeeper.cc b/src/Zookeeper.cc index 58c5c8cfb..52556374b 100644 --- a/src/Zookeeper.cc +++ b/src/Zookeeper.cc @@ -30,7 +30,8 @@ 
#include #include "Zookeeper.h" -ZookeeperException::ZookeeperException(const string &what_arg) : std::runtime_error(what_arg) { +ZookeeperException::ZookeeperException(const string &what_arg) + : std::runtime_error(what_arg) { // no more action than its parent } @@ -38,8 +39,10 @@ int Zookeeper::nodeNameCompare(const void *pname1, const void *pname2) { return strcmp(*(const char **)pname1, *(const char **)pname2); } -void Zookeeper::globalWatcher(zhandle_t *zh, int type, int state, const char *path, void *zookeeper) { - DLOG(INFO) << "Zookeeper::globalWatcher: type:" << type << ", state:" << state << ", path:" << path; +void Zookeeper::globalWatcher( + zhandle_t *zh, int type, int state, const char *path, void *zookeeper) { + DLOG(INFO) << "Zookeeper::globalWatcher: type:" << type << ", state:" << state + << ", path:" << path; if (type == ZOO_SESSION_EVENT && state == ZOO_CONNECTING_STATE) { ZookeeperException ex("Zookeeper: lost the connection from broker."); @@ -48,26 +51,31 @@ void Zookeeper::globalWatcher(zhandle_t *zh, int type, int state, const char *pa } } -void Zookeeper::lockWatcher(zhandle_t *zh, int type, int state, const char *path, void *pMutex) { +void Zookeeper::lockWatcher( + zhandle_t *zh, int type, int state, const char *path, void *pMutex) { pthread_mutex_unlock((pthread_mutex_t *)pMutex); } Zookeeper::Zookeeper(const char *servers) { - zh = zookeeper_init(servers, Zookeeper::globalWatcher, ZOOKEEPER_CONNECT_TIMEOUT, NULL, this, 0); + zh = zookeeper_init( + servers, + Zookeeper::globalWatcher, + ZOOKEEPER_CONNECT_TIMEOUT, + NULL, + this, + 0); if (zh == NULL) { throw ZookeeperException(string("Zookeeper init failed: ") + zerror(errno)); } - - for (int i=0; i<=20 && zoo_state(zh)!=ZOO_CONNECTED_STATE; i+=5) - { + + for (int i = 0; i <= 20 && zoo_state(zh) != ZOO_CONNECTED_STATE; i += 5) { LOG(INFO) << "Zookeeper: connecting to zookeeper brokers: " << i << "s"; sleep(5); } - - if (zoo_state(zh)!=ZOO_CONNECTED_STATE) - { + + if (zoo_state(zh) != 
ZOO_CONNECTED_STATE) { ZookeeperException ex("Zookeeper: connecting to zookeeper brokers failed!"); throw ex; } @@ -86,7 +94,7 @@ void Zookeeper::getLock(const char *lockParentPath) { // It will append the path with a increasing sequence char *lockNodeNewPathBuffer; int bufferLen; - + // Add 100 bytes for "/node" and the appened string likes "0000000293". // The final node path looks like this: "/locks/jobmaker/node0000000293". bufferLen = strlen(lockParentPath) + 100; @@ -101,12 +109,14 @@ void Zookeeper::getLock(const char *lockParentPath) { // It isn't a busy waiting because doGetLock() will // block itself with pthread_mutex_lock() until zookeeper // event awake it. - while (!doGetLock(lockParentPath, lockNodeNewPathBuffer)); + while (!doGetLock(lockParentPath, lockNodeNewPathBuffer)) + ; delete[] lockNodeNewPathBuffer; } -bool Zookeeper::doGetLock(const char *lockParentPath, const char *lockNodePath) { +bool Zookeeper::doGetLock( + const char *lockParentPath, const char *lockNodePath) { int i = 0; int stat = 0; int myNodePosition = -1; @@ -115,24 +125,28 @@ bool Zookeeper::doGetLock(const char *lockParentPath, const char *lockNodePath) struct String_vector nodes = {0, NULL}; pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; - // get the name part (likes "node0000000293") from full path (likes "/locks/jobmaker/node0000000293"). + // get the name part (likes "node0000000293") from full path (likes + // "/locks/jobmaker/node0000000293"). myNodeName = lockNodePath + strlen(lockParentPath) + 1; stat = zoo_get_children(zh, lockParentPath, 0, &nodes); if (stat != ZOK) { - throw ZookeeperException(string("Zookeeper::doGetLock: get children for ") + lockParentPath + - " failed:" + zerror(stat)); + throw ZookeeperException( + string("Zookeeper::doGetLock: get children for ") + lockParentPath + + " failed:" + zerror(stat)); } // it should not be 0 because of a new node added for the process. 
assert(nodes.count > 0); - - qsort(nodes.data, nodes.count, sizeof(nodes.data), Zookeeper::nodeNameCompare); - LOG(INFO) << "Zookeeper: fight for lock " << lockParentPath << " with " << nodes.count << " clients"; + qsort( + nodes.data, nodes.count, sizeof(nodes.data), Zookeeper::nodeNameCompare); - for (i=0; i 0 && myNodePosition < nodes.count); // get the previous node near myself - watchNodePath = string(lockParentPath) + "/" + nodes.data[myNodePosition - 1]; + watchNodePath = + string(lockParentPath) + "/" + nodes.data[myNodePosition - 1]; LOG(INFO) << "Zookeeper: watch the lock release for " << watchNodePath; // Watch the previous node with callback function. - stat = zoo_wexists(zh, watchNodePath.c_str(), Zookeeper::lockWatcher, &mutex, NULL); + stat = zoo_wexists( + zh, watchNodePath.c_str(), Zookeeper::lockWatcher, &mutex, NULL); if (stat != ZOK) { - throw ZookeeperException(string("Zookeeper::doGetLock: watch node ") + watchNodePath + - " failed: " + zerror(stat)); + throw ZookeeperException( + string("Zookeeper::doGetLock: watch node ") + watchNodePath + + " failed: " + zerror(stat)); } // block the thread for waiting watch event. @@ -181,17 +198,27 @@ bool Zookeeper::doGetLock(const char *lockParentPath, const char *lockNodePath) } } -void Zookeeper::createLockNode(const char *nodePath, char *newNodePath, int newNodePathMaxLen) { +void Zookeeper::createLockNode( + const char *nodePath, char *newNodePath, int newNodePathMaxLen) { int stat; // the ZOO_EPHEMERAL node will disapper if the client offline. - // ZOO_SEQUENCE will appened a increasing sequence after nodePath (set as newNodePath). - stat = zoo_create(zh, nodePath, NULL, -1, &ZOO_READ_ACL_UNSAFE, - ZOO_EPHEMERAL | ZOO_SEQUENCE, newNodePath, newNodePathMaxLen); + // ZOO_SEQUENCE will appened a increasing sequence after nodePath (set as + // newNodePath). 
+ stat = zoo_create( + zh, + nodePath, + NULL, + -1, + &ZOO_READ_ACL_UNSAFE, + ZOO_EPHEMERAL | ZOO_SEQUENCE, + newNodePath, + newNodePathMaxLen); if (stat != ZOK) { - throw ZookeeperException(string("Zookeeper::createLockNode: create node ") + nodePath + - " failed: " + zerror(stat)); + throw ZookeeperException( + string("Zookeeper::createLockNode: create node ") + nodePath + + " failed: " + zerror(stat)); } } @@ -206,9 +233,9 @@ void Zookeeper::createNodesRecursively(const char *nodePath) { // the path should be 2 or more words and the first char must be '/' // (we cannot create the root node "/") assert(pathLen >= 2 && path[0] == '/'); - + // pos=1: skip the root node "/" - for (pos=1; pos &leaves, - uint256 *proot, bool *pmutated, - uint32_t branchpos, - std::vector *pbranch) { - if (pbranch) pbranch->clear(); - if (leaves.size() == 0) { - if (pmutated) *pmutated = false; - if (proot) *proot = uint256(); - return; - } - bool mutated = false; - // count is the number of leaves processed so far. - uint32_t count = 0; - // inner is an array of eagerly computed subtree hashes, indexed by tree - // level (0 being the leaves). - // For example, when count is 25 (11001 in binary), inner[4] is the hash of - // the first 16 leaves, inner[3] of the next 8 leaves, and inner[0] equal to - // the last leaf. The other inner entries are undefined. - uint256 inner[32]; - // Which position in inner is a hash that depends on the matching leaf. - int matchlevel = -1; - // First process all leaves into 'inner' values. - while (count < leaves.size()) { - uint256 h = leaves[count]; - bool matchh = count == branchpos; - count++; - int level; - // For each of the lower bits in count that are 0, do 1 step. Each - // corresponds to an inner value that existed before processing the - // current leaf, and each needs a hash to combine it. 
- for (level = 0; !(count & (((uint32_t)1) << level)); level++) { - if (pbranch) { - if (matchh) { - pbranch->push_back(inner[level]); - } else if (matchlevel == level) { - pbranch->push_back(h); - matchh = true; - } - } - mutated |= (inner[level] == h); - CHash256() - .Write(inner[level].begin(), 32) - .Write(h.begin(), 32) - .Finalize(h.begin()); - } - // Store the resulting hash at inner position level. - inner[level] = h; +static void MerkleComputation( + const std::vector &leaves, + uint256 *proot, + bool *pmutated, + uint32_t branchpos, + std::vector *pbranch) { + if (pbranch) + pbranch->clear(); + if (leaves.size() == 0) { + if (pmutated) + *pmutated = false; + if (proot) + *proot = uint256(); + return; + } + bool mutated = false; + // count is the number of leaves processed so far. + uint32_t count = 0; + // inner is an array of eagerly computed subtree hashes, indexed by tree + // level (0 being the leaves). + // For example, when count is 25 (11001 in binary), inner[4] is the hash of + // the first 16 leaves, inner[3] of the next 8 leaves, and inner[0] equal to + // the last leaf. The other inner entries are undefined. + uint256 inner[32]; + // Which position in inner is a hash that depends on the matching leaf. + int matchlevel = -1; + // First process all leaves into 'inner' values. + while (count < leaves.size()) { + uint256 h = leaves[count]; + bool matchh = count == branchpos; + count++; + int level; + // For each of the lower bits in count that are 0, do 1 step. Each + // corresponds to an inner value that existed before processing the + // current leaf, and each needs a hash to combine it. 
+ for (level = 0; !(count & (((uint32_t)1) << level)); level++) { + if (pbranch) { if (matchh) { - matchlevel = level; + pbranch->push_back(inner[level]); + } else if (matchlevel == level) { + pbranch->push_back(h); + matchh = true; } + } + mutated |= (inner[level] == h); + CHash256() + .Write(inner[level].begin(), 32) + .Write(h.begin(), 32) + .Finalize(h.begin()); } - // Do a final 'sweep' over the rightmost branch of the tree to process - // odd levels, and reduce everything to a single top value. - // Level is the level (counted from the bottom) up to which we've sweeped. - int level = 0; - // As long as bit number level in count is zero, skip it. It means there - // is nothing left at this level. - while (!(count & (((uint32_t)1) << level))) { - level++; + // Store the resulting hash at inner position level. + inner[level] = h; + if (matchh) { + matchlevel = level; } - uint256 h = inner[level]; - bool matchh = matchlevel == level; - while (count != (((uint32_t)1) << level)) { - // If we reach this point, h is an inner value that is not the top. - // We combine it with itself (Bitcoin's special rule for odd levels in - // the tree) to produce a higher level one. - if (pbranch && matchh) { - pbranch->push_back(h); - } - CHash256() - .Write(h.begin(), 32) - .Write(h.begin(), 32) - .Finalize(h.begin()); - // Increment count to the value it would have if two entries at this - // level had existed. - count += (((uint32_t)1) << level); - level++; - // And propagate the result upwards accordingly. - while (!(count & (((uint32_t)1) << level))) { - if (pbranch) { - if (matchh) { - pbranch->push_back(inner[level]); - } else if (matchlevel == level) { - pbranch->push_back(h); - matchh = true; - } - } - CHash256() - .Write(inner[level].begin(), 32) - .Write(h.begin(), 32) - .Finalize(h.begin()); - level++; + } + // Do a final 'sweep' over the rightmost branch of the tree to process + // odd levels, and reduce everything to a single top value. 
+ // Level is the level (counted from the bottom) up to which we've sweeped. + int level = 0; + // As long as bit number level in count is zero, skip it. It means there + // is nothing left at this level. + while (!(count & (((uint32_t)1) << level))) { + level++; + } + uint256 h = inner[level]; + bool matchh = matchlevel == level; + while (count != (((uint32_t)1) << level)) { + // If we reach this point, h is an inner value that is not the top. + // We combine it with itself (Bitcoin's special rule for odd levels in + // the tree) to produce a higher level one. + if (pbranch && matchh) { + pbranch->push_back(h); + } + CHash256().Write(h.begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + // Increment count to the value it would have if two entries at this + // level had existed. + count += (((uint32_t)1) << level); + level++; + // And propagate the result upwards accordingly. + while (!(count & (((uint32_t)1) << level))) { + if (pbranch) { + if (matchh) { + pbranch->push_back(inner[level]); + } else if (matchlevel == level) { + pbranch->push_back(h); + matchh = true; } + } + CHash256() + .Write(inner[level].begin(), 32) + .Write(h.begin(), 32) + .Finalize(h.begin()); + level++; } - // Return result. - if (pmutated) *pmutated = mutated; - if (proot) *proot = h; + } + // Return result. 
+ if (pmutated) + *pmutated = mutated; + if (proot) + *proot = h; } -std::vector ComputeMerkleBranch(const std::vector &leaves, uint32_t position) { - std::vector ret; - MerkleComputation(leaves, nullptr, nullptr, position, &ret); - return ret; +std::vector +ComputeMerkleBranch(const std::vector &leaves, uint32_t position) { + std::vector ret; + MerkleComputation(leaves, nullptr, nullptr, position, &ret); + return ret; } -std::vector BlockMerkleBranch(const CBlock &block, - uint32_t position) { - std::vector leaves; - leaves.resize(block.vtx.size()); - for (size_t s = 0; s < block.vtx.size(); s++) { - leaves[s] = block.vtx[s]->GetHash(); - } - return ComputeMerkleBranch(leaves, position); +std::vector BlockMerkleBranch(const CBlock &block, uint32_t position) { + std::vector leaves; + leaves.resize(block.vtx.size()); + for (size_t s = 0; s < block.vtx.size(); s++) { + leaves[s] = block.vtx[s]->GetHash(); + } + return ComputeMerkleBranch(leaves, position); } #endif @@ -164,8 +168,7 @@ std::vector BlockMerkleBranch(const CBlock &block, #ifndef CHAIN_TYPE_UBTC /////////////////////// Block Reward of BTC, BCH, SBTC /////////////////////// -int64_t GetBlockReward(int nHeight, const Consensus::Params& consensusParams) -{ +int64_t GetBlockReward(int nHeight, const Consensus::Params &consensusParams) { int halvings = nHeight / consensusParams.nSubsidyHalvingInterval; // Force block reward to zero when right shift is undefined. if (halvings >= 64) @@ -173,8 +176,10 @@ int64_t GetBlockReward(int nHeight, const Consensus::Params& consensusParams) int64_t nSubsidy = 50 * COIN_TO_SATOSHIS; - // Block reward is cut in half every 210,000 blocks which will occur approximately every 4 years. - nSubsidy >>= halvings; // this line is secure, it copied from bitcoin's validation.cpp + // Block reward is cut in half every 210,000 blocks which will occur + // approximately every 4 years. 
+ nSubsidy >>= + halvings; // this line is secure, it copied from bitcoin's validation.cpp return nSubsidy; } @@ -182,72 +187,75 @@ int64_t GetBlockReward(int nHeight, const Consensus::Params& consensusParams) /////////////////////// Block Reward of UBTC /////////////////////// // copied from UnitedBitcoin-v1.1.0.0 -int64_t GetBlockReward(int nHeight, const Consensus::Params& consensusParams) -{ - int halvings; - - if (nHeight < Params().GetConsensus().ForkV1Height) - { - halvings = nHeight / consensusParams.nSubsidyHalvingInterval; - // Force block reward to zero when right shift is undefined. - if (halvings >= 64) - return 0; - - int64_t nSubsidy = 50 * COIN_TO_SATOSHIS; - // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years. - nSubsidy >>= halvings; - return nSubsidy; - } - else { - int halfPeriodLeft = consensusParams.ForkV1Height - 1 - consensusParams.nSubsidyHalvingInterval * 2; - int halfPeriodRight = (consensusParams.nSubsidyHalvingInterval - halfPeriodLeft) * 10; - - int PeriodEndHeight = consensusParams.ForkV1Height -1 + (consensusParams.nSubsidyHalvingInterval - halfPeriodLeft) * 10; - if (nHeight <= PeriodEndHeight) - halvings = 2; - else - { - halvings = 3 + (nHeight - PeriodEndHeight - 1) / (consensusParams.nSubsidyHalvingInterval * 10); - } - - // Force block reward to zero when right shift is undefined. - if (halvings >= 64) - return 0; - - int64_t nSubsidy = 50 * COIN_TO_SATOSHIS; - // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years. - nSubsidy >>= halvings; - nSubsidy = nSubsidy / 10 * 0.8; - - return nSubsidy; - } +int64_t GetBlockReward(int nHeight, const Consensus::Params &consensusParams) { + int halvings; + + if (nHeight < Params().GetConsensus().ForkV1Height) { + halvings = nHeight / consensusParams.nSubsidyHalvingInterval; + // Force block reward to zero when right shift is undefined. 
+ if (halvings >= 64) + return 0; + + int64_t nSubsidy = 50 * COIN_TO_SATOSHIS; + // Subsidy is cut in half every 210,000 blocks which will occur + // approximately every 4 years. + nSubsidy >>= halvings; + return nSubsidy; + } else { + int halfPeriodLeft = consensusParams.ForkV1Height - 1 - + consensusParams.nSubsidyHalvingInterval * 2; + int halfPeriodRight = + (consensusParams.nSubsidyHalvingInterval - halfPeriodLeft) * 10; + + int PeriodEndHeight = consensusParams.ForkV1Height - 1 + + (consensusParams.nSubsidyHalvingInterval - halfPeriodLeft) * 10; + if (nHeight <= PeriodEndHeight) + halvings = 2; + else { + halvings = 3 + + (nHeight - PeriodEndHeight - 1) / + (consensusParams.nSubsidyHalvingInterval * 10); + } + + // Force block reward to zero when right shift is undefined. + if (halvings >= 64) + return 0; + + int64_t nSubsidy = 50 * COIN_TO_SATOSHIS; + // Subsidy is cut in half every 210,000 blocks which will occur + // approximately every 4 years. + nSubsidy >>= halvings; + nSubsidy = nSubsidy / 10 * 0.8; + + return nSubsidy; + } } #endif - #ifdef CHAIN_TYPE_SBTC -namespace BitcoinUtils -{ - CTxDestination DecodeDestination(const std::string& str) { - CBitcoinAddress addr(str); - return addr.Get(); - } +namespace BitcoinUtils { +CTxDestination DecodeDestination(const std::string &str) { + CBitcoinAddress addr(str); + return addr.Get(); +} - bool IsValidDestinationString(const std::string& str) { - CBitcoinAddress addr(str); - return addr.IsValid(); - } +bool IsValidDestinationString(const std::string &str) { + CBitcoinAddress addr(str); + return addr.IsValid(); } +} // namespace BitcoinUtils #endif // CHAIN_TYPE_SBTC - -static bool checkBitcoinRPCGetNetworkInfo(const string &rpcAddr, const string &rpcUserpass) { +static bool checkBitcoinRPCGetNetworkInfo( + const string &rpcAddr, const string &rpcUserpass) { string response; - string request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getnetworkinfo\",\"params\":[]}"; - bool res = 
blockchainNodeRpcCall(rpcAddr.c_str(), rpcUserpass.c_str(), - request.c_str(), response); + string request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getnetworkinfo\"," + "\"params\":[]}"; + bool res = blockchainNodeRpcCall( + rpcAddr.c_str(), rpcUserpass.c_str(), request.c_str(), response); if (!res) { LOG(ERROR) << "rpc getnetworkinfo call failure"; @@ -257,8 +265,8 @@ static bool checkBitcoinRPCGetNetworkInfo(const string &rpcAddr, const string &r LOG(INFO) << "getnetworkinfo: " << response; JsonNode r; - if (!JsonNode::parse(response.c_str(), - response.c_str() + response.length(), r)) { + if (!JsonNode::parse( + response.c_str(), response.c_str() + response.length(), r)) { LOG(ERROR) << "decode getnetworkinfo failure"; return false; } @@ -277,11 +285,13 @@ static bool checkBitcoinRPCGetNetworkInfo(const string &rpcAddr, const string &r return true; } -static bool checkBitcoinRPCGetInfo(const string &rpcAddr, const string &rpcUserpass) { +static bool +checkBitcoinRPCGetInfo(const string &rpcAddr, const string &rpcUserpass) { string response; - string request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getinfo\",\"params\":[]}"; - bool res = blockchainNodeRpcCall(rpcAddr.c_str(), rpcUserpass.c_str(), - request.c_str(), response); + string request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getinfo\",\"params\":[]}"; + bool res = blockchainNodeRpcCall( + rpcAddr.c_str(), rpcUserpass.c_str(), request.c_str(), response); if (!res) { LOG(ERROR) << "rpc getinfo call failure"; return false; @@ -290,8 +300,8 @@ static bool checkBitcoinRPCGetInfo(const string &rpcAddr, const string &rpcUserp LOG(INFO) << "getinfo: " << response; JsonNode r; - if (!JsonNode::parse(response.c_str(), - response.c_str() + response.length(), r)) { + if (!JsonNode::parse( + response.c_str(), response.c_str() + response.length(), r)) { LOG(ERROR) << "decode getinfo failure"; return false; } @@ -312,7 +322,7 @@ static bool checkBitcoinRPCGetInfo(const string &rpcAddr, 
const string &rpcUserp bool checkBitcoinRPC(const string &rpcAddr, const string &rpcUserpass) { return checkBitcoinRPCGetNetworkInfo(rpcAddr, rpcUserpass) || - checkBitcoinRPCGetInfo(rpcAddr, rpcUserpass); + checkBitcoinRPCGetInfo(rpcAddr, rpcUserpass); } int32_t getBlockHeightFromCoinbase(const string &coinbase1) { @@ -321,14 +331,13 @@ int32_t getBlockHeightFromCoinbase(const string &coinbase1) { auto size = (int32_t)strtol(sizeStr.c_str(), nullptr, 16); // see CScript::push_int64 for the logic - if(size == OP_0) + if (size == OP_0) return 0; - if(size >= OP_1 && size <= OP_1 + 16) + if (size >= OP_1 && size <= OP_1 + 16) return size - (OP_1 - 1); string heightHex; - for(int i = 0; i < size; ++i) - { + for (int i = 0; i < size; ++i) { heightHex = coinbase1.substr(86 + (i * 2), 2) + heightHex; } diff --git a/src/bitcoin/BitcoinUtils.h b/src/bitcoin/BitcoinUtils.h index aa5936f61..acdc93358 100644 --- a/src/bitcoin/BitcoinUtils.h +++ b/src/bitcoin/BitcoinUtils.h @@ -35,68 +35,61 @@ #include "CommonBitcoin.h" #if defined(CHAIN_TYPE_BCH) || defined(CHAIN_TYPE_BSV) - // header that defined DecodeDestination & IsValidDestinationString - #include +// header that defined DecodeDestination & IsValidDestinationString +#include #ifdef CHAIN_TYPE_BCH - #define AMOUNT_TYPE(x) Amount(x * SATOSHI) - #define COIN_TO_SATOSHIS (COIN / SATOSHI) - #define AMOUNT_SATOSHIS(amt) (amt / SATOSHI) +#define AMOUNT_TYPE(x) Amount(x *SATOSHI) +#define COIN_TO_SATOSHIS (COIN / SATOSHI) +#define AMOUNT_SATOSHIS(amt) (amt / SATOSHI) - std::vector ComputeMerkleBranch(const std::vector &leaves, uint32_t position); - std::vector BlockMerkleBranch(const CBlock &block, uint32_t position); +std::vector +ComputeMerkleBranch(const std::vector &leaves, uint32_t position); +std::vector BlockMerkleBranch(const CBlock &block, uint32_t position); #else - #define AMOUNT_TYPE Amount - #define COIN_TO_SATOSHIS COIN.GetSatoshis() - #define AMOUNT_SATOSHIS(amt) amt.GetSatoshis() +#define AMOUNT_TYPE Amount 
+#define COIN_TO_SATOSHIS COIN.GetSatoshis() +#define AMOUNT_SATOSHIS(amt) amt.GetSatoshis() #endif - namespace BitcoinUtils - { - inline bool IsValidDestinationString(const std::string &addr) - { - return ::IsValidDestinationString(addr, Params()); - } - inline CTxDestination DecodeDestination(const std::string& str) - { - return ::DecodeDestination(str, Params()); - } - } +namespace BitcoinUtils { +inline bool IsValidDestinationString(const std::string &addr) { + return ::IsValidDestinationString(addr, Params()); +} +inline CTxDestination DecodeDestination(const std::string &str) { + return ::DecodeDestination(str, Params()); +} +} // namespace BitcoinUtils #elif defined(CHAIN_TYPE_SBTC) - #define AMOUNT_TYPE CAmount - #define COIN_TO_SATOSHIS COIN - #define AMOUNT_SATOSHIS(amt) amt - - namespace BitcoinUtils - { - CTxDestination DecodeDestination(const std::string& str); - bool IsValidDestinationString(const std::string& str); - } +#define AMOUNT_TYPE CAmount +#define COIN_TO_SATOSHIS COIN +#define AMOUNT_SATOSHIS(amt) amt + +namespace BitcoinUtils { +CTxDestination DecodeDestination(const std::string &str); +bool IsValidDestinationString(const std::string &str); +} // namespace BitcoinUtils #else - #define AMOUNT_TYPE CAmount - #define COIN_TO_SATOSHIS COIN - #define AMOUNT_SATOSHIS(amt) amt - - namespace BitcoinUtils - { - inline bool IsValidDestinationString(const std::string &addr) - { - return ::IsValidDestinationString(addr); - } - inline CTxDestination DecodeDestination(const std::string& str) - { - return ::DecodeDestination(str); - } - } +#define AMOUNT_TYPE CAmount +#define COIN_TO_SATOSHIS COIN +#define AMOUNT_SATOSHIS(amt) amt + +namespace BitcoinUtils { +inline bool IsValidDestinationString(const std::string &addr) { + return ::IsValidDestinationString(addr); +} +inline CTxDestination DecodeDestination(const std::string &str) { + return ::DecodeDestination(str); +} +} // namespace BitcoinUtils #endif std::string EncodeHexBlock(const CBlock &block); 
std::string EncodeHexBlockHeader(const CBlockHeader &blkHeader); -int64_t GetBlockReward(int nHeight, const Consensus::Params& consensusParams); +int64_t GetBlockReward(int nHeight, const Consensus::Params &consensusParams); bool checkBitcoinRPC(const string &rpcAddr, const string &rpcUserpass); int32_t getBlockHeightFromCoinbase(const string &coinbase1); - #endif // BITCOIN_UTILS_H_ diff --git a/src/bitcoin/BlockMakerBitcoin.cc b/src/bitcoin/BlockMakerBitcoin.cc index e66e9d0ab..803ad6850 100644 --- a/src/bitcoin/BlockMakerBitcoin.cc +++ b/src/bitcoin/BlockMakerBitcoin.cc @@ -35,17 +35,25 @@ #include ////////////////////////////////// BlockMaker ////////////////////////////////// -BlockMakerBitcoin::BlockMakerBitcoin(shared_ptr blkMakerDef, const char *kafkaBrokers, const MysqlConnectInfo &poolDB) +BlockMakerBitcoin::BlockMakerBitcoin( + shared_ptr blkMakerDef, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB) : BlockMaker(blkMakerDef, kafkaBrokers, poolDB) - , kMaxRawGbtNum_(100) /* if 5 seconds a rawgbt, will hold 100*5/60 = 8 mins rawgbt */ - , kMaxStratumJobNum_(120) /* if 30 seconds a stratum job, will hold 60 mins stratum job */ + , kMaxRawGbtNum_( + 100) /* if 5 seconds a rawgbt, will hold 100*5/60 = 8 mins rawgbt */ + , kMaxStratumJobNum_( + 120) /* if 30 seconds a stratum job, will hold 60 mins stratum job */ , lastSubmittedBlockTime() , submittedRskBlocks(0) - , kafkaConsumerRawGbt_ (kafkaBrokers, def()->rawGbtTopic_.c_str(), 0/* patition */) - , kafkaConsumerStratumJob_ (kafkaBrokers, def()->stratumJobTopic_.c_str(), 0/* patition */) - , kafkaConsumerNamecoinSolvedShare_(kafkaBrokers, def()->auxPowSolvedShareTopic_.c_str(), 0/* patition */) - , kafkaConsumerRskSolvedShare_(kafkaBrokers, def()->rskSolvedShareTopic_.c_str(), 0/* patition */) -{ + , kafkaConsumerRawGbt_( + kafkaBrokers, def()->rawGbtTopic_.c_str(), 0 /* patition */) + , kafkaConsumerStratumJob_( + kafkaBrokers, def()->stratumJobTopic_.c_str(), 0 /* patition */) + , 
kafkaConsumerNamecoinSolvedShare_( + kafkaBrokers, def()->auxPowSolvedShareTopic_.c_str(), 0 /* patition */) + , kafkaConsumerRskSolvedShare_( + kafkaBrokers, def()->rskSolvedShareTopic_.c_str(), 0 /* patition */) { } BlockMakerBitcoin::~BlockMakerBitcoin() { @@ -66,15 +74,15 @@ bool BlockMakerBitcoin::init() { if (!checkBitcoinds()) return false; - if(!BlockMaker::init()) - { + if (!BlockMaker::init()) { return false; } // // Raw Gbt // // we need to consume the latest N messages - if (kafkaConsumerRawGbt_.setup(RD_KAFKA_OFFSET_TAIL(kMaxRawGbtNum_)) == false) { + if (kafkaConsumerRawGbt_.setup(RD_KAFKA_OFFSET_TAIL(kMaxRawGbtNum_)) == + false) { LOG(INFO) << "setup kafkaConsumerRawGbt_ fail"; return false; } @@ -87,7 +95,8 @@ bool BlockMakerBitcoin::init() { // Stratum Job // // we need to consume the latest 2 messages, just in case - if (kafkaConsumerStratumJob_.setup(RD_KAFKA_OFFSET_TAIL(kMaxStratumJobNum_)) == false) { + if (kafkaConsumerStratumJob_.setup( + RD_KAFKA_OFFSET_TAIL(kMaxStratumJobNum_)) == false) { LOG(INFO) << "setup kafkaConsumerStratumJob_ fail"; return false; } @@ -100,12 +109,14 @@ bool BlockMakerBitcoin::init() { // Namecoin Sloved Share // // we need to consume the latest 2 messages, just in case - if (kafkaConsumerNamecoinSolvedShare_.setup(RD_KAFKA_OFFSET_TAIL(2)) == false) { + if (kafkaConsumerNamecoinSolvedShare_.setup(RD_KAFKA_OFFSET_TAIL(2)) == + false) { LOG(INFO) << "setup kafkaConsumerNamecoinSolvedShare_ fail"; return false; } if (!kafkaConsumerNamecoinSolvedShare_.checkAlive()) { - LOG(ERROR) << "kafka brokers is not alive: kafkaConsumerNamecoinSolvedShare_"; + LOG(ERROR) + << "kafka brokers is not alive: kafkaConsumerNamecoinSolvedShare_"; return false; } @@ -131,16 +142,18 @@ void BlockMakerBitcoin::consumeRawGbt(rd_kafka_message_t *rkmessage) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. // Not really an error. 
- // LOG(INFO) << "consumer reached end of " << rd_kafka_topic_name(rkmessage->rkt) + // LOG(INFO) << "consumer reached end of " << + // rd_kafka_topic_name(rkmessage->rkt) // << "[" << rkmessage->partition << "] " // << " message queue at offset " << rkmessage->offset; // acturlly return; } - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { @@ -160,9 +173,9 @@ void BlockMakerBitcoin::addRawgbt(const char *str, size_t len) { LOG(ERROR) << "parse rawgbt message to json fail"; return; } - if (r["created_at_ts"].type() != Utilities::JS::type::Int || + if (r["created_at_ts"].type() != Utilities::JS::type::Int || r["block_template_base64"].type() != Utilities::JS::type::Str || - r["gbthash"].type() != Utilities::JS::type::Str) { + r["gbthash"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "invalid rawgbt: missing fields"; return; } @@ -174,7 +187,7 @@ void BlockMakerBitcoin::addRawgbt(const char *str, size_t len) { } const string gbt = DecodeBase64(r["block_template_base64"].str()); - assert(gbt.length() > 64); // valid gbt string's len at least 64 bytes + assert(gbt.length() > 64); // valid gbt string's len at least 64 bytes JsonNode nodeGbt; if (!JsonNode::parse(gbt.c_str(), gbt.c_str() + gbt.length(), nodeGbt)) { @@ -185,28 +198,30 @@ void BlockMakerBitcoin::addRawgbt(const char *str, size_t len) { #ifdef CHAIN_TYPE_BCH bool isLightVersion = jgbt["job_id"].type() == Utilities::JS::type::Str; - if(isLightVersion) - { + if (isLightVersion) { ScopeLock ls(rawGbtlightLock_); rawGbtlightMap_[gbtHash] = 
jgbt["job_id"].str(); - LOG(INFO) << "insert rawgbt light: " << gbtHash.ToString() << ", job_id: " << jgbt["job_id"].str().c_str(); + LOG(INFO) << "insert rawgbt light: " << gbtHash.ToString() + << ", job_id: " << jgbt["job_id"].str().c_str(); return; } #endif // CHAIN_TYPE_BCH // transaction without coinbase_tx - shared_ptr> vtxs = std::make_shared>(); - for (JsonNode & node : jgbt["transactions"].array()) { + shared_ptr> vtxs = + std::make_shared>(); + for (JsonNode &node : jgbt["transactions"].array()) { CMutableTransaction tx; DecodeHexTx(tx, node["data"].str()); vtxs->push_back(MakeTransactionRef(std::move(tx))); } - LOG(INFO) << "insert rawgbt: " << gbtHash.ToString() << ", txs: " << vtxs->size(); + LOG(INFO) << "insert rawgbt: " << gbtHash.ToString() + << ", txs: " << vtxs->size(); insertRawGbt(gbtHash, vtxs); } -void BlockMakerBitcoin::insertRawGbt(const uint256 &gbtHash, - shared_ptr> vtxs) { +void BlockMakerBitcoin::insertRawGbt( + const uint256 &gbtHash, shared_ptr> vtxs) { ScopeLock ls(rawGbtLock_); // insert rawgbt @@ -217,13 +232,12 @@ void BlockMakerBitcoin::insertRawGbt(const uint256 &gbtHash, while (rawGbtQ_.size() > kMaxRawGbtNum_) { const uint256 h = *rawGbtQ_.begin(); - rawGbtMap_.erase(h); // delete from map - rawGbtQ_.pop_front(); // delete from Q + rawGbtMap_.erase(h); // delete from map + rawGbtQ_.pop_front(); // delete from Q } } -static -string _buildAuxPow(const CBlock *block) { +static string _buildAuxPow(const CBlock *block) { // // see: https://en.bitcoin.it/wiki/Merged_mining_specification // @@ -248,7 +262,7 @@ string _buildAuxPow(const CBlock *block) { // 3. coinbase_branch, Merkle branch { - vector merkleBranch = BlockMerkleBranch(*block, 0/* position */); + vector merkleBranch = BlockMerkleBranch(*block, 0 /* position */); // Number of links in branch // should be Variable integer, but can't over than 0xfd, so we just print @@ -271,8 +285,8 @@ string _buildAuxPow(const CBlock *block) { // 4. 
Aux Blockchain Link { - auxPow += "00"; // Number of links in branch - auxPow += "00000000"; // Branch sides bitmask + auxPow += "00"; // Number of links in branch + auxPow += "00000000"; // Branch sides bitmask } // 5. Parent Block Header @@ -285,22 +299,25 @@ string _buildAuxPow(const CBlock *block) { return auxPow; } -void BlockMakerBitcoin::consumeNamecoinSolvedShare(rd_kafka_message_t *rkmessage) { +void BlockMakerBitcoin::consumeNamecoinSolvedShare( + rd_kafka_message_t *rkmessage) { // check error if (rkmessage->err) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. // Not really an error. - // LOG(INFO) << "consumer reached end of " << rd_kafka_topic_name(rkmessage->rkt) + // LOG(INFO) << "consumer reached end of " << + // rd_kafka_topic_name(rkmessage->rkt) // << "[" << rkmessage->partition << "] " // << " message queue at offset " << rkmessage->offset; // acturlly return; } - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { @@ -316,30 +333,32 @@ void BlockMakerBitcoin::consumeNamecoinSolvedShare(rd_kafka_message_t *rkmessage // namecoin solved share message // JsonNode j; - if (JsonNode::parse((const char *)rkmessage->payload, - (const char *)rkmessage->payload + rkmessage->len, j) == false) { + if (JsonNode::parse( + (const char *)rkmessage->payload, + (const char *)rkmessage->payload + rkmessage->len, + j) == false) { LOG(ERROR) << "decode namecoin solved share message fail: " - << string((const char *)rkmessage->payload, 
rkmessage->len); + << string((const char *)rkmessage->payload, rkmessage->len); return; } // check fields - if (j["job_id"].type() != Utilities::JS::type::Int || + if (j["job_id"].type() != Utilities::JS::type::Int || j["aux_block_hash"].type() != Utilities::JS::type::Str || - j["block_header"].type() != Utilities::JS::type::Str || - j["coinbase_tx"].type() != Utilities::JS::type::Str || - j["rpc_addr"].type() != Utilities::JS::type::Str || - j["rpc_userpass"].type() != Utilities::JS::type::Str) { + j["block_header"].type() != Utilities::JS::type::Str || + j["coinbase_tx"].type() != Utilities::JS::type::Str || + j["rpc_addr"].type() != Utilities::JS::type::Str || + j["rpc_userpass"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "namecoin solved share message missing some fields"; return; } - const uint64_t jobId = j["job_id"].uint64(); - const string auxBlockHash = j["aux_block_hash"].str(); + const uint64_t jobId = j["job_id"].uint64(); + const string auxBlockHash = j["aux_block_hash"].str(); const string blockHeaderHex = j["block_header"].str(); - const string coinbaseTxHex = j["coinbase_tx"].str(); - const string rpcAddr = j["rpc_addr"].str(); - const string rpcUserpass = j["rpc_userpass"].str(); - assert(blockHeaderHex.size() == sizeof(CBlockHeader)*2); + const string coinbaseTxHex = j["coinbase_tx"].str(); + const string rpcAddr = j["rpc_addr"].str(); + const string rpcUserpass = j["rpc_userpass"].str(); + assert(blockHeaderHex.size() == sizeof(CBlockHeader) * 2); CBlockHeader blkHeader; vector coinbaseTxBin; @@ -368,7 +387,8 @@ void BlockMakerBitcoin::consumeNamecoinSolvedShare(rd_kafka_message_t *rkmessage { ScopeLock ls(rawGbtLock_); if (rawGbtMap_.find(gbtHash) == rawGbtMap_.end()) { - LOG(ERROR) << "can't find this gbthash in rawGbtMap_: " << gbtHash.ToString(); + LOG(ERROR) << "can't find this gbthash in rawGbtMap_: " + << gbtHash.ToString(); return; } vtxs = rawGbtMap_[gbtHash]; @@ -400,27 +420,33 @@ void 
BlockMakerBitcoin::consumeNamecoinSolvedShare(rd_kafka_message_t *rkmessage const string auxPow = _buildAuxPow(&newblk); // submit to namecoind - submitNamecoinBlockNonBlocking(auxBlockHash, auxPow, - newblk.GetHash().ToString(), - rpcAddr, rpcUserpass); + submitNamecoinBlockNonBlocking( + auxBlockHash, auxPow, newblk.GetHash().ToString(), rpcAddr, rpcUserpass); } -void BlockMakerBitcoin::submitNamecoinBlockNonBlocking(const string &auxBlockHash, - const string &auxPow, - const string &bitcoinBlockHash, - const string &rpcAddress, - const string &rpcUserpass) { +void BlockMakerBitcoin::submitNamecoinBlockNonBlocking( + const string &auxBlockHash, + const string &auxPow, + const string &bitcoinBlockHash, + const string &rpcAddress, + const string &rpcUserpass) { // use thread to submit - boost::thread t(boost::bind(&BlockMakerBitcoin::_submitNamecoinBlockThread, this, - auxBlockHash, auxPow, bitcoinBlockHash, - rpcAddress, rpcUserpass)); + boost::thread t(boost::bind( + &BlockMakerBitcoin::_submitNamecoinBlockThread, + this, + auxBlockHash, + auxPow, + bitcoinBlockHash, + rpcAddress, + rpcUserpass)); } -void BlockMakerBitcoin::_submitNamecoinBlockThread(const string &auxBlockHash, - const string &auxPow, - const string &bitcoinBlockHash, - const string &rpcAddress, - const string &rpcUserpass) { +void BlockMakerBitcoin::_submitNamecoinBlockThread( + const string &auxBlockHash, + const string &auxPow, + const string &bitcoinBlockHash, + const string &rpcAddress, + const string &rpcUserpass) { // // request : submitauxblock // @@ -428,28 +454,33 @@ void BlockMakerBitcoin::_submitNamecoinBlockThread(const string &auxBlockHash, string request = ""; bool isSupportSubmitAuxBlock = false; - if(isAddrSupportSubmitAux_.find(rpcAddress) != isAddrSupportSubmitAux_.end()) { - isSupportSubmitAuxBlock = isAddrSupportSubmitAux_.find(rpcAddress)->second; + if (isAddrSupportSubmitAux_.find(rpcAddress) != + isAddrSupportSubmitAux_.end()) { + isSupportSubmitAuxBlock = + 
isAddrSupportSubmitAux_.find(rpcAddress)->second; } else { - LOG(INFO) << "can't find " << rpcAddress << " in isAddrSupportSubmitAux_ map"; + LOG(INFO) << "can't find " << rpcAddress + << " in isAddrSupportSubmitAux_ map"; } - if(isSupportSubmitAuxBlock) { - request = Strings::Format("{\"id\":1,\"method\":\"submitauxblock\",\"params\":[\"%s\",\"%s\"]}", - auxBlockHash.c_str(), - auxPow.c_str()); + if (isSupportSubmitAuxBlock) { + request = Strings::Format( + "{\"id\":1,\"method\":\"submitauxblock\",\"params\":[\"%s\",\"%s\"]}", + auxBlockHash.c_str(), + auxPow.c_str()); } else { - request = Strings::Format("{\"id\":1,\"method\":\"getauxblock\",\"params\":[\"%s\",\"%s\"]}", - auxBlockHash.c_str(), - auxPow.c_str()); + request = Strings::Format( + "{\"id\":1,\"method\":\"getauxblock\",\"params\":[\"%s\",\"%s\"]}", + auxBlockHash.c_str(), + auxPow.c_str()); } DLOG(INFO) << "submitauxblock request: " << request; // try N times for (size_t i = 0; i < 3; i++) { string response; - bool res = blockchainNodeRpcCall(rpcAddress.c_str(), rpcUserpass.c_str(), - request.c_str(), response); + bool res = blockchainNodeRpcCall( + rpcAddress.c_str(), rpcUserpass.c_str(), request.c_str(), response); // success if (res == true) { @@ -465,16 +496,20 @@ void BlockMakerBitcoin::_submitNamecoinBlockThread(const string &auxBlockHash, // // save to databse // - if(!def()->foundAuxBlockTable_.empty()) { + if (!def()->foundAuxBlockTable_.empty()) { const string nowStr = date("%F %T"); string sql; - sql = Strings::Format("INSERT INTO `%s` " - " (`bitcoin_block_hash`,`aux_block_hash`," - " `aux_pow`,`created_at`) " - " VALUES (\"%s\",\"%s\",\"%s\",\"%s\"); ", - def()->foundAuxBlockTable_.empty() ? 
"found_nmc_blocks" : def()->foundAuxBlockTable_.c_str(), - bitcoinBlockHash.c_str(), - auxBlockHash.c_str(), auxPow.c_str(), nowStr.c_str()); + sql = Strings::Format( + "INSERT INTO `%s` " + " (`bitcoin_block_hash`,`aux_block_hash`," + " `aux_pow`,`created_at`) " + " VALUES (\"%s\",\"%s\",\"%s\",\"%s\"); ", + def()->foundAuxBlockTable_.empty() ? "found_nmc_blocks" + : def()->foundAuxBlockTable_.c_str(), + bitcoinBlockHash.c_str(), + auxBlockHash.c_str(), + auxPow.c_str(), + nowStr.c_str()); // try connect to DB MySQLConnection db(poolDB_); for (size_t i = 0; i < 3; i++) { @@ -506,12 +541,16 @@ void BlockMakerBitcoin::processSolvedShare(rd_kafka_message_t *rkmessage) { coinbaseTxBin.resize(rkmessage->len - sizeof(FoundBlock)); // foundBlock - memcpy((uint8_t *)&foundBlock, (const uint8_t *)rkmessage->payload, sizeof(FoundBlock)); + memcpy( + (uint8_t *)&foundBlock, + (const uint8_t *)rkmessage->payload, + sizeof(FoundBlock)); // coinbase tx - memcpy((uint8_t *)coinbaseTxBin.data(), - (const uint8_t *)rkmessage->payload + sizeof(FoundBlock), - coinbaseTxBin.size()); + memcpy( + (uint8_t *)coinbaseTxBin.data(), + (const uint8_t *)rkmessage->payload + sizeof(FoundBlock), + coinbaseTxBin.size()); // copy header memcpy((uint8_t *)&blkHeader, foundBlock.header80_, sizeof(CBlockHeader)); } @@ -531,18 +570,18 @@ void BlockMakerBitcoin::processSolvedShare(rd_kafka_message_t *rkmessage) { { ScopeLock ls(rawGbtlightLock_); const auto iter = rawGbtlightMap_.find(gbtHash); - if(iter != rawGbtlightMap_.end()) - { + if (iter != rawGbtlightMap_.end()) { gbtlightJobId = iter->second; } } bool lightVersion = !gbtlightJobId.empty(); - if(!lightVersion) -#endif // CHAIN_TYPE_BCH + if (!lightVersion) +#endif // CHAIN_TYPE_BCH { ScopeLock ls(rawGbtLock_); if (rawGbtMap_.find(gbtHash) == rawGbtMap_.end()) { - LOG(ERROR) << "can't find this gbthash in rawGbtMap_: " << gbtHash.ToString(); + LOG(ERROR) << "can't find this gbthash in rawGbtMap_: " + << gbtHash.ToString(); return; } vtxs = 
rawGbtMap_[gbtHash]; @@ -571,57 +610,72 @@ void BlockMakerBitcoin::processSolvedShare(rd_kafka_message_t *rkmessage) { // submit to bitcoind const string blockHex = EncodeHexBlock(newblk); #ifdef CHAIN_TYPE_BCH - if(lightVersion) - { - LOG(INFO) << "submit block light: " << newblk.GetHash().ToString() << " with job_id: " << gbtlightJobId.c_str(); + if (lightVersion) { + LOG(INFO) << "submit block light: " << newblk.GetHash().ToString() + << " with job_id: " << gbtlightJobId.c_str(); submitBlockLightNonBlocking(blockHex, gbtlightJobId); - } - else -#endif // CHAIN_TYPE_BCH + } else +#endif // CHAIN_TYPE_BCH { #ifdef CHAIN_TYPE_LTC LOG(INFO) << "submit block pow: " << newblk.GetPoWHash().ToString(); #endif LOG(INFO) << "submit block: " << newblk.GetHash().ToString(); - submitBlockNonBlocking(blockHex); // using thread + submitBlockNonBlocking(blockHex); // using thread } uint64_t coinbaseValue = AMOUNT_SATOSHIS(newblk.vtx[0]->GetValueOut()); // save to DB, using thread - saveBlockToDBNonBlocking(foundBlock, blkHeader, - coinbaseValue, // coinbase value - blockHex.length()/2); + saveBlockToDBNonBlocking( + foundBlock, + blkHeader, + coinbaseValue, // coinbase value + blockHex.length() / 2); } -void BlockMakerBitcoin::saveBlockToDBNonBlocking(const FoundBlock &foundBlock, - const CBlockHeader &header, - const uint64_t coinbaseValue, - const int32_t blksize) { - boost::thread t(boost::bind(&BlockMakerBitcoin::_saveBlockToDBThread, this, - foundBlock, header, coinbaseValue, blksize)); +void BlockMakerBitcoin::saveBlockToDBNonBlocking( + const FoundBlock &foundBlock, + const CBlockHeader &header, + const uint64_t coinbaseValue, + const int32_t blksize) { + boost::thread t(boost::bind( + &BlockMakerBitcoin::_saveBlockToDBThread, + this, + foundBlock, + header, + coinbaseValue, + blksize)); } -void BlockMakerBitcoin::_saveBlockToDBThread(const FoundBlock &foundBlock, - const CBlockHeader &header, - const uint64_t coinbaseValue, - const int32_t blksize) { +void 
BlockMakerBitcoin::_saveBlockToDBThread( + const FoundBlock &foundBlock, + const CBlockHeader &header, + const uint64_t coinbaseValue, + const int32_t blksize) { const string nowStr = date("%F %T"); string sql; - sql = Strings::Format("INSERT INTO `found_blocks` " - " (`puid`, `worker_id`, `worker_full_name`, `job_id`" - " ,`height`, `hash`, `rewards`, `size`, `prev_hash`" - " ,`bits`, `version`, `created_at`)" - " VALUES (%d,%" PRId64",\"%s\", %" PRIu64",%d,\"%s\"" - " ,%" PRId64",%d,\"%s\",%u,%d,\"%s\"); ", - foundBlock.userId_, foundBlock.workerId_, - // filter again, just in case - filterWorkerName(foundBlock.workerFullName_).c_str(), - foundBlock.jobId_, foundBlock.height_, - header.GetHash().ToString().c_str(), - coinbaseValue, blksize, - header.hashPrevBlock.ToString().c_str(), - header.nBits, header.nVersion, nowStr.c_str()); + sql = Strings::Format( + "INSERT INTO `found_blocks` " + " (`puid`, `worker_id`, `worker_full_name`, `job_id`" + " ,`height`, `hash`, `rewards`, `size`, `prev_hash`" + " ,`bits`, `version`, `created_at`)" + " VALUES (%d,%" PRId64 ",\"%s\", %" PRIu64 + ",%d,\"%s\"" + " ,%" PRId64 ",%d,\"%s\",%u,%d,\"%s\"); ", + foundBlock.userId_, + foundBlock.workerId_, + // filter again, just in case + filterWorkerName(foundBlock.workerFullName_).c_str(), + foundBlock.jobId_, + foundBlock.height_, + header.GetHash().ToString().c_str(), + coinbaseValue, + blksize, + header.hashPrevBlock.ToString().c_str(), + header.nBits, + header.nVersion, + nowStr.c_str()); LOG(INFO) << "BlockMakerBitcoin::_saveBlockToDBThread: " << sql; @@ -656,15 +710,22 @@ bool BlockMakerBitcoin::checkBitcoinds() { void BlockMakerBitcoin::submitBlockNonBlocking(const string &blockHex) { for (const auto &itr : def()->nodes) { // use thread to submit - boost::thread t(boost::bind(&BlockMakerBitcoin::_submitBlockThread, this, - itr.rpcAddr_, itr.rpcUserPwd_, blockHex)); + boost::thread t(boost::bind( + &BlockMakerBitcoin::_submitBlockThread, + this, + itr.rpcAddr_, + 
itr.rpcUserPwd_, + blockHex)); } } -void BlockMakerBitcoin::_submitBlockThread(const string &rpcAddress, - const string &rpcUserpass, - const string &blockHex) { - string request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"submitblock\",\"params\":[\""; +void BlockMakerBitcoin::_submitBlockThread( + const string &rpcAddress, + const string &rpcUserpass, + const string &blockHex) { + string request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"submitblock\",\"params\":" + "[\""; request += blockHex + "\"]}"; LOG(INFO) << "submit block to: " << rpcAddress; @@ -672,8 +733,8 @@ void BlockMakerBitcoin::_submitBlockThread(const string &rpcAddress, // try N times for (size_t i = 0; i < 3; i++) { string response; - bool res = blockchainNodeRpcCall(rpcAddress.c_str(), rpcUserpass.c_str(), - request.c_str(), response); + bool res = blockchainNodeRpcCall( + rpcAddress.c_str(), rpcUserpass.c_str(), request.c_str(), response); // success if (res == true) { @@ -687,19 +748,29 @@ void BlockMakerBitcoin::_submitBlockThread(const string &rpcAddress, } #ifdef CHAIN_TYPE_BCH -void BlockMakerBitcoin::submitBlockLightNonBlocking(const string &blockHex, const string& job_id) { +void BlockMakerBitcoin::submitBlockLightNonBlocking( + const string &blockHex, const string &job_id) { for (const auto &itr : def()->nodes) { // use thread to submit - boost::thread t(boost::bind(&BlockMakerBitcoin::_submitBlockLightThread, this, - itr.rpcAddr_, itr.rpcUserPwd_, job_id, blockHex)); + boost::thread t(boost::bind( + &BlockMakerBitcoin::_submitBlockLightThread, + this, + itr.rpcAddr_, + itr.rpcUserPwd_, + job_id, + blockHex)); t.detach(); } } -void BlockMakerBitcoin::_submitBlockLightThread(const string &rpcAddress, const string &rpcUserpass, const string& job_id, - const string &blockHex) -{ - - string request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"submitblocklight\",\"params\":[\""; +void BlockMakerBitcoin::_submitBlockLightThread( + const string &rpcAddress, + const string 
&rpcUserpass, + const string &job_id, + const string &blockHex) { + + string request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"submitblocklight\"," + "\"params\":[\""; request += blockHex + "\", \""; request += job_id + "\""; request += "]}"; @@ -708,16 +779,17 @@ void BlockMakerBitcoin::_submitBlockLightThread(const string &rpcAddress, const // try N times for (size_t i = 0; i < 3; i++) { string response; - bool res = blockchainNodeRpcCall(rpcAddress.c_str(), rpcUserpass.c_str(), - request.c_str(), response); + bool res = blockchainNodeRpcCall( + rpcAddress.c_str(), rpcUserpass.c_str(), request.c_str(), response); // success if (res == true) { - LOG(INFO) << "rpc call success, submit block light response: " << response; + LOG(INFO) << "rpc call success, submit block light response: " + << response; break; } // failure LOG(ERROR) << "rpc call fail: " << response; - } + } } #endif // CHAIN_TYPE_BCH @@ -727,16 +799,18 @@ void BlockMakerBitcoin::consumeStratumJob(rd_kafka_message_t *rkmessage) { if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) { // Reached the end of the topic+partition queue on the broker. // Not really an error. 
- // LOG(INFO) << "consumer reached end of " << rd_kafka_topic_name(rkmessage->rkt) + // LOG(INFO) << "consumer reached end of " << + // rd_kafka_topic_name(rkmessage->rkt) // << "[" << rkmessage->partition << "] " // << " message queue at offset " << rkmessage->offset; // acturlly return; } - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { @@ -749,8 +823,8 @@ void BlockMakerBitcoin::consumeStratumJob(rd_kafka_message_t *rkmessage) { LOG(INFO) << "received StratumJob message, len: " << rkmessage->len; shared_ptr sjob = std::make_shared(); - bool res = sjob->unserializeFromJson((const char *)rkmessage->payload, - rkmessage->len); + bool res = sjob->unserializeFromJson( + (const char *)rkmessage->payload, rkmessage->len); if (res == false) { LOG(ERROR) << "unserialize stratum job fail"; return; @@ -768,29 +842,36 @@ void BlockMakerBitcoin::consumeStratumJob(rd_kafka_message_t *rkmessage) { } } - LOG(INFO) << "StratumJob, jobId: " << sjob->jobId_ << ", gbtHash: " << gbtHash.ToString(); + LOG(INFO) << "StratumJob, jobId: " << sjob->jobId_ + << ", gbtHash: " << gbtHash.ToString(); bool isSupportSubmitAuxBlock = false; - if(!sjob->nmcRpcAddr_.empty() && !sjob->nmcRpcUserpass_.empty() && - isAddrSupportSubmitAux_.find(sjob->nmcRpcAddr_) == isAddrSupportSubmitAux_.end()) { + if (!sjob->nmcRpcAddr_.empty() && !sjob->nmcRpcUserpass_.empty() && + isAddrSupportSubmitAux_.find(sjob->nmcRpcAddr_) == + isAddrSupportSubmitAux_.end()) { string response; - string request = 
"{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"help\",\"params\":[]}"; - bool res = blockchainNodeRpcCall(sjob->nmcRpcAddr_.c_str(), sjob->nmcRpcUserpass_.c_str(), - request.c_str(), response); + string request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"help\",\"params\":[]}"; + bool res = blockchainNodeRpcCall( + sjob->nmcRpcAddr_.c_str(), + sjob->nmcRpcUserpass_.c_str(), + request.c_str(), + response); if (!res) { LOG(INFO) << "auxcoind rpc call failure"; } else { - isSupportSubmitAuxBlock = (response.find("createauxblock") == std::string::npos || - response.find("submitauxblock") == std::string::npos) ? false : true; - - LOG(INFO) << "auxcoind " << (isSupportSubmitAuxBlock ? " " : "doesn't ") - << "support rpc commands: createauxblock and submitauxblock"; + isSupportSubmitAuxBlock = + (response.find("createauxblock") == std::string::npos || + response.find("submitauxblock") == std::string::npos) + ? false + : true; - isAddrSupportSubmitAux_[sjob->nmcRpcAddr_] = isSupportSubmitAuxBlock; - } + LOG(INFO) << "auxcoind " << (isSupportSubmitAuxBlock ? " " : "doesn't ") + << "support rpc commands: createauxblock and submitauxblock"; + isAddrSupportSubmitAux_[sjob->nmcRpcAddr_] = isSupportSubmitAuxBlock; + } } - } void BlockMakerBitcoin::runThreadConsumeRawGbt() { @@ -842,21 +923,43 @@ void BlockMakerBitcoin::runThreadConsumeNamecoinSolvedShare() { } /** - Beginning of methods needed to consume a solved share and submit a block to RSK node. + Beginning of methods needed to consume a solved share and submit a block to + RSK node. @author Martin Medina @copyright RSK Labs Ltd. 
*/ -void BlockMakerBitcoin::submitRskBlockPartialMerkleNonBlocking(const string &rpcAddress, const string &rpcUserPwd, const string &blockHashHex, - const string &blockHeaderHex, const string &coinbaseHex, const string &merkleHashesHex, - const string &totalTxCount) { - boost::thread t(boost::bind(&BlockMakerBitcoin::_submitRskBlockPartialMerkleThread, this, rpcAddress, rpcUserPwd, blockHashHex, blockHeaderHex, coinbaseHex, merkleHashesHex, totalTxCount)); +void BlockMakerBitcoin::submitRskBlockPartialMerkleNonBlocking( + const string &rpcAddress, + const string &rpcUserPwd, + const string &blockHashHex, + const string &blockHeaderHex, + const string &coinbaseHex, + const string &merkleHashesHex, + const string &totalTxCount) { + boost::thread t(boost::bind( + &BlockMakerBitcoin::_submitRskBlockPartialMerkleThread, + this, + rpcAddress, + rpcUserPwd, + blockHashHex, + blockHeaderHex, + coinbaseHex, + merkleHashesHex, + totalTxCount)); } -void BlockMakerBitcoin::_submitRskBlockPartialMerkleThread(const string &rpcAddress, const string &rpcUserPwd, const string &blockHashHex, - const string &blockHeaderHex, const string &coinbaseHex, const string &merkleHashesHex, - const string &totalTxCount) { - string request = "{\"jsonrpc\":\"2.0\",\"id\":\"1\",\"method\":\"mnr_submitBitcoinBlockPartialMerkle\",\"params\":["; +void BlockMakerBitcoin::_submitRskBlockPartialMerkleThread( + const string &rpcAddress, + const string &rpcUserPwd, + const string &blockHashHex, + const string &blockHeaderHex, + const string &coinbaseHex, + const string &merkleHashesHex, + const string &totalTxCount) { + string request = + "{\"jsonrpc\":\"2.0\",\"id\":\"1\",\"method\":\"mnr_" + "submitBitcoinBlockPartialMerkle\",\"params\":["; request += "\"" + blockHashHex + "\", "; request += "\"" + blockHeaderHex + "\", "; request += "\"" + coinbaseHex + "\", "; @@ -867,7 +970,8 @@ void BlockMakerBitcoin::_submitRskBlockPartialMerkleThread(const string &rpcAddr // try N times for (size_t i = 0; i < 3; 
i++) { string response; - bool res = blockchainNodeRpcCall(rpcAddress.c_str(), rpcUserPwd.c_str(), request.c_str(), response); + bool res = blockchainNodeRpcCall( + rpcAddress.c_str(), rpcUserPwd.c_str(), request.c_str(), response); // success if (res) { @@ -888,9 +992,10 @@ void BlockMakerBitcoin::consumeRskSolvedShare(rd_kafka_message_t *rkmessage) { return; } - LOG(ERROR) << "consume error for topic " << rd_kafka_topic_name(rkmessage->rkt) - << "[" << rkmessage->partition << "] offset " << rkmessage->offset - << ": " << rd_kafka_message_errstr(rkmessage); + LOG(ERROR) << "consume error for topic " + << rd_kafka_topic_name(rkmessage->rkt) << "[" + << rkmessage->partition << "] offset " << rkmessage->offset + << ": " << rd_kafka_message_errstr(rkmessage); if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION || rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) { @@ -920,15 +1025,21 @@ void BlockMakerBitcoin::consumeRskSolvedShare(rd_kafka_message_t *rkmessage) { coinbaseTxBin.resize(rkmessage->len - sizeof(RskSolvedShareData)); // shareData - memcpy((uint8_t *)&shareData, (const uint8_t *)rkmessage->payload, sizeof(RskSolvedShareData)); + memcpy( + (uint8_t *)&shareData, + (const uint8_t *)rkmessage->payload, + sizeof(RskSolvedShareData)); // coinbase tx - memcpy((uint8_t *)coinbaseTxBin.data(), (const uint8_t *)rkmessage->payload + sizeof(RskSolvedShareData), coinbaseTxBin.size()); + memcpy( + (uint8_t *)coinbaseTxBin.data(), + (const uint8_t *)rkmessage->payload + sizeof(RskSolvedShareData), + coinbaseTxBin.size()); // copy header memcpy((uint8_t *)&blkHeader, shareData.header80_, sizeof(CBlockHeader)); } LOG(INFO) << "submit RSK block: " << blkHeader.GetHash().ToString(); - + // get gbtHash and rawgbt (vtxs) uint256 gbtHash; shared_ptr> vtxs; @@ -941,14 +1052,14 @@ void BlockMakerBitcoin::consumeRskSolvedShare(rd_kafka_message_t *rkmessage) { { ScopeLock ls(rawGbtLock_); if (rawGbtMap_.find(gbtHash) == rawGbtMap_.end()) { - LOG(ERROR) << "can't find this 
gbthash in rawGbtMap_: " << gbtHash.ToString(); + LOG(ERROR) << "can't find this gbthash in rawGbtMap_: " + << gbtHash.ToString(); return; } vtxs = rawGbtMap_[gbtHash]; } assert(vtxs.get() != nullptr); - vector vtxhashes; vtxhashes.resize(1 + vtxs->size()); // coinbase + gbt txs @@ -966,14 +1077,15 @@ void BlockMakerBitcoin::consumeRskSolvedShare(rd_kafka_message_t *rkmessage) { // put other tx hashes for (size_t i = 0; i < vtxs->size(); i++) { - vtxhashes[i + 1] = (*vtxs)[i]->GetHash(); // vtxs is a shared_ptr> + vtxhashes[i + 1] = + (*vtxs)[i]->GetHash(); // vtxs is a shared_ptr> } string blockHashHex = blkHeader.GetHash().ToString(); string blockHeaderHex = EncodeHexBlockHeader(blkHeader); // coinbase bin -> hex - string coinbaseHex; + string coinbaseHex; Bin2Hex(coinbaseTxBin, coinbaseHex); // build coinbase's merkle tree branch @@ -981,12 +1093,15 @@ void BlockMakerBitcoin::consumeRskSolvedShare(rd_kafka_message_t *rkmessage) { string hashHex; vector cbMerkleBranch = ComputeMerkleBranch(vtxhashes, 0); - Bin2Hex((uint8_t*)(vtxhashes[0].begin()), sizeof(uint256), hashHex); // coinbase hash + Bin2Hex( + (uint8_t *)(vtxhashes[0].begin()), + sizeof(uint256), + hashHex); // coinbase hash merkleHashesHex.append(hashHex); for (size_t i = 0; i < cbMerkleBranch.size(); i++) { - merkleHashesHex.append("\x20"); // space character - Bin2Hex((uint8_t*)cbMerkleBranch[i].begin(), sizeof(uint256), hashHex); - merkleHashesHex.append(hashHex); + merkleHashesHex.append("\x20"); // space character + Bin2Hex((uint8_t *)cbMerkleBranch[i].begin(), sizeof(uint256), hashHex); + merkleHashesHex.append(hashHex); } // block tx count @@ -994,8 +1109,14 @@ void BlockMakerBitcoin::consumeRskSolvedShare(rd_kafka_message_t *rkmessage) { sstream << std::hex << vtxhashes.size(); string totalTxCountHex(sstream.str()); - submitRskBlockPartialMerkleNonBlocking(shareData.rpcAddress_, shareData.rpcUserPwd_, blockHashHex, blockHeaderHex, - coinbaseHex, merkleHashesHex, totalTxCountHex); // using 
thread + submitRskBlockPartialMerkleNonBlocking( + shareData.rpcAddress_, + shareData.rpcUserPwd_, + blockHashHex, + blockHeaderHex, + coinbaseHex, + merkleHashesHex, + totalTxCountHex); // using thread } /** @@ -1005,28 +1126,29 @@ void BlockMakerBitcoin::consumeRskSolvedShare(rd_kafka_message_t *rkmessage) { @returns true if block can be submitted to RSK node. false otherwise. */ bool BlockMakerBitcoin::submitToRskNode() { - uint32_t maxSubmissionsPerSecond = 2; - int64_t oneSecondWindowInMs = 1000; + uint32_t maxSubmissionsPerSecond = 2; + int64_t oneSecondWindowInMs = 1000; - if (lastSubmittedBlockTime.is_not_a_date_time()) { - lastSubmittedBlockTime = bpt::microsec_clock::universal_time(); - } + if (lastSubmittedBlockTime.is_not_a_date_time()) { + lastSubmittedBlockTime = bpt::microsec_clock::universal_time(); + } - bpt::ptime currentTime(bpt::microsec_clock::universal_time()); - bpt::time_duration elapsed = currentTime - lastSubmittedBlockTime; + bpt::ptime currentTime(bpt::microsec_clock::universal_time()); + bpt::time_duration elapsed = currentTime - lastSubmittedBlockTime; - if (elapsed.total_milliseconds() > oneSecondWindowInMs) { - lastSubmittedBlockTime = currentTime; - submittedRskBlocks = 0; - elapsed = currentTime - lastSubmittedBlockTime; - } + if (elapsed.total_milliseconds() > oneSecondWindowInMs) { + lastSubmittedBlockTime = currentTime; + submittedRskBlocks = 0; + elapsed = currentTime - lastSubmittedBlockTime; + } - if (elapsed.total_milliseconds() < oneSecondWindowInMs && submittedRskBlocks < maxSubmissionsPerSecond) { - submittedRskBlocks++; - return true; - } + if (elapsed.total_milliseconds() < oneSecondWindowInMs && + submittedRskBlocks < maxSubmissionsPerSecond) { + submittedRskBlocks++; + return true; + } - return false; + return false; } void BlockMakerBitcoin::runThreadConsumeRskSolvedShare() { @@ -1048,9 +1170,13 @@ void BlockMakerBitcoin::runThreadConsumeRskSolvedShare() { void BlockMakerBitcoin::run() { // setup threads - 
threadConsumeRawGbt_ = thread(&BlockMakerBitcoin::runThreadConsumeRawGbt, this); - threadConsumeStratumJob_ = thread(&BlockMakerBitcoin::runThreadConsumeStratumJob, this); - threadConsumeNamecoinSolvedShare_ = thread(&BlockMakerBitcoin::runThreadConsumeNamecoinSolvedShare, this); - threadConsumeRskSolvedShare_ = thread(&BlockMakerBitcoin::runThreadConsumeRskSolvedShare, this); + threadConsumeRawGbt_ = + thread(&BlockMakerBitcoin::runThreadConsumeRawGbt, this); + threadConsumeStratumJob_ = + thread(&BlockMakerBitcoin::runThreadConsumeStratumJob, this); + threadConsumeNamecoinSolvedShare_ = + thread(&BlockMakerBitcoin::runThreadConsumeNamecoinSolvedShare, this); + threadConsumeRskSolvedShare_ = + thread(&BlockMakerBitcoin::runThreadConsumeRskSolvedShare, this); BlockMaker::run(); } diff --git a/src/bitcoin/BlockMakerBitcoin.h b/src/bitcoin/BlockMakerBitcoin.h index 89c440972..d8c4bd863 100644 --- a/src/bitcoin/BlockMakerBitcoin.h +++ b/src/bitcoin/BlockMakerBitcoin.h @@ -41,21 +41,25 @@ namespace bpt = boost::posix_time; // public: // virtual ~BlockMakerHandler() = 0; // mark it's an abstract class // virtual void init(const BlockMakerDefinition &def) { def_ = def; } - + // // read-only definition // virtual const BlockMakerDefinition& def() { return def_; } // virtual void processSolvedShare(rd_kafka_message_t *rkmessage) = 0; // // Interface with the GwMaker. // // There is a default implementation that use virtual functions below. -// // If the implementation does not meet the requirements, you can overload it +// // If the implementation does not meet the requirements, you can overload +// it // // and ignore all the following virtual functions. // // virtual string makeRawGwMsg(); // // protected: -// // // These virtual functions make it easier to implement the makeRawGwMsg() interface. -// // // In most cases, you just need to override getRequestData() and processRawGw(). 
-// // // If you have overloaded makeRawGwMsg() above, you can ignore all the following functions. +// // // These virtual functions make it easier to implement the +// makeRawGwMsg() interface. +// // // In most cases, you just need to override getRequestData() and +// processRawGw(). +// // // If you have overloaded makeRawGwMsg() above, you can ignore all the +// following functions. // // // Receive rpc response and generate RawGw message for the pool. // // virtual string processRawGw(const string &gw) { return ""; } @@ -79,14 +83,12 @@ namespace bpt = boost::posix_time; // class BlockMakerHandlerSia : public BlockMakerHandler{ // virtual void processSolvedShare(rd_kafka_message_t *rkmessage) { - + // } // }; - ////////////////////////////////// BlockMaker ////////////////////////////////// -class BlockMakerBitcoin : public BlockMaker -{ +class BlockMakerBitcoin : public BlockMaker { protected: #ifdef CHAIN_TYPE_BCH mutex rawGbtlightLock_; @@ -113,15 +115,16 @@ class BlockMakerBitcoin : public BlockMaker KafkaConsumer kafkaConsumerNamecoinSolvedShare_; KafkaConsumer kafkaConsumerRskSolvedShare_; - void insertRawGbt(const uint256 &gbtHash, - shared_ptr> vtxs); + void insertRawGbt( + const uint256 &gbtHash, shared_ptr> vtxs); thread threadConsumeRawGbt_; thread threadConsumeStratumJob_; thread threadConsumeNamecoinSolvedShare_; thread threadConsumeRskSolvedShare_; - std::map isAddrSupportSubmitAux_; + std::map + isAddrSupportSubmitAux_; void runThreadConsumeRawGbt(); void runThreadConsumeStratumJob(); @@ -137,60 +140,78 @@ class BlockMakerBitcoin : public BlockMaker void addRawgbt(const char *str, size_t len); - void saveBlockToDBNonBlocking(const FoundBlock &foundBlock, - const CBlockHeader &header, - const uint64_t coinbaseValue, const int32_t blksize); - void _saveBlockToDBThread(const FoundBlock &foundBlock, - const CBlockHeader &header, - const uint64_t coinbaseValue, const int32_t blksize); + void saveBlockToDBNonBlocking( + const FoundBlock &foundBlock, + 
const CBlockHeader &header, + const uint64_t coinbaseValue, + const int32_t blksize); + void _saveBlockToDBThread( + const FoundBlock &foundBlock, + const CBlockHeader &header, + const uint64_t coinbaseValue, + const int32_t blksize); #ifdef CHAIN_TYPE_BCH - void submitBlockLightNonBlocking(const string &blockHex, const string& job_id); - void _submitBlockLightThread(const string &rpcAddress, const string &rpcUserpass, const string& job_id, - const string &blockHex); + void + submitBlockLightNonBlocking(const string &blockHex, const string &job_id); + void _submitBlockLightThread( + const string &rpcAddress, + const string &rpcUserpass, + const string &job_id, + const string &blockHex); #endif // CHAIN_TYPE_BCH void submitBlockNonBlocking(const string &blockHex); - void _submitBlockThread(const string &rpcAddress, const string &rpcUserpass, - const string &blockHex); + void _submitBlockThread( + const string &rpcAddress, + const string &rpcUserpass, + const string &blockHex); bool checkBitcoinds(); - void submitNamecoinBlockNonBlocking(const string &auxBlockHash, - const string &auxPow, - const string &bitcoinBlockHash, - const string &rpcAddress, - const string &rpcUserpass); - void _submitNamecoinBlockThread(const string &auxBlockHash, - const string &auxPow, - const string &bitcoinBlockHash, - const string &rpcAddress, - const string &rpcUserpass); - - void submitRskBlockPartialMerkleNonBlocking(const string &rpcAddress, - const string &rpcUserPwd, - const string &blockHashHex, - const string &blockHeaderHex, - const string &coinbaseHex, - const string &merkleHashesHex, - const string &totalTxCount); - void _submitRskBlockPartialMerkleThread(const string &rpcAddress, - const string &rpcUserPwd, - const string &blockHashHex, - const string &blockHeaderHex, - const string &coinbaseHex, - const string &merkleHashesHex, - const string &totalTxCount); + void submitNamecoinBlockNonBlocking( + const string &auxBlockHash, + const string &auxPow, + const string 
&bitcoinBlockHash, + const string &rpcAddress, + const string &rpcUserpass); + void _submitNamecoinBlockThread( + const string &auxBlockHash, + const string &auxPow, + const string &bitcoinBlockHash, + const string &rpcAddress, + const string &rpcUserpass); + + void submitRskBlockPartialMerkleNonBlocking( + const string &rpcAddress, + const string &rpcUserPwd, + const string &blockHashHex, + const string &blockHeaderHex, + const string &coinbaseHex, + const string &merkleHashesHex, + const string &totalTxCount); + void _submitRskBlockPartialMerkleThread( + const string &rpcAddress, + const string &rpcUserPwd, + const string &blockHashHex, + const string &blockHeaderHex, + const string &coinbaseHex, + const string &merkleHashesHex, + const string &totalTxCount); bool submitToRskNode(); // read-only definition - inline shared_ptr def() { return std::dynamic_pointer_cast(def_); } + inline shared_ptr def() { + return std::dynamic_pointer_cast(def_); + } public: - BlockMakerBitcoin(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB); + BlockMakerBitcoin( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB); virtual ~BlockMakerBitcoin(); bool init() override; void run() override; }; - #endif diff --git a/src/bitcoin/CommonBitcoin.cc b/src/bitcoin/CommonBitcoin.cc index 648007b6e..bd50dc11c 100644 --- a/src/bitcoin/CommonBitcoin.cc +++ b/src/bitcoin/CommonBitcoin.cc @@ -37,7 +37,7 @@ uint64_t TargetToDiff(const string &str) { return TargetToDiff(t); } -void BitsToTarget(uint32_t bits, uint256 & target) { +void BitsToTarget(uint32_t bits, uint256 &target) { target = ArithToUint256(arith_uint256().SetCompact(bits)); } @@ -52,7 +52,7 @@ static const auto TARGET_DIFF1 = arith_uint256().SetCompact(BITS_DIFF1); static uint32_t _DiffToBits(uint64_t diff) { uint64_t nbytes = (BITS_DIFF1 >> 24) & 0xff; - uint64_t value = BITS_DIFF1 & 0xffffffULL; + uint64_t value = BITS_DIFF1 & 0xffffffULL; if (diff == 0) { return 1; @@ -65,18 
+65,16 @@ static uint32_t _DiffToBits(uint64_t diff) { if (value % diff == 0) { value /= diff; - } - else if ((value << 8) % diff == 0) { + } else if ((value << 8) % diff == 0) { nbytes -= 1; value <<= 8; value /= diff; - } - else { + } else { return 1; } if (value > 0x00ffffffULL) { - return 1; // overflow... should not happen + return 1; // overflow... should not happen } return (uint32_t)(value | (nbytes << 24)); } @@ -96,7 +94,7 @@ void DiffToTarget(uint64_t diff, uint256 &target, bool useTable) { if (useTable) { // try to find by table const uint64_t p = (uint64_t)log2(diff); - if (p < (sizeof(kDiff2TargetTable)/sizeof(kDiff2TargetTable[0])) && + if (p < (sizeof(kDiff2TargetTable) / sizeof(kDiff2TargetTable[0])) && diff == (1ull << p)) { target = kDiff2TargetTable[p]; return; diff --git a/src/bitcoin/CommonBitcoin.h b/src/bitcoin/CommonBitcoin.h index f35ab4e95..bf9675f10 100644 --- a/src/bitcoin/CommonBitcoin.h +++ b/src/bitcoin/CommonBitcoin.h @@ -1,4 +1,4 @@ -/* +/* The MIT License (MIT) Copyright (c) [2016] [BTC.COM] @@ -32,12 +32,11 @@ uint64_t TargetToDiff(uint256 &target); uint64_t TargetToDiff(const string &str); -void BitsToTarget(uint32_t bits, uint256 & target); -void DiffToTarget(uint64_t diff, uint256 & target, bool useTable=true); +void BitsToTarget(uint32_t bits, uint256 &target); +void DiffToTarget(uint64_t diff, uint256 &target, bool useTable = true); void BitsToDifficulty(uint32_t bits, double *difficulty); void BitsToDifficulty(uint32_t bits, uint64_t *difficulty); ////////////////////////////// for Bitcoin ////////////////////////////// - #endif diff --git a/src/bitcoin/GbtMaker.cc b/src/bitcoin/GbtMaker.cc index 3d228cbcb..a82962265 100644 --- a/src/bitcoin/GbtMaker.cc +++ b/src/bitcoin/GbtMaker.cc @@ -33,38 +33,46 @@ #include "utilities_js.hpp" #include "hash.h" - // // bitcoind zmq pub msg type: "hashblock", "hashtx", "rawblock", "rawtx" // -#define BITCOIND_ZMQ_HASHBLOCK "hashblock" -#define BITCOIND_ZMQ_HASHTX "hashtx" +#define 
BITCOIND_ZMQ_HASHBLOCK "hashblock" +#define BITCOIND_ZMQ_HASHTX "hashtx" // // namecoind zmq pub msg type: "hashblock", "hashtx", "rawblock", "rawtx" // -#define NAMECOIND_ZMQ_HASHBLOCK "hashblock" -#define NAMECOIND_ZMQ_HASHTX "hashtx" - +#define NAMECOIND_ZMQ_HASHBLOCK "hashblock" +#define NAMECOIND_ZMQ_HASHTX "hashtx" /////////////////////////////////// GbtMaker ///////////////////////////////// -GbtMaker::GbtMaker(const string &zmqBitcoindAddr, - const string &bitcoindRpcAddr, const string &bitcoindRpcUserpass, - const string &kafkaBrokers, const string &kafkaRawGbtTopic, - uint32_t kRpcCallInterval, bool isCheckZmq) -: running_(true), zmqContext_(1/*i/o threads*/), -zmqBitcoindAddr_(zmqBitcoindAddr), bitcoindRpcAddr_(bitcoindRpcAddr), -bitcoindRpcUserpass_(bitcoindRpcUserpass), lastGbtMakeTime_(0), kRpcCallInterval_(kRpcCallInterval), -kafkaBrokers_(kafkaBrokers), kafkaRawGbtTopic_(kafkaRawGbtTopic), -kafkaProducer_(kafkaBrokers_.c_str(), kafkaRawGbtTopic_.c_str(), 0/* partition */), -isCheckZmq_(isCheckZmq) -{ +GbtMaker::GbtMaker( + const string &zmqBitcoindAddr, + const string &bitcoindRpcAddr, + const string &bitcoindRpcUserpass, + const string &kafkaBrokers, + const string &kafkaRawGbtTopic, + uint32_t kRpcCallInterval, + bool isCheckZmq) + : running_(true) + , zmqContext_(1 /*i/o threads*/) + , zmqBitcoindAddr_(zmqBitcoindAddr) + , bitcoindRpcAddr_(bitcoindRpcAddr) + , bitcoindRpcUserpass_(bitcoindRpcUserpass) + , lastGbtMakeTime_(0) + , kRpcCallInterval_(kRpcCallInterval) + , kafkaBrokers_(kafkaBrokers) + , kafkaRawGbtTopic_(kafkaRawGbtTopic) + , kafkaProducer_( + kafkaBrokers_.c_str(), kafkaRawGbtTopic_.c_str(), 0 /* partition */) + , isCheckZmq_(isCheckZmq) { #ifdef CHAIN_TYPE_BCH lastGbtLightMakeTime_ = 0; #endif } -GbtMaker::~GbtMaker() {} +GbtMaker::~GbtMaker() { +} bool GbtMaker::init() { map options; @@ -82,7 +90,8 @@ bool GbtMaker::init() { } // check bitcoind network - if (!checkBitcoinRPC(bitcoindRpcAddr_.c_str(), bitcoindRpcUserpass_.c_str())) 
{ + if (!checkBitcoinRPC( + bitcoindRpcAddr_.c_str(), bitcoindRpcUserpass_.c_str())) { return false; } @@ -98,20 +107,22 @@ bool GbtMaker::checkBitcoindZMQ() { // zmq::socket_t subscriber(zmqContext_, ZMQ_SUB); subscriber.connect(zmqBitcoindAddr_); - subscriber.setsockopt(ZMQ_SUBSCRIBE, - BITCOIND_ZMQ_HASHTX, strlen(BITCOIND_ZMQ_HASHTX)); + subscriber.setsockopt( + ZMQ_SUBSCRIBE, BITCOIND_ZMQ_HASHTX, strlen(BITCOIND_ZMQ_HASHTX)); zmq::message_t ztype, zcontent; LOG(INFO) << "check bitcoind zmq, waiting for zmq message 'hashtx'..."; try { subscriber.recv(&ztype); subscriber.recv(&zcontent); - } catch (std::exception & e) { + } catch (std::exception &e) { LOG(ERROR) << "bitcoind zmq recv exception: " << e.what(); return false; } - const string type = std::string(static_cast(ztype.data()), ztype.size()); - const string content = std::string(static_cast(zcontent.data()), zcontent.size()); + const string type = + std::string(static_cast(ztype.data()), ztype.size()); + const string content = + std::string(static_cast(zcontent.data()), zcontent.size()); if (type == BITCOIND_ZMQ_HASHTX) { string hashHex; @@ -137,9 +148,14 @@ void GbtMaker::kafkaProduceMsg(const void *payload, size_t len) { } bool GbtMaker::bitcoindRpcGBT(string &response) { - string request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getblocktemplate\",\"params\":[{\"rules\" : [\"segwit\"]}]}"; - bool res = blockchainNodeRpcCall(bitcoindRpcAddr_.c_str(), bitcoindRpcUserpass_.c_str(), - request.c_str(), response); + string request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getblocktemplate\"," + "\"params\":[{\"rules\" : [\"segwit\"]}]}"; + bool res = blockchainNodeRpcCall( + bitcoindRpcAddr_.c_str(), + bitcoindRpcUserpass_.c_str(), + request.c_str(), + response); if (!res) { LOG(ERROR) << "bitcoind rpc failure"; return false; @@ -154,51 +170,51 @@ string GbtMaker::makeRawGbtMsg() { } JsonNode r; - if (!JsonNode::parse(gbt.c_str(), - gbt.c_str() + gbt.length(), r)) { + if 
(!JsonNode::parse(gbt.c_str(), gbt.c_str() + gbt.length(), r)) { LOG(ERROR) << "decode gbt failure: " << gbt; return ""; } // check fields - if (r["result"].type() != Utilities::JS::type::Obj || + if (r["result"].type() != Utilities::JS::type::Obj || r["result"]["previousblockhash"].type() != Utilities::JS::type::Str || - r["result"]["height"].type() != Utilities::JS::type::Int || - r["result"]["coinbasevalue"].type() != Utilities::JS::type::Int || - r["result"]["bits"].type() != Utilities::JS::type::Str || - r["result"]["mintime"].type() != Utilities::JS::type::Int || - r["result"]["curtime"].type() != Utilities::JS::type::Int || - r["result"]["version"].type() != Utilities::JS::type::Int) { + r["result"]["height"].type() != Utilities::JS::type::Int || + r["result"]["coinbasevalue"].type() != Utilities::JS::type::Int || + r["result"]["bits"].type() != Utilities::JS::type::Str || + r["result"]["mintime"].type() != Utilities::JS::type::Int || + r["result"]["curtime"].type() != Utilities::JS::type::Int || + r["result"]["version"].type() != Utilities::JS::type::Int) { LOG(ERROR) << "gbt check fields failure"; return ""; } const uint256 gbtHash = Hash(gbt.begin(), gbt.end()); LOG(INFO) << "gbt height: " << r["result"]["height"].uint32() - << ", prev_hash: " << r["result"]["previousblockhash"].str() - << ", coinbase_value: " << r["result"]["coinbasevalue"].uint64() - << ", bits: " << r["result"]["bits"].str() - << ", mintime: " << r["result"]["mintime"].uint32() - << ", version: " << r["result"]["version"].uint32() - << "|0x" << Strings::Format("%08x", r["result"]["version"].uint32()) - << ", gbthash: " << gbtHash.ToString(); - - return Strings::Format("{\"created_at_ts\":%u," - "\"block_template_base64\":\"%s\"," - "\"gbthash\":\"%s\"}", - (uint32_t)time(nullptr), EncodeBase64(gbt).c_str(), - gbtHash.ToString().c_str()); -// return Strings::Format("{\"created_at_ts\":%u," -// "\"gbthash\":\"%s\"}", -// (uint32_t)time(nullptr), -// gbtHash.ToString().c_str()); + << ", 
prev_hash: " << r["result"]["previousblockhash"].str() + << ", coinbase_value: " << r["result"]["coinbasevalue"].uint64() + << ", bits: " << r["result"]["bits"].str() + << ", mintime: " << r["result"]["mintime"].uint32() + << ", version: " << r["result"]["version"].uint32() << "|0x" + << Strings::Format("%08x", r["result"]["version"].uint32()) + << ", gbthash: " << gbtHash.ToString(); + + return Strings::Format( + "{\"created_at_ts\":%u," + "\"block_template_base64\":\"%s\"," + "\"gbthash\":\"%s\"}", + (uint32_t)time(nullptr), + EncodeBase64(gbt).c_str(), + gbtHash.ToString().c_str()); + // return Strings::Format("{\"created_at_ts\":%u," + // "\"gbthash\":\"%s\"}", + // (uint32_t)time(nullptr), + // gbtHash.ToString().c_str()); } void GbtMaker::submitRawGbtMsg(bool checkTime) { ScopeLock sl(lock_); - if (checkTime && - lastGbtMakeTime_ + kRpcCallInterval_ > time(nullptr)) { + if (checkTime && lastGbtMakeTime_ + kRpcCallInterval_ > time(nullptr)) { return; } @@ -216,15 +232,18 @@ void GbtMaker::submitRawGbtMsg(bool checkTime) { #ifdef CHAIN_TYPE_BCH bool GbtMaker::bitcoindRpcGBTLight(string &response) { - string request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getblocktemplatelight\",\"params\":[{\"rules\" : [\"segwit\"]}]}"; - bool res = blockchainNodeRpcCall(bitcoindRpcAddr_.c_str(), bitcoindRpcUserpass_.c_str(), - request.c_str(), response); + string request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getblocktemplatelight\"," + "\"params\":[{\"rules\" : [\"segwit\"]}]}"; + bool res = blockchainNodeRpcCall( + bitcoindRpcAddr_.c_str(), + bitcoindRpcUserpass_.c_str(), + request.c_str(), + response); if (!res) { LOG(ERROR) << "bitcoind rpc gbtlight failure"; return false; - } - else - { + } else { LOG(INFO) << "bitcoind response: " << response; } return true; @@ -237,51 +256,50 @@ string GbtMaker::makeRawGbtLightMsg() { } JsonNode r; - if (!JsonNode::parse(gbt.c_str(), - gbt.c_str() + gbt.length(), r)) { + if (!JsonNode::parse(gbt.c_str(), 
gbt.c_str() + gbt.length(), r)) { LOG(ERROR) << "decode gbt failure: " << gbt; return ""; } // check fields - if (r["result"].type() != Utilities::JS::type::Obj || + if (r["result"].type() != Utilities::JS::type::Obj || r["result"]["previousblockhash"].type() != Utilities::JS::type::Str || - r["result"]["height"].type() != Utilities::JS::type::Int || - r["result"]["coinbasevalue"].type() != Utilities::JS::type::Int || - r["result"]["bits"].type() != Utilities::JS::type::Str || - r["result"]["mintime"].type() != Utilities::JS::type::Int || - r["result"]["curtime"].type() != Utilities::JS::type::Int || - r["result"]["version"].type() != Utilities::JS::type::Int) { + r["result"]["height"].type() != Utilities::JS::type::Int || + r["result"]["coinbasevalue"].type() != Utilities::JS::type::Int || + r["result"]["bits"].type() != Utilities::JS::type::Str || + r["result"]["mintime"].type() != Utilities::JS::type::Int || + r["result"]["curtime"].type() != Utilities::JS::type::Int || + r["result"]["version"].type() != Utilities::JS::type::Int) { LOG(ERROR) << "gbt light check fields failure"; return ""; } const uint256 gbtHash = Hash(gbt.begin(), gbt.end()); LOG(INFO) << "light gbt height: " << r["result"]["height"].uint32() - << ", prev_hash: " << r["result"]["previousblockhash"].str() - << ", coinbase_value: " << r["result"]["coinbasevalue"].uint64() - << ", bits: " << r["result"]["bits"].str() + << ", prev_hash: " << r["result"]["previousblockhash"].str() + << ", coinbase_value: " << r["result"]["coinbasevalue"].uint64() + << ", bits: " << r["result"]["bits"].str() << ", mintime: " << r["result"]["mintime"].uint32() - << ", version: " << r["result"]["version"].uint32() - << "|0x" << Strings::Format("%08x", r["result"]["version"].uint32()) + << ", version: " << r["result"]["version"].uint32() << "|0x" + << Strings::Format("%08x", r["result"]["version"].uint32()) << ", gbthash: " << gbtHash.ToString(); - string result = Strings::Format("{\"created_at_ts\":%u," - 
"\"block_template_base64\":\"%s\"," - "\"gbthash\":\"%s\"}", - (uint32_t)time(nullptr), EncodeBase64(gbt).c_str(), - gbtHash.ToString().c_str()); + string result = Strings::Format( + "{\"created_at_ts\":%u," + "\"block_template_base64\":\"%s\"," + "\"gbthash\":\"%s\"}", + (uint32_t)time(nullptr), + EncodeBase64(gbt).c_str(), + gbtHash.ToString().c_str()); LOG(INFO) << "makeRawGbtLightMsg result: " << result.c_str(); return result; } - void GbtMaker::submitRawGbtLightMsg(bool checkTime) { ScopeLock sl(lock_); - if (checkTime && - lastGbtLightMakeTime_ + kRpcCallInterval_ > time(nullptr)) { + if (checkTime && lastGbtLightMakeTime_ + kRpcCallInterval_ > time(nullptr)) { return; } @@ -290,7 +308,7 @@ void GbtMaker::submitRawGbtLightMsg(bool checkTime) { LOG(ERROR) << "get rawgbt light failure"; return; } - LOG(INFO) << "rawGbtlight message: " << rawGbtLightMsg.c_str(); + LOG(INFO) << "rawGbtlight message: " << rawGbtLightMsg.c_str(); lastGbtLightMakeTime_ = (uint32_t)time(nullptr); // submit to Kafka @@ -302,38 +320,41 @@ void GbtMaker::submitRawGbtLightMsg(bool checkTime) { void GbtMaker::threadListenBitcoind() { zmq::socket_t subscriber(zmqContext_, ZMQ_SUB); subscriber.connect(zmqBitcoindAddr_); - subscriber.setsockopt(ZMQ_SUBSCRIBE, - BITCOIND_ZMQ_HASHBLOCK, strlen(BITCOIND_ZMQ_HASHBLOCK)); + subscriber.setsockopt( + ZMQ_SUBSCRIBE, BITCOIND_ZMQ_HASHBLOCK, strlen(BITCOIND_ZMQ_HASHBLOCK)); while (running_) { zmq::message_t zType, zContent, zSequence; try { // if we use block mode, can't quit this thread if (subscriber.recv(&zType, ZMQ_DONTWAIT) == false) { - if (!running_) { break; } - usleep(20000); // so we sleep and try again + if (!running_) { + break; + } + usleep(20000); // so we sleep and try again continue; } subscriber.recv(&zContent); subscriber.recv(&zSequence); - } catch (std::exception & e) { + } catch (std::exception &e) { LOG(ERROR) << "bitcoind zmq recv exception: " << e.what(); - break; // break big while + break; // break big while } - const string 
type = std::string(static_cast(zType.data()), zType.size()); - const string content = std::string(static_cast(zContent.data()), zContent.size()); - const string sequence = std::string(static_cast(zSequence.data()), zSequence.size()); - - if (type == BITCOIND_ZMQ_HASHBLOCK) - { + const string type = + std::string(static_cast(zType.data()), zType.size()); + const string content = + std::string(static_cast(zContent.data()), zContent.size()); + const string sequence = + std::string(static_cast(zSequence.data()), zSequence.size()); + + if (type == BITCOIND_ZMQ_HASHBLOCK) { string hashHex; Bin2Hex((const uint8_t *)content.data(), content.size(), hashHex); string sequenceHex; Bin2Hex((const uint8_t *)sequence.data(), sequence.size(), sequenceHex); - LOG(INFO) << ">>>> bitcoind recv hashblock: " << hashHex << ", sequence: " << sequenceHex << " <<<<"; - } - else - { + LOG(INFO) << ">>>> bitcoind recv hashblock: " << hashHex + << ", sequence: " << sequenceHex << " <<<<"; + } else { LOG(ERROR) << "unknown message type from bitcoind: " << type; } @@ -349,8 +370,7 @@ void GbtMaker::threadListenBitcoind() { } #ifdef CHAIN_TYPE_BCH -void GbtMaker::runLightGbt() -{ +void GbtMaker::runLightGbt() { thread threadListenBitcoind = thread(&GbtMaker::threadListenBitcoind, this); while (running_) { @@ -360,7 +380,6 @@ void GbtMaker::runLightGbt() if (threadListenBitcoind.joinable()) threadListenBitcoind.join(); - } #endif @@ -376,31 +395,35 @@ void GbtMaker::run() { threadListenBitcoind.join(); } - - - //////////////////////////////// NMCAuxBlockMaker ////////////////////////////// -NMCAuxBlockMaker::NMCAuxBlockMaker(const string &zmqNamecoindAddr, - const string &rpcAddr, - const string &rpcUserpass, - const string &kafkaBrokers, - const string &kafkaAuxPowGwTopic, - uint32_t kRpcCallInterval, - const string &fileLastRpcCallTime, - bool isCheckZmq, - const string &coinbaseAddress) : -running_(true), zmqContext_(1/*i/o threads*/), -zmqNamecoindAddr_(zmqNamecoindAddr), -rpcAddr_(rpcAddr), 
rpcUserpass_(rpcUserpass), -lastCallTime_(0), kRpcCallInterval_(kRpcCallInterval), -fileLastRpcCallTime_(fileLastRpcCallTime), -kafkaBrokers_(kafkaBrokers), kafkaAuxPowGwTopic_(kafkaAuxPowGwTopic), -kafkaProducer_(kafkaBrokers_.c_str(), kafkaAuxPowGwTopic_.c_str(), 0/* partition */), -isCheckZmq_(isCheckZmq), coinbaseAddress_(coinbaseAddress) -{ +NMCAuxBlockMaker::NMCAuxBlockMaker( + const string &zmqNamecoindAddr, + const string &rpcAddr, + const string &rpcUserpass, + const string &kafkaBrokers, + const string &kafkaAuxPowGwTopic, + uint32_t kRpcCallInterval, + const string &fileLastRpcCallTime, + bool isCheckZmq, + const string &coinbaseAddress) + : running_(true) + , zmqContext_(1 /*i/o threads*/) + , zmqNamecoindAddr_(zmqNamecoindAddr) + , rpcAddr_(rpcAddr) + , rpcUserpass_(rpcUserpass) + , lastCallTime_(0) + , kRpcCallInterval_(kRpcCallInterval) + , fileLastRpcCallTime_(fileLastRpcCallTime) + , kafkaBrokers_(kafkaBrokers) + , kafkaAuxPowGwTopic_(kafkaAuxPowGwTopic) + , kafkaProducer_( + kafkaBrokers_.c_str(), kafkaAuxPowGwTopic_.c_str(), 0 /* partition */) + , isCheckZmq_(isCheckZmq) + , coinbaseAddress_(coinbaseAddress) { } -NMCAuxBlockMaker::~NMCAuxBlockMaker() {} +NMCAuxBlockMaker::~NMCAuxBlockMaker() { +} bool NMCAuxBlockMaker::checkNamecoindZMQ() { // @@ -408,20 +431,22 @@ bool NMCAuxBlockMaker::checkNamecoindZMQ() { // zmq::socket_t subscriber(zmqContext_, ZMQ_SUB); subscriber.connect(zmqNamecoindAddr_); - subscriber.setsockopt(ZMQ_SUBSCRIBE, - NAMECOIND_ZMQ_HASHTX, strlen(NAMECOIND_ZMQ_HASHTX)); + subscriber.setsockopt( + ZMQ_SUBSCRIBE, NAMECOIND_ZMQ_HASHTX, strlen(NAMECOIND_ZMQ_HASHTX)); zmq::message_t ztype, zcontent; LOG(INFO) << "check namecoind zmq, waiting for zmq message 'hashtx'..."; try { subscriber.recv(&ztype); subscriber.recv(&zcontent); - } catch (std::exception & e) { + } catch (std::exception &e) { LOG(ERROR) << "namecoind zmq recv exception: " << e.what(); return false; } - const string type = std::string(static_cast(ztype.data()), 
ztype.size()); - const string content = std::string(static_cast(zcontent.data()), zcontent.size()); + const string type = + std::string(static_cast(ztype.data()), ztype.size()); + const string content = + std::string(static_cast(zcontent.data()), zcontent.size()); if (type == NAMECOIND_ZMQ_HASHTX) { string hashHex; @@ -437,20 +462,25 @@ bool NMCAuxBlockMaker::checkNamecoindZMQ() { bool NMCAuxBlockMaker::callRpcCreateAuxBlock(string &resp) { // // curl -v --user "username:password" - // -d '{"jsonrpc": "1.0", "id":"curltest", "method": "createauxblock","params": []}' - // -H 'content-type: text/plain;' "http://127.0.0.1:8336" + // -d '{"jsonrpc": "1.0", "id":"curltest", "method": + // "createauxblock","params": []}' -H 'content-type: text/plain;' + // "http://127.0.0.1:8336" // string request = ""; if (useCreateAuxBlockInterface_) { - request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"createauxblock\",\"params\":[\""; - request += coinbaseAddress_; + request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"createauxblock\"," + "\"params\":[\""; + request += coinbaseAddress_; request += "\"]}"; } else { - request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getauxblock\",\"params\":[]}"; + request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getauxblock\"," + "\"params\":[]}"; } - bool res = blockchainNodeRpcCall(rpcAddr_.c_str(), rpcUserpass_.c_str(), - request.c_str(), resp); + bool res = blockchainNodeRpcCall( + rpcAddr_.c_str(), rpcUserpass_.c_str(), request.c_str(), resp); if (!res) { LOG(ERROR) << "namecoind rpc failure"; return false; @@ -466,8 +496,7 @@ string NMCAuxBlockMaker::makeAuxBlockMsg() { DLOG(INFO) << "createauxblock json: " << aux; JsonNode r; - if (!JsonNode::parse(aux.c_str(), - aux.c_str() + aux.length(), r)) { + if (!JsonNode::parse(aux.c_str(), aux.c_str() + aux.length(), r)) { LOG(ERROR) << "decode createauxblock json failure: " << aux; return ""; } @@ -485,20 +514,20 @@ string NMCAuxBlockMaker::makeAuxBlockMsg() { // } 
// // check fields - if (r["result"].type() != Utilities::JS::type::Obj || - r["result"]["hash"].type() != Utilities::JS::type::Str || - r["result"]["chainid"].type() != Utilities::JS::type::Int || + if (r["result"].type() != Utilities::JS::type::Obj || + r["result"]["hash"].type() != Utilities::JS::type::Str || + r["result"]["chainid"].type() != Utilities::JS::type::Int || r["result"]["previousblockhash"].type() != Utilities::JS::type::Str || - r["result"]["coinbasevalue"].type() != Utilities::JS::type::Int || - r["result"]["bits"].type() != Utilities::JS::type::Str || - r["result"]["height"].type() != Utilities::JS::type::Int) { + r["result"]["coinbasevalue"].type() != Utilities::JS::type::Int || + r["result"]["bits"].type() != Utilities::JS::type::Str || + r["result"]["height"].type() != Utilities::JS::type::Int) { LOG(ERROR) << "namecoin aux check fields failure"; return ""; } // the MergedMiningProxy will indicate (optional) merkle_size and merkle_nonce // https://github.com/btccom/stratumSwitcher/tree/master/mergedMiningProxy - int32_t merkleSize = 1; + int32_t merkleSize = 1; int32_t merkleNonce = 0; if (r["result"]["merkle_size"].type() == Utilities::JS::type::Int) { @@ -509,23 +538,27 @@ string NMCAuxBlockMaker::makeAuxBlockMsg() { } // message for kafka - string msg = Strings::Format("{\"created_at_ts\":%u," - " \"hash\":\"%s\", \"height\":%d," - " \"merkle_size\":%d, \"merkle_nonce\":%d," - " \"chainid\":%d, \"bits\":\"%s\"," - " \"rpc_addr\":\"%s\", \"rpc_userpass\":\"%s\"" - "}", - (uint32_t)time(nullptr), - r["result"]["hash"].str().c_str(), - r["result"]["height"].int32(), - merkleSize, merkleNonce, - r["result"]["chainid"].int32(), - r["result"]["bits"].str().c_str(), - rpcAddr_.c_str(), rpcUserpass_.c_str()); + string msg = Strings::Format( + "{\"created_at_ts\":%u," + " \"hash\":\"%s\", \"height\":%d," + " \"merkle_size\":%d, \"merkle_nonce\":%d," + " \"chainid\":%d, \"bits\":\"%s\"," + " \"rpc_addr\":\"%s\", \"rpc_userpass\":\"%s\"" + "}", + 
(uint32_t)time(nullptr), + r["result"]["hash"].str().c_str(), + r["result"]["height"].int32(), + merkleSize, + merkleNonce, + r["result"]["chainid"].int32(), + r["result"]["bits"].str().c_str(), + rpcAddr_.c_str(), + rpcUserpass_.c_str()); LOG(INFO) << "createauxblock, height: " << r["result"]["height"].int32() - << ", hash: " << r["result"]["hash"].str() - << ", previousblockhash: " << r["result"]["previousblockhash"].str(); + << ", hash: " << r["result"]["hash"].str() + << ", previousblockhash: " + << r["result"]["previousblockhash"].str(); return msg; } @@ -533,8 +566,7 @@ string NMCAuxBlockMaker::makeAuxBlockMsg() { void NMCAuxBlockMaker::submitAuxblockMsg(bool checkTime) { ScopeLock sl(lock_); - if (checkTime && - lastCallTime_ + kRpcCallInterval_ > time(nullptr)) { + if (checkTime && lastCallTime_ + kRpcCallInterval_ > time(nullptr)) { return; } const string auxMsg = makeAuxBlockMsg(); @@ -550,41 +582,42 @@ void NMCAuxBlockMaker::submitAuxblockMsg(bool checkTime) { // save the timestamp to file, for monitor system if (!fileLastRpcCallTime_.empty()) { - writeTime2File(fileLastRpcCallTime_.c_str(), lastCallTime_); + writeTime2File(fileLastRpcCallTime_.c_str(), lastCallTime_); } } void NMCAuxBlockMaker::threadListenNamecoind() { zmq::socket_t subscriber(zmqContext_, ZMQ_SUB); subscriber.connect(zmqNamecoindAddr_); - subscriber.setsockopt(ZMQ_SUBSCRIBE, - NAMECOIND_ZMQ_HASHBLOCK, strlen(NAMECOIND_ZMQ_HASHBLOCK)); + subscriber.setsockopt( + ZMQ_SUBSCRIBE, NAMECOIND_ZMQ_HASHBLOCK, strlen(NAMECOIND_ZMQ_HASHBLOCK)); while (running_) { zmq::message_t ztype, zcontent; try { if (subscriber.recv(&ztype, ZMQ_DONTWAIT) == false) { - if (!running_) { break; } - usleep(50000); // so we sleep and try again + if (!running_) { + break; + } + usleep(50000); // so we sleep and try again continue; } subscriber.recv(&zcontent); - } catch (std::exception & e) { + } catch (std::exception &e) { LOG(ERROR) << "namecoind zmq recv exception: " << e.what(); - break; // break big while + 
break; // break big while } - const string type = std::string(static_cast(ztype.data()), ztype.size()); - const string content = std::string(static_cast(zcontent.data()), zcontent.size()); + const string type = + std::string(static_cast(ztype.data()), ztype.size()); + const string content = + std::string(static_cast(zcontent.data()), zcontent.size()); - if (type == NAMECOIND_ZMQ_HASHBLOCK) - { + if (type == NAMECOIND_ZMQ_HASHBLOCK) { string hashHex; Bin2Hex((const uint8_t *)content.data(), content.size(), hashHex); LOG(INFO) << ">>>> namecoind recv hashblock: " << hashHex << " <<<<"; submitAuxblockMsg(false); - } - else - { + } else { LOG(ERROR) << "unknown message type from namecoind: " << type; } } /* /while */ @@ -620,23 +653,29 @@ bool NMCAuxBlockMaker::init() { // check aux mining rpc commands: createauxblock & submitauxblock { string response; - string request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"help\",\"params\":[]}"; - bool res = blockchainNodeRpcCall(rpcAddr_.c_str(), rpcUserpass_.c_str(), - request.c_str(), response); + string request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"help\",\"params\":[]}"; + bool res = blockchainNodeRpcCall( + rpcAddr_.c_str(), rpcUserpass_.c_str(), request.c_str(), response); if (!res) { LOG(ERROR) << "namecoind rpc call failure"; return false; } - useCreateAuxBlockInterface_ = (response.find("createauxblock") == std::string::npos || - response.find("submitauxblock") == std::string::npos) ? false : true; + useCreateAuxBlockInterface_ = + (response.find("createauxblock") == std::string::npos || + response.find("submitauxblock") == std::string::npos) + ? false + : true; - LOG(INFO) << "namecoind " << (useCreateAuxBlockInterface_ ? " " : "doesn't ") << "support rpc commands: createauxblock and submitauxblock"; + LOG(INFO) << "namecoind " + << (useCreateAuxBlockInterface_ ? 
" " : "doesn't ") + << "support rpc commands: createauxblock and submitauxblock"; } if (isCheckZmq_ && !checkNamecoindZMQ()) return false; - + return true; } @@ -652,7 +691,8 @@ void NMCAuxBlockMaker::run() { // // listen namecoind zmq for detect new block coming // - thread threadListenNamecoind = thread(&NMCAuxBlockMaker::threadListenNamecoind, this); + thread threadListenNamecoind = + thread(&NMCAuxBlockMaker::threadListenNamecoind, this); // createauxblock interval while (running_) { @@ -663,4 +703,3 @@ void NMCAuxBlockMaker::run() { if (threadListenNamecoind.joinable()) threadListenNamecoind.join(); } - diff --git a/src/bitcoin/GbtMaker.h b/src/bitcoin/GbtMaker.h index 8628c0652..04985f9d4 100644 --- a/src/bitcoin/GbtMaker.h +++ b/src/bitcoin/GbtMaker.h @@ -29,7 +29,6 @@ #include "zmq.hpp" - /////////////////////////////////// GbtMaker /////////////////////////////////// class GbtMaker { atomic running_; @@ -68,10 +67,14 @@ class GbtMaker { void kafkaProduceMsg(const void *payload, size_t len); public: - GbtMaker(const string &zmqBitcoindAddr, - const string &bitcoindRpcAddr, const string &bitcoindRpcUserpass, - const string &kafkaBrokers, const string &kafkaRawGbtTopic, - uint32_t kRpcCallInterval, bool isCheckZmq); + GbtMaker( + const string &zmqBitcoindAddr, + const string &bitcoindRpcAddr, + const string &bitcoindRpcUserpass, + const string &kafkaBrokers, + const string &kafkaRawGbtTopic, + uint32_t kRpcCallInterval, + bool isCheckZmq); ~GbtMaker(); bool init(); @@ -82,8 +85,6 @@ class GbtMaker { void run(); }; - - //////////////////////////////// NMCAuxBlockMaker ////////////////////////////// // // rpc call: ./namecoin-cli createauxblock N59bssPo1MbK3khwPELTEomyzYbHLb59uY @@ -105,7 +106,7 @@ class NMCAuxBlockMaker { string kafkaAuxPowGwTopic_; KafkaProducer kafkaProducer_; bool isCheckZmq_; - string coinbaseAddress_; // nmc coinbase payout address + string coinbaseAddress_; // nmc coinbase payout address bool useCreateAuxBlockInterface_; bool 
checkNamecoindZMQ(); @@ -118,12 +119,16 @@ class NMCAuxBlockMaker { void kafkaProduceMsg(const void *payload, size_t len); public: - NMCAuxBlockMaker(const string &zmqNamecoindAddr, - const string &rpcAddr, const string &rpcUserpass, - const string &kafkaBrokers, const string &kafkaAuxPowGwTopic, - uint32_t kRpcCallInterval, - const string &fileLastRpcCallTime, bool isCheckZmq, - const string &coinbaseAddress); + NMCAuxBlockMaker( + const string &zmqNamecoindAddr, + const string &rpcAddr, + const string &rpcUserpass, + const string &kafkaBrokers, + const string &kafkaAuxPowGwTopic, + uint32_t kRpcCallInterval, + const string &fileLastRpcCallTime, + bool isCheckZmq, + const string &coinbaseAddress); ~NMCAuxBlockMaker(); bool init(); @@ -131,5 +136,4 @@ class NMCAuxBlockMaker { void run(); }; - #endif diff --git a/src/bitcoin/JobMakerBitcoin.cc b/src/bitcoin/JobMakerBitcoin.cc index 80696afc9..63e34143c 100644 --- a/src/bitcoin/JobMakerBitcoin.cc +++ b/src/bitcoin/JobMakerBitcoin.cc @@ -37,7 +37,7 @@ #include #include -#ifdef INCLUDE_BTC_KEY_IO_H // +#ifdef INCLUDE_BTC_KEY_IO_H // #include // IsValidDestinationString for bch is not in this file. 
#endif @@ -45,15 +45,14 @@ #include "Utils.h" ////////////////////////////////JobMakerHandlerBitcoin////////////////////////////////// -JobMakerHandlerBitcoin::JobMakerHandlerBitcoin() +JobMakerHandlerBitcoin::JobMakerHandlerBitcoin() : currBestHeight_(0) , lastJobSendTime_(0) , isLastJobEmptyBlock_(false) , latestNmcAuxBlockHeight_(0) , previousRskWork_(nullptr) , currentRskWork_(nullptr) - , isMergedMiningUpdate_(false) -{ + , isMergedMiningUpdate_(false) { } bool JobMakerHandlerBitcoin::init(shared_ptr defPtr) { @@ -68,7 +67,7 @@ bool JobMakerHandlerBitcoin::init(shared_ptr defPtr) { } LOG(INFO) << "Block Version: " << std::hex << def()->blockVersion_; - LOG(INFO) << "Coinbase Info: " << def()->coinbaseInfo_; + LOG(INFO) << "Coinbase Info: " << def()->coinbaseInfo_; LOG(INFO) << "Payout Address: " << def()->payoutAddr_; // check pool payout address @@ -81,14 +80,21 @@ bool JobMakerHandlerBitcoin::init(shared_ptr defPtr) { return true; } -bool JobMakerHandlerBitcoin::initConsumerHandlers(const string &kafkaBrokers, vector &handlers) { +bool JobMakerHandlerBitcoin::initConsumerHandlers( + const string &kafkaBrokers, vector &handlers) { const int32_t consumeLatestN = 20; shared_ptr kafkaRawGbtConsumer; { - auto messageProcessor = std::bind(&JobMakerHandlerBitcoin::processRawGbtMsg, this, std::placeholders::_1); - auto handler = createConsumerHandler(kafkaBrokers, def()->rawGbtTopic_, consumeLatestN, {}, messageProcessor); - if(handler.kafkaConsumer_ == nullptr) + auto messageProcessor = std::bind( + &JobMakerHandlerBitcoin::processRawGbtMsg, this, std::placeholders::_1); + auto handler = createConsumerHandler( + kafkaBrokers, + def()->rawGbtTopic_, + consumeLatestN, + {}, + messageProcessor); + if (handler.kafkaConsumer_ == nullptr) return false; handlers.push_back(handler); kafkaRawGbtConsumer = handler.kafkaConsumer_; @@ -96,9 +102,11 @@ bool JobMakerHandlerBitcoin::initConsumerHandlers(const string &kafkaBrokers, ve shared_ptr kafkaAuxPowConsumer; { - auto 
messageProcessor = std::bind(&JobMakerHandlerBitcoin::processAuxPowMsg, this, std::placeholders::_1); - auto handler = createConsumerHandler(kafkaBrokers, def()->auxPowGwTopic_, 1, {}, messageProcessor); - if(handler.kafkaConsumer_ == nullptr) + auto messageProcessor = std::bind( + &JobMakerHandlerBitcoin::processAuxPowMsg, this, std::placeholders::_1); + auto handler = createConsumerHandler( + kafkaBrokers, def()->auxPowGwTopic_, 1, {}, messageProcessor, false); + if (handler.kafkaConsumer_ == nullptr) return false; handlers.push_back(handler); kafkaAuxPowConsumer = handler.kafkaConsumer_; @@ -106,15 +114,18 @@ bool JobMakerHandlerBitcoin::initConsumerHandlers(const string &kafkaBrokers, ve shared_ptr kafkaRskGwConsumer; { - auto messageProcessor = std::bind(&JobMakerHandlerBitcoin::processRskGwMsg, this, std::placeholders::_1); - auto handler = createConsumerHandler(kafkaBrokers, def()->rskRawGwTopic_, 1, {}, messageProcessor); - if(handler.kafkaConsumer_ == nullptr) + auto messageProcessor = std::bind( + &JobMakerHandlerBitcoin::processRskGwMsg, this, std::placeholders::_1); + auto handler = createConsumerHandler( + kafkaBrokers, def()->rskRawGwTopic_, 1, {}, messageProcessor, false); + if (handler.kafkaConsumer_ == nullptr) return false; handlers.push_back(handler); kafkaRskGwConsumer = handler.kafkaConsumer_; } - // sleep 3 seconds, wait for the latest N messages transfer from broker to client + // sleep 3 seconds, wait for the latest N messages transfer from broker to + // client sleep(3); /* pre-consume some messages for initialization */ @@ -124,7 +135,7 @@ bool JobMakerHandlerBitcoin::initConsumerHandlers(const string &kafkaBrokers, ve // { rd_kafka_message_t *rkmessage; - rkmessage = kafkaAuxPowConsumer->consumer(1000/* timeout ms */); + rkmessage = kafkaAuxPowConsumer->consumer(1000 /* timeout ms */); if (rkmessage != nullptr && !rkmessage->err) { string msg((const char *)rkmessage->payload, rkmessage->len); processAuxPowMsg(msg); @@ -137,7 +148,7 @@ 
bool JobMakerHandlerBitcoin::initConsumerHandlers(const string &kafkaBrokers, ve // { rd_kafka_message_t *rkmessage; - rkmessage = kafkaRskGwConsumer->consumer(1000/* timeout ms */); + rkmessage = kafkaRskGwConsumer->consumer(1000 /* timeout ms */); if (rkmessage != nullptr && !rkmessage->err) { string msg((const char *)rkmessage->payload, rkmessage->len); processRskGwMsg(msg); @@ -151,7 +162,7 @@ bool JobMakerHandlerBitcoin::initConsumerHandlers(const string &kafkaBrokers, ve LOG(INFO) << "consume latest rawgbt messages from kafka..."; for (int32_t i = 0; i < consumeLatestN; i++) { rd_kafka_message_t *rkmessage; - rkmessage = kafkaRawGbtConsumer->consumer(5000/* timeout ms */); + rkmessage = kafkaRawGbtConsumer->consumer(5000 /* timeout ms */); if (rkmessage == nullptr || rkmessage->err) { break; } @@ -171,9 +182,9 @@ bool JobMakerHandlerBitcoin::addRawGbt(const string &msg) { return false; } - if (r["created_at_ts"].type() != Utilities::JS::type::Int || + if (r["created_at_ts"].type() != Utilities::JS::type::Int || r["block_template_base64"].type() != Utilities::JS::type::Str || - r["gbthash"].type() != Utilities::JS::type::Str) { + r["gbthash"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "invalid rawgbt: missing fields"; return false; } @@ -190,14 +201,15 @@ bool JobMakerHandlerBitcoin::addRawGbt(const string &msg) { const int64_t timeDiff = (int64_t)time(nullptr) - (int64_t)gbtTime; if (labs(timeDiff) >= 60) { LOG(WARNING) << "rawgbt diff time is more than 60, ignore it"; - return false; // time diff too large, there must be some problems, so ignore it + return false; // time diff too large, there must be some problems, so ignore + // it } if (labs(timeDiff) >= 3) { LOG(WARNING) << "rawgbt diff time is too large: " << timeDiff << " seconds"; } const string gbt = DecodeBase64(r["block_template_base64"].str()); - assert(gbt.length() > 64); // valid gbt string's len at least 64 bytes + assert(gbt.length() > 64); // valid gbt string's len at least 64 bytes 
JsonNode nodeGbt; if (!JsonNode::parse(gbt.c_str(), gbt.c_str() + gbt.length(), nodeGbt)) { @@ -207,23 +219,23 @@ bool JobMakerHandlerBitcoin::addRawGbt(const string &msg) { assert(nodeGbt["result"]["height"].type() == Utilities::JS::type::Int); const uint32_t height = nodeGbt["result"]["height"].uint32(); - #ifdef CHAIN_TYPE_BCH - bool isLightVersion = nodeGbt["result"]["job_id"].type() == Utilities::JS::type::Str; + bool isLightVersion = + nodeGbt["result"]["job_id"].type() == Utilities::JS::type::Str; bool isEmptyBlock = false; - if(isLightVersion) - { + if (isLightVersion) { assert(nodeGbt["result"]["merkle"].type() == Utilities::JS::type::Array); isEmptyBlock = nodeGbt["result"]["merkle"].array().size() == 0; - } - else - { - assert(nodeGbt["result"]["transactions"].type() == Utilities::JS::type::Array); + } else { + assert( + nodeGbt["result"]["transactions"].type() == Utilities::JS::type::Array); isEmptyBlock = nodeGbt["result"]["transactions"].array().size() == 0; } #else - assert(nodeGbt["result"]["transactions"].type() == Utilities::JS::type::Array); - const bool isEmptyBlock = nodeGbt["result"]["transactions"].array().size() == 0; + assert( + nodeGbt["result"]["transactions"].type() == Utilities::JS::type::Array); + const bool isEmptyBlock = + nodeGbt["result"]["transactions"].array().size() == 0; #endif { @@ -233,17 +245,18 @@ bool JobMakerHandlerBitcoin::addRawGbt(const string &msg) { const uint64_t bestKey = rawgbtMap_.rbegin()->first; const uint32_t bestTime = gbtKeyGetTime(bestKey); const uint32_t bestHeight = gbtKeyGetHeight(bestKey); - const bool bestIsEmpty = gbtKeyIsEmptyBlock(bestKey); + const bool bestIsEmpty = gbtKeyIsEmptyBlock(bestKey); // To prevent the job's block height ups and downs // when the block height of two bitcoind is not synchronized. // The block height downs must past twice the time of stratumJobInterval_ // without the higher height GBT received. 
- if (height < bestHeight && !bestIsEmpty && + if (height < bestHeight && !bestIsEmpty && gbtTime - bestTime < 2 * def()->jobInterval_) { LOG(WARNING) << "skip low height GBT. height: " << height << ", best height: " << bestHeight - << ", elapsed time after best GBT: " << (gbtTime - bestTime) << "s"; + << ", elapsed time after best GBT: " + << (gbtTime - bestTime) << "s"; return false; } } @@ -261,9 +274,10 @@ bool JobMakerHandlerBitcoin::addRawGbt(const string &msg) { lastestGbtHash_.pop_front(); } - LOG(INFO) << "add rawgbt, height: "<< height << ", gbthash: " - << r["gbthash"].str().substr(0, 16) << "..., gbtTime(UTC): " << date("%F %T", gbtTime) - << ", isEmpty:" << isEmptyBlock; + LOG(INFO) << "add rawgbt, height: " << height + << ", gbthash: " << r["gbthash"].str().substr(0, 16) + << "..., gbtTime(UTC): " << date("%F %T", gbtTime) + << ", isEmpty:" << isEmptyBlock; return true; } @@ -296,10 +310,11 @@ bool JobMakerHandlerBitcoin::findBestRawGbt(string &bestRawGbt) { if (bestKey == lastSendBestKey) { LOG(WARNING) << "bestKey is the same as last one: " << lastSendBestKey; } - - // if last job is an empty block job, we need to + + // if last job is an empty block job, we need to // send a new non-empty job as quick as possible. 
- if (bestHeight == currBestHeight_ && isLastJobEmptyBlock_ && !currentGbtIsEmpty) { + if (bestHeight == currBestHeight_ && isLastJobEmptyBlock_ && + !currentGbtIsEmpty) { needUpdateEmptyBlockJob = true; LOG(INFO) << "--------update last empty block job--------"; } @@ -313,9 +328,10 @@ bool JobMakerHandlerBitcoin::findBestRawGbt(string &bestRawGbt) { isFindNewHeight = true; } - if (isFindNewHeight || needUpdateEmptyBlockJob || isMergedMiningUpdate_ || isReachTimeout()) { - lastSendBestKey = bestKey; - currBestHeight_ = bestHeight; + if (isFindNewHeight || needUpdateEmptyBlockJob || isMergedMiningUpdate_ || + isReachTimeout()) { + lastSendBestKey = bestKey; + currBestHeight_ = bestHeight; bestRawGbt = rawgbtMap_.rbegin()->second.c_str(); return true; @@ -341,23 +357,26 @@ void JobMakerHandlerBitcoin::clearTimeoutGbt() { // Ensure that rawgbtMap_ has at least one element, even if it expires. // So jobmaker can always generate jobs even if blockchain node does not - // update the response of getblocktemplate for a long time when there is no new transaction. - // This happens on SBTC v0.17. - for (auto itr = rawgbtMap_.begin(); rawgbtMap_.size() > 1 && itr != rawgbtMap_.end(); ) { - const uint32_t ts = gbtKeyGetTime(itr->first); + // update the response of getblocktemplate for a long time when there is no + // new transaction. This happens on SBTC v0.17. + for (auto itr = rawgbtMap_.begin(); + rawgbtMap_.size() > 1 && itr != rawgbtMap_.end();) { + const uint32_t ts = gbtKeyGetTime(itr->first); const bool isEmpty = gbtKeyIsEmptyBlock(itr->first); const uint32_t height = gbtKeyGetHeight(itr->first); // gbt expired time - const uint32_t expiredTime = ts + (isEmpty ? def()->emptyGbtLifeTime_ : def()->gbtLifeTime_); + const uint32_t expiredTime = + ts + (isEmpty ? 
def()->emptyGbtLifeTime_ : def()->gbtLifeTime_); if (expiredTime > ts_now) { // not expired ++itr; } else { // remove expired gbt - LOG(INFO) << "remove timeout rawgbt: " << date("%F %T", ts) << "|" << ts << - ", height:" << height << ", isEmptyBlock:" << (isEmpty ? 1 : 0); + LOG(INFO) << "remove timeout rawgbt: " << date("%F %T", ts) << "|" << ts + << ", height:" << height + << ", isEmptyBlock:" << (isEmpty ? 1 : 0); // c++11: returns an iterator to the next element in the map itr = rawgbtMap_.erase(itr); @@ -376,13 +395,13 @@ void JobMakerHandlerBitcoin::clearTimeoutGw() { const uint32_t ts_now = time(nullptr); currentRskWork = *currentRskWork_; - if(currentRskWork.getCreatedAt() + 120u < ts_now) { + if (currentRskWork.getCreatedAt() + 120u < ts_now) { delete currentRskWork_; currentRskWork_ = nullptr; } previousRskWork = *previousRskWork_; - if(previousRskWork.getCreatedAt() + 120u < ts_now) { + if (previousRskWork.getCreatedAt() + 120u < ts_now) { delete previousRskWork_; previousRskWork_ = nullptr; } @@ -401,9 +420,10 @@ bool JobMakerHandlerBitcoin::triggerRskUpdate() { previousRskWork = *previousRskWork_; } - bool notifyFlagUpdate = def()->mergedMiningNotifyPolicy_ == 1 && currentRskWork.getNotifyFlag(); - bool differentHashUpdate = def()->mergedMiningNotifyPolicy_ == 2 && - (currentRskWork.getBlockHash() != previousRskWork.getBlockHash()); + bool notifyFlagUpdate = + def()->mergedMiningNotifyPolicy_ == 1 && currentRskWork.getNotifyFlag(); + bool differentHashUpdate = def()->mergedMiningNotifyPolicy_ == 2 && + (currentRskWork.getBlockHash() != previousRskWork.getBlockHash()); return notifyFlagUpdate || differentHashUpdate; } @@ -413,8 +433,7 @@ bool JobMakerHandlerBitcoin::processRawGbtMsg(const string &msg) { return addRawGbt(msg); } -bool JobMakerHandlerBitcoin::processAuxPowMsg(const string &msg) -{ +bool JobMakerHandlerBitcoin::processAuxPowMsg(const string &msg) { uint32_t currentNmcBlockHeight = 0; string currentNmcBlockHash; // get block height @@ 
-435,7 +454,6 @@ bool JobMakerHandlerBitcoin::processAuxPowMsg(const string &msg) currentNmcBlockHash = r["hash"].str(); } - uint32_t latestNmcAuxBlockHeight = 0; string latestNmcAuxBlockHash; // set json string @@ -452,8 +470,10 @@ bool JobMakerHandlerBitcoin::processAuxPowMsg(const string &msg) DLOG(INFO) << "latestAuxPowJson: " << latestNmcAuxBlockJson_; } - bool higherHeightUpdate = def()->mergedMiningNotifyPolicy_ == 1 && currentNmcBlockHeight > latestNmcAuxBlockHeight; - bool differentHashUpdate = def()->mergedMiningNotifyPolicy_ == 2 && currentNmcBlockHash != latestNmcAuxBlockHash; + bool higherHeightUpdate = def()->mergedMiningNotifyPolicy_ == 1 && + currentNmcBlockHeight > latestNmcAuxBlockHeight; + bool differentHashUpdate = def()->mergedMiningNotifyPolicy_ == 2 && + currentNmcBlockHash != latestNmcAuxBlockHash; isMergedMiningUpdate_ = higherHeightUpdate || differentHashUpdate; return isMergedMiningUpdate_; @@ -465,7 +485,7 @@ bool JobMakerHandlerBitcoin::processRskGwMsg(const string &rawGetWork) { ScopeLock sl(rskWorkAccessLock_); RskWork *rskWork = new RskWork(); - if(rskWork->initFromGw(rawGetWork)) { + if (rskWork->initFromGw(rawGetWork)) { if (previousRskWork_ != nullptr) { delete previousRskWork_; @@ -502,14 +522,15 @@ string JobMakerHandlerBitcoin::makeStratumJob(const string &gbt) { } StratumJobBitcoin sjob; - if (!sjob.initFromGbt(gbt.c_str(), def()->coinbaseInfo_, - poolPayoutAddr_, - def()->blockVersion_, - latestNmcAuxBlockJson, - currentRskBlockJson, - def()->serverId_, - isMergedMiningUpdate_)) - { + if (!sjob.initFromGbt( + gbt.c_str(), + def()->coinbaseInfo_, + poolPayoutAddr_, + def()->blockVersion_, + latestNmcAuxBlockJson, + currentRskBlockJson, + def()->serverId_, + isMergedMiningUpdate_)) { LOG(ERROR) << "init stratum job message from gbt str fail"; return ""; } @@ -523,7 +544,7 @@ string JobMakerHandlerBitcoin::makeStratumJob(const string &gbt) { isLastJobEmptyBlock_ = sjob.isEmptyBlock(); LOG(INFO) << "--------producer stratum job, 
jobId: " << sjob.jobId_ - << ", height: " << sjob.height_ << "--------"; + << ", height: " << sjob.height_ << "--------"; LOG(INFO) << "sjob: " << jobMsg; isMergedMiningUpdate_ = false; @@ -538,20 +559,23 @@ string JobMakerHandlerBitcoin::makeStratumJobMsg() { return makeStratumJob(bestRawGbt); } -uint64_t JobMakerHandlerBitcoin::makeGbtKey(uint32_t gbtTime, bool isEmptyBlock, uint32_t height) { +uint64_t JobMakerHandlerBitcoin::makeGbtKey( + uint32_t gbtTime, bool isEmptyBlock, uint32_t height) { assert(height < 0x7FFFFFFFU); // // gbtKey: // ----------------------------------------------------------------------------------------- - // | 32 bits | 31 bits | 1 bit | - // | xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx | xxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx | x | - // | gbtTime | height | nonEmptyFlag | + // | 32 bits | 31 bits | 1 bit | | + // xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx | xxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx | + // x | | gbtTime | height + // | nonEmptyFlag | // ----------------------------------------------------------------------------------------- // use nonEmptyFlag (aka: !isEmptyBlock) so the key of a non-empty block // will large than the key of an empty block. 
// - return (((uint64_t)gbtTime) << 32) | (((uint64_t)height) << 1) | ((uint64_t)(!isEmptyBlock)); + return (((uint64_t)gbtTime) << 32) | (((uint64_t)height) << 1) | + ((uint64_t)(!isEmptyBlock)); } uint32_t JobMakerHandlerBitcoin::gbtKeyGetTime(uint64_t gbtKey) { diff --git a/src/bitcoin/JobMakerBitcoin.h b/src/bitcoin/JobMakerBitcoin.h index 633d9841e..ada614f2f 100644 --- a/src/bitcoin/JobMakerBitcoin.h +++ b/src/bitcoin/JobMakerBitcoin.h @@ -31,9 +31,7 @@ #include #include - -class JobMakerHandlerBitcoin : public JobMakerHandler -{ +class JobMakerHandlerBitcoin : public JobMakerHandler { mutex lock_; // lock when update rawgbtMap_ mutex auxJsonLock_; mutex rskWorkAccessLock_; @@ -43,7 +41,8 @@ class JobMakerHandlerBitcoin : public JobMakerHandler uint32_t currBestHeight_; uint32_t lastJobSendTime_; bool isLastJobEmptyBlock_; - std::map rawgbtMap_; // sorted gbt by timestamp + std::map + rawgbtMap_; // sorted gbt by timestamp deque lastestGbtHash_; // merged mining for AuxPow blocks (example: Namecoin, ElastOS) @@ -68,18 +67,21 @@ class JobMakerHandlerBitcoin : public JobMakerHandler bool findBestRawGbt(string &bestRawGbt); string makeStratumJob(const string &gbt); - inline uint64_t makeGbtKey(uint32_t gbtTime, bool isEmptyBlock, uint32_t height); - inline uint32_t gbtKeyGetTime (uint64_t gbtKey); - inline uint32_t gbtKeyGetHeight (uint64_t gbtKey); - inline bool gbtKeyIsEmptyBlock(uint64_t gbtKey); + inline uint64_t + makeGbtKey(uint32_t gbtTime, bool isEmptyBlock, uint32_t height); + inline uint32_t gbtKeyGetTime(uint64_t gbtKey); + inline uint32_t gbtKeyGetHeight(uint64_t gbtKey); + inline bool gbtKeyIsEmptyBlock(uint64_t gbtKey); public: JobMakerHandlerBitcoin(); virtual ~JobMakerHandlerBitcoin() {} bool init(shared_ptr def) override; - virtual bool initConsumerHandlers(const string &kafkaBrokers, vector &handlers) override; - + virtual bool initConsumerHandlers( + const string &kafkaBrokers, + vector &handlers) override; + bool processRawGbtMsg(const string 
&msg); bool processAuxPowMsg(const string &msg); bool processRskGwMsg(const string &msg); @@ -87,7 +89,9 @@ class JobMakerHandlerBitcoin : public JobMakerHandler virtual string makeStratumJobMsg() override; // read-only definition - inline shared_ptr def() { return std::dynamic_pointer_cast(def_); } + inline shared_ptr def() { + return std::dynamic_pointer_cast(def_); + } }; #endif diff --git a/src/bitcoin/ShareLogParserBitcoin.h b/src/bitcoin/ShareLogParserBitcoin.h index 38c427622..10bc68e70 100644 --- a/src/bitcoin/ShareLogParserBitcoin.h +++ b/src/bitcoin/ShareLogParserBitcoin.h @@ -24,7 +24,6 @@ #ifndef SHARELOGPARSER_BITCOIN_H_ #define SHARELOGPARSER_BITCOIN_H_ - #include "ShareLogParser.h" #include "StratumBitcoin.h" diff --git a/src/bitcoin/StatisticsBitcoin.cc b/src/bitcoin/StatisticsBitcoin.cc index 336fbe8cf..18bcf4c2b 100644 --- a/src/bitcoin/StatisticsBitcoin.cc +++ b/src/bitcoin/StatisticsBitcoin.cc @@ -28,27 +28,8 @@ #include "BitcoinUtils.h" template <> -void ShareStatsDay::processShare(uint32_t hourIdx, const ShareBitcoin &share) { - ScopeLock sl(lock_); - - if (StratumStatus::isAccepted(share.status())) { - shareAccept1h_[hourIdx] += share.sharediff(); - shareAccept1d_ += share.sharediff(); - - double score = share.score(); - double reward = GetBlockReward(share.height(), Params().GetConsensus()); - double earn = score * reward; - - score1h_[hourIdx] += score; - score1d_ += score; - earn1h_[hourIdx] += earn; - earn1d_ += earn; - - } else { - shareReject1h_[hourIdx] += share.sharediff(); - shareReject1d_ += share.sharediff(); - } - modifyHoursFlag_ |= (0x01u << hourIdx); +double ShareStatsDay::getShareReward(const ShareBitcoin &share) { + return GetBlockReward(share.height(), Params().GetConsensus()); } /////////////// template instantiation /////////////// diff --git a/src/bitcoin/StatsHttpdBitcoin.cc b/src/bitcoin/StatsHttpdBitcoin.cc index 2151e2490..9c4b29ee9 100644 --- a/src/bitcoin/StatsHttpdBitcoin.cc +++ b/src/bitcoin/StatsHttpdBitcoin.cc 
@@ -23,7 +23,6 @@ */ #include "StatsHttpdBitcoin.h" - /////////////// template instantiation /////////////// // Without this, some linking errors will issued. // If you add a new derived class of Share, add it at the following. diff --git a/src/bitcoin/StratumBitcoin.cc b/src/bitcoin/StratumBitcoin.cc index a88b49232..5e9b9abee 100644 --- a/src/bitcoin/StratumBitcoin.cc +++ b/src/bitcoin/StratumBitcoin.cc @@ -38,8 +38,8 @@ #include -static -void makeMerkleBranch(const vector &vtxhashs, vector &steps) { +static void +makeMerkleBranch(const vector &vtxhashs, vector &steps) { if (vtxhashs.size() == 0) { return; } @@ -55,18 +55,20 @@ void makeMerkleBranch(const vector &vtxhashs, vector &steps) { // ignore the first one than merge two for (size_t i = 0; i < (hashs.size() - 1) / 2; i++) { // Hash = Double SHA256 - hashs[i] = Hash(BEGIN(hashs[i*2 + 1]), END(hashs[i*2 + 1]), - BEGIN(hashs[i*2 + 2]), END(hashs[i*2 + 2])); + hashs[i] = Hash( + BEGIN(hashs[i * 2 + 1]), + END(hashs[i * 2 + 1]), + BEGIN(hashs[i * 2 + 2]), + END(hashs[i * 2 + 2])); } hashs.resize((hashs.size() - 1) / 2); } assert(hashs.size() == 1); - steps.push_back(*hashs.begin()); // put the last one + steps.push_back(*hashs.begin()); // put the last one } -static -int64_t findExtraNonceStart(const vector &coinbaseOriTpl, - const vector &placeHolder) { +static int64_t findExtraNonceStart( + const vector &coinbaseOriTpl, const vector &placeHolder) { // find for the end for (int64_t i = coinbaseOriTpl.size() - placeHolder.size(); i >= 0; i--) { if (memcmp(&coinbaseOriTpl[i], &placeHolder[0], placeHolder.size()) == 0) { @@ -84,9 +86,7 @@ StratumJobBitcoin::StratumJobBitcoin() , minTime_(0U) , coinbaseValue_(0) , nmcAuxBits_(0u) - , isMergedMiningCleanJob_(false) -{ - + , isMergedMiningCleanJob_(false) { } string StratumJobBitcoin::serializeToJson() const { @@ -99,51 +99,61 @@ string StratumJobBitcoin::serializeToJson() const { // // we use key->value json string, so it's easy to update system // - return 
Strings::Format("{\"jobId\":%" PRIu64",\"gbtHash\":\"%s\"" - ",\"prevHash\":\"%s\",\"prevHashBeStr\":\"%s\"" - ",\"height\":%d,\"coinbase1\":\"%s\",\"coinbase2\":\"%s\"" - ",\"merkleBranch\":\"%s\"" - ",\"nVersion\":%d,\"nBits\":%u,\"nTime\":%u" - ",\"minTime\":%u,\"coinbaseValue\":%lld" - ",\"witnessCommitment\":\"%s\"" - #ifdef CHAIN_TYPE_UBTC - ",\"rootStateHash\":\"%s\"" - #endif - // namecoin, optional - ",\"nmcBlockHash\":\"%s\",\"nmcBits\":%u,\"nmcHeight\":%d" - ",\"nmcRpcAddr\":\"%s\",\"nmcRpcUserpass\":\"%s\"" - // RSK, optional - ",\"rskBlockHashForMergedMining\":\"%s\",\"rskNetworkTarget\":\"0x%s\"" - ",\"rskFeesForMiner\":\"%s\"" - ",\"rskdRpcAddress\":\"%s\",\"rskdRpcUserPwd\":\"%s\"" - // namecoin and RSK - // TODO: delete isRskCleanJob (keep it for forward compatible). - ",\"isRskCleanJob\":%s,\"mergedMiningClean\":%s" - "}", - jobId_, gbtHash_.c_str(), - prevHash_.ToString().c_str(), prevHashBeStr_.c_str(), - height_, coinbase1_.c_str(), coinbase2_.c_str(), - // merkleBranch_ could be empty - merkleBranchStr.size() ? merkleBranchStr.c_str() : "", - nVersion_, nBits_, nTime_, - minTime_, coinbaseValue_, - witnessCommitment_.size() ? witnessCommitment_.c_str() : "", - #ifdef CHAIN_TYPE_UBTC - rootStateHash_.size() ? rootStateHash_.c_str() : "", - #endif - // nmc - nmcAuxBlockHash_.ToString().c_str(), - nmcAuxBits_, nmcHeight_, - nmcRpcAddr_.size() ? nmcRpcAddr_.c_str() : "", - nmcRpcUserpass_.size() ? nmcRpcUserpass_.c_str() : "", - // rsk - blockHashForMergedMining_.size() ? blockHashForMergedMining_.c_str() : "", - rskNetworkTarget_.GetHex().c_str(), - feesForMiner_.size() ? feesForMiner_.c_str() : "", - rskdRpcAddress_.size() ? rskdRpcAddress_.c_str() : "", - rskdRpcUserPwd_.c_str() ? rskdRpcUserPwd_.c_str() : "", - isMergedMiningCleanJob_ ? "true" : "false", - isMergedMiningCleanJob_ ? 
"true" : "false"); + return Strings::Format( + "{\"jobId\":%" PRIu64 + ",\"gbtHash\":\"%s\"" + ",\"prevHash\":\"%s\",\"prevHashBeStr\":\"%s\"" + ",\"height\":%d,\"coinbase1\":\"%s\",\"coinbase2\":\"%s\"" + ",\"merkleBranch\":\"%s\"" + ",\"nVersion\":%d,\"nBits\":%u,\"nTime\":%u" + ",\"minTime\":%u,\"coinbaseValue\":%lld" + ",\"witnessCommitment\":\"%s\"" +#ifdef CHAIN_TYPE_UBTC + ",\"rootStateHash\":\"%s\"" +#endif + // namecoin, optional + ",\"nmcBlockHash\":\"%s\",\"nmcBits\":%u,\"nmcHeight\":%d" + ",\"nmcRpcAddr\":\"%s\",\"nmcRpcUserpass\":\"%s\"" + // RSK, optional + ",\"rskBlockHashForMergedMining\":\"%s\",\"rskNetworkTarget\":\"0x%s\"" + ",\"rskFeesForMiner\":\"%s\"" + ",\"rskdRpcAddress\":\"%s\",\"rskdRpcUserPwd\":\"%s\"" + // namecoin and RSK + // TODO: delete isRskCleanJob (keep it for forward compatible). + ",\"isRskCleanJob\":%s,\"mergedMiningClean\":%s" + "}", + jobId_, + gbtHash_.c_str(), + prevHash_.ToString().c_str(), + prevHashBeStr_.c_str(), + height_, + coinbase1_.c_str(), + coinbase2_.c_str(), + // merkleBranch_ could be empty + merkleBranchStr.size() ? merkleBranchStr.c_str() : "", + nVersion_, + nBits_, + nTime_, + minTime_, + coinbaseValue_, + witnessCommitment_.size() ? witnessCommitment_.c_str() : "", +#ifdef CHAIN_TYPE_UBTC + rootStateHash_.size() ? rootStateHash_.c_str() : "", +#endif + // nmc + nmcAuxBlockHash_.ToString().c_str(), + nmcAuxBits_, + nmcHeight_, + nmcRpcAddr_.size() ? nmcRpcAddr_.c_str() : "", + nmcRpcUserpass_.size() ? nmcRpcUserpass_.c_str() : "", + // rsk + blockHashForMergedMining_.size() ? blockHashForMergedMining_.c_str() : "", + rskNetworkTarget_.GetHex().c_str(), + feesForMiner_.size() ? feesForMiner_.c_str() : "", + rskdRpcAddress_.size() ? rskdRpcAddress_.c_str() : "", + rskdRpcUserPwd_.c_str() ? rskdRpcUserPwd_.c_str() : "", + isMergedMiningCleanJob_ ? "true" : "false", + isMergedMiningCleanJob_ ? 
"true" : "false"); } bool StratumJobBitcoin::unserializeFromJson(const char *s, size_t len) { @@ -151,40 +161,40 @@ bool StratumJobBitcoin::unserializeFromJson(const char *s, size_t len) { if (!JsonNode::parse(s, s + len, j)) { return false; } - if (j["jobId"].type() != Utilities::JS::type::Int || - j["gbtHash"].type() != Utilities::JS::type::Str || - j["prevHash"].type() != Utilities::JS::type::Str || - j["prevHashBeStr"].type()!= Utilities::JS::type::Str || - j["height"].type() != Utilities::JS::type::Int || - j["coinbase1"].type() != Utilities::JS::type::Str || - j["coinbase2"].type() != Utilities::JS::type::Str || + if (j["jobId"].type() != Utilities::JS::type::Int || + j["gbtHash"].type() != Utilities::JS::type::Str || + j["prevHash"].type() != Utilities::JS::type::Str || + j["prevHashBeStr"].type() != Utilities::JS::type::Str || + j["height"].type() != Utilities::JS::type::Int || + j["coinbase1"].type() != Utilities::JS::type::Str || + j["coinbase2"].type() != Utilities::JS::type::Str || j["merkleBranch"].type() != Utilities::JS::type::Str || - j["nVersion"].type() != Utilities::JS::type::Int || - j["nBits"].type() != Utilities::JS::type::Int || - j["nTime"].type() != Utilities::JS::type::Int || - j["minTime"].type() != Utilities::JS::type::Int || - j["coinbaseValue"].type()!= Utilities::JS::type::Int) { + j["nVersion"].type() != Utilities::JS::type::Int || + j["nBits"].type() != Utilities::JS::type::Int || + j["nTime"].type() != Utilities::JS::type::Int || + j["minTime"].type() != Utilities::JS::type::Int || + j["coinbaseValue"].type() != Utilities::JS::type::Int) { LOG(ERROR) << "parse stratum job failure: " << s; return false; } - jobId_ = j["jobId"].uint64(); - gbtHash_ = j["gbtHash"].str(); - prevHash_ = uint256S(j["prevHash"].str()); + jobId_ = j["jobId"].uint64(); + gbtHash_ = j["gbtHash"].str(); + prevHash_ = uint256S(j["prevHash"].str()); prevHashBeStr_ = j["prevHashBeStr"].str(); - height_ = j["height"].int32(); - coinbase1_ = j["coinbase1"].str(); 
- coinbase2_ = j["coinbase2"].str(); - nVersion_ = j["nVersion"].int32(); - nBits_ = j["nBits"].uint32(); - nTime_ = j["nTime"].uint32(); - minTime_ = j["minTime"].uint32(); + height_ = j["height"].int32(); + coinbase1_ = j["coinbase1"].str(); + coinbase2_ = j["coinbase2"].str(); + nVersion_ = j["nVersion"].int32(); + nBits_ = j["nBits"].uint32(); + nTime_ = j["nTime"].uint32(); + minTime_ = j["minTime"].uint32(); coinbaseValue_ = j["coinbaseValue"].int64(); // witnessCommitment, optional // witnessCommitment must be at least 38 bytes if (j["witnessCommitment"].type() == Utilities::JS::type::Str && - j["witnessCommitment"].str().length() >= 38*2) { + j["witnessCommitment"].str().length() >= 38 * 2) { witnessCommitment_ = j["witnessCommitment"].str(); } @@ -192,7 +202,7 @@ bool StratumJobBitcoin::unserializeFromJson(const char *s, size_t len) { // rootStateHash, optional // rootStateHash must be at least 2 bytes (00f9, empty root state hash) if (j["rootStateHash"].type() == Utilities::JS::type::Str && - j["rootStateHash"].str().length() >= 2*2) { + j["rootStateHash"].str().length() >= 2 * 2) { rootStateHash_ = j["rootStateHash"].str(); } #endif @@ -205,39 +215,39 @@ bool StratumJobBitcoin::unserializeFromJson(const char *s, size_t len) { // // namecoin, optional // - if (j["nmcBlockHash"].type() == Utilities::JS::type::Str && - j["nmcBits"].type() == Utilities::JS::type::Int && - j["nmcHeight"].type() == Utilities::JS::type::Int && - j["nmcRpcAddr"].type() == Utilities::JS::type::Str && + if (j["nmcBlockHash"].type() == Utilities::JS::type::Str && + j["nmcBits"].type() == Utilities::JS::type::Int && + j["nmcHeight"].type() == Utilities::JS::type::Int && + j["nmcRpcAddr"].type() == Utilities::JS::type::Str && j["nmcRpcUserpass"].type() == Utilities::JS::type::Str) { nmcAuxBlockHash_ = uint256S(j["nmcBlockHash"].str()); - nmcAuxBits_ = j["nmcBits"].uint32(); - nmcHeight_ = j["nmcHeight"].int32(); - nmcRpcAddr_ = j["nmcRpcAddr"].str(); - nmcRpcUserpass_ = 
j["nmcRpcUserpass"].str(); + nmcAuxBits_ = j["nmcBits"].uint32(); + nmcHeight_ = j["nmcHeight"].int32(); + nmcRpcAddr_ = j["nmcRpcAddr"].str(); + nmcRpcUserpass_ = j["nmcRpcUserpass"].str(); BitsToTarget(nmcAuxBits_, nmcNetworkTarget_); } // // RSK, optional // - if (j["rskBlockHashForMergedMining"].type() == Utilities::JS::type::Str && - j["rskNetworkTarget"].type() == Utilities::JS::type::Str && - j["rskFeesForMiner"].type() == Utilities::JS::type::Str && - j["rskdRpcAddress"].type() == Utilities::JS::type::Str && - j["rskdRpcUserPwd"].type() == Utilities::JS::type::Str) { + if (j["rskBlockHashForMergedMining"].type() == Utilities::JS::type::Str && + j["rskNetworkTarget"].type() == Utilities::JS::type::Str && + j["rskFeesForMiner"].type() == Utilities::JS::type::Str && + j["rskdRpcAddress"].type() == Utilities::JS::type::Str && + j["rskdRpcUserPwd"].type() == Utilities::JS::type::Str) { blockHashForMergedMining_ = j["rskBlockHashForMergedMining"].str(); - rskNetworkTarget_ = uint256S(j["rskNetworkTarget"].str()); - feesForMiner_ = j["rskFeesForMiner"].str(); - rskdRpcAddress_ = j["rskdRpcAddress"].str(); - rskdRpcUserPwd_ = j["rskdRpcUserPwd"].str(); + rskNetworkTarget_ = uint256S(j["rskNetworkTarget"].str()); + feesForMiner_ = j["rskFeesForMiner"].str(); + rskdRpcAddress_ = j["rskdRpcAddress"].str(); + rskdRpcUserPwd_ = j["rskdRpcUserPwd"].str(); } const string merkleBranchStr = j["merkleBranch"].str(); const size_t merkleBranchCount = merkleBranchStr.length() / 64; merkleBranch_.resize(merkleBranchCount); for (size_t i = 0; i < merkleBranchCount; i++) { - merkleBranch_[i] = uint256S(merkleBranchStr.substr(i*64, 64)); + merkleBranch_[i] = uint256S(merkleBranchStr.substr(i * 64, 64)); } BitsToTarget(nBits_, networkTarget_); @@ -245,14 +255,15 @@ bool StratumJobBitcoin::unserializeFromJson(const char *s, size_t len) { return true; } -bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseInfo, - const CTxDestination &poolPayoutAddr, - const 
uint32_t blockVersion, - const string &nmcAuxBlockJson, - const RskWork &latestRskBlockJson, - const uint8_t serverId, - const bool isMergedMiningUpdate) -{ +bool StratumJobBitcoin::initFromGbt( + const char *gbt, + const string &poolCoinbaseInfo, + const CTxDestination &poolPayoutAddr, + const uint32_t blockVersion, + const string &nmcAuxBlockJson, + const RskWork &latestRskBlockJson, + const uint8_t serverId, + const bool isMergedMiningUpdate) { uint256 gbtHash = Hash(gbt, gbt + strlen(gbt)); JsonNode r; if (!JsonNode::parse(gbt, gbt + strlen(gbt), r)) { @@ -261,38 +272,41 @@ bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseI } JsonNode jgbt = r["result"]; - // jobId: timestamp + gbtHash, we need to make sure jobId is unique in a some time - // jobId can convert to uint64_t - auto hash = reinterpret_cast(gbtHash.begin()); - jobId_ = (static_cast(time(nullptr)) << 32) | (hash->value() & 0xFFFFFF00) | serverId; + // jobId: timestamp + gbtHash, we need to make sure jobId is unique in a some + // time jobId can convert to uint64_t + auto hash = + reinterpret_cast(gbtHash.begin()); + jobId_ = (static_cast(time(nullptr)) << 32) | + (hash->value() & 0xFFFFFF00) | serverId; gbtHash_ = gbtHash.ToString(); // height etc. 
// fields in gbt json has already checked by GbtMaker prevHash_ = uint256S(jgbt["previousblockhash"].str()); - height_ = jgbt["height"].int32(); + height_ = jgbt["height"].int32(); if (blockVersion != 0) { nVersion_ = blockVersion; } else { nVersion_ = jgbt["version"].uint32(); } - nBits_ = jgbt["bits"].uint32_hex(); - nTime_ = jgbt["curtime"].uint32(); - minTime_ = jgbt["mintime"].uint32(); + nBits_ = jgbt["bits"].uint32_hex(); + nTime_ = jgbt["curtime"].uint32(); + minTime_ = jgbt["mintime"].uint32(); coinbaseValue_ = jgbt["coinbasevalue"].int64(); // default_witness_commitment must be at least 38 bytes if (jgbt["default_witness_commitment"].type() == Utilities::JS::type::Str && - jgbt["default_witness_commitment"].str().length() >= 38*2) { + jgbt["default_witness_commitment"].str().length() >= 38 * 2) { witnessCommitment_ = jgbt["default_witness_commitment"].str(); } #ifdef CHAIN_TYPE_UBTC // rootStateHash, optional - // default_root_state_hash must be at least 2 bytes (00f9, empty root state hash) + // default_root_state_hash must be at least 2 bytes (00f9, empty root state + // hash) if (jgbt["default_root_state_hash"].type() == Utilities::JS::type::Str && - jgbt["default_root_state_hash"].str().length() >= 2*2) { + jgbt["default_root_state_hash"].str().length() >= 2 * 2) { rootStateHash_ = jgbt["default_root_state_hash"].str(); } #endif @@ -312,23 +326,20 @@ bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseI #ifdef CHAIN_TYPE_BCH bool isLightVersion = jgbt["job_id"].type() == Utilities::JS::type::Str; // merkle branch, merkleBranch_ could be empty - if(isLightVersion) - { - auto& gbtMerkle = jgbt["merkle"].array(); - for(auto& mHex : gbtMerkle) - { + if (isLightVersion) { + auto &gbtMerkle = jgbt["merkle"].array(); + for (auto &mHex : gbtMerkle) { uint256 m; m.SetHex(mHex.str().c_str()); merkleBranch_.push_back(m); } - } - else + } else #endif // merkle branch, merkleBranch_ could be empty { // read txs hash/data - vector 
vtxhashs; // txs without coinbase - for (JsonNode & node : jgbt["transactions"].array()) { + vector vtxhashs; // txs without coinbase + for (JsonNode &node : jgbt["transactions"].array()) { CMutableTransaction tx; DecodeHexTx(tx, node["data"].str()); vtxhashs.push_back(MakeTransactionRef(std::move(tx))->GetHash()); @@ -346,42 +357,45 @@ bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseI if (!nmcAuxBlockJson.empty()) { do { JsonNode jNmcAux; - if (!JsonNode::parse(nmcAuxBlockJson.c_str(), - nmcAuxBlockJson.c_str() + nmcAuxBlockJson.length(), - jNmcAux)) { - LOG(ERROR) << "decode nmc auxblock json fail: >" << nmcAuxBlockJson << "<"; + if (!JsonNode::parse( + nmcAuxBlockJson.c_str(), + nmcAuxBlockJson.c_str() + nmcAuxBlockJson.length(), + jNmcAux)) { + LOG(ERROR) << "decode nmc auxblock json fail: >" << nmcAuxBlockJson + << "<"; break; } // check fields created_at_ts if (jNmcAux["created_at_ts"].type() != Utilities::JS::type::Int || - jNmcAux["hash"].type() != Utilities::JS::type::Str || - jNmcAux["merkle_size"].type() != Utilities::JS::type::Int || - jNmcAux["merkle_nonce"].type() != Utilities::JS::type::Int || - jNmcAux["height"].type() != Utilities::JS::type::Int || - jNmcAux["bits"].type() != Utilities::JS::type::Str || - jNmcAux["rpc_addr"].type() != Utilities::JS::type::Str || - jNmcAux["rpc_userpass"].type() != Utilities::JS::type::Str) { + jNmcAux["hash"].type() != Utilities::JS::type::Str || + jNmcAux["merkle_size"].type() != Utilities::JS::type::Int || + jNmcAux["merkle_nonce"].type() != Utilities::JS::type::Int || + jNmcAux["height"].type() != Utilities::JS::type::Int || + jNmcAux["bits"].type() != Utilities::JS::type::Str || + jNmcAux["rpc_addr"].type() != Utilities::JS::type::Str || + jNmcAux["rpc_userpass"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "nmc auxblock fields failure"; break; } // check timestamp if (jNmcAux["created_at_ts"].uint32() + 60u < time(nullptr)) { - LOG(ERROR) << "too old nmc auxblock: " << 
date("%F %T", jNmcAux["created_at_ts"].uint32()); + LOG(ERROR) << "too old nmc auxblock: " + << date("%F %T", jNmcAux["created_at_ts"].uint32()); break; } // set nmc aux info - nmcAuxBlockHash_ = uint256S(jNmcAux["hash"].str()); - nmcAuxMerkleSize_ = jNmcAux["merkle_size"].int32(); + nmcAuxBlockHash_ = uint256S(jNmcAux["hash"].str()); + nmcAuxMerkleSize_ = jNmcAux["merkle_size"].int32(); nmcAuxMerkleNonce_ = jNmcAux["merkle_nonce"].int32(); - nmcAuxBits_ = jNmcAux["bits"].uint32_hex(); - nmcHeight_ = jNmcAux["height"].int32(); - nmcRpcAddr_ = jNmcAux["rpc_addr"].str(); - nmcRpcUserpass_ = jNmcAux["rpc_userpass"].str(); + nmcAuxBits_ = jNmcAux["bits"].uint32_hex(); + nmcHeight_ = jNmcAux["height"].int32(); + nmcRpcAddr_ = jNmcAux["rpc_addr"].str(); + nmcRpcUserpass_ = jNmcAux["rpc_userpass"].str(); BitsToTarget(nmcAuxBits_, nmcNetworkTarget_); } while (0); } - + // // rsk merged mining // @@ -416,8 +430,8 @@ bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseI cbIn.scriptSig << CScriptNum((uint32_t)time(nullptr)); // pool's info - cbIn.scriptSig.insert(cbIn.scriptSig.end(), - poolCoinbaseInfo.begin(), poolCoinbaseInfo.end()); + cbIn.scriptSig.insert( + cbIn.scriptSig.end(), poolCoinbaseInfo.begin(), poolCoinbaseInfo.end()); // // put namecoin merged mining info, 44 bytes @@ -425,41 +439,47 @@ bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseI // if (nmcAuxBits_ != 0u) { string merkleSize, merkleNonce; - Bin2Hex((uint8_t *)&nmcAuxMerkleSize_, 4, merkleSize); + Bin2Hex((uint8_t *)&nmcAuxMerkleSize_, 4, merkleSize); Bin2Hex((uint8_t *)&nmcAuxMerkleNonce_, 4, merkleNonce); - string mergedMiningCoinbase = Strings::Format("%s%s%s%s", - // magic: 0xfa, 0xbe, 0x6d('m'), 0x6d('m') - "fabe6d6d", - // block_hash: Hash of the AuxPOW block header - nmcAuxBlockHash_.ToString().c_str(), - merkleSize.c_str(), // merkle_size : 1 - merkleNonce.c_str() // merkle_nonce: 0 - ); + string mergedMiningCoinbase = Strings::Format( + 
"%s%s%s%s", + // magic: 0xfa, 0xbe, 0x6d('m'), 0x6d('m') + "fabe6d6d", + // block_hash: Hash of the AuxPOW block header + nmcAuxBlockHash_.ToString().c_str(), + merkleSize.c_str(), // merkle_size : 1 + merkleNonce.c_str() // merkle_nonce: 0 + ); vector mergedMiningBin; Hex2Bin(mergedMiningCoinbase.c_str(), mergedMiningBin); - assert(mergedMiningBin.size() == (12+32)); - cbIn.scriptSig.insert(cbIn.scriptSig.end(), - mergedMiningBin.begin(), mergedMiningBin.end()); + assert(mergedMiningBin.size() == (12 + 32)); + cbIn.scriptSig.insert( + cbIn.scriptSig.end(), mergedMiningBin.begin(), mergedMiningBin.end()); } - #ifdef USER_DEFINED_COINBASE +#ifdef USER_DEFINED_COINBASE // reserved for user defined coinbase info string userCoinbaseInfoPadding; userCoinbaseInfoPadding.resize(USER_DEFINED_COINBASE_SIZE, '\x20'); - cbIn.scriptSig.insert(cbIn.scriptSig.end(), userCoinbaseInfoPadding.begin(), userCoinbaseInfoPadding.end()); - #endif + cbIn.scriptSig.insert( + cbIn.scriptSig.end(), + userCoinbaseInfoPadding.begin(), + userCoinbaseInfoPadding.end()); +#endif // placeHolder: extra nonce1 (4bytes) + extra nonce2 (8bytes) const vector placeHolder(4 + 8, 0xEE); // pub extra nonce place holder - cbIn.scriptSig.insert(cbIn.scriptSig.end(), placeHolder.begin(), placeHolder.end()); + cbIn.scriptSig.insert( + cbIn.scriptSig.end(), placeHolder.begin(), placeHolder.end()); // 100: coinbase script sig max len, range: (2, 100). 
// // bitcoind/src/main.cpp: CheckTransaction() // if (tx.IsCoinBase()) // { - // if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100) + // if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > + // 100) // return state.DoS(100, false, REJECT_INVALID, "bad-cb-length"); // } // @@ -492,14 +512,15 @@ bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseI Hex2Bin(witnessCommitment_.c_str(), binBuf); CTxOut witnessTxOut; - witnessTxOut.scriptPubKey = CScript((unsigned char*)binBuf.data(), - (unsigned char*)binBuf.data() + binBuf.size()); + witnessTxOut.scriptPubKey = CScript( + (unsigned char *)binBuf.data(), + (unsigned char *)binBuf.data() + binBuf.size()); witnessTxOut.nValue = AMOUNT_TYPE(0); cbOut.push_back(witnessTxOut); } - #ifdef CHAIN_TYPE_UBTC +#ifdef CHAIN_TYPE_UBTC // // output[2] (optional): root state hash of UB smart contract // @@ -509,20 +530,22 @@ bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseI Hex2Bin(rootStateHash_.c_str(), binBuf); CTxOut rootStateTxOut; - rootStateTxOut.scriptPubKey = CScript((unsigned char*)binBuf.data(), - (unsigned char*)binBuf.data() + binBuf.size()); + rootStateTxOut.scriptPubKey = CScript( + (unsigned char *)binBuf.data(), + (unsigned char *)binBuf.data() + binBuf.size()); rootStateTxOut.nValue = 0; cbOut.push_back(rootStateTxOut); } - #endif +#endif // // output[3] (optional): RSK merge mining // if (latestRskBlockJson.isInitialized()) { DLOG(INFO) << "RSK blockhash: " << blockHashForMergedMining_; - string rskBlockTag = "\x52\x53\x4B\x42\x4C\x4F\x43\x4B\x3A"; // "RSKBLOCK:" + string rskBlockTag = + "\x52\x53\x4B\x42\x4C\x4F\x43\x4B\x3A"; // "RSKBLOCK:" vector rskTag(rskBlockTag.begin(), rskBlockTag.end()); vector binBuf; @@ -531,8 +554,9 @@ bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseI rskTag.insert(std::end(rskTag), std::begin(binBuf), std::end(binBuf)); CTxOut rskTxOut; - rskTxOut.scriptPubKey = 
CScript((unsigned char*)rskTag.data(), - (unsigned char*)rskTag.data() + rskTag.size()); + rskTxOut.scriptPubKey = CScript( + (unsigned char *)rskTag.data(), + (unsigned char *)rskTag.data() + rskTag.size()); rskTxOut.nValue = AMOUNT_TYPE(0); cbOut.push_back(rskTxOut); @@ -546,24 +570,26 @@ bool StratumJobBitcoin::initFromGbt(const char *gbt, const string &poolCoinbaseI { CSerializeData sdata; CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); - ssTx << cbtx; // put coinbase CTransaction to CDataStream - ssTx.GetAndClear(sdata); // dump coinbase bin to coinbaseTpl + ssTx << cbtx; // put coinbase CTransaction to CDataStream + ssTx.GetAndClear(sdata); // dump coinbase bin to coinbaseTpl coinbaseTpl.insert(coinbaseTpl.end(), sdata.begin(), sdata.end()); } // check coinbase tx size if (coinbaseTpl.size() >= COINBASE_TX_MAX_SIZE) { LOG(FATAL) << "coinbase tx size " << coinbaseTpl.size() - << " is over than max " << COINBASE_TX_MAX_SIZE; + << " is over than max " << COINBASE_TX_MAX_SIZE; return false; } - const int64_t extraNonceStart = findExtraNonceStart(coinbaseTpl, placeHolder); + const int64_t extraNonceStart = + findExtraNonceStart(coinbaseTpl, placeHolder); coinbase1_ = HexStr(&coinbaseTpl[0], &coinbaseTpl[extraNonceStart]); - coinbase2_ = HexStr(&coinbaseTpl[extraNonceStart + placeHolder.size()], - &coinbaseTpl[coinbaseTpl.size()]); + coinbase2_ = HexStr( + &coinbaseTpl[extraNonceStart + placeHolder.size()], + &coinbaseTpl[coinbaseTpl.size()]); } - + return true; } diff --git a/src/bitcoin/StratumBitcoin.h b/src/bitcoin/StratumBitcoin.h index 5191b7012..cbe57b8d2 100644 --- a/src/bitcoin/StratumBitcoin.h +++ b/src/bitcoin/StratumBitcoin.h @@ -33,16 +33,15 @@ #include "script/standard.h" #include "bitcoin/bitcoin.pb.h" - // // max coinbase tx size, bytes -// Tips: currently there is only 1 input and 1, 2 or 3 output (reward, segwit and RSK outputs), +// Tips: currently there is only 1 input and 1, 2 or 3 output (reward, segwit +// and RSK outputs), // so 500 bytes 
may enough. -#define COINBASE_TX_MAX_SIZE 500 +#define COINBASE_TX_MAX_SIZE 500 ////////////////////////////////// FoundBlock ////////////////////////////////// -class FoundBlock -{ +class FoundBlock { public: uint64_t jobId_; int64_t workerId_; // found by who @@ -51,227 +50,235 @@ class FoundBlock uint8_t header80_[80]; char workerFullName_[40]; // . - FoundBlock() : jobId_(0), workerId_(0), userId_(0), height_(0) - { + FoundBlock() + : jobId_(0) + , workerId_(0) + , userId_(0) + , height_(0) { memset(header80_, 0, sizeof(header80_)); memset(workerFullName_, 0, sizeof(workerFullName_)); } }; - struct ShareBitcoinBytesVersion { - uint32_t version_ = 0; - uint32_t checkSum_ = 0; + uint32_t version_ = 0; + uint32_t checkSum_ = 0; - int64_t workerHashId_ = 0; - int32_t userId_ = 0; - int32_t status_ = 0; - int64_t timestamp_ = 0; - IpAddress ip_ = 0; + int64_t workerHashId_ = 0; + int32_t userId_ = 0; + int32_t status_ = 0; + int64_t timestamp_ = 0; + IpAddress ip_ = 0; - uint64_t jobId_ = 0; + uint64_t jobId_ = 0; uint64_t shareDiff_ = 0; - uint32_t blkBits_ = 0; - uint32_t height_ = 0; - uint32_t nonce_ = 0; + uint32_t blkBits_ = 0; + uint32_t height_ = 0; + uint32_t nonce_ = 0; uint32_t sessionId_ = 0; uint32_t checkSum() const { uint64_t c = 0; - c += (uint64_t) version_; - c += (uint64_t) workerHashId_; - c += (uint64_t) userId_; - c += (uint64_t) status_; - c += (uint64_t) timestamp_; - c += (uint64_t) ip_.addrUint64[0]; - c += (uint64_t) ip_.addrUint64[1]; - c += (uint64_t) jobId_; - c += (uint64_t) shareDiff_; - c += (uint64_t) blkBits_; - c += (uint64_t) height_; - c += (uint64_t) nonce_; - c += (uint64_t) sessionId_; - - return ((uint32_t) c) + ((uint32_t) (c >> 32)); + c += (uint64_t)version_; + c += (uint64_t)workerHashId_; + c += (uint64_t)userId_; + c += (uint64_t)status_; + c += (uint64_t)timestamp_; + c += (uint64_t)ip_.addrUint64[0]; + c += (uint64_t)ip_.addrUint64[1]; + c += (uint64_t)jobId_; + c += (uint64_t)shareDiff_; + c += (uint64_t)blkBits_; + 
c += (uint64_t)height_; + c += (uint64_t)nonce_; + c += (uint64_t)sessionId_; + + return ((uint32_t)c) + ((uint32_t)(c >> 32)); } }; -class ShareBitcoin : public sharebase::BitcoinMsg { +class ShareBitcoin : public sharebase::BitcoinMsg { public: - ShareBitcoin() { - set_version(CURRENT_VERSION); - set_workerhashid(0); - set_userid(0); - set_status(0); - set_timestamp(0); - set_ip("0.0.0.0"); - set_jobid(0); - set_sharediff(0); - set_blkbits(0); - set_height(0); - set_nonce(0); - set_sessionid(0); - } + ShareBitcoin() { + set_version(CURRENT_VERSION); + set_workerhashid(0); + set_userid(0); + set_status(0); + set_timestamp(0); + set_ip("0.0.0.0"); + set_jobid(0); + set_sharediff(0); + set_blkbits(0); + set_height(0); + set_nonce(0); + set_sessionid(0); + } - ShareBitcoin(const ShareBitcoin &r) = default; - ShareBitcoin&operator=(const ShareBitcoin &r) = default; + ShareBitcoin(const ShareBitcoin &r) = default; + ShareBitcoin &operator=(const ShareBitcoin &r) = default; - double score() const { + double score() const { - if (sharediff() == 0 || blkbits() == 0) - { - return 0.0; - } + if (sharediff() == 0 || blkbits() == 0) { + return 0.0; + } - double networkDifficulty = 1.0;//0.0; - BitsToDifficulty(blkbits(), &networkDifficulty); + double networkDifficulty = 1.0; // 0.0; + BitsToDifficulty(blkbits(), &networkDifficulty); - if (networkDifficulty < (double)sharediff()) - { - return 1.0; - } - - return (double)sharediff() / networkDifficulty; + if (networkDifficulty < (double)sharediff()) { + return 1.0; } + return (double)sharediff() / networkDifficulty; + } - bool isValid() const { - - if (version() != CURRENT_VERSION) { - DLOG(INFO) << "share version " << version(); - return false; - } + bool isValid() const { - if (jobid() == 0 || userid() == 0 || workerhashid() == 0 || - height() == 0 || blkbits() == 0 || sharediff() == 0) - { - DLOG(INFO) << "share jobid : " << jobid() << "\n" - << "share userid : " << userid() << "\n" - << "share workerhashid : " << 
workerhashid() << "\n" - << "share height : " << height() << "\n" - << "share blkbits : " << blkbits() << "\n" - << "share sharediff : " << sharediff() << "\n" ; - return false; - } + if (version() != CURRENT_VERSION) { + DLOG(INFO) << "share version " << version(); + return false; + } - return true; + if (jobid() == 0 || userid() == 0 || workerhashid() == 0 || height() == 0 || + blkbits() == 0 || sharediff() == 0) { + DLOG(INFO) << "share jobid : " << jobid() << "\n" + << "share userid : " << userid() << "\n" + << "share workerhashid : " << workerhashid() << "\n" + << "share height : " << height() << "\n" + << "share blkbits : " << blkbits() << "\n" + << "share sharediff : " << sharediff() << "\n"; + return false; } - std::string toString() const { + return true; + } - double networkDifficulty = 0.0; - BitsToDifficulty(blkbits(), &networkDifficulty); + std::string toString() const { + + double networkDifficulty = 0.0; + BitsToDifficulty(blkbits(), &networkDifficulty); + + return Strings::Format( + "share(jobId: %" PRIu64 + ", ip: %s, userId: %d, " + "workerId: %" PRId64 + ", time: %u/%s, height: %u, " + "blkBits: %08x/%lf, shareDiff: %" PRIu64 + ", " + "status: %d/%s)", + jobid(), + ip().c_str(), + userid(), + workerhashid(), + timestamp(), + date("%F %T", timestamp()).c_str(), + height(), + blkbits(), + networkDifficulty, + sharediff(), + status(), + StratumStatus::toString(status())); + } - return Strings::Format("share(jobId: %" PRIu64 ", ip: %s, userId: %d, " - "workerId: %" PRId64 ", time: %u/%s, height: %u, " - "blkBits: %08x/%lf, shareDiff: %" PRIu64 ", " - "status: %d/%s)", - jobid(), ip().c_str(), userid(), - workerhashid(), timestamp(), date("%F %T", timestamp()).c_str(), height(), - blkbits(), networkDifficulty, sharediff(), - status(), StratumStatus::toString(status())); + bool SerializeToBuffer(string &data, uint32_t &size) const { + size = ByteSize(); + data.resize(size); + if (!SerializeToArray((uint8_t *)data.data(), size)) { + DLOG(INFO) << "share 
SerializeToArray failed!"; + return false; } + return true; + } - bool SerializeToBuffer(string& data, uint32_t& size) const { - size = ByteSize(); - data.resize(size); - if (!SerializeToArray((uint8_t *)data.data(), size)) { - DLOG(INFO) << "share SerializeToArray failed!"; - return false; - } - return true; + bool UnserializeWithVersion(const uint8_t *data, uint32_t size) { + + if (nullptr == data || size <= 0) { + return false; } - bool UnserializeWithVersion(const uint8_t* data, uint32_t size){ + const uint8_t *payload = data; + uint32_t version = *((uint32_t *)payload); - if(nullptr == data || size <= 0) { + if (version == CURRENT_VERSION) { + if (!ParseFromArray( + (const uint8_t *)(payload + sizeof(uint32_t)), + size - sizeof(uint32_t))) { + DLOG(INFO) << "share ParseFromArray failed!"; return false; } + } else if ( + version == BYTES_VERSION && size == sizeof(ShareBitcoinBytesVersion)) { + + ShareBitcoinBytesVersion *share = (ShareBitcoinBytesVersion *)payload; - const uint8_t * payload = data; - uint32_t version = *((uint32_t*)payload); - - if (version == CURRENT_VERSION) { - if (!ParseFromArray((const uint8_t *)(payload + sizeof(uint32_t)), size - sizeof(uint32_t))) { - DLOG(INFO) << "share ParseFromArray failed!"; - return false; - } - } else if (version == BYTES_VERSION && size == sizeof(ShareBitcoinBytesVersion)){ - - ShareBitcoinBytesVersion* share = (ShareBitcoinBytesVersion*) payload; - - if (share->checkSum() != share->checkSum_) { - DLOG(INFO) << "checkSum mismatched! 
checkSum_: " << share->checkSum_<< ", checkSum(): " << share->checkSum(); - return false; - } - - set_version(CURRENT_VERSION); - set_workerhashid(share->workerHashId_); - set_userid(share->userId_); - set_status(share->status_); - set_timestamp(share->timestamp_); - set_ip(share->ip_.toString()); - set_jobid(share->jobId_); - set_sharediff(share->shareDiff_); - set_blkbits(share->blkBits_); - set_height(share->height_); - set_nonce(share->nonce_); - set_sessionid(share->sessionId_); - } else { - DLOG(INFO) << "unknow share received!"; + if (share->checkSum() != share->checkSum_) { + DLOG(INFO) << "checkSum mismatched! checkSum_: " << share->checkSum_ + << ", checkSum(): " << share->checkSum(); return false; } - return true; + set_version(CURRENT_VERSION); + set_workerhashid(share->workerHashId_); + set_userid(share->userId_); + set_status(share->status_); + set_timestamp(share->timestamp_); + set_ip(share->ip_.toString()); + set_jobid(share->jobId_); + set_sharediff(share->shareDiff_); + set_blkbits(share->blkBits_); + set_height(share->height_); + set_nonce(share->nonce_); + set_sessionid(share->sessionId_); + } else { + DLOG(INFO) << "unknow share received!"; + return false; } - bool SerializeToArrayWithVersion(string& data, uint32_t& size) const { - size = ByteSize(); - data.resize(size + sizeof(uint32_t)); + return true; + } - uint8_t * payload = (uint8_t *)data.data(); - *((uint32_t*)payload) = version(); + bool SerializeToArrayWithVersion(string &data, uint32_t &size) const { + size = ByteSize(); + data.resize(size + sizeof(uint32_t)); - if (!SerializeToArray(payload + sizeof(uint32_t), size)) { - DLOG(INFO) << "SerializeToArray failed!"; - return false; - } + uint8_t *payload = (uint8_t *)data.data(); + *((uint32_t *)payload) = version(); - size += sizeof(uint32_t); - return true; + if (!SerializeToArray(payload + sizeof(uint32_t), size)) { + DLOG(INFO) << "SerializeToArray failed!"; + return false; } - bool SerializeToArrayWithLength(string& data, 
uint32_t& size) const { - size = ByteSize(); - data.resize(size + sizeof(uint32_t)); + size += sizeof(uint32_t); + return true; + } - *((uint32_t*)data.data()) = size; - uint8_t * payload = (uint8_t *)data.data(); + bool SerializeToArrayWithLength(string &data, uint32_t &size) const { + size = ByteSize(); + data.resize(size + sizeof(uint32_t)); - if (!SerializeToArray(payload + sizeof(uint32_t), size)) { - DLOG(INFO) << "SerializeToArray failed!"; - return false; - } + *((uint32_t *)data.data()) = size; + uint8_t *payload = (uint8_t *)data.data(); - size += sizeof(uint32_t); - return true; + if (!SerializeToArray(payload + sizeof(uint32_t), size)) { + DLOG(INFO) << "SerializeToArray failed!"; + return false; } - size_t getsharelength() { - return IsInitialized() ? ByteSize() : 0; - } + size += sizeof(uint32_t); + return true; + } + size_t getsharelength() { return IsInitialized() ? ByteSize() : 0; } public: - const static uint32_t BYTES_VERSION = 0x00010003u; const static uint32_t CURRENT_VERSION = 0x00010004u; }; - -class StratumJobBitcoin : public StratumJob -{ +class StratumJobBitcoin : public StratumJob { public: string gbtHash_; // gbt hash id uint256 prevHash_; @@ -290,7 +297,7 @@ class StratumJobBitcoin : public StratumJob string witnessCommitment_; #ifdef CHAIN_TYPE_UBTC // if UB smart contract is not active, it will be empty - string rootStateHash_; + string rootStateHash_; #endif uint256 networkTarget_; @@ -315,17 +322,18 @@ class StratumJobBitcoin : public StratumJob public: StratumJobBitcoin(); - bool initFromGbt( const char *gbt, const string &poolCoinbaseInfo, - const CTxDestination &poolPayoutAddr, - const uint32_t blockVersion, - const string &nmcAuxBlockJson, - const RskWork &latestRskBlockJson, - const uint8_t serverId, - const bool isMergedMiningUpdate); + bool initFromGbt( + const char *gbt, + const string &poolCoinbaseInfo, + const CTxDestination &poolPayoutAddr, + const uint32_t blockVersion, + const string &nmcAuxBlockJson, + const RskWork 
&latestRskBlockJson, + const uint8_t serverId, + const bool isMergedMiningUpdate); string serializeToJson() const override; bool unserializeFromJson(const char *s, size_t len) override; bool isEmptyBlock(); - }; class ServerBitcoin; @@ -337,8 +345,12 @@ struct StratumTraitsBitcoin { using JobDiffType = uint64_t; struct LocalJobType : public LocalJob { LocalJobType(uint64_t jobId, uint8_t shortJobId, uint32_t blkBits) - : LocalJob(jobId), shortJobId_(shortJobId), blkBits_(blkBits) {} - bool operator==(uint8_t shortJobId) const { return shortJobId_ == shortJobId; } + : LocalJob(jobId) + , shortJobId_(shortJobId) + , blkBits_(blkBits) {} + bool operator==(uint8_t shortJobId) const { + return shortJobId_ == shortJobId; + } uint8_t shortJobId_; uint32_t blkBits_; }; diff --git a/src/bitcoin/StratumMinerBitcoin.cc b/src/bitcoin/StratumMinerBitcoin.cc index 2873aea87..8b43bd772 100644 --- a/src/bitcoin/StratumMinerBitcoin.cc +++ b/src/bitcoin/StratumMinerBitcoin.cc @@ -30,31 +30,34 @@ #include #include -///////////////////////////////// StratumMinerBitcoin //////////////////////////////// -StratumMinerBitcoin::StratumMinerBitcoin(StratumSessionBitcoin &session, - const DiffController &diffController, - const string &clientAgent, - const string &workerName, - int64_t workerId) - : StratumMinerBase(session, diffController, clientAgent, workerName, workerId) { +///////////////////////////////// StratumMinerBitcoin +/////////////////////////////////// +StratumMinerBitcoin::StratumMinerBitcoin( + StratumSessionBitcoin &session, + const DiffController &diffController, + const string &clientAgent, + const string &workerName, + int64_t workerId) + : StratumMinerBase( + session, diffController, clientAgent, workerName, workerId) { } -void StratumMinerBitcoin::handleRequest(const string &idStr, - const string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumMinerBitcoin::handleRequest( + const string &idStr, + const string &method, + const JsonNode 
&jparams, + const JsonNode &jroot) { if (method == "mining.submit") { handleRequest_Submit(idStr, jparams); - } else if (method == "mining.suggest_target") { - handleRequest_SuggestTarget(idStr, jparams); } } void StratumMinerBitcoin::handleExMessage(const std::string &exMessage) { // - // SUBMIT_SHARE | SUBMIT_SHARE_WITH_TIME | SUBMIT_SHARE_WITH_VER | SUBMIT_SHARE_WITH_TIME_VER: - // | magic_number(1) | cmd(1) | len (2) | jobId (uint8_t) | session_id (uint16_t) | - // | extra_nonce2 (uint32_t) | nNonce (uint32_t) | [nTime (uint32_t) ] | [nVersionMask (uint32_t)] | + // SUBMIT_SHARE | SUBMIT_SHARE_WITH_TIME | SUBMIT_SHARE_WITH_VER | + // SUBMIT_SHARE_WITH_TIME_VER: | magic_number(1) | cmd(1) | len (2) | jobId + // (uint8_t) | session_id (uint16_t) | | extra_nonce2 (uint32_t) | nNonce + // (uint32_t) | [nTime (uint32_t) ] | [nVersionMask (uint32_t)] | // auto command = static_cast(exMessage[1]); if (command == StratumCommandEx::SUBMIT_SHARE) { @@ -68,13 +71,15 @@ void StratumMinerBitcoin::handleExMessage(const std::string &exMessage) { } } -void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, const JsonNode &jparams) { +void StratumMinerBitcoin::handleRequest_Submit( + const string &idStr, const JsonNode &jparams) { auto &session = getSession(); if (session.getState() != StratumSession::AUTHENTICATED) { session.responseError(idStr, StratumStatus::UNAUTHORIZED); // there must be something wrong, send reconnect command - const string s = "{\"id\":null,\"method\":\"client.reconnect\",\"params\":[]}\n"; + const string s = + "{\"id\":null,\"method\":\"client.reconnect\",\"params\":[]}\n"; session.sendData(s); return; } @@ -92,9 +97,10 @@ void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, const JsonNo uint8_t shortJobId; if (isNiceHashClient_) { - shortJobId = (uint8_t) (jparams.children()->at(1).uint64() % 10); + shortJobId = (uint8_t)( + jparams.children()->at(1).uint64() % session.maxNumLocalJobs()); } else { - shortJobId = (uint8_t) 
jparams.children()->at(1).uint32(); + shortJobId = (uint8_t)jparams.children()->at(1).uint32(); } const uint64_t extraNonce2 = jparams.children()->at(2).uint64_hex(); uint32_t nTime = jparams.children()->at(3).uint32_hex(); @@ -105,30 +111,19 @@ void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, const JsonNo versionMask = jparams.children()->at(5).uint32_hex(); } - handleRequest_Submit(idStr, shortJobId, extraNonce2, nonce, nTime, versionMask); + handleRequest_Submit( + idStr, shortJobId, extraNonce2, nonce, nTime, versionMask); } -void StratumMinerBitcoin::handleRequest_SuggestTarget(const string &idStr, - const JsonNode &jparams) { - auto &session = getSession(); - if (session.getState() != StratumSession::CONNECTED) { - return; // suggest should be call before subscribe - } - - if (jparams.children()->size() == 0) { - session.responseError(idStr, StratumStatus::ILLEGAL_PARARMS); - return; - } - resetCurDiff(formatDifficulty(TargetToDiff(jparams.children()->at(0).str()))); -} - -void StratumMinerBitcoin::handleExMessage_SubmitShare(const std::string &exMessage, - const bool isWithTime, - const bool isWithVersion) { +void StratumMinerBitcoin::handleExMessage_SubmitShare( + const std::string &exMessage, + const bool isWithTime, + const bool isWithVersion) { // - // SUBMIT_SHARE | SUBMIT_SHARE_WITH_TIME | SUBMIT_SHARE_WITH_VER | SUBMIT_SHARE_WITH_TIME_VER: - // | magic_number(1) | cmd(1) | len (2) | jobId (uint8_t) | session_id (uint16_t) | - // | extra_nonce2 (uint32_t) | nNonce (uint32_t) | [nTime (uint32_t) ] | [nVersionMask (uint32_t)] | + // SUBMIT_SHARE | SUBMIT_SHARE_WITH_TIME | SUBMIT_SHARE_WITH_VER | + // SUBMIT_SHARE_WITH_TIME_VER: | magic_number(1) | cmd(1) | len (2) | jobId + // (uint8_t) | session_id (uint16_t) | | extra_nonce2 (uint32_t) | nNonce + // (uint32_t) | [nTime (uint32_t) ] | [nVersionMask (uint32_t)] | // size_t msgSize = 15; if (isWithTime) { @@ -142,33 +137,42 @@ void StratumMinerBitcoin::handleExMessage_SubmitShare(const 
std::string &exMessa } const uint8_t *p = (uint8_t *)exMessage.data(); - const uint8_t shortJobId = *(uint8_t *)(p + 4); - const uint16_t sessionId = *(uint16_t *)(p + 5); + const uint8_t shortJobId = *(uint8_t *)(p + 4); + const uint16_t sessionId = *(uint16_t *)(p + 5); if (sessionId > StratumMessageEx::AGENT_MAX_SESSION_ID) { return; } - const uint32_t exNonce2 = *(uint32_t *)(p + 7); - const uint32_t nonce = *(uint32_t *)(p + 11); - const uint32_t timestamp = (isWithTime == false ? 0 : *(uint32_t *)(p + 15)); - const uint32_t versionMask = (isWithVersion == false ? 0 : *(uint32_t *)(p + msgSize - 4)); + const uint32_t exNonce2 = *(uint32_t *)(p + 7); + const uint32_t nonce = *(uint32_t *)(p + 11); + const uint32_t timestamp = (isWithTime == false ? 0 : *(uint32_t *)(p + 15)); + const uint32_t versionMask = + (isWithVersion == false ? 0 : *(uint32_t *)(p + msgSize - 4)); - const uint64_t fullExtraNonce2 = ((uint64_t)sessionId << 32) | (uint64_t)exNonce2; + const uint64_t fullExtraNonce2 = + ((uint64_t)sessionId << 32) | (uint64_t)exNonce2; // debug - DLOG(INFO) << Strings::Format("[agent] shortJobId: %02x, sessionId: %08x, " - "exNonce2: %016llx, nonce: %08x, time: %08x, versionMask: %08x", - shortJobId, (uint32_t)sessionId, - fullExtraNonce2, nonce, timestamp, versionMask); - - handleRequest_Submit("null", shortJobId, fullExtraNonce2, nonce, timestamp, versionMask); + DLOG(INFO) << Strings::Format( + "[agent] shortJobId: %02x, sessionId: %08x, " + "exNonce2: %016llx, nonce: %08x, time: %08x, versionMask: %08x", + shortJobId, + (uint32_t)sessionId, + fullExtraNonce2, + nonce, + timestamp, + versionMask); + + handleRequest_Submit( + "null", shortJobId, fullExtraNonce2, nonce, timestamp, versionMask); } -void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, - uint8_t shortJobId, - uint64_t extraNonce2, - uint32_t nonce, - uint32_t nTime, - uint32_t versionMask) { +void StratumMinerBitcoin::handleRequest_Submit( + const string &idStr, + uint8_t 
shortJobId, + uint64_t extraNonce2, + uint32_t nonce, + uint32_t nTime, + uint32_t versionMask) { auto &session = getSession(); auto &server = session.getServer(); auto &worker = session.getWorker(); @@ -182,11 +186,12 @@ void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, // if can't find localJob, could do nothing handleShare(idStr, StratumStatus::JOB_NOT_FOUND, 0); - LOG(INFO) << "rejected share: " << StratumStatus::toString(StratumStatus::JOB_NOT_FOUND) + LOG(INFO) << "rejected share: " + << StratumStatus::toString(StratumStatus::JOB_NOT_FOUND) << ", worker: " << worker.fullName_ << ", versionMask: " << Strings::Format("%08x", versionMask) - << ", Share(id: " << idStr << ", shortJobId: " - << (int) shortJobId << ", nTime: " << nTime << "/" << date("%F %T", nTime) << ")"; + << ", Share(id: " << idStr << ", shortJobId: " << (int)shortJobId + << ", nTime: " << nTime << "/" << date("%F %T", nTime) << ")"; return; } @@ -197,7 +202,8 @@ void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, if (exjob.get() != NULL) { // 0 means miner use stratum job's default block time - auto sjobBitcoin = std::static_pointer_cast(exjob->sjob_); + auto sjobBitcoin = + std::static_pointer_cast(exjob->sjob_); if (nTime == 0) { nTime = sjobBitcoin->nTime_; } @@ -218,7 +224,7 @@ void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, share.set_userid(worker.userId_); share.set_sharediff(iter->second); share.set_blkbits(localJob->blkBits_); - share.set_timestamp((uint64_t) time(nullptr)); + share.set_timestamp((uint64_t)time(nullptr)); share.set_height(height); share.set_nonce(nonce); share.set_versionmask(versionMask); @@ -242,17 +248,29 @@ void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, if (!localJob->addLocalShare(localShare)) { share.set_status(StratumStatus::DUPLICATE_SHARE); } else { -#ifdef USER_DEFINED_COINBASE +#ifdef USER_DEFINED_COINBASE // check block header - share.set_status(server->checkShare(share, 
session.getSessionId(), extraNonce2Hex, - nTime, nonce, versionMask, jobTarget, - worker_.fullName_, - &localJob->userCoinbaseInfo_)); + share.set_status(server->checkShare( + share, + session.getSessionId(), + extraNonce2Hex, + nTime, + nonce, + versionMask, + jobTarget, + worker_.fullName_, + &localJob->userCoinbaseInfo_)); #else // check block header - share.set_status(server.checkShare(share, session.getSessionId(), extraNonce2Hex, - nTime, nonce, versionMask, jobTarget, - worker.fullName_)); + share.set_status(server.checkShare( + share, + session.getSessionId(), + extraNonce2Hex, + nTime, + nonce, + versionMask, + jobTarget, + worker.fullName_)); #endif } @@ -260,24 +278,26 @@ void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, if (!handleShare(idStr, share.status(), share.sharediff())) { // add invalid share to counter - invalidSharesCounter_.insert((int64_t) time(nullptr), 1); + invalidSharesCounter_.insert((int64_t)time(nullptr), 1); - // log all rejected share to answer "Why the rejection rate of my miner increased?" + // log all rejected share to answer "Why the rejection rate of my miner + // increased?" 
LOG(INFO) << "rejected share: " << StratumStatus::toString(share.status()) << ", worker: " << worker.fullName_ << ", versionMask: " << Strings::Format("%08x", versionMask) << ", " << share.toString(); // check if thers is invalid share spamming - int64_t invalidSharesNum = invalidSharesCounter_.sum(time(nullptr), - INVALID_SHARE_SLIDING_WINDOWS_SIZE); + int64_t invalidSharesNum = invalidSharesCounter_.sum( + time(nullptr), INVALID_SHARE_SLIDING_WINDOWS_SIZE); // too much invalid shares, don't send them to kafka if (invalidSharesNum >= INVALID_SHARE_SLIDING_WINDOWS_MAX_LIMIT) { isSendShareToKafka = false; - LOG(INFO) << "invalid share spamming, diff: " - << share.sharediff() << ", worker: " << worker.fullName_ << ", agent: " - << clientAgent_ << ", ip: " << session.getClientIp(); + LOG(INFO) << "invalid share spamming, diff: " << share.sharediff() + << ", worker: " << worker.fullName_ + << ", agent: " << clientAgent_ + << ", ip: " << session.getClientIp(); } } @@ -286,10 +306,10 @@ void StratumMinerBitcoin::handleRequest_Submit(const string &idStr, std::string message; uint32_t size = 0; if (!share.SerializeToArrayWithVersion(message, size)) { - LOG(ERROR) << "share SerializeToBuffer failed!"<< share.toString(); + LOG(ERROR) << "share SerializeToBuffer failed!" 
<< share.toString(); return; } - server.sendShare2Kafka((const uint8_t *) message.data(), size); + server.sendShare2Kafka((const uint8_t *)message.data(), size); } } diff --git a/src/bitcoin/StratumMinerBitcoin.h b/src/bitcoin/StratumMinerBitcoin.h index aa2a2a2c3..60cde721a 100644 --- a/src/bitcoin/StratumMinerBitcoin.h +++ b/src/bitcoin/StratumMinerBitcoin.h @@ -30,30 +30,33 @@ class StratumMinerBitcoin : public StratumMinerBase { public: - StratumMinerBitcoin(StratumSessionBitcoin &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId); - - void handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) override; + StratumMinerBitcoin( + StratumSessionBitcoin &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId); + + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; void handleExMessage(const std::string &exMessage) override; private: void handleRequest_Submit(const std::string &idStr, const JsonNode &jparams); - void handleRequest_SuggestTarget(const std::string &idStr, const JsonNode &jparams); - void handleExMessage_SubmitShare(const std::string &exMessage, - const bool isWithTime, - const bool isWithVersion); - void handleRequest_Submit(const std::string &idStr, - uint8_t shortJobId, - uint64_t extraNonce2, - uint32_t nonce, - uint32_t nTime, - uint32_t versionMask); + void handleExMessage_SubmitShare( + const std::string &exMessage, + const bool isWithTime, + const bool isWithVersion); + void handleRequest_Submit( + const std::string &idStr, + uint8_t shortJobId, + uint64_t extraNonce2, + uint32_t nonce, + uint32_t nTime, + uint32_t versionMask); }; #endif // #ifndef STRATUM_MINER_BITCOIN_H_ diff --git a/src/bitcoin/StratumServerBitcoin.cc 
b/src/bitcoin/StratumServerBitcoin.cc index 21e75c8bb..0d608e0df 100644 --- a/src/bitcoin/StratumServerBitcoin.cc +++ b/src/bitcoin/StratumServerBitcoin.cc @@ -35,33 +35,34 @@ using namespace std; -//////////////////////////////////// JobRepositoryBitcoin ///////////////////////////////// -JobRepositoryBitcoin::JobRepositoryBitcoin(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerBitcoin *server) - : JobRepositoryBase(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) -{ - +//////////////////////////////////// JobRepositoryBitcoin +//////////////////////////////////// +JobRepositoryBitcoin::JobRepositoryBitcoin( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerBitcoin *server) + : JobRepositoryBase(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) { } -JobRepositoryBitcoin::~JobRepositoryBitcoin() -{ - +JobRepositoryBitcoin::~JobRepositoryBitcoin() { } shared_ptr JobRepositoryBitcoin::createStratumJob() { return std::make_shared(); } -shared_ptr JobRepositoryBitcoin::createStratumJobEx(shared_ptr sjob, bool isClean) -{ +shared_ptr JobRepositoryBitcoin::createStratumJobEx( + shared_ptr sjob, bool isClean) { return std::make_shared(sjob, isClean); } - -void JobRepositoryBitcoin::broadcastStratumJob(shared_ptr sjobBase) { +void JobRepositoryBitcoin::broadcastStratumJob( + shared_ptr sjobBase) { auto sjob = std::static_pointer_cast(sjobBase); - if(!sjob) - { - LOG(FATAL) << "JobRepositoryBitcoin::broadcastStratumJob error: cast StratumJobBitcoin failed"; + if (!sjob) { + LOG(FATAL) << "JobRepositoryBitcoin::broadcastStratumJob error: cast " + "StratumJobBitcoin failed"; return; } bool isClean = false; @@ -69,37 +70,36 @@ void JobRepositoryBitcoin::broadcastStratumJob(shared_ptr sjobBase) isClean = true; latestPrevBlockHash_ = sjob->prevHash_; LOG(INFO) << "received new height stratum job, height: " << sjob->height_ - << ", prevhash: " << 
sjob->prevHash_.ToString(); + << ", prevhash: " << sjob->prevHash_.ToString(); } bool isMergedMiningClean = sjob->isMergedMiningCleanJob_; - // - // The `clean_jobs` field should be `true` ONLY IF a new block found in Bitcoin blockchains. - // Most miner implements will never submit their previous shares if the field is `true`. - // There will be a huge loss of hashrates and earnings if the field is often `true`. - // - // There is the definition from : - // - // clean_jobs - When true, server indicates that submitting shares from previous jobs - // don't have a sense and such shares will be rejected. When this flag is set, - // miner should also drop all previous jobs. - // + // + // The `clean_jobs` field should be `true` ONLY IF a new block found in + // Bitcoin blockchains. Most miner implements will never submit their previous + // shares if the field is `true`. There will be a huge loss of hashrates and + // earnings if the field is often `true`. + // + // There is the definition from + // : + // + // clean_jobs - When true, server indicates that submitting shares from + // previous jobs don't have a sense and such shares will be rejected. When + // this flag is set, miner should also drop all previous jobs. 
+ // shared_ptr exJob(createStratumJobEx(sjob, isClean)); - { - ScopeLock sl(lock_); - if (isClean) { - // mark all jobs as stale, should do this before insert new job - for (auto it : exJobs_) { - it.second->markStale(); - } + if (isClean) { + // mark all jobs as stale, should do this before insert new job + for (auto it : exJobs_) { + it.second->markStale(); } - - // insert new job - exJobs_[sjob->jobId_] = exJob; } + // insert new job + exJobs_[sjob->jobId_] = exJob; + // if job has clean flag, call server to send job if (isClean || isMergedMiningClean) { sendMiningNotify(exJob); @@ -117,21 +117,19 @@ void JobRepositoryBitcoin::broadcastStratumJob(shared_ptr sjobBase) auto sjob1 = std::static_pointer_cast(exJob1->sjob_); auto sjob2 = std::static_pointer_cast(exJob2->sjob_); - if (exJob2->isClean_ == true && - sjob2->merkleBranch_.size() == 0 && + if (exJob2->isClean_ == true && sjob2->merkleBranch_.size() == 0 && sjob1->merkleBranch_.size() != 0) { sendMiningNotify(exJob); } } } -StratumJobExBitcoin::StratumJobExBitcoin(shared_ptr sjob, bool isClean) - : StratumJobEx(sjob, isClean) -{ +StratumJobExBitcoin::StratumJobExBitcoin( + shared_ptr sjob, bool isClean) + : StratumJobEx(sjob, isClean) { init(); } - void StratumJobExBitcoin::init() { auto sjob = std::static_pointer_cast(sjob_); string merkleBranchStr; @@ -147,55 +145,67 @@ void StratumJobExBitcoin::init() { merkleBranchStr.append("\"" + merklStr + "\","); } if (merkleBranchStr.length()) { - merkleBranchStr.resize(merkleBranchStr.length() - 1); // remove last ',' + merkleBranchStr.resize(merkleBranchStr.length() - 1); // remove last ',' } } // we don't put jobId here, session will fill with the shortJobId miningNotify1_ = "{\"id\":null,\"method\":\"mining.notify\",\"params\":[\""; - miningNotify2_ = Strings::Format("\",\"%s\",\"", - sjob->prevHashBeStr_.c_str()); + miningNotify2_ = + Strings::Format("\",\"%s\",\"", sjob->prevHashBeStr_.c_str()); // coinbase1_ may be modified when USER_DEFINED_COINBASE 
enabled, // so put it into a single variable. coinbase1_ = sjob->coinbase1_.c_str(); - miningNotify3_ = Strings::Format("\",\"%s\"" - ",[%s]" - ",\"%08x\",\"%08x\",\"%08x\",%s" - "]}\n", - sjob->coinbase2_.c_str(), - merkleBranchStr.c_str(), - sjob->nVersion_, sjob->nBits_, sjob->nTime_, - isClean_ ? "true" : "false"); + miningNotify3_ = Strings::Format( + "\",\"%s\"" + ",[%s]" + ",\"%08x\",\"%08x\",\"%08x\",%s" + "]}\n", + sjob->coinbase2_.c_str(), + merkleBranchStr.c_str(), + sjob->nVersion_, + sjob->nBits_, + sjob->nTime_, + isClean_ ? "true" : "false"); // always set clean to true, reset of them is the same with miningNotify2_ - miningNotify3Clean_ = Strings::Format("\",\"%s\"" - ",[%s]" - ",\"%08x\",\"%08x\",\"%08x\",true" - "]}\n", - sjob->coinbase2_.c_str(), - merkleBranchStr.c_str(), - sjob->nVersion_, sjob->nBits_, sjob->nTime_); - + miningNotify3Clean_ = Strings::Format( + "\",\"%s\"" + ",[%s]" + ",\"%08x\",\"%08x\",\"%08x\",true" + "]}\n", + sjob->coinbase2_.c_str(), + merkleBranchStr.c_str(), + sjob->nVersion_, + sjob->nBits_, + sjob->nTime_); } - -void StratumJobExBitcoin::generateCoinbaseTx(std::vector *coinbaseBin, - const uint32_t extraNonce1, - const string &extraNonce2Hex, - string *userCoinbaseInfo) { +void StratumJobExBitcoin::generateCoinbaseTx( + std::vector *coinbaseBin, + const uint32_t extraNonce1, + const string &extraNonce2Hex, + string *userCoinbaseInfo) { string coinbaseHex; - const string extraNonceStr = Strings::Format("%08x%s", extraNonce1, extraNonce2Hex.c_str()); + const string extraNonceStr = + Strings::Format("%08x%s", extraNonce1, extraNonce2Hex.c_str()); auto sjob = std::static_pointer_cast(sjob_); string coinbase1 = sjob->coinbase1_; #ifdef USER_DEFINED_COINBASE if (userCoinbaseInfo != nullptr) { string userCoinbaseHex; - Bin2Hex((uint8*)(*userCoinbaseInfo).c_str(), (*userCoinbaseInfo).size(), userCoinbaseHex); + Bin2Hex( + (uint8 *)(*userCoinbaseInfo).c_str(), + (*userCoinbaseInfo).size(), + userCoinbaseHex); // replace the 
last `userCoinbaseHex.size()` bytes to `userCoinbaseHex` - coinbase1.replace(coinbase1.size()-userCoinbaseHex.size(), userCoinbaseHex.size(), userCoinbaseHex); + coinbase1.replace( + coinbase1.size() - userCoinbaseHex.size(), + userCoinbaseHex.size(), + userCoinbaseHex); } #endif @@ -205,53 +215,57 @@ void StratumJobExBitcoin::generateCoinbaseTx(std::vector *coinbaseBin, Hex2Bin((const char *)coinbaseHex.c_str(), *coinbaseBin); } -void StratumJobExBitcoin::generateBlockHeader(CBlockHeader *header, - std::vector *coinbaseBin, - const uint32_t extraNonce1, - const string &extraNonce2Hex, - const vector &merkleBranch, - const uint256 &hashPrevBlock, - const uint32_t nBits, const int32_t nVersion, - const uint32_t nTime, const uint32_t nonce, - const uint32_t versionMask, - string *userCoinbaseInfo) { - generateCoinbaseTx(coinbaseBin, extraNonce1, extraNonce2Hex, userCoinbaseInfo); +void StratumJobExBitcoin::generateBlockHeader( + CBlockHeader *header, + std::vector *coinbaseBin, + const uint32_t extraNonce1, + const string &extraNonce2Hex, + const vector &merkleBranch, + const uint256 &hashPrevBlock, + const uint32_t nBits, + const int32_t nVersion, + const uint32_t nTime, + const uint32_t nonce, + const uint32_t versionMask, + string *userCoinbaseInfo) { + generateCoinbaseTx( + coinbaseBin, extraNonce1, extraNonce2Hex, userCoinbaseInfo); header->hashPrevBlock = hashPrevBlock; - header->nVersion = (nVersion ^ versionMask); - header->nBits = nBits; - header->nTime = nTime; - header->nNonce = nonce; + header->nVersion = (nVersion ^ versionMask); + header->nBits = nBits; + header->nTime = nTime; + header->nNonce = nonce; // hashMerkleRoot header->hashMerkleRoot = Hash(coinbaseBin->begin(), coinbaseBin->end()); - for (const uint256 & step : merkleBranch) { - header->hashMerkleRoot = Hash(BEGIN(header->hashMerkleRoot), - END (header->hashMerkleRoot), - BEGIN(step), - END (step)); + for (const uint256 &step : merkleBranch) { + header->hashMerkleRoot = Hash( + 
BEGIN(header->hashMerkleRoot), + END(header->hashMerkleRoot), + BEGIN(step), + END(step)); } } ////////////////////////////////// ServerBitcoin /////////////////////////////// -ServerBitcoin::ServerBitcoin(const int32_t shareAvgSeconds, const libconfig::Config &config) +ServerBitcoin::ServerBitcoin( + const int32_t shareAvgSeconds, const libconfig::Config &config) : ServerBase(shareAvgSeconds) , kafkaProducerNamecoinSolvedShare_(nullptr) - , kafkaProducerRskSolvedShare_(nullptr) -{ + , kafkaProducerRskSolvedShare_(nullptr) { // TODO: Shall we throw an error here if the relvant value does not exist? - config.lookupValue("sserver.auxpow_solved_share_topic", auxPowSolvedShareTopic_); + config.lookupValue( + "sserver.auxpow_solved_share_topic", auxPowSolvedShareTopic_); config.lookupValue("sserver.rsk_solved_share_topic", rskSolvedShareTopic_); - versionMask_ = 0u; // block version mask - if (config.exists("sserver.version_mask")) - { + versionMask_ = 0u; // block version mask + if (config.exists("sserver.version_mask")) { config.lookupValue("sserver.version_mask", versionMask_); } } -ServerBitcoin::~ServerBitcoin() -{ +ServerBitcoin::~ServerBitcoin() { if (kafkaProducerNamecoinSolvedShare_ != nullptr) { delete kafkaProducerNamecoinSolvedShare_; } @@ -264,14 +278,15 @@ uint32_t ServerBitcoin::getVersionMask() const { return versionMask_; } -bool ServerBitcoin::setupInternal(StratumServer* sserver) -{ - kafkaProducerNamecoinSolvedShare_ = new KafkaProducer(sserver->kafkaBrokers_.c_str(), - auxPowSolvedShareTopic_.c_str(), - RD_KAFKA_PARTITION_UA); - kafkaProducerRskSolvedShare_ = new KafkaProducer(sserver->kafkaBrokers_.c_str(), - rskSolvedShareTopic_.c_str(), - RD_KAFKA_PARTITION_UA); +bool ServerBitcoin::setupInternal(StratumServer *sserver) { + kafkaProducerNamecoinSolvedShare_ = new KafkaProducer( + sserver->kafkaBrokers_.c_str(), + auxPowSolvedShareTopic_.c_str(), + RD_KAFKA_PARTITION_UA); + kafkaProducerRskSolvedShare_ = new KafkaProducer( + 
sserver->kafkaBrokers_.c_str(), + rskSolvedShareTopic_.c_str(), + RD_KAFKA_PARTITION_UA); // kafkaProducerNamecoinSolvedShare_ { @@ -303,24 +318,25 @@ bool ServerBitcoin::setupInternal(StratumServer* sserver) } } - return true; } -JobRepository *ServerBitcoin::createJobRepository(const char *kafkaBrokers, - const char *consumerTopic, - const string &fileLastNotifyTime) -{ - return new JobRepositoryBitcoin(kafkaBrokers, consumerTopic, fileLastNotifyTime, this); +JobRepository *ServerBitcoin::createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) { + return new JobRepositoryBitcoin( + kafkaBrokers, consumerTopic, fileLastNotifyTime, this); } -unique_ptr ServerBitcoin::createConnection(struct bufferevent *bev, struct sockaddr *saddr, uint32_t sessionID) -{ - return boost::make_unique(*this, bev, saddr, sessionID); +unique_ptr ServerBitcoin::createConnection( + struct bufferevent *bev, struct sockaddr *saddr, uint32_t sessionID) { + return boost::make_unique( + *this, bev, saddr, sessionID); } -void ServerBitcoin::sendSolvedShare2Kafka(const FoundBlock *foundBlock, - const std::vector &coinbaseBin) { +void ServerBitcoin::sendSolvedShare2Kafka( + const FoundBlock *foundBlock, const std::vector &coinbaseBin) { // // solved share message: FoundBlock + coinbase_Tx // @@ -338,13 +354,18 @@ void ServerBitcoin::sendSolvedShare2Kafka(const FoundBlock *foundBlock, kafkaProducerSolvedShare_->produce(buf.data(), buf.size()); } -int ServerBitcoin::checkShare(const ShareBitcoin &share, - const uint32_t extraNonce1, const string &extraNonce2Hex, - const uint32_t nTime, const uint32_t nonce, - const uint32_t versionMask, - const uint256 &jobTarget, const string &workFullName, - string *userCoinbaseInfo) { - shared_ptr exJobPtrShared = GetJobRepository()->getStratumJobEx(share.jobid()); +int ServerBitcoin::checkShare( + const ShareBitcoin &share, + const uint32_t extraNonce1, + const string &extraNonce2Hex, + const uint32_t 
nTime, + const uint32_t nonce, + const uint32_t versionMask, + const uint256 &jobTarget, + const string &workFullName, + string *userCoinbaseInfo) { + shared_ptr exJobPtrShared = + GetJobRepository()->getStratumJobEx(share.jobid()); auto exJobPtr = std::static_pointer_cast(exJobPtrShared); if (exJobPtr == nullptr) { return StratumStatus::JOB_NOT_FOUND; @@ -368,19 +389,26 @@ int ServerBitcoin::checkShare(const ShareBitcoin &share, CBlockHeader header; std::vector coinbaseBin; - exJobPtr->generateBlockHeader(&header, &coinbaseBin, - extraNonce1, extraNonce2Hex, - sjob->merkleBranch_, sjob->prevHash_, - sjob->nBits_, sjob->nVersion_, nTime, nonce, - versionMask, - userCoinbaseInfo); + exJobPtr->generateBlockHeader( + &header, + &coinbaseBin, + extraNonce1, + extraNonce2Hex, + sjob->merkleBranch_, + sjob->prevHash_, + sjob->nBits_, + sjob->nVersion_, + nTime, + nonce, + versionMask, + userCoinbaseInfo); #ifdef CHAIN_TYPE_LTC - uint256 blkHash = header.GetPoWHash(); + uint256 blkHash = header.GetPoWHash(); #else uint256 blkHash = header.GetHash(); #endif - arith_uint256 bnBlockHash = UintToArith256(blkHash); + arith_uint256 bnBlockHash = UintToArith256(blkHash); arith_uint256 bnNetworkTarget = UintToArith256(sjob->networkTarget_); // @@ -391,13 +419,17 @@ int ServerBitcoin::checkShare(const ShareBitcoin &share, // build found block // FoundBlock foundBlock; - foundBlock.jobId_ = share.jobid(); + foundBlock.jobId_ = share.jobid(); foundBlock.workerId_ = share.workerhashid(); - foundBlock.userId_ = share.userid(); - foundBlock.height_ = sjob->height_; - memcpy(foundBlock.header80_, (const uint8_t *)&header, sizeof(CBlockHeader)); - snprintf(foundBlock.workerFullName_, sizeof(foundBlock.workerFullName_), - "%s", workFullName.c_str()); + foundBlock.userId_ = share.userid(); + foundBlock.height_ = sjob->height_; + memcpy( + foundBlock.header80_, (const uint8_t *)&header, sizeof(CBlockHeader)); + snprintf( + foundBlock.workerFullName_, + sizeof(foundBlock.workerFullName_), + 
"%s", + workFullName.c_str()); // send sendSolvedShare2Kafka(&foundBlock, coinbaseBin); @@ -405,38 +437,55 @@ int ServerBitcoin::checkShare(const ShareBitcoin &share, GetJobRepository()->markAllJobsAsStale(); LOG(INFO) << ">>>> found a new block: " << blkHash.ToString() - << ", jobId: " << share.jobid() << ", userId: " << share.userid() - << ", by: " << workFullName << " <<<<"; + << ", jobId: " << share.jobid() << ", userId: " << share.userid() + << ", by: " << workFullName << " <<<<"; } // print out high diff share, 2^10 = 1024 if ((bnBlockHash >> 10) <= bnNetworkTarget) { LOG(INFO) << "high diff share, blkhash: " << blkHash.ToString() - << ", diff: " << TargetToDiff(blkHash) - << ", networkDiff: " << TargetToDiff(sjob->networkTarget_) - << ", by: " << workFullName; + << ", diff: " << TargetToDiff(blkHash) + << ", networkDiff: " << TargetToDiff(sjob->networkTarget_) + << ", by: " << workFullName; } // // found new RSK block // if (!sjob->blockHashForMergedMining_.empty() && - (isSubmitInvalidBlock_ == true || bnBlockHash <= UintToArith256(sjob->rskNetworkTarget_))) { + (isSubmitInvalidBlock_ == true || + bnBlockHash <= UintToArith256(sjob->rskNetworkTarget_))) { // // build data needed to submit block to RSK // RskSolvedShareData shareData; - shareData.jobId_ = share.jobid(); + shareData.jobId_ = share.jobid(); shareData.workerId_ = share.workerhashid(); - shareData.userId_ = share.userid(); + shareData.userId_ = share.userid(); // height = matching bitcoin block height - shareData.height_ = sjob->height_; - snprintf(shareData.feesForMiner_, sizeof(shareData.feesForMiner_), "%s", sjob->feesForMiner_.c_str()); - snprintf(shareData.rpcAddress_, sizeof(shareData.rpcAddress_), "%s", sjob->rskdRpcAddress_.c_str()); - snprintf(shareData.rpcUserPwd_, sizeof(shareData.rpcUserPwd_), "%s", sjob->rskdRpcUserPwd_.c_str()); + shareData.height_ = sjob->height_; + snprintf( + shareData.feesForMiner_, + sizeof(shareData.feesForMiner_), + "%s", + sjob->feesForMiner_.c_str()); + 
snprintf( + shareData.rpcAddress_, + sizeof(shareData.rpcAddress_), + "%s", + sjob->rskdRpcAddress_.c_str()); + snprintf( + shareData.rpcUserPwd_, + sizeof(shareData.rpcUserPwd_), + "%s", + sjob->rskdRpcUserPwd_.c_str()); memcpy(shareData.header80_, (const uint8_t *)&header, sizeof(CBlockHeader)); - snprintf(shareData.workerFullName_, sizeof(shareData.workerFullName_), "%s", workFullName.c_str()); - + snprintf( + shareData.workerFullName_, + sizeof(shareData.workerFullName_), + "%s", + workFullName.c_str()); + // // send to kafka topic // @@ -457,15 +506,16 @@ int ServerBitcoin::checkShare(const ShareBitcoin &share, // log the finding // LOG(INFO) << ">>>> found a new RSK block: " << blkHash.ToString() - << ", jobId: " << share.jobid() << ", userId: " << share.userid() - << ", by: " << workFullName << " <<<<"; + << ", jobId: " << share.jobid() << ", userId: " << share.userid() + << ", by: " << workFullName << " <<<<"; } // // found namecoin block // if (sjob->nmcAuxBits_ != 0 && - (isSubmitInvalidBlock_ == true || bnBlockHash <= UintToArith256(sjob->nmcNetworkTarget_))) { + (isSubmitInvalidBlock_ == true || + bnBlockHash <= UintToArith256(sjob->nmcNetworkTarget_))) { // // build namecoin solved share message // @@ -474,34 +524,38 @@ int ServerBitcoin::checkShare(const ShareBitcoin &share, DLOG(INFO) << "blockHeaderHex: " << blockHeaderHex; string coinbaseTxHex; - Bin2Hex((const uint8_t *)coinbaseBin.data(), coinbaseBin.size(), coinbaseTxHex); + Bin2Hex( + (const uint8_t *)coinbaseBin.data(), coinbaseBin.size(), coinbaseTxHex); DLOG(INFO) << "coinbaseTxHex: " << coinbaseTxHex; - const string nmcAuxSolvedShare = Strings::Format("{\"job_id\":%" PRIu64"," - " \"aux_block_hash\":\"%s\"," - " \"block_header\":\"%s\"," - " \"coinbase_tx\":\"%s\"," - " \"rpc_addr\":\"%s\"," - " \"rpc_userpass\":\"%s\"" - "}", - share.jobid(), - sjob->nmcAuxBlockHash_.ToString().c_str(), - blockHeaderHex.c_str(), - coinbaseTxHex.c_str(), - sjob->nmcRpcAddr_.size() ? 
sjob->nmcRpcAddr_.c_str() : "", - sjob->nmcRpcUserpass_.size() ? sjob->nmcRpcUserpass_.c_str() : ""); + const string nmcAuxSolvedShare = Strings::Format( + "{\"job_id\":%" PRIu64 + "," + " \"aux_block_hash\":\"%s\"," + " \"block_header\":\"%s\"," + " \"coinbase_tx\":\"%s\"," + " \"rpc_addr\":\"%s\"," + " \"rpc_userpass\":\"%s\"" + "}", + share.jobid(), + sjob->nmcAuxBlockHash_.ToString().c_str(), + blockHeaderHex.c_str(), + coinbaseTxHex.c_str(), + sjob->nmcRpcAddr_.size() ? sjob->nmcRpcAddr_.c_str() : "", + sjob->nmcRpcUserpass_.size() ? sjob->nmcRpcUserpass_.c_str() : ""); // send found namecoin aux block to kafka - kafkaProducerNamecoinSolvedShare_->produce(nmcAuxSolvedShare.data(), - nmcAuxSolvedShare.size()); + kafkaProducerNamecoinSolvedShare_->produce( + nmcAuxSolvedShare.data(), nmcAuxSolvedShare.size()); LOG(INFO) << ">>>> found namecoin block: " << sjob->nmcHeight_ << ", " - << sjob->nmcAuxBlockHash_.ToString() - << ", jobId: " << share.jobid() << ", userId: " << share.userid() - << ", by: " << workFullName << " <<<<"; + << sjob->nmcAuxBlockHash_.ToString() + << ", jobId: " << share.jobid() << ", userId: " << share.userid() + << ", by: " << workFullName << " <<<<"; } - DLOG(INFO) << "blkHash: " << blkHash.ToString() << ", jobTarget: " - << jobTarget.ToString() << ", networkTarget: " << sjob->networkTarget_.ToString(); + DLOG(INFO) << "blkHash: " << blkHash.ToString() + << ", jobTarget: " << jobTarget.ToString() + << ", networkTarget: " << sjob->networkTarget_.ToString(); // check share diff if (isEnableSimulator_ == false && bnBlockHash > UintToArith256(jobTarget)) { diff --git a/src/bitcoin/StratumServerBitcoin.h b/src/bitcoin/StratumServerBitcoin.h index 1bfdab5e6..29c2349bd 100644 --- a/src/bitcoin/StratumServerBitcoin.h +++ b/src/bitcoin/StratumServerBitcoin.h @@ -32,8 +32,7 @@ class FoundBlock; class JobRepositoryBitcoin; class ShareBitcoin; -class ServerBitcoin : public ServerBase -{ +class ServerBitcoin : public ServerBase { private: string 
auxPowSolvedShareTopic_; string rskSolvedShareTopic_; @@ -48,47 +47,58 @@ class ServerBitcoin : public ServerBase uint32_t getVersionMask() const; - bool setupInternal(StratumServer* sserver) override; + bool setupInternal(StratumServer *sserver) override; + + unique_ptr createConnection( + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t sessionID) override; + void sendSolvedShare2Kafka( + const FoundBlock *foundBlock, const std::vector &coinbaseBin); + + int checkShare( + const ShareBitcoin &share, + const uint32_t extraNonce1, + const string &extraNonce2Hex, + const uint32_t nTime, + const uint32_t nonce, + const uint32_t versionMask, + const uint256 &jobTarget, + const string &workFullName, + string *userCoinbaseInfo = nullptr); - unique_ptr createConnection(struct bufferevent *bev, struct sockaddr *saddr, uint32_t sessionID) override; - void sendSolvedShare2Kafka(const FoundBlock *foundBlock, - const std::vector &coinbaseBin); - - int checkShare(const ShareBitcoin &share, - const uint32_t extraNonce1, const string &extraNonce2Hex, - const uint32_t nTime, const uint32_t nonce, - const uint32_t versionMask, - const uint256 &jobTarget, const string &workFullName, - string *userCoinbaseInfo = nullptr); private: - JobRepository* createJobRepository(const char *kafkaBrokers, - const char *consumerTopic, - const string &fileLastNotifyTime) override; - + JobRepository *createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) override; }; -class JobRepositoryBitcoin : public JobRepositoryBase -{ +class JobRepositoryBitcoin : public JobRepositoryBase { private: uint256 latestPrevBlockHash_; + public: - JobRepositoryBitcoin(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerBitcoin *server); + JobRepositoryBitcoin( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerBitcoin *server); virtual 
~JobRepositoryBitcoin(); virtual shared_ptr createStratumJob() override; - shared_ptr createStratumJobEx(shared_ptr sjob, bool isClean) override; + shared_ptr + createStratumJobEx(shared_ptr sjob, bool isClean) override; void broadcastStratumJob(shared_ptr sjob) override; - }; -class StratumJobExBitcoin : public StratumJobEx -{ - void generateCoinbaseTx(std::vector *coinbaseBin, - const uint32_t extraNonce1, - const string &extraNonce2Hex, - string *userCoinbaseInfo = nullptr); +class StratumJobExBitcoin : public StratumJobEx { + void generateCoinbaseTx( + std::vector *coinbaseBin, + const uint32_t extraNonce1, + const string &extraNonce2Hex, + string *userCoinbaseInfo = nullptr); public: - string miningNotify1_; string miningNotify2_; string coinbase1_; @@ -98,18 +108,20 @@ class StratumJobExBitcoin : public StratumJobEx public: StratumJobExBitcoin(shared_ptr sjob, bool isClean); - void generateBlockHeader(CBlockHeader *header, - std::vector *coinbaseBin, - const uint32_t extraNonce1, - const string &extraNonce2Hex, - const vector &merkleBranch, - const uint256 &hashPrevBlock, - const uint32_t nBits, const int32_t nVersion, - const uint32_t nTime, const uint32_t nonce, - const uint32_t versionMask, - string *userCoinbaseInfo = nullptr); + void generateBlockHeader( + CBlockHeader *header, + std::vector *coinbaseBin, + const uint32_t extraNonce1, + const string &extraNonce2Hex, + const vector &merkleBranch, + const uint256 &hashPrevBlock, + const uint32_t nBits, + const int32_t nVersion, + const uint32_t nTime, + const uint32_t nonce, + const uint32_t versionMask, + string *userCoinbaseInfo = nullptr); void init(); - }; #endif diff --git a/src/bitcoin/StratumSessionBitcoin.cc b/src/bitcoin/StratumSessionBitcoin.cc index 88f2d2875..0ea14d368 100644 --- a/src/bitcoin/StratumSessionBitcoin.cc +++ b/src/bitcoin/StratumSessionBitcoin.cc @@ -38,24 +38,29 @@ struct StratumMessageExSubmit { boost::endian::little_uint16_buf_t sessionId; }; 
-StratumSessionBitcoin::StratumSessionBitcoin(ServerBitcoin &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1) - : StratumSessionBase(server, bev, saddr, extraNonce1) - , shortJobIdIdx_(0) - , versionMask_(0) - , suggestedMinDiff_(0) { +StratumSessionBitcoin::StratumSessionBitcoin( + ServerBitcoin &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1) + : StratumSessionBase(server, bev, saddr, extraNonce1) + , shortJobIdIdx_(0) + , versionMask_(0) + , suggestedMinDiff_(0) + , suggestedDiff_(0) { } -uint16_t StratumSessionBitcoin::decodeSessionId(const std::string &exMessage) const { +uint16_t +StratumSessionBitcoin::decodeSessionId(const std::string &exMessage) const { if (exMessage.size() < (1 + 1 + 2 + 1 + 2)) return StratumMessageEx::AGENT_MAX_SESSION_ID + 1; - auto header = reinterpret_cast(exMessage.data()); + auto header = + reinterpret_cast(exMessage.data()); return header->sessionId.value(); } -void StratumSessionBitcoin::sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) { +void StratumSessionBitcoin::sendMiningNotify( + shared_ptr exJobPtr, bool isFirstJob) { auto exJob = std::static_pointer_cast(exJobPtr); if (state_ < AUTHENTICATED || exJob == nullptr) { return; @@ -66,7 +71,8 @@ void StratumSessionBitcoin::sendMiningNotify(shared_ptr exJobPtr, #ifdef USER_DEFINED_COINBASE // add the User's coinbaseInfo to the coinbase1's tail - string userCoinbaseInfo = GetServer()->userInfo_->getCoinbaseInfo(worker_.userId_); + string userCoinbaseInfo = + GetServer()->userInfo_->getCoinbaseInfo(worker_.userId_); ljob.userCoinbaseInfo_ = userCoinbaseInfo; #endif @@ -82,10 +88,11 @@ void StratumSessionBitcoin::sendMiningNotify(shared_ptr exJobPtr, // we need to send unique JobID to NiceHash Client, they have problems with // short Job ID // - const uint64_t niceHashJobId = (uint64_t) time(nullptr) * 10 + ljob.shortJobId_; - notifyStr.append(Strings::Format("% " PRIu64"", niceHashJobId)); + const 
uint64_t niceHashJobId = + (uint64_t)time(nullptr) * kMaxNumLocalJobs_ + ljob.shortJobId_; + notifyStr.append(Strings::Format("% " PRIu64 "", niceHashJobId)); } else { - notifyStr.append(Strings::Format("%u", ljob.shortJobId_)); // short jobId + notifyStr.append(Strings::Format("%u", ljob.shortJobId_)); // short jobId } // notify2 @@ -95,9 +102,15 @@ void StratumSessionBitcoin::sendMiningNotify(shared_ptr exJobPtr, #ifdef USER_DEFINED_COINBASE string userCoinbaseHex; - Bin2Hex((const uint8_t *)ljob.userCoinbaseInfo_.c_str(), ljob.userCoinbaseInfo_.size(), userCoinbaseHex); + Bin2Hex( + (const uint8_t *)ljob.userCoinbaseInfo_.c_str(), + ljob.userCoinbaseInfo_.size(), + userCoinbaseHex); // replace the last `userCoinbaseHex.size()` bytes to `userCoinbaseHex` - coinbase1.replace(coinbase1.size()-userCoinbaseHex.size(), userCoinbaseHex.size(), userCoinbaseHex); + coinbase1.replace( + coinbase1.size() - userCoinbaseHex.size(), + userCoinbaseHex.size(), + userCoinbaseHex); #endif // coinbase1 @@ -109,16 +122,17 @@ void StratumSessionBitcoin::sendMiningNotify(shared_ptr exJobPtr, else notifyStr.append(exJob->miningNotify3_); - sendData(notifyStr); // send notify string + sendData(notifyStr); // send notify string // clear localJobs_ clearLocalJobs(); } -void StratumSessionBitcoin::handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionBitcoin::handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) { // Note: "mining.suggest_target" may be called before "mining.subscribe", // and most miners will call "mining.configure" in its first request. 
// So, don't assume that any future requests will appear after @@ -126,30 +140,30 @@ void StratumSessionBitcoin::handleRequest(const std::string &idStr, if (method == "mining.subscribe") { handleRequest_Subscribe(idStr, jparams); - } - else if (method == "mining.authorize") { + } else if (method == "mining.authorize") { handleRequest_Authorize(idStr, jparams); - } - else if (method == "mining.configure") { + } else if (method == "mining.configure") { handleRequest_MiningConfigure(idStr, jparams); - } - else if (method == "agent.get_capabilities") { + } else if (method == "agent.get_capabilities") { handleRequest_AgentGetCapabilities(idStr, jparams); - } - else if (dispatcher_) { + } else if (method == "mining.suggest_target") { + handleRequest_SuggestTarget(idStr, jparams); + } else { dispatcher_->handleRequest(idStr, method, jparams, jroot); } } -void StratumSessionBitcoin::handleRequest_AgentGetCapabilities(const string &idStr, - const JsonNode &jparams) { - string s = Strings::Format("{\"id\":%s,\"result\":{\"capabilities\":" BTCAGENT_PROTOCOL_CAPABILITIES "}}\n", - idStr.c_str()); +void StratumSessionBitcoin::handleRequest_AgentGetCapabilities( + const string &idStr, const JsonNode &jparams) { + string s = Strings::Format( + "{\"id\":%s,\"result\":{\"capabilities\":" BTCAGENT_PROTOCOL_CAPABILITIES + "}}\n", + idStr.c_str()); sendData(s); } -void StratumSessionBitcoin::handleRequest_MiningConfigure(const string &idStr, - const JsonNode &jparams) { +void StratumSessionBitcoin::handleRequest_MiningConfigure( + const string &idStr, const JsonNode &jparams) { uint32_t allowedVersionMask = getServer().getVersionMask(); if (jparams.children()->size() < 2 || @@ -184,7 +198,7 @@ void StratumSessionBitcoin::handleRequest_MiningConfigure(const string &idStr, // "minimum-difficulty": true // } // } - // + // auto extensions = jparams.children()->at(0).array(); JsonNode options = jparams.children()->at(1); std::map results; @@ -194,7 +208,7 @@ void 
StratumSessionBitcoin::handleRequest_MiningConfigure(const string &idStr, continue; } const string name = ext.str(); - + //------------------------------------------------------------ if (name == "minimum-difficulty") { auto diffNode = options["minimum-difficulty.value"]; @@ -213,10 +227,11 @@ void StratumSessionBitcoin::handleRequest_MiningConfigure(const string &idStr, results["version-rolling"] = "false"; continue; } - + versionMask_ = maskNode.uint32_hex(); results["version-rolling"] = "true"; - results["version-rolling.mask"] = Strings::Format("\"%08x\"", versionMask_ & allowedVersionMask); + results["version-rolling.mask"] = + Strings::Format("\"%08x\"", versionMask_ & allowedVersionMask); } //---------------------------------------------------------- else { @@ -229,33 +244,37 @@ void StratumSessionBitcoin::handleRequest_MiningConfigure(const string &idStr, // c++ map to json object if (!results.empty()) { - auto itr=results.begin(); + auto itr = results.begin(); resultStr += "\"" + itr->first + "\":" + itr->second; - + while (++itr != results.end()) { resultStr += ",\"" + itr->first + "\":" + itr->second; } } - + // // send result of mining.configure // - string s = Strings::Format("{\"id\":%s,\"result\":{%s},\"error\":null}\n", - idStr.c_str(), resultStr.c_str()); + string s = Strings::Format( + "{\"id\":%s,\"result\":{%s},\"error\":null}\n", + idStr.c_str(), + resultStr.c_str()); sendData(s); // // mining.set_version_mask // if (versionMask_ != 0) { - s = Strings::Format("{\"id\":null,\"method\":\"mining.set_version_mask\",\"params\":[\"%08x\"]}\n", - versionMask_ & allowedVersionMask); + s = Strings::Format( + "{\"id\":null,\"method\":\"mining.set_version_mask\",\"params\":[\"%" + "08x\"]}\n", + versionMask_ & allowedVersionMask); sendData(s); } } -void StratumSessionBitcoin::handleRequest_Subscribe(const string &idStr, - const JsonNode &jparams) { +void StratumSessionBitcoin::handleRequest_Subscribe( + const string &idStr, const JsonNode &jparams) { if 
(state_ != CONNECTED) { responseError(idStr, StratumStatus::UNKNOWN); return; @@ -264,29 +283,33 @@ void StratumSessionBitcoin::handleRequest_Subscribe(const string &idStr, #ifdef WORK_WITH_STRATUM_SWITCHER // - // For working with StratumSwitcher, the ExtraNonce1 must be provided as param 2. + // For working with StratumSwitcher, the ExtraNonce1 must be provided as + // param 2. // // params[0] = client version [require] // params[1] = session id / ExtraNonce1 [require] // params[2] = miner's real IP (unit32) [optional] // // StratumSwitcher request eg.: - // {"id": 1, "method": "mining.subscribe", "params": ["StratumSwitcher/0.1", "01ad557d", 203569230]} - // 203569230 -> 12.34.56.78 + // {"id": 1, "method": "mining.subscribe", "params": ["StratumSwitcher/0.1", + // "01ad557d", 203569230]} 203569230 -> 12.34.56.78 // if (jparams.children()->size() < 2) { responseError(idStr, StratumStatus::CLIENT_IS_NOT_SWITCHER); LOG(ERROR) << "A non-switcher subscribe request is detected and rejected."; - LOG(ERROR) << "Cmake option POOL__WORK_WITH_STRATUM_SWITCHER enabled, you can only connect to the sserver via a stratum switcher."; + LOG(ERROR) << "Cmake option POOL__WORK_WITH_STRATUM_SWITCHER enabled, you " + "can only connect to the sserver via a stratum switcher."; return; } state_ = SUBSCRIBED; - setClientAgent(jparams.children()->at(0).str().substr(0, 30)); // 30 is max len + setClientAgent( + jparams.children()->at(0).str().substr(0, 30)); // 30 is max len - string extraNonce1Str = jparams.children()->at(1).str().substr(0, 8); // 8 is max len + string extraNonce1Str = + jparams.children()->at(1).str().substr(0, 8); // 8 is max len sscanf(extraNonce1Str.c_str(), "%x", &extraNonce1_); // convert hex to int // receive miner's IP from stratumSwitcher @@ -297,7 +320,8 @@ void StratumSessionBitcoin::handleRequest_Subscribe(const string &idStr, clientIp_.resize(INET_ADDRSTRLEN); struct in_addr addr; addr.s_addr = clientIpInt_; - clientIp_ = inet_ntop(AF_INET, &addr, (char 
*)clientIp_.data(), (socklen_t)clientIp_.size()); + clientIp_ = inet_ntop( + AF_INET, &addr, (char *)clientIp_.data(), (socklen_t)clientIp_.size()); LOG(INFO) << "client real IP: " << clientIp_; } @@ -310,28 +334,37 @@ void StratumSessionBitcoin::handleRequest_Subscribe(const string &idStr, // params[1] = session id of pool [optional] // // client request eg.: - // {"id": 1, "method": "mining.subscribe", "params": ["bfgminer/4.4.0-32-gac4e9b3", "01ad557d"]} + // {"id": 1, "method": "mining.subscribe", "params": + // ["bfgminer/4.4.0-32-gac4e9b3", "01ad557d"]} // if (jparams.children()->size() >= 1) { - setClientAgent(jparams.children()->at(0).str().substr(0, 30)); // 30 is max len + setClientAgent( + jparams.children()->at(0).str().substr(0, 30)); // 30 is max len } #endif // WORK_WITH_STRATUM_SWITCHER - - // result[0] = 2-tuple with name of subscribed notification and subscription ID. - // Theoretically it may be used for unsubscribing, but obviously miners won't use it. + // result[0] = 2-tuple with name of subscribed notification and subscription + // ID. + // Theoretically it may be used for unsubscribing, but obviously + // miners won't use it. // result[1] = ExtraNonce1, used for building the coinbase. 
- // result[2] = Extranonce2_size, the number of bytes that the miner users for its ExtraNonce2 counter + // result[2] = Extranonce2_size, the number of bytes that the miner users for + // its ExtraNonce2 counter assert(StratumMiner::kExtraNonce2Size_ == 8); - auto s = Strings::Format("{\"id\":%s,\"result\":[[[\"mining.set_difficulty\",\"%08x\"]" - ",[\"mining.notify\",\"%08x\"]],\"%08x\",%d],\"error\":null}\n", - idStr.c_str(), extraNonce1_, extraNonce1_, extraNonce1_, StratumMiner::kExtraNonce2Size_); + auto s = Strings::Format( + "{\"id\":%s,\"result\":[[[\"mining.set_difficulty\",\"%08x\"]" + ",[\"mining.notify\",\"%08x\"]],\"%08x\",%d],\"error\":null}\n", + idStr.c_str(), + extraNonce1_, + extraNonce1_, + extraNonce1_, + StratumMiner::kExtraNonce2Size_); sendData(s); } -void StratumSessionBitcoin::handleRequest_Authorize(const string &idStr, - const JsonNode &jparams) { +void StratumSessionBitcoin::handleRequest_Authorize( + const string &idStr, const JsonNode &jparams) { if (state_ != SUBSCRIBED) { responseError(idStr, StratumStatus::NOT_SUBSCRIBED); return; @@ -340,9 +373,9 @@ void StratumSessionBitcoin::handleRequest_Authorize(const string &idStr, // // params[0] = user[.worker] // params[1] = password - // eg. {"params": ["slush.miner1", "password"], "id": 2, "method": "mining.authorize"} - // the password may be omitted. - // eg. {"params": ["slush.miner1"], "id": 2, "method": "mining.authorize"} + // eg. {"params": ["slush.miner1", "password"], "id": 2, "method": + // "mining.authorize"} the password may be omitted. eg. 
{"params": + // ["slush.miner1"], "id": 2, "method": "mining.authorize"} // if (jparams.children()->size() < 1) { responseError(idStr, StratumStatus::INVALID_USERNAME); @@ -360,6 +393,23 @@ void StratumSessionBitcoin::handleRequest_Authorize(const string &idStr, return; } +void StratumSessionBitcoin::handleRequest_SuggestTarget( + const string &idStr, const JsonNode &jparams) { + if (state_ != CONNECTED) { + responseError(idStr, StratumStatus::ILLEGAL_METHOD); + return; // suggest should be call before subscribe + } + + if (jparams.children()->size() == 0) { + responseError(idStr, StratumStatus::ILLEGAL_PARARMS); + return; + } + + suggestedDiff_ = + formatDifficulty(TargetToDiff(jparams.children()->at(0).str())); + responseTrue(idStr); +} + void StratumSessionBitcoin::logAuthorizeResult(bool success) { if (success) { LOG(INFO) << "authorize success, userId: " << worker_.userId_ @@ -368,8 +418,7 @@ void StratumSessionBitcoin::logAuthorizeResult(bool success) { << ", versionMask: " << Strings::Format("%08x", versionMask_) << ", clientAgent: " << clientAgent_ << ", clientIp: " << clientIp_; - } - else { + } else { LOG(WARNING) << "authorize failed, workerName:" << worker_.fullName_ << ", versionMask: " << Strings::Format("%08x", versionMask_) << ", clientAgent: " << clientAgent_ @@ -378,57 +427,65 @@ void StratumSessionBitcoin::logAuthorizeResult(bool success) { } string StratumSessionBitcoin::getMinerInfoJson(const string &type) { - return Strings::Format("{\"created_at\":\"%s\"," - "\"type\":\"%s\"," - "\"content\":{" - "\"user_id\":%d,\"user_name\":\"%s\"," - "\"worker_id\":%" PRId64 ",\"worker_name\":\"%s\"," - "\"client_agent\":\"%s\",\"ip\":\"%s\"," - "\"session_id\":\"%08x\",\"version_mask\":\"%08x\"" - "}}", - date("%F %T").c_str(), - type.c_str(), - worker_.userId_, worker_.userName_.c_str(), - worker_.workerHashId_, worker_.workerName_.c_str(), - clientAgent_.c_str(), clientIp_.c_str(), - extraNonce1_, versionMask_); + return Strings::Format( + 
"{\"created_at\":\"%s\"," + "\"type\":\"%s\"," + "\"content\":{" + "\"user_id\":%d,\"user_name\":\"%s\"," + "\"worker_id\":%" PRId64 + ",\"worker_name\":\"%s\"," + "\"client_agent\":\"%s\",\"ip\":\"%s\"," + "\"session_id\":\"%08x\",\"version_mask\":\"%08x\"" + "}}", + date("%F %T").c_str(), + type.c_str(), + worker_.userId_, + worker_.userName_.c_str(), + worker_.workerHashId_, + worker_.workerName_.c_str(), + clientAgent_.c_str(), + clientIp_.c_str(), + extraNonce1_, + versionMask_); } unique_ptr StratumSessionBitcoin::createDispatcher() { if (isAgentClient_) { return boost::make_unique( - *this, - *getServer().defaultDifficultyController_); + *this, *getServer().defaultDifficultyController_); } else { return boost::make_unique( - *this, - createMiner(clientAgent_, - worker_.workerName_, - worker_.workerHashId_)); + *this, + createMiner(clientAgent_, worker_.workerName_, worker_.workerHashId_)); } } uint8_t StratumSessionBitcoin::allocShortJobId() { // return range: [0, 9] - if (shortJobIdIdx_ >= 10) { + if (shortJobIdIdx_ >= kMaxNumLocalJobs_) { shortJobIdIdx_ = 0; } return shortJobIdIdx_++; } -unique_ptr StratumSessionBitcoin::createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) { +unique_ptr StratumSessionBitcoin::createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) { auto miner = boost::make_unique( - *this, - *getServer().defaultDifficultyController_, - clientAgent, - workerName, - workerId); + *this, + *getServer().defaultDifficultyController_, + clientAgent, + workerName, + workerId); if (suggestedMinDiff_ != 0) { miner->setMinDiff(suggestedMinDiff_); } + if (suggestedDiff_ != 0) { + miner->resetCurDiff(suggestedDiff_); + } + return miner; } diff --git a/src/bitcoin/StratumSessionBitcoin.h b/src/bitcoin/StratumSessionBitcoin.h index 7639c2837..59530878d 100644 --- a/src/bitcoin/StratumSessionBitcoin.h +++ b/src/bitcoin/StratumSessionBitcoin.h @@ -31,38 +31,52 @@ class 
StratumSessionBitcoin : public StratumSessionBase { public: - StratumSessionBitcoin(ServerBitcoin &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1); + StratumSessionBitcoin( + ServerBitcoin &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1); uint16_t decodeSessionId(const std::string &exMessage) const override; - void sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; + void + sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; protected: - void handleRequest(const std::string &idStr, const std::string &method, - const JsonNode &jparams, const JsonNode &jroot) override; - void handleRequest_MiningConfigure(const string &idStr, const JsonNode &jparams); - void handleRequest_Subscribe(const std::string &idStr, const JsonNode &jparams); - void handleRequest_Authorize(const std::string &idStr, const JsonNode &jparams); + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; + void + handleRequest_MiningConfigure(const string &idStr, const JsonNode &jparams); + void + handleRequest_Subscribe(const std::string &idStr, const JsonNode &jparams); + void + handleRequest_Authorize(const std::string &idStr, const JsonNode &jparams); // request from BTCAgent - void handleRequest_AgentGetCapabilities(const string &idStr, const JsonNode &jparams); + void handleRequest_AgentGetCapabilities( + const string &idStr, const JsonNode &jparams); + void handleRequest_SuggestTarget( + const std::string &idStr, const JsonNode &jparams); void logAuthorizeResult(bool success) override; string getMinerInfoJson(const string &type) override; std::unique_ptr createDispatcher() override; + public: - std::unique_ptr createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) override; + std::unique_ptr createMiner( + const std::string &clientAgent, + const std::string &workerName, + 
int64_t workerId) override; private: uint8_t allocShortJobId(); uint8_t shortJobIdIdx_; - + uint32_t versionMask_; // version mask that the miner wants uint64_t suggestedMinDiff_; // min difficulty that the miner wants + uint64_t suggestedDiff_; // difficulty that the miner wants }; -#endif // #ifndef STRATUM_SESSION_BITCOIN_H_ +#endif // #ifndef STRATUM_SESSION_BITCOIN_H_ diff --git a/src/bitcoin/WatcherBitcoin.cc b/src/bitcoin/WatcherBitcoin.cc index 6b5b78219..bafa56368 100644 --- a/src/bitcoin/WatcherBitcoin.cc +++ b/src/bitcoin/WatcherBitcoin.cc @@ -30,95 +30,99 @@ #include #include - // // input : 89c2f63dfb970e5638aa66ae3b7404a8a9914ad80328e9fe0000000000000000 // output : 00000000000000000328e9fea9914ad83b7404a838aa66aefb970e5689c2f63d -static -string convertPrevHash(const string &prevHash) { +static string convertPrevHash(const string &prevHash) { assert(prevHash.length() == 64); string hash; for (int i = 7; i >= 0; i--) { - uint32_t v = (uint32_t)strtoul(prevHash.substr(i*8, 8).c_str(), nullptr, 16); + uint32_t v = + (uint32_t)strtoul(prevHash.substr(i * 8, 8).c_str(), nullptr, 16); hash.append(Strings::Format("%08x", v)); } return hash; } - - ///////////////////////////////// ClientContainer ////////////////////////////// -ClientContainerBitcoin::ClientContainerBitcoin(const string &kafkaBrokers, const string &jobTopic, const string &gbtTopic, - bool disableChecking) +ClientContainerBitcoin::ClientContainerBitcoin( + const string &kafkaBrokers, + const string &jobTopic, + const string &gbtTopic, + bool disableChecking) : ClientContainer(kafkaBrokers, jobTopic, gbtTopic, disableChecking) - , poolStratumJob_(nullptr) -{ + , poolStratumJob_(nullptr) { } -ClientContainerBitcoin::~ClientContainerBitcoin() -{ +ClientContainerBitcoin::~ClientContainerBitcoin() { } -boost::shared_lock ClientContainerBitcoin::getPoolStratumJobReadLock() { +boost::shared_lock +ClientContainerBitcoin::getPoolStratumJobReadLock() { return boost::shared_lock(stratumJobMutex_); } 
-const shared_ptr ClientContainerBitcoin::getPoolStratumJob() { +const shared_ptr +ClientContainerBitcoin::getPoolStratumJob() { return poolStratumJob_; } -PoolWatchClient* ClientContainerBitcoin::createPoolWatchClient( - struct event_base *base, const string &poolName, const string &poolHost, - const int16_t poolPort, const string &workerName) -{ - return new PoolWatchClientBitcoin(base, this, disableChecking_, - poolName, poolHost, poolPort, workerName); +PoolWatchClient *ClientContainerBitcoin::createPoolWatchClient( + struct event_base *base, + const string &poolName, + const string &poolHost, + const int16_t poolPort, + const string &workerName) { + return new PoolWatchClientBitcoin( + base, this, disableChecking_, poolName, poolHost, poolPort, workerName); } -void ClientContainerBitcoin::consumeStratumJobInternal(const string& str) -{ - shared_ptr sjob = std::make_shared(); - bool res = sjob->unserializeFromJson((const char *)str.data(), str.size()); - if (res == false) { - LOG(ERROR) << "unserialize stratum job fail"; - return; - } +void ClientContainerBitcoin::consumeStratumJobInternal(const string &str) { + shared_ptr sjob = std::make_shared(); + bool res = sjob->unserializeFromJson((const char *)str.data(), str.size()); + if (res == false) { + LOG(ERROR) << "unserialize stratum job fail"; + return; + } - // make sure the job is not expired. - if (jobId2Time(sjob->jobId_) + 60 < time(nullptr)) { - LOG(ERROR) << "too large delay from kafka to receive topic 'StratumJob'"; - return; - } + // make sure the job is not expired. 
+ if (jobId2Time(sjob->jobId_) + 60 < time(nullptr)) { + LOG(ERROR) << "too large delay from kafka to receive topic 'StratumJob'"; + return; + } - LOG(INFO) << "[POOL] stratum job received, height: " << sjob->height_ - << ", prevhash: " << sjob->prevHash_.ToString() - << ", nBits: " << sjob->nBits_; + LOG(INFO) << "[POOL] stratum job received, height: " << sjob->height_ + << ", prevhash: " << sjob->prevHash_.ToString() + << ", nBits: " << sjob->nBits_; - { - // get a write lock before change this->poolStratumJob_ - // it will unlock by itself in destructor. - boost::unique_lock writeLock(stratumJobMutex_); + { + // get a write lock before change this->poolStratumJob_ + // it will unlock by itself in destructor. + boost::unique_lock writeLock(stratumJobMutex_); - uint256 oldPrevHash; + uint256 oldPrevHash; - if (poolStratumJob_ != nullptr) { - oldPrevHash = poolStratumJob_->prevHash_; - } + if (poolStratumJob_ != nullptr) { + oldPrevHash = poolStratumJob_->prevHash_; + } - poolStratumJob_ = sjob; + poolStratumJob_ = sjob; - if (oldPrevHash != sjob->prevHash_) { - LOG(INFO) << "[POOL] prev block changed, height: " << sjob->height_ - << ", prevhash: " << sjob->prevHash_.ToString() - << ", nBits: " << sjob->nBits_; - } + if (oldPrevHash != sjob->prevHash_) { + LOG(INFO) << "[POOL] prev block changed, height: " << sjob->height_ + << ", prevhash: " << sjob->prevHash_.ToString() + << ", nBits: " << sjob->nBits_; } + } } -bool ClientContainerBitcoin::sendEmptyGBT(const string &poolName, - int32_t blockHeight, uint32_t nBits, - const string &blockPrevHash, - uint32_t blockTime, uint32_t blockVersion) { +bool ClientContainerBitcoin::sendEmptyGBT( + const string &poolName, + int32_t blockHeight, + uint32_t nBits, + const string &blockPrevHash, + uint32_t blockTime, + uint32_t blockVersion) { // generate empty GBT string gbt; @@ -126,28 +130,30 @@ bool ClientContainerBitcoin::sendEmptyGBT(const string &poolName, gbt += Strings::Format("\"previousblockhash\":\"%s\"", 
blockPrevHash.c_str()); gbt += Strings::Format(",\"height\":%d", blockHeight); - const CChainParams& chainparams = Params(); - gbt += Strings::Format(",\"coinbasevalue\":%" PRId64"", - GetBlockReward(blockHeight, chainparams.GetConsensus())); + const CChainParams &chainparams = Params(); + gbt += Strings::Format( + ",\"coinbasevalue\":%" PRId64 "", + GetBlockReward(blockHeight, chainparams.GetConsensus())); gbt += Strings::Format(",\"bits\":\"%08x\"", nBits); - const uint32_t minTime = blockTime - 60*10; // just set 10 mins ago - gbt += Strings::Format(",\"mintime\":%" PRIu32"", minTime); - gbt += Strings::Format(",\"curtime\":%" PRIu32"", blockTime); - gbt += Strings::Format(",\"version\":%" PRIu32"", blockVersion); - gbt += Strings::Format(",\"transactions\":[]"); // empty transactions + const uint32_t minTime = blockTime - 60 * 10; // just set 10 mins ago + gbt += Strings::Format(",\"mintime\":%" PRIu32 "", minTime); + gbt += Strings::Format(",\"curtime\":%" PRIu32 "", blockTime); + gbt += Strings::Format(",\"version\":%" PRIu32 "", blockVersion); + gbt += Strings::Format(",\"transactions\":[]"); // empty transactions gbt += Strings::Format("}}"); const uint256 gbtHash = Hash(gbt.begin(), gbt.end()); - string sjob = Strings::Format("{\"created_at_ts\":%u," - "\"block_template_base64\":\"%s\"," - "\"gbthash\":\"%s\"," - "\"from_pool\":\"%s\"}", - (uint32_t)time(nullptr), - EncodeBase64(gbt).c_str(), - gbtHash.ToString().c_str(), - poolName.c_str()); + string sjob = Strings::Format( + "{\"created_at_ts\":%u," + "\"block_template_base64\":\"%s\"," + "\"gbthash\":\"%s\"," + "\"from_pool\":\"%s\"}", + (uint32_t)time(nullptr), + EncodeBase64(gbt).c_str(), + gbtHash.ToString().c_str(), + poolName.c_str()); // submit to Kafka kafkaProducer_.produce(sjob.c_str(), sjob.length()); @@ -158,31 +164,41 @@ bool ClientContainerBitcoin::sendEmptyGBT(const string &poolName, return true; } -string ClientContainerBitcoin::createOnConnectedReplyString() const -{ - string s = 
Strings::Format("{\"id\":1,\"method\":\"mining.subscribe\"" - ",\"params\":[\"%s\"]}\n", BTCCOM_WATCHER_AGENT); +string ClientContainerBitcoin::createOnConnectedReplyString() const { + string s = Strings::Format( + "{\"id\":1,\"method\":\"mining.subscribe\"" + ",\"params\":[\"%s\"]}\n", + BTCCOM_WATCHER_AGENT); return s; } ///////////////////////////////// PoolWatchClient ////////////////////////////// -PoolWatchClientBitcoin::PoolWatchClientBitcoin(struct event_base *base, ClientContainerBitcoin *container, - bool disableChecking, - const string &poolName, - const string &poolHost, const int16_t poolPort, - const string &workerName) - : PoolWatchClient(base, container, disableChecking, poolName, poolHost, poolPort, workerName) - , extraNonce1_(0), extraNonce2Size_(0) -{ +PoolWatchClientBitcoin::PoolWatchClientBitcoin( + struct event_base *base, + ClientContainerBitcoin *container, + bool disableChecking, + const string &poolName, + const string &poolHost, + const int16_t poolPort, + const string &workerName) + : PoolWatchClient( + base, + container, + disableChecking, + poolName, + poolHost, + poolPort, + workerName) + , extraNonce1_(0) + , extraNonce2Size_(0) { } -PoolWatchClientBitcoin::~PoolWatchClientBitcoin() -{ +PoolWatchClientBitcoin::~PoolWatchClientBitcoin() { } - void PoolWatchClientBitcoin::handleStratumMessage(const string &line) { - DLOG(INFO) << "<" << poolName_ << "> UpPoolWatchClient recv(" << line.size() << "): " << line; + DLOG(INFO) << "<" << poolName_ << "> UpPoolWatchClient recv(" << line.size() + << "): " << line; auto containerBitcoin = GetContainerBitcoin(); @@ -192,45 +208,47 @@ void PoolWatchClientBitcoin::handleStratumMessage(const string &line) { return; } JsonNode jresult = jnode["result"]; - JsonNode jerror = jnode["error"]; + JsonNode jerror = jnode["error"]; JsonNode jmethod = jnode["method"]; if (jmethod.type() == Utilities::JS::type::Str) { - JsonNode jparams = jnode["params"]; + JsonNode jparams = jnode["params"]; auto jparamsArr 
= jparams.array(); if (jmethod.str() == "mining.notify") { const string prevHash = convertPrevHash(jparamsArr[1].str()); if (lastPrevBlockHash_.empty()) { - lastPrevBlockHash_ = prevHash; // first set prev block hash + lastPrevBlockHash_ = prevHash; // first set prev block hash } // stratum job prev block hash changed if (lastPrevBlockHash_ != prevHash) { // block height in coinbase (BIP34) - const int32_t blockHeight = getBlockHeightFromCoinbase(jparamsArr[2].str()); - + const int32_t blockHeight = + getBlockHeightFromCoinbase(jparamsArr[2].str()); + // nBits, the encoded form of network target - const uint32_t nBits = jparamsArr[6].uint32_hex(); + const uint32_t nBits = jparamsArr[6].uint32_hex(); // only for display, it will be replaced by current system time - uint32_t blockTime = jparamsArr[7].uint32_hex(); + uint32_t blockTime = jparamsArr[7].uint32_hex(); - // only for display, it will be replaced by current poolStratumJob's nVersion - uint32_t nVersion = jparamsArr[5].uint32_hex(); + // only for display, it will be replaced by current poolStratumJob's + // nVersion + uint32_t nVersion = jparamsArr[5].uint32_hex(); lastPrevBlockHash_ = prevHash; - LOG(INFO) << "<" << poolName_ << "> prev block changed, height: " << blockHeight - << ", prev_hash: " << prevHash - << ", block_time: " << blockTime - << ", nBits: " << nBits - << ", nVersion: " << nVersion; + LOG(INFO) << "<" << poolName_ + << "> prev block changed, height: " << blockHeight + << ", prev_hash: " << prevHash + << ", block_time: " << blockTime << ", nBits: " << nBits + << ", nVersion: " << nVersion; ////////////////////////////////////////////////////////////////////////// // To ensure the external job is not deviation from the blockchain. - // + // // eg. a Bitcoin pool may receive a Bitcoin Cash job from a external // stratum server, because the stratum server is automatic switched // between Bitcoin and Bitcoin Cash depending on profit. 
@@ -239,63 +257,72 @@ void PoolWatchClientBitcoin::handleStratumMessage(const string &line) { // get a read lock before lookup this->poolStratumJob_ // it will unlock by itself in destructor. auto readLock = containerBitcoin->getPoolStratumJobReadLock(); - const shared_ptr poolStratumJob = containerBitcoin->getPoolStratumJob(); + const shared_ptr poolStratumJob = + containerBitcoin->getPoolStratumJob(); if (poolStratumJob == nullptr) { - LOG(WARNING) << "<" << poolName_ << "> discard the job: pool stratum job is empty"; + LOG(WARNING) << "<" << poolName_ + << "> discard the job: pool stratum job is empty"; return; } if (blockHeight == poolStratumJob->height_) { - LOG(INFO) << "<" << poolName_ << "> discard the job: height is same as pool." - << " pool height: " << poolStratumJob->height_ - << ", the job height: " << blockHeight; + LOG(INFO) << "<" << poolName_ + << "> discard the job: height is same as pool." + << " pool height: " << poolStratumJob->height_ + << ", the job height: " << blockHeight; return; } if (blockHeight != poolStratumJob->height_ + 1) { - LOG(WARNING) << "<" << poolName_ << "> discard the job: height jumping too much." - << " pool height: " << poolStratumJob->height_ - << ", the job height: " << blockHeight; + LOG(WARNING) << "<" << poolName_ + << "> discard the job: height jumping too much." + << " pool height: " << poolStratumJob->height_ + << ", the job height: " << blockHeight; return; } #ifdef CHAIN_TYPE_BCH // BCH adjusts the difficulty in each block, - // its DAA algorithm will produce a difficulty change between 0.5 and 2 times. + // its DAA algorithm will produce a difficulty change between 0.5 and + // 2 times. // @see double poolDiff, jobDiff; BitsToDifficulty(poolStratumJob->nBits_, &poolDiff); BitsToDifficulty(nBits, &jobDiff); double multiple = jobDiff / poolDiff; if (multiple < 0.5 || multiple > 2.0) { - LOG(WARNING) << "<" << poolName_ << "> discard the job: difficulty changes too much." 
- << " pool diff: " << poolDiff << " (" << poolStratumJob->nBits_ << ")" + LOG(WARNING) << "<" << poolName_ + << "> discard the job: difficulty changes too much." + << " pool diff: " << poolDiff << " (" + << poolStratumJob->nBits_ << ")" << ", the job diff: " << jobDiff << " (" << nBits << ", = " << multiple << "x pool diff)"; return; } #else - // Except for BCH, other blockchains do not adjust the difficulty in each block. + // Except for BCH, other blockchains do not adjust the difficulty in + // each block. if (nBits != poolStratumJob->nBits_) { - LOG(WARNING) << "<" << poolName_ << "> discard the job: nBits different from pool job." + LOG(WARNING) << "<" << poolName_ + << "> discard the job: nBits different from pool job." << " pool nBits: " << poolStratumJob->nBits_ << ", the job nBits: " << nBits; return; } #endif - // the block time from other pool may have a deviation with the current time. - // so replaced it by current system time. + // the block time from other pool may have a deviation with the + // current time. so replaced it by current system time. blockTime = (uint32_t)time(nullptr); - // the nVersion from other pool may have some flags that we don't want. - // so replaced it by current poolStratumJob's. + // the nVersion from other pool may have some flags that we don't + // want. so replaced it by current poolStratumJob's. 
nVersion = poolStratumJob->nVersion_; } - containerBitcoin->sendEmptyGBT(poolName_, blockHeight, nBits, prevHash, blockTime, nVersion); - + containerBitcoin->sendEmptyGBT( + poolName_, blockHeight, nBits, prevHash, blockTime, nVersion); } } @@ -306,10 +333,10 @@ void PoolWatchClientBitcoin::handleStratumMessage(const string &line) { // // {"error": null, "id": 2, "result": true} // - if (jerror.type() != Utilities::JS::type::Null || + if (jerror.type() != Utilities::JS::type::Null || jresult.type() != Utilities::JS::type::Bool || jresult.boolean() != true) { - LOG(ERROR) << poolName_ << " auth fail"; + LOG(ERROR) << poolName_ << " auth fail"; } return; } @@ -320,27 +347,30 @@ void PoolWatchClientBitcoin::handleStratumMessage(const string &line) { // ["mining.notify","01000002"]],"01000002",8],"error":null} // if (jerror.type() != Utilities::JS::type::Null) { - LOG(ERROR) << "<" << poolName_ << "> json result is null, err: " << jerror.str(); + LOG(ERROR) << "<" << poolName_ + << "> json result is null, err: " << jerror.str(); return; } std::vector resArr = jresult.array(); if (resArr.size() < 3) { - LOG(ERROR) << "<" << poolName_ << "> result element's number is less than 3: " << line; + LOG(ERROR) << "<" << poolName_ + << "> result element's number is less than 3: " << line; return; } - extraNonce1_ = resArr[1].uint32_hex(); + extraNonce1_ = resArr[1].uint32_hex(); extraNonce2Size_ = resArr[2].uint32(); LOG(INFO) << "<" << poolName_ << "> extraNonce1: " << extraNonce1_ - << ", extraNonce2 Size: " << extraNonce2Size_; + << ", extraNonce2 Size: " << extraNonce2Size_; // subscribe successful state_ = SUBSCRIBED; // do mining.authorize - string s = Strings::Format("{\"id\": 1, \"method\": \"mining.authorize\"," - "\"params\": [\"%s\", \"\"]}\n", - workerName_.c_str()); + string s = Strings::Format( + "{\"id\": 1, \"method\": \"mining.authorize\"," + "\"params\": [\"%s\", \"\"]}\n", + workerName_.c_str()); sendData(s); return; } @@ -348,7 +378,8 @@ void 
PoolWatchClientBitcoin::handleStratumMessage(const string &line) { if (state_ == SUBSCRIBED && jresult.boolean() == true) { // authorize successful state_ = AUTHENTICATED; - LOG(INFO) << "<" << poolName_ << "> auth success, name: \"" << workerName_ << "\""; + LOG(INFO) << "<" << poolName_ << "> auth success, name: \"" << workerName_ + << "\""; return; } } diff --git a/src/bitcoin/WatcherBitcoin.h b/src/bitcoin/WatcherBitcoin.h index a5317196a..a28180dfc 100644 --- a/src/bitcoin/WatcherBitcoin.h +++ b/src/bitcoin/WatcherBitcoin.h @@ -27,38 +27,43 @@ #include "Watcher.h" #include "StratumBitcoin.h" - ///////////////////////////////// ClientContainer ////////////////////////////// -class ClientContainerBitcoin : public ClientContainer -{ +class ClientContainerBitcoin : public ClientContainer { boost::shared_mutex stratumJobMutex_; - shared_ptr poolStratumJob_; // the last stratum job from the pool itself + shared_ptr + poolStratumJob_; // the last stratum job from the pool itself protected: - void consumeStratumJobInternal(const string& str) override; + void consumeStratumJobInternal(const string &str) override; string createOnConnectedReplyString() const override; - PoolWatchClient* createPoolWatchClient( - struct event_base *base, - const string &poolName, const string &poolHost, - const int16_t poolPort, const string &workerName) override; + PoolWatchClient *createPoolWatchClient( + struct event_base *base, + const string &poolName, + const string &poolHost, + const int16_t poolPort, + const string &workerName) override; public: - ClientContainerBitcoin(const string &kafkaBrokers, const string &jobTopic, const string &gbtTopic, - bool disableChecking); + ClientContainerBitcoin( + const string &kafkaBrokers, + const string &jobTopic, + const string &gbtTopic, + bool disableChecking); ~ClientContainerBitcoin(); - bool sendEmptyGBT(const string &poolName, - int32_t blockHeight, uint32_t nBits, - const string &blockPrevHash, - uint32_t blockTime, uint32_t 
blockVersion); + bool sendEmptyGBT( + const string &poolName, + int32_t blockHeight, + uint32_t nBits, + const string &blockPrevHash, + uint32_t blockTime, + uint32_t blockVersion); - const shared_ptr getPoolStratumJob(); + const shared_ptr getPoolStratumJob(); boost::shared_lock getPoolStratumJobReadLock(); }; - ///////////////////////////////// PoolWatchClient ////////////////////////////// -class PoolWatchClientBitcoin : public PoolWatchClient -{ +class PoolWatchClientBitcoin : public PoolWatchClient { uint32_t extraNonce1_; uint32_t extraNonce2Size_; @@ -67,14 +72,19 @@ class PoolWatchClientBitcoin : public PoolWatchClient void handleStratumMessage(const string &line) override; public: - PoolWatchClientBitcoin(struct event_base *base, ClientContainerBitcoin *container, - bool disableChecking, - const string &poolName, - const string &poolHost, const int16_t poolPort, - const string &workerName); + PoolWatchClientBitcoin( + struct event_base *base, + ClientContainerBitcoin *container, + bool disableChecking, + const string &poolName, + const string &poolHost, + const int16_t poolPort, + const string &workerName); ~PoolWatchClientBitcoin(); - ClientContainerBitcoin* GetContainerBitcoin(){ return static_cast(container_); } + ClientContainerBitcoin *GetContainerBitcoin() { + return static_cast(container_); + } }; #endif diff --git a/src/blkmaker/BlkMakerMain.cc b/src/blkmaker/BlkMakerMain.cc index 5c34338b2..86c51e74f 100644 --- a/src/blkmaker/BlkMakerMain.cc +++ b/src/blkmaker/BlkMakerMain.cc @@ -47,10 +47,10 @@ using namespace std; using namespace libconfig; -vector> makers;// *gBlockMaker = nullptr; +vector> makers; // *gBlockMaker = nullptr; void handler(int sig) { - for (auto maker: makers) { + for (auto maker : makers) { if (maker) maker->stop(); } @@ -58,7 +58,8 @@ void handler(int sig) { void usage() { fprintf(stderr, BIN_VERSION_STRING("blkmaker")); - fprintf(stderr, "Usage:\tblkmaker -c \"blkmaker.cfg\" [-l ]\n"); + fprintf( + stderr, "Usage:\tblkmaker -c 
\"blkmaker.cfg\" [-l ]\n"); } // BlockMaker* createBlockMaker(Config& cfg, MysqlConnectInfo* poolDBInfo) { @@ -66,7 +67,7 @@ void usage() { // string broker = cfg.lookup("kafka.brokers"); // BlockMaker *maker = nullptr; -// if ("BTC" == type) +// if ("BTC" == type) // maker = new BlockMaker(broker.c_str(), *poolDBInfo); // else // maker = new BlockMakerEth(broker.c_str(), *poolDBInfo); @@ -74,15 +75,18 @@ void usage() { // return maker; // } -BlockMaker* createBlockMaker(shared_ptr def, const string& broker, MysqlConnectInfo* poolDBInfo) { +BlockMaker *createBlockMaker( + shared_ptr def, + const string &broker, + MysqlConnectInfo *poolDBInfo) { BlockMaker *maker = nullptr; #if defined(CHAIN_TYPE_STR) if (CHAIN_TYPE_STR == def->chainType_) -#else +#else if (false) -#endif +#endif maker = new BlockMakerBitcoin(def, broker.c_str(), *poolDBInfo); - else if ("ETH" == def->chainType_) + else if ("ETH" == def->chainType_) maker = new BlockMakerEth(def, broker.c_str(), *poolDBInfo); else if ("SIA" == def->chainType_) maker = new BlockMakerSia(def, broker.c_str(), *poolDBInfo); @@ -94,40 +98,39 @@ BlockMaker* createBlockMaker(shared_ptr def, const string& return maker; } -shared_ptr createDefinition(const Setting &setting) -{ +shared_ptr createDefinition(const Setting &setting) { string chainType; shared_ptr def; - readFromSetting(setting, "chain_type", chainType); + readFromSetting(setting, "chain_type", chainType); - // The hard fork Constantinople of Ethereum mainnet has been delayed. - // So set a default height that won't arrive (9999999). - // The user can change the height in the configuration file - // after the fork height is determined. 
+ // The hard fork Constantinople of Ethereum mainnet if (chainType == "ETH") { - int constantinopleHeight = 9999999; + int constantinopleHeight = 7280000; setting.lookupValue("constantinople_height", constantinopleHeight); EthConsensus::setHardForkConstantinopleHeight(constantinopleHeight); } - + #if defined(CHAIN_TYPE_STR) if (CHAIN_TYPE_STR == chainType) -#else +#else if (false) #endif { - shared_ptr bitcoinDef = std::make_shared(); - - readFromSetting(setting, "job_topic", bitcoinDef->stratumJobTopic_); - readFromSetting(setting, "rawgbt_topic", bitcoinDef->rawGbtTopic_); - readFromSetting(setting, "auxpow_solved_share_topic", bitcoinDef->auxPowSolvedShareTopic_); - readFromSetting(setting, "rsk_solved_share_topic", bitcoinDef->rskSolvedShareTopic_); + shared_ptr bitcoinDef = + std::make_shared(); + + readFromSetting(setting, "job_topic", bitcoinDef->stratumJobTopic_); + readFromSetting(setting, "rawgbt_topic", bitcoinDef->rawGbtTopic_); + readFromSetting( + setting, + "auxpow_solved_share_topic", + bitcoinDef->auxPowSolvedShareTopic_); + readFromSetting( + setting, "rsk_solved_share_topic", bitcoinDef->rskSolvedShareTopic_); def = bitcoinDef; - } - else - { + } else { def = std::make_shared(); } @@ -135,22 +138,23 @@ shared_ptr createDefinition(const Setting &setting) def->enabled_ = false; readFromSetting(setting, "enabled", def->enabled_, true); readFromSetting(setting, "solved_share_topic", def->solvedShareTopic_); - readFromSetting(setting, "found_aux_block_table", def->foundAuxBlockTable_, true); - + readFromSetting( + setting, "found_aux_block_table", def->foundAuxBlockTable_, true); + const Setting &nodes = setting["nodes"]; - for (int i = 0; i < nodes.getLength(); ++i) - { + for (int i = 0; i < nodes.getLength(); ++i) { const Setting &nodeSetting = nodes[i]; NodeDefinition nodeDef; - readFromSetting(nodeSetting, "rpc_addr", nodeDef.rpcAddr_); - readFromSetting(nodeSetting, "rpc_userpwd", nodeDef.rpcUserPwd_); + readFromSetting(nodeSetting, "rpc_addr", 
nodeDef.rpcAddr_); + readFromSetting(nodeSetting, "rpc_userpwd", nodeDef.rpcUserPwd_); def->nodes.push_back(nodeDef); } return def; } -// shared_ptr createBlockMakerHandler(const BlockMakerDefinition &def) +// shared_ptr createBlockMakerHandler(const +// BlockMakerDefinition &def) // { // shared_ptr handler; @@ -168,23 +172,24 @@ shared_ptr createDefinition(const Setting &setting) // return handler; // } -void createBlockMakers(const libconfig::Config &cfg, MysqlConnectInfo* poolDBInfo) -{ +void createBlockMakers( + const libconfig::Config &cfg, MysqlConnectInfo *poolDBInfo) { string broker = cfg.lookup("kafka.brokers"); const Setting &root = cfg.getRoot(); const Setting &makerDefs = root["blk_makers"]; - for (int i = 0; i < makerDefs.getLength(); ++i) - { + for (int i = 0; i < makerDefs.getLength(); ++i) { auto def = createDefinition(makerDefs[i]); - if (!def->enabled_) - { - LOG(INFO) << "chain: " << def->chainType_ << ", topic: " << def->solvedShareTopic_ << ", disabled."; + if (!def->enabled_) { + LOG(INFO) << "chain: " << def->chainType_ + << ", topic: " << def->solvedShareTopic_ << ", disabled."; continue; } - LOG(INFO) << "chain: " << def->chainType_ << ", topic: " << def->solvedShareTopic_ << ", enabled."; - //auto handler = createBlockMakerHandler(def); - //makers.push_back(std::make_shared(broker.c_str(), *poolDBInfo)); + LOG(INFO) << "chain: " << def->chainType_ + << ", topic: " << def->solvedShareTopic_ << ", enabled."; + // auto handler = createBlockMakerHandler(def); + // makers.push_back(std::make_shared(broker.c_str(), + // *poolDBInfo)); shared_ptr maker(createBlockMaker(def, broker, poolDBInfo)); makers.push_back(maker); } @@ -197,7 +202,7 @@ void workerThread(shared_ptr maker) { int main(int argc, char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int c; if (argc <= 1) { @@ -206,15 +211,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:h")) != -1) { switch (c) { - case 'c': - optConf 
= optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf = optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -227,25 +233,24 @@ int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. - FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("blkmaker"); // Read the file. If there is an error, report it and exit. libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." 
<< std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } // lock cfg file: @@ -257,29 +262,34 @@ int main(int argc, char **argv) { }*/ signal(SIGTERM, handler); - signal(SIGINT, handler); + signal(SIGINT, handler); + + bool sslVerifyPeer = true; + cfg.lookupValue("curl.ssl_verify_peer", sslVerifyPeer); + setSslVerifyPeer(sslVerifyPeer); MysqlConnectInfo *poolDBInfo = nullptr; { int32_t poolDBPort = 3306; cfg.lookupValue("pooldb.port", poolDBPort); - poolDBInfo = new MysqlConnectInfo(cfg.lookup("pooldb.host"), poolDBPort, - cfg.lookup("pooldb.username"), - cfg.lookup("pooldb.password"), - cfg.lookup("pooldb.dbname")); + poolDBInfo = new MysqlConnectInfo( + cfg.lookup("pooldb.host"), + poolDBPort, + cfg.lookup("pooldb.username"), + cfg.lookup("pooldb.password"), + cfg.lookup("pooldb.dbname")); } createBlockMakers(cfg, poolDBInfo); try { vector> workers; - for (auto maker : makers) - { + for (auto maker : makers) { if (maker->init()) { workers.push_back(std::make_shared(workerThread, maker)); - } - else { - LOG(FATAL) << "blkmaker init failure, chain: " << maker->def()->chainType_; + } else { + LOG(FATAL) << "blkmaker init failure, chain: " + << maker->def()->chainType_; } } @@ -291,7 +301,7 @@ int main(int argc, char **argv) { LOG(INFO) << "worker exit"; } } - } catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/blkmaker/blkmaker.cfg b/src/blkmaker/blkmaker.cfg index 5b9cb3585..95e722d42 100644 --- a/src/blkmaker/blkmaker.cfg +++ b/src/blkmaker/blkmaker.cfg @@ -22,6 +22,10 @@ kafka = { brokers = "127.0.0.1:9092"; # "10.0.0.1:9092,10.0.0.2:9092,..." 
}; +curl = { + ssl_verify_peer = true; // set false to skip ssl verification on node RPC +}; + # # pool mysql db: table.found_blocks # @@ -46,11 +50,8 @@ blk_makers = ( #solved share topic solved_share_topic = "EthSolvedShare"; - # The hard fork Constantinople of Ethereum mainnet has been delayed. - # So set a default height that won't arrive (9999999). - # The user can change the height in the configuration file - # after the fork height is determined. - constantinople_height = 9999999; + # The hard fork Constantinople of Ethereum mainnet + constantinople_height = 7280000; }, { chain_type = "SIA"; //blockchain short name diff --git a/src/bytom/BlockMakerBytom.cc b/src/bytom/BlockMakerBytom.cc index 2034d851a..7691e15eb 100644 --- a/src/bytom/BlockMakerBytom.cc +++ b/src/bytom/BlockMakerBytom.cc @@ -28,19 +28,18 @@ #include - //////////////////////////////////////BlockMakerBytom////////////////////////////////////////////////// -BlockMakerBytom::BlockMakerBytom(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB) - : BlockMaker(def, kafkaBrokers, poolDB) -{ +BlockMakerBytom::BlockMakerBytom( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB) + : BlockMaker(def, kafkaBrokers, poolDB) { } -void BlockMakerBytom::processSolvedShare(rd_kafka_message_t *rkmessage) -{ +void BlockMakerBytom::processSolvedShare(rd_kafka_message_t *rkmessage) { const char *message = (const char *)rkmessage->payload; JsonNode r; - if (!JsonNode::parse(message, message + rkmessage->len, r)) - { + if (!JsonNode::parse(message, message + rkmessage->len, r)) { LOG(ERROR) << "decode common event failure"; return; } @@ -52,20 +51,19 @@ void BlockMakerBytom::processSolvedShare(rd_kafka_message_t *rkmessage) r["networkDiff"].type() != Utilities::JS::type::Int || r["userId"].type() != Utilities::JS::type::Int || r["workerId"].type() != Utilities::JS::type::Int || - r["workerFullName"].type() != Utilities::JS::type::Str) - { + r["workerFullName"].type() 
!= Utilities::JS::type::Str) { LOG(ERROR) << "eth solved share format wrong"; return; } - string bhString = r["header"].str(); - string request = Strings::Format("{\"block_header\": \"%s\"}\n", - bhString.c_str()); + string request = + Strings::Format("{\"block_header\": \"%s\"}\n", bhString.c_str()); submitBlockNonBlocking(request); - // NOTE: Database save is not implemented. Need to setup mysql in test environment + // NOTE: Database save is not implemented. Need to setup mysql in test + // environment StratumWorker worker; worker.userId_ = r["userId"].int32(); worker.workerHashId_ = r["workerId"].int64(); @@ -79,46 +77,73 @@ void BlockMakerBytom::processSolvedShare(rd_kafka_message_t *rkmessage) void BlockMakerBytom::submitBlockNonBlocking(const string &request) { for (const auto &itr : def()->nodes) { // use thread to submit - boost::thread t(boost::bind(&BlockMakerBytom::_submitBlockThread, this, - itr.rpcAddr_, itr.rpcUserPwd_, request)); + boost::thread t(boost::bind( + &BlockMakerBytom::_submitBlockThread, + this, + itr.rpcAddr_, + itr.rpcUserPwd_, + request)); t.detach(); } } -void BlockMakerBytom::_submitBlockThread(const string &rpcAddress, const string &rpcUserpass, - const string &request) -{ +void BlockMakerBytom::_submitBlockThread( + const string &rpcAddress, + const string &rpcUserpass, + const string &request) { string response; - LOG(INFO) << "submitting block to " << rpcAddress.c_str() << " with request value: " << request.c_str(); - rpcCall(rpcAddress.c_str(), rpcUserpass.c_str(), request.c_str(), request.length(), response, "curl"); + LOG(INFO) << "submitting block to " << rpcAddress.c_str() + << " with request value: " << request.c_str(); + rpcCall( + rpcAddress.c_str(), + rpcUserpass.c_str(), + request.c_str(), + request.length(), + response, + "curl"); LOG(INFO) << "submission result: " << response; } -void BlockMakerBytom::saveBlockToDBNonBlocking(const string &header, const uint32_t height, - const uint64_t networkDiff, const 
StratumWorker &worker) { - boost::thread t(boost::bind(&BlockMakerBytom::_saveBlockToDBThread, this, - header, height, networkDiff, worker)); +void BlockMakerBytom::saveBlockToDBNonBlocking( + const string &header, + const uint32_t height, + const uint64_t networkDiff, + const StratumWorker &worker) { + boost::thread t(boost::bind( + &BlockMakerBytom::_saveBlockToDBThread, + this, + header, + height, + networkDiff, + worker)); } -void BlockMakerBytom::_saveBlockToDBThread(const string &header, const uint32_t height, - const uint64_t networkDiff, const StratumWorker &worker) { +void BlockMakerBytom::_saveBlockToDBThread( + const string &header, + const uint32_t height, + const uint64_t networkDiff, + const StratumWorker &worker) { const string nowStr = date("%F %T"); string sql; - sql = Strings::Format("INSERT INTO `found_blocks` " - " (`puid`, `worker_id`" - ", `worker_full_name`" - ", `height`, `hash`, `rewards`" - ", `network_diff`, `created_at`)" - " VALUES (%ld, %" PRId64 - ", '%s'" - ", %lu, '%s', %" PRId64 - ", %" PRIu64 ", '%s'); ", - worker.userId_, worker.workerHashId_, - // filter again, just in case - filterWorkerName(worker.fullName_).c_str(), - height, header.c_str(), GetBlockRewardBytom(height), - networkDiff, nowStr.c_str()); - + sql = Strings::Format( + "INSERT INTO `found_blocks` " + " (`puid`, `worker_id`" + ", `worker_full_name`" + ", `height`, `hash`, `rewards`" + ", `network_diff`, `created_at`)" + " VALUES (%ld, %" PRId64 + ", '%s'" + ", %lu, '%s', %" PRId64 ", %" PRIu64 ", '%s'); ", + worker.userId_, + worker.workerHashId_, + // filter again, just in case + filterWorkerName(worker.fullName_).c_str(), + height, + header.c_str(), + GetBlockRewardBytom(height), + networkDiff, + nowStr.c_str()); + // try connect to DB MySQLConnection db(poolDB_); for (size_t i = 0; i < 3; i++) { @@ -130,9 +155,7 @@ void BlockMakerBytom::_saveBlockToDBThread(const string &header, const uint32_t if (db.execute(sql) == false) { LOG(ERROR) << "insert found block 
failure: " << sql; - } - else - { + } else { LOG(INFO) << "insert found block success for height " << height; } } \ No newline at end of file diff --git a/src/bytom/BlockMakerBytom.h b/src/bytom/BlockMakerBytom.h index 1ced4c82a..d73ad4657 100644 --- a/src/bytom/BlockMakerBytom.h +++ b/src/bytom/BlockMakerBytom.h @@ -27,18 +27,30 @@ #include "BlockMaker.h" #include "CommonBytom.h" -class BlockMakerBytom : public BlockMaker -{ +class BlockMakerBytom : public BlockMaker { public: - BlockMakerBytom(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB); + BlockMakerBytom( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB); void processSolvedShare(rd_kafka_message_t *rkmessage) override; private: void submitBlockNonBlocking(const string &request); - void _submitBlockThread(const string &rpcAddress, const string &rpcUserpass, const string& request); - void saveBlockToDBNonBlocking(const string &header, const uint32_t height, const uint64_t networkDiff, const StratumWorker &worker); - void _saveBlockToDBThread(const string &header, const uint32_t height, const uint64_t networkDiff, const StratumWorker &worker); - + void _submitBlockThread( + const string &rpcAddress, + const string &rpcUserpass, + const string &request); + void saveBlockToDBNonBlocking( + const string &header, + const uint32_t height, + const uint64_t networkDiff, + const StratumWorker &worker); + void _saveBlockToDBThread( + const string &header, + const uint32_t height, + const uint64_t networkDiff, + const StratumWorker &worker); }; #endif diff --git a/src/bytom/BytomUtils.cc b/src/bytom/BytomUtils.cc index 5c485b397..6fff974a7 100644 --- a/src/bytom/BytomUtils.cc +++ b/src/bytom/BytomUtils.cc @@ -1,14 +1,12 @@ #include "BytomUtils.h" -uint64_t GetBlockRewardBytom(uint64_t nHeight) -{ - // based on bytom's mining.go (BlockSubsidy function) - const uint64_t initialBlockSubsidy = 140700041250000000UL; - const uint64_t baseSubsidy = 41250000000UL; - const 
uint64_t subsidyReductionInterval = 840000UL; - if(nHeight == 0) - { - return initialBlockSubsidy; - } - return baseSubsidy >> (nHeight/subsidyReductionInterval); +uint64_t GetBlockRewardBytom(uint64_t nHeight) { + // based on bytom's mining.go (BlockSubsidy function) + const uint64_t initialBlockSubsidy = 140700041250000000UL; + const uint64_t baseSubsidy = 41250000000UL; + const uint64_t subsidyReductionInterval = 840000UL; + if (nHeight == 0) { + return initialBlockSubsidy; + } + return baseSubsidy >> (nHeight / subsidyReductionInterval); } diff --git a/src/bytom/CommonBytom.cc b/src/bytom/CommonBytom.cc index 84f1176ca..f2f5dd3d6 100644 --- a/src/bytom/CommonBytom.cc +++ b/src/bytom/CommonBytom.cc @@ -24,20 +24,16 @@ #include "Common.h" #include "bytom/bh_shared.h" - -uint64_t Bytom_TargetCompactToDifficulty(uint64_t bits) -{ +uint64_t Bytom_TargetCompactToDifficulty(uint64_t bits) { return CalculateDifficultyByTargetCompact(bits); } -void Bytom_DifficultyToTargetBinary(uint64_t difficulty, vector& out) -{ +void Bytom_DifficultyToTargetBinary(uint64_t difficulty, vector &out) { out.resize(32); - GoSlice outSlice = {(void *)out.data(), 32, 32}; + GoSlice outSlice = {(void *)out.data(), 32, 32}; CalculateTargetBinaryByDifficulty(difficulty, outSlice); } -uint64_t Bytom_JobDifficultyToTargetCompact(uint64_t difficulty) -{ +uint64_t Bytom_JobDifficultyToTargetCompact(uint64_t difficulty) { return CalculateTargetCompactByDifficulty(difficulty); } diff --git a/src/bytom/CommonBytom.h b/src/bytom/CommonBytom.h index 8ce297f9a..7a8c2365a 100644 --- a/src/bytom/CommonBytom.h +++ b/src/bytom/CommonBytom.h @@ -1,4 +1,4 @@ -/* +/* The MIT License (MIT) Copyright (c) [2016] [BTC.COM] @@ -27,7 +27,7 @@ #include "Common.h" uint64_t Bytom_TargetCompactToDifficulty(uint64_t bits); -void Bytom_DifficultyToTargetBinary(uint64_t difficulty, vector& out); +void Bytom_DifficultyToTargetBinary(uint64_t difficulty, vector &out); uint64_t Bytom_JobDifficultyToTargetCompact(uint64_t 
difficulty); #endif diff --git a/src/bytom/GwMakerBytom.cc b/src/bytom/GwMakerBytom.cc index cff12742e..96540d292 100644 --- a/src/bytom/GwMakerBytom.cc +++ b/src/bytom/GwMakerBytom.cc @@ -27,26 +27,22 @@ #include ///////////////////////////////GwMakerHandlerBytom//////////////////////////////////// -bool GwMakerHandlerBytom::checkFields(JsonNode &r) -{ +bool GwMakerHandlerBytom::checkFields(JsonNode &r) { //{"status":"success","data":{"block_header": //"0101a612b60a752a07bab9d7495a6861f88fc6f1c6656a29de3afda4747965400762c88cfb8d8ad7054010bb9a9b0622a77f633f47973971a955ca6ae00bad39372c9bf957b11fdae27dc9c377e5192668bc0a367e4a4764f11e7c725ecced1d7b6a492974fab1b6d5bc009ffcfd86808080801d", //"seed":"9e6f94f7a8b839b8bfd349fdb794cc125a0711a25c6b4c1dfbdf8d448e0a9a45"}} - if (r.type() != Utilities::JS::type::Obj) - { + if (r.type() != Utilities::JS::type::Obj) { LOG(ERROR) << "Bytom getwork return not jason"; return false; } JsonNode status = r["status"]; - if (status.type() != Utilities::JS::type::Str) - { + if (status.type() != Utilities::JS::type::Str) { LOG(ERROR) << "Bytom getwork return not jason"; return false; } - if (status.str() != "success") - { + if (status.str() != "success") { LOG(ERROR) << "status " << status.str(); return false; } @@ -54,8 +50,7 @@ bool GwMakerHandlerBytom::checkFields(JsonNode &r) JsonNode data = r["data"]; if (data.type() != Utilities::JS::type::Obj || data["block_header"].type() != Utilities::JS::type::Str || - data["seed"].type() != Utilities::JS::type::Str) - { + data["seed"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "Bytom getwork retrun unexpected"; return false; } @@ -63,27 +58,25 @@ bool GwMakerHandlerBytom::checkFields(JsonNode &r) return true; } -string GwMakerHandlerBytom::constructRawMsg(JsonNode &r) -{ +string GwMakerHandlerBytom::constructRawMsg(JsonNode &r) { auto data = r["data"]; string header = data["block_header"].str(); string seed = data["seed"].str(); - LOG(INFO) << "chain: " << def_.chainType_ - << ", topic: 
" << def_.rawGwTopic_ - << ", hHash: " << header - << ", sHash: " << seed; + LOG(INFO) << "chain: " << def_.chainType_ << ", topic: " << def_.rawGwTopic_ + << ", hHash: " << header << ", sHash: " << seed; - return Strings::Format("{\"created_at_ts\":%u," - "\"chainType\":\"%s\"," - "\"rpcAddress\":\"%s\"," - "\"rpcUserPwd\":\"%s\"," - "\"hHash\":\"%s\"," - "\"sHash\":\"%s\"}", - (uint32_t)time(nullptr), - def_.chainType_.c_str(), - def_.rpcAddr_.c_str(), - def_.rpcUserPwd_.c_str(), - header.c_str(), - seed.c_str()); + return Strings::Format( + "{\"created_at_ts\":%u," + "\"chainType\":\"%s\"," + "\"rpcAddress\":\"%s\"," + "\"rpcUserPwd\":\"%s\"," + "\"hHash\":\"%s\"," + "\"sHash\":\"%s\"}", + (uint32_t)time(nullptr), + def_.chainType_.c_str(), + def_.rpcAddr_.c_str(), + def_.rpcUserPwd_.c_str(), + header.c_str(), + seed.c_str()); } diff --git a/src/bytom/GwMakerBytom.h b/src/bytom/GwMakerBytom.h index 8552bae68..8a17b123f 100644 --- a/src/bytom/GwMakerBytom.h +++ b/src/bytom/GwMakerBytom.h @@ -29,8 +29,7 @@ #include "GwMaker.h" #include "utilities_js.hpp" -class GwMakerHandlerBytom : public GwMakerHandlerJson -{ +class GwMakerHandlerBytom : public GwMakerHandlerJson { bool checkFields(JsonNode &r) override; string constructRawMsg(JsonNode &r) override; string getRequestData() override { return "{}"; } diff --git a/src/bytom/JobMakerBytom.cc b/src/bytom/JobMakerBytom.cc index d7ce765b1..7de4852aa 100644 --- a/src/bytom/JobMakerBytom.cc +++ b/src/bytom/JobMakerBytom.cc @@ -27,11 +27,9 @@ #include "Utils.h" #include "utilities_js.hpp" ///////////////////////////////////JobMakerHandlerBytom////////////////////////////////// -bool JobMakerHandlerBytom::processMsg(const string &msg) -{ +bool JobMakerHandlerBytom::processMsg(const string &msg) { JsonNode j; - if (!JsonNode::parse(msg.c_str(), msg.c_str() + msg.length(), j)) - { + if (!JsonNode::parse(msg.c_str(), msg.c_str() + msg.length(), j)) { LOG(ERROR) << "deserialize bytom work failed " << msg; return false; } @@ 
-50,32 +48,29 @@ bool JobMakerHandlerBytom::processMsg(const string &msg) return true; } -bool JobMakerHandlerBytom::validate(JsonNode &j) -{ +bool JobMakerHandlerBytom::validate(JsonNode &j) { // check fields are valid if (j.type() != Utilities::JS::type::Obj || - j["created_at_ts"].type() != Utilities::JS::type::Int || - j["rpcAddress"].type() != Utilities::JS::type::Str || - j["rpcUserPwd"].type() != Utilities::JS::type::Str || - j["hHash"].type() != Utilities::JS::type::Str) { - LOG(ERROR) << "work format not expected"; + j["created_at_ts"].type() != Utilities::JS::type::Int || + j["rpcAddress"].type() != Utilities::JS::type::Str || + j["rpcUserPwd"].type() != Utilities::JS::type::Str || + j["hHash"].type() != Utilities::JS::type::Str) { + LOG(ERROR) << "work format not expected"; return false; - } + } // check timestamp - if (j["created_at_ts"].uint32() + def()->maxJobDelay_ < time(nullptr)) - { - LOG(ERROR) << "too old bytom work: " << date("%F %T", j["created_at_ts"].uint32()); + if (j["created_at_ts"].uint32() + def()->maxJobDelay_ < time(nullptr)) { + LOG(ERROR) << "too old bytom work: " + << date("%F %T", j["created_at_ts"].uint32()); return false; } return true; } -string JobMakerHandlerBytom::makeStratumJobMsg() -{ - if (0 == header_.size() || - 0 == seed_.size()) +string JobMakerHandlerBytom::makeStratumJobMsg() { + if (0 == header_.size() || 0 == seed_.size()) return ""; StratumJobBytom job; diff --git a/src/bytom/JobMakerBytom.h b/src/bytom/JobMakerBytom.h index a4989e0ee..9050502b9 100644 --- a/src/bytom/JobMakerBytom.h +++ b/src/bytom/JobMakerBytom.h @@ -27,8 +27,7 @@ #include "JobMaker.h" #include "utilities_js.hpp" -class JobMakerHandlerBytom : public GwJobMakerHandler -{ +class JobMakerHandlerBytom : public GwJobMakerHandler { public: virtual ~JobMakerHandlerBytom() {} bool processMsg(const string &msg) override; diff --git a/src/bytom/ShareLogParserBytom.h b/src/bytom/ShareLogParserBytom.h index 3bb57aade..c1ba934b9 100644 --- 
a/src/bytom/ShareLogParserBytom.h +++ b/src/bytom/ShareLogParserBytom.h @@ -24,7 +24,6 @@ #ifndef SHARELOGPARSER_BYTOM_H_ #define SHARELOGPARSER_BYTOM_H_ - #include "ShareLogParser.h" #include "StratumBytom.h" diff --git a/src/bytom/StatisticsBytom.cc b/src/bytom/StatisticsBytom.cc index fa03d15ba..9c367e2ea 100644 --- a/src/bytom/StatisticsBytom.cc +++ b/src/bytom/StatisticsBytom.cc @@ -27,29 +27,9 @@ #include "StratumBytom.h" #include "BytomUtils.h" - template <> -void ShareStatsDay::processShare(uint32_t hourIdx, const ShareBytom &share) { - ScopeLock sl(lock_); - - if (StratumStatus::isAccepted(share.status())) { - shareAccept1h_[hourIdx] += share.sharediff(); - shareAccept1d_ += share.sharediff(); - - double score = share.score(); - double reward = GetBlockRewardBytom(share.height()); - double earn = score * reward; - - score1h_[hourIdx] += score; - score1d_ += score; - earn1h_[hourIdx] += earn; - earn1d_ += earn; - - } else { - shareReject1h_[hourIdx] += share.sharediff(); - shareReject1d_ += share.sharediff(); - } - modifyHoursFlag_ |= (0x01u << hourIdx); +double ShareStatsDay::getShareReward(const ShareBytom &share) { + return GetBlockRewardBytom(share.height()); } /////////////// template instantiation /////////////// diff --git a/src/bytom/StatisticsBytom.h b/src/bytom/StatisticsBytom.h index 1e59f4cb0..8723545b4 100644 --- a/src/bytom/StatisticsBytom.h +++ b/src/bytom/StatisticsBytom.h @@ -29,26 +29,33 @@ #include "CommonBytom.h" #include "StratumBytom.h" -/////////////////////////////// GlobalShareBytom //////////////////////////////// +/////////////////////////////// GlobalShareBytom +/////////////////////////////////// // Used to detect duplicate share attacks on Bytom mining. 
struct GlobalShareBytom { BytomCombinedHeader combinedHeader_; GlobalShareBytom() = delete; - GlobalShareBytom(const ShareBytom &share) - { - memcpy(&combinedHeader_, share.combinedheader().data(), share.combinedheader().length()); + GlobalShareBytom(const ShareBytom &share) { + memcpy( + &combinedHeader_, + share.combinedheader().data(), + share.combinedheader().length()); } - GlobalShareBytom& operator=(const GlobalShareBytom &r) = default; + GlobalShareBytom &operator=(const GlobalShareBytom &r) = default; bool operator<(const GlobalShareBytom &r) const { - return std::memcmp(&combinedHeader_, &r.combinedHeader_, sizeof(BytomCombinedHeader)) < 0; + return std::memcmp( + &combinedHeader_, + &r.combinedHeader_, + sizeof(BytomCombinedHeader)) < 0; } }; //////////////////////////// Alias //////////////////////////// -using DuplicateShareCheckerBytom = DuplicateShareCheckerT; +using DuplicateShareCheckerBytom = + DuplicateShareCheckerT; #endif diff --git a/src/bytom/StatsHttpdBytom.cc b/src/bytom/StatsHttpdBytom.cc index 9658645af..c37e22913 100644 --- a/src/bytom/StatsHttpdBytom.cc +++ b/src/bytom/StatsHttpdBytom.cc @@ -23,7 +23,6 @@ */ #include "StatsHttpdBytom.h" - /////////////// template instantiation /////////////// // Without this, some linking errors will issued. // If you add a new derived class of Share, add it at the following. 
diff --git a/src/bytom/StratumBytom.cc b/src/bytom/StratumBytom.cc index 11505df49..588423cc6 100644 --- a/src/bytom/StratumBytom.cc +++ b/src/bytom/StratumBytom.cc @@ -30,41 +30,35 @@ #include StratumJobBytom::StratumJobBytom() - : nTime_(0U) -{ - + : nTime_(0U) { } -StratumJobBytom::~StratumJobBytom() -{ - +StratumJobBytom::~StratumJobBytom() { } -string StratumJobBytom::serializeToJson() const -{ - return Strings::Format("{\"created_at_ts\":%u" - ",\"jobId\":%" PRIu64 "" - ",\"sHash\":\"%s\"" - ",\"hHash\":\"%s\"" - "}", - nTime_, - jobId_, - seed_.c_str(), - hHash_.c_str()); +string StratumJobBytom::serializeToJson() const { + return Strings::Format( + "{\"created_at_ts\":%u" + ",\"jobId\":%" PRIu64 + "" + ",\"sHash\":\"%s\"" + ",\"hHash\":\"%s\"" + "}", + nTime_, + jobId_, + seed_.c_str(), + hHash_.c_str()); } -bool StratumJobBytom::unserializeFromJson(const char *s, size_t len) -{ +bool StratumJobBytom::unserializeFromJson(const char *s, size_t len) { JsonNode j; - if (!JsonNode::parse(s, s + len, j)) - { + if (!JsonNode::parse(s, s + len, j)) { return false; } if (j["created_at_ts"].type() != Utilities::JS::type::Int || j["jobId"].type() != Utilities::JS::type::Int || j["sHash"].type() != Utilities::JS::type::Str || - j["hHash"].type() != Utilities::JS::type::Str) - { + j["hHash"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "parse bytom stratum job failure: " << s; return false; } @@ -78,9 +72,9 @@ bool StratumJobBytom::unserializeFromJson(const char *s, size_t len) return true; } -void StratumJobBytom::updateBlockHeaderFromHash() -{ - GoSlice text = {(void *)hHash_.data(), (int)hHash_.length(), (int)hHash_.length()}; +void StratumJobBytom::updateBlockHeaderFromHash() { + GoSlice text = { + (void *)hHash_.data(), (int)hHash_.length(), (int)hHash_.length()}; DecodeBlockHeader_return bh = DecodeBlockHeader(text); DLOG(INFO) << "bytom block height=" << bh.r1 << ", timestamp=" << bh.r3; blockHeader_.version = bh.r0; @@ -95,21 +89,25 @@ void 
StratumJobBytom::updateBlockHeaderFromHash() free(bh.r6); } -string BlockHeaderBytom::serializeToJson() const -{ - return Strings::Format("{\"Version\":%" PRIu64 "" - ",\"Height\":%" PRIu64 "" - ",\"PreviousBlockHash\":\"%s\"" - ",\"Timestamp\":%" PRIu64 "" - ",\"TransactionsMerkleRoot\":\"%s\"" - ",\"TransactionStatusHash\":\"%s\"" - ",\"Bits\":%" PRIu64 "" - "}", - version, - height, - previousBlockHash.c_str(), - timestamp, - transactionsMerkleRoot.c_str(), - transactionStatusHash.c_str(), - bits); +string BlockHeaderBytom::serializeToJson() const { + return Strings::Format( + "{\"Version\":%" PRIu64 + "" + ",\"Height\":%" PRIu64 + "" + ",\"PreviousBlockHash\":\"%s\"" + ",\"Timestamp\":%" PRIu64 + "" + ",\"TransactionsMerkleRoot\":\"%s\"" + ",\"TransactionStatusHash\":\"%s\"" + ",\"Bits\":%" PRIu64 + "" + "}", + version, + height, + previousBlockHash.c_str(), + timestamp, + transactionsMerkleRoot.c_str(), + transactionStatusHash.c_str(), + bits); } diff --git a/src/bytom/StratumBytom.h b/src/bytom/StratumBytom.h index 89f2bec62..19c232463 100644 --- a/src/bytom/StratumBytom.h +++ b/src/bytom/StratumBytom.h @@ -27,148 +27,143 @@ #include "Stratum.h" #include "CommonBytom.h" #include "bytom/bytom.pb.h" -union BytomCombinedHeader -{ - struct - { +union BytomCombinedHeader { + struct { uint64_t blockCommitmentMerkleRootCheapHash_; uint64_t blockCommitmentStatusHashCheapHash_; uint64_t timestamp_; - uint64_t nonce_; + uint64_t nonce_; }; uint8_t bytes[32]; }; - - -class ShareBytomBytesVersion -{ +class ShareBytomBytesVersion { public: + uint32_t version_ = 0; // 0 + uint32_t checkSum_ = 0; // 4 - uint32_t version_ = 0;//0 - uint32_t checkSum_ = 0;//4 + uint64_t jobId_ = 0; // 8 + int64_t workerHashId_ = 0; // 16 + int64_t timestamp_ = 0; // 24 + uint64_t shareDiff_ = 0; // 32 + uint64_t blkBits_ = 0; // 40 + uint64_t height_ = 0; // 48 + IpAddress ip_; // 56 + BytomCombinedHeader combinedHeader_; // 72 - uint64_t jobId_ = 0;//8 - int64_t workerHashId_ = 0;//16 - 
int64_t timestamp_ = 0;//24 - uint64_t shareDiff_ = 0;//32 - uint64_t blkBits_ = 0;//40 - uint64_t height_ = 0;//48 - IpAddress ip_; //56 - BytomCombinedHeader combinedHeader_;//72 - - int32_t userId_ = 0;//104 - int32_t status_ = 0;//108 + int32_t userId_ = 0; // 104 + int32_t status_ = 0; // 108 uint32_t checkSum() const { uint64_t c = 0; - c += (uint64_t) version_; - c += (uint64_t) workerHashId_; - c += (uint64_t) userId_; - c += (uint64_t) status_; - c += (uint64_t) timestamp_; - c += (uint64_t) ip_.addrUint64[0]; - c += (uint64_t) ip_.addrUint64[1]; - c += (uint64_t) jobId_; - c += (uint64_t) shareDiff_; - c += (uint64_t) blkBits_; - c += (uint64_t) height_; - c += (uint64_t) combinedHeader_.blockCommitmentMerkleRootCheapHash_; - c += (uint64_t) combinedHeader_.blockCommitmentStatusHashCheapHash_; - c += (uint64_t) combinedHeader_.timestamp_; - c += (uint64_t) combinedHeader_.nonce_; - - return ((uint32_t) c) + ((uint32_t) (c >> 32)); + c += (uint64_t)version_; + c += (uint64_t)workerHashId_; + c += (uint64_t)userId_; + c += (uint64_t)status_; + c += (uint64_t)timestamp_; + c += (uint64_t)ip_.addrUint64[0]; + c += (uint64_t)ip_.addrUint64[1]; + c += (uint64_t)jobId_; + c += (uint64_t)shareDiff_; + c += (uint64_t)blkBits_; + c += (uint64_t)height_; + c += (uint64_t)combinedHeader_.blockCommitmentMerkleRootCheapHash_; + c += (uint64_t)combinedHeader_.blockCommitmentStatusHashCheapHash_; + c += (uint64_t)combinedHeader_.timestamp_; + c += (uint64_t)combinedHeader_.nonce_; + + return ((uint32_t)c) + ((uint32_t)(c >> 32)); } - }; - - -class ShareBytom :public sharebase::BytomMsg -{ +class ShareBytom : public sharebase::BytomMsg { public: + const static uint32_t BYTES_VERSION = + 0x00030001u; // first 0003: bytom, second 0001: version 1. + const static uint32_t CURRENT_VERSION = + 0x00030002u; // first 0003: bytom, second 0002: version 2. - const static uint32_t BYTES_VERSION = 0x00030001u; // first 0003: bytom, second 0001: version 1. 
- const static uint32_t CURRENT_VERSION = 0x00030002u; // first 0003: bytom, second 0002: version 2. - - - ShareBytom() { - set_version(CURRENT_VERSION); - } + ShareBytom() { set_version(CURRENT_VERSION); } ShareBytom(const ShareBytom &r) = default; ShareBytom &operator=(const ShareBytom &r) = default; - double score() const - { - if (sharediff() == 0 || blkbits() == 0) - { + double score() const { + if (sharediff() == 0 || blkbits() == 0) { return 0.0; } uint64_t difficulty = Bytom_TargetCompactToDifficulty(blkbits()); - // Network diff may less than share diff on testnet or regression test network. - // On regression test network, the network diff may be zero. - // But no matter how low the network diff is, you can only dig one block at a time. - if (difficulty < sharediff()) - { + // Network diff may less than share diff on testnet or regression test + // network. On regression test network, the network diff may be zero. But no + // matter how low the network diff is, you can only dig one block at a time. 
+ if (difficulty < sharediff()) { return 1.0; } return (double)sharediff() / (double)difficulty; } - bool isValid() const - { + bool isValid() const { if (version() != CURRENT_VERSION) { return false; } - if (jobid() == 0 || userid() == 0 || workerhashid() == 0 || - height() == 0 || blkbits() == 0 || sharediff() == 0) - { + if (jobid() == 0 || userid() == 0 || workerhashid() == 0 || height() == 0 || + blkbits() == 0 || sharediff() == 0) { return false; } - + return true; } - string toString() const - { + string toString() const { uint64_t networkDifficulty = Bytom_TargetCompactToDifficulty(blkbits()); BytomCombinedHeader combinedHeader; - memcpy(&combinedHeader, combinedheader().data(), sizeof(BytomCombinedHeader)); - - return Strings::Format("share(jobId: %" PRIu64 ", ip: %s, userId: %d, " - "workerId: %" PRId64 ", time: %u/%s, height: %u, " - "blkBits: %08x/%" PRId64 ", nonce: %08x, shareDiff: %" PRIu64 ", " - "status: %d/%s)", - jobid(), ip().c_str(), userid(), - workerhashid(), timestamp(), date("%F %T", timestamp()).c_str(), height(), - blkbits(), networkDifficulty, combinedHeader.nonce_, sharediff(), - status(), StratumStatus::toString(status())); + memcpy( + &combinedHeader, combinedheader().data(), sizeof(BytomCombinedHeader)); + + return Strings::Format( + "share(jobId: %" PRIu64 + ", ip: %s, userId: %d, " + "workerId: %" PRId64 + ", time: %u/%s, height: %u, " + "blkBits: %08x/%" PRId64 ", nonce: %08x, shareDiff: %" PRIu64 + ", " + "status: %d/%s)", + jobid(), + ip().c_str(), + userid(), + workerhashid(), + timestamp(), + date("%F %T", timestamp()).c_str(), + height(), + blkbits(), + networkDifficulty, + combinedHeader.nonce_, + sharediff(), + status(), + StratumStatus::toString(status())); } - bool SerializeToBuffer(string& data, uint32_t& size) const{ + bool SerializeToBuffer(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size); if (!SerializeToArray((uint8_t *)data.data(), size)) { DLOG(INFO) << "share SerializeToArray failed!" 
<< std::endl; return false; - } return true; } - bool SerializeToArrayWithLength(string& data, uint32_t& size) const { + bool SerializeToArrayWithLength(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size + sizeof(uint32_t)); - *((uint32_t*)data.data()) = size; - uint8_t * payload = (uint8_t *)data.data(); + *((uint32_t *)data.data()) = size; + uint8_t *payload = (uint8_t *)data.data(); if (!SerializeToArray(payload + sizeof(uint32_t), size)) { DLOG(INFO) << "share SerializeToArray failed!"; @@ -179,27 +174,31 @@ class ShareBytom :public sharebase::BytomMsg return true; } - bool UnserializeWithVersion(const uint8_t* data, uint32_t size){ + bool UnserializeWithVersion(const uint8_t *data, uint32_t size) { - if(nullptr == data || size <= 0) { + if (nullptr == data || size <= 0) { return false; } - const uint8_t * payload = data; - uint32_t version = *((uint32_t*)payload); + const uint8_t *payload = data; + uint32_t version = *((uint32_t *)payload); if (version == CURRENT_VERSION) { - if (!ParseFromArray((const uint8_t *)(payload + sizeof(uint32_t)), size - sizeof(uint32_t))) { + if (!ParseFromArray( + (const uint8_t *)(payload + sizeof(uint32_t)), + size - sizeof(uint32_t))) { DLOG(INFO) << "share ParseFromArray failed!"; return false; } - } else if (version == BYTES_VERSION && size == sizeof(ShareBytomBytesVersion)) { + } else if ( + version == BYTES_VERSION && size == sizeof(ShareBytomBytesVersion)) { - ShareBytomBytesVersion* share = (ShareBytomBytesVersion*) payload; + ShareBytomBytesVersion *share = (ShareBytomBytesVersion *)payload; if (share->checkSum() != share->checkSum_) { - DLOG(INFO) << "checkSum mismatched! checkSum_: " << share->checkSum_<< ", checkSum(): " << share->checkSum(); + DLOG(INFO) << "checkSum mismatched! 
checkSum_: " << share->checkSum_ + << ", checkSum(): " << share->checkSum(); return false; } @@ -210,7 +209,8 @@ class ShareBytom :public sharebase::BytomMsg set_sharediff(share->shareDiff_); set_blkbits(share->blkBits_); set_ip(share->ip_.toString()); - set_combinedheader(&(share->combinedHeader_), sizeof(BytomCombinedHeader)); + set_combinedheader( + &(share->combinedHeader_), sizeof(BytomCombinedHeader)); set_userid(share->userId_); set_status(share->status_); @@ -223,12 +223,12 @@ class ShareBytom :public sharebase::BytomMsg return true; } - bool SerializeToArrayWithVersion(string& data, uint32_t& size) const { + bool SerializeToArrayWithVersion(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size + sizeof(uint32_t)); - uint8_t * payload = (uint8_t *)data.data(); - *((uint32_t*)payload) = version(); + uint8_t *payload = (uint8_t *)data.data(); + *((uint32_t *)payload) = version(); if (!SerializeToArray(payload + sizeof(uint32_t), size)) { DLOG(INFO) << "SerializeToArray failed!"; @@ -239,26 +239,21 @@ class ShareBytom :public sharebase::BytomMsg return true; } - - uint32_t getsharelength() { - return IsInitialized() ? ByteSize() : 0; - } + uint32_t getsharelength() { return IsInitialized() ? ByteSize() : 0; } }; -struct BlockHeaderBytom -{ - uint64_t version; // The version of the block. - uint64_t height; // The height of the block. +struct BlockHeaderBytom { + uint64_t version; // The version of the block. + uint64_t height; // The height of the block. string previousBlockHash; // The hash of the previous block. - uint64_t timestamp; // The time of the block in seconds. - uint64_t bits; // Difficulty target for the block. + uint64_t timestamp; // The time of the block in seconds. + uint64_t bits; // Difficulty target for the block. 
string transactionsMerkleRoot; string transactionStatusHash; string serializeToJson() const; }; -class StratumJobBytom : public StratumJob -{ +class StratumJobBytom : public StratumJob { public: StratumJobBytom(); ~StratumJobBytom(); @@ -271,7 +266,6 @@ class StratumJobBytom : public StratumJob void updateBlockHeaderFromHash(); uint32_t nTime_; - }; class ServerBytom; @@ -283,8 +277,12 @@ struct StratumTraitsBytom { using JobDiffType = uint64_t; struct LocalJobType : public LocalJob { LocalJobType(uint64_t jobId, uint8_t shortJobId) - : LocalJob(jobId), shortJobId_(shortJobId), jobDifficulty_(0) {} - bool operator==(uint8_t shortJobId) const { return shortJobId_ == shortJobId; } + : LocalJob(jobId) + , shortJobId_(shortJobId) + , jobDifficulty_(0) {} + bool operator==(uint8_t shortJobId) const { + return shortJobId_ == shortJobId; + } uint8_t shortJobId_; uint64_t jobDifficulty_; }; diff --git a/src/bytom/StratumMinerBytom.cc b/src/bytom/StratumMinerBytom.cc index be5c09833..0fd0203fd 100644 --- a/src/bytom/StratumMinerBytom.cc +++ b/src/bytom/StratumMinerBytom.cc @@ -32,21 +32,24 @@ #ifndef NO_CUDA #include "cutil/src/GpuTs.h" -#endif //NO_CUDA +#endif // NO_CUDA /////////////////////////////StratumMinerBytom//////////////////////////// -StratumMinerBytom::StratumMinerBytom(StratumSessionBytom &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) - : StratumMinerBase(session, diffController, clientAgent, workerName, workerId) { +StratumMinerBytom::StratumMinerBytom( + StratumSessionBytom &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) + : StratumMinerBase( + session, diffController, clientAgent, workerName, workerId) { } -void StratumMinerBytom::handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void 
StratumMinerBytom::handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) { if (method == "getwork") { handleRequest_GetWork(idStr, jparams); } else if (method == "submit") { @@ -54,32 +57,40 @@ void StratumMinerBytom::handleRequest(const std::string &idStr, } } -void StratumMinerBytom::handleRequest_GetWork(const string &idStr, const JsonNode &jparams) { - getSession().sendMiningNotify(getSession().getServer().GetJobRepository()->getLatestStratumJobEx(), false); +void StratumMinerBytom::handleRequest_GetWork( + const string &idStr, const JsonNode &jparams) { + getSession().sendMiningNotify( + getSession().getServer().GetJobRepository()->getLatestStratumJobEx(), + false); } namespace BytomUtils { -int checkProofOfWork(EncodeBlockHeader_return encoded, shared_ptr sJob, uint64_t difficulty) { - DLOG(INFO) << "verify blockheader hash=" << encoded.r1 << ", seed=" << sJob->seed_; +int checkProofOfWork( + EncodeBlockHeader_return encoded, + shared_ptr sJob, + uint64_t difficulty) { + DLOG(INFO) << "verify blockheader hash=" << encoded.r1 + << ", seed=" << sJob->seed_; vector vHeader, vSeed; Hex2Bin(encoded.r1, vHeader); Hex2Bin(sJob->seed_.c_str(), sJob->seed_.length(), vSeed); #ifndef NO_CUDA - uint8_t *pTarget = GpuTs((uint8_t*)vHeader.data(), (uint8_t*)vSeed.data()); + uint8_t *pTarget = GpuTs((uint8_t *)vHeader.data(), (uint8_t *)vSeed.data()); #else - GoSlice hSlice = {(void *) vHeader.data(), (int) vHeader.size(), (int) vHeader.size()}; - GoSlice sSlice = {(void *) vSeed.data(), (int) vSeed.size(), (int) vSeed.size()}; + GoSlice hSlice = { + (void *)vHeader.data(), (int)vHeader.size(), (int)vHeader.size()}; + GoSlice sSlice = {(void *)vSeed.data(), (int)vSeed.size(), (int)vSeed.size()}; uint8_t pTarget[32]; - GoSlice hOut = {(void *) pTarget, 32, 32}; + GoSlice hOut = {(void *)pTarget, 32, 32}; ProofOfWorkHashCPU(hSlice, sSlice, hOut); #endif // first job target first before checking solved share 
string targetStr; Bin2Hex(pTarget, 32, targetStr); - GoSlice text = {(void *) pTarget, 32, 32}; + GoSlice text = {(void *)pTarget, 32, 32}; uint64_t localJobBits = Bytom_JobDifficultyToTargetCompact(difficulty); bool powResultLocalJob = CheckProofOfWork(text, localJobBits); @@ -97,9 +108,10 @@ int checkProofOfWork(EncodeBlockHeader_return encoded, shared_ptr(jparams); - uint8_t shortJobId = (uint8_t) params["job_id"].uint32(); + uint8_t shortJobId = (uint8_t)params["job_id"].uint32(); LocalJob *localJob = session.findLocalJob(shortJobId); if (nullptr == localJob) { session.rpc2ResponseBoolean(idStr, false, "Block expired"); - LOG(ERROR) << "can not find local bytom job id=" << (int) shortJobId; + LOG(ERROR) << "can not find local bytom job id=" << (int)shortJobId; return; } @@ -134,20 +146,22 @@ void StratumMinerBytom::handleRequest_Submit(const string &idStr, const JsonNode auto sJob = std::static_pointer_cast(exjob->sjob_); if (nullptr == sJob) { session.rpc2ResponseBoolean(idStr, false, "Unknown reason"); - LOG(FATAL) << "Code error, casting stratum job bytom failed for job id=" << std::hex << localJob->jobId_; + LOG(FATAL) << "Code error, casting stratum job bytom failed for job id=" + << std::hex << localJob->jobId_; return; } - //get header submission string and header hash string + // get header submission string and header hash string // nonce in bytom B3Poisoned is using hex not decimal uint64_t nonce = 0; { string nonceHex = params["nonce"].str(); vector nonceBinBuf; Hex2BinReverse(nonceHex.c_str(), nonceHex.length(), nonceBinBuf); - nonce = *(uint64_t *) nonceBinBuf.data(); - LOG(INFO) << idStr.c_str() << ": bytom handle request submit jobId " << (int) shortJobId - << " with nonce: " << nonce << " - noncehex: " << nonceHex.c_str(); + nonce = *(uint64_t *)nonceBinBuf.data(); + LOG(INFO) << idStr.c_str() << ": bytom handle request submit jobId " + << (int)shortJobId << " with nonce: " << nonce + << " - noncehex: " << nonceHex.c_str(); } // Check share 
duplication @@ -155,7 +169,7 @@ void StratumMinerBytom::handleRequest_Submit(const string &idStr, const JsonNode if (!server.isEnableSimulator_ && !localJob->addLocalShare(localShare)) { session.responseError(idStr, StratumStatus::DUPLICATE_SHARE); // add invalid share to counter - invalidSharesCounter_.insert((int64_t) time(nullptr), 1); + invalidSharesCounter_.insert((int64_t)time(nullptr), 1); return; } @@ -168,7 +182,7 @@ void StratumMinerBytom::handleRequest_Submit(const string &idStr, const JsonNode auto difficulty = iter->second; auto clientIp = session.getClientIp(); - //Check share + // Check share ShareBytom share; // ShareBase portion share.set_version(ShareBytom::CURRENT_VERSION); @@ -176,7 +190,7 @@ void StratumMinerBytom::handleRequest_Submit(const string &idStr, const JsonNode share.set_workerhashid(workerId_); share.set_userid(worker.userId_); share.set_status(StratumStatus::REJECT_NO_REASON); - share.set_timestamp((uint32_t) time(nullptr)); + share.set_timestamp((uint32_t)time(nullptr)); IpAddress ip; ip.fromIpv4Int(clientIp); share.set_ip(ip.toString()); @@ -189,11 +203,11 @@ void StratumMinerBytom::handleRequest_Submit(const string &idStr, const JsonNode auto StringToCheapHash = [](const std::string &str) -> uint64_t { // int merkleRootLen = std::min(32, (int)str.length()); - auto merkleRootBegin = (uint8_t *) &str[0]; + auto merkleRootBegin = (uint8_t *)&str[0]; // auto merkleRootEnd = merkleRootBegin + merkleRootLen; uint64_t res; - memcpy(&res, merkleRootBegin, std::min(8, (int) str.length())); + memcpy(&res, merkleRootBegin, std::min(8, (int)str.length())); return res; // vector merkleRootBin(merkleRootBegin, merkleRootEnd); @@ -214,24 +228,26 @@ void StratumMinerBytom::handleRequest_Submit(const string &idStr, const JsonNode share.set_status(StratumStatus::JOB_NOT_FOUND); session.rpc2ResponseBoolean(idStr, false, "Block expired"); } else { - EncodeBlockHeader_return encoded = EncodeBlockHeader(sJob->blockHeader_.version, - 
sJob->blockHeader_.height, - (char *) sJob->blockHeader_.previousBlockHash.c_str(), - sJob->blockHeader_.timestamp, - nonce, - sJob->blockHeader_.bits, - (char *) sJob->blockHeader_.transactionStatusHash.c_str(), - (char *) sJob->blockHeader_.transactionsMerkleRoot.c_str()); + EncodeBlockHeader_return encoded = EncodeBlockHeader( + sJob->blockHeader_.version, + sJob->blockHeader_.height, + (char *)sJob->blockHeader_.previousBlockHash.c_str(), + sJob->blockHeader_.timestamp, + nonce, + sJob->blockHeader_.bits, + (char *)sJob->blockHeader_.transactionStatusHash.c_str(), + (char *)sJob->blockHeader_.transactionsMerkleRoot.c_str()); int powResult = BytomUtils::checkProofOfWork(encoded, sJob, difficulty); share.set_status(powResult); if (powResult == StratumStatus::SOLVED) { std::cout << "share solved\n"; LOG(INFO) << "share solved"; - server.sendSolvedShare2Kafka(nonce, - encoded.r0, - share.height(), - Bytom_TargetCompactToDifficulty(sJob->blockHeader_.bits), - worker); + server.sendSolvedShare2Kafka( + nonce, + encoded.r0, + share.height(), + Bytom_TargetCompactToDifficulty(sJob->blockHeader_.bits), + worker); server.GetJobRepository()->markAllJobsAsStale(); handleShare(idStr, share.status(), share.sharediff()); } else if (powResult == StratumStatus::ACCEPT) { @@ -239,7 +255,8 @@ void StratumMinerBytom::handleRequest_Submit(const string &idStr, const JsonNode } else { std::string failMessage = "Unknown reason"; switch (share.status()) { - case StratumStatus::LOW_DIFFICULTY:failMessage = "Low difficulty share"; + case StratumStatus::LOW_DIFFICULTY: + failMessage = "Low difficulty share"; break; } session.rpc2ResponseBoolean(idStr, false, failMessage); @@ -252,13 +269,14 @@ void StratumMinerBytom::handleRequest_Submit(const string &idStr, const JsonNode DLOG(INFO) << share.toString(); // check if thers is invalid share spamming if (!StratumStatus::isAccepted(share.status())) { - int64_t invalidSharesNum = invalidSharesCounter_.sum(time(nullptr), 
INVALID_SHARE_SLIDING_WINDOWS_SIZE); + int64_t invalidSharesNum = invalidSharesCounter_.sum( + time(nullptr), INVALID_SHARE_SLIDING_WINDOWS_SIZE); // too much invalid shares, don't send them to kafka if (invalidSharesNum >= INVALID_SHARE_SLIDING_WINDOWS_MAX_LIMIT) { isSendShareToKafka = false; - LOG(WARNING) << "invalid share spamming, diff: " - << share.sharediff() << ", uid: " << worker.userId_ - << ", uname: \"" << worker.userName_ << "\", ip: " << clientIp + LOG(WARNING) << "invalid share spamming, diff: " << share.sharediff() + << ", uid: " << worker.userId_ << ", uname: \"" + << worker.userName_ << "\", ip: " << clientIp << "checkshare result: " << share.status(); } } @@ -268,16 +286,15 @@ void StratumMinerBytom::handleRequest_Submit(const string &idStr, const JsonNode std::string message; uint32_t size = 0; if (!share.SerializeToArrayWithVersion(message, size)) { - LOG(ERROR) << "share SerializeToBuffer failed!"<< share.toString(); + LOG(ERROR) << "share SerializeToBuffer failed!" 
<< share.toString(); return; } - server.sendShare2Kafka((const uint8_t *) message.data(), size); + server.sendShare2Kafka((const uint8_t *)message.data(), size); // string shareInHex; // Bin2Hex((uint8_t *) &share, sizeof(ShareBytom), shareInHex); // LOG(INFO) << "\nsendShare2Kafka ShareBytom:\n" // << "- size: " << sizeof(ShareBytom) << " bytes\n" // << "- hexvalue: " << shareInHex.c_str() << "\n"; - } } diff --git a/src/bytom/StratumMinerBytom.h b/src/bytom/StratumMinerBytom.h index 95faa5409..1e49a80a1 100644 --- a/src/bytom/StratumMinerBytom.h +++ b/src/bytom/StratumMinerBytom.h @@ -27,23 +27,24 @@ #include "StratumMiner.h" #include "StratumServerBytom.h" -class StratumMinerBytom : public StratumMinerBase -{ +class StratumMinerBytom : public StratumMinerBase { public: - StratumMinerBytom(StratumSessionBytom &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId); - - void handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) override; + StratumMinerBytom( + StratumSessionBytom &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId); + + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; private: void handleRequest_GetWork(const string &idStr, const JsonNode &jparams); void handleRequest_Submit(const string &idStr, const JsonNode &jparams); }; -#endif // #ifndef STRATUM_MINER_BYTOM_H_ +#endif // #ifndef STRATUM_MINER_BYTOM_H_ diff --git a/src/bytom/StratumServerBytom.cc b/src/bytom/StratumServerBytom.cc index ecde9584b..83567ac75 100644 --- a/src/bytom/StratumServerBytom.cc +++ b/src/bytom/StratumServerBytom.cc @@ -31,73 +31,82 @@ using namespace std; ///////////////////////////////////JobRepositoryBytom/////////////////////////////////// 
-JobRepositoryBytom::JobRepositoryBytom(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerBytom *server) - : JobRepositoryBase(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) -{ - +JobRepositoryBytom::JobRepositoryBytom( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerBytom *server) + : JobRepositoryBase(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) { } -shared_ptr JobRepositoryBytom::createStratumJobEx(shared_ptr sjob, bool isClean) -{ +shared_ptr JobRepositoryBytom::createStratumJobEx( + shared_ptr sjob, bool isClean) { return std::make_shared(sjob, isClean); } -void JobRepositoryBytom::broadcastStratumJob(shared_ptr sjobBase) -{ +void JobRepositoryBytom::broadcastStratumJob(shared_ptr sjobBase) { auto sjob = std::static_pointer_cast(sjobBase); - if(!sjob) - { - LOG(FATAL) << "JobRepositoryBytom::broadcastStratumJob error: cast StratumJobBytom failed"; + if (!sjob) { + LOG(FATAL) << "JobRepositoryBytom::broadcastStratumJob error: cast " + "StratumJobBytom failed"; return; } bool isClean = false; if (latestPreviousBlockHash_ != sjob->blockHeader_.previousBlockHash) { isClean = true; latestPreviousBlockHash_ = sjob->blockHeader_.previousBlockHash; - LOG(INFO) << "received new height stratum job, height: " << sjob->blockHeader_.height + LOG(INFO) << "received new height stratum job, height: " + << sjob->blockHeader_.height << ", prevhash: " << sjob->blockHeader_.previousBlockHash.c_str(); - } + } shared_ptr exJob(createStratumJobEx(sjob, isClean)); - { - ScopeLock sl(lock_); - - if (isClean) { - // mark all jobs as stale, should do this before insert new job - for (auto it : exJobs_) { - it.second->markStale(); - } - } - // insert new job - exJobs_[sjob->jobId_] = exJob; + if (isClean) { + // mark all jobs as stale, should do this before insert new job + for (auto it : exJobs_) { + it.second->markStale(); + } } + + // insert new job + 
exJobs_[sjob->jobId_] = exJob; + if (isClean) { sendMiningNotify(exJob); } } - ///////////////////////////////ServerBytom/////////////////////////////// -JobRepository *ServerBytom::createJobRepository(const char *kafkaBrokers, - const char *consumerTopic, - const string &fileLastNotifyTime) -{ - return new JobRepositoryBytom(kafkaBrokers, consumerTopic, fileLastNotifyTime, this); +JobRepository *ServerBytom::createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) { + return new JobRepositoryBytom( + kafkaBrokers, consumerTopic, fileLastNotifyTime, this); } -unique_ptr ServerBytom::createConnection(struct bufferevent *bev, struct sockaddr *saddr, const uint32_t sessionID) -{ +unique_ptr ServerBytom::createConnection( + struct bufferevent *bev, struct sockaddr *saddr, const uint32_t sessionID) { return boost::make_unique(*this, bev, saddr, sessionID); } -void ServerBytom::sendSolvedShare2Kafka(uint64_t nonce, const string &strHeader, - uint64_t height, uint64_t networkDiff, const StratumWorker &worker) -{ - string msg = Strings::Format("{\"nonce\":%lu,\"header\":\"%s\"," - "\"height\":%lu,\"networkDiff\":%" PRIu64 ",\"userId\":%ld," - "\"workerId\":%" PRId64 ",\"workerFullName\":\"%s\"}", - nonce, strHeader.c_str(), - height, networkDiff, worker.userId_, - worker.workerHashId_, filterWorkerName(worker.fullName_).c_str()); +void ServerBytom::sendSolvedShare2Kafka( + uint64_t nonce, + const string &strHeader, + uint64_t height, + uint64_t networkDiff, + const StratumWorker &worker) { + string msg = Strings::Format( + "{\"nonce\":%lu,\"header\":\"%s\"," + "\"height\":%lu,\"networkDiff\":%" PRIu64 + ",\"userId\":%ld," + "\"workerId\":%" PRId64 ",\"workerFullName\":\"%s\"}", + nonce, + strHeader.c_str(), + height, + networkDiff, + worker.userId_, + worker.workerHashId_, + filterWorkerName(worker.fullName_).c_str()); kafkaProducerSolvedShare_->produce(msg.c_str(), msg.length()); } \ No newline at end of file diff 
--git a/src/bytom/StratumServerBytom.h b/src/bytom/StratumServerBytom.h index b13184397..5d4912d9f 100644 --- a/src/bytom/StratumServerBytom.h +++ b/src/bytom/StratumServerBytom.h @@ -29,30 +29,44 @@ class JobRepositoryBytom; -class ServerBytom : public ServerBase -{ +class ServerBytom : public ServerBase { public: - ServerBytom(const int32_t shareAvgSeconds) : ServerBase(shareAvgSeconds) {} + ServerBytom(const int32_t shareAvgSeconds) + : ServerBase(shareAvgSeconds) {} - JobRepository* createJobRepository(const char *kafkaBrokers, - const char *consumerTopic, - const string &fileLastNotifyTime) override; + JobRepository *createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) override; - unique_ptr createConnection(struct bufferevent *bev, struct sockaddr *saddr, const uint32_t sessionID) override; - void sendSolvedShare2Kafka(uint64_t nonce, const string &strHeader, - uint64_t height, uint64_t networkDiff, const StratumWorker &worker); + unique_ptr createConnection( + struct bufferevent *bev, + struct sockaddr *saddr, + const uint32_t sessionID) override; + void sendSolvedShare2Kafka( + uint64_t nonce, + const string &strHeader, + uint64_t height, + uint64_t networkDiff, + const StratumWorker &worker); }; -class JobRepositoryBytom : public JobRepositoryBase -{ +class JobRepositoryBytom : public JobRepositoryBase { private: string latestPreviousBlockHash_; public: - JobRepositoryBytom(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerBytom *server); - shared_ptr createStratumJob() override { return std::make_shared(); } - shared_ptr createStratumJobEx(shared_ptr sjob, bool isClean) override; + JobRepositoryBytom( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerBytom *server); + shared_ptr createStratumJob() override { + return std::make_shared(); + } + shared_ptr + createStratumJobEx(shared_ptr sjob, bool 
isClean) override; void broadcastStratumJob(shared_ptr sjob) override; }; -#endif // STRATUM_SERVER_BYTOM_H_ +#endif // STRATUM_SERVER_BYTOM_H_ diff --git a/src/bytom/StratumSessionBytom.cc b/src/bytom/StratumSessionBytom.cc index b256ff26a..629aa1885 100644 --- a/src/bytom/StratumSessionBytom.cc +++ b/src/bytom/StratumSessionBytom.cc @@ -30,42 +30,52 @@ #include -StratumSessionBytom::StratumSessionBytom(ServerBytom &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1) - : StratumSessionBase(server, bev, saddr, extraNonce1), shortJobId_(1) { +StratumSessionBytom::StratumSessionBytom( + ServerBytom &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1) + : StratumSessionBase(server, bev, saddr, extraNonce1) + , shortJobId_(1) { } -void StratumSessionBytom::rpc2ResponseBoolean(const string &idStr, bool result, const string &failMessage) { +void StratumSessionBytom::rpc2ResponseBoolean( + const string &idStr, bool result, const string &failMessage) { if (result) { - const string s = Strings::Format("{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":{\"status\":\"OK\"},\"error\":null}\n", - idStr.c_str()); + const string s = Strings::Format( + "{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":{\"status\":\"OK\"}," + "\"error\":null}\n", + idStr.c_str()); sendData(s); } else { const string s = Strings::Format( - "{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":null,\"error\":{\"code\":-1, \"message\":\"%s\"}}\n", + "{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":null,\"error\":{\"code\":-1," + " \"message\":\"%s\"}}\n", idStr.c_str(), failMessage.c_str()); sendData(s); } } -void StratumSessionBytom::sendSetDifficulty(LocalJob &localJob, uint64_t difficulty) { +void StratumSessionBytom::sendSetDifficulty( + LocalJob &localJob, uint64_t difficulty) { // Bytom has no set difficulty method, but will change the target directly - static_cast(localJob).jobDifficulty_ = difficulty; + static_cast(localJob).jobDifficulty_ = + difficulty; } 
-void StratumSessionBytom::sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) { +void StratumSessionBytom::sendMiningNotify( + shared_ptr exJobPtr, bool isFirstJob) { auto &server = getServer(); /* Bytom difficulty logic (based on B3-Mimic repo) - constants - * Diff1: 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF - Sending miningNotify + * Diff1: + 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF Sending + miningNotify - target - Pool target is based from Diff1 and difficulty. target = Diff1 / difficulty - Miner difficulty logic: + Pool target is based from Diff1 and difficulty. target = Diff1 / + difficulty Miner difficulty logic: - use target Pool check submit (see StratumMinerBytom::handleRequest_Submit) */ @@ -79,17 +89,20 @@ void StratumSessionBytom::sendMiningNotify(shared_ptr exJobPtr, bo return; auto &ljob = addLocalJob(sJob->jobId_, shortJobId_++); - uint64_t jobDifficulty = server.isDevModeEnable_ ? server.devFixedDifficulty_ : ljob.jobDifficulty_; + uint64_t jobDifficulty = server.isDevModeEnable_ ? server.devFixedDifficulty_ + : ljob.jobDifficulty_; if (jobDifficulty == 0) - jobDifficulty = server.isDevModeEnable_ ? 1 : Bytom_TargetCompactToDifficulty(sJob->blockHeader_.bits); + jobDifficulty = server.isDevModeEnable_ + ? 
1 + : Bytom_TargetCompactToDifficulty(sJob->blockHeader_.bits); - uint64_t nonce = (((uint64_t) extraNonce1_) << 32); + uint64_t nonce = (((uint64_t)extraNonce1_) << 32); string notifyStr, nonceStr, versionStr, heightStr, timestampStr, bitsStr; - Bin2HexR((uint8_t *) &nonce, 8, nonceStr); - Bin2Hex((uint8_t *) &sJob->blockHeader_.version, 8, versionStr); - Bin2Hex((uint8_t *) &sJob->blockHeader_.height, 8, heightStr); - Bin2Hex((uint8_t *) &sJob->blockHeader_.timestamp, 8, timestampStr); - Bin2Hex((uint8_t *) &sJob->blockHeader_.bits, 8, bitsStr); + Bin2HexR((uint8_t *)&nonce, 8, nonceStr); + Bin2Hex((uint8_t *)&sJob->blockHeader_.version, 8, versionStr); + Bin2Hex((uint8_t *)&sJob->blockHeader_.height, 8, heightStr); + Bin2Hex((uint8_t *)&sJob->blockHeader_.timestamp, 8, timestampStr); + Bin2Hex((uint8_t *)&sJob->blockHeader_.bits, 8, bitsStr); string targetStr; { @@ -97,7 +110,8 @@ void StratumSessionBytom::sendMiningNotify(shared_ptr exJobPtr, bo Bytom_DifficultyToTargetBinary(jobDifficulty, targetBin); // trim the zeroes to reduce bandwidth unsigned int endIdx = targetBin.size() - 1; - for (; endIdx > 0; --endIdx) // > 0 (not >=0) because need to print at least 1 byte + for (; endIdx > 0; + --endIdx) // > 0 (not >=0) because need to print at least 1 byte { if (targetBin[endIdx] != 0) break; @@ -132,7 +146,8 @@ void StratumSessionBytom::sendMiningNotify(shared_ptr exJobPtr, bo if (isFirstJob) { notifyStr = Strings::Format( - "{\"id\": 1, \"jsonrpc\": \"2.0\", \"result\": {\"id\": \"%s\", \"job\": %s, \"status\": \"OK\"}, \"error\": null}", + "{\"id\": 1, \"jsonrpc\": \"2.0\", \"result\": {\"id\": \"%s\", " + "\"job\": %s, \"status\": \"OK\"}, \"error\": null}", server.isDevModeEnable_ ? 
"antminer_1" : worker_.fullName_.c_str(), jobString.c_str()); } else { @@ -140,14 +155,15 @@ void StratumSessionBytom::sendMiningNotify(shared_ptr exJobPtr, bo "{\"jsonrpc\": \"2.0\", \"method\":\"job\", \"params\": %s}", jobString.c_str()); } - // LOG(INFO) << "Difficulty: " << ljob.jobDifficulty_ << "\nsendMiningNotify " << notifyStr.c_str(); + // LOG(INFO) << "Difficulty: " << ljob.jobDifficulty_ << "\nsendMiningNotify " + // << notifyStr.c_str(); sendData(notifyStr); } -bool StratumSessionBytom::validate(const JsonNode &jmethod, const JsonNode &jparams) { +bool StratumSessionBytom::validate( + const JsonNode &jmethod, const JsonNode &jparams) { - if (jmethod.type() == Utilities::JS::type::Str && - jmethod.size() != 0 && + if (jmethod.type() == Utilities::JS::type::Str && jmethod.size() != 0 && jparams.type() == Utilities::JS::type::Obj) { return true; } @@ -155,24 +171,23 @@ bool StratumSessionBytom::validate(const JsonNode &jmethod, const JsonNode &jpar return false; } -void StratumSessionBytom::handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionBytom::handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) { if (method == "login") { handleRequest_Authorize(idStr, jparams, jroot); - } - else if (dispatcher_) { + } else { dispatcher_->handleRequest(idStr, method, jparams, jroot); } } -void StratumSessionBytom::handleRequest_Authorize(const string &idStr, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionBytom::handleRequest_Authorize( + const string &idStr, const JsonNode &jparams, const JsonNode &jroot) { state_ = SUBSCRIBED; - auto params = const_cast (jparams); + auto params = const_cast(jparams); string fullName = params["login"].str(); string password = params["pass"].str(); @@ -180,12 +195,14 @@ void StratumSessionBytom::handleRequest_Authorize(const string &idStr, return; } 
-unique_ptr StratumSessionBytom::createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) { - return boost::make_unique(*this, - *getServer().defaultDifficultyController_, - clientAgent, - workerName, - workerId); +unique_ptr StratumSessionBytom::createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) { + return boost::make_unique( + *this, + *getServer().defaultDifficultyController_, + clientAgent, + workerName, + workerId); } diff --git a/src/bytom/StratumSessionBytom.h b/src/bytom/StratumSessionBytom.h index ecfda1395..460a65d1f 100644 --- a/src/bytom/StratumSessionBytom.h +++ b/src/bytom/StratumSessionBytom.h @@ -30,30 +30,39 @@ class StratumSessionBytom : public StratumSessionBase { public: - StratumSessionBytom(ServerBytom &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1); + StratumSessionBytom( + ServerBytom &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1); - void rpc2ResponseBoolean(const string &idStr, bool result, const string& failMessage = ""); + void rpc2ResponseBoolean( + const string &idStr, bool result, const string &failMessage = ""); void sendSetDifficulty(LocalJob &localJob, uint64_t difficulty) override; - void sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; - void responseTrue(const string &idStr) override { return rpc2ResponseBoolean(idStr, true); }; + void + sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; + void responseTrue(const string &idStr) override { + return rpc2ResponseBoolean(idStr, true); + }; protected: bool validate(const JsonNode &jmethod, const JsonNode &jparams) override; - void handleRequest(const std::string &idStr, const std::string &method, - const JsonNode &jparams, const JsonNode &jroot) override; - void handleRequest_Authorize(const std::string &idStr, - const JsonNode &jparams, - const JsonNode &jroot); + void handleRequest( + const 
std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; + void handleRequest_Authorize( + const std::string &idStr, const JsonNode &jparams, const JsonNode &jroot); + public: - std::unique_ptr createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) override; + std::unique_ptr createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) override; private: - uint8_t shortJobId_; //jobId starts from 1 + uint8_t shortJobId_; // jobId starts from 1 }; -#endif // #ifndef STRATUM_SESSION_BYTOM_H_ +#endif // #ifndef STRATUM_SESSION_BYTOM_H_ diff --git a/src/decred/BlockMakerDecred.cc b/src/decred/BlockMakerDecred.cc index 60137f66d..393d02870 100644 --- a/src/decred/BlockMakerDecred.cc +++ b/src/decred/BlockMakerDecred.cc @@ -26,39 +26,48 @@ #include "StratumDecred.h" #include "DecredUtils.h" -BlockMakerDecred::BlockMakerDecred(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB) - : BlockMaker(def, kafkaBrokers, poolDB) -{ +BlockMakerDecred::BlockMakerDecred( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB) + : BlockMaker(def, kafkaBrokers, poolDB) { } -void BlockMakerDecred::processSolvedShare(rd_kafka_message_t *rkmessage) -{ +void BlockMakerDecred::processSolvedShare(rd_kafka_message_t *rkmessage) { if (rkmessage->len != sizeof(FoundBlockDecred)) { return; } auto foundBlock = reinterpret_cast(rkmessage->payload); - // TODO: Think about a better way to do it asynchronously for all block makers... + // TODO: Think about a better way to do it asynchronously for all block + // makers... 
for (auto &node : def()->nodes) { - thread t(std::bind(&BlockMakerDecred::submitBlockHeader, this, node, foundBlock->header_)); + thread t(std::bind( + &BlockMakerDecred::submitBlockHeader, this, node, foundBlock->header_)); t.detach(); } thread d(std::bind(&BlockMakerDecred::saveBlockToDB, this, *foundBlock)); d.detach(); } -void BlockMakerDecred::submitBlockHeader(const NodeDefinition& node, const BlockHeaderDecred& header) -{ - // RPC call getwork with padded block header as data parameter is equivalent to submitbblock - string request = "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getwork\",\"params\":[\""; - request += HexStr(BEGIN(header), END(header)) + "8000000100000000000005a0\"]}"; +void BlockMakerDecred::submitBlockHeader( + const NodeDefinition &node, const BlockHeaderDecred &header) { + // RPC call getwork with padded block header as data parameter is equivalent + // to submitbblock + string request = + "{\"jsonrpc\":\"1.0\",\"id\":\"1\",\"method\":\"getwork\",\"params\":[\""; + request += + HexStr(BEGIN(header), END(header)) + "8000000100000000000005a0\"]}"; LOG(INFO) << "submit block to: " << node.rpcAddr_ << ", request: " << request; // try N times for (size_t i = 0; i < 3; i++) { string response; - bool res = blockchainNodeRpcCall(node.rpcAddr_.c_str(), node.rpcUserPwd_.c_str(), - request.c_str(), response); + bool res = blockchainNodeRpcCall( + node.rpcAddr_.c_str(), + node.rpcUserPwd_.c_str(), + request.c_str(), + response); // success if (res == true) { @@ -71,24 +80,35 @@ void BlockMakerDecred::submitBlockHeader(const NodeDefinition& node, const Block } } -void BlockMakerDecred::saveBlockToDB(const FoundBlockDecred &foundBlock) -{ - auto& header = foundBlock.header_; +void BlockMakerDecred::saveBlockToDB(const FoundBlockDecred &foundBlock) { + auto &header = foundBlock.header_; const string nowStr = date("%F %T"); - string sql = Strings::Format("INSERT INTO `found_blocks` " - " (`puid`, `worker_id`, `worker_full_name`, `job_id`" - " 
,`height`, `hash`, `rewards`, `size`, `prev_hash`" - " ,`bits`, `version`, `voters`, `network`, `created_at`)" - " VALUES (%d,%" PRId64",\"%s\", %" PRIu64",%d,\"%s\"" - " ,%" PRId64",%d,\"%s\",%u,%d,%u,%u,\"%s\"); ", - foundBlock.userId_, foundBlock.workerId_, - // filter again, just in case - filterWorkerName(foundBlock.workerFullName_).c_str(), - foundBlock.jobId_, header.height.value(), - header.getHash().ToString().c_str(), - GetBlockRewardDecredWork(header.height.value(), header.voters.value(), NetworkParamsDecred::get(foundBlock.network_)), - header.size.value(), header.prevBlock.ToString().c_str(), header.nBits.value(), - header.version.value(), header.voters.value(), foundBlock.network_, nowStr.c_str()); + string sql = Strings::Format( + "INSERT INTO `found_blocks` " + " (`puid`, `worker_id`, `worker_full_name`, `job_id`" + " ,`height`, `hash`, `rewards`, `size`, `prev_hash`" + " ,`bits`, `version`, `voters`, `network`, `created_at`)" + " VALUES (%d,%" PRId64 ",\"%s\", %" PRIu64 + ",%d,\"%s\"" + " ,%" PRId64 ",%d,\"%s\",%u,%d,%u,%u,\"%s\"); ", + foundBlock.userId_, + foundBlock.workerId_, + // filter again, just in case + filterWorkerName(foundBlock.workerFullName_).c_str(), + foundBlock.jobId_, + header.height.value(), + header.getHash().ToString().c_str(), + GetBlockRewardDecredWork( + header.height.value(), + header.voters.value(), + NetworkParamsDecred::get(foundBlock.network_)), + header.size.value(), + header.prevBlock.ToString().c_str(), + header.nBits.value(), + header.version.value(), + header.voters.value(), + foundBlock.network_, + nowStr.c_str()); LOG(INFO) << "BlockMakerDecred::saveBlockToDB: " << sql; diff --git a/src/decred/BlockMakerDecred.h b/src/decred/BlockMakerDecred.h index fc29f65f3..dd801586e 100644 --- a/src/decred/BlockMakerDecred.h +++ b/src/decred/BlockMakerDecred.h @@ -30,14 +30,18 @@ class BlockMakerDecred : public BlockMaker { public: - BlockMakerDecred(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB); 
+ BlockMakerDecred( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB); protected: void processSolvedShare(rd_kafka_message_t *rkmessage) override; private: - void submitBlockHeader(const NodeDefinition& node, const BlockHeaderDecred& header); - void saveBlockToDB(const FoundBlockDecred& foundBlock); + void submitBlockHeader( + const NodeDefinition &node, const BlockHeaderDecred &header); + void saveBlockToDB(const FoundBlockDecred &foundBlock); }; #endif diff --git a/src/decred/CommonDecred.cc b/src/decred/CommonDecred.cc index a3678aff4..6c6f06a16 100644 --- a/src/decred/CommonDecred.cc +++ b/src/decred/CommonDecred.cc @@ -27,11 +27,9 @@ extern "C" { #include "libsph/sph_blake.h" - } -uint256 BlockHeaderDecred::getHash() const -{ +uint256 BlockHeaderDecred::getHash() const { uint256 hash; sph_blake256_context ctx; sph_blake256_init(&ctx); @@ -40,43 +38,42 @@ uint256 BlockHeaderDecred::getHash() const return hash; } -const NetworkParamsDecred& NetworkParamsDecred::get(NetworkDecred network) -{ +const NetworkParamsDecred &NetworkParamsDecred::get(NetworkDecred network) { static NetworkParamsDecred mainnetParams{ - arith_uint256{}.SetCompact(0x1d00ffff), - 3119582664, - 100, - 101, - 6144, - 6, - 3, - 1, - 4096, - 5, + arith_uint256{}.SetCompact(0x1d00ffff), + 3119582664, + 100, + 101, + 6144, + 6, + 3, + 1, + 4096, + 5, }; static NetworkParamsDecred testnetParams{ - arith_uint256{}.SetCompact(0x1e00ffff), - 2500000000, - 100, - 101, - 2048, - 6, - 3, - 1, - 768, - 5, + arith_uint256{}.SetCompact(0x1e00ffff), + 2500000000, + 100, + 101, + 2048, + 6, + 3, + 1, + 768, + 5, }; static NetworkParamsDecred simnetParams{ - arith_uint256{}.SetCompact(0x207fffff), - 50000000000, - 100, - 101, - 128, - 6, - 3, - 1, - 16 + (64 * 2), - 5, + arith_uint256{}.SetCompact(0x207fffff), + 50000000000, + 100, + 101, + 128, + 6, + 3, + 1, + 16 + (64 * 2), + 5, }; switch (network) { diff --git a/src/decred/CommonDecred.h b/src/decred/CommonDecred.h index 
48057ee03..5c9586525 100644 --- a/src/decred/CommonDecred.h +++ b/src/decred/CommonDecred.h @@ -30,8 +30,9 @@ #include #include -// Decred block header (https://docs.decred.org/advanced/block-header-specifications/) -// Byte arrays are used so that the members are packed +// Decred block header +// (https://docs.decred.org/advanced/block-header-specifications/) Byte arrays +// are used so that the members are packed struct BlockHeaderDecred { boost::endian::little_uint32_buf_t version; uint256 prevBlock; @@ -55,7 +56,8 @@ struct BlockHeaderDecred { uint256 getHash() const; }; -static_assert(sizeof(BlockHeaderDecred) == 180, "Decred block header type is invalid"); +static_assert( + sizeof(BlockHeaderDecred) == 180, "Decred block header type is invalid"); // CMD_MAGIC_NUMBER number from the network type enum class NetworkDecred : uint32_t { @@ -72,7 +74,8 @@ inline bool operator<(NetworkDecred lhs, NetworkDecred rhs) { case NetworkDecred::TestNet: return lhs != NetworkDecred::MainNet && lhs != NetworkDecred::TestNet; case NetworkDecred::SimNet: - return lhs != NetworkDecred::MainNet && lhs != NetworkDecred::TestNet && lhs != NetworkDecred::SimNet; + return lhs != NetworkDecred::MainNet && lhs != NetworkDecred::TestNet && + lhs != NetworkDecred::SimNet; } return false; } @@ -89,7 +92,7 @@ struct NetworkParamsDecred { int64_t stakeValidationHeight; uint16_t ticketsPerBlock; - static const NetworkParamsDecred& get(NetworkDecred network); + static const NetworkParamsDecred &get(NetworkDecred network); }; #endif diff --git a/src/decred/DecredUtils.cc b/src/decred/DecredUtils.cc index 324d33066..5a364c699 100644 --- a/src/decred/DecredUtils.cc +++ b/src/decred/DecredUtils.cc @@ -25,8 +25,8 @@ #include "DecredUtils.h" #include "CommonDecred.h" -static int64_t GetBlockRewaredDecred(uint32_t height, const NetworkParamsDecred& params) -{ +static int64_t +GetBlockRewaredDecred(uint32_t height, const NetworkParamsDecred ¶ms) { int64_t iterations = height / 
params.subsidyReductionInterval; int64_t subsidy = params.baseSubsidy; for (int64_t i = 0; i < iterations; i++) { @@ -36,10 +36,12 @@ static int64_t GetBlockRewaredDecred(uint32_t height, const NetworkParamsDecred& return subsidy; } -int64_t GetBlockRewardDecredWork(uint32_t height, uint16_t voters, const NetworkParamsDecred& params) -{ - int64_t totalProportion = params.workRewardProportion + params.stakeRewardProportion + params.blockTaxProportion; - int64_t powSubsidy = GetBlockRewaredDecred(height, params) * params.workRewardProportion / totalProportion; +int64_t GetBlockRewardDecredWork( + uint32_t height, uint16_t voters, const NetworkParamsDecred ¶ms) { + int64_t totalProportion = params.workRewardProportion + + params.stakeRewardProportion + params.blockTaxProportion; + int64_t powSubsidy = GetBlockRewaredDecred(height, params) * + params.workRewardProportion / totalProportion; if (height < params.stakeValidationHeight) { return powSubsidy; } diff --git a/src/decred/DecredUtils.h b/src/decred/DecredUtils.h index 9a2e025a2..4eb27ee4b 100644 --- a/src/decred/DecredUtils.h +++ b/src/decred/DecredUtils.h @@ -29,6 +29,7 @@ struct NetworkParamsDecred; -int64_t GetBlockRewardDecredWork(uint32_t height, uint16_t voters, const NetworkParamsDecred& params); +int64_t GetBlockRewardDecredWork( + uint32_t height, uint16_t voters, const NetworkParamsDecred ¶ms); #endif diff --git a/src/decred/GwMakerDecred.cc b/src/decred/GwMakerDecred.cc index b38c597a1..61c728fb0 100644 --- a/src/decred/GwMakerDecred.cc +++ b/src/decred/GwMakerDecred.cc @@ -28,24 +28,12 @@ ///////////////////////////////GwMakerHandlerDecred//////////////////////////////////// bool GwMakerHandlerDecred::checkFields(JsonNode &r) { - if (r.type() != Utilities::JS::type::Array || - r.array().size() != 2) { - return false; - } - - auto& r0 = r.array().at(0); - if (r0["result"].type() != Utilities::JS::type::Int) { - return false; - } - - auto& r1 = r.array().at(1); - if (r1["result"].type() != 
Utilities::JS::type::Obj || - r1["result"]["data"].type() != Utilities::JS::type::Str || - r1["result"]["data"].size() != 384 || - !IsHex(r1["result"]["data"].str()) || - r1["result"]["target"].type() != Utilities::JS::type::Str || - r1["result"]["target"].size() != 64 || - !IsHex(r1["result"]["target"].str())) { + if (r["result"].type() != Utilities::JS::type::Obj || + r["result"]["data"].type() != Utilities::JS::type::Str || + r["result"]["data"].size() != 384 || !IsHex(r["result"]["data"].str()) || + r["result"]["target"].type() != Utilities::JS::type::Str || + r["result"]["target"].size() != 64 || + !IsHex(r["result"]["target"].str())) { return false; } @@ -53,25 +41,21 @@ bool GwMakerHandlerDecred::checkFields(JsonNode &r) { } string GwMakerHandlerDecred::constructRawMsg(JsonNode &r) { - auto& r0 = r.array().at(0); - auto& r1 = r.array().at(1); LOG(INFO) << "chain: " << def_.chainType_ << ", topic: " << def_.rawGwTopic_ - << ", data: " << r1["result"]["data"].str() - << ", target: " << r1["result"]["target"].str() - << ", network: " << r0["result"].uint32(); - - return Strings::Format("{\"created_at_ts\":%u," - "\"chainType\":\"%s\"," - "\"rpcAddress\":\"%s\"," - "\"rpcUserPwd\":\"%s\"," - "\"data\":\"%s\"," - "\"target\":\"%s\"," - "\"network\":%u}", - (uint32_t)time(nullptr), - def_.chainType_.c_str(), - def_.rpcAddr_.c_str(), - def_.rpcUserPwd_.c_str(), - r1["result"]["data"].str().c_str(), - r1["result"]["target"].str().c_str(), - r0["result"].uint32()); + << ", data: " << r["result"]["data"].str() + << ", target: " << r["result"]["target"].str(); + + return Strings::Format( + "{\"created_at_ts\":%u," + "\"chainType\":\"%s\"," + "\"rpcAddress\":\"%s\"," + "\"rpcUserPwd\":\"%s\"," + "\"data\":\"%s\"," + "\"target\":\"%s\"}", + (uint32_t)time(nullptr), + def_.chainType_.c_str(), + def_.rpcAddr_.c_str(), + def_.rpcUserPwd_.c_str(), + r["result"]["data"].str().c_str(), + r["result"]["target"].str().c_str()); } diff --git a/src/decred/GwMakerDecred.h 
b/src/decred/GwMakerDecred.h index 6999c3d98..c0ddc953a 100644 --- a/src/decred/GwMakerDecred.h +++ b/src/decred/GwMakerDecred.h @@ -29,14 +29,12 @@ #include "GwMaker.h" #include "utilities_js.hpp" -class GwMakerHandlerDecred : public GwMakerHandlerJson -{ +class GwMakerHandlerDecred : public GwMakerHandlerJson { bool checkFields(JsonNode &r) override; string constructRawMsg(JsonNode &r) override; - string getRequestData() override - { - return "[{\"jsonrpc\": \"2.0\", \"method\": \"getcurrentnet\", \"params\": [], \"id\": 0}" - ",{\"jsonrpc\": \"2.0\", \"method\": \"getwork\", \"params\": [], \"id\": 1}]"; + string getRequestData() override { + return "{\"jsonrpc\": \"2.0\", \"method\": \"getwork\", \"params\": [], " + "\"id\": 1}"; } }; diff --git a/src/decred/JobMakerDecred.cc b/src/decred/JobMakerDecred.cc index 389f7d0e0..20ab3296a 100644 --- a/src/decred/JobMakerDecred.cc +++ b/src/decred/JobMakerDecred.cc @@ -33,30 +33,27 @@ #include #include -#define OFFSET_AND_SIZE_DECRED(f) offsetof(BlockHeaderDecred, f) * 2, sizeof(static_cast(nullptr)->f) * 2 +#define OFFSET_AND_SIZE_DECRED(f) \ + offsetof(BlockHeaderDecred, f) * 2, \ + sizeof(static_cast(nullptr)->f) * 2 using std::ostream; -static ostream& operator<<(ostream& os, const GetWorkDecred& work) -{ +static ostream &operator<<(ostream &os, const GetWorkDecred &work) { os << "data = " << work.data << ", target = " << work.target << ", created at = " << work.createdAt << ", height = " << work.height - << ", network = " << static_cast(work.network) << ", voters = " << work.voters - << ", size = " << work.size; + << ", voters = " << work.voters << ", size = " << work.size; return os; } ////////////////////////////////JobMakerHandlerDecred////////////////////////////////// -JobMakerHandlerDecred::JobMakerHandlerDecred() -{ +JobMakerHandlerDecred::JobMakerHandlerDecred() { } -bool JobMakerHandlerDecred::processMsg(const string &msg) -{ +bool JobMakerHandlerDecred::processMsg(const string &msg) { 
clearTimeoutWorks(); JsonNode j; - if (!JsonNode::parse(msg.c_str(), msg.c_str() + msg.length(), j)) - { + if (!JsonNode::parse(msg.c_str(), msg.c_str() + msg.length(), j)) { LOG(ERROR) << "deserialize decred work failed " << msg; return false; } @@ -67,61 +64,63 @@ bool JobMakerHandlerDecred::processMsg(const string &msg) return processMsg(j); } -string JobMakerHandlerDecred::makeStratumJobMsg() -{ - if (works_.empty()) return string{}; +string JobMakerHandlerDecred::makeStratumJobMsg() { + if (works_.empty()) + return string{}; - auto& work = *works_.get().rbegin(); - auto& data = work.data; + auto &work = *works_.get().rbegin(); + auto &data = work.data; auto jobId = generateJobId(djb2(data.c_str())); auto prevHash = data.substr(OFFSET_AND_SIZE_DECRED(prevBlock)); auto merkelRootOffset = offsetof(BlockHeaderDecred, merkelRoot); - auto coinBase1 = data.substr(merkelRootOffset * 2, (offsetof(BlockHeaderDecred, extraData) - merkelRootOffset) * 2); + auto coinBase1 = data.substr( + merkelRootOffset * 2, + (offsetof(BlockHeaderDecred, extraData) - merkelRootOffset) * 2); auto coinBase2 = data.substr(OFFSET_AND_SIZE_DECRED(stakeVersion)); auto version = data.substr(OFFSET_AND_SIZE_DECRED(version)); - return Strings::Format("{\"jobId\":%" PRIu64 - ",\"prevHash\":\"%s\"" - ",\"coinBase1\":\"%s\"" - ",\"coinBase2\":\"%s\"" - ",\"version\":\"%s\"" - ",\"target\":\"%s\"" - ",\"network\":%" PRIu32 - "}", - jobId, - prevHash.c_str(), - coinBase1.c_str(), - coinBase2.c_str(), - version.c_str(), - work.target.c_str(), - static_cast(work.network)); + return Strings::Format( + "{\"jobId\":%" PRIu64 + ",\"prevHash\":\"%s\"" + ",\"coinBase1\":\"%s\"" + ",\"coinBase2\":\"%s\"" + ",\"version\":\"%s\"" + ",\"target\":\"%s\"}", + jobId, + prevHash.c_str(), + coinBase1.c_str(), + coinBase2.c_str(), + version.c_str(), + work.target.c_str()); } -bool JobMakerHandlerDecred::processMsg(JsonNode &j) -{ +bool JobMakerHandlerDecred::processMsg(JsonNode &j) { auto data = j["data"].str(); auto 
target = j["target"].str(); auto createdAt = j["created_at_ts"].uint32(); - auto network = static_cast(j["network"].uint32()); auto votersString = data.substr(OFFSET_AND_SIZE_DECRED(voters)); - auto voters = boost::endian::big_to_native(static_cast(strtoul(votersString.c_str(), nullptr, 16))); + auto voters = boost::endian::big_to_native( + static_cast(strtoul(votersString.c_str(), nullptr, 16))); auto sizeString = data.substr(OFFSET_AND_SIZE_DECRED(size)); - auto size = boost::endian::big_to_native(static_cast(strtoul(sizeString.c_str(), nullptr, 16))); + auto size = boost::endian::big_to_native( + static_cast(strtoul(sizeString.c_str(), nullptr, 16))); auto heightString = data.substr(OFFSET_AND_SIZE_DECRED(height)); - auto height = boost::endian::big_to_native(static_cast(strtoul(heightString.c_str(), nullptr, 16))); + auto height = boost::endian::big_to_native( + static_cast(strtoul(heightString.c_str(), nullptr, 16))); if (size == 0 || height == 0) { - LOG(ERROR) << "current work is invalid: data = " << data << ", target = " << target << ", created at = " << createdAt; + LOG(ERROR) << "current work is invalid: data = " << data + << ", target = " << target << ", created at = " << createdAt; return false; } - // The rightmost element with the equivalent network has the highest height/voters/size due to the nature of composite key + // The rightmost element with the equivalent network has the highest + // height/voters/size due to the nature of composite key uint32_t bestHeight = 0; uint16_t bestVoters = 0; uint32_t bestSize = 0; auto &works = works_.get(); - auto r = works.equal_range(network); - if (r.first != works.end()) { - auto &bestWork = *(--r.second); + if (!works.empty()) { + auto &bestWork = *works.rbegin(); bestHeight = bestWork.height; bestVoters = bestWork.voters; bestSize = bestWork.size; @@ -131,22 +130,27 @@ bool JobMakerHandlerDecred::processMsg(JsonNode &j) // when the block height of two bitcoind is not synchronized. 
// The block height downs must past twice the time of stratumJobInterval_ // without the higher height GW received. - if (height < bestHeight && createdAt > bestTime && createdAt - bestWork.createdAt < 2 * def()->jobInterval_) { - LOG(WARNING) << "skip low height work: data = " << data << ", target = " << target << ", created at = " << createdAt << - ", best height = " << bestHeight << ", best time = " << bestTime; + if (height < bestHeight && createdAt > bestTime && + createdAt - bestWork.createdAt < 2 * def()->jobInterval_) { + LOG(WARNING) << "skip low height work: data = " << data + << ", target = " << target << ", created at = " << createdAt + << ", best height = " << bestHeight + << ", best time = " << bestTime; return false; } } - auto p = works_.emplace(data, target, createdAt, height, network, size, voters); - auto& work = *p.first; + auto p = works_.emplace(data, target, createdAt, height, size, voters); + auto &work = *p.first; if (!p.second) { - LOG(ERROR) << "current work is duplicated with a previous work: " << work << ", current created at = " << createdAt; + LOG(ERROR) << "current work is duplicated with a previous work: " << work + << ", current created at = " << createdAt; return false; } if (height < bestHeight) { - LOG(WARNING) << "current work has a lower height: " << work << ", best height = " << bestHeight; + LOG(WARNING) << "current work has a lower height: " << work + << ", best height = " << bestHeight; return false; } @@ -162,43 +166,40 @@ bool JobMakerHandlerDecred::processMsg(JsonNode &j) return true; } -bool JobMakerHandlerDecred::validate(JsonNode &j) -{ +bool JobMakerHandlerDecred::validate(JsonNode &j) { // check fields are valid if (j.type() != Utilities::JS::type::Obj || - j["created_at_ts"].type() != Utilities::JS::type::Int || - j["rpcAddress"].type() != Utilities::JS::type::Str || - j["rpcUserPwd"].type() != Utilities::JS::type::Str || - j["data"].type() != Utilities::JS::type::Str || - j["data"].size() != 384 || - 
!IsHex(j["data"].str()) || - j["target"].type() != Utilities::JS::type::Str || - j["target"].size() != 64 || - !IsHex(j["target"].str()) || - j["network"].type() != Utilities::JS::type::Int) { - LOG(ERROR) << "work format not expected"; + j["created_at_ts"].type() != Utilities::JS::type::Int || + j["rpcAddress"].type() != Utilities::JS::type::Str || + j["rpcUserPwd"].type() != Utilities::JS::type::Str || + j["data"].type() != Utilities::JS::type::Str || j["data"].size() != 384 || + !IsHex(j["data"].str()) || + j["target"].type() != Utilities::JS::type::Str || + j["target"].size() != 64 || !IsHex(j["target"].str())) { + LOG(ERROR) << "work format not expected"; return false; - } + } // check timestamp - if (j["created_at_ts"].uint32() + def()->maxJobDelay_ < time(nullptr)) - { - LOG(ERROR) << "too old decred work: " << date("%F %T", j["created_at_ts"].uint32()); + if (j["created_at_ts"].uint32() + def()->maxJobDelay_ < time(nullptr)) { + LOG(ERROR) << "too old decred work: " + << date("%F %T", j["created_at_ts"].uint32()); return false; } return true; } -void JobMakerHandlerDecred::clearTimeoutWorks() -{ +void JobMakerHandlerDecred::clearTimeoutWorks() { // Ensure that we has at least one work, even if it expires. // So jobmaker can always generate jobs even if blockchain node does not - // update the response of getwork for a long time when there is no new transaction. - if (works_.size() <= 1) return; + // update the response of getwork for a long time when there is no new + // transaction. 
+ if (works_.size() <= 1) + return; auto tsNow = static_cast(time(nullptr)); - auto& works = works_.get(); + auto &works = works_.get(); auto iter = works.begin(); auto iend = works.end(); while (iter != iend) { diff --git a/src/decred/JobMakerDecred.h b/src/decred/JobMakerDecred.h index 0ab1404ac..d9e06f915 100644 --- a/src/decred/JobMakerDecred.h +++ b/src/decred/JobMakerDecred.h @@ -36,22 +36,24 @@ #include struct GetWorkDecred { - GetWorkDecred(const string &data, - const string &target, - uint32_t createdAt, - uint32_t height, - NetworkDecred network, - uint32_t size, - uint16_t voters) - : data(data), target(target), createdAt(createdAt), height(height), network(network), size(size), voters(voters) - { - } + GetWorkDecred( + const string &data, + const string &target, + uint32_t createdAt, + uint32_t height, + uint32_t size, + uint16_t voters) + : data(data) + , target(target) + , createdAt(createdAt) + , height(height) + , size(size) + , voters(voters) {} string data; string target; uint32_t createdAt; uint32_t height; - NetworkDecred network; uint32_t size; uint16_t voters; }; @@ -61,36 +63,37 @@ struct ByCreationTimeDecred {}; struct ByDataDecred {}; // The getwork job dimensions -// - Height + voters + size + creation time: ordered non-unique for best block retrieval +// - Height + voters + size + creation time: ordered non-unique for best block +// retrieval // - Creation time: ordered non-unique for timeout handling // - Data: hashed unique for duplication checks using GetWorkDecredMap = boost::multi_index_container< - GetWorkDecred, - boost::multi_index::indexed_by< - boost::multi_index::ordered_non_unique< - boost::multi_index::tag, - boost::multi_index::composite_key< - GetWorkDecred, - boost::multi_index::member, - boost::multi_index::member, - boost::multi_index::member, - boost::multi_index::member, - boost::multi_index::member - > - >, - boost::multi_index::ordered_non_unique< - boost::multi_index::tag, - boost::multi_index::member - >, - 
boost::multi_index::hashed_unique< - boost::multi_index::tag, - boost::multi_index::member - > - > ->; + GetWorkDecred, + boost::multi_index::indexed_by< + boost::multi_index::ordered_non_unique< + boost::multi_index::tag, + boost::multi_index::composite_key< + GetWorkDecred, + boost::multi_index:: + member, + boost::multi_index:: + member, + boost::multi_index:: + member, + boost::multi_index::member< + GetWorkDecred, + uint32_t, + &GetWorkDecred::createdAt>>>, + boost::multi_index::ordered_non_unique< + boost::multi_index::tag, + boost::multi_index:: + member>, + boost::multi_index::hashed_unique< + boost::multi_index::tag, + boost::multi_index:: + member>>>; -class JobMakerHandlerDecred : public GwJobMakerHandler -{ +class JobMakerHandlerDecred : public GwJobMakerHandler { public: JobMakerHandlerDecred(); bool processMsg(const string &msg) override; diff --git a/src/decred/StatisticsDecred.cc b/src/decred/StatisticsDecred.cc index 069f68e72..9de80732f 100644 --- a/src/decred/StatisticsDecred.cc +++ b/src/decred/StatisticsDecred.cc @@ -27,27 +27,11 @@ #include "DecredUtils.h" template <> -void ShareStatsDay::processShare(uint32_t hourIdx, const ShareDecred &share) { - ScopeLock sl(lock_); - - if (StratumStatus::isAccepted(share.status())) { - shareAccept1h_[hourIdx] += share.sharediff(); - shareAccept1d_ += share.sharediff(); - - double score = share.score(); - double reward = GetBlockRewardDecredWork(share.height(), share.voters(), NetworkParamsDecred::get((NetworkDecred)share.network())); - double earn = score * reward; - - score1h_[hourIdx] += score; - score1d_ += score; - earn1h_[hourIdx] += earn; - earn1d_ += earn; - - } else { - shareReject1h_[hourIdx] += share.sharediff(); - shareReject1d_ += share.sharediff(); - } - modifyHoursFlag_ |= (0x01u << hourIdx); +double ShareStatsDay::getShareReward(const ShareDecred &share) { + return GetBlockRewardDecredWork( + share.height(), + share.voters(), + NetworkParamsDecred::get((NetworkDecred)share.network())); } 
/////////////// template instantiation /////////////// diff --git a/src/decred/StratumDecred.cc b/src/decred/StratumDecred.cc index 057455d07..edf4d1c71 100644 --- a/src/decred/StratumDecred.cc +++ b/src/decred/StratumDecred.cc @@ -28,27 +28,24 @@ #include StratumJobDecred::StratumJobDecred() - : StratumJob() -{ + : StratumJob() { memset(&header_, 0, sizeof(BlockHeaderDecred)); } string StratumJobDecred::serializeToJson() const { - return Strings::Format("{\"jobId\":%" PRIu64 - ",\"prevHash\":\"%s\"" - ",\"coinBase1\":\"%s\"" - ",\"coinBase2\":\"%s\"" - ",\"version\":\"%s\"" - ",\"target\":\"%s\"" - ",\"network\":%" PRIu32 - "}", - jobId_, - getPrevHash().c_str(), - getCoinBase1().c_str(), - HexStr(BEGIN(header_.stakeVersion), END(header_.stakeVersion)).c_str(), - HexStr(BEGIN(header_.version), END(header_.version)).c_str(), - target_.ToString().c_str(), - static_cast(network_)); + return Strings::Format( + "{\"jobId\":%" PRIu64 + ",\"prevHash\":\"%s\"" + ",\"coinBase1\":\"%s\"" + ",\"coinBase2\":\"%s\"" + ",\"version\":\"%s\"" + ",\"target\":\"%s\"}", + jobId_, + getPrevHash().c_str(), + getCoinBase1().c_str(), + HexStr(BEGIN(header_.stakeVersion), END(header_.stakeVersion)).c_str(), + HexStr(BEGIN(header_.version), END(header_.version)).c_str(), + target_.ToString().c_str()); } bool StratumJobDecred::unserializeFromJson(const char *s, size_t len) { @@ -67,15 +64,13 @@ bool StratumJobDecred::unserializeFromJson(const char *s, size_t len) { j["version"].type() != Utilities::JS::type::Str || j["version"].size() != 8 || j["target"].type() != Utilities::JS::type::Str || - j["target"].size() != 64 || - j["network"].type() != Utilities::JS::type::Int) { + j["target"].size() != 64) { LOG(ERROR) << "parse stratum job failure: " << s; return false; } memset(&header_, 0, sizeof(BlockHeaderDecred)); jobId_ = j["jobId"].uint64(); - network_ = static_cast(j["network"].uint32()); #define UNSERIALIZE_SJOB_FIELD(n, d) \ auto n##Str = j[#n].str(); \ @@ -91,16 +86,14 @@ bool 
StratumJobDecred::unserializeFromJson(const char *s, size_t len) { UNSERIALIZE_SJOB_FIELD(version, &header_.version); UNSERIALIZE_SJOB_FIELD(target, target_.begin()); #undef UNSERIALIZE_SJOB_FIELD - + return true; } -string StratumJobDecred::getPrevHash() const -{ +string StratumJobDecred::getPrevHash() const { return HexStr(header_.prevBlock.begin(), header_.prevBlock.end()); } -string StratumJobDecred::getCoinBase1() const -{ +string StratumJobDecred::getCoinBase1() const { return HexStr(header_.merkelRoot.begin(), header_.extraData.begin()); } diff --git a/src/decred/StratumDecred.h b/src/decred/StratumDecred.h index 831bc79e1..6e68e17d4 100644 --- a/src/decred/StratumDecred.h +++ b/src/decred/StratumDecred.h @@ -28,8 +28,7 @@ #include "Stratum.h" #include "CommonDecred.h" #include "decred/decred.pb.h" -class FoundBlockDecred -{ +class FoundBlockDecred { public: uint64_t jobId_; int64_t workerId_; // found by who @@ -38,63 +37,66 @@ class FoundBlockDecred BlockHeaderDecred header_; NetworkDecred network_; - FoundBlockDecred(uint64_t jobId, int64_t workerId, int32_t userId, const string &workerFullName, const BlockHeaderDecred& header, NetworkDecred network) - : jobId_(jobId), workerId_(workerId), userId_(userId), header_(header), network_(network) - { - snprintf(workerFullName_, sizeof(workerFullName_), "%s", workerFullName.c_str()); + FoundBlockDecred( + uint64_t jobId, + int64_t workerId, + int32_t userId, + const string &workerFullName, + const BlockHeaderDecred &header, + NetworkDecred network) + : jobId_(jobId) + , workerId_(workerId) + , userId_(userId) + , header_(header) + , network_(network) { + snprintf( + workerFullName_, sizeof(workerFullName_), "%s", workerFullName.c_str()); } }; - - - -class ShareDecredBytesVersion -{ +class ShareDecredBytesVersion { public: - - uint32_t version_;//0 - uint32_t checkSum_;//4 - - int64_t workerHashId_;//8 - int32_t userId_;//16 - int32_t status_;//20 - int64_t timestamp_;//24 - IpAddress ip_;//32 - - uint64_t 
jobId_;//48 - uint64_t shareDiff_;//56 - uint32_t blkBits_;//64 - uint32_t height_;//68 - uint32_t nonce_;//72 - uint32_t sessionId_;//76 - NetworkDecred network_;//80 - uint16_t voters_;//84 + uint32_t version_; // 0 + uint32_t checkSum_; // 4 + + int64_t workerHashId_; // 8 + int32_t userId_; // 16 + int32_t status_; // 20 + int64_t timestamp_; // 24 + IpAddress ip_; // 32 + + uint64_t jobId_; // 48 + uint64_t shareDiff_; // 56 + uint32_t blkBits_; // 64 + uint32_t height_; // 68 + uint32_t nonce_; // 72 + uint32_t sessionId_; // 76 + NetworkDecred network_; // 80 + uint16_t voters_; // 84 uint32_t checkSum() const { uint64_t c = 0; - c += (uint64_t) version_; - c += (uint64_t) workerHashId_; - c += (uint64_t) userId_; - c += (uint64_t) status_; - c += (uint64_t) timestamp_; - c += (uint64_t) ip_.addrUint64[0]; - c += (uint64_t) ip_.addrUint64[1]; - c += (uint64_t) jobId_; - c += (uint64_t) shareDiff_; - c += (uint64_t) blkBits_; - c += (uint64_t) height_; - c += (uint64_t) nonce_; - c += (uint64_t) sessionId_; - c += (uint64_t) network_; - c += (uint64_t) voters_; - - return ((uint32_t) c) + ((uint32_t) (c >> 32)); + c += (uint64_t)version_; + c += (uint64_t)workerHashId_; + c += (uint64_t)userId_; + c += (uint64_t)status_; + c += (uint64_t)timestamp_; + c += (uint64_t)ip_.addrUint64[0]; + c += (uint64_t)ip_.addrUint64[1]; + c += (uint64_t)jobId_; + c += (uint64_t)shareDiff_; + c += (uint64_t)blkBits_; + c += (uint64_t)height_; + c += (uint64_t)nonce_; + c += (uint64_t)sessionId_; + c += (uint64_t)network_; + c += (uint64_t)voters_; + + return ((uint32_t)c) + ((uint32_t)(c >> 32)); } - }; - // [[[[ IMPORTANT REMINDER! ]]]] // Please keep the Share structure forward compatible. // That is: don't change it unless you add code so that @@ -103,14 +105,13 @@ class ShareDecredBytesVersion // and the new version will coexist for a while. 
// If there is no forward compatibility, one of the versions of Share // will be considered invalid, resulting in loss of users' hashrate. -class ShareDecred : public sharebase::DecredMsg -{ +class ShareDecred : public sharebase::DecredMsg { public: - - const static uint32_t BYTES_VERSION = 0x00200001u; // first 0020: DCR, second 0001: version 1, the share struct is bytes array - const static uint32_t CURRENT_VERSION = 0x00200002u; // first 0020: DCR, second 0002: version 2 - - + const static uint32_t BYTES_VERSION = + 0x00200001u; // first 0020: DCR, second 0001: version 1, the share struct + // is bytes array + const static uint32_t CURRENT_VERSION = + 0x00200002u; // first 0020: DCR, second 0002: version 2 ShareDecred() { set_version(ShareDecred::CURRENT_VERSION); @@ -156,86 +157,102 @@ class ShareDecred : public sharebase::DecredMsg set_ip(ip.toString()); } - double score() const - { - if (sharediff() == 0 || blkbits() == 0) - { + double score() const { + if (sharediff() == 0 || blkbits() == 0) { return 0.0; } - double networkDifficulty = NetworkParamsDecred::get((NetworkDecred)network()).powLimit.getdouble() / arith_uint256().SetCompact(blkbits()).getdouble(); + double networkDifficulty = + NetworkParamsDecred::get((NetworkDecred)network()) + .powLimit.getdouble() / + arith_uint256().SetCompact(blkbits()).getdouble(); - // Network diff may less than share diff on testnet or regression test network. - // On regression test network, the network diff may be zero. - // But no matter how low the network diff is, you can only dig one block at a time. - if (networkDifficulty < sharediff()) - { + // Network diff may less than share diff on testnet or regression test + // network. On regression test network, the network diff may be zero. But no + // matter how low the network diff is, you can only dig one block at a time. 
+ if (networkDifficulty < sharediff()) { return 1.0; } return sharediff() / networkDifficulty; } - bool isValid() const - { + bool isValid() const { if (version() != CURRENT_VERSION) { return false; } - if (jobid() == 0 || userid() == 0 || workerhashid() == 0 || - height() == 0 || blkbits() == 0 || sharediff() == 0) - { + if (jobid() == 0 || userid() == 0 || workerhashid() == 0 || height() == 0 || + blkbits() == 0 || sharediff() == 0) { return false; } return true; } - string toString() const - { - double networkDifficulty = NetworkParamsDecred::get((NetworkDecred)network()).powLimit.getdouble() / arith_uint256().SetCompact(blkbits()).getdouble(); - return Strings::Format("share(jobId: %" PRIu64 ", ip: %s, userId: %d, " - "workerId: %" PRId64 ", time: %u/%s, height: %u, " - "blkBits: %08x/%lf, shareDiff: %" PRIu64 ", " - "voters: %u, status: %d/%s)", - jobid(), ip().c_str(), userid(), - workerhashid(), timestamp(), date("%F %T", timestamp()).c_str(), height(), - blkbits(), networkDifficulty, sharediff(), - voters(), status(), StratumStatus::toString(status())); + string toString() const { + double networkDifficulty = + NetworkParamsDecred::get((NetworkDecred)network()) + .powLimit.getdouble() / + arith_uint256().SetCompact(blkbits()).getdouble(); + return Strings::Format( + "share(jobId: %" PRIu64 + ", ip: %s, userId: %d, " + "workerId: %" PRId64 + ", time: %u/%s, height: %u, " + "blkBits: %08x/%lf, shareDiff: %" PRIu64 + ", " + "voters: %u, status: %d/%s)", + jobid(), + ip().c_str(), + userid(), + workerhashid(), + timestamp(), + date("%F %T", timestamp()).c_str(), + height(), + blkbits(), + networkDifficulty, + sharediff(), + voters(), + status(), + StratumStatus::toString(status())); } - bool SerializeToBuffer(string& data, uint32_t& size) const{ + bool SerializeToBuffer(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size); if (!SerializeToArray((uint8_t *)data.data(), size)) { DLOG(INFO) << "base.SerializeToArray failed!" 
<< std::endl; return false; - } return true; } - bool UnserializeWithVersion(const uint8_t* data, uint32_t size){ + bool UnserializeWithVersion(const uint8_t *data, uint32_t size) { - if(nullptr == data || size <= 0) { + if (nullptr == data || size <= 0) { return false; } - const uint8_t * payload = data; - uint32_t version = *((uint32_t*)payload); + const uint8_t *payload = data; + uint32_t version = *((uint32_t *)payload); if (version == CURRENT_VERSION) { - if (!ParseFromArray((const uint8_t *)(payload + sizeof(uint32_t)), size - sizeof(uint32_t))) { + if (!ParseFromArray( + (const uint8_t *)(payload + sizeof(uint32_t)), + size - sizeof(uint32_t))) { DLOG(INFO) << "share ParseFromArray failed!"; return false; } - } else if (version == BYTES_VERSION && size == sizeof(ShareDecredBytesVersion)) { + } else if ( + version == BYTES_VERSION && size == sizeof(ShareDecredBytesVersion)) { - ShareDecredBytesVersion* share = (ShareDecredBytesVersion*) payload; + ShareDecredBytesVersion *share = (ShareDecredBytesVersion *)payload; if (share->checkSum() != share->checkSum_) { - DLOG(INFO) << "checkSum mismatched! checkSum_: " << share->checkSum_<< ", checkSum(): " << share->checkSum(); + DLOG(INFO) << "checkSum mismatched! 
checkSum_: " << share->checkSum_ + << ", checkSum(): " << share->checkSum(); return false; } @@ -263,16 +280,15 @@ class ShareDecred : public sharebase::DecredMsg return true; } - - bool SerializeToArrayWithLength(string& data, uint32_t& size) const { + bool SerializeToArrayWithLength(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size + sizeof(uint32_t)); - *((uint32_t*)data.data()) = size; - uint8_t * payload = (uint8_t *)data.data(); + *((uint32_t *)data.data()) = size; + uint8_t *payload = (uint8_t *)data.data(); if (!SerializeToArray(payload + sizeof(uint32_t), size)) { - DLOG(INFO) << "base.SerializeToArray failed!"; + DLOG(INFO) << "base.SerializeToArray failed!"; return false; } @@ -280,13 +296,12 @@ class ShareDecred : public sharebase::DecredMsg return true; } - - bool SerializeToArrayWithVersion(string& data, uint32_t& size) const { + bool SerializeToArrayWithVersion(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size + sizeof(uint32_t)); - uint8_t * payload = (uint8_t *)data.data(); - *((uint32_t*)payload) = version(); + uint8_t *payload = (uint8_t *)data.data(); + *((uint32_t *)payload) = version(); if (!SerializeToArray(payload + sizeof(uint32_t), size)) { DLOG(INFO) << "SerializeToArray failed!"; @@ -297,18 +312,16 @@ class ShareDecred : public sharebase::DecredMsg return true; } - uint32_t getsharelength() { - return IsInitialized() ? ByteSize() : 0; - } + uint32_t getsharelength() { return IsInitialized() ? 
ByteSize() : 0; } }; class StratumJobDecred : public StratumJob { public: - static const size_t CoinBase1Size = offsetof(BlockHeaderDecred, extraData) - offsetof(BlockHeaderDecred, merkelRoot); + static const size_t CoinBase1Size = offsetof(BlockHeaderDecred, extraData) - + offsetof(BlockHeaderDecred, merkelRoot); BlockHeaderDecred header_; uint256 target_; - NetworkDecred network_; StratumJobDecred(); string serializeToJson() const override; @@ -321,7 +334,10 @@ class StratumProtocolDecred { public: virtual ~StratumProtocolDecred() = default; virtual string getExtraNonce1String(uint32_t extraNonce1) const = 0; - virtual void setExtraNonces(BlockHeaderDecred &header, uint32_t extraNonce1, const vector &extraNonce2) = 0; + virtual void setExtraNonces( + BlockHeaderDecred &header, + uint32_t extraNonce1, + const vector &extraNonce2) = 0; }; class ServerDecred; @@ -333,8 +349,12 @@ struct StratumTraitsDecred { using JobDiffType = uint64_t; struct LocalJobType : public LocalJob { LocalJobType(uint64_t jobId, uint8_t shortJobId, uint32_t blkBits) - : LocalJob(jobId), shortJobId_(shortJobId), blkBits_(blkBits) {} - bool operator==(uint8_t shortJobId) const { return shortJobId_ == shortJobId; } + : LocalJob(jobId) + , shortJobId_(shortJobId) + , blkBits_(blkBits) {} + bool operator==(uint8_t shortJobId) const { + return shortJobId_ == shortJobId; + } uint8_t shortJobId_; uint32_t blkBits_; }; diff --git a/src/decred/StratumMinerDecred.cc b/src/decred/StratumMinerDecred.cc index a129f3dad..38e7b0854 100644 --- a/src/decred/StratumMinerDecred.cc +++ b/src/decred/StratumMinerDecred.cc @@ -32,24 +32,28 @@ #include -StratumMinerDecred::StratumMinerDecred(StratumSessionDecred &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) - : StratumMinerBase(session, diffController, clientAgent, workerName, workerId) { +StratumMinerDecred::StratumMinerDecred( + StratumSessionDecred &session, + const 
DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) + : StratumMinerBase( + session, diffController, clientAgent, workerName, workerId) { } -void StratumMinerDecred::handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumMinerDecred::handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) { if (method == "mining.submit") { handleRequest_Submit(idStr, jparams); } } -void StratumMinerDecred::handleRequest_Submit(const string &idStr, const JsonNode &jparams) { +void StratumMinerDecred::handleRequest_Submit( + const string &idStr, const JsonNode &jparams) { auto &session = getSession(); if (session.getState() != StratumSession::AUTHENTICATED) { session.responseError(idStr, StratumStatus::UNAUTHORIZED); @@ -66,8 +70,12 @@ void StratumMinerDecred::handleRequest_Submit(const string &idStr, const JsonNod // params[3] = nTime // params[4] = nonce if (jparams.children()->size() < 5 || - std::any_of(std::next(jparams.children()->begin()), jparams.children()->end(), - [](const JsonNode &n) { return n.type() != Utilities::JS::type::Str || !IsHex(n.str()); })) { + std::any_of( + std::next(jparams.children()->begin()), + jparams.children()->end(), + [](const JsonNode &n) { + return n.type() != Utilities::JS::type::Str || !IsHex(n.str()); + })) { session.responseError(idStr, StratumStatus::ILLEGAL_PARARMS); return; } @@ -79,7 +87,8 @@ void StratumMinerDecred::handleRequest_Submit(const string &idStr, const JsonNod return; } - auto shortJobId = static_cast(jparams.children()->at(1).uint32_hex()); + auto shortJobId = + static_cast(jparams.children()->at(1).uint32_hex()); auto ntime = jparams.children()->at(3).uint32_hex(); auto nonce = jparams.children()->at(4).uint32_hex(); @@ -92,9 +101,11 @@ void StratumMinerDecred::handleRequest_Submit(const string &idStr, const 
JsonNod // if can't find localJob, could do nothing session.responseError(idStr, StratumStatus::JOB_NOT_FOUND); - LOG(INFO) << "rejected share: " << StratumStatus::toString(StratumStatus::JOB_NOT_FOUND) - << ", worker: " << worker.fullName_ << ", Share(id: " << idStr << ", shortJobId: " - << static_cast(shortJobId) << ", nTime: " << ntime << "/" << date("%F %T", ntime) << ")"; + LOG(INFO) << "rejected share: " + << StratumStatus::toString(StratumStatus::JOB_NOT_FOUND) + << ", worker: " << worker.fullName_ << ", Share(id: " << idStr + << ", shortJobId: " << static_cast(shortJobId) + << ", nTime: " << ntime << "/" << date("%F %T", ntime) << ")"; return; } @@ -117,28 +128,33 @@ void StratumMinerDecred::handleRequest_Submit(const string &idStr, const JsonNod height = sjob->header_.height.value(); } - ShareDecred share(workerId_, - worker.userId_, - clientIp, - localJob->jobId_, - iter->second, - localJob->blkBits_, - height, - nonce, - session.getSessionId()); + ShareDecred share( + workerId_, + worker.userId_, + clientIp, + localJob->jobId_, + iter->second, + localJob->blkBits_, + height, + nonce, + session.getSessionId()); // we send share to kafka by default, but if there are lots of invalid // shares in a short time, we just drop them. 
bool isSendShareToKafka = true; - LocalShare - localShare(reinterpret_cast(extraNonce2.data())->value(), nonce, ntime); + LocalShare localShare( + reinterpret_cast(extraNonce2.data()) + ->value(), + nonce, + ntime); // can't find local share if (!localJob->addLocalShare(localShare)) { share.set_status(StratumStatus::DUPLICATE_SHARE); } else { - share.set_status(server.checkShare(share, exjob, extraNonce2, ntime, nonce, worker.fullName_)); + share.set_status(server.checkShare( + share, exjob, extraNonce2, ntime, nonce, worker.fullName_)); } if (!handleShare(idStr, share.status(), share.sharediff())) { @@ -149,19 +165,21 @@ void StratumMinerDecred::handleRequest_Submit(const string &idStr, const JsonNod DLOG(INFO) << share.toString(); if (!StratumStatus::isAccepted(share.status())) { - // log all rejected share to answer "Why the rejection rate of my miner increased?" + // log all rejected share to answer "Why the rejection rate of my miner + // increased?" LOG(INFO) << "rejected share: " << StratumStatus::toString(share.status()) << ", worker: " << worker.fullName_ << ", " << share.toString(); // check if thers is invalid share spamming - int64_t invalidSharesNum = invalidSharesCounter_.sum(time(nullptr), - INVALID_SHARE_SLIDING_WINDOWS_SIZE); + int64_t invalidSharesNum = invalidSharesCounter_.sum( + time(nullptr), INVALID_SHARE_SLIDING_WINDOWS_SIZE); // too much invalid shares, don't send them to kafka if (invalidSharesNum >= INVALID_SHARE_SLIDING_WINDOWS_MAX_LIMIT) { isSendShareToKafka = false; - LOG(INFO) << "invalid share spamming, diff: " << share.sharediff() << ", worker: " - << worker.fullName_ << ", agent: " << clientAgent_ << ", ip: " << clientIp; + LOG(INFO) << "invalid share spamming, diff: " << share.sharediff() + << ", worker: " << worker.fullName_ + << ", agent: " << clientAgent_ << ", ip: " << clientIp; } } @@ -170,11 +188,12 @@ void StratumMinerDecred::handleRequest_Submit(const string &idStr, const JsonNod std::string message; uint32_t size = 0; if 
(!share.SerializeToArrayWithVersion(message, size)) { - LOG(ERROR) << "share SerializeToArrayWithVersion failed!"<< share.toString(); + LOG(ERROR) << "share SerializeToArrayWithVersion failed!" + << share.toString(); return; } - server.sendShare2Kafka((const uint8_t *) message.data(), size); + server.sendShare2Kafka((const uint8_t *)message.data(), size); } return; } diff --git a/src/decred/StratumMinerDecred.h b/src/decred/StratumMinerDecred.h index d6a091b1a..bff5133f8 100644 --- a/src/decred/StratumMinerDecred.h +++ b/src/decred/StratumMinerDecred.h @@ -31,16 +31,18 @@ class StratumMinerDecred : public StratumMinerBase { public: using StratumMiner::kExtraNonce2Size_; - StratumMinerDecred(StratumSessionDecred &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId); - - void handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) override; + StratumMinerDecred( + StratumSessionDecred &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId); + + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; private: void handleRequest_Submit(const string &idStr, const JsonNode &jparams); diff --git a/src/decred/StratumServerDecred.cc b/src/decred/StratumServerDecred.cc index 21f8dde88..70259ad8c 100644 --- a/src/decred/StratumServerDecred.cc +++ b/src/decred/StratumServerDecred.cc @@ -37,27 +37,31 @@ #include using std::ostream; -static ostream& operator<<(ostream& os, const StratumJobDecred& job) -{ - os << "jobId = " << job.jobId_ << ", prevHash = " << job.getPrevHash() << ", coinBase1 = " << job.getCoinBase1() - << ", coinBase2 = " << job.header_.stakeVersion << ", vesion = " << job.header_.version << ", height = " << job.header_.height; +static ostream 
&operator<<(ostream &os, const StratumJobDecred &job) { + os << "jobId = " << job.jobId_ << ", prevHash = " << job.getPrevHash() + << ", coinBase1 = " << job.getCoinBase1() + << ", coinBase2 = " << job.header_.stakeVersion + << ", vesion = " << job.header_.version + << ", height = " << job.header_.height; return os; } -JobRepositoryDecred::JobRepositoryDecred(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerDecred *server) - : JobRepositoryBase(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) +JobRepositoryDecred::JobRepositoryDecred( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerDecred *server) + : JobRepositoryBase( + kafkaBrokers, consumerTopic, fileLastNotifyTime, server) , lastHeight_(0) - , lastVoters_(0) -{ + , lastVoters_(0) { } -shared_ptr JobRepositoryDecred::createStratumJob() -{ +shared_ptr JobRepositoryDecred::createStratumJob() { return std::make_shared(); } -void JobRepositoryDecred::broadcastStratumJob(shared_ptr sjob) -{ +void JobRepositoryDecred::broadcastStratumJob(shared_ptr sjob) { auto jobDecred = std::static_pointer_cast(sjob); if (!jobDecred) { LOG(ERROR) << "wrong job type: jobId = " << sjob->jobId_; @@ -77,99 +81,151 @@ void JobRepositoryDecred::broadcastStratumJob(shared_ptr sjob) bool moreVoters = voters > lastVoters_; shared_ptr jobEx(createStratumJobEx(jobDecred, isClean)); - { - ScopeLock sl(lock_); - - if (isClean) { - // mark all jobs as stale, should do this before insert new job - // stale shares will not be rejected, they will be marked as ACCEPT_STALE and have lower rewards. - for (auto it : exJobs_) { - it.second->markStale(); - } - } - // insert new job - exJobs_[jobDecred->jobId_] = jobEx; + if (isClean) { + // mark all jobs as stale, should do this before insert new job + // stale shares will not be rejected, they will be marked as ACCEPT_STALE + // and have lower rewards. 
+ for (auto it : exJobs_) { + it.second->markStale(); + } } - // We want to update jobs immediately if there are more voters for the same height block + // insert new job + exJobs_[jobDecred->jobId_] = jobEx; + + // We want to update jobs immediately if there are more voters for the same + // height block if (isClean || moreVoters) { lastVoters_ = voters; sendMiningNotify(jobEx); } } -// gominer protocol -// mining.notify: extra nonce 2 size is the actual extra nonce 2 size, extra nonce 1 is the actual extra nonce 1 -// mining.submit: extra nonce 2 is the actual extra nonce 2 -class StratumProtocolDecredGoMiner : public StratumProtocolDecred { +// nicehash protocol +// mining.notify: extra nonce 2 size is the actual extra nonce 2 size, extra +// nonce 1 is the actual extra nonce 1 mining.submit: extra nonce 2 is the +// actual extra nonce 2 +class StratumProtocolDecredNiceHash : public StratumProtocolDecred { public: string getExtraNonce1String(uint32_t extraNonce1) const override { - return Strings::Format("%08" PRIx32, boost::endian::endian_reverse(extraNonce1)); + return Strings::Format( + "%08" PRIx32, boost::endian::endian_reverse(extraNonce1)); } - void setExtraNonces(BlockHeaderDecred &header, uint32_t extraNonce1, const vector &extraNonce2) override { - *reinterpret_cast(header.extraData.begin()) = extraNonce1; - std::copy_n(extraNonce2.begin(), StratumMiner::kExtraNonce2Size_, header.extraData.begin() + sizeof(extraNonce1)); + void setExtraNonces( + BlockHeaderDecred &header, + uint32_t extraNonce1, + const vector &extraNonce2) override { + *reinterpret_cast( + header.extraData.begin()) = extraNonce1; + std::copy_n( + extraNonce2.begin(), + StratumMiner::kExtraNonce2Size_, + header.extraData.begin() + sizeof(extraNonce1)); } }; // tpruvot protocol -// mining.notify: extra nonce 2 size is not used, extra nonce 1 is considered as the whole extra nonce, bits higher than 32 to be rolled -// mining.submit: extra nonce 2 is considered as the whole rolled extra 
nonce but we only take the first 8 bytes (last 4 bytes shall be extra nonce1). +// mining.notify: extra nonce 2 size is not used, extra nonce 1 is considered as +// the whole extra nonce, bits higher than 32 to be rolled mining.submit: extra +// nonce 2 is considered as the whole rolled extra nonce but we only take the +// first 8 bytes (last 4 bytes shall be extra nonce1). class StratumProtocolDecredTPruvot : public StratumProtocolDecred { public: string getExtraNonce1String(uint32_t extraNonce1) const override { - return Strings::Format("%024" PRIx32, boost::endian::endian_reverse(extraNonce1)); + return Strings::Format( + "%024" PRIx32, boost::endian::endian_reverse(extraNonce1)); } - void setExtraNonces(BlockHeaderDecred &header, uint32_t extraNonce1, const vector &extraNonce2) override { - *reinterpret_cast(header.extraData.begin() + StratumMiner::kExtraNonce2Size_) = extraNonce1; - std::copy_n(extraNonce2.begin(), StratumMiner::kExtraNonce2Size_, header.extraData.begin()); + void setExtraNonces( + BlockHeaderDecred &header, + uint32_t extraNonce1, + const vector &extraNonce2) override { + *reinterpret_cast( + header.extraData.begin() + StratumMiner::kExtraNonce2Size_) = + extraNonce1; + std::copy_n( + extraNonce2.begin(), + StratumMiner::kExtraNonce2Size_, + header.extraData.begin()); } }; -ServerDecred::ServerDecred(int32_t shareAvgSeconds, const libconfig::Config &config) +ServerDecred::ServerDecred( + int32_t shareAvgSeconds, const libconfig::Config &config) : ServerBase(shareAvgSeconds) -{ + , network_(NetworkDecred::MainNet) { string protocol; - config.lookupValue("sserver.protocol", protocol); - boost::algorithm::to_lower(protocol); + if (config.lookupValue("sserver.protocol", protocol)) { + boost::algorithm::to_lower(protocol); + } if (protocol == "gominer") { - LOG(INFO) << "Using gominer stratum protocol"; - protocol_ = boost::make_unique(); + LOG(FATAL) << "Gominer is no longer a valid stratum protocol option"; + } else if (protocol == "nicehash") { 
+ LOG(INFO) << "Using nicehash stratum protocol"; + protocol_ = boost::make_unique(); } else { LOG(INFO) << "Using tpruvot stratum protocol"; protocol_ = boost::make_unique(); } + + string network; + if (config.lookupValue("sserver.network", network)) { + boost::algorithm::to_lower(network); + } + if (network == "testnet") { + LOG(INFO) << "Running testnet"; + network_ = NetworkDecred::TestNet; + } else if (network == "simnet") { + LOG(INFO) << "Running simnet"; + network_ = NetworkDecred::SimNet; + } else { + LOG(INFO) << "Running mainnet"; + network_ = NetworkDecred::MainNet; + } } -unique_ptr ServerDecred::createConnection(bufferevent *bev, sockaddr *saddr, uint32_t sessionID) -{ - return boost::make_unique(*this, bev, saddr, sessionID, *protocol_); +unique_ptr ServerDecred::createConnection( + bufferevent *bev, sockaddr *saddr, uint32_t sessionID) { + return boost::make_unique( + *this, bev, saddr, sessionID, *protocol_); } -JobRepository* ServerDecred::createJobRepository(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime) -{ - return new JobRepositoryDecred(kafkaBrokers, consumerTopic, fileLastNotifyTime, this); +JobRepository *ServerDecred::createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) { + return new JobRepositoryDecred( + kafkaBrokers, consumerTopic, fileLastNotifyTime, this); } -int ServerDecred::checkShare(ShareDecred &share, shared_ptr exJobPtr, const vector &extraNonce2, - uint32_t ntime, uint32_t nonce, const string &workerFullName) -{ +int ServerDecred::checkShare( + ShareDecred &share, + shared_ptr exJobPtr, + const vector &extraNonce2, + uint32_t ntime, + uint32_t nonce, + const string &workerFullName) { if (!exJobPtr || exJobPtr->isStale()) { return StratumStatus::JOB_NOT_FOUND; } auto sjob = std::static_pointer_cast(exJobPtr->sjob_); - share.set_network((uint32_t)sjob->network_); + share.set_network(static_cast(network_)); 
share.set_voters(sjob->header_.voters.value()); if (ntime > sjob->header_.timestamp.value() + 600) { return StratumStatus::TIME_TOO_NEW; } - FoundBlockDecred foundBlock(share.jobid(), share.workerhashid(), share.userid(), workerFullName, sjob->header_, sjob->network_); - auto& header = foundBlock.header_; + FoundBlockDecred foundBlock( + share.jobid(), + share.workerhashid(), + share.userid(), + workerFullName, + sjob->header_, + network_); + auto &header = foundBlock.header_; header.timestamp = ntime; header.nonce = nonce; protocol_->setExtraNonces(header, share.sessionid(), extraNonce2); @@ -189,8 +245,8 @@ int ServerDecred::checkShare(ShareDecred &share, shared_ptr exJobP GetJobRepository()->markAllJobsAsStale(); LOG(INFO) << ">>>> found a new block: " << blkHash.ToString() - << ", jobId: " << share.jobid() << ", userId: " << share.userid() - << ", by: " << workerFullName << " <<<<"; + << ", jobId: " << share.jobid() << ", userId: " << share.userid() + << ", by: " << workerFullName << " <<<<"; } // print out high diff share, 2^10 = 1024 @@ -201,10 +257,12 @@ int ServerDecred::checkShare(ShareDecred &share, shared_ptr exJobP } // check share diff - auto jobTarget = NetworkParamsDecred::get(sjob->network_).powLimit / share.sharediff(); + auto jobTarget = + NetworkParamsDecred::get(network_).powLimit / share.sharediff(); - DLOG(INFO) << "blkHash: " << blkHash.ToString() << ", jobTarget: " - << jobTarget.ToString() << ", networkTarget: " << sjob->target_.ToString(); + DLOG(INFO) << "blkHash: " << blkHash.ToString() + << ", jobTarget: " << jobTarget.ToString() + << ", networkTarget: " << sjob->target_.ToString(); if (isEnableSimulator_ == false && bnBlockHash > jobTarget) { return StratumStatus::LOW_DIFFICULTY; diff --git a/src/decred/StratumServerDecred.h b/src/decred/StratumServerDecred.h index 5a612bb82..518cabe3d 100644 --- a/src/decred/StratumServerDecred.h +++ b/src/decred/StratumServerDecred.h @@ -32,7 +32,11 @@ class ServerDecred; class JobRepositoryDecred : 
public JobRepositoryBase { public: - JobRepositoryDecred(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerDecred *server); + JobRepositoryDecred( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerDecred *server); shared_ptr createStratumJob() override; void broadcastStratumJob(shared_ptr sjob) override; @@ -44,19 +48,30 @@ class JobRepositoryDecred : public JobRepositoryBase { class ServerDecred : public ServerBase { public: - explicit ServerDecred(int32_t shareAvgSeconds, const libconfig::Config &config); - unique_ptr createConnection(bufferevent *bev, sockaddr *saddr, uint32_t sessionID) override; + explicit ServerDecred( + int32_t shareAvgSeconds, const libconfig::Config &config); + unique_ptr createConnection( + bufferevent *bev, sockaddr *saddr, uint32_t sessionID) override; - int checkShare(ShareDecred &share, shared_ptr exJobPtr, const vector &extraNonce2, - uint32_t ntime, uint32_t nonce, const string &workerFullName); + int checkShare( + ShareDecred &share, + shared_ptr exJobPtr, + const vector &extraNonce2, + uint32_t ntime, + uint32_t nonce, + const string &workerFullName); protected: - JobRepository* createJobRepository(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime) override; + JobRepository *createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) override; private: void sendSolvedShare2Kafka(const FoundBlockDecred &foundBlock); - + unique_ptr protocol_; + NetworkDecred network_; }; #endif diff --git a/src/decred/StratumSessionDecred.cc b/src/decred/StratumSessionDecred.cc index aa7fc513d..cdec17d92 100644 --- a/src/decred/StratumSessionDecred.cc +++ b/src/decred/StratumSessionDecred.cc @@ -32,75 +32,82 @@ #include -StratumSessionDecred::StratumSessionDecred(ServerDecred &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1, - const 
StratumProtocolDecred &protocol) - : StratumSessionBase(server, bev, saddr, extraNonce1) - , protocol_(protocol) - , shortJobId_(0) { +StratumSessionDecred::StratumSessionDecred( + ServerDecred &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1, + const StratumProtocolDecred &protocol) + : StratumSessionBase(server, bev, saddr, extraNonce1) + , protocol_(protocol) + , shortJobId_(0) { } -void StratumSessionDecred::sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) { - if (state_ < AUTHENTICATED || exJobPtr == nullptr) - { +void StratumSessionDecred::sendMiningNotify( + shared_ptr exJobPtr, bool isFirstJob) { + if (state_ < AUTHENTICATED || exJobPtr == nullptr) { LOG(ERROR) << "decred sendMiningNotify failed, state = " << state_; return; } auto jobDecred = std::static_pointer_cast(exJobPtr->sjob_); - if (nullptr == jobDecred) - { + if (nullptr == jobDecred) { LOG(ERROR) << "Invalid job type, jobId = " << exJobPtr->sjob_->jobId_; return; } - auto &ljob = addLocalJob(jobDecred->jobId_, shortJobId_++, jobDecred->header_.nBits.value()); + auto &ljob = addLocalJob( + jobDecred->jobId_, shortJobId_++, jobDecred->header_.nBits.value()); // PrevHash field is int32 reversed - auto prevHash = reinterpret_cast(jobDecred->header_.prevBlock.begin()); - auto notifyStr = Strings::Format("{\"id\":null,\"jsonrpc\":\"2.0\",\"method\":\"mining.notify\"," - "\"params\":[\"%04" PRIx8 "\",\"%08x%08x%08x%08x%08x%08x%08x%08x\",\"%s00000000\",\"%s\",[],\"%s\",\"%" PRIx32 "\",\"%" PRIx32 "\",%s]}\n", - ljob.shortJobId_, - prevHash[0].value(), - prevHash[1].value(), - prevHash[2].value(), - prevHash[3].value(), - prevHash[4].value(), - prevHash[5].value(), - prevHash[6].value(), - prevHash[7].value(), - jobDecred->getCoinBase1().c_str(), - HexStr(BEGIN(jobDecred->header_.stakeVersion), END(jobDecred->header_.stakeVersion)).c_str(), - HexStr(BEGIN(jobDecred->header_.version), END(jobDecred->header_.version)).c_str(), - jobDecred->header_.nBits.value(), - 
jobDecred->header_.timestamp.value(), - exJobPtr->isClean_ ? "true" : "false"); + auto prevHash = reinterpret_cast( + jobDecred->header_.prevBlock.begin()); + auto notifyStr = Strings::Format( + "{\"id\":null,\"jsonrpc\":\"2.0\",\"method\":\"mining.notify\"," + "\"params\":[\"%04" PRIx8 + "\",\"%08x%08x%08x%08x%08x%08x%08x%08x\",\"%s00000000\",\"%s\",[],\"%s\"," + "\"%" PRIx32 "\",\"%" PRIx32 "\",%s]}\n", + ljob.shortJobId_, + prevHash[0].value(), + prevHash[1].value(), + prevHash[2].value(), + prevHash[3].value(), + prevHash[4].value(), + prevHash[5].value(), + prevHash[6].value(), + prevHash[7].value(), + jobDecred->getCoinBase1().c_str(), + HexStr( + BEGIN(jobDecred->header_.stakeVersion), + END(jobDecred->header_.stakeVersion)) + .c_str(), + HexStr(BEGIN(jobDecred->header_.version), END(jobDecred->header_.version)) + .c_str(), + jobDecred->header_.nBits.value(), + jobDecred->header_.timestamp.value(), + exJobPtr->isClean_ ? "true" : "false"); sendData(notifyStr); // clear localJobs_ clearLocalJobs(); } -void StratumSessionDecred::handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionDecred::handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) { if (method == "mining.subscribe") { handleRequest_Subscribe(idStr, jparams, jroot); - } - else if (method == "mining.authorize") { + } else if (method == "mining.authorize") { handleRequest_Authorize(idStr, jparams, jroot); - } - else if (dispatcher_) { + } else { dispatcher_->handleRequest(idStr, method, jparams, jroot); } } -void StratumSessionDecred::handleRequest_Subscribe(const string &idStr, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionDecred::handleRequest_Subscribe( + const string &idStr, const JsonNode &jparams, const JsonNode &jroot) { if (state_ != CONNECTED) { responseError(idStr, StratumStatus::UNKNOWN); return; @@ -109,29 
+116,33 @@ void StratumSessionDecred::handleRequest_Subscribe(const string &idStr, #ifdef WORK_WITH_STRATUM_SWITCHER // - // For working with StratumSwitcher, the ExtraNonce1 must be provided as param 2. + // For working with StratumSwitcher, the ExtraNonce1 must be provided as + // param 2. // // params[0] = client version [require] // params[1] = session id / ExtraNonce1 [require] // params[2] = miner's real IP (unit32) [optional] // // StratumSwitcher request eg.: - // {"id": 1, "method": "mining.subscribe", "params": ["StratumSwitcher/0.1", "01ad557d", 203569230]} - // 203569230 -> 12.34.56.78 + // {"id": 1, "method": "mining.subscribe", "params": ["StratumSwitcher/0.1", + // "01ad557d", 203569230]} 203569230 -> 12.34.56.78 // if (jparams.children()->size() < 2) { responseError(idStr, StratumStatus::CLIENT_IS_NOT_SWITCHER); LOG(ERROR) << "A non-switcher subscribe request is detected and rejected."; - LOG(ERROR) << "Cmake option POOL__WORK_WITH_STRATUM_SWITCHER enabled, you can only connect to the sserver via a stratum switcher."; + LOG(ERROR) << "Cmake option POOL__WORK_WITH_STRATUM_SWITCHER enabled, you " + "can only connect to the sserver via a stratum switcher."; return; } state_ = SUBSCRIBED; - setClientAgent(jparams.children()->at(0).str().substr(0, 30)); // 30 is max len + setClientAgent( + jparams.children()->at(0).str().substr(0, 30)); // 30 is max len - string extNonce1Str = jparams.children()->at(1).str().substr(0, 8); // 8 is max len + string extNonce1Str = + jparams.children()->at(1).str().substr(0, 8); // 8 is max len sscanf(extNonce1Str.c_str(), "%x", &extraNonce1_); // convert hex to int // receive miner's IP from stratumSwitcher @@ -142,7 +153,8 @@ void StratumSessionDecred::handleRequest_Subscribe(const string &idStr, clientIp_.resize(INET_ADDRSTRLEN); struct in_addr addr; addr.s_addr = clientIpInt_; - clientIp_ = inet_ntop(AF_INET, &addr, (char *)clientIp_.data(), (socklen_t)clientIp_.size()); + clientIp_ = inet_ntop( + AF_INET, &addr, (char 
*)clientIp_.data(), (socklen_t)clientIp_.size()); LOG(INFO) << "client real IP: " << clientIp_; } @@ -154,33 +166,41 @@ void StratumSessionDecred::handleRequest_Subscribe(const string &idStr, // params[0] = client version [optional] // // client request eg.: - // {"id": 1, "method": "mining.subscribe", "params": ["gominer/0.2.0-decred"]} + // {"id": 1, "method": "mining.subscribe", "params": + // ["gominer/0.2.0-decred"]} // if (jparams.children()->size() >= 1) { - setClientAgent(jparams.children()->at(0).str().substr(0, 30)); // 30 is max len + setClientAgent( + jparams.children()->at(0).str().substr(0, 30)); // 30 is max len } #endif // WORK_WITH_STRATUM_SWITCHER - // result[0] = 2-tuple with name of subscribed notification and subscription ID. - // Theoretically it may be used for unsubscribing, but obviously miners won't use it. - // result[1] = ExtraNonce1, used for building the coinbase. There are 2 variants of miners known - // to us: one will take first 4 bytes and another will take last four bytes so we put - // the value on both places. - // result[2] = ExtraNonce2_size, the number of bytes that the miner users for its ExtraNonce2 counter + // result[0] = 2-tuple with name of subscribed notification and subscription + // ID. + // Theoretically it may be used for unsubscribing, but obviously + // miners won't use it. + // result[1] = ExtraNonce1, used for building the coinbase. There are 2 + // variants of miners known + // to us: one will take first 4 bytes and another will take last + // four bytes so we put the value on both places. 
+ // result[2] = ExtraNonce2_size, the number of bytes that the miner users for + // its ExtraNonce2 counter auto extraNonce1Str = protocol_.getExtraNonce1String(extraNonce1_); - const string s = Strings::Format("{\"id\":%s,\"result\":[[[\"mining.set_difficulty\",\"%08x\"]" - ",[\"mining.notify\",\"%08x\"]],\"%s\",%d],\"error\":null}\n", - idStr.c_str(), extraNonce1_, extraNonce1_, extraNonce1Str.c_str(), StratumMiner::kExtraNonce2Size_); + const string s = Strings::Format( + "{\"id\":%s,\"result\":[[[\"mining.set_difficulty\",\"%08x\"]" + ",[\"mining.notify\",\"%08x\"]],\"%s\",%d],\"error\":null}\n", + idStr.c_str(), + extraNonce1_, + extraNonce1_, + extraNonce1Str.c_str(), + StratumMiner::kExtraNonce2Size_); sendData(s); } -void StratumSessionDecred::handleRequest_Authorize(const string &idStr, - const JsonNode &jparams, - const JsonNode &jroot) -{ - if (state_ != SUBSCRIBED) - { +void StratumSessionDecred::handleRequest_Authorize( + const string &idStr, const JsonNode &jparams, const JsonNode &jroot) { + if (state_ != SUBSCRIBED) { responseError(idStr, StratumStatus::NOT_SUBSCRIBED); return; } @@ -188,12 +208,11 @@ void StratumSessionDecred::handleRequest_Authorize(const string &idStr, // // params[0] = user[.worker] // params[1] = password - // eg. {"params": ["slush.miner1", "password"], "id": 2, "method": "mining.authorize"} - // the password may be omitted. - // eg. {"params": ["slush.miner1"], "id": 2, "method": "mining.authorize"} + // eg. {"params": ["slush.miner1", "password"], "id": 2, "method": + // "mining.authorize"} the password may be omitted. eg. 
{"params": + // ["slush.miner1"], "id": 2, "method": "mining.authorize"} // - if (jparams.children()->size() < 1) - { + if (jparams.children()->size() < 1) { responseError(idStr, StratumStatus::INVALID_USERNAME); return; } @@ -201,8 +220,7 @@ void StratumSessionDecred::handleRequest_Authorize(const string &idStr, string fullName, password; fullName = jparams.children()->at(0).str(); - if (jparams.children()->size() > 1) - { + if (jparams.children()->size() > 1) { password = jparams.children()->at(1).str(); } @@ -210,12 +228,14 @@ void StratumSessionDecred::handleRequest_Authorize(const string &idStr, return; } -unique_ptr StratumSessionDecred::createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) { - return boost::make_unique(*this, - *getServer().defaultDifficultyController_, - clientAgent, - workerName, - workerId); +unique_ptr StratumSessionDecred::createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) { + return boost::make_unique( + *this, + *getServer().defaultDifficultyController_, + clientAgent, + workerName, + workerId); } diff --git a/src/decred/StratumSessionDecred.h b/src/decred/StratumSessionDecred.h index 6b4988370..3af7044aa 100644 --- a/src/decred/StratumSessionDecred.h +++ b/src/decred/StratumSessionDecred.h @@ -30,32 +30,37 @@ class StratumSessionDecred : public StratumSessionBase { public: - StratumSessionDecred(ServerDecred &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1, - const StratumProtocolDecred &protocol); + StratumSessionDecred( + ServerDecred &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1, + const StratumProtocolDecred &protocol); - void sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; + void + sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; protected: - void handleRequest(const std::string &idStr, const std::string &method, - const JsonNode 
&jparams, const JsonNode &jroot) override; - void handleRequest_Subscribe(const std::string &idStr, - const JsonNode &jparams, - const JsonNode &jroot); - void handleRequest_Authorize(const std::string &idStr, - const JsonNode &jparams, - const JsonNode &jroot); + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; + void handleRequest_Subscribe( + const std::string &idStr, const JsonNode &jparams, const JsonNode &jroot); + void handleRequest_Authorize( + const std::string &idStr, const JsonNode &jparams, const JsonNode &jroot); + public: - std::unique_ptr createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) override; + std::unique_ptr createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) override; private: const StratumProtocolDecred &protocol_; - uint8_t shortJobId_; // jobId starts from 0 + uint8_t shortJobId_; // jobId starts from 0 }; -#endif // #ifndef STRATUM_SESSION_DECRED_H_ +#endif // #ifndef STRATUM_SESSION_DECRED_H_ diff --git a/src/eth/BlockMakerEth.cc b/src/eth/BlockMakerEth.cc index f112bfb84..3fc32a282 100644 --- a/src/eth/BlockMakerEth.cc +++ b/src/eth/BlockMakerEth.cc @@ -29,20 +29,22 @@ #include ////////////////////////////////////////////////BlockMakerEth//////////////////////////////////////////////////////////////// -BlockMakerEth::BlockMakerEth(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB) - : BlockMaker(def, kafkaBrokers, poolDB) -{ +BlockMakerEth::BlockMakerEth( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB) + : BlockMaker(def, kafkaBrokers, poolDB) { if (!checkRpcSubmitBlock()) { - LOG(FATAL) << "One of Ethereum nodes don't support both parity_submitBlockDetail and eth_submitBlock, cannot submit block to it!"; + LOG(FATAL) + << "One of Ethereum nodes don't support both parity_submitBlockDetail " + "and 
eth_submitBlock, cannot submit block to it!"; } } -void BlockMakerEth::processSolvedShare(rd_kafka_message_t *rkmessage) -{ +void BlockMakerEth::processSolvedShare(rd_kafka_message_t *rkmessage) { const char *message = (const char *)rkmessage->payload; JsonNode r; - if (!JsonNode::parse(message, message + rkmessage->len, r)) - { + if (!JsonNode::parse(message, message + rkmessage->len, r)) { LOG(ERROR) << "decode common event failure"; return; } @@ -56,8 +58,7 @@ void BlockMakerEth::processSolvedShare(rd_kafka_message_t *rkmessage) r["userId"].type() != Utilities::JS::type::Int || r["workerId"].type() != Utilities::JS::type::Int || r["workerFullName"].type() != Utilities::JS::type::Str || - r["chain"].type() != Utilities::JS::type::Str) - { + r["chain"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "eth solved share format wrong"; return; } @@ -67,26 +68,42 @@ void BlockMakerEth::processSolvedShare(rd_kafka_message_t *rkmessage) worker.workerHashId_ = r["workerId"].int64(); worker.fullName_ = r["workerFullName"].str(); - submitBlockNonBlocking(r["nonce"].str(), r["header"].str(), r["mix"].str(), def()->nodes, - r["height"].uint32(), r["chain"].str(), r["networkDiff"].uint64(), - worker); + submitBlockNonBlocking( + r["nonce"].str(), + r["header"].str(), + r["mix"].str(), + def()->nodes, + r["height"].uint32(), + r["chain"].str(), + r["networkDiff"].uint64(), + worker); } -bool BlockMakerEth::submitBlock(const string &nonce, const string &header, const string &mix, - const string &rpcUrl, const string &rpcUserPass, - string &errMsg, string &blockHash, - string &request, string &response, bool &resultFound) { +bool BlockMakerEth::submitBlock( + const string &nonce, + const string &header, + const string &mix, + const string &rpcUrl, + const string &rpcUserPass, + string &errMsg, + string &blockHash, + string &request, + string &response, + bool &resultFound) { resultFound = false; /** * Use parity_submitBlockDetail and eth_submitBlock at the same time. 
- * + * * About RPC parity_submitBlockDetail: - * A new RPC to submit POW work to Ethereum node. It has the same functionality as `eth_submitWork` + * A new RPC to submit POW work to Ethereum node. It has the same + functionality as `eth_submitWork` * but returns the hash of the submitted block. - * When an error occurs, the specific error message will be returned instead of a `false`. - * It defined by the BTCPool project and implemented in the Parity Ethereum node as a private RPC. - * + * When an error occurs, the specific error message will be returned + instead of a `false`. + * It defined by the BTCPool project and implemented in the Parity Ethereum + node as a private RPC. + * * RPC parity_submitWorkDetail * Params (same as `eth_submitWork`): @@ -100,47 +117,51 @@ bool BlockMakerEth::submitBlock(const string &nonce, const string &header, const * "block_hash" * * Error on failure: - * {code: -32005, message: "Cannot submit work.", data: ""} - * + * {code: -32005, message: "Cannot submit work.", data: ""} + * * Examples for the RPC calling: * - * + * */ request = Strings::Format( - "[" - "{\"jsonrpc\":\"2.0\",\"method\":\"parity_submitWorkDetail\",\"params\":[\"%s\",\"%s\",\"%s\"],\"id\":1}," - "{\"jsonrpc\":\"2.0\",\"method\":\"eth_submitWork\",\"params\":[\"%s\",\"%s\",\"%s\"],\"id\":2}" - "]", - HexAddPrefix(nonce).c_str(), - HexAddPrefix(header).c_str(), - HexAddPrefix(mix).c_str(), - HexAddPrefix(nonce).c_str(), - HexAddPrefix(header).c_str(), - HexAddPrefix(mix).c_str() - ); - - bool ok = blockchainNodeRpcCall(rpcUrl.c_str(), rpcUserPass.c_str(), request.c_str(), response); - DLOG(INFO) << "eth_submitWork request for server " << rpcUrl << ": " << request; - DLOG(INFO) << "eth_submitWork response for server " << rpcUrl << ": " << response; + "[" + "{\"jsonrpc\":\"2.0\",\"method\":\"parity_submitWorkDetail\",\"params\":[" + "\"%s\",\"%s\",\"%s\"],\"id\":1}," + "{\"jsonrpc\":\"2.0\",\"method\":\"eth_submitWork\",\"params\":[\"%s\"," + 
"\"%s\",\"%s\"],\"id\":2}" + "]", + HexAddPrefix(nonce).c_str(), + HexAddPrefix(header).c_str(), + HexAddPrefix(mix).c_str(), + HexAddPrefix(nonce).c_str(), + HexAddPrefix(header).c_str(), + HexAddPrefix(mix).c_str()); + + bool ok = blockchainNodeRpcCall( + rpcUrl.c_str(), rpcUserPass.c_str(), request.c_str(), response); + DLOG(INFO) << "eth_submitWork request for server " << rpcUrl << ": " + << request; + DLOG(INFO) << "eth_submitWork response for server " << rpcUrl << ": " + << response; if (!ok) { LOG(WARNING) << "Call RPC eth_submitWork failed, node url: " << rpcUrl - << ", request: " << request - << ", response: " << response; + << ", request: " << request << ", response: " << response; return false; } JsonNode r; - if (!JsonNode::parse(response.c_str(), response.c_str() + response.size(), r)) { + if (!JsonNode::parse( + response.c_str(), response.c_str() + response.size(), r)) { LOG(WARNING) << "decode response failure, node url: " << rpcUrl - << ", request: " << request - << ", response: " << response; + << ", request: " << request << ", response: " << response; return false; } if (r.type() != Utilities::JS::type::Array || r.children()->size() != 2) { - LOG(WARNING) << "node doesn't support multiple requests in the same JSON, node url: " << rpcUrl - << ", request: " << request - << ", response: " << response; + LOG(WARNING) + << "node doesn't support multiple requests in the same JSON, node url: " + << rpcUrl << ", request: " << request << ", response: " << response; return false; } @@ -148,9 +169,10 @@ bool BlockMakerEth::submitBlock(const string &nonce, const string &header, const bool success = false; for (auto res : *results) { - if (res.type() != Utilities::JS::type::Obj && res["id"].type() != Utilities::JS::type::Int) { - LOG(WARNING) << "Result is not a valid JSON-RPC object, node url: " << rpcUrl - << ", request: " << request + if (res.type() != Utilities::JS::type::Obj && + res["id"].type() != Utilities::JS::type::Int) { + LOG(WARNING) << "Result is 
not a valid JSON-RPC object, node url: " + << rpcUrl << ", request: " << request << ", response: " << response; continue; } @@ -168,20 +190,21 @@ bool BlockMakerEth::submitBlock(const string &nonce, const string &header, const blockHash = res["result"].str(); continue; } - + // // Failure result of parity_submitWorkDetail. Example: - // {"jsonrpc":"2.0","error":{"code":-32005,"message":"Cannot submit work.","data":"PoW hash is invalid or out of date."},"id":5} + // {"jsonrpc":"2.0","error":{"code":-32005,"message":"Cannot submit + // work.","data":"PoW hash is invalid or out of date."},"id":5} // - if (res["error"].type() == Utilities::JS::type::Obj && res["error"]["data"].type() == Utilities::JS::type::Str) { + if (res["error"].type() == Utilities::JS::type::Obj && + res["error"]["data"].type() == Utilities::JS::type::Str) { errMsg = res["error"]["data"].str(); resultFound = true; continue; } // Ignore "Method not found" error of RPC parity_submitWorkDetail - } - else { + } else { // // Response of eth_submitWork. 
Example: // {"jsonrpc":"2.0","result":false,"id":5} @@ -192,14 +215,13 @@ bool BlockMakerEth::submitBlock(const string &nonce, const string &header, const } // Don't set `success = false` if the result is false, // because parity_submitWorkDetail may have been successful - + resultFound = true; continue; } LOG(WARNING) << "Unexpected result, node url: " << rpcUrl - << ", request: " << request - << ", response: " << response; + << ", request: " << request << ", response: " << response; } } @@ -217,48 +239,67 @@ bool BlockMakerEth::checkRpcSubmitBlock() { bool resultFound = false; submitBlock( - "0x0000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - itr.rpcAddr_, itr.rpcUserPwd_, - errMsg, blockHash, - request, response, resultFound - ); + "0x0000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + itr.rpcAddr_, + itr.rpcUserPwd_, + errMsg, + blockHash, + request, + response, + resultFound); if (!resultFound) { - LOG(FATAL) << "Node " << itr.rpcAddr_ << " doesn't support both parity_submitBlockDetail and eth_submitBlock, cannot submit block to it!" - << " Request: " << request - << ", response: " << response; + LOG(FATAL) << "Node " << itr.rpcAddr_ + << " doesn't support both parity_submitBlockDetail and " + "eth_submitBlock, cannot submit block to it!" + << " Request: " << request << ", response: " << response; return false; } if (!errMsg.empty()) { - LOG(INFO) << "Node " << itr.rpcAddr_ << " supports parity_submitBlockDetail. Block hash will be recorded correctly if submit block to it." - << " Request: " << request - << ", response: " << response; - } - else { - LOG(WARNING) << "Node " << itr.rpcAddr_ << " doesn't supports parity_submitBlockDetail. Block hash will be empty if submit block to it." 
- << " Request: " << request - << ", response: " << response; + LOG(INFO) << "Node " << itr.rpcAddr_ + << " supports parity_submitBlockDetail. Block hash will be " + "recorded correctly if submit block to it." + << " Request: " << request << ", response: " << response; + } else { + LOG(WARNING) << "Node " << itr.rpcAddr_ + << " doesn't supports parity_submitBlockDetail. Block hash " + "will be empty if submit block to it." + << " Request: " << request << ", response: " << response; } } return true; } -void BlockMakerEth::submitBlockNonBlocking(const string &nonce, const string &header, const string &mix, const vector &nodes, - const uint32_t height, const string &chain, const uint64_t networkDiff, const StratumWorker &worker) { +void BlockMakerEth::submitBlockNonBlocking( + const string &nonce, + const string &header, + const string &mix, + const vector &nodes, + const uint32_t height, + const string &chain, + const uint64_t networkDiff, + const StratumWorker &worker) { std::vector> threadPool; std::atomic syncSubmitSuccess(false); // run threads - for (size_t i=0; i( - std::bind(&BlockMakerEth::_submitBlockThread, this, - nonce, header, mix, nodes[i], - height, chain, networkDiff, worker, - &syncSubmitSuccess)); + for (size_t i = 0; i < nodes.size(); i++) { + auto t = std::make_shared(std::bind( + &BlockMakerEth::_submitBlockThread, + this, + nonce, + header, + mix, + nodes[i], + height, + chain, + networkDiff, + worker, + &syncSubmitSuccess)); threadPool.push_back(t); } @@ -268,11 +309,18 @@ void BlockMakerEth::submitBlockNonBlocking(const string &nonce, const string &he } } -void BlockMakerEth::_submitBlockThread(const string &nonce, const string &header, const string &mix, const NodeDefinition &node, - const uint32_t height, const string &chain, const uint64_t networkDiff, const StratumWorker &worker, - std::atomic *syncSubmitSuccess) { +void BlockMakerEth::_submitBlockThread( + const string &nonce, + const string &header, + const string &mix, + const 
NodeDefinition &node, + const uint32_t height, + const string &chain, + const uint64_t networkDiff, + const StratumWorker &worker, + std::atomic *syncSubmitSuccess) { string blockHash; - + // unused vars string request, response; bool resultFound; @@ -280,19 +328,27 @@ void BlockMakerEth::_submitBlockThread(const string &nonce, const string &header auto submitBlockOnce = [&]() { string errMsg; bool success = BlockMakerEth::submitBlock( - nonce, header, mix, - node.rpcAddr_, node.rpcUserPwd_, - errMsg, blockHash, - request, response, resultFound - ); + nonce, + header, + mix, + node.rpcAddr_, + node.rpcUserPwd_, + errMsg, + blockHash, + request, + response, + resultFound); if (success) { - LOG(INFO) << "submit block success, chain: " << chain << ", height: " << height - << ", hash: " << blockHash << ", hash_no_nonce: " << header - << ", networkDiff: " << networkDiff << ", worker: " << worker.fullName_; + LOG(INFO) << "submit block success, chain: " << chain + << ", height: " << height << ", hash: " << blockHash + << ", hash_no_nonce: " << header + << ", networkDiff: " << networkDiff + << ", worker: " << worker.fullName_; return true; } - LOG(WARNING) << "submit block failed, chain: " << chain << ", height: " << height << ", hash_no_nonce: " << header + LOG(WARNING) << "submit block failed, chain: " << chain + << ", height: " << height << ", hash_no_nonce: " << header << ", err_msg: " << errMsg; return false; }; @@ -300,7 +356,8 @@ void BlockMakerEth::_submitBlockThread(const string &nonce, const string &header int retryTime = 5; while (retryTime > 0) { if (*syncSubmitSuccess) { - LOG(INFO) << "_submitBlockThread(" << node.rpcAddr_ << "): " << "other thread submit success, skip"; + LOG(INFO) << "_submitBlockThread(" << node.rpcAddr_ << "): " + << "other thread submit success, skip"; return; } if (submitBlockOnce()) { @@ -315,27 +372,39 @@ void BlockMakerEth::_submitBlockThread(const string &nonce, const string &header saveBlockToDB(nonce, header, blockHash, height, 
chain, networkDiff, worker); } -void BlockMakerEth::saveBlockToDB(const string &nonce, const string &header, const string &blockHash, const uint32_t height, - const string &chain, const uint64_t networkDiff, const StratumWorker &worker) { +void BlockMakerEth::saveBlockToDB( + const string &nonce, + const string &header, + const string &blockHash, + const uint32_t height, + const string &chain, + const uint64_t networkDiff, + const StratumWorker &worker) { const string nowStr = date("%F %T"); string sql; - sql = Strings::Format("INSERT INTO `found_blocks` " - " (`puid`, `worker_id`" - ", `worker_full_name`, `chain`" - ", `height`, `hash`, `hash_no_nonce`, `nonce`" - ", `rewards`" - ", `network_diff`, `created_at`)" - " VALUES (%ld, %" PRId64 - ", '%s', '%s'" - ", %lu, '%s', '%s', '%s'" - ", %" PRId64 - ", %" PRIu64 ", '%s'); ", - worker.userId_, worker.workerHashId_, - // filter again, just in case - filterWorkerName(worker.fullName_).c_str(), chain.c_str(), - height, blockHash.c_str(), header.c_str(), nonce.c_str(), - EthConsensus::getStaticBlockReward(height, chain), - networkDiff, nowStr.c_str()); + sql = Strings::Format( + "INSERT INTO `found_blocks` " + " (`puid`, `worker_id`" + ", `worker_full_name`, `chain`" + ", `height`, `hash`, `hash_no_nonce`, `nonce`" + ", `rewards`" + ", `network_diff`, `created_at`)" + " VALUES (%ld, %" PRId64 + ", '%s', '%s'" + ", %lu, '%s', '%s', '%s'" + ", %" PRId64 ", %" PRIu64 ", '%s'); ", + worker.userId_, + worker.workerHashId_, + // filter again, just in case + filterWorkerName(worker.fullName_).c_str(), + chain.c_str(), + height, + blockHash.c_str(), + header.c_str(), + nonce.c_str(), + EthConsensus::getStaticBlockReward(height, chain,networkDiff), + networkDiff, + nowStr.c_str()); // try connect to DB MySQLConnection db(poolDB_); @@ -348,9 +417,7 @@ void BlockMakerEth::saveBlockToDB(const string &nonce, const string &header, con if (db.execute(sql) == false) { LOG(ERROR) << "insert found block failure: " << sql; - } - else - 
{ + } else { LOG(INFO) << "insert found block success for height " << height; } } diff --git a/src/eth/BlockMakerEth.h b/src/eth/BlockMakerEth.h index 6f39db926..779f9d159 100644 --- a/src/eth/BlockMakerEth.h +++ b/src/eth/BlockMakerEth.h @@ -29,29 +29,62 @@ #include "BlockMaker.h" #include "CommonEth.h" - -class BlockMakerEth : public BlockMaker -{ +class BlockMakerEth : public BlockMaker { public: - BlockMakerEth(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB); + BlockMakerEth( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB); void processSolvedShare(rd_kafka_message_t *rkmessage) override; private: - void submitBlockNonBlocking(const string &nonce, const string &header, const string &mix, const vector &nodes, - const uint32_t height, const string &chain, const uint64_t networkDiff, const StratumWorker &worker); - void _submitBlockThread(const string &nonce, const string &header, const string &mix, const NodeDefinition &node, - const uint32_t height, const string &chain, const uint64_t networkDiff, const StratumWorker &worker, - std::atomic *syncSubmitSuccess); - void saveBlockToDB(const string &nonce, const string &header, const string &blockHash, const uint32_t height, - const string &chain, const uint64_t networkDiff, const StratumWorker &worker); - - static bool submitBlock(const string &nonce, const string &header, const string &mix, - const string &rpcUrl, const string &rpcUserPass, - string &errMsg, string &blockHash, - string &request, string &response, bool &resultFound); - static bool submitBlockDetail(const string &nonce, const string &header, const string &mix, - const string &rpcUrl, const string &rpcUserPass, - string &errMsg, string &blockHash); + void submitBlockNonBlocking( + const string &nonce, + const string &header, + const string &mix, + const vector &nodes, + const uint32_t height, + const string &chain, + const uint64_t networkDiff, + const StratumWorker &worker); + void 
_submitBlockThread( + const string &nonce, + const string &header, + const string &mix, + const NodeDefinition &node, + const uint32_t height, + const string &chain, + const uint64_t networkDiff, + const StratumWorker &worker, + std::atomic *syncSubmitSuccess); + void saveBlockToDB( + const string &nonce, + const string &header, + const string &blockHash, + const uint32_t height, + const string &chain, + const uint64_t networkDiff, + const StratumWorker &worker); + + static bool submitBlock( + const string &nonce, + const string &header, + const string &mix, + const string &rpcUrl, + const string &rpcUserPass, + string &errMsg, + string &blockHash, + string &request, + string &response, + bool &resultFound); + static bool submitBlockDetail( + const string &nonce, + const string &header, + const string &mix, + const string &rpcUrl, + const string &rpcUserPass, + string &errMsg, + string &blockHash); bool checkRpcSubmitBlock(); }; diff --git a/src/eth/CommonEth.cc b/src/eth/CommonEth.cc index dc990beb0..52d756467 100644 --- a/src/eth/CommonEth.cc +++ b/src/eth/CommonEth.cc @@ -24,10 +24,11 @@ #include "CommonEth.h" #include -static arith_uint256 kMaxUint256("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); +static arith_uint256 kMaxUint256( + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); static uint64_t kMaxUint64 = 0xffffffffffffffffull; -string Eth_DifficultyToTarget(uint64_t diff) { +string Eth_DifficultyToTarget(uint64_t diff) { if (0 == diff) { return kMaxUint256.GetHex(); } @@ -47,8 +48,7 @@ uint64_t Eth_TargetToDifficulty(string targetHex) { return diff.GetLow64(); } -uint64_t Eth_TargetToDifficulty(const uint256 &targetBin) -{ +uint64_t Eth_TargetToDifficulty(const uint256 &targetBin) { arith_uint256 target = UintToArith256(targetBin); if (target == 0) { @@ -59,13 +59,11 @@ uint64_t Eth_TargetToDifficulty(const uint256 &targetBin) return diff.GetLow64(); } -void Hex256ToEthash256(const string &strHex, ethash_h256_t 
ðashHeader) -{ +void Hex256ToEthash256(const string &strHex, ethash_h256_t ðashHeader) { if (strHex.size() != 64) return; - for (size_t i = 0; i < 32; ++i) - { + for (size_t i = 0; i < 32; ++i) { size_t size; int val = stoi(strHex.substr(i * 2, 2), &size, 16); ethashHeader.b[i] = (uint8_t)val; @@ -73,14 +71,14 @@ void Hex256ToEthash256(const string &strHex, ethash_h256_t ðashHeader) } void Uint256ToEthash256(const uint256 hash, ethash_h256_t ðashHeader) { - //uint256 store hash byte in reversed order - for (int i = 0; i < 32; ++i) - ethashHeader.b[i] = *(hash.begin() + 31 -i); + // uint256 store hash byte in reversed order + for (int i = 0; i < 32; ++i) + ethashHeader.b[i] = *(hash.begin() + 31 - i); } uint256 Ethash256ToUint256(const ethash_h256_t ðashHeader) { vector v; - for (int i = 31; i >= 0; --i) + for (int i = 31; i >= 0; --i) v.push_back(ethashHeader.b[i]); return uint256(v); } diff --git a/src/eth/CommonEth.h b/src/eth/CommonEth.h index 9dff70b35..ba58bf24b 100644 --- a/src/eth/CommonEth.h +++ b/src/eth/CommonEth.h @@ -1,4 +1,4 @@ -/* +/* The MIT License (MIT) Copyright (c) [2016] [BTC.COM] @@ -30,7 +30,6 @@ #include "libethash/ethash.h" #include "libblake2/blake2.h" - ////////////////////////////// for Eth ////////////////////////////// string Eth_DifficultyToTarget(uint64_t diff); uint64_t Eth_TargetToDifficulty(string target); @@ -39,13 +38,14 @@ void Hex256ToEthash256(const string &strHex, ethash_h256_t ðashHeader); void Uint256ToEthash256(const uint256 hash, ethash_h256_t ðashHeader); uint256 Ethash256ToUint256(const ethash_h256_t ðashHeader); -// NICEHASH_STRATUM uses a different difficulty value than the Ethereum network and BTCPool ETH. -// Conversion between difficulty and target is done the same way as with Bitcoin; -// difficulty of 1 is transformed to target being in HEX: +// NICEHASH_STRATUM uses a different difficulty value than the Ethereum network +// and BTCPool ETH. 
Conversion between difficulty and target is done the same +// way as with Bitcoin; difficulty of 1 is transformed to target being in HEX: // 00000000ffff0000000000000000000000000000000000000000000000000000 // @see https://www.nicehash.com/sw/Ethereum_specification_R1.txt inline double Eth_DiffToNicehashDiff(uint64_t diff) { - // Ethereum difficulty is numerically equivalent to 2^32 times the difficulty of Bitcoin/NICEHASH_STRATUM. + // Ethereum difficulty is numerically equivalent to 2^32 times the difficulty + // of Bitcoin/NICEHASH_STRATUM. return ((double)diff) / ((double)4294967296.0); } diff --git a/src/eth/EthConsensus.cc b/src/eth/EthConsensus.cc index fd4d68293..e86ee9267 100644 --- a/src/eth/EthConsensus.cc +++ b/src/eth/EthConsensus.cc @@ -22,139 +22,150 @@ THE SOFTWARE. */ #include "EthConsensus.h" - +#include #include #include -// The hard fork Constantinople of Ethereum mainnet has been delayed. -// So set a default height that won't arrive (9999999). -// The user can change the height in the configuration file -// after the fork height is determined. 
-int EthConsensus::kHardForkConstantinopleHeight_ = 9999999; +// The hard fork Constantinople of Ethereum mainnet +int EthConsensus::kHardForkConstantinopleHeight_ = 7280000; void EthConsensus::setHardForkConstantinopleHeight(int height) { - kHardForkConstantinopleHeight_ = height; - LOG(INFO) << "Height of Ethereum Constantinople Hard Fork: " << kHardForkConstantinopleHeight_; + kHardForkConstantinopleHeight_ = height; + LOG(INFO) << "Height of Ethereum Constantinople Hard Fork: " + << kHardForkConstantinopleHeight_; } EthConsensus::Chain EthConsensus::getChain(std::string chainStr) { - // toupper - std::transform(chainStr.begin(), chainStr.end(), chainStr.begin(), ::toupper); - - if (chainStr == "CLASSIC") { - return Chain::CLASSIC; - } - else if (chainStr == "FOUNDATION") { - return Chain::FOUNDATION; - } - else { - return Chain::UNKNOWN; - } + // toupper + std::transform(chainStr.begin(), chainStr.end(), chainStr.begin(), ::toupper); + + if (chainStr == "CLASSIC") { + return Chain::CLASSIC; + } else if (chainStr == "FOUNDATION") { + return Chain::FOUNDATION; + } else { + return Chain::UNKNOWN; + } } std::string EthConsensus::getChainStr(const Chain chain) { - switch (chain) { - case Chain::CLASSIC: - return "CLASSIC"; - case Chain::FOUNDATION: - return "FOUNDATION"; - case Chain::UNKNOWN: - return "UNKNOWN"; - } - // should not be here + switch (chain) { + case Chain::CLASSIC: + return "CLASSIC"; + case Chain::FOUNDATION: + return "FOUNDATION"; + case Chain::UNKNOWN: return "UNKNOWN"; + } + // should not be here + return "UNKNOWN"; } // The "static" block reward for the winning block. - // Uncle block rewards are not included. 
-int64_t EthConsensus::getStaticBlockReward(int nHeight, Chain chain) { - switch (chain) { - case Chain::CLASSIC: - return getStaticBlockRewardClassic(nHeight); - case Chain::FOUNDATION: - return getStaticBlockRewardFoundation(nHeight); - case Chain::UNKNOWN: - return 0; - } - // should not be here +// Uncle block rewards are not included. +int64_t EthConsensus::getStaticBlockReward(int nHeight, Chain chain,int64_t netdiff) { + switch (chain) { + case Chain::CLASSIC: + return getStaticBlockRewardClassic(nHeight); + case Chain::FOUNDATION: + return getStaticBlockRewardFoundation(nHeight,netdiff); + case Chain::UNKNOWN: return 0; + } + // should not be here + return 0; +} + +int64_t EthConsensus::getStaticBlockReward2(int64_t networkdiff) { + double minernumber = networkdiff/(12.5*28*1000000); + double reward = 59*minernumber/(4.5*60*24); + reward *= pow(10.0,18.0); + return reward; } + + // static block rewards of Ethereum Classic Main Network // The implementation followed ECIP-1017: // https://github.com/ethereumproject/ECIPs/blob/master/ECIPs/ECIP-1017.md int64_t EthConsensus::getStaticBlockRewardClassic(int nHeight) { - const int64_t blockEra = (nHeight - 1) / 5000000 + 1; + const int64_t blockEra = (nHeight - 1) / 5000000 + 1; - // The blockEra is 2 in 2018. - // Avoid calculations by giving the result directly. - if (blockEra == 2) { - return 4e+18; - } - - int64_t reward = 5e+18; - - for (int i=1; i= kHardForkConstantinopleHeight_) { - return 2e+18; +int64_t EthConsensus::getStaticBlockRewardFoundation(int nHeight,int64_t networkdiff) { + // Constantinople fork at block 7080000 on Mainnet. + if (nHeight >= kHardForkConstantinopleHeight_) { //待定 + //return 2e+18; } - // Ethereum Main Network has a static block reward (3 Ether) before height 7080000. - return 3e+18; + // Ethereum Main Network has a static block reward (3 Ether) before height + // 7080000. 
+ //return 3e+18; + double minernumber = networkdiff/(12.5*28*1000000); + double reward = 59*minernumber/(4.5*60*24); + reward *= pow(10.0,18.0); + return reward; + //return 6e+16; } double EthConsensus::getUncleBlockRewardRatio(int nHeight, Chain chain) { - switch (chain) { - case Chain::CLASSIC: - return getUncleBlockRewardRatioClassic(nHeight); - case Chain::FOUNDATION: - return getUncleBlockRewardRatioFoundation(nHeight); - case Chain::UNKNOWN: - return 0.0; - } - // should not be here + switch (chain) { + case Chain::CLASSIC: + return getUncleBlockRewardRatioClassic(nHeight); + case Chain::FOUNDATION: + return getUncleBlockRewardRatioFoundation(nHeight); + case Chain::UNKNOWN: return 0.0; + } + // should not be here + return 0.0; } // uncle block reward radio of Ethereum Classic Main Network // The implementation followed ECIP-1017: // https://github.com/ethereumproject/ECIPs/blob/master/ECIPs/ECIP-1017.md double EthConsensus::getUncleBlockRewardRatioClassic(int nHeight) { - // Assume that there is only one height lower than the main chain block + // Assume that there is only one height lower than the main chain block - const int64_t blockEra = (nHeight - 1) / 5000000 + 1; + const int64_t blockEra = (nHeight - 1) / 5000000 + 1; - if (blockEra == 1) { - // The blockEra 1 is special - return 7.0 / 8.0; - } - else if (blockEra == 2) { - // The blockEra is 2 in 2018. - // Avoid calculations by giving the result directly. - return 1.0 / 32.0; - } + if (blockEra == 1) { + // The blockEra 1 is special + return 7.0 / 8.0; + } else if (blockEra == 2) { + // The blockEra is 2 in 2018. + // Avoid calculations by giving the result directly. 
+ return 1.0 / 32.0; + } - double radio = 1.0 / 32.0; + double radio = 1.0 / 32.0; - for (int i=2; i= 4 && work[3].type() == Utilities::JS::type::Str) { if (heightStr != "null" && heightStr != work[3].str()) { - LOG(WARNING) << "block height mis-matched between getBlockByNumber(pending) " - << heightStr <<" and getWork() " << work[3].str(); + LOG(WARNING) + << "block height mis-matched between getBlockByNumber(pending) " + << heightStr << " and getWork() " << work[3].str(); } heightStr = work[3].str(); } long height = strtol(heightStr.c_str(), nullptr, 16); if (height < 1 || height == LONG_MAX) { - LOG(WARNING) << "block height/number wrong: " << heightStr << " (" << height << ")"; + LOG(WARNING) << "block height/number wrong: " << heightStr << " (" << height + << ")"; return ""; } float gasLimit = (float)strtoll(block["gasLimit"].str().c_str(), nullptr, 16); - float gasUsed = (float)strtoll(block["gasUsed"].str().c_str(), nullptr, 16); + float gasUsed = (float)strtoll(block["gasUsed"].str().c_str(), nullptr, 16); float gasUsedPercent = gasUsed / gasLimit * 100; size_t uncles = block["uncles"].array().size(); size_t transactions = block["transactions"].array().size(); - - LOG(INFO) << "chain: " << def_.chainType_ - << ", topic: " << def_.rawGwTopic_ + + LOG(INFO) << "chain: " << def_.chainType_ << ", topic: " << def_.rawGwTopic_ << ", parent: " << block["parentHash"].str() - << ", target: " << work[2].str() - << ", hHash: " << work[0].str() - << ", sHash: " << work[1].str() - << ", height: " << height - << ", uncles: " << uncles - << ", transactions: " << transactions + << ", target: " << work[2].str() << ", hHash: " << work[0].str() + << ", sHash: " << work[1].str() << ", height: " << height + << ", uncles: " << uncles << ", transactions: " << transactions << ", gasUsedPercent: " << gasUsedPercent; - return Strings::Format("{" - "\"created_at_ts\":%u," - "\"chainType\":\"%s\"," - "\"rpcAddress\":\"%s\"," - "\"rpcUserPwd\":\"%s\"," - "\"parent\":\"%s\"," - 
"\"target\":\"%s\"," - "\"hHash\":\"%s\"," - "\"sHash\":\"%s\"," - "\"height\":%ld," - "\"uncles\":%lu," - "\"transactions\":%lu," - "\"gasUsedPercent\":%f" - "}", - (uint32_t)time(nullptr), - def_.chainType_.c_str(), - def_.rpcAddr_.c_str(), - def_.rpcUserPwd_.c_str(), - block["parentHash"].str().c_str(), - work[2].str().c_str(), - work[0].str().c_str(), - work[1].str().c_str(), - height, - uncles, - transactions, - gasUsedPercent); + return Strings::Format( + "{" + "\"created_at_ts\":%u," + "\"chainType\":\"%s\"," + "\"rpcAddress\":\"%s\"," + "\"rpcUserPwd\":\"%s\"," + "\"parent\":\"%s\"," + "\"target\":\"%s\"," + "\"hHash\":\"%s\"," + "\"sHash\":\"%s\"," + "\"height\":%ld," + "\"uncles\":%lu," + "\"transactions\":%lu," + "\"gasUsedPercent\":%f" + "}", + (uint32_t)time(nullptr), + def_.chainType_.c_str(), + def_.rpcAddr_.c_str(), + def_.rpcUserPwd_.c_str(), + block["parentHash"].str().c_str(), + work[2].str().c_str(), + work[0].str().c_str(), + work[1].str().c_str(), + height, + uncles, + transactions, + gasUsedPercent); } string GwMakerHandlerEth::getBlockHeight() { - const string request = "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBlockByNumber\",\"params\":[\"pending\", false],\"id\":2}"; + const string request = + "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBlockByNumber\",\"params\":[" + "\"pending\", false],\"id\":2}"; string response; - bool res = blockchainNodeRpcCall(def_.rpcAddr_.c_str(), def_.rpcUserPwd_.c_str(), request.c_str(), response); + bool res = blockchainNodeRpcCall( + def_.rpcAddr_.c_str(), + def_.rpcUserPwd_.c_str(), + request.c_str(), + response); if (!res) { LOG(ERROR) << "get pending block failed"; return ""; } JsonNode j; - if (!JsonNode::parse(response.c_str(), response.c_str() + response.length(), j)) - { + if (!JsonNode::parse( + response.c_str(), response.c_str() + response.length(), j)) { LOG(ERROR) << "deserialize block informaiton failed"; return ""; } JsonNode result = j["result"]; if (result.type() != Utilities::JS::type::Obj || - 
result["number"].type() != Utilities::JS::type::Str) - { + result["number"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "block informaiton format not expected: " << response; return ""; } diff --git a/src/eth/GwMakerEth.h b/src/eth/GwMakerEth.h index 3f392ceab..02caf3ed8 100644 --- a/src/eth/GwMakerEth.h +++ b/src/eth/GwMakerEth.h @@ -29,15 +29,16 @@ #include "GwMaker.h" #include "utilities_js.hpp" -class GwMakerHandlerEth : public GwMakerHandlerJson -{ +class GwMakerHandlerEth : public GwMakerHandlerJson { bool checkFields(JsonNode &r) override; bool checkFieldsPendingBlock(JsonNode &r); bool checkFieldsGetwork(JsonNode &r); string constructRawMsg(JsonNode &r) override; string getRequestData() override { - return "[{\"jsonrpc\": \"2.0\", \"method\": \"eth_getBlockByNumber\", \"params\": [\"pending\", false], \"id\": 1}" - ",{\"jsonrpc\": \"2.0\", \"method\": \"eth_getWork\", \"params\": [], \"id\": 1}]"; + return "[{\"jsonrpc\": \"2.0\", \"method\": \"eth_getBlockByNumber\", " + "\"params\": [\"pending\", false], \"id\": 1}" + ",{\"jsonrpc\": \"2.0\", \"method\": \"eth_getWork\", \"params\": " + "[], \"id\": 1}]"; } string getBlockHeight(); }; diff --git a/src/eth/JobMakerEth.cc b/src/eth/JobMakerEth.cc index 9a0536683..4b210b5a6 100644 --- a/src/eth/JobMakerEth.cc +++ b/src/eth/JobMakerEth.cc @@ -27,13 +27,10 @@ #include "Utils.h" - ////////////////////////////////JobMakerHandlerEth////////////////////////////////// -bool JobMakerHandlerEth::processMsg(const string &msg) -{ +bool JobMakerHandlerEth::processMsg(const string &msg) { shared_ptr work = make_shared(); - if (!work->initFromGw(msg)) - { + if (!work->initFromGw(msg)) { LOG(ERROR) << "eth initFromGw failed " << msg; return false; } @@ -42,20 +39,24 @@ bool JobMakerHandlerEth::processMsg(const string &msg) if (workMap_.find(key) != workMap_.end()) { DLOG(INFO) << "key already exist in workMap: " << key; } - + workMap_.insert(std::make_pair(key, work)); - LOG(INFO) << "add work, height: " << 
work->getHeight() << ", header: " << work->getBlockHash() << ", from: " << work->getRpcAddress(); + LOG(INFO) << "add work, height: " << work->getHeight() + << ", header: " << work->getBlockHash() + << ", from: " << work->getRpcAddress(); clearTimeoutMsg(); if (work->getHeight() < lastReceivedHeight_) { - LOG(WARNING) << "low height work. lastHeight:" << lastReceivedHeight_ << ", workHeight: " << work->getHeight(); + LOG(WARNING) << "low height work. lastHeight:" << lastReceivedHeight_ + << ", workHeight: " << work->getHeight(); return false; } if (work->getHeight() == lastReceivedHeight_) { if (!workOfLastJob_) { - LOG(WARNING) << "work of last job is empty! lastHeight: " << lastReceivedHeight_; + LOG(WARNING) << "work of last job is empty! lastHeight: " + << lastReceivedHeight_; return true; } @@ -86,9 +87,11 @@ void JobMakerHandlerEth::clearTimeoutMsg() { // Ensure that workMap_ has at least one element, even if it expires. // So jobmaker can always generate jobs even if blockchain node does not - // update the response of getwork for a long time when there is no new transaction. - for (auto itr = workMap_.begin(); workMap_.size() > 1 && itr != workMap_.end(); ) { - const uint32_t ts = itr->second->getCreatedAt(); + // update the response of getwork for a long time when there is no new + // transaction. 
+ for (auto itr = workMap_.begin(); + workMap_.size() > 1 && itr != workMap_.end();) { + const uint32_t ts = itr->second->getCreatedAt(); const uint32_t height = itr->second->getHeight(); // gbt expired time @@ -99,8 +102,9 @@ void JobMakerHandlerEth::clearTimeoutMsg() { ++itr; } else { // remove expired gbt - LOG(INFO) << "remove timeout work: " << date("%F %T", ts) << "|" << ts << - ", height:" << height << ", headerHash:" << itr->second->getBlockHash(); + LOG(INFO) << "remove timeout work: " << date("%F %T", ts) << "|" << ts + << ", height:" << height + << ", headerHash:" << itr->second->getBlockHash(); // c++11: returns an iterator to the next element in the map itr = workMap_.erase(itr); @@ -108,8 +112,7 @@ void JobMakerHandlerEth::clearTimeoutMsg() { } } -string JobMakerHandlerEth::makeStratumJobMsg() -{ +string JobMakerHandlerEth::makeStratumJobMsg() { if (workMap_.empty()) { return ""; } @@ -128,12 +131,16 @@ string JobMakerHandlerEth::makeStratumJobMsg() uint64_t JobMakerHandlerEth::makeWorkKey(const RskWorkEth &work) { const string &blockHash = work.getBlockHash(); - uint64_t blockHashSuffix = strtoull(blockHash.substr(blockHash.size() - 4).c_str(), nullptr, 16); + uint64_t blockHashSuffix = + strtoull(blockHash.substr(blockHash.size() - 4).c_str(), nullptr, 16); - // key = | 32bits height | 8bits uncles | 8bits gasUsedPercent | 16bit hashSuffix | + // key = | 32bits height | 8bits uncles | 8bits gasUsedPercent | 16bit + // hashSuffix | uint64_t key = ((uint64_t)work.getHeight()) << 32; - key += (((uint64_t)work.getUncles()) & 0xFFu) << 24; // No overflow, the largest number of uncles in Ethereum is 2. - key += (((uint64_t)work.getGasUsedPercent()) & 0xFFu) << 16; // No overflow, the largest number should be 100 (100%). + key += (((uint64_t)work.getUncles()) & 0xFFu) + << 24; // No overflow, the largest number of uncles in Ethereum is 2. 
+ key += (((uint64_t)work.getGasUsedPercent()) & 0xFFu) + << 16; // No overflow, the largest number should be 100 (100%). key += blockHashSuffix; return key; diff --git a/src/eth/JobMakerEth.h b/src/eth/JobMakerEth.h index 6cdfd63c1..14a46291e 100644 --- a/src/eth/JobMakerEth.h +++ b/src/eth/JobMakerEth.h @@ -24,35 +24,35 @@ #ifndef JOB_MAKER_ETH_H_ #define JOB_MAKER_ETH_H_ - #include "JobMaker.h" #include "EthConsensus.h" #include "rsk/RskWork.h" - struct JobMakerDefinitionEth : public GwJobMakerDefinition { virtual ~JobMakerDefinitionEth() {} EthConsensus::Chain chain_; }; - -class JobMakerHandlerEth : public GwJobMakerHandler -{ +class JobMakerHandlerEth : public GwJobMakerHandler { public: virtual ~JobMakerHandlerEth() {} bool processMsg(const string &msg) override; string makeStratumJobMsg() override; // read-only definition - inline shared_ptr def() { return std::dynamic_pointer_cast(def_); } + inline shared_ptr def() { + return std::dynamic_pointer_cast(def_); + } private: void clearTimeoutMsg(); inline uint64_t makeWorkKey(const RskWorkEth &work); - std::map> workMap_; // sorting works by height + uncles + gasUsedPercent + hash - shared_ptr workOfLastJob_; // for quickly updating jobs that with low gas used and low uncles + std::map> + workMap_; // sorting works by height + uncles + gasUsedPercent + hash + shared_ptr workOfLastJob_; // for quickly updating jobs that with + // low gas used and low uncles uint32_t lastReceivedHeight_ = 0; // used for rejecting low height works }; diff --git a/src/eth/ShareLogParserEth.cc b/src/eth/ShareLogParserEth.cc index 0c8d2deab..b1dddfcdc 100644 --- a/src/eth/ShareLogParserEth.cc +++ b/src/eth/ShareLogParserEth.cc @@ -29,4 +29,3 @@ template class ShareLogDumperT; template class ShareLogParserT; template class ShareLogParserServerT; - diff --git a/src/eth/ShareLogParserEth.h b/src/eth/ShareLogParserEth.h index b319077ee..2a49bac1a 100644 --- a/src/eth/ShareLogParserEth.h +++ b/src/eth/ShareLogParserEth.h @@ -24,7 +24,6 
@@ #ifndef SHARELOGPARSER_ETH_H_ #define SHARELOGPARSER_ETH_H_ - #include "ShareLogParser.h" #include "StratumEth.h" diff --git a/src/eth/StatisticsEth.cc b/src/eth/StatisticsEth.cc index 3ca0765dc..3558fb512 100644 --- a/src/eth/StatisticsEth.cc +++ b/src/eth/StatisticsEth.cc @@ -23,30 +23,9 @@ */ #include "StatisticsEth.h" - template <> -void ShareStatsDay::processShare(uint32_t hourIdx, const ShareEth &share) { - ScopeLock sl(lock_); - - if (StratumStatus::isAccepted(share.status())) { - shareAccept1h_[hourIdx] += share.sharediff(); - shareAccept1d_ += share.sharediff(); - - double score = share.score(); - double reward = EthConsensus::getStaticBlockReward(share.height(), share.getChain()); - double earn = score * reward; - - score1h_[hourIdx] += score; - score1d_ += score; - earn1h_[hourIdx] += earn; - earn1d_ += earn; - - } else { - shareReject1h_[hourIdx] += share.sharediff(); - shareReject1d_ += share.sharediff(); - } - modifyHoursFlag_ |= (0x01u << hourIdx); +double ShareStatsDay::getShareReward(const ShareEth &share) { + return EthConsensus::getStaticBlockReward(share.height(), share.getChain(),share.networkdiff()); } - template class ShareStatsDay; diff --git a/src/eth/StatisticsEth.h b/src/eth/StatisticsEth.h index 3e9d7b82f..c162dd965 100644 --- a/src/eth/StatisticsEth.h +++ b/src/eth/StatisticsEth.h @@ -29,7 +29,7 @@ #include "CommonEth.h" #include "StratumEth.h" -/////////////////////////////// GlobalShareEth //////////////////////////////// +/////////////////////////////// GlobalShareEth //////////////////////////////// // Used to detect duplicate share attacks on ETH mining. 
struct GlobalShareEth { uint64_t headerHash_; @@ -39,10 +39,9 @@ struct GlobalShareEth { GlobalShareEth(const ShareEth &share) : headerHash_(share.headerhash()) - , nonce_(share.nonce()) - {} + , nonce_(share.nonce()) {} - GlobalShareEth& operator=(const GlobalShareEth &r) = default; + GlobalShareEth &operator=(const GlobalShareEth &r) = default; bool operator<(const GlobalShareEth &r) const { if (headerHash_ < r.headerHash_ || @@ -53,6 +52,7 @@ struct GlobalShareEth { } }; //////////////////////////// Alias //////////////////////////// -using DuplicateShareCheckerEth = DuplicateShareCheckerT; +using DuplicateShareCheckerEth = + DuplicateShareCheckerT; -#endif // STATISTICS_ETH_H_ +#endif // STATISTICS_ETH_H_ diff --git a/src/eth/StatsHttpdEth.cc b/src/eth/StatsHttpdEth.cc index 12341efb9..3e529aa39 100644 --- a/src/eth/StatsHttpdEth.cc +++ b/src/eth/StatsHttpdEth.cc @@ -23,7 +23,6 @@ */ #include "StatsHttpdEth.h" - /////////////// template instantiation /////////////// // Without this, some linking errors will issued. // If you add a new derived class of Share, add it at the following. 
diff --git a/src/eth/StratumClientEth.cc b/src/eth/StratumClientEth.cc index 9b95a7544..55020aa40 100644 --- a/src/eth/StratumClientEth.cc +++ b/src/eth/StratumClientEth.cc @@ -26,27 +26,29 @@ #include "Utils.h" ////////////////////////////// StratumClientEth //////////////////////////// -StratumClientEth::StratumClientEth(struct event_base *base, const string &workerFullName, const string &workerPasswd) : -StratumClient(base, workerFullName, workerPasswd) -{ +StratumClientEth::StratumClientEth( + struct event_base *base, + const string &workerFullName, + const string &workerPasswd) + : StratumClient(base, workerFullName, workerPasswd) { } -string StratumClientEth::constructShare() -{ - //etherminer (STRATUM) +string StratumClientEth::constructShare() { + // etherminer (STRATUM) // {"id": 4, "method": "mining.submit", // "params": ["0x7b9d694c26a210b9f0d35bb9bfdd70a413351111.fatrat1117", // "ae778d304393d441bf8e1c47237261675caa3827997f671d8e5ec3bd5d862503", // "0x4cc7c01bfbe51c67", // "0xae778d304393d441bf8e1c47237261675caa3827997f671d8e5ec3bd5d862503", // "0x52fdd9e9a796903c6b88af4192717e77d9a9c6fa6a1366540b65e6bcfa9069aa"]} - string s = Strings::Format("{\"id\": 4, \"method\": \"mining.submit\", " - "\"params\": [\"%s\",\"%s\",\"0x%016llx\",\"%s\",\"%s\"]}\n", - workerFullName_.c_str(), - latestJobId_.c_str(), - extraNonce2_, - headerHash_.c_str(), - mixHash_.c_str()); + string s = Strings::Format( + "{\"id\": 4, \"method\": \"mining.submit\", " + "\"params\": [\"%s\",\"%s\",\"0x%016llx\",\"%s\",\"%s\"]}\n", + workerFullName_.c_str(), + latestJobId_.c_str(), + extraNonce2_, + headerHash_.c_str(), + mixHash_.c_str()); extraNonce2_++; return s; @@ -60,15 +62,15 @@ void StratumClientEth::handleLine(const string &line) { LOG(ERROR) << "decode line fail, not a json string"; return; } - JsonNode jresult = jnode["result"]; - JsonNode jerror = jnode["error"]; - JsonNode jmethod = jnode["method"]; + JsonNode jresult = jnode["result"]; + JsonNode jerror = jnode["error"]; + 
JsonNode jmethod = jnode["method"]; if (jmethod.type() == Utilities::JS::type::Str) { - JsonNode jparams = jnode["params"]; + JsonNode jparams = jnode["params"]; auto jparamsArr = jparams.array(); - //Etherminer mining.notify + // Etherminer mining.notify //{"id":6,"method":"mining.notify","params": //["dd159c7ec5b056ad9e95e7c997829f667bc8e34c6d43fcb9e0c440ed94a85d80", //"dd159c7ec5b056ad9e95e7c997829f667bc8e34c6d43fcb9e0c440ed94a85d80", @@ -80,14 +82,13 @@ void StratumClientEth::handleLine(const string &line) { mixHash_ = jparamsArr[2].str(); target_ = jparamsArr[3].str(); - DLOG(INFO) << "job id: " << latestJobId_ << ", header hash: " << headerHash_ << ", mix: " << mixHash_ << ", target: " << target_; - } - else if (jmethod.str() == "mining.set_difficulty") { + DLOG(INFO) << "job id: " << latestJobId_ + << ", header hash: " << headerHash_ << ", mix: " << mixHash_ + << ", target: " << target_; + } else if (jmethod.str() == "mining.set_difficulty") { latestDiff_ = jparamsArr[0].uint64(); DLOG(INFO) << "latestDiff_: " << latestDiff_; - } - else - { + } else { LOG(ERROR) << "unknown method: " << line; } return; @@ -97,10 +98,11 @@ void StratumClientEth::handleLine(const string &line) { // // {"error": null, "id": 2, "result": true} // - if (jerror.type() != Utilities::JS::type::Null || + if (jerror.type() != Utilities::JS::type::Null || jresult.type() != Utilities::JS::type::Bool || jresult.boolean() != true) { -// LOG(ERROR) << "json result is null, err: " << jerror.str() << ", line: " << line; + // LOG(ERROR) << "json result is null, err: " << jerror.str() << ", + // line: " << line; } return; } @@ -108,9 +110,11 @@ void StratumClientEth::handleLine(const string &line) { if (state_ == CONNECTED) { // mining.authorize state_ = SUBSCRIBED; - string s = Strings::Format("{\"id\": 1, \"method\": \"mining.authorize\"," - "\"params\": [\"\%s\", \"%s\"]}\n", - workerFullName_.c_str(), workerPasswd_.c_str()); + string s = Strings::Format( + "{\"id\": 1, \"method\": 
\"mining.authorize\"," + "\"params\": [\"\%s\", \"%s\"]}\n", + workerFullName_.c_str(), + workerPasswd_.c_str()); sendData(s); return; } diff --git a/src/eth/StratumClientEth.h b/src/eth/StratumClientEth.h index 03122bfd0..196bc3463 100644 --- a/src/eth/StratumClientEth.h +++ b/src/eth/StratumClientEth.h @@ -27,10 +27,12 @@ #include "StratumClient.h" -class StratumClientEth : public StratumClient -{ +class StratumClientEth : public StratumClient { public: - StratumClientEth(struct event_base *base, const string &workerFullName, const string &workerPasswd); + StratumClientEth( + struct event_base *base, + const string &workerFullName, + const string &workerPasswd); virtual string constructShare(); string headerHash_; string mixHash_; diff --git a/src/eth/StratumEth.cc b/src/eth/StratumEth.cc index 090687b53..be4319cda 100644 --- a/src/eth/StratumEth.cc +++ b/src/eth/StratumEth.cc @@ -29,14 +29,12 @@ #include "bitcoin/CommonBitcoin.h" ///////////////////////////////StratumJobEth/////////////////////////// -StratumJobEth::StratumJobEth() -{ +StratumJobEth::StratumJobEth() { } -bool StratumJobEth::initFromGw(const RskWorkEth &work, EthConsensus::Chain chain, uint8_t serverId) -{ - if (work.isInitialized()) - { +bool StratumJobEth::initFromGw( + const RskWorkEth &work, EthConsensus::Chain chain, uint8_t serverId) { + if (work.isInitialized()) { chain_ = chain; height_ = work.getHeight(); @@ -56,65 +54,65 @@ bool StratumJobEth::initFromGw(const RskWorkEth &work, EthConsensus::Chain chain // generate job id string header = headerHash_.substr(2, 64); // jobId: timestamp + hash of header + server id - jobId_ = (static_cast(time(nullptr)) << 32) | (djb2(header.c_str()) & 0xFFFFFF00) | serverId; + jobId_ = (static_cast(time(nullptr)) << 32) | + (djb2(header.c_str()) & 0xFFFFFF00) | serverId; } return seedHash_.size() && headerHash_.size(); } string StratumJobEth::serializeToJson() const { - return Strings::Format("{\"jobId\":%" PRIu64"" - - ",\"chain\":\"%s\"" - 
",\"height\":%u" - ",\"parent\":\"%s\"" - - ",\"networkTarget\":\"0x%s\"" - ",\"headerHash\":\"%s\"" - ",\"sHash\":\"%s\"" - - ",\"uncles\":\"%u\"" - ",\"transactions\":\"%u\"" - ",\"gasUsedPercent\":\"%f\"" - - ",\"rpcAddress\":\"%s\"" - ",\"rpcUserPwd\":\"%s\"" - - // backward compatible - ",\"rskNetworkTarget\":\"0x%s\"" - ",\"rskBlockHashForMergedMining\":\"%s\"" - ",\"rskFeesForMiner\":\"\"" - ",\"rskdRpcAddress\":\"\"" - ",\"rskdRpcUserPwd\":\"\"" - ",\"isRskCleanJob\":false" - "}", - jobId_, - - EthConsensus::getChainStr(chain_).c_str(), - height_, - parent_.c_str(), - - networkTarget_.GetHex().c_str(), - headerHash_.c_str(), - seedHash_.c_str(), - - uncles_, - transactions_, - gasUsedPercent_, - - rpcAddress_.c_str(), - rpcUserPwd_.c_str(), - - // backward compatible - networkTarget_.GetHex().c_str(), - headerHash_.c_str() - ); + return Strings::Format( + "{\"jobId\":%" PRIu64 + "" + + ",\"chain\":\"%s\"" + ",\"height\":%u" + ",\"parent\":\"%s\"" + + ",\"networkTarget\":\"0x%s\"" + ",\"headerHash\":\"%s\"" + ",\"sHash\":\"%s\"" + + ",\"uncles\":\"%u\"" + ",\"transactions\":\"%u\"" + ",\"gasUsedPercent\":\"%f\"" + + ",\"rpcAddress\":\"%s\"" + ",\"rpcUserPwd\":\"%s\"" + + // backward compatible + ",\"rskNetworkTarget\":\"0x%s\"" + ",\"rskBlockHashForMergedMining\":\"%s\"" + ",\"rskFeesForMiner\":\"\"" + ",\"rskdRpcAddress\":\"\"" + ",\"rskdRpcUserPwd\":\"\"" + ",\"isRskCleanJob\":false" + "}", + jobId_, + + EthConsensus::getChainStr(chain_).c_str(), + height_, + parent_.c_str(), + + networkTarget_.GetHex().c_str(), + headerHash_.c_str(), + seedHash_.c_str(), + + uncles_, + transactions_, + gasUsedPercent_, + + rpcAddress_.c_str(), + rpcUserPwd_.c_str(), + + // backward compatible + networkTarget_.GetHex().c_str(), + headerHash_.c_str()); } -bool StratumJobEth::unserializeFromJson(const char *s, size_t len) -{ +bool StratumJobEth::unserializeFromJson(const char *s, size_t len) { JsonNode j; - if (!JsonNode::parse(s, s + len, j)) - { + if (!JsonNode::parse(s, s 
+ len, j)) { return false; } @@ -123,8 +121,7 @@ bool StratumJobEth::unserializeFromJson(const char *s, size_t len) j["height"].type() != Utilities::JS::type::Int || j["networkTarget"].type() != Utilities::JS::type::Str || j["headerHash"].type() != Utilities::JS::type::Str || - j["sHash"].type() != Utilities::JS::type::Str) - { + j["sHash"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "parse eth stratum job failure: " << s; return false; } @@ -139,8 +136,7 @@ bool StratumJobEth::unserializeFromJson(const char *s, size_t len) if (j["parent"].type() == Utilities::JS::type::Str && j["uncles"].type() == Utilities::JS::type::Int && j["transactions"].type() == Utilities::JS::type::Int && - j["gasUsedPercent"].type() == Utilities::JS::type::Real) - { + j["gasUsedPercent"].type() == Utilities::JS::type::Real) { parent_ = j["parent"].str(); uncles_ = j["uncles"].uint32(); transactions_ = j["transactions"].uint32(); @@ -148,8 +144,7 @@ bool StratumJobEth::unserializeFromJson(const char *s, size_t len) } if (j["rpcAddress"].type() == Utilities::JS::type::Str && - j["rpcUserPwd"].type() == Utilities::JS::type::Str) - { + j["rpcUserPwd"].type() == Utilities::JS::type::Str) { rpcAddress_ = j["rpcAddress"].str(); rpcUserPwd_ = j["rpcUserPwd"].str(); } diff --git a/src/eth/StratumEth.h b/src/eth/StratumEth.h index 9a41ac95a..deee566b5 100644 --- a/src/eth/StratumEth.h +++ b/src/eth/StratumEth.h @@ -40,60 +40,55 @@ // If there is no forward compatibility, one of the versions of Share // will be considered invalid, resulting in loss of users' hashrate. 
- - -class ShareEthBytesVersion -{ +class ShareEthBytesVersion { public: - - uint32_t version_ = 0;//0 - uint32_t checkSum_ = 0;//4 - - int64_t workerHashId_ = 0;//8 - int32_t userId_ = 0;//16 - int32_t status_ = 0;//20 - int64_t timestamp_ = 0;//24 - IpAddress ip_ = 0;//32 - - uint64_t headerHash_ = 0;//48 - uint64_t shareDiff_ = 0;//56 - uint64_t networkDiff_ = 0;//64 - uint64_t nonce_ = 0;//72 - uint32_t sessionId_ = 0;//80 - uint32_t height_ = 0;//84 + uint32_t version_ = 0; // 0 + uint32_t checkSum_ = 0; // 4 + + int64_t workerHashId_ = 0; // 8 + int32_t userId_ = 0; // 16 + int32_t status_ = 0; // 20 + int64_t timestamp_ = 0; // 24 + IpAddress ip_ = 0; // 32 + + uint64_t headerHash_ = 0; // 48 + uint64_t shareDiff_ = 0; // 56 + uint64_t networkDiff_ = 0; // 64 + uint64_t nonce_ = 0; // 72 + uint32_t sessionId_ = 0; // 80 + uint32_t height_ = 0; // 84 uint32_t checkSum() const { uint64_t c = 0; - c += (uint64_t) version_; - c += (uint64_t) workerHashId_; - c += (uint64_t) userId_; - c += (uint64_t) status_; - c += (uint64_t) timestamp_; - c += (uint64_t) ip_.addrUint64[0]; - c += (uint64_t) ip_.addrUint64[1]; - c += (uint64_t) headerHash_; - c += (uint64_t) shareDiff_; - c += (uint64_t) networkDiff_; - c += (uint64_t) nonce_; - c += (uint64_t) sessionId_; - c += (uint64_t) height_; - - return ((uint32_t) c) + ((uint32_t) (c >> 32)); + c += (uint64_t)version_; + c += (uint64_t)workerHashId_; + c += (uint64_t)userId_; + c += (uint64_t)status_; + c += (uint64_t)timestamp_; + c += (uint64_t)ip_.addrUint64[0]; + c += (uint64_t)ip_.addrUint64[1]; + c += (uint64_t)headerHash_; + c += (uint64_t)shareDiff_; + c += (uint64_t)networkDiff_; + c += (uint64_t)nonce_; + c += (uint64_t)sessionId_; + c += (uint64_t)height_; + + return ((uint32_t)c) + ((uint32_t)(c >> 32)); } - }; - -class ShareEth : public sharebase::EthMsg -{ +class ShareEth : public sharebase::EthMsg { public: - - const static uint32_t CURRENT_VERSION_FOUNDATION = 0x00110003u; // first 0011: ETH, second 
0002: version 3 - const static uint32_t CURRENT_VERSION_CLASSIC = 0x00160003u; // first 0016: ETC, second 0002: version 3 - const static uint32_t BYTES_VERSION_FOUNDATION = 0x00110002u; // first 0011: ETH, second 0002: version 3 - const static uint32_t BYTES_VERSION_CLASSIC = 0x00160002u; // first 0016: ETC, second 0002: version 3 - + const static uint32_t CURRENT_VERSION_FOUNDATION = + 0x00110003u; // first 0011: ETH, second 0002: version 3 + const static uint32_t CURRENT_VERSION_CLASSIC = + 0x00160003u; // first 0016: ETC, second 0002: version 3 + const static uint32_t BYTES_VERSION_FOUNDATION = + 0x00110002u; // first 0011: ETH, second 0002: version 3 + const static uint32_t BYTES_VERSION_CLASSIC = + 0x00160002u; // first 0016: ETC, second 0002: version 3 ShareEth() { set_version(0); @@ -127,116 +122,130 @@ class ShareEth : public sharebase::EthMsg inline static uint32_t getVersion(EthConsensus::Chain chain) { switch (chain) { - case EthConsensus::Chain::FOUNDATION: - return CURRENT_VERSION_FOUNDATION; - case EthConsensus::Chain::CLASSIC: - return CURRENT_VERSION_CLASSIC; - case EthConsensus::Chain::UNKNOWN: - LOG(FATAL) << "Unknown chain"; - return 0; + case EthConsensus::Chain::FOUNDATION: + return CURRENT_VERSION_FOUNDATION; + case EthConsensus::Chain::CLASSIC: + return CURRENT_VERSION_CLASSIC; + case EthConsensus::Chain::UNKNOWN: + LOG(FATAL) << "Unknown chain"; + return 0; } // should not be here LOG(FATAL) << "Inexpectant const value"; return 0; } - EthConsensus::Chain getChain() const { - - return getChain(version()); - } + EthConsensus::Chain getChain() const { return getChain(version()); } - double score() const - { + double score() const { - if (!StratumStatus::isAccepted(status()) || sharediff() == 0 || networkdiff() == 0) { + if (!StratumStatus::isAccepted(status()) || sharediff() == 0 || + networkdiff() == 0) { return 0.0; } double result = 0.0; - // Network diff may less than share diff on testnet or regression test network. 
- // On regression test network, the network diff may be zero. - // But no matter how low the network diff is, you can only dig one block at a time. + // Network diff may less than share diff on testnet or regression test + // network. On regression test network, the network diff may be zero. But no + // matter how low the network diff is, you can only dig one block at a time. if (networkdiff() < sharediff()) { result = 1.0; - } - else { + } else { result = (double)sharediff() / (double)networkdiff(); } + DLOG(INFO) << "当前sharefiff: "<<(double)sharediff(); + DLOG(INFO) << "当前networkdiff: "<<(double)networkdiff(); // Share of the uncle block has a lower reward. if (StratumStatus::isStale(status())) { result *= EthConsensus::getUncleBlockRewardRatio(height(), getChain()); + DLOG(INFO) << "当前出现叔块高度为: "<checkSum() != share->checkSum_) { - DLOG(INFO) << "checkSum mismatched! checkSum_: " << share->checkSum_<< ", checkSum(): " << share->checkSum(); + DLOG(INFO) << "checkSum mismatched! checkSum_: " << share->checkSum_ + << ", checkSum(): " << share->checkSum(); return false; } @@ -262,16 +271,15 @@ class ShareEth : public sharebase::EthMsg return true; } - - bool SerializeToArrayWithLength(string& data, uint32_t& size) const { + bool SerializeToArrayWithLength(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size + sizeof(uint32_t)); - *((uint32_t*)data.data()) = size; - uint8_t * payload = (uint8_t *)data.data(); + *((uint32_t *)data.data()) = size; + uint8_t *payload = (uint8_t *)data.data(); if (!SerializeToArray(payload + sizeof(uint32_t), size)) { - DLOG(INFO) << "base.SerializeToArray failed!"; + DLOG(INFO) << "base.SerializeToArray failed!"; return false; } @@ -279,12 +287,12 @@ class ShareEth : public sharebase::EthMsg return true; } - bool SerializeToArrayWithVersion(string& data, uint32_t& size) const { + bool SerializeToArrayWithVersion(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size + sizeof(uint32_t)); - uint8_t 
* payload = (uint8_t *)data.data(); - *((uint32_t*)payload) = version(); + uint8_t *payload = (uint8_t *)data.data(); + *((uint32_t *)payload) = version(); if (!SerializeToArray(payload + sizeof(uint32_t), size)) { DLOG(INFO) << "SerializeToArray failed!"; @@ -295,20 +303,18 @@ class ShareEth : public sharebase::EthMsg return true; } - size_t getsharelength() { - return IsInitialized() ? ByteSize() : 0; - } + size_t getsharelength() { return IsInitialized() ? ByteSize() : 0; } }; - - -class StratumJobEth : public StratumJob -{ +class StratumJobEth : public StratumJob { public: StratumJobEth(); string serializeToJson() const override; bool unserializeFromJson(const char *s, size_t len) override; - bool initFromGw(const RskWorkEth &latestRskBlockJson, EthConsensus::Chain chain, uint8_t serverId); + bool initFromGw( + const RskWorkEth &latestRskBlockJson, + EthConsensus::Chain chain, + uint8_t serverId); EthConsensus::Chain chain_ = EthConsensus::Chain::UNKNOWN; uint32_t height_ = 0; @@ -346,9 +352,11 @@ struct StratumTraitsEth { }; struct LocalJobType : public LocalJob { LocalJobType(uint64_t jobId, const std::string &headerHash) - : LocalJob(jobId), headerHash_(headerHash) { + : LocalJob(jobId) + , headerHash_(headerHash) {} + bool operator==(const std::string &headerHash) const { + return headerHash_ == headerHash; } - bool operator==(const std::string &headerHash) const { return headerHash_ == headerHash; } std::string headerHash_; }; @@ -361,8 +369,8 @@ enum class StratumProtocolEth { NICEHASH_STRATUM, }; -inline const char* getProtocolString(StratumProtocolEth protocol) { - switch(protocol) { +inline const char *getProtocolString(StratumProtocolEth protocol) { + switch (protocol) { case StratumProtocolEth::ETHPROXY: return "ETHPROXY"; case StratumProtocolEth::STRATUM: diff --git a/src/eth/StratumMinerEth.cc b/src/eth/StratumMinerEth.cc index bfb74b383..ebfea5bba 100644 --- a/src/eth/StratumMinerEth.cc +++ b/src/eth/StratumMinerEth.cc @@ -29,20 +29,24 @@ #include 
"CommonEth.h" -///////////////////////////////// StratumSessionEth //////////////////////////////// -StratumMinerEth::StratumMinerEth(StratumSessionEth &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId, - StratumProtocolEth ethProtocol) - : StratumMinerBase(session, diffController, clientAgent, workerName, workerId), ethProtocol_(ethProtocol) { +///////////////////////////////// StratumSessionEth +/////////////////////////////////// +StratumMinerEth::StratumMinerEth( + StratumSessionEth &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId, + StratumProtocolEth ethProtocol) + : StratumMinerBase(session, diffController, clientAgent, workerName, workerId) + , ethProtocol_(ethProtocol) { } -void StratumMinerEth::handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumMinerEth::handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) { if (method == "eth_getWork") { handleRequest_GetWork(idStr, jparams); } else if (method == "eth_submitHashrate") { @@ -52,27 +56,33 @@ void StratumMinerEth::handleRequest(const std::string &idStr, } } -void StratumMinerEth::handleRequest_GetWork(const string &idStr, const JsonNode &jparams) { - getSession().sendMiningNotifyWithId(getSession().getServer().GetJobRepository()->getLatestStratumJobEx(), idStr); +void StratumMinerEth::handleRequest_GetWork( + const string &idStr, const JsonNode &jparams) { + getSession().sendMiningNotifyWithId( + getSession().getServer().GetJobRepository()->getLatestStratumJobEx(), + idStr); } -void StratumMinerEth::handleRequest_SubmitHashrate(const string &idStr, const JsonNode &jparams) { +void StratumMinerEth::handleRequest_SubmitHashrate( + const string &idStr, const JsonNode &jparams) { 
responseTrue(idStr); } -void StratumMinerEth::handleRequest_Submit(const string &idStr, const JsonNode &jparams) { +void StratumMinerEth::handleRequest_Submit( + const string &idStr, const JsonNode &jparams) { auto &session = getSession(); if (session.getState() != StratumSession::AUTHENTICATED) { responseError(idStr, StratumStatus::UNAUTHORIZED); // there must be something wrong, send reconnect command - const string s = "{\"id\":null,\"method\":\"client.reconnect\",\"params\":[]}\n"; + const string s = + "{\"id\":null,\"method\":\"client.reconnect\",\"params\":[]}\n"; session.sendData(s); return; } - //etherminer (STRATUM) + // etherminer (STRATUM) // {"id": 4, "method": "mining.submit", // "params": ["0x7b9d694c26a210b9f0d35bb9bfdd70a413351111.fatrat1117", // "ae778d304393d441bf8e1c47237261675caa3827997f671d8e5ec3bd5d862503", @@ -80,27 +90,31 @@ void StratumMinerEth::handleRequest_Submit(const string &idStr, const JsonNode & // "0xae778d304393d441bf8e1c47237261675caa3827997f671d8e5ec3bd5d862503", // "0x52fdd9e9a796903c6b88af4192717e77d9a9c6fa6a1366540b65e6bcfa9069aa"]} - //Claymore (ETHPROXY) + // Claymore (ETHPROXY) //{"id":4,"method":"eth_submitWork", //"params":["0x17a0eae8082fb64c","0x94a789fba387d454312db3287f8440f841de762522da8ba620b7fcf34a80330c", //"0x2cc7dad9f2f92519891a2d5f67378e646571b89e5994fe9290d6d669e480fdff"]} - //NICEHASH_STRATUM + // NICEHASH_STRATUM // {"id": 244, // "method": "mining.submit", // "params": [ "username", "bf0488aa", "6a909d9bbc0f" ] // } - //Note in above example that minernonce is 6 bytes, because provided extranonce was 2 bytes. - //If pool provides 3 bytes extranonce, then minernonce must be 5 bytes. + // Note in above example that minernonce is 6 bytes, because provided + // extranonce was 2 bytes. If pool provides 3 bytes extranonce, then + // minernonce must be 5 bytes. 
auto params = (const_cast(jparams)).array(); if (StratumProtocolEth::STRATUM == ethProtocol_ && params.size() < 5) { responseError(idStr, StratumStatus::ILLEGAL_PARARMS); return; - } else if (StratumProtocolEth::ETHPROXY == ethProtocol_ && params.size() < 3) { + } else if ( //我们用的这个,params里面有三个参数 + StratumProtocolEth::ETHPROXY == ethProtocol_ && params.size() < 3) { responseError(idStr, StratumStatus::ILLEGAL_PARARMS); return; - } else if (StratumProtocolEth::NICEHASH_STRATUM == ethProtocol_ && params.size() < 3) { + } else if ( + StratumProtocolEth::NICEHASH_STRATUM == ethProtocol_ && + params.size() < 3) { responseError(idStr, StratumStatus::ILLEGAL_PARARMS); return; } @@ -111,20 +125,17 @@ void StratumMinerEth::handleRequest_Submit(const string &idStr, const JsonNode & jobId = params[1].str(); sNonce = params[2].str(); sHeader = params[3].str(); - } - break; - case StratumProtocolEth::ETHPROXY: { + } break; + case StratumProtocolEth::ETHPROXY: { //用的这个 sNonce = params[0].str(); sHeader = params[1].str(); jobId = sHeader; - } - break; + } break; case StratumProtocolEth::NICEHASH_STRATUM: { jobId = params[1].str(); sNonce = params[2].str(); sHeader = jobId; - } - break; + } break; } // Claymore's jobId starting with "0x" @@ -147,7 +158,8 @@ void StratumMinerEth::handleRequest_Submit(const string &idStr, const JsonNode & auto &worker = session.getWorker(); auto extraNonce1 = session.getSessionId(); auto clientIp = session.getClientIp(); - shared_ptr exjob = server.GetJobRepository()->getStratumJobEx(localJob->jobId_); + shared_ptr exjob = + server.GetJobRepository()->getStratumJobEx(localJob->jobId_); if (exjob.get() == nullptr) { responseFalse(idStr, StratumStatus::JOB_NOT_FOUND); return; @@ -166,6 +178,7 @@ void StratumMinerEth::handleRequest_Submit(const string &idStr, const JsonNode & uint64_t nonce = stoull(sNonce, nullptr, 16); uint32_t height = sjob->height_; + DLOG(INFO) <<"当前难度"<< sjob->networkTarget_.GetHex(); uint64_t networkDiff = 
Eth_TargetToDifficulty(sjob->networkTarget_.GetHex()); // Used to prevent duplicate shares. (sHeader has a prefix "0x") uint64_t headerPrefix = stoull(sHeader.substr(2, 16), nullptr, 16); @@ -185,7 +198,7 @@ void StratumMinerEth::handleRequest_Submit(const string &idStr, const JsonNode & share.set_userid(worker.userId_); share.set_sharediff(jobDiff.currentJobDiff_); share.set_networkdiff(networkDiff); - share.set_timestamp((uint64_t) time(nullptr)); + share.set_timestamp((uint64_t)time(nullptr)); share.set_status(StratumStatus::REJECT_NO_REASON); share.set_height(height); share.set_nonce(nonce); @@ -199,19 +212,24 @@ void StratumMinerEth::handleRequest_Submit(const string &idStr, const JsonNode & if (!localJob->addLocalShare(localShare)) { responseFalse(idStr, StratumStatus::DUPLICATE_SHARE); // add invalid share to counter - invalidSharesCounter_.insert((int64_t) time(nullptr), 1); + invalidSharesCounter_.insert((int64_t)time(nullptr), 1); return; } - // The mixHash is used to submit the work to the Ethereum node. // We don't need to pay attention to whether the mixHash submitted // by the miner is correct, because we recalculated it. // SolvedShare will be accepted correctly by the ETH node if // the difficulty is reached in our calculations. uint256 shareMixHash; - share.set_status(server.checkShareAndUpdateDiff(share, localJob->jobId_, nonce, uint256S(sHeader), - jobDiff.jobDiffs_, shareMixHash, worker.fullName_)); + share.set_status(server.checkShareAndUpdateDiff( + share, + localJob->jobId_, + nonce, + uint256S(sHeader), + jobDiff.jobDiffs_, + shareMixHash, + worker.fullName_)); if (StratumStatus::isAccepted(share.status())) { DLOG(INFO) << "share reached the diff: " << share.sharediff(); @@ -223,16 +241,24 @@ void StratumMinerEth::handleRequest_Submit(const string &idStr, const JsonNode & // shares in a short time, we just drop them. 
if (handleShare(idStr, share.status(), share.sharediff())) { if (StratumStatus::isSolved(share.status())) { - server.sendSolvedShare2Kafka(sNonce, sHeader, shareMixHash.GetHex(), height, networkDiff, worker, chain); + server.sendSolvedShare2Kafka( + sNonce, + sHeader, + shareMixHash.GetHex(), + height, + networkDiff, + worker, + chain); } } else { // check if there is invalid share spamming - int64_t invalidSharesNum = invalidSharesCounter_.sum(time(nullptr), INVALID_SHARE_SLIDING_WINDOWS_SIZE); + int64_t invalidSharesNum = invalidSharesCounter_.sum( + time(nullptr), INVALID_SHARE_SLIDING_WINDOWS_SIZE); // too much invalid shares, don't send them to kafka if (invalidSharesNum >= INVALID_SHARE_SLIDING_WINDOWS_MAX_LIMIT) { - LOG(WARNING) << "invalid share spamming, diff: " - << share.sharediff() << ", uid: " << worker.userId_ - << ", uname: \"" << worker.userName_ << "\", ip: " << clientIp + LOG(WARNING) << "invalid share spamming, diff: " << share.sharediff() + << ", uid: " << worker.userId_ << ", uname: \"" + << worker.userName_ << "\", ip: " << clientIp << "checkshare result: " << share.status(); return; } @@ -243,11 +269,11 @@ void StratumMinerEth::handleRequest_Submit(const string &idStr, const JsonNode & std::string message; uint32_t size = 0; if (!share.SerializeToArrayWithVersion(message, size)) { - LOG(ERROR) << "share SerializeToBuffer failed!"<< share.toString(); + LOG(ERROR) << "share SerializeToBuffer failed!" 
<< share.toString(); return; } - server.sendShare2Kafka((const uint8_t *) message.data(), size); + server.sendShare2Kafka((const uint8_t *)message.data(), size); } void StratumMinerEth::responseError(const string &idStr, int code) { @@ -276,9 +302,13 @@ void StratumMinerEth::responseFalse(const string &idStr, int code) { void StratumMinerEth::rpc2ResponseFalse(const string &idStr, int errCode) { char buf[1024]; - int len = snprintf(buf, sizeof(buf), - "{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":false,\"data\":{\"code\":%d,\"message\":\"%s\"}}\n", - idStr.empty() ? "null" : idStr.c_str(), - errCode, StratumStatus::toString(errCode)); + int len = snprintf( + buf, + sizeof(buf), + "{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":false,\"data\":{\"code\":%d," + "\"message\":\"%s\"}}\n", + idStr.empty() ? "null" : idStr.c_str(), + errCode, + StratumStatus::toString(errCode)); getSession().sendData(buf, len); } diff --git a/src/eth/StratumMinerEth.h b/src/eth/StratumMinerEth.h index b8f217773..64a2f6649 100644 --- a/src/eth/StratumMinerEth.h +++ b/src/eth/StratumMinerEth.h @@ -29,21 +29,24 @@ class StratumMinerEth : public StratumMinerBase { public: - StratumMinerEth(StratumSessionEth &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId, - StratumProtocolEth ethProtocol); - - void handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) override; + StratumMinerEth( + StratumSessionEth &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId, + StratumProtocolEth ethProtocol); + + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; private: void handleRequest_GetWork(const string &idStr, const JsonNode &jparams); - void handleRequest_SubmitHashrate(const string &idStr, 
const JsonNode &jparams); + void + handleRequest_SubmitHashrate(const string &idStr, const JsonNode &jparams); void handleRequest_Submit(const string &idStr, const JsonNode &jparams); void responseError(const string &idStr, int code); void responseTrue(const string &idStr); diff --git a/src/eth/StratumServerEth.cc b/src/eth/StratumServerEth.cc index a27e77925..2449b6d65 100644 --- a/src/eth/StratumServerEth.cc +++ b/src/eth/StratumServerEth.cc @@ -39,18 +39,22 @@ using namespace std; - -////////////////////////////////// JobRepositoryEth /////////////////////////////// -JobRepositoryEth::JobRepositoryEth(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerEth *server) +////////////////////////////////// JobRepositoryEth +////////////////////////////////// +JobRepositoryEth::JobRepositoryEth( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerEth *server) : JobRepositoryBase(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) , light_(nullptr) , nextLight_(nullptr) - , epochs_(0xffffffffffffffff) -{ + , epochs_(0xffffffffffffffff) { loadLightFromFile(); } -shared_ptr JobRepositoryEth::createStratumJobEx(shared_ptr sjob, bool isClean){ +shared_ptr JobRepositoryEth::createStratumJobEx( + shared_ptr sjob, bool isClean) { return std::make_shared(sjob, isClean); } @@ -67,26 +71,24 @@ void JobRepositoryEth::broadcastStratumJob(shared_ptr sjob) { LOG(INFO) << "received new height stratum job, height: " << sjobEth->height_ << ", headerHash: " << sjobEth->headerHash_; } - + shared_ptr exJob(createStratumJobEx(sjobEth, isClean)); - { - ScopeLock sl(lock_); - - if (isClean) { - // mark all jobs as stale, should do this before insert new job - // stale shares will not be rejected, they will be marked as ACCEPT_STALE and have lower rewards. 
- for (auto it : exJobs_) { - it.second->markStale(); - } - } - // insert new job - exJobs_[sjobEth->jobId_] = exJob; + if (isClean) { + // mark all jobs as stale, should do this before insert new job + // stale shares will not be rejected, they will be marked as ACCEPT_STALE + // and have lower rewards. + for (auto it : exJobs_) { + it.second->markStale(); + } } - //send job first + // insert new job + exJobs_[sjobEth->jobId_] = exJob; + + // send job first sendMiningNotify(exJob); - //then, create light for verification + // then, create light for verification newLightNonBlocking(sjobEth); } @@ -105,14 +107,14 @@ void JobRepositoryEth::newLightNonBlocking(shared_ptr job) { return; } - boost::thread t(boost::bind(&JobRepositoryEth::_newLightThread, this, job->height_)); + boost::thread t( + boost::bind(&JobRepositoryEth::_newLightThread, this, job->height_)); t.detach(); } -void JobRepositoryEth::_newLightThread(uint64_t height) -{ +void JobRepositoryEth::_newLightThread(uint64_t height) { uint64_t const newEpochs = height / ETHASH_EPOCH_LENGTH; - //same seed do nothing + // same seed do nothing if (newEpochs == epochs_) { return; } @@ -131,14 +133,12 @@ void JobRepositoryEth::_newLightThread(uint64_t height) if (nullptr == nextLight_) { light_ = ethash_light_new(height); - } - else if (newEpochs == oldEpochs + 1) { - //get pre-generated light if exists + } else if (newEpochs == oldEpochs + 1) { + // get pre-generated light if exists ethash_light_delete(light_); - light_ = nextLight_; + light_ = nextLight_; nextLight_ = nullptr; - } - else { + } else { // pre-generated light unavailable because of epochs jumping ethash_light_delete(nextLight_); nextLight_ = nullptr; @@ -153,9 +153,11 @@ void JobRepositoryEth::_newLightThread(uint64_t height) } time_t elapse = time(nullptr) - now; - // Note: The performance difference between Debug and Release builds is very large. - // The Release build may complete in 5 s, while the Debug build takes more than 60 s. 
- LOG(INFO) << "create light for blk height: " << height << " takes " << elapse << " seconds"; + // Note: The performance difference between Debug and Release builds is very + // large. The Release build may complete in 5 s, while the Debug build takes + // more than 60 s. + LOG(INFO) << "create light for blk height: " << height << " takes " + << elapse << " seconds"; } { @@ -168,14 +170,15 @@ void JobRepositoryEth::_newLightThread(uint64_t height) nextLight_ = ethash_light_new(nextBlkNum); time_t elapse = time(nullptr) - now; - // Note: The performance difference between Debug and Release builds is very large. - // The Release build may complete in 5 s, while the Debug build takes more than 60 s. - LOG(INFO) << "create light for blk height: " << nextBlkNum << " takes " << elapse << " seconds"; + // Note: The performance difference between Debug and Release builds is very + // large. The Release build may complete in 5 s, while the Debug build takes + // more than 60 s. + LOG(INFO) << "create light for blk height: " << nextBlkNum << " takes " + << elapse << " seconds"; } } -void JobRepositoryEth::deleteLight() -{ +void JobRepositoryEth::deleteLight() { ScopeLock slLight(lightLock_); ScopeLock slNextLight(nextLightLock_); deleteLightNoLock(); @@ -204,7 +207,8 @@ void JobRepositoryEth::saveLightToFile() { std::ofstream f(kLightCacheFilePath, std::ios::binary | std::ios::trunc); if (!f) { - LOG(ERROR) << "create DAG light caching file " << kLightCacheFilePath << " failed"; + LOG(ERROR) << "create DAG light caching file " << kLightCacheFilePath + << " failed"; return; } @@ -222,11 +226,13 @@ void JobRepositoryEth::saveLightToFile() { LOG(INFO) << "DAG light was cached to file " << kLightCacheFilePath; } -void JobRepositoryEth::saveLightToFile(const ethash_light_t &light, std::ofstream &f) { +void JobRepositoryEth::saveLightToFile( + const ethash_light_t &light, std::ofstream &f) { LightCacheHeader header; header.blockNumber_ = light->block_number; header.cacheSize_ = 
light->cache_size; - header.checkSum_ = computeLightCacheCheckSum(header, (const uint8_t *)light->cache); + header.checkSum_ = + computeLightCacheCheckSum(header, (const uint8_t *)light->cache); f.write((const char *)&header, sizeof(header)); f.write((const char *)light->cache, header.cacheSize_); @@ -239,7 +245,8 @@ void JobRepositoryEth::loadLightFromFile() { std::ifstream f(kLightCacheFilePath, std::ios::binary); if (!f) { - LOG(WARNING) << "cannot read DAG light caching file " << kLightCacheFilePath; + LOG(WARNING) << "cannot read DAG light caching file " + << kLightCacheFilePath; return; } @@ -254,12 +261,14 @@ void JobRepositoryEth::loadLightFromFile() { } f.close(); - LOG(INFO) << "loading DAG light from file " << kLightCacheFilePath << " finished"; + LOG(INFO) << "loading DAG light from file " << kLightCacheFilePath + << " finished"; } ethash_light_t JobRepositoryEth::loadLightFromFile(std::ifstream &f) { if (f.eof()) { - LOG(WARNING) << "cannot load DAG light: file EOF reached when reading header"; + LOG(WARNING) + << "cannot load DAG light: file EOF reached when reading header"; return NULL; } @@ -268,86 +277,92 @@ ethash_light_t JobRepositoryEth::loadLightFromFile(std::ifstream &f) { f.read((char *)&header, sizeof(header)); if (f.gcount() != sizeof(header)) { - LOG(WARNING) << "cannot load DAG light: only " << f.gcount() << " bytes was read" - " but header size is " << sizeof(header) << "bytes"; + LOG(WARNING) << "cannot load DAG light: only " << f.gcount() + << " bytes was read" + " but header size is " + << sizeof(header) << "bytes"; return NULL; } // ethash_light_delete() will use free() to release the memory. // So malloc() and calloc() should used for memory allocation. // The basic logic and codes copied from ethash_light_new_internal(). 
- struct ethash_light *ret; - ret = (ethash_light *)calloc(sizeof(*ret), 1); - if (!ret) { - LOG(WARNING) << "cannot load DAG light: calloc " << sizeof(*ret) << " bytes failed"; - return NULL; - } + struct ethash_light *ret; + ret = (ethash_light *)calloc(sizeof(*ret), 1); + if (!ret) { + LOG(WARNING) << "cannot load DAG light: calloc " << sizeof(*ret) + << " bytes failed"; + return NULL; + } #if defined(__MIC__) - ret->cache = _mm_malloc((size_t)header.cacheSize_, 64); + ret->cache = _mm_malloc((size_t)header.cacheSize_, 64); #else - ret->cache = malloc((size_t)header.cacheSize_); + ret->cache = malloc((size_t)header.cacheSize_); #endif - if (!ret->cache) { - LOG(WARNING) << "cannot load DAG light: malloc " << header.cacheSize_ << " bytes failed (cache maybe broken)"; - goto fail_free_light; - } + if (!ret->cache) { + LOG(WARNING) << "cannot load DAG light: malloc " << header.cacheSize_ + << " bytes failed (cache maybe broken)"; + goto fail_free_light; + } if (f.eof()) { - LOG(WARNING) << "cannot load DAG light: file EOF reached when reading cache"; + LOG(WARNING) + << "cannot load DAG light: file EOF reached when reading cache"; goto fail_free_cache_mem; } f.read((char *)ret->cache, header.cacheSize_); if (f.gcount() != (std::streamsize)header.cacheSize_) { - LOG(WARNING) << "cannot load DAG light: only " << f.gcount() << " bytes was read" + LOG(WARNING) << "cannot load DAG light: only " << f.gcount() + << " bytes was read" << " but cache size is " << header.cacheSize_ << " bytes"; goto fail_free_cache_mem; } checkSum = computeLightCacheCheckSum(header, (const uint8_t *)ret->cache); if (checkSum != header.checkSum_) { - LOG(WARNING) << "cannot load DAG light: checkSum mis-matched, it should be " << header.checkSum_ - << " but is " << checkSum << " now"; + LOG(WARNING) << "cannot load DAG light: checkSum mis-matched, it should be " + << header.checkSum_ << " but is " << checkSum << " now"; goto fail_free_cache_mem; } ret->block_number = header.blockNumber_; - 
ret->cache_size = header.cacheSize_; - return ret; + ret->cache_size = header.cacheSize_; + return ret; fail_free_cache_mem: #if defined(__MIC__) - _mm_free(ret->cache); + _mm_free(ret->cache); #else - free(ret->cache); + free(ret->cache); #endif fail_free_light: - free(ret); - return NULL; + free(ret); + return NULL; } -uint64_t JobRepositoryEth::computeLightCacheCheckSum(const LightCacheHeader &header, const uint8_t *data) { +uint64_t JobRepositoryEth::computeLightCacheCheckSum( + const LightCacheHeader &header, const uint8_t *data) { union { uint64_t u64; - uint8_t u8[8]; + uint8_t u8[8]; } checkSum; checkSum.u64 = 0; checkSum.u64 += header.blockNumber_; checkSum.u64 += header.cacheSize_; - - for (size_t i=0; i(serverId_); - // NiceHash only accepts 2 bytes or shorter of extraNonce (startNonce) in protocol NICEHASH_STRATUM. - // However we use a 3 bytes of extraNonce. Also, the sessionID is pre-allocated, and we can't allocate - // more space for a worker after detecting that it is from NiceHash. - // So we changed the default setting to a large allocation interval. - // This can minimize the impact of mining space overlap on NiceHash miners. - sessionIDManager_->setAllocInterval(256); - #endif +bool ServerEth::setupInternal(StratumServer *sserver) { +// TODO: WORK_WITH_STRATUM_SWITCHER only effects Bitcoin's sserver +#ifndef WORK_WITH_STRATUM_SWITCHER + // Use 16 bits index of Session ID. + // The full Session ID (with server id as prefix) is 24 bits. + // Session ID will be used as starting nonce, so the single + // searching space of a miner will be 2^40 (= 2^64 - 2^24). + delete sessionIDManager_; + sessionIDManager_ = new SessionIDManagerT<16>(serverId_); + // NiceHash only accepts 2 bytes or shorter of extraNonce (startNonce) in + // protocol NICEHASH_STRATUM. However we use a 3 bytes of extraNonce. Also, + // the sessionID is pre-allocated, and we can't allocate more space for a + // worker after detecting that it is from NiceHash. 
So we changed the default + // setting to a large allocation interval. This can minimize the impact of + // mining space overlap on NiceHash miners. + sessionIDManager_->setAllocInterval(256); +#endif return true; } -int ServerEth::checkShareAndUpdateDiff(ShareEth &share, - const uint64_t jobId, - const uint64_t nonce, - const uint256 &header, - const std::set &jobDiffs, - uint256 &returnedMixHash, - const string &workFullName) -{ +int ServerEth::checkShareAndUpdateDiff( + ShareEth &share, + const uint64_t jobId, + const uint64_t nonce, + const uint256 &header, + const std::set &jobDiffs, + uint256 &returnedMixHash, + const string &workFullName) { JobRepositoryEth *jobRepo = GetJobRepository(); if (nullptr == jobRepo) { return StratumStatus::ILLEGAL_PARARMS; } shared_ptr exJobPtr = jobRepo->getStratumJobEx(jobId); - if (nullptr == exJobPtr) - { + if (nullptr == exJobPtr) { return StratumStatus::JOB_NOT_FOUND; } auto sjob = std::static_pointer_cast(exJobPtr->sjob_); - - DLOG(INFO) << "checking share nonce: " << hex << nonce << ", header: " << header.GetHex(); - + + DLOG(INFO) << "checking share nonce: " << hex << nonce + << ", header: " << header.GetHex(); + ethash_return_value_t r; ethash_h256_t ethashHeader = {0}; Uint256ToEthash256(header, ethashHeader); @@ -426,13 +441,13 @@ int ServerEth::checkShareAndUpdateDiff(ShareEth &share, seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtime = ((seconds)*1000 + useconds / 1000.0) + 0.5; - // Note: The performance difference between Debug and Release builds is very large. - // The Release build may complete in 4 ms, while the Debug build takes 100 ms. + // Note: The performance difference between Debug and Release builds is very + // large. The Release build may complete in 4 ms, while the Debug build takes + // 100 ms. 
DLOG(INFO) << "light compute takes " << mtime << " ms"; #endif - if (!ret || !r.success) - { + if (!ret || !r.success) { LOG(ERROR) << "light cache creation error, try re-create it"; jobRepo->rebuildLightNonBlocking(sjob); return StratumStatus::INTERNAL_ERROR; @@ -441,14 +456,15 @@ int ServerEth::checkShareAndUpdateDiff(ShareEth &share, returnedMixHash = Ethash256ToUint256(r.mix_hash); uint256 shareTarget = Ethash256ToUint256(r.result); - - //can not compare two uint256 directly because uint256 is little endian and uses memcmp + + // can not compare two uint256 directly because uint256 is little endian and + // uses memcmp arith_uint256 bnShareTarget = UintToArith256(shareTarget); arith_uint256 bnNetworkTarget = UintToArith256(sjob->networkTarget_); - + DLOG(INFO) << "comapre share target: " << shareTarget.GetHex() << ", network target: " << sjob->networkTarget_.GetHex(); - + // print out high diff share, 2^10 = 1024 if ((bnShareTarget >> 10) <= bnNetworkTarget) { LOG(INFO) << "high diff share, share target: " << shareTarget.GetHex() @@ -464,51 +480,64 @@ int ServerEth::checkShareAndUpdateDiff(ShareEth &share, if (exJobPtr->isStale()) { LOG(INFO) << "stale solved share: " << share.toString(); return StratumStatus::SOLVED_STALE; - } - else { + } else { LOG(INFO) << "solved share: " << share.toString(); return StratumStatus::SOLVED; } - } // higher difficulty is prior for (auto itr = jobDiffs.rbegin(); itr != jobDiffs.rend(); itr++) { auto jobTarget = uint256S(Eth_DifficultyToTarget(*itr)); - DLOG(INFO) << "comapre share target: " << shareTarget.GetHex() << ", job target: " << jobTarget.GetHex(); + DLOG(INFO) << "comapre share target: " << shareTarget.GetHex() + << ", job target: " << jobTarget.GetHex(); if (isEnableSimulator_ || bnShareTarget <= UintToArith256(jobTarget)) { share.set_sharediff(*itr); - return exJobPtr->isStale() ? StratumStatus::ACCEPT_STALE : StratumStatus::ACCEPT; + return exJobPtr->isStale() ? 
StratumStatus::ACCEPT_STALE + : StratumStatus::ACCEPT; } } return StratumStatus::LOW_DIFFICULTY; } -void ServerEth::sendSolvedShare2Kafka(const string &strNonce, const string &strHeader, const string &strMix, - const uint32_t height, const uint64_t networkDiff, const StratumWorker &worker, - const EthConsensus::Chain chain) -{ - string msg = Strings::Format("{\"nonce\":\"%s\",\"header\":\"%s\",\"mix\":\"%s\"," - "\"height\":%lu,\"networkDiff\":%" PRIu64 ",\"userId\":%ld," - "\"workerId\":%" PRId64 ",\"workerFullName\":\"%s\"," - "\"chain\":\"%s\"}", - strNonce.c_str(), strHeader.c_str(), strMix.c_str(), - height, networkDiff, worker.userId_, - worker.workerHashId_, filterWorkerName(worker.fullName_).c_str(), - EthConsensus::getChainStr(chain).c_str()); +void ServerEth::sendSolvedShare2Kafka( + const string &strNonce, + const string &strHeader, + const string &strMix, + const uint32_t height, + const uint64_t networkDiff, + const StratumWorker &worker, + const EthConsensus::Chain chain) { + string msg = Strings::Format( + "{\"nonce\":\"%s\",\"header\":\"%s\",\"mix\":\"%s\"," + "\"height\":%lu,\"networkDiff\":%" PRIu64 + ",\"userId\":%ld," + "\"workerId\":%" PRId64 + ",\"workerFullName\":\"%s\"," + "\"chain\":\"%s\"}", + strNonce.c_str(), + strHeader.c_str(), + strMix.c_str(), + height, + networkDiff, + worker.userId_, + worker.workerHashId_, + filterWorkerName(worker.fullName_).c_str(), + EthConsensus::getChainStr(chain).c_str()); kafkaProducerSolvedShare_->produce(msg.c_str(), msg.length()); } -unique_ptr ServerEth::createConnection(struct bufferevent *bev, struct sockaddr *saddr, uint32_t sessionID) -{ +unique_ptr ServerEth::createConnection( + struct bufferevent *bev, struct sockaddr *saddr, uint32_t sessionID) { return boost::make_unique(*this, bev, saddr, sessionID); } -JobRepository *ServerEth::createJobRepository(const char *kafkaBrokers, - const char *consumerTopic, - const string &fileLastNotifyTime) -{ - return new JobRepositoryEth(kafkaBrokers, 
consumerTopic, fileLastNotifyTime, this); +JobRepository *ServerEth::createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) { + return new JobRepositoryEth( + kafkaBrokers, consumerTopic, fileLastNotifyTime, this); } diff --git a/src/eth/StratumServerEth.h b/src/eth/StratumServerEth.h index 21ffcdddf..4524728bc 100644 --- a/src/eth/StratumServerEth.h +++ b/src/eth/StratumServerEth.h @@ -32,39 +32,56 @@ class JobRepositoryEth; -class ServerEth : public ServerBase -{ +class ServerEth : public ServerBase { public: - ServerEth(const int32_t shareAvgSeconds) : ServerBase(shareAvgSeconds) {} - bool setupInternal(StratumServer* sserver) override; - int checkShareAndUpdateDiff(ShareEth &share, - const uint64_t jobId, - const uint64_t nonce, - const uint256 &header, - const std::set &jobDiffs, - uint256 &returnedMixHash, - const string &workFullName); - void sendSolvedShare2Kafka(const string& strNonce, const string& strHeader, const string& strMix, - const uint32_t height, const uint64_t networkDiff, const StratumWorker &worker, - const EthConsensus::Chain chain); - - JobRepository* createJobRepository(const char *kafkaBrokers, - const char *consumerTopic, - const string &fileLastNotifyTime) override; - - unique_ptr createConnection(struct bufferevent *bev, struct sockaddr *saddr, const uint32_t sessionID) override; + ServerEth(const int32_t shareAvgSeconds) + : ServerBase(shareAvgSeconds) {} + bool setupInternal(StratumServer *sserver) override; + int checkShareAndUpdateDiff( + ShareEth &share, + const uint64_t jobId, + const uint64_t nonce, + const uint256 &header, + const std::set &jobDiffs, + uint256 &returnedMixHash, + const string &workFullName); + void sendSolvedShare2Kafka( + const string &strNonce, + const string &strHeader, + const string &strMix, + const uint32_t height, + const uint64_t networkDiff, + const StratumWorker &worker, + const EthConsensus::Chain chain); + + JobRepository 
*createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) override; + + unique_ptr createConnection( + struct bufferevent *bev, + struct sockaddr *saddr, + const uint32_t sessionID) override; }; -class JobRepositoryEth : public JobRepositoryBase -{ +class JobRepositoryEth : public JobRepositoryBase { public: - JobRepositoryEth(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerEth *server); + JobRepositoryEth( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerEth *server); virtual ~JobRepositoryEth(); - bool compute(ethash_h256_t const header, uint64_t nonce, ethash_return_value_t& r); + bool + compute(ethash_h256_t const header, uint64_t nonce, ethash_return_value_t &r); - shared_ptr createStratumJob() override { return std::make_shared(); } - shared_ptr createStratumJobEx(shared_ptr sjob, bool isClean) override; + shared_ptr createStratumJob() override { + return std::make_shared(); + } + shared_ptr + createStratumJobEx(shared_ptr sjob, bool isClean) override; void broadcastStratumJob(shared_ptr sjob) override; // re-computing light when checking share failed. @@ -77,8 +94,8 @@ class JobRepositoryEth : public JobRepositoryBase // save ethash_light_t to file struct LightCacheHeader { uint64_t checkSum_; - uint64_t blockNumber_; - uint64_t cacheSize_; + uint64_t blockNumber_; + uint64_t cacheSize_; }; void newLightNonBlocking(shared_ptr job); @@ -88,16 +105,18 @@ class JobRepositoryEth : public JobRepositoryBase // Creating a new ethash_light_t (DAG cache) is so slow (in Debug build), // it may need more than 120 seconds for current Ethereum mainnet. - // So save it to a file before shutdown and load it back at next time + // So save it to a file before shutdown and load it back at next time // to reduce the computation time required after a reboot. 
- // - // Note: The performance difference between Debug and Release builds is very large. - // The Release build may complete in 5 s, while the Debug build takes more than 60 s. + // + // Note: The performance difference between Debug and Release builds is very + // large. The Release build may complete in 5 s, while the Debug build takes + // more than 60 s. void saveLightToFile(); void saveLightToFile(const ethash_light_t &light, std::ofstream &f); void loadLightFromFile(); ethash_light_t loadLightFromFile(std::ifstream &f); - uint64_t computeLightCacheCheckSum(const LightCacheHeader &header, const uint8_t *data); + uint64_t computeLightCacheCheckSum( + const LightCacheHeader &header, const uint8_t *data); ethash_light_t light_; ethash_light_t nextLight_; diff --git a/src/eth/StratumSessionEth.cc b/src/eth/StratumSessionEth.cc index 725f08b4a..070e29ad0 100644 --- a/src/eth/StratumSessionEth.cc +++ b/src/eth/StratumSessionEth.cc @@ -36,28 +36,33 @@ static string stripEthAddrFromFullName(const string &fullNameStr) { const size_t pos = fullNameStr.find('.'); // The Ethereum address is 42 bytes and starting with "0x" as normal // Example: 0x00d8c82Eb65124Ea3452CaC59B64aCC230AA3482 - if (pos == 42 && fullNameStr[0] == '0' && (fullNameStr[1] == 'x' || fullNameStr[1] == 'X')) { + if (pos == 42 && fullNameStr[0] == '0' && + (fullNameStr[1] == 'x' || fullNameStr[1] == 'X')) { return fullNameStr.substr(pos + 1); } return fullNameStr; } -StratumSessionEth::StratumSessionEth(ServerEth &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1) - : StratumSessionBase(server, bev, saddr, extraNonce1) - , ethProtocol_(StratumProtocolEth::ETHPROXY) - , nicehashLastSentDiff_(0) - , currentJobDiff_(0){ +StratumSessionEth::StratumSessionEth( + ServerEth &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1) + : StratumSessionBase(server, bev, saddr, extraNonce1) + , ethProtocol_(StratumProtocolEth::ETHPROXY) + , 
nicehashLastSentDiff_(0) + , currentJobDiff_(0) { } -void StratumSessionEth::sendSetDifficulty(LocalJob &localJob, uint64_t difficulty) { - // Some ETH stratum variants have no set difficulty method, but change the target directly +void StratumSessionEth::sendSetDifficulty( + LocalJob &localJob, uint64_t difficulty) { + // Some ETH stratum variants have no set difficulty method, but change the + // target directly currentJobDiff_ = difficulty; } -void StratumSessionEth::sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) { +void StratumSessionEth::sendMiningNotify( + shared_ptr exJobPtr, bool isFirstJob) { if (StratumProtocolEth::ETHPROXY == ethProtocol_) { // AntMiner E3 need id to be 0, otherwise it will not be able to mine. // It does not actively call `eth_getWork` like other ETHProxy miners. @@ -67,7 +72,8 @@ void StratumSessionEth::sendMiningNotify(shared_ptr exJobPtr, bool } } -void StratumSessionEth::sendMiningNotifyWithId(shared_ptr exJobPtr, const string &idStr) { +void StratumSessionEth::sendMiningNotifyWithId( + shared_ptr exJobPtr, const string &idStr) { if (state_ < AUTHENTICATED || exJobPtr == nullptr) { LOG(ERROR) << "eth sendMiningNotify failed, state: " << state_; return; @@ -103,54 +109,56 @@ void StratumSessionEth::sendMiningNotifyWithId(shared_ptr exJobPtr // Miners will fills 0 after the prefix to 64 bits. uint32_t startNoncePrefix = extraNonce1_; - // Tips: NICEHASH_STRATUM use an extrNnonce, it is really an extraNonce (not startNonce) - // and is sent at the subscribe of the session. + // Tips: NICEHASH_STRATUM use an extrNnonce, it is really an extraNonce (not + // startNonce) and is sent at the subscribe of the session. 
- DLOG(INFO) << "new eth stratum job mining.notify: share difficulty=" << std::hex << currentJobDiff_ - << ", share target=" << strShareTarget << ", protocol=" << getProtocolString(ethProtocol_); + DLOG(INFO) << "new eth stratum job mining.notify: share difficulty=" + << std::hex << currentJobDiff_ + << ", share target=" << strShareTarget + << ", protocol=" << getProtocolString(ethProtocol_); string strNotify; switch (ethProtocol_) { case StratumProtocolEth::STRATUM: { - //Etherminer mining.notify + // Etherminer mining.notify //{"id":6,"method":"mining.notify","params": //["dd159c7ec5b056ad9e95e7c997829f667bc8e34c6d43fcb9e0c440ed94a85d80", //"dd159c7ec5b056ad9e95e7c997829f667bc8e34c6d43fcb9e0c440ed94a85d80", //"a8784097a4d03c2d2ac6a3a2beebd0606aa30a8536a700446b40800841c0162c", //"0000000112e0be826d694b2e62d01511f12a6061fbaec8bc02357593e70e52ba",false]} - strNotify = Strings::Format("{\"id\":%s,\"method\":\"mining.notify\"," - "\"params\":[\"%s\",\"%s\",\"%s\",\"%s\",%s]," - "\"height\":%lu}\n", - idStr.c_str(), - header.c_str(), - header.c_str(), - seed.c_str(), - strShareTarget.c_str(), - exJobPtr->isClean_ ? "true" : "false", - ethJob->height_); - } - break; + strNotify = Strings::Format( + "{\"id\":%s,\"method\":\"mining.notify\"," + "\"params\":[\"%s\",\"%s\",\"%s\",\"%s\",%s]," + "\"height\":%lu}\n", + idStr.c_str(), + header.c_str(), + header.c_str(), + seed.c_str(), + strShareTarget.c_str(), + exJobPtr->isClean_ ? 
"true" : "false", + ethJob->height_); + } break; case StratumProtocolEth::ETHPROXY: { - //Clymore eth_getWork + // Clymore eth_getWork //{"id":3,"jsonrpc":"2.0","result": //["0x599fffbc07777d4b6455c0e7ca479c9edbceef6c3fec956fecaaf4f2c727a492", //"0x1261dfe17d0bf58cb2861ae84734488b1463d282b7ee88ccfa18b7a92a7b77f7", //"0x0112e0be826d694b2e62d01511f12a6061fbaec8bc02357593e70e52ba","0x4ec6f5"]} - strNotify = Strings::Format("{\"id\":%s,\"jsonrpc\":\"2.0\"," - "\"result\":[\"0x%s\",\"0x%s\",\"0x%s\"," - // nonce cannot start with 0x because of - // a compatibility issue with AntMiner E3. - "\"%06x\"]," - "\"height\":%lu}\n", - idStr.c_str(), - header.c_str(), - seed.c_str(), - //Claymore use 58 bytes target - strShareTarget.substr(6, 58).c_str(), - startNoncePrefix, - ethJob->height_); - } - break; + strNotify = Strings::Format( + "{\"id\":%s,\"jsonrpc\":\"2.0\"," + "\"result\":[\"0x%s\",\"0x%s\",\"0x%s\"," + // nonce cannot start with 0x because of + // a compatibility issue with AntMiner E3. 
+ "\"%06x\"]," + "\"height\":%lu}\n", + idStr.c_str(), + header.c_str(), + seed.c_str(), + // Claymore use 58 bytes target + strShareTarget.substr(6, 58).c_str(), + startNoncePrefix, + ethJob->height_); + } break; case StratumProtocolEth::NICEHASH_STRATUM: { // send new difficulty if (currentJobDiff_ != nicehashLastSentDiff_) { @@ -159,8 +167,11 @@ void StratumSessionEth::sendMiningNotifyWithId(shared_ptr exJobPtr // "method": "mining.set_difficulty", // "params": [ 0.5 ] // } - strNotify += Strings::Format("{\"id\":%s,\"method\":\"mining.set_difficulty\"," - "\"params\":[%lf]}\n", idStr.c_str(), Eth_DiffToNicehashDiff(currentJobDiff_)); + strNotify += Strings::Format( + "{\"id\":%s,\"method\":\"mining.set_difficulty\"," + "\"params\":[%lf]}\n", + idStr.c_str(), + Eth_DiffToNicehashDiff(currentJobDiff_)); nicehashLastSentDiff_ = currentJobDiff_; } @@ -173,17 +184,17 @@ void StratumSessionEth::sendMiningNotifyWithId(shared_ptr exJobPtr // "645cf20198c2f3861e947d4f67e3ab63b7b2e24dcc9095bd9123e7b33371f6cc", // true // ]} - strNotify += Strings::Format("{\"id\":%s,\"method\":\"mining.notify\"," - "\"params\":[\"%s\",\"%s\",\"%s\",%s]," - "\"height\":%lu}\n", - idStr.c_str(), - header.c_str(), - seed.c_str(), - header.c_str(), - exJobPtr->isClean_ ? "true" : "false", - ethJob->height_); - } - break; + strNotify += Strings::Format( + "{\"id\":%s,\"method\":\"mining.notify\"," + "\"params\":[\"%s\",\"%s\",\"%s\",%s]," + "\"height\":%lu}\n", + idStr.c_str(), + header.c_str(), + seed.c_str(), + header.c_str(), + exJobPtr->isClean_ ? 
"true" : "false", + ethJob->height_); + } break; } DLOG(INFO) << strNotify; @@ -197,24 +208,22 @@ void StratumSessionEth::sendMiningNotifyWithId(shared_ptr exJobPtr clearLocalJobs(); } -void StratumSessionEth::handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionEth::handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) { if (method == "mining.subscribe") { handleRequest_Subscribe(idStr, jparams, jroot); - } - else if (method == "mining.authorize" || method == "eth_submitLogin") { + } else if (method == "mining.authorize" || method == "eth_submitLogin") { handleRequest_Authorize(idStr, jparams, jroot); - } - else if (dispatcher_) { + } else { dispatcher_->handleRequest(idStr, method, jparams, jroot); } } -void StratumSessionEth::handleRequest_Subscribe(const string &idStr, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionEth::handleRequest_Subscribe( + const string &idStr, const JsonNode &jparams, const JsonNode &jroot) { if (state_ != CONNECTED) { responseError(idStr, StratumStatus::UNKNOWN); @@ -224,44 +233,48 @@ void StratumSessionEth::handleRequest_Subscribe(const string &idStr, auto params = jparams.children(); if (params->size() >= 1) { - setClientAgent(params->at(0).str().substr(0, 30)); // 30 is max len + setClientAgent(params->at(0).str().substr(0, 30)); // 30 is max len } string protocolStr; if (params->size() >= 2) { protocolStr = params->at(1).str(); // tolower - std::transform(protocolStr.begin(), protocolStr.end(), protocolStr.begin(), ::tolower); + std::transform( + protocolStr.begin(), protocolStr.end(), protocolStr.begin(), ::tolower); } - // session id and miner ip need to pass within params if working with stratum switcher + // session id and miner ip need to pass within params if working with stratum + // switcher #ifdef WORK_WITH_STRATUM_SWITCHER // params[0] = 
client version [require] - // params[1] = protocol version [require, can be empty] - // params[2] = session id / ExtraNonce1 [require] - // params[3] = miner's real IP (unit32) [optional] - - if (params->size() < 3) { - responseError(idStr, StratumStatus::CLIENT_IS_NOT_SWITCHER); - LOG(ERROR) << "A non-switcher subscribe request is detected and rejected."; - LOG(ERROR) << "Cmake option POOL__WORK_WITH_STRATUM_SWITCHER enabled, you can only connect to the sserver via a stratum switcher."; - return; - } + // params[1] = protocol version [require, can be empty] + // params[2] = session id / ExtraNonce1 [require] + // params[3] = miner's real IP (unit32) [optional] + + if (params->size() < 3) { + responseError(idStr, StratumStatus::CLIENT_IS_NOT_SWITCHER); + LOG(ERROR) << "A non-switcher subscribe request is detected and rejected."; + LOG(ERROR) << "Cmake option POOL__WORK_WITH_STRATUM_SWITCHER enabled, you " + "can only connect to the sserver via a stratum switcher."; + return; + } - string extraNonce1Str = params->at(2).str().substr(0, 8); // 8 is max len - sscanf(extraNonce1Str.c_str(), "%x", &extraNonce1_); // convert hex to int + string extraNonce1Str = params->at(2).str().substr(0, 8); // 8 is max len + sscanf(extraNonce1Str.c_str(), "%x", &extraNonce1_); // convert hex to int - // receive miner's IP from stratumSwitcher - if (params->size() >= 4) { - clientIpInt_ = htonl(params->at(3).uint32()); + // receive miner's IP from stratumSwitcher + if (params->size() >= 4) { + clientIpInt_ = htonl(params->at(3).uint32()); - // ipv4 - clientIp_.resize(INET_ADDRSTRLEN); - struct in_addr addr; - addr.s_addr = clientIpInt_; - clientIp_ = inet_ntop(AF_INET, &addr, (char *)clientIp_.data(), (socklen_t)clientIp_.size()); - LOG(INFO) << "client real IP: " << clientIp_; - } + // ipv4 + clientIp_.resize(INET_ADDRSTRLEN); + struct in_addr addr; + addr.s_addr = clientIpInt_; + clientIp_ = inet_ntop( + AF_INET, &addr, (char *)clientIp_.data(), (socklen_t)clientIp_.size()); + 
LOG(INFO) << "client real IP: " << clientIp_; + } #endif // WORK_WITH_STRATUM_SWITCHER if (protocolStr.substr(0, 16) == "ethereumstratum/") { @@ -286,19 +299,22 @@ void StratumSessionEth::handleRequest_Subscribe(const string &idStr, // ], // "error": null // } - const string s = Strings::Format("{\"id\":%s,\"result\":[[" - "\"mining.notify\"," - "\"%06x\"," - "\"EthereumStratum/1.0.0\"" - "],\"%s\"],\"error\":null}\n", - idStr.c_str(), extraNonce1_, noncePrefix.c_str()); + const string s = Strings::Format( + "{\"id\":%s,\"result\":[[" + "\"mining.notify\"," + "\"%06x\"," + "\"EthereumStratum/1.0.0\"" + "],\"%s\"],\"error\":null}\n", + idStr.c_str(), + extraNonce1_, + noncePrefix.c_str()); sendData(s); } #ifdef WORK_WITH_STRATUM_SWITCHER - else if (protocolStr.substr(0, 9) == "ethproxy/") { + else if (protocolStr.substr(0, 9) == "ethproxy/") { // required for stratum switcher - // Because ethproxy has no subscribe phase, switcher has no chance to set session id. - // So deliberately added a subscribe phase of ethproxy here. + // Because ethproxy has no subscribe phase, switcher has no chance to set + // session id. So deliberately added a subscribe phase of ethproxy here. 
ethProtocol_ = StratumProtocolEth::ETHPROXY; responseTrue(idStr); } @@ -311,16 +327,17 @@ void StratumSessionEth::handleRequest_Subscribe(const string &idStr, state_ = SUBSCRIBED; } -void StratumSessionEth::handleRequest_Authorize(const string &idStr, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionEth::handleRequest_Authorize( + const string &idStr, const JsonNode &jparams, const JsonNode &jroot) { // const type cannot access string indexed object member JsonNode &jsonRoot = const_cast(jroot); #ifndef WORK_WITH_STRATUM_SWITCHER - if (StratumProtocolEth::ETHPROXY == ethProtocol_ && jsonRoot["method"].str() == "eth_submitLogin") { + if (StratumProtocolEth::ETHPROXY == ethProtocol_ && + jsonRoot["method"].str() == "eth_submitLogin") { // Subscribe is not required for ETHPROXY (without stratum switcher). - // But if WORK_WITH_STRATUM_SWITCHER enabled, subscribe for ETHProxy is required. + // But if WORK_WITH_STRATUM_SWITCHER enabled, subscribe for ETHProxy is + // required. 
state_ = SUBSCRIBED; } #endif @@ -330,10 +347,15 @@ void StratumSessionEth::handleRequest_Authorize(const string &idStr, return; } - // STRATUM / NICEHASH_STRATUM: {"id":3, "method":"mining.authorize", "params":["test.aaa", "x"]} - // ETH_PROXY (Claymore): {"worker": "eth1.0", "jsonrpc": "2.0", "params": ["0x00d8c82Eb65124Ea3452CaC59B64aCC230AA3482.test.aaa", "x"], "id": 2, "method": "eth_submitLogin"} - // ETH_PROXY (EthMiner, situation 1): {"id":1, "method":"eth_submitLogin", "params":["0x00d8c82Eb65124Ea3452CaC59B64aCC230AA3482"], "worker":"test.aaa"} - // ETH_PROXY (EthMiner, situation 2): {"id":1, "method":"eth_submitLogin", "params":["test"], "worker":"aaa"} + // STRATUM / NICEHASH_STRATUM: {"id":3, "method":"mining.authorize", + // "params":["test.aaa", "x"]} ETH_PROXY (Claymore): {"worker": + // "eth1.0", "jsonrpc": "2.0", "params": + // ["0x00d8c82Eb65124Ea3452CaC59B64aCC230AA3482.test.aaa", "x"], "id": 2, + // "method": "eth_submitLogin"} ETH_PROXY (EthMiner, situation 1): {"id":1, + // "method":"eth_submitLogin", + // "params":["0x00d8c82Eb65124Ea3452CaC59B64aCC230AA3482"], + // "worker":"test.aaa"} ETH_PROXY (EthMiner, situation 2): {"id":1, + // "method":"eth_submitLogin", "params":["test"], "worker":"aaa"} if (jparams.children()->size() < 1) { responseError(idStr, StratumStatus::INVALID_USERNAME); @@ -352,18 +374,20 @@ void StratumSessionEth::handleRequest_Authorize(const string &idStr, if (jparams.children()->size() > 1) { password = jparams.children()->at(1).str(); } - + checkUserAndPwd(idStr, fullName, password); return; } -unique_ptr StratumSessionEth::createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) { - return boost::make_unique(*this, - *getServer().defaultDifficultyController_, - clientAgent, - workerName, - workerId, - ethProtocol_); +unique_ptr StratumSessionEth::createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) { + return boost::make_unique( + 
*this, + *getServer().defaultDifficultyController_, + clientAgent, + workerName, + workerId, + ethProtocol_); } diff --git a/src/eth/StratumSessionEth.h b/src/eth/StratumSessionEth.h index 9f196c239..209409cea 100644 --- a/src/eth/StratumSessionEth.h +++ b/src/eth/StratumSessionEth.h @@ -30,33 +30,40 @@ class StratumSessionEth : public StratumSessionBase { public: - StratumSessionEth(ServerEth &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1); + StratumSessionEth( + ServerEth &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1); void sendSetDifficulty(LocalJob &localJob, uint64_t difficulty) override; - void sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; - void sendMiningNotifyWithId(shared_ptr exJobPtr, const string &idStr); + void + sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; + void sendMiningNotifyWithId( + shared_ptr exJobPtr, const string &idStr); protected: - void handleRequest(const std::string &idStr, const std::string &method, - const JsonNode &jparams, const JsonNode &jroot) override; - void handleRequest_Subscribe(const std::string &idStr, - const JsonNode &jparams, - const JsonNode &jroot); - void handleRequest_Authorize(const std::string &idStr, - const JsonNode &jparams, - const JsonNode &jroot); + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; + void handleRequest_Subscribe( + const std::string &idStr, const JsonNode &jparams, const JsonNode &jroot); + void handleRequest_Authorize( + const std::string &idStr, const JsonNode &jparams, const JsonNode &jroot); + public: - std::unique_ptr createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) override; + std::unique_ptr createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) override; private: StratumProtocolEth 
ethProtocol_; - // Record the difficulty of the last time sent to the miner in NICEHASH_STRATUM protocol. + // Record the difficulty of the last time sent to the miner in + // NICEHASH_STRATUM protocol. uint64_t nicehashLastSentDiff_; uint64_t currentJobDiff_; }; -#endif // #ifndef STRATUM_SESSION_ETH_H_ +#endif // #ifndef STRATUM_SESSION_ETH_H_ diff --git a/src/gbtmaker/GbtMakerMain.cc b/src/gbtmaker/GbtMakerMain.cc index 90b15eef5..f68fec674 100644 --- a/src/gbtmaker/GbtMakerMain.cc +++ b/src/gbtmaker/GbtMakerMain.cc @@ -53,12 +53,13 @@ void handler(int sig) { void usage() { fprintf(stderr, BIN_VERSION_STRING("gbtmaker")); - fprintf(stderr, "Usage:\tgbtmaker -c \"gbtmaker.cfg\" [-l ]\n"); + fprintf( + stderr, "Usage:\tgbtmaker -c \"gbtmaker.cfg\" [-l ]\n"); } int main(int argc, char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int c; if (argc <= 1) { @@ -67,15 +68,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:h")) != -1) { switch (c) { - case 'c': - optConf = optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf = optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -88,25 +90,24 @@ int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. - FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("gbtmaker"); // Read the file. If there is an error, report it and exit. 
libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." << std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } // lock cfg file: @@ -118,18 +119,20 @@ int main(int argc, char **argv) { }*/ signal(SIGTERM, handler); - signal(SIGINT, handler); + signal(SIGINT, handler); bool isCheckZmq = true; cfg.lookupValue("gbtmaker.is_check_zmq", isCheckZmq); int32_t rpcCallInterval = 5; cfg.lookupValue("gbtmaker.rpcinterval", rpcCallInterval); - gGbtMaker = new GbtMaker(cfg.lookup("bitcoind.zmq_addr"), - cfg.lookup("bitcoind.rpc_addr"), - cfg.lookup("bitcoind.rpc_userpwd"), - cfg.lookup("kafka.brokers"), - cfg.lookup("gbtmaker.rawgbt_topic"), - rpcCallInterval, isCheckZmq); + gGbtMaker = new GbtMaker( + cfg.lookup("bitcoind.zmq_addr"), + cfg.lookup("bitcoind.rpc_addr"), + cfg.lookup("bitcoind.rpc_userpwd"), + cfg.lookup("kafka.brokers"), + cfg.lookup("gbtmaker.rawgbt_topic"), + rpcCallInterval, + isCheckZmq); try { if (!gGbtMaker->init()) { @@ -138,12 +141,9 @@ int main(int argc, char **argv) { #ifdef CHAIN_TYPE_BCH bool runLightGbt = false; cfg.lookupValue("gbtmaker.lightgbt", runLightGbt); - if(runLightGbt) - { + if (runLightGbt) { gGbtMaker->runLightGbt(); - } - else - { + } else { gGbtMaker->run(); } #else @@ -151,7 +151,7 @@ int main(int argc, char **argv) { #endif } delete gGbtMaker; - } catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/gwmaker/GwMakerMain.cc b/src/gwmaker/GwMakerMain.cc index 204b58aea..e41fa58da 100644 --- a/src/gwmaker/GwMakerMain.cc +++ 
b/src/gwmaker/GwMakerMain.cc @@ -49,11 +49,10 @@ using namespace std; using namespace libconfig; - static vector> gGwMakers; void handler(int sig) { - for (auto gwMaker: gGwMakers) { + for (auto gwMaker : gGwMakers) { if (gwMaker) gwMaker->stop(); } @@ -67,7 +66,7 @@ void usage() { shared_ptr createGwMakerHandler(const GwMakerDefinition &def) { shared_ptr handler; - if (def.chainType_ == "ETH") + if (def.chainType_ == "ETH") handler = make_shared(); else if (def.chainType_ == "SIA") handler = make_shared(); @@ -85,12 +84,11 @@ shared_ptr createGwMakerHandler(const GwMakerDefinition &def) { return handler; } -GwMakerDefinition createGwMakerDefinition(const Setting &setting) -{ +GwMakerDefinition createGwMakerDefinition(const Setting &setting) { GwMakerDefinition def; - readFromSetting(setting, "chain_type", def.chainType_); - readFromSetting(setting, "rpc_addr", def.rpcAddr_); + readFromSetting(setting, "chain_type", def.chainType_); + readFromSetting(setting, "rpc_addr", def.rpcAddr_); readFromSetting(setting, "rpc_userpwd", def.rpcUserPwd_); readFromSetting(setting, "rawgw_topic", def.rawGwTopic_); readFromSetting(setting, "rpc_interval", def.rpcInterval_); @@ -103,21 +101,24 @@ GwMakerDefinition createGwMakerDefinition(const Setting &setting) return def; } -void createGwMakers(const libconfig::Config &cfg, const string &brokers, vector> &makers) -{ +void createGwMakers( + const libconfig::Config &cfg, + const string &brokers, + vector> &makers) { const Setting &root = cfg.getRoot(); const Setting &workerDefs = root["gw_workers"]; - for (int i = 0; i < workerDefs.getLength(); i++) - { + for (int i = 0; i < workerDefs.getLength(); i++) { GwMakerDefinition def = createGwMakerDefinition(workerDefs[i]); if (!def.enabled_) { - LOG(INFO) << "chain: " << def.chainType_ << ", topic: " << def.rawGwTopic_ << ", disabled."; + LOG(INFO) << "chain: " << def.chainType_ << ", topic: " << def.rawGwTopic_ + << ", disabled."; continue; } - - LOG(INFO) << "chain: " << def.chainType_ 
<< ", topic: " << def.rawGwTopic_ << ", enabled."; + + LOG(INFO) << "chain: " << def.chainType_ << ", topic: " << def.rawGwTopic_ + << ", enabled."; auto handle = createGwMakerHandler(def); makers.push_back(std::make_shared(handle, brokers)); @@ -131,7 +132,7 @@ void workerThread(shared_ptr gwMaker) { int main(int argc, char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int c; if (argc <= 1) { @@ -140,15 +141,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:h")) != -1) { switch (c) { - case 'c': - optConf = optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf = optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -161,25 +163,24 @@ int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. - FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("gwmaker"); // Read the file. If there is an error, report it and exit. libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." 
<< std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } // lock cfg file: @@ -191,10 +192,9 @@ int main(int argc, char **argv) { }*/ signal(SIGTERM, handler); - signal(SIGINT, handler); + signal(SIGINT, handler); - try - { + try { vector> workers; string brokers = cfg.lookup("kafka.brokers"); @@ -207,13 +207,12 @@ int main(int argc, char **argv) { createGwMakers(cfg, brokers, gGwMakers); // init & run GwMaker - for (auto gwMaker : gGwMakers) - { + for (auto gwMaker : gGwMakers) { if (gwMaker->init()) { workers.push_back(std::make_shared(workerThread, gwMaker)); - } - else { - LOG(FATAL) << "gwmaker init failure, chain: " << gwMaker->getChainType() << ", topic: " << gwMaker->getRawGwTopic(); + } else { + LOG(FATAL) << "gwmaker init failure, chain: " << gwMaker->getChainType() + << ", topic: " << gwMaker->getRawGwTopic(); } } @@ -225,10 +224,8 @@ int main(int argc, char **argv) { LOG(INFO) << "worker exit"; } } - - } - catch (std::exception &e) - { + + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); } diff --git a/src/jobmaker/JobMakerMain.cc b/src/jobmaker/JobMakerMain.cc index e6b91eb8a..e7797d56d 100644 --- a/src/jobmaker/JobMakerMain.cc +++ b/src/jobmaker/JobMakerMain.cc @@ -55,7 +55,7 @@ using namespace libconfig; static vector> gJobMakers; void handler(int sig) { - for (auto jobMaker: gJobMakers) { + for (auto jobMaker : gJobMakers) { if (jobMaker) jobMaker->stop(); } @@ -63,21 +63,21 @@ void handler(int sig) { void usage() { fprintf(stderr, BIN_VERSION_STRING("jobmaker")); - fprintf(stderr, "Usage:\tjobmaker -c \"jobmaker.cfg\" [-l ]\n"); + fprintf( + stderr, "Usage:\tjobmaker -c \"jobmaker.cfg\" [-l ]\n"); } -bool isGwChain(const string 
&chainType) -{ - return ("ETH" == chainType || - "SIA" == chainType || - "BTM" == chainType || - "DCR" == chainType); +bool isGwChain(const string &chainType) { + return ( + "ETH" == chainType || "SIA" == chainType || "BTM" == chainType || + "DCR" == chainType); } -shared_ptr createGwJobMakerHandler(shared_ptr def) { +shared_ptr +createGwJobMakerHandler(shared_ptr def) { shared_ptr handler; - if (def->chainType_ == "ETH") + if (def->chainType_ == "ETH") handler = make_shared(); else if (def->chainType_ == "SIA") handler = make_shared(); @@ -93,7 +93,8 @@ shared_ptr createGwJobMakerHandler(shared_ptr createGbtJobMakerHandler(shared_ptr def) { +shared_ptr +createGbtJobMakerHandler(shared_ptr def) { shared_ptr handler; if (def->chainType_ == CHAIN_TYPE_STR) @@ -106,8 +107,8 @@ shared_ptr createGbtJobMakerHandler(shared_ptr createGwJobMakerDefinition(const Setting &setting) -{ +shared_ptr +createGwJobMakerDefinition(const Setting &setting) { shared_ptr def; string chainType; @@ -126,22 +127,21 @@ shared_ptr createGwJobMakerDefinition(const Setting &setti auto defEth = make_shared(); defEth->chain_ = chain; def = defEth; - } - else { + } else { def = make_shared(); } def->chainType_ = chainType; - readFromSetting(setting, "rawgw_topic", def->rawGwTopic_); - readFromSetting(setting, "job_topic", def->jobTopic_); + readFromSetting(setting, "rawgw_topic", def->rawGwTopic_); + readFromSetting(setting, "job_topic", def->jobTopic_); - readFromSetting(setting, "job_interval", def->jobInterval_); - readFromSetting(setting, "max_job_delay", def->maxJobDelay_); - readFromSetting(setting, "work_life_time", def->workLifeTime_); + readFromSetting(setting, "job_interval", def->jobInterval_); + readFromSetting(setting, "max_job_delay", def->maxJobDelay_); + readFromSetting(setting, "work_life_time", def->workLifeTime_); readFromSetting(setting, "zookeeper_lock_path", def->zookeeperLockPath_); - readFromSetting(setting, "file_last_job_time", def->fileLastJobTime_, true); + 
readFromSetting(setting, "file_last_job_time", def->fileLastJobTime_, true); readFromSetting(setting, "id", def->serverId_); def->enabled_ = false; @@ -150,32 +150,33 @@ shared_ptr createGwJobMakerDefinition(const Setting &setti return def; } -shared_ptr createGbtJobMakerDefinition(const Setting &setting) -{ +shared_ptr +createGbtJobMakerDefinition(const Setting &setting) { shared_ptr def = make_shared(); - readFromSetting(setting, "chain_type", def->chainType_); - readFromSetting(setting, "testnet", def->testnet_); + readFromSetting(setting, "chain_type", def->chainType_); + readFromSetting(setting, "testnet", def->testnet_); - readFromSetting(setting, "payout_address", def->payoutAddr_); - readFromSetting(setting, "coinbase_info", def->coinbaseInfo_); - readFromSetting(setting, "block_version", def->blockVersion_); + readFromSetting(setting, "payout_address", def->payoutAddr_); + readFromSetting(setting, "coinbase_info", def->coinbaseInfo_); + readFromSetting(setting, "block_version", def->blockVersion_); - readFromSetting(setting, "rawgbt_topic", def->rawGbtTopic_); - readFromSetting(setting, "auxpow_gw_topic", def->auxPowGwTopic_); - readFromSetting(setting, "rsk_rawgw_topic", def->rskRawGwTopic_); - readFromSetting(setting, "job_topic", def->jobTopic_); + readFromSetting(setting, "rawgbt_topic", def->rawGbtTopic_); + readFromSetting(setting, "auxpow_gw_topic", def->auxPowGwTopic_); + readFromSetting(setting, "rsk_rawgw_topic", def->rskRawGwTopic_); + readFromSetting(setting, "job_topic", def->jobTopic_); - readFromSetting(setting, "job_interval", def->jobInterval_); - readFromSetting(setting, "max_job_delay", def->maxJobDelay_); - readFromSetting(setting, "gbt_life_time", def->gbtLifeTime_); + readFromSetting(setting, "job_interval", def->jobInterval_); + readFromSetting(setting, "max_job_delay", def->maxJobDelay_); + readFromSetting(setting, "gbt_life_time", def->gbtLifeTime_); readFromSetting(setting, "empty_gbt_life_time", def->emptyGbtLifeTime_); 
def->mergedMiningNotifyPolicy_ = 1; - readFromSetting(setting, "merged_mining_notify", def->mergedMiningNotifyPolicy_, true); + readFromSetting( + setting, "merged_mining_notify", def->mergedMiningNotifyPolicy_, true); readFromSetting(setting, "zookeeper_lock_path", def->zookeeperLockPath_); - readFromSetting(setting, "file_last_job_time", def->fileLastJobTime_, true); + readFromSetting(setting, "file_last_job_time", def->fileLastJobTime_, true); readFromSetting(setting, "id", def->serverId_); def->enabled_ = false; @@ -184,43 +185,48 @@ shared_ptr createGbtJobMakerDefinition(const Setting &set return def; } -void createJobMakers(const libconfig::Config &cfg, const string &kafkaBrokers, const string &zkBrokers, vector> &makers) -{ +void createJobMakers( + const libconfig::Config &cfg, + const string &kafkaBrokers, + const string &zkBrokers, + vector> &makers) { const Setting &root = cfg.getRoot(); const Setting &workerDefs = root["job_workers"]; - for (int i = 0; i < workerDefs.getLength(); i++) - { + for (int i = 0; i < workerDefs.getLength(); i++) { string chainType; readFromSetting(workerDefs[i], "chain_type", chainType); - - if (isGwChain(chainType)) - { + + if (isGwChain(chainType)) { auto def = createGwJobMakerDefinition(workerDefs[i]); if (!def->enabled_) { - LOG(INFO) << "chain: " << def->chainType_ << ", topic: " << def->jobTopic_ << ", disabled."; + LOG(INFO) << "chain: " << def->chainType_ + << ", topic: " << def->jobTopic_ << ", disabled."; continue; } - - LOG(INFO) << "chain: " << def->chainType_ << ", topic: " << def->jobTopic_ << ", enabled."; + + LOG(INFO) << "chain: " << def->chainType_ << ", topic: " << def->jobTopic_ + << ", enabled."; auto handle = createGwJobMakerHandler(def); - makers.push_back(std::make_shared(handle, kafkaBrokers, zkBrokers)); - } - else - { + makers.push_back( + std::make_shared(handle, kafkaBrokers, zkBrokers)); + } else { auto def = createGbtJobMakerDefinition(workerDefs[i]); if (!def->enabled_) { - LOG(INFO) << "chain: " 
<< def->chainType_ << ", topic: " << def->jobTopic_ << ", disabled."; + LOG(INFO) << "chain: " << def->chainType_ + << ", topic: " << def->jobTopic_ << ", disabled."; continue; } - - LOG(INFO) << "chain: " << def->chainType_ << ", topic: " << def->jobTopic_ << ", enabled."; + + LOG(INFO) << "chain: " << def->chainType_ << ", topic: " << def->jobTopic_ + << ", enabled."; auto handle = createGbtJobMakerHandler(def); - makers.push_back(std::make_shared(handle, kafkaBrokers, zkBrokers)); + makers.push_back( + std::make_shared(handle, kafkaBrokers, zkBrokers)); } } } @@ -235,7 +241,7 @@ void workerThread(shared_ptr jobmaker) { int main(int argc, char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int c; if (argc <= 1) { @@ -244,15 +250,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:h")) != -1) { switch (c) { - case 'c': - optConf = optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf = optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -265,25 +272,24 @@ int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. - FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("jobmaker"); // Read the file. If there is an error, report it and exit. libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." 
<< std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } // lock cfg file: @@ -295,7 +301,7 @@ int main(int argc, char **argv) { }*/ signal(SIGTERM, handler); - signal(SIGINT, handler); + signal(SIGINT, handler); try { vector> workers; @@ -309,8 +315,7 @@ int main(int argc, char **argv) { createJobMakers(cfg, kafkaBrokers, zkBrokers, gJobMakers); // init & run JobMaker - for (auto jobmaker : gJobMakers) - { + for (auto jobmaker : gJobMakers) { workers.push_back(std::make_shared(workerThread, jobmaker)); } @@ -323,8 +328,7 @@ int main(int argc, char **argv) { } } - } - catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/nmcauxmaker/NMCAuxBlockMakerMain.cc b/src/nmcauxmaker/NMCAuxBlockMakerMain.cc index 16a4c1c0a..8d79dc5f5 100644 --- a/src/nmcauxmaker/NMCAuxBlockMakerMain.cc +++ b/src/nmcauxmaker/NMCAuxBlockMakerMain.cc @@ -53,12 +53,14 @@ void handler(int sig) { void usage() { fprintf(stderr, BIN_VERSION_STRING("nmcauxmaker")); - fprintf(stderr, "Usage:\tnmcauxmaker -c \"nmcauxmaker.cfg\" [-l ]\n"); + fprintf( + stderr, + "Usage:\tnmcauxmaker -c \"nmcauxmaker.cfg\" [-l ]\n"); } int main(int argc, char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int c; if (argc <= 1) { @@ -67,15 +69,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:h")) != -1) { switch (c) { - case 'c': - optConf = optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf = optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -88,25 +91,24 @@ 
int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. - FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("nmcauxmaker"); // Read the file. If there is an error, report it and exit. libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." << std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } // lock cfg file: @@ -118,7 +120,7 @@ int main(int argc, char **argv) { }*/ signal(SIGTERM, handler); - signal(SIGINT, handler); + signal(SIGINT, handler); bool isCheckZmq = true; cfg.lookupValue("nmcauxmaker.is_check_zmq", isCheckZmq); @@ -129,13 +131,16 @@ int main(int argc, char **argv) { string coinbaseAddress; cfg.lookupValue("nmcauxmaker.payout_address", coinbaseAddress); - gNMCAuxBlockMaker = new NMCAuxBlockMaker(cfg.lookup("namecoind.zmq_addr"), - cfg.lookup("namecoind.rpc_addr"), - cfg.lookup("namecoind.rpc_userpwd"), - cfg.lookup("kafka.brokers"), - cfg.lookup("nmcauxmaker.auxpow_gw_topic"), - rpcCallInterval, fileLastRpcCallTime, - isCheckZmq, coinbaseAddress); + gNMCAuxBlockMaker = new NMCAuxBlockMaker( + cfg.lookup("namecoind.zmq_addr"), + cfg.lookup("namecoind.rpc_addr"), + cfg.lookup("namecoind.rpc_userpwd"), + cfg.lookup("kafka.brokers"), + 
cfg.lookup("nmcauxmaker.auxpow_gw_topic"), + rpcCallInterval, + fileLastRpcCallTime, + isCheckZmq, + coinbaseAddress); try { if (!gNMCAuxBlockMaker->init()) { @@ -144,7 +149,7 @@ int main(int argc, char **argv) { gNMCAuxBlockMaker->run(); } delete gNMCAuxBlockMaker; - } catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/poolwatcher/PoolWatcherMain.cc b/src/poolwatcher/PoolWatcherMain.cc index 80f30597a..8d2ff39a5 100644 --- a/src/poolwatcher/PoolWatcherMain.cc +++ b/src/poolwatcher/PoolWatcherMain.cc @@ -55,12 +55,14 @@ void handler(int sig) { void usage() { fprintf(stderr, BIN_VERSION_STRING("poolwatcher")); - fprintf(stderr, "Usage:\tpoolwatcher -c \"poolwatcher.cfg\" [-l ]\n"); + fprintf( + stderr, + "Usage:\tpoolwatcher -c \"poolwatcher.cfg\" [-l ]\n"); } int main(int argc, char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int c; if (argc <= 1) { @@ -69,15 +71,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:h")) != -1) { switch (c) { - case 'c': - optConf = optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf = optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -90,25 +93,24 @@ int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. - FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("poolwatcher"); // Read the file. If there is an error, report it and exit. 
libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." << std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } // lock cfg file: @@ -120,7 +122,7 @@ int main(int argc, char **argv) { }*/ signal(SIGTERM, handler); - signal(SIGINT, handler); + signal(SIGINT, handler); // check if we are using testnet3 bool isTestnet3 = true; @@ -135,10 +137,11 @@ int main(int argc, char **argv) { bool disableChecking = false; cfg.lookupValue("poolwatcher.disable_checking", disableChecking); - gClientContainer = new ClientContainerBitcoin(cfg.lookup("kafka.brokers"), - cfg.lookup("poolwatcher.job_topic"), - cfg.lookup("poolwatcher.rawgbt_topic"), - disableChecking); + gClientContainer = new ClientContainerBitcoin( + cfg.lookup("kafka.brokers"), + cfg.lookup("poolwatcher.job_topic"), + cfg.lookup("poolwatcher.rawgbt_topic"), + disableChecking); // add pools { @@ -164,8 +167,7 @@ int main(int argc, char **argv) { gClientContainer->run(); } delete gClientContainer; - } - catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/rsk/GwMakerRsk.cc b/src/rsk/GwMakerRsk.cc index 9db5bbc8b..198d46cf8 100644 --- a/src/rsk/GwMakerRsk.cc +++ b/src/rsk/GwMakerRsk.cc @@ -28,7 +28,7 @@ @author Martin Medina @copyright RSK Labs Ltd. 
- @version 1.0 30/03/17 + @version 1.0 30/03/17 maintained by YihaoPeng since Feb 20, 2018 */ @@ -40,13 +40,13 @@ ///////////////////////////////GwMakerHandlerRsk//////////////////////////////////// bool GwMakerHandlerRsk::checkFields(JsonNode &r) { - if (r["result"].type() != Utilities::JS::type::Obj || - r["result"]["parentBlockHash"].type() != Utilities::JS::type::Str || - r["result"]["blockHashForMergedMining"].type() != Utilities::JS::type::Str || - r["result"]["target"].type() != Utilities::JS::type::Str || - r["result"]["feesPaidToMiner"].type() != Utilities::JS::type::Str || - r["result"]["notify"].type() != Utilities::JS::type::Bool) - { + if (r["result"].type() != Utilities::JS::type::Obj || + r["result"]["parentBlockHash"].type() != Utilities::JS::type::Str || + r["result"]["blockHashForMergedMining"].type() != + Utilities::JS::type::Str || + r["result"]["target"].type() != Utilities::JS::type::Str || + r["result"]["feesPaidToMiner"].type() != Utilities::JS::type::Str || + r["result"]["notify"].type() != Utilities::JS::type::Bool) { return false; } @@ -56,28 +56,30 @@ bool GwMakerHandlerRsk::checkFields(JsonNode &r) { string GwMakerHandlerRsk::constructRawMsg(JsonNode &r) { LOG(INFO) << "chain: " << def_.chainType_ << ", topic: " << def_.rawGwTopic_ - << ", parent block hash: " << r["result"]["parentBlockHash"].str() - << ", block hash for merge mining: " << r["result"]["blockHashForMergedMining"].str() - << ", target: " << r["result"]["target"].str() - << ", fees paid to miner: " << r["result"]["feesPaidToMiner"].str() - << ", notify: " << r["result"]["notify"].boolean(); + << ", parent block hash: " << r["result"]["parentBlockHash"].str() + << ", block hash for merge mining: " + << r["result"]["blockHashForMergedMining"].str() + << ", target: " << r["result"]["target"].str() + << ", fees paid to miner: " << r["result"]["feesPaidToMiner"].str() + << ", notify: " << r["result"]["notify"].boolean(); - return Strings::Format("{\"created_at_ts\":%u," - 
"\"chainType\":\"%s\"," - "\"rpcAddress\":\"%s\"," - "\"rpcUserPwd\":\"%s\"," - "\"target\":\"%s\"," - "\"parentBlockHash\":\"%s\"," - "\"blockHashForMergedMining\":\"%s\"," - "\"feesPaidToMiner\":\"%s\"," - "\"notify\":\"%s\"}", - (uint32_t)time(nullptr), - def_.chainType_.c_str(), - def_.rpcAddr_.c_str(), - def_.rpcUserPwd_.c_str(), - r["result"]["target"].str().c_str(), - r["result"]["parentBlockHash"].str().c_str(), - r["result"]["blockHashForMergedMining"].str().c_str(), - r["result"]["feesPaidToMiner"].str().c_str(), - r["result"]["notify"].boolean() ? "true" : "false"); + return Strings::Format( + "{\"created_at_ts\":%u," + "\"chainType\":\"%s\"," + "\"rskdRpcAddress\":\"%s\"," + "\"rskdRpcUserPwd\":\"%s\"," + "\"target\":\"%s\"," + "\"parentBlockHash\":\"%s\"," + "\"blockHashForMergedMining\":\"%s\"," + "\"feesPaidToMiner\":\"%s\"," + "\"notify\":\"%s\"}", + (uint32_t)time(nullptr), + def_.chainType_.c_str(), + def_.rpcAddr_.c_str(), + def_.rpcUserPwd_.c_str(), + r["result"]["target"].str().c_str(), + r["result"]["parentBlockHash"].str().c_str(), + r["result"]["blockHashForMergedMining"].str().c_str(), + r["result"]["feesPaidToMiner"].str().c_str(), + r["result"]["notify"].boolean() ? "true" : "false"); } diff --git a/src/rsk/GwMakerRsk.h b/src/rsk/GwMakerRsk.h index 3bdf8d949..306c5cb59 100644 --- a/src/rsk/GwMakerRsk.h +++ b/src/rsk/GwMakerRsk.h @@ -28,7 +28,7 @@ @author Martin Medina @copyright RSK Labs Ltd. 
- @version 1.0 30/03/17 + @version 1.0 30/03/17 maintained by YihaoPeng since Feb 20, 2018 */ @@ -41,11 +41,13 @@ #include "GwMaker.h" #include "utilities_js.hpp" -class GwMakerHandlerRsk : public GwMakerHandlerJson -{ +class GwMakerHandlerRsk : public GwMakerHandlerJson { bool checkFields(JsonNode &r) override; string constructRawMsg(JsonNode &r) override; - string getRequestData() override { return "{\"jsonrpc\": \"2.0\", \"method\": \"mnr_getWork\", \"params\": [], \"id\": 1}"; } + string getRequestData() override { + return "{\"jsonrpc\": \"2.0\", \"method\": \"mnr_getWork\", \"params\": " + "[], \"id\": 1}"; + } }; #endif diff --git a/src/rsk/RskSolvedShareData.h b/src/rsk/RskSolvedShareData.h index 68c62c2f6..899560640 100644 --- a/src/rsk/RskSolvedShareData.h +++ b/src/rsk/RskSolvedShareData.h @@ -36,17 +36,21 @@ class RskSolvedShareData { public: uint64_t jobId_; - int64_t workerId_; // found by who - int32_t userId_; - int32_t height_; - uint8_t header80_[80]; - char workerFullName_[40]; // . - char feesForMiner_[80]; - char rpcAddress_[80]; - char rpcUserPwd_[80]; - - RskSolvedShareData(): jobId_(0), workerId_(0), userId_(0), height_(0) { - memset(header80_, 0, sizeof(header80_)); + int64_t workerId_; // found by who + int32_t userId_; + int32_t height_; + uint8_t header80_[80]; + char workerFullName_[40]; // . 
+ char feesForMiner_[80]; + char rpcAddress_[80]; + char rpcUserPwd_[80]; + + RskSolvedShareData() + : jobId_(0) + , workerId_(0) + , userId_(0) + , height_(0) { + memset(header80_, 0, sizeof(header80_)); memset(workerFullName_, 0, sizeof(workerFullName_)); memset(feesForMiner_, 0, sizeof(feesForMiner_)); memset(rpcAddress_, 0, sizeof(rpcAddress_)); diff --git a/src/rsk/RskWork.cc b/src/rsk/RskWork.cc index d4164b660..12711d646 100644 --- a/src/rsk/RskWork.cc +++ b/src/rsk/RskWork.cc @@ -34,26 +34,28 @@ #include "Utils.h" -RskWork::RskWork() : initialized_(false) {} +RskWork::RskWork() + : initialized_(false) { +} -bool RskWork::validate(JsonNode &work) -{ +bool RskWork::validate(JsonNode &work) { // check fields are valid - if (work["created_at_ts"].type() != Utilities::JS::type::Int || - work["rskdRpcAddress"].type() != Utilities::JS::type::Str || - work["rskdRpcUserPwd"].type() != Utilities::JS::type::Str || - work["parentBlockHash"].type() != Utilities::JS::type::Str || - work["blockHashForMergedMining"].type() != Utilities::JS::type::Str || - work["target"].type() != Utilities::JS::type::Str || - work["feesPaidToMiner"].type() != Utilities::JS::type::Str || - work["notify"].type() != Utilities::JS::type::Str) { + if (work["created_at_ts"].type() != Utilities::JS::type::Int || + work["rskdRpcAddress"].type() != Utilities::JS::type::Str || + work["rskdRpcUserPwd"].type() != Utilities::JS::type::Str || + work["parentBlockHash"].type() != Utilities::JS::type::Str || + work["blockHashForMergedMining"].type() != Utilities::JS::type::Str || + work["target"].type() != Utilities::JS::type::Str || + work["feesPaidToMiner"].type() != Utilities::JS::type::Str || + work["notify"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "rsk getwork fields failure"; return false; } // check timestamp if (work["created_at_ts"].uint32() + 60u < time(nullptr)) { - LOG(ERROR) << "too old rsk getwork: " << date("%F %T", work["created_at_ts"].uint32()); + LOG(ERROR) << "too old rsk 
getwork: " + << date("%F %T", work["created_at_ts"].uint32()); return false; } @@ -65,7 +67,7 @@ void RskWork::initialize(JsonNode &work) { blockHash_ = work["blockHashForMergedMining"].str(); target_ = work["target"].str(); fees_ = work["feesPaidToMiner"].str(); - rpcAddress_ = work["rskdRpcAddress"].str(); + rpcAddress_ = work["rskdRpcAddress"].str(); rpcUserPwd_ = work["rskdRpcUserPwd"].str(); notifyFlag_ = work["notify"].boolean(); @@ -74,25 +76,23 @@ void RskWork::initialize(JsonNode &work) { bool RskWork::initFromGw(const string &rawGetWork) { JsonNode work; - //DLOG(INFO) << "initFromGw: " << rawGetWork; + // DLOG(INFO) << "initFromGw: " << rawGetWork; // check is valid json - if (!JsonNode::parse(rawGetWork.c_str(), - rawGetWork.c_str() + rawGetWork.length(), - work)) { + if (!JsonNode::parse( + rawGetWork.c_str(), rawGetWork.c_str() + rawGetWork.length(), work)) { LOG(ERROR) << "decode rsk getwork json fail: >" << rawGetWork << "<"; return false; } - if (!validate(work)) + if (!validate(work)) return false; - initialize(work); return true; } -bool RskWork::isInitialized() const { - return initialized_; +bool RskWork::isInitialized() const { + return initialized_; } u_int32_t RskWork::getCreatedAt() const { @@ -123,8 +123,7 @@ bool RskWork::getNotifyFlag() const { return notifyFlag_; } -bool RskWorkEth::validate(JsonNode &work) -{ +bool RskWorkEth::validate(JsonNode &work) { // check fields are valid if (work["created_at_ts"].type() != Utilities::JS::type::Int || work["rpcAddress"].type() != Utilities::JS::type::Str || @@ -136,26 +135,24 @@ bool RskWorkEth::validate(JsonNode &work) work["height"].type() != Utilities::JS::type::Int || work["uncles"].type() != Utilities::JS::type::Int || work["transactions"].type() != Utilities::JS::type::Int || - work["gasUsedPercent"].type() != Utilities::JS::type::Real) - { + work["gasUsedPercent"].type() != Utilities::JS::type::Real) { LOG(ERROR) << "getwork fields failure"; return false; } // check timestamp - if 
(work["created_at_ts"].uint32() + 60u < time(nullptr)) - { - LOG(ERROR) << "too old getwork: " << date("%F %T", work["created_at_ts"].uint32()); + if (work["created_at_ts"].uint32() + 60u < time(nullptr)) { + LOG(ERROR) << "too old getwork: " + << date("%F %T", work["created_at_ts"].uint32()); return false; } return true; } -void RskWorkEth::initialize(JsonNode &work) -{ - //LOG(INFO) << "RskWorkEth:: initialize"; +void RskWorkEth::initialize(JsonNode &work) { + // LOG(INFO) << "RskWorkEth:: initialize"; created_at = work["created_at_ts"].uint32(); - rpcAddress_ = work["rpcAddress"].str(); + rpcAddress_ = work["rpcAddress"].str(); rpcUserPwd_ = work["rpcUserPwd"].str(); parent_ = work["parent"].str(); target_ = work["target"].str(); diff --git a/src/rsk/RskWork.h b/src/rsk/RskWork.h index df4924f32..9a1059c07 100644 --- a/src/rsk/RskWork.h +++ b/src/rsk/RskWork.h @@ -53,7 +53,7 @@ class RskWork { public: RskWork(); - virtual ~RskWork() {}; + virtual ~RskWork(){}; bool initFromGw(const string &rawGetWork); bool isInitialized() const; @@ -64,16 +64,16 @@ class RskWork { string getRpcAddress() const; string getRpcUserPwd() const; bool getNotifyFlag() const; - + private: virtual bool validate(JsonNode &work); - virtual void initialize(JsonNode &work); + virtual void initialize(JsonNode &work); }; class RskWorkEth : public RskWork { virtual bool validate(JsonNode &work); virtual void initialize(JsonNode &work); - + string seedHash_; string parent_; uint32_t height_; diff --git a/src/sharelogger/ShareLoggerMain.cc b/src/sharelogger/ShareLoggerMain.cc index b194720b7..6a1c32bf0 100644 --- a/src/sharelogger/ShareLoggerMain.cc +++ b/src/sharelogger/ShareLoggerMain.cc @@ -48,12 +48,10 @@ using namespace std; using namespace libconfig; -//ShareLogWriter *gShareLogWriter = nullptr; +// ShareLogWriter *gShareLogWriter = nullptr; vector> writers; -void handler(int sig) -{ - for (auto writer : writers) - { +void handler(int sig) { + for (auto writer : writers) { if (writer) 
writer->stop(); } @@ -61,81 +59,77 @@ void handler(int sig) void usage() { fprintf(stderr, BIN_VERSION_STRING("sharelogger")); - fprintf(stderr, "Usage:\tsharelogger -c \"sharelogger.cfg\" [-l ]\n"); + fprintf( + stderr, + "Usage:\tsharelogger -c \"sharelogger.cfg\" [-l ]\n"); } -void workerThread(shared_ptr w) -{ +void workerThread(shared_ptr w) { if (w != nullptr) w->run(); } -std::shared_ptr newShareLogWriter(const string &kafkaBrokers, const Setting &def) { +std::shared_ptr +newShareLogWriter(const string &kafkaBrokers, const Setting &def) { string chainType = def.lookup("chain_type"); int compressionLevel = Z_DEFAULT_COMPRESSION; def.lookupValue("compression_level", compressionLevel); - - #if defined(CHAIN_TYPE_STR) if (CHAIN_TYPE_STR == chainType) -#else +#else if (false) #endif { - return make_shared(def.lookup("chain_type").c_str(), - kafkaBrokers.c_str(), - def.lookup("data_dir").c_str(), - def.lookup("kafka_group_id").c_str(), - def.lookup("share_topic"), - compressionLevel); - } - else if (chainType == "ETH") { - return make_shared(def.lookup("chain_type").c_str(), - kafkaBrokers.c_str(), - def.lookup("data_dir").c_str(), - def.lookup("kafka_group_id").c_str(), - def.lookup("share_topic"), - compressionLevel); - } - else if (chainType == "BTM") { - return make_shared(def.lookup("chain_type").c_str(), - kafkaBrokers.c_str(), - def.lookup("data_dir").c_str(), - def.lookup("kafka_group_id").c_str(), - def.lookup("share_topic"), - compressionLevel); - } - else if (chainType == "DCR") { - return make_shared(chainType.c_str(), - kafkaBrokers.c_str(), - def.lookup("data_dir").c_str(), - def.lookup("kafka_group_id").c_str(), - def.lookup("share_topic"), - compressionLevel); - } - else { + return make_shared( + def.lookup("chain_type").c_str(), + kafkaBrokers.c_str(), + def.lookup("data_dir").c_str(), + def.lookup("kafka_group_id").c_str(), + def.lookup("share_topic"), + compressionLevel); + } else if (chainType == "ETH") { + return make_shared( + 
def.lookup("chain_type").c_str(), + kafkaBrokers.c_str(), + def.lookup("data_dir").c_str(), + def.lookup("kafka_group_id").c_str(), + def.lookup("share_topic"), + compressionLevel); + } else if (chainType == "BTM") { + return make_shared( + def.lookup("chain_type").c_str(), + kafkaBrokers.c_str(), + def.lookup("data_dir").c_str(), + def.lookup("kafka_group_id").c_str(), + def.lookup("share_topic"), + compressionLevel); + } else if (chainType == "DCR") { + return make_shared( + chainType.c_str(), + kafkaBrokers.c_str(), + def.lookup("data_dir").c_str(), + def.lookup("kafka_group_id").c_str(), + def.lookup("share_topic"), + compressionLevel); + } else { LOG(FATAL) << "Unknown chain type " << chainType; return nullptr; } } -int main(int argc, char **argv) -{ +int main(int argc, char **argv) { char *optLogDir = NULL; char *optConf = NULL; int c; - if (argc <= 1) - { + if (argc <= 1) { usage(); return 1; } - while ((c = getopt(argc, argv, "c:l:h")) != -1) - { - switch (c) - { + while ((c = getopt(argc, argv, "c:l:h")) != -1) { + switch (c) { case 'c': optConf = optarg; break; @@ -159,25 +153,20 @@ int main(int argc, char **argv) // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("sharelogger"); // Read the file. If there is an error, report it and exit. libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } - catch (const FileIOException &fioex) - { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." 
<< std::endl; return (EXIT_FAILURE); - } - catch (const ParseException &pex) - { + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() << " - " << pex.getError() << std::endl; return (EXIT_FAILURE); @@ -195,14 +184,12 @@ int main(int argc, char **argv) signal(SIGTERM, handler); signal(SIGINT, handler); - try - { + try { string brokers = cfg.lookup("kafka.brokers"); const Setting &root = cfg.getRoot(); const Setting &defs = root["sharelog_writers"]; - for (int i = 0; i < defs.getLength(); ++i) - { + for (int i = 0; i < defs.getLength(); ++i) { const Setting &def = defs[i]; bool enabled = false; def.lookupValue("enabled", enabled); @@ -217,19 +204,15 @@ int main(int argc, char **argv) workers.push_back(std::make_shared(workerThread, writer)); // run - for (auto pWorker : workers) - { - if (pWorker->joinable()) - { + for (auto pWorker : workers) { + if (pWorker->joinable()) { LOG(INFO) << "wait for worker " << pWorker->get_id(); pWorker->join(); LOG(INFO) << "worker exit"; } } } - } - catch (std::exception &e) - { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/sia/BlockMakerSia.cc b/src/sia/BlockMakerSia.cc index 9ba2b9b5c..7b5d73e37 100644 --- a/src/sia/BlockMakerSia.cc +++ b/src/sia/BlockMakerSia.cc @@ -26,13 +26,14 @@ #include //////////////////////////////////////BlockMakerSia////////////////////////////////////////////////// -BlockMakerSia::BlockMakerSia(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB) - : BlockMaker(def, kafkaBrokers, poolDB) -{ +BlockMakerSia::BlockMakerSia( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB) + : BlockMaker(def, kafkaBrokers, poolDB) { } -void BlockMakerSia::processSolvedShare(rd_kafka_message_t *rkmessage) -{ +void BlockMakerSia::processSolvedShare(rd_kafka_message_t *rkmessage) { if (rkmessage->len != 80) { LOG(ERROR) << "incorrect header len: " << 
rkmessage->len; return; @@ -40,10 +41,15 @@ void BlockMakerSia::processSolvedShare(rd_kafka_message_t *rkmessage) char buf[80] = {0}; memcpy(buf, rkmessage->payload, 80); - for (const auto &itr : def()->nodes) - { + for (const auto &itr : def()->nodes) { string response; - rpcCall(itr.rpcAddr_.c_str(), itr.rpcUserPwd_.c_str(), buf, 80, response, "Sia-Agent"); + rpcCall( + itr.rpcAddr_.c_str(), + itr.rpcUserPwd_.c_str(), + buf, + 80, + response, + "Sia-Agent"); LOG(INFO) << "submission result: " << response; } } diff --git a/src/sia/BlockMakerSia.h b/src/sia/BlockMakerSia.h index 399be97e4..03272d011 100644 --- a/src/sia/BlockMakerSia.h +++ b/src/sia/BlockMakerSia.h @@ -27,10 +27,12 @@ #include "BlockMaker.h" #include "Common.h" -class BlockMakerSia : public BlockMaker -{ +class BlockMakerSia : public BlockMaker { public: - BlockMakerSia(shared_ptr def, const char *kafkaBrokers, const MysqlConnectInfo &poolDB); + BlockMakerSia( + shared_ptr def, + const char *kafkaBrokers, + const MysqlConnectInfo &poolDB); void processSolvedShare(rd_kafka_message_t *rkmessage) override; }; diff --git a/src/sia/GwMakerSia.cc b/src/sia/GwMakerSia.cc index a6f462fad..84f283aaa 100644 --- a/src/sia/GwMakerSia.cc +++ b/src/sia/GwMakerSia.cc @@ -27,8 +27,7 @@ #include ///////////////////////////////GwMakerHandlerSia//////////////////////////////////// -string GwMakerHandlerSia::processRawGw(const string &msg) -{ +string GwMakerHandlerSia::processRawGw(const string &msg) { if (msg.length() != 112) return ""; @@ -39,26 +38,24 @@ string GwMakerHandlerSia::processRawGw(const string &msg) // timestamp [72-80) [40-48) // merkle root [80-112) [48-80) string targetStr; - for (int i = 0; i < 32; ++i) - { + for (int i = 0; i < 32; ++i) { uint8_t val = (uint8_t)msg[i]; targetStr += Strings::Format("%02x", val); } - //Claymore purposely reverses the timestamp + // Claymore purposely reverses the timestamp 
//"00000000000000021f3e8ede65495c4311ef59e5b7a4338542e573819f5979e90000000000000000cd33aa5a00000000486573a66f31f5911959fce210ef557c715f716d0f022e1ba9f396294fc39d42" string headerStr; - for (int i = 32; i < 112; ++i) - { + for (int i = 32; i < 112; ++i) { uint8_t val = (uint8_t)msg[i]; headerStr += Strings::Format("%02x", val); } - //time stamp + // time stamp // uint64_t timestamp = *((uint64*)&msg[72]); - // string timestampStr = Strings::Format("%08x%08x", timestamp >> 32, timestamp & 0xFFFFFFFF); - // DLOG(INFO) << "timestamp string=" << timestampStr; + // string timestampStr = Strings::Format("%08x%08x", timestamp >> 32, + // timestamp & 0xFFFFFFFF); DLOG(INFO) << "timestamp string=" << timestampStr; // headerStr += timestampStr; @@ -68,22 +65,22 @@ string GwMakerHandlerSia::processRawGw(const string &msg) // headerStr += Strings::Format("%02x", val); // } - LOG(INFO) << "chain: " << def_.chainType_ - << ", topic: " << def_.rawGwTopic_ - << ", target: " << targetStr - << ", hHash: " << headerStr; + LOG(INFO) << "chain: " << def_.chainType_ << ", topic: " << def_.rawGwTopic_ + << ", target: " << targetStr << ", hHash: " << headerStr; - //LOG(INFO) << "Sia work target 0x" << targetStr << ", blkId 0x" << blkIdStr << ; - return Strings::Format("{\"created_at_ts\":%u," - "\"chainType\":\"%s\"," - "\"rpcAddress\":\"%s\"," - "\"rpcUserPwd\":\"%s\"," - "\"target\":\"%s\"," - "\"hHash\":\"%s\"}", - (uint32_t)time(nullptr), - def_.chainType_.c_str(), - def_.rpcAddr_.c_str(), - def_.rpcUserPwd_.c_str(), - targetStr.c_str(), - headerStr.c_str()); + // LOG(INFO) << "Sia work target 0x" << targetStr << ", blkId 0x" << blkIdStr + // << ; + return Strings::Format( + "{\"created_at_ts\":%u," + "\"chainType\":\"%s\"," + "\"rpcAddress\":\"%s\"," + "\"rpcUserPwd\":\"%s\"," + "\"target\":\"%s\"," + "\"hHash\":\"%s\"}", + (uint32_t)time(nullptr), + def_.chainType_.c_str(), + def_.rpcAddr_.c_str(), + def_.rpcUserPwd_.c_str(), + targetStr.c_str(), + headerStr.c_str()); } diff --git 
a/src/sia/GwMakerSia.h b/src/sia/GwMakerSia.h index 0f2ca81f7..f263226d6 100644 --- a/src/sia/GwMakerSia.h +++ b/src/sia/GwMakerSia.h @@ -29,8 +29,7 @@ #include "GwMaker.h" #include "utilities_js.hpp" -class GwMakerHandlerSia : public GwMakerHandler -{ +class GwMakerHandlerSia : public GwMakerHandler { string processRawGw(const string &gw) override; string getRequestData() override { return ""; } string getUserAgent() override { return "Sia-Agent"; } diff --git a/src/sia/JobMakerSia.cc b/src/sia/JobMakerSia.cc index 3f5a7dd97..c170e6521 100644 --- a/src/sia/JobMakerSia.cc +++ b/src/sia/JobMakerSia.cc @@ -27,15 +27,13 @@ #include "Utils.h" ////////////////////////////////JobMakerHandlerSia////////////////////////////////// -JobMakerHandlerSia::JobMakerHandlerSia() : time_(0) -{ +JobMakerHandlerSia::JobMakerHandlerSia() + : time_(0) { } -bool JobMakerHandlerSia::processMsg(const string &msg) -{ +bool JobMakerHandlerSia::processMsg(const string &msg) { JsonNode j; - if (!JsonNode::parse(msg.c_str(), msg.c_str() + msg.length(), j)) - { + if (!JsonNode::parse(msg.c_str(), msg.c_str() + msg.length(), j)) { LOG(ERROR) << "deserialize sia work failed " << msg; return false; } @@ -53,50 +51,48 @@ bool JobMakerHandlerSia::processMsg(const string &msg) return processMsg(j); } -bool JobMakerHandlerSia::processMsg(JsonNode &j) -{ +bool JobMakerHandlerSia::processMsg(JsonNode &j) { target_ = j["target"].str(); return true; } -bool JobMakerHandlerSia::validate(JsonNode &j) -{ +bool JobMakerHandlerSia::validate(JsonNode &j) { // check fields are valid if (j.type() != Utilities::JS::type::Obj || - j["created_at_ts"].type() != Utilities::JS::type::Int || - j["rpcAddress"].type() != Utilities::JS::type::Str || - j["rpcUserPwd"].type() != Utilities::JS::type::Str || - j["target"].type() != Utilities::JS::type::Str || - j["hHash"].type() != Utilities::JS::type::Str) { - LOG(ERROR) << "work format not expected"; + j["created_at_ts"].type() != Utilities::JS::type::Int || + 
j["rpcAddress"].type() != Utilities::JS::type::Str || + j["rpcUserPwd"].type() != Utilities::JS::type::Str || + j["target"].type() != Utilities::JS::type::Str || + j["hHash"].type() != Utilities::JS::type::Str) { + LOG(ERROR) << "work format not expected"; return false; - } + } // check timestamp - if (j["created_at_ts"].uint32() + def()->maxJobDelay_ < time(nullptr)) - { - LOG(ERROR) << "too old sia work: " << date("%F %T", j["created_at_ts"].uint32()); + if (j["created_at_ts"].uint32() + def()->maxJobDelay_ < time(nullptr)) { + LOG(ERROR) << "too old sia work: " + << date("%F %T", j["created_at_ts"].uint32()); return false; } return true; } -string JobMakerHandlerSia::makeStratumJobMsg() -{ - if (0 == header_.size() || - 0 == target_.size()) +string JobMakerHandlerSia::makeStratumJobMsg() { + if (0 == header_.size() || 0 == target_.size()) return ""; uint64_t jobId = generateJobId(djb2(header_.c_str())); - return Strings::Format("{\"created_at_ts\":%u" - ",\"jobId\":%" PRIu64 "" - ",\"target\":\"%s\"" - ",\"hHash\":\"%s\"" - "}", - time_, - jobId, - target_.c_str(), - header_.c_str()); + return Strings::Format( + "{\"created_at_ts\":%u" + ",\"jobId\":%" PRIu64 + "" + ",\"target\":\"%s\"" + ",\"hHash\":\"%s\"" + "}", + time_, + jobId, + target_.c_str(), + header_.c_str()); } diff --git a/src/sia/JobMakerSia.h b/src/sia/JobMakerSia.h index ca0e134b9..a727d4c98 100644 --- a/src/sia/JobMakerSia.h +++ b/src/sia/JobMakerSia.h @@ -27,9 +27,7 @@ #include "JobMaker.h" #include "utilities_js.hpp" - -class JobMakerHandlerSia : public GwJobMakerHandler -{ +class JobMakerHandlerSia : public GwJobMakerHandler { public: JobMakerHandlerSia(); virtual ~JobMakerHandlerSia() {} @@ -42,7 +40,6 @@ class JobMakerHandlerSia : public GwJobMakerHandler string header_; uint32_t time_; bool validate(JsonNode &j); - }; #endif diff --git a/src/sia/StratumMinerSia.cc b/src/sia/StratumMinerSia.cc index ca7a73e1f..2aa9657f9 100644 --- a/src/sia/StratumMinerSia.cc +++ 
b/src/sia/StratumMinerSia.cc @@ -32,31 +32,37 @@ #include -///////////////////////////////// StratumSessionSia //////////////////////////////// -StratumMinerSia::StratumMinerSia(StratumSessionSia &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) - : StratumMinerBase(session, diffController, clientAgent, workerName, workerId) { +///////////////////////////////// StratumSessionSia +/////////////////////////////////// +StratumMinerSia::StratumMinerSia( + StratumSessionSia &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) + : StratumMinerBase( + session, diffController, clientAgent, workerName, workerId) { } -void StratumMinerSia::handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumMinerSia::handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) { if (method == "mining.submit") { handleRequest_Submit(idStr, jparams); } } -void StratumMinerSia::handleRequest_Submit(const string &idStr, const JsonNode &jparams) { +void StratumMinerSia::handleRequest_Submit( + const string &idStr, const JsonNode &jparams) { auto &session = getSession(); auto &server = session.getServer(); if (session.getState() != StratumSession::AUTHENTICATED) { session.responseError(idStr, StratumStatus::UNAUTHORIZED); // there must be something wrong, send reconnect command - const string s = "{\"id\":null,\"method\":\"client.reconnect\",\"params\":[]}\n"; + const string s = + "{\"id\":null,\"method\":\"client.reconnect\",\"params\":[]}\n"; session.sendData(s); return; } @@ -69,7 +75,8 @@ void StratumMinerSia::handleRequest_Submit(const string &idStr, const JsonNode & } string header = params[2].str(); - //string header = 
"00000000000000021f3e8ede65495c4311ef59e5b7a4338542e573819f5979e982719d0366014155e935aa5a00000000201929782a8fe3209b152520c51d2a82dc364e4a3eb6fb8131439835e278ff8b"; + // string header = + // "00000000000000021f3e8ede65495c4311ef59e5b7a4338542e573819f5979e982719d0366014155e935aa5a00000000201929782a8fe3209b152520c51d2a82dc364e4a3eb6fb8131439835e278ff8b"; if (162 == header.length()) header = header.substr(2, 160); if (header.length() != 160) { @@ -83,7 +90,8 @@ void StratumMinerSia::handleRequest_Submit(const string &idStr, const JsonNode & bHeader[i] = strtol(header.substr(i * 2, 2).c_str(), 0, 16); // uint64_t nonce = strtoull(header.substr(64, 16).c_str(), nullptr, 16); // uint64_t timestamp = strtoull(header.substr(80, 16).c_str(), nullptr, 16); - // DLOG(INFO) << "nonce=" << std::hex << nonce << ", timestamp=" << std::hex << timestamp; + // DLOG(INFO) << "nonce=" << std::hex << nonce << ", timestamp=" << std::hex + // << timestamp; // //memcpy(bHeader + 32, &nonce, 8); // memcpy(bHeader + 40, ×tamp, 8); // for (int i = 48; i < 80; ++i) @@ -96,16 +104,16 @@ void StratumMinerSia::handleRequest_Submit(const string &idStr, const JsonNode & uint8_t out[32] = {0}; int ret = blake2b(out, 32, bHeader, 80, nullptr, 0); DLOG(INFO) << "blake2b return=" << ret; - //str = ""; + // str = ""; for (int i = 0; i < 32; ++i) str += Strings::Format("%02x", out[i]); DLOG(INFO) << str; - uint8_t shortJobId = (uint8_t) atoi(params[1].str()); + uint8_t shortJobId = (uint8_t)atoi(params[1].str()); LocalJob *localJob = session.findLocalJob(shortJobId); if (nullptr == localJob) { session.responseError(idStr, StratumStatus::JOB_NOT_FOUND); - LOG(ERROR) << "sia local job not found " << (int) shortJobId; + LOG(ERROR) << "sia local job not found " << (int)shortJobId; return; } @@ -125,13 +133,13 @@ void StratumMinerSia::handleRequest_Submit(const string &idStr, const JsonNode & return; } - uint64_t nonce = *((uint64_t *) (bHeader + 32)); + uint64_t nonce = *((uint64_t *)(bHeader + 32)); 
LocalShare localShare(nonce, 0, 0); if (!server.isEnableSimulator_ && !localJob->addLocalShare(localShare)) { session.responseError(idStr, StratumStatus::DUPLICATE_SHARE); LOG(ERROR) << "duplicated share nonce " << std::hex << nonce; // add invalid share to counter - invalidSharesCounter_.insert((int64_t) time(nullptr), 1); + invalidSharesCounter_.insert((int64_t)time(nullptr), 1); return; } @@ -152,18 +160,17 @@ void StratumMinerSia::handleRequest_Submit(const string &idStr, const JsonNode & ip.fromIpv4Int(clientIp); share.set_ip(ip.toString()); - share.set_userid(worker.userId_); share.set_sharediff(difficulty); - share.set_timestamp((uint32_t) time(nullptr)); + share.set_timestamp((uint32_t)time(nullptr)); share.set_status(StratumStatus::REJECT_NO_REASON); arith_uint256 shareTarget(str); arith_uint256 networkTarget = UintToArith256(sjob->networkTarget_); if (shareTarget < networkTarget) { - //valid share - //submit share + // valid share + // submit share server.sendSolvedShare2Kafka(bHeader, 80); diffController_->addAcceptedShare(share.sharediff()); LOG(INFO) << "sia solution found"; @@ -174,9 +181,9 @@ void StratumMinerSia::handleRequest_Submit(const string &idStr, const JsonNode & std::string message; uint32_t size = 0; if (!share.SerializeToArrayWithVersion(message, size)) { - LOG(ERROR) << "share SerializeToArray failed!"<< share.toString(); + LOG(ERROR) << "share SerializeToArray failed!" 
<< share.toString(); return; } - server.sendShare2Kafka((const uint8_t *) message.data(), size); + server.sendShare2Kafka((const uint8_t *)message.data(), size); } diff --git a/src/sia/StratumMinerSia.h b/src/sia/StratumMinerSia.h index 9c648aa21..3078b98d6 100644 --- a/src/sia/StratumMinerSia.h +++ b/src/sia/StratumMinerSia.h @@ -29,16 +29,18 @@ class StratumMinerSia : public StratumMinerBase { public: - StratumMinerSia(StratumSessionSia &session, - const DiffController &diffController, - const std::string &clientAgent, - const std::string &workerName, - int64_t workerId); - - void handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) override; + StratumMinerSia( + StratumSessionSia &session, + const DiffController &diffController, + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId); + + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; private: void handleRequest_Submit(const string &idStr, const JsonNode &jparams); diff --git a/src/sia/StratumServerSia.cc b/src/sia/StratumServerSia.cc index a0c92df8e..223e96505 100644 --- a/src/sia/StratumServerSia.cc +++ b/src/sia/StratumServerSia.cc @@ -31,58 +31,55 @@ using namespace std; -//////////////////////////////////// JobRepositorySia ///////////////////////////////// -JobRepositorySia::JobRepositorySia(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerSia *server) : - JobRepositoryBase(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) -{ +//////////////////////////////////// JobRepositorySia +//////////////////////////////////// +JobRepositorySia::JobRepositorySia( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerSia *server) + : JobRepositoryBase(kafkaBrokers, consumerTopic, fileLastNotifyTime, server) { } 
-JobRepositorySia::~JobRepositorySia() -{ - +JobRepositorySia::~JobRepositorySia() { } -shared_ptr JobRepositorySia::createStratumJobEx(shared_ptr sjob, bool isClean){ +shared_ptr JobRepositorySia::createStratumJobEx( + shared_ptr sjob, bool isClean) { return std::make_shared(sjob, isClean); } void JobRepositorySia::broadcastStratumJob(shared_ptr sjob) { LOG(INFO) << "broadcast sia stratum job " << std::hex << sjob->jobId_; shared_ptr exJob(createStratumJobEx(sjob, true)); - { - ScopeLock sl(lock_); - // mark all jobs as stale, should do this before insert new job - for (auto it : exJobs_) - it.second->markStale(); + // mark all jobs as stale, should do this before insert new job + for (auto it : exJobs_) + it.second->markStale(); - // insert new job - exJobs_[sjob->jobId_] = exJob; - } + // insert new job + exJobs_[sjob->jobId_] = exJob; sendMiningNotify(exJob); } ////////////////////////////////// ServierSia /////////////////////////////// -ServerSia::~ServerSia() -{ - +ServerSia::~ServerSia() { } -unique_ptr ServerSia::createConnection(struct bufferevent *bev, - struct sockaddr *saddr, - const uint32_t sessionID) { +unique_ptr ServerSia::createConnection( + struct bufferevent *bev, struct sockaddr *saddr, const uint32_t sessionID) { return boost::make_unique(*this, bev, saddr, sessionID); } -JobRepository *ServerSia::createJobRepository(const char *kafkaBrokers, - const char *consumerTopic, - const string &fileLastNotifyTime) -{ - return new JobRepositorySia(kafkaBrokers, consumerTopic, fileLastNotifyTime, this); +JobRepository *ServerSia::createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) { + return new JobRepositorySia( + kafkaBrokers, consumerTopic, fileLastNotifyTime, this); } void ServerSia::sendSolvedShare2Kafka(uint8_t *buf, int len) { - kafkaProducerSolvedShare_->produce(buf, len); + kafkaProducerSolvedShare_->produce(buf, len); } - diff --git a/src/sia/StratumServerSia.h 
b/src/sia/StratumServerSia.h index e1755e6ea..37916e365 100644 --- a/src/sia/StratumServerSia.h +++ b/src/sia/StratumServerSia.h @@ -29,30 +29,39 @@ class JobRepositorySia; -class ServerSia : public ServerBase -{ +class ServerSia : public ServerBase { public: - ServerSia(const int32_t shareAvgSeconds) : ServerBase(shareAvgSeconds) {} + ServerSia(const int32_t shareAvgSeconds) + : ServerBase(shareAvgSeconds) {} virtual ~ServerSia(); + unique_ptr createConnection( + struct bufferevent *bev, + struct sockaddr *saddr, + const uint32_t sessionID) override; - unique_ptr createConnection(struct bufferevent *bev, struct sockaddr *saddr, const uint32_t sessionID) override; - void sendSolvedShare2Kafka(uint8_t *buf, int len); -private: - JobRepository* createJobRepository(const char *kafkaBrokers, - const char *consumerTopic, - const string &fileLastNotifyTime) override; +private: + JobRepository *createJobRepository( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime) override; }; -class JobRepositorySia : public JobRepositoryBase -{ +class JobRepositorySia : public JobRepositoryBase { public: - JobRepositorySia(const char *kafkaBrokers, const char *consumerTopic, const string &fileLastNotifyTime, ServerSia *server); + JobRepositorySia( + const char *kafkaBrokers, + const char *consumerTopic, + const string &fileLastNotifyTime, + ServerSia *server); ~JobRepositorySia(); - shared_ptr createStratumJob() override { return std::make_shared(); } - shared_ptr createStratumJobEx(shared_ptr sjob, bool isClean) override; + shared_ptr createStratumJob() override { + return std::make_shared(); + } + shared_ptr + createStratumJobEx(shared_ptr sjob, bool isClean) override; void broadcastStratumJob(shared_ptr sjob) override; }; diff --git a/src/sia/StratumSessionSia.cc b/src/sia/StratumSessionSia.cc index 49908744f..1819e7465 100644 --- a/src/sia/StratumSessionSia.cc +++ b/src/sia/StratumSessionSia.cc @@ -32,18 +32,23 @@ #include 
-StratumSessionSia::StratumSessionSia(ServerSia &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1) - : StratumSessionBase(server, bev, saddr, extraNonce1), shortJobId_(0) { +StratumSessionSia::StratumSessionSia( + ServerSia &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1) + : StratumSessionBase(server, bev, saddr, extraNonce1) + , shortJobId_(0) { } -void StratumSessionSia::sendSetDifficulty(LocalJob &localJob, uint64_t difficulty) { - static_cast(localJob).jobDifficulty_ = difficulty; +void StratumSessionSia::sendSetDifficulty( + LocalJob &localJob, uint64_t difficulty) { + static_cast(localJob).jobDifficulty_ = + difficulty; } -void StratumSessionSia::sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) { +void StratumSessionSia::sendMiningNotify( + shared_ptr exJobPtr, bool isFirstJob) { if (state_ < AUTHENTICATED || nullptr == exJobPtr) { LOG(ERROR) << "sia sendMiningNotify failed, state: " << state_; @@ -70,13 +75,14 @@ void StratumSessionSia::sendMiningNotify(shared_ptr exJobPtr, bool DiffToTarget(jobDifficulty, shareTarget); } string strShareTarget = shareTarget.GetHex(); - LOG(INFO) << "new sia stratum job mining.notify: share difficulty=" << jobDifficulty << ", share target=" - << strShareTarget; - const string strNotify = Strings::Format("{\"id\":6,\"jsonrpc\":\"2.0\",\"method\":\"mining.notify\"," - "\"params\":[\"%u\",\"0x%s\",\"0x%s\"]}\n", - ljob.shortJobId_, - siaJob->blockHashForMergedMining_.c_str(), - strShareTarget.c_str()); + LOG(INFO) << "new sia stratum job mining.notify: share difficulty=" + << jobDifficulty << ", share target=" << strShareTarget; + const string strNotify = Strings::Format( + "{\"id\":6,\"jsonrpc\":\"2.0\",\"method\":\"mining.notify\"," + "\"params\":[\"%u\",\"0x%s\",\"0x%s\"]}\n", + ljob.shortJobId_, + siaJob->blockHashForMergedMining_.c_str(), + strShareTarget.c_str()); sendData(strNotify); // send notify string @@ -84,25 +90,23 @@ void 
StratumSessionSia::sendMiningNotify(shared_ptr exJobPtr, bool clearLocalJobs(); } -void StratumSessionSia::handleRequest(const std::string &idStr, - const std::string &method, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionSia::handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) { if (method == "mining.subscribe") { handleRequest_Subscribe(idStr, jparams, jroot); - } - else if (method == "mining.authorize") { + } else if (method == "mining.authorize") { // TODO: implement this... - //handleRequest_Authorize(idStr, jparams, jroot); - } - else if (dispatcher_) { + // handleRequest_Authorize(idStr, jparams, jroot); + } else { dispatcher_->handleRequest(idStr, method, jparams, jroot); } } -void StratumSessionSia::handleRequest_Subscribe(const string &idStr, - const JsonNode &jparams, - const JsonNode &jroot) { +void StratumSessionSia::handleRequest_Subscribe( + const string &idStr, const JsonNode &jparams, const JsonNode &jroot) { if (state_ != CONNECTED) { responseError(idStr, StratumStatus::UNKNOWN); return; @@ -110,16 +114,19 @@ void StratumSessionSia::handleRequest_Subscribe(const string &idStr, state_ = SUBSCRIBED; - const string s = Strings::Format("{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":true}\n", idStr.c_str()); + const string s = Strings::Format( + "{\"id\":%s,\"jsonrpc\":\"2.0\",\"result\":true}\n", idStr.c_str()); sendData(s); } -unique_ptr StratumSessionSia::createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) { - return boost::make_unique(*this, - *getServer().defaultDifficultyController_, - clientAgent, - workerName, - workerId); +unique_ptr StratumSessionSia::createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) { + return boost::make_unique( + *this, + *getServer().defaultDifficultyController_, + clientAgent, + workerName, + workerId); } diff --git 
a/src/sia/StratumSessionSia.h b/src/sia/StratumSessionSia.h index a4642de05..355745162 100644 --- a/src/sia/StratumSessionSia.h +++ b/src/sia/StratumSessionSia.h @@ -30,27 +30,32 @@ class StratumSessionSia : public StratumSessionBase { public: - StratumSessionSia(ServerSia &server, - struct bufferevent *bev, - struct sockaddr *saddr, - uint32_t extraNonce1); + StratumSessionSia( + ServerSia &server, + struct bufferevent *bev, + struct sockaddr *saddr, + uint32_t extraNonce1); void sendSetDifficulty(LocalJob &localJob, uint64_t difficulty) override; - void sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; + void + sendMiningNotify(shared_ptr exJobPtr, bool isFirstJob) override; protected: - void handleRequest(const std::string &idStr, const std::string &method, - const JsonNode &jparams, const JsonNode &jroot) override; - void handleRequest_Subscribe(const std::string &idStr, - const JsonNode &jparams, - const JsonNode &jroot); + void handleRequest( + const std::string &idStr, + const std::string &method, + const JsonNode &jparams, + const JsonNode &jroot) override; + void handleRequest_Subscribe( + const std::string &idStr, const JsonNode &jparams, const JsonNode &jroot); public: - std::unique_ptr createMiner(const std::string &clientAgent, - const std::string &workerName, - int64_t workerId) override; + std::unique_ptr createMiner( + const std::string &clientAgent, + const std::string &workerName, + int64_t workerId) override; private: - uint8_t shortJobId_; //Claymore jobId starts from 0 + uint8_t shortJobId_; // Claymore jobId starts from 0 }; -#endif // #ifndef STRATUM_SESSION_SIA_H_ +#endif // #ifndef STRATUM_SESSION_SIA_H_ diff --git a/src/sia/StratumSia.cc b/src/sia/StratumSia.cc index 766749407..f19c5e837 100644 --- a/src/sia/StratumSia.cc +++ b/src/sia/StratumSia.cc @@ -29,43 +29,36 @@ #include StratumJobSia::StratumJobSia() - : nTime_(0U) -{ - + : nTime_(0U) { } -StratumJobSia::~StratumJobSia() -{ - +StratumJobSia::~StratumJobSia() { } - 
-string StratumJobSia::serializeToJson() const -{ - return Strings::Format("{\"created_at_ts\":%u" - ",\"jobId\":%" PRIu64 "" - ",\"target\":\"%s\"" - ",\"hHash\":\"%s\"" - "}", - nTime_, - jobId_, - networkTarget_.GetHex().c_str(), - blockHashForMergedMining_.c_str()); +string StratumJobSia::serializeToJson() const { + return Strings::Format( + "{\"created_at_ts\":%u" + ",\"jobId\":%" PRIu64 + "" + ",\"target\":\"%s\"" + ",\"hHash\":\"%s\"" + "}", + nTime_, + jobId_, + networkTarget_.GetHex().c_str(), + blockHashForMergedMining_.c_str()); } ///////////////////////////////StratumJobSia/////////////////////////// -bool StratumJobSia::unserializeFromJson(const char *s, size_t len) -{ +bool StratumJobSia::unserializeFromJson(const char *s, size_t len) { JsonNode j; - if (!JsonNode::parse(s, s + len, j)) - { + if (!JsonNode::parse(s, s + len, j)) { return false; } if (j["created_at_ts"].type() != Utilities::JS::type::Int || j["jobId"].type() != Utilities::JS::type::Int || j["target"].type() != Utilities::JS::type::Str || - j["hHash"].type() != Utilities::JS::type::Str) - { + j["hHash"].type() != Utilities::JS::type::Str) { LOG(ERROR) << "parse sia stratum job failure: " << s; return false; } @@ -77,4 +70,3 @@ bool StratumJobSia::unserializeFromJson(const char *s, size_t len) return true; } - diff --git a/src/sia/StratumSia.h b/src/sia/StratumSia.h index 629889dd7..b0fd61cf8 100644 --- a/src/sia/StratumSia.h +++ b/src/sia/StratumSia.h @@ -29,144 +29,141 @@ #include #include "sia/sia.pb.h" - -class ShareSiaBytesVersion -{ +class ShareSiaBytesVersion { public: - - uint32_t version_ = 0; - uint32_t checkSum_ = 0; - - int64_t workerHashId_ = 0;//8 - int32_t userId_ = 0;//16 - int32_t status_ = 0;//20 - int64_t timestamp_ = 0;//24 - IpAddress ip_ = 0;//32 - - uint64_t jobId_ = 0;//48 - uint64_t shareDiff_ = 0;//56 - uint32_t blkBits_ = 0;//64 - uint32_t height_ = 0;//68 - uint32_t nonce_ = 0;//72 - uint32_t sessionId_ = 0;//76 + uint32_t version_ = 0; + uint32_t checkSum_ = 
0; + + int64_t workerHashId_ = 0; // 8 + int32_t userId_ = 0; // 16 + int32_t status_ = 0; // 20 + int64_t timestamp_ = 0; // 24 + IpAddress ip_ = 0; // 32 + + uint64_t jobId_ = 0; // 48 + uint64_t shareDiff_ = 0; // 56 + uint32_t blkBits_ = 0; // 64 + uint32_t height_ = 0; // 68 + uint32_t nonce_ = 0; // 72 + uint32_t sessionId_ = 0; // 76 uint32_t checkSum() const { uint64_t c = 0; - c += (uint64_t) version_; - c += (uint64_t) workerHashId_; - c += (uint64_t) userId_; - c += (uint64_t) status_; - c += (uint64_t) timestamp_; - c += (uint64_t) ip_.addrUint64[0]; - c += (uint64_t) ip_.addrUint64[1]; - c += (uint64_t) jobId_; - c += (uint64_t) shareDiff_; - c += (uint64_t) blkBits_; - c += (uint64_t) height_; - c += (uint64_t) nonce_; - c += (uint64_t) sessionId_; - - return ((uint32_t) c) + ((uint32_t) (c >> 32)); + c += (uint64_t)version_; + c += (uint64_t)workerHashId_; + c += (uint64_t)userId_; + c += (uint64_t)status_; + c += (uint64_t)timestamp_; + c += (uint64_t)ip_.addrUint64[0]; + c += (uint64_t)ip_.addrUint64[1]; + c += (uint64_t)jobId_; + c += (uint64_t)shareDiff_; + c += (uint64_t)blkBits_; + c += (uint64_t)height_; + c += (uint64_t)nonce_; + c += (uint64_t)sessionId_; + + return ((uint32_t)c) + ((uint32_t)(c >> 32)); } - }; - -class ShareSia : public sharebase::SiaMsg -{ +class ShareSia : public sharebase::SiaMsg { public: - - const static uint32_t BYTES_VERSION = 0x00010003u; // first 0001: bitcoin, second 0003: version 3. - const static uint32_t CURRENT_VERSION = 0x00010004u; // first 0001: bitcoin, second 0003: version 4. + const static uint32_t BYTES_VERSION = + 0x00010003u; // first 0001: bitcoin, second 0003: version 3. + const static uint32_t CURRENT_VERSION = + 0x00010004u; // first 0001: bitcoin, second 0003: version 4. // Please pay attention to memory alignment when adding / removing fields. - // Please note that changing the Share structure will be incompatible with the old deployment. - // Also, think carefully when removing fields. 
Some fields are not used by BTCPool itself, - // but are important to external statistics programs. - - // TODO: Change to a data structure that is easier to upgrade, such as ProtoBuf. + // Please note that changing the Share structure will be incompatible with the + // old deployment. Also, think carefully when removing fields. Some fields are + // not used by BTCPool itself, but are important to external statistics + // programs. + // TODO: Change to a data structure that is easier to upgrade, such as + // ProtoBuf. - ShareSia() { - set_version(CURRENT_VERSION); - } + ShareSia() { set_version(CURRENT_VERSION); } ShareSia(const ShareSia &r) = default; ShareSia &operator=(const ShareSia &r) = default; - double score() const - { - if (sharediff() == 0 || blkbits() == 0) - { + double score() const { + if (sharediff() == 0 || blkbits() == 0) { return 0.0; } double networkDifficulty = 0.0; BitsToDifficulty(blkbits(), &networkDifficulty); - // Network diff may less than share diff on testnet or regression test network. - // On regression test network, the network diff may be zero. - // But no matter how low the network diff is, you can only dig one block at a time. - if (networkDifficulty < (double)sharediff()) - { + // Network diff may less than share diff on testnet or regression test + // network. On regression test network, the network diff may be zero. But no + // matter how low the network diff is, you can only dig one block at a time. 
+ if (networkDifficulty < (double)sharediff()) { return 1.0; } return (double)sharediff() / networkDifficulty; } - - bool isValid() const - { + bool isValid() const { if (version() != CURRENT_VERSION) { return false; } - if (jobid() == 0 || userid() == 0 || workerhashid() == 0 || - height() == 0 || blkbits() == 0 || sharediff() == 0) - { + if (jobid() == 0 || userid() == 0 || workerhashid() == 0 || height() == 0 || + blkbits() == 0 || sharediff() == 0) { return false; } return true; } - string toString() const - { + string toString() const { double networkDifficulty = 0.0; BitsToDifficulty(blkbits(), &networkDifficulty); - return Strings::Format("share(jobId: %" PRIu64 ", ip: %s, userId: %d, " - "workerId: %" PRId64 ", time: %u/%s, height: %u, " - "blkBits: %08x/%lf, shareDiff: %" PRIu64 ", " - "status: %d/%s)", - jobid(), ip().c_str(), userid(), - workerhashid(), timestamp(), date("%F %T", timestamp()).c_str(), height(), - blkbits(), networkDifficulty, sharediff(), - status(), StratumStatus::toString(status())); + return Strings::Format( + "share(jobId: %" PRIu64 + ", ip: %s, userId: %d, " + "workerId: %" PRId64 + ", time: %u/%s, height: %u, " + "blkBits: %08x/%lf, shareDiff: %" PRIu64 + ", " + "status: %d/%s)", + jobid(), + ip().c_str(), + userid(), + workerhashid(), + timestamp(), + date("%F %T", timestamp()).c_str(), + height(), + blkbits(), + networkDifficulty, + sharediff(), + status(), + StratumStatus::toString(status())); } - - bool SerializeToBuffer(string& data, uint32_t& size) const{ + bool SerializeToBuffer(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size); if (!SerializeToArray((uint8_t *)data.data(), size)) { DLOG(INFO) << "base.SerializeToArray failed!" 
<< std::endl; return false; - } return true; } - bool SerializeToArrayWithLength(string& data, uint32_t& size) const { + bool SerializeToArrayWithLength(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size + sizeof(uint32_t)); - *((uint32_t*)data.data()) = size; - uint8_t * payload = (uint8_t *)data.data(); + *((uint32_t *)data.data()) = size; + uint8_t *payload = (uint8_t *)data.data(); if (!SerializeToArray(payload + sizeof(uint32_t), size)) { - DLOG(INFO) << "base.SerializeToArray failed!"; + DLOG(INFO) << "base.SerializeToArray failed!"; return false; } @@ -174,12 +171,12 @@ class ShareSia : public sharebase::SiaMsg return true; } - bool SerializeToArrayWithVersion(string& data, uint32_t& size) const { + bool SerializeToArrayWithVersion(string &data, uint32_t &size) const { size = ByteSize(); data.resize(size + sizeof(uint32_t)); - uint8_t * payload = (uint8_t *)data.data(); - *((uint32_t*)payload) = version(); + uint8_t *payload = (uint8_t *)data.data(); + *((uint32_t *)payload) = version(); if (!SerializeToArray(payload + sizeof(uint32_t), size)) { DLOG(INFO) << "SerializeToArray failed!"; @@ -190,27 +187,31 @@ class ShareSia : public sharebase::SiaMsg return true; } - bool UnserializeWithVersion(const uint8_t* data, uint32_t size){ + bool UnserializeWithVersion(const uint8_t *data, uint32_t size) { - if(nullptr == data || size <= 0) { + if (nullptr == data || size <= 0) { return false; } - const uint8_t * payload = data; - uint32_t version = *((uint32_t*)payload); + const uint8_t *payload = data; + uint32_t version = *((uint32_t *)payload); if (version == CURRENT_VERSION) { - if (!ParseFromArray((const uint8_t *)(payload + sizeof(uint32_t)), size - sizeof(uint32_t))) { + if (!ParseFromArray( + (const uint8_t *)(payload + sizeof(uint32_t)), + size - sizeof(uint32_t))) { DLOG(INFO) << "share ParseFromArray failed!"; return false; } - } else if ((version == BYTES_VERSION) && size == sizeof(ShareSiaBytesVersion)) { + } else if ( + (version 
== BYTES_VERSION) && size == sizeof(ShareSiaBytesVersion)) { - ShareSiaBytesVersion* share = (ShareSiaBytesVersion*) payload; + ShareSiaBytesVersion *share = (ShareSiaBytesVersion *)payload; if (share->checkSum() != share->checkSum_) { - DLOG(INFO) << "checkSum mismatched! checkSum_: " << share->checkSum_<< ", checkSum(): " << share->checkSum(); + DLOG(INFO) << "checkSum mismatched! checkSum_: " << share->checkSum_ + << ", checkSum(): " << share->checkSum(); return false; } @@ -236,16 +237,12 @@ class ShareSia : public sharebase::SiaMsg return true; } - uint32_t getsharelength() { - return IsInitialized() ? ByteSize() : 0; - } - + uint32_t getsharelength() { return IsInitialized() ? ByteSize() : 0; } }; -//static_assert(sizeof(ShareSia) == 80, "ShareBitcoin should be 80 bytes"); +// static_assert(sizeof(ShareSia) == 80, "ShareBitcoin should be 80 bytes"); -class StratumJobSia : public StratumJob -{ +class StratumJobSia : public StratumJob { public: uint32_t nTime_; string blockHashForMergedMining_; @@ -268,8 +265,12 @@ struct StratumTraitsSia { using JobDiffType = uint64_t; struct LocalJobType : public LocalJob { LocalJobType(uint64_t jobId, uint8_t shortJobId) - : LocalJob(jobId), shortJobId_(shortJobId), jobDifficulty_(0) {} - bool operator==(uint8_t shortJobId) const { return shortJobId_ == shortJobId; } + : LocalJob(jobId) + , shortJobId_(shortJobId) + , jobDifficulty_(0) {} + bool operator==(uint8_t shortJobId) const { + return shortJobId_ == shortJobId; + } uint8_t shortJobId_; uint64_t jobDifficulty_; }; diff --git a/src/simulator/SimulatorMain.cc b/src/simulator/SimulatorMain.cc index e5c4c93b3..e93ac9dbd 100644 --- a/src/simulator/SimulatorMain.cc +++ b/src/simulator/SimulatorMain.cc @@ -47,12 +47,13 @@ using namespace libconfig; void usage() { fprintf(stderr, BIN_VERSION_STRING("simulator")); - fprintf(stderr, "Usage:\tsimulator -c \"simulator.cfg\" [-l ]\n"); + fprintf( + stderr, "Usage:\tsimulator -c \"simulator.cfg\" [-l ]\n"); } int main(int argc, 
char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int c; if (argc <= 1) { @@ -61,15 +62,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:h")) != -1) { switch (c) { - case 'c': - optConf = optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf = optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -82,25 +84,24 @@ int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. - FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("simulator"); // Read the file. If there is an error, report it and exit. libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." 
<< std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } // lock cfg file: @@ -112,7 +113,7 @@ int main(int argc, char **argv) { }*/ // ignore SIGPIPE, avoiding process be killed - signal(SIGPIPE, SIG_IGN); + signal(SIGPIPE, SIG_IGN); try { int32_t port = 3333; @@ -132,15 +133,16 @@ int main(int argc, char **argv) { StratumClient::registerFactory("ETH"); // new StratumClientWrapper - auto wrapper = boost::make_unique(cfg.lookup("simulator.ss_ip").c_str(), - (unsigned short)port, numConns, - cfg.lookup("simulator.username"), - cfg.lookup("simulator.minername_prefix"), - passwd, - cfg.lookup("simulator.type")); + auto wrapper = boost::make_unique( + cfg.lookup("simulator.ss_ip").c_str(), + (unsigned short)port, + numConns, + cfg.lookup("simulator.username"), + cfg.lookup("simulator.minername_prefix"), + passwd, + cfg.lookup("simulator.type")); wrapper->run(); - } - catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/slparser/ShareLogParserMain.cc b/src/slparser/ShareLogParserMain.cc index 99d60a0c5..7763cacc4 100644 --- a/src/slparser/ShareLogParserMain.cc +++ b/src/slparser/ShareLogParserMain.cc @@ -69,99 +69,148 @@ void handler(int sig) { void usage() { fprintf(stderr, BIN_VERSION_STRING("slparser")); fprintf(stderr, "Usage:\n\tslparser -c \"slparser.cfg\" -l \"log_dir\"\n"); - fprintf(stderr, "\tslparser -c \"slparser.cfg\" -l \"log_dir2\" -d \"20160830\"\n"); - fprintf(stderr, "\tslparser -c \"slparser.cfg\" -l \"log_dir3\" -d \"20160830\" -u \"puid(0: dump all, >0: someone's)\"\n"); + fprintf( + stderr, + "\tslparser -c \"slparser.cfg\" -l \"log_dir2\" -d \"20160830\"\n"); + fprintf( + stderr, + 
"\tslparser -c \"slparser.cfg\" -l \"log_dir3\" -d \"20160830\" -u " + "\"puid(0: dump all, >0: someone's)\"\n"); } -std::shared_ptr newShareLogDumper(const string &chainType, const string &dataDir, - time_t timestamp, const std::set &uids) -{ +std::shared_ptr newShareLogDumper( + const string &chainType, + const string &dataDir, + time_t timestamp, + const std::set &uids) { #if defined(CHAIN_TYPE_STR) if (CHAIN_TYPE_STR == chainType) -#else +#else if (false) #endif { - return std::make_shared(chainType.c_str(), dataDir, timestamp, uids); - } - else if (chainType == "ETH") { - return std::make_shared(chainType.c_str(), dataDir, timestamp, uids); - } - else if (chainType == "BTM") { - return std::make_shared(chainType.c_str(), dataDir, timestamp, uids); - } - else if (chainType == "DCR") { - return std::make_shared(chainType.c_str(), dataDir, timestamp, uids); - } - else { + return std::make_shared( + chainType.c_str(), dataDir, timestamp, uids); + } else if (chainType == "ETH") { + return std::make_shared( + chainType.c_str(), dataDir, timestamp, uids); + } else if (chainType == "BTM") { + return std::make_shared( + chainType.c_str(), dataDir, timestamp, uids); + } else if (chainType == "DCR") { + return std::make_shared( + chainType.c_str(), dataDir, timestamp, uids); + } else { LOG(FATAL) << "newShareLogDumper: unknown chain type " << chainType; return nullptr; } } -std::shared_ptr newShareLogParser(const string &chainType, const string &dataDir, - time_t timestamp, const MysqlConnectInfo &poolDBInfo, - const int dupShareTrackingHeight) -{ +std::shared_ptr newShareLogParser( + const string &chainType, + const string &dataDir, + time_t timestamp, + const MysqlConnectInfo &poolDBInfo, + const int dupShareTrackingHeight, + bool acceptStale) { #if defined(CHAIN_TYPE_STR) if (CHAIN_TYPE_STR == chainType) -#else +#else if (false) -#endif -{ - return std::make_shared(chainType.c_str(), dataDir, timestamp, poolDBInfo, nullptr); - } - else if (chainType == "ETH") { - 
return std::make_shared(chainType.c_str(), dataDir, timestamp, poolDBInfo, - std::make_shared(dupShareTrackingHeight)); - } - else if (chainType == "BTM") { - return std::make_shared(chainType.c_str(), dataDir, timestamp, poolDBInfo, - std::make_shared(dupShareTrackingHeight)); - } - else if (chainType == "DCR") { - return std::make_shared(chainType.c_str(), dataDir, timestamp, poolDBInfo, nullptr); - } - else { +#endif + { + return std::make_shared( + chainType.c_str(), + dataDir, + timestamp, + poolDBInfo, + nullptr, + acceptStale); + } else if (chainType == "ETH") { + return std::make_shared( + chainType.c_str(), + dataDir, + timestamp, + poolDBInfo, + std::make_shared(dupShareTrackingHeight), + acceptStale); + } else if (chainType == "BTM") { + return std::make_shared( + chainType.c_str(), + dataDir, + timestamp, + poolDBInfo, + std::make_shared(dupShareTrackingHeight), + acceptStale); + } else if (chainType == "DCR") { + return std::make_shared( + chainType.c_str(), + dataDir, + timestamp, + poolDBInfo, + nullptr, + acceptStale); + } else { LOG(FATAL) << "newShareLogParser: unknown chain type " << chainType; return nullptr; } } -std::shared_ptr newShareLogParserServer(const string &chainType, const string &dataDir, - const string &httpdHost, unsigned short httpdPort, - const MysqlConnectInfo &poolDBInfo, - const uint32_t kFlushDBInterval, - const int dupShareTrackingHeight) -{ +std::shared_ptr newShareLogParserServer( + const string &chainType, + const string &dataDir, + const string &httpdHost, + unsigned short httpdPort, + const MysqlConnectInfo &poolDBInfo, + const uint32_t kFlushDBInterval, + const int dupShareTrackingHeight, + bool acceptStale) { #if defined(CHAIN_TYPE_STR) if (CHAIN_TYPE_STR == chainType) -#else +#else if (false) -#endif +#endif { - return std::make_shared(chainType.c_str(), dataDir, - httpdHost, httpdPort, - poolDBInfo, kFlushDBInterval, nullptr); - } - else if (chainType == "ETH") { - return std::make_shared(chainType.c_str(), dataDir, 
- httpdHost, httpdPort, - poolDBInfo, kFlushDBInterval, - std::make_shared(dupShareTrackingHeight)); - } - else if (chainType == "BTM") { - return std::make_shared(chainType.c_str(), dataDir, - httpdHost, httpdPort, - poolDBInfo, kFlushDBInterval, - std::make_shared(dupShareTrackingHeight)); - } - else if (chainType == "DCR") { - return std::make_shared(chainType.c_str(), dataDir, - httpdHost, httpdPort, - poolDBInfo, kFlushDBInterval, nullptr); - } - else { + return std::make_shared( + chainType.c_str(), + dataDir, + httpdHost, + httpdPort, + poolDBInfo, + kFlushDBInterval, + nullptr, + acceptStale); + } else if (chainType == "ETH") { + return std::make_shared( + chainType.c_str(), + dataDir, + httpdHost, + httpdPort, + poolDBInfo, + kFlushDBInterval, + std::make_shared(dupShareTrackingHeight), + acceptStale); + } else if (chainType == "BTM") { + return std::make_shared( + chainType.c_str(), + dataDir, + httpdHost, + httpdPort, + poolDBInfo, + kFlushDBInterval, + std::make_shared(dupShareTrackingHeight), + acceptStale); + } else if (chainType == "DCR") { + return std::make_shared( + chainType.c_str(), + dataDir, + httpdHost, + httpdPort, + poolDBInfo, + kFlushDBInterval, + nullptr, + acceptStale); + } else { LOG(FATAL) << "newShareLogParserServer: unknown chain type " << chainType; return nullptr; } @@ -169,9 +218,9 @@ std::shared_ptr newShareLogParserServer(const string &chai int main(int argc, char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int32_t optDate = 0; - int32_t optPUID = -1; // pool user id + int32_t optPUID = -1; // pool user id int c; if (argc <= 1) { @@ -180,21 +229,22 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:d:u:h")) != -1) { switch (c) { - case 'c': - optConf = optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'd': - optDate = atoi(optarg); - break; - case 'u': - optPUID = atoi(optarg); - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf 
= optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'd': + optDate = atoi(optarg); + break; + case 'u': + optPUID = atoi(optarg); + break; + case 'h': + default: + usage(); + exit(0); } } @@ -207,25 +257,24 @@ int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. - FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("slparser"); // Read the file. If there is an error, report it and exit. libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." << std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } try { @@ -244,24 +293,29 @@ int main(int argc, char **argv) { { int32_t poolDBPort = 3306; cfg.lookupValue("pooldb.port", poolDBPort); - poolDBInfo = new MysqlConnectInfo(cfg.lookup("pooldb.host"), poolDBPort, - cfg.lookup("pooldb.username"), - cfg.lookup("pooldb.password"), - cfg.lookup("pooldb.dbname")); + poolDBInfo = new MysqlConnectInfo( + cfg.lookup("pooldb.host"), + poolDBPort, + cfg.lookup("pooldb.username"), + cfg.lookup("pooldb.password"), + cfg.lookup("pooldb.dbname")); } // chain type string chainType = cfg.lookup("sharelog.chain_type"); + + // whether stale shares are accepted + bool acceptStale = false; + 
cfg.lookupValue("sharelog.accept_stale", acceptStale); + // Track duplicate shares within N blocks. int32_t dupShareTrackingHeight = 3; - cfg.lookupValue("dup_share_checker.tracking_height_number", dupShareTrackingHeight); - - // The hard fork Constantinople of Ethereum mainnet has been delayed. - // So set a default height that won't arrive (9999999). - // The user can change the height in the configuration file - // after the fork height is determined. + cfg.lookupValue( + "dup_share_checker.tracking_height_number", dupShareTrackingHeight); + + // The hard fork Constantinople of Ethereum mainnet if (chainType == "ETH") { - int constantinopleHeight = 9999999; + int constantinopleHeight = 7280000; cfg.lookupValue("sharelog.constantinople_height", constantinopleHeight); EthConsensus::setHardForkConstantinopleHeight(constantinopleHeight); } @@ -270,15 +324,18 @@ int main(int argc, char **argv) { // dump shares to stdout ////////////////////////////////////////////////////////////////////////////// if (optDate != 0 && optPUID != -1) { - const string tsStr = Strings::Format("%04d-%02d-%02d 00:00:00", - optDate/10000, - optDate/100 % 100, optDate % 100); + const string tsStr = Strings::Format( + "%04d-%02d-%02d 00:00:00", + optDate / 10000, + optDate / 100 % 100, + optDate % 100); const time_t ts = str2time(tsStr.c_str(), "%F %T"); std::set uids; if (optPUID > 0) - uids.insert(optPUID); + uids.insert(optPUID); - std::shared_ptr sldumper = newShareLogDumper(chainType, cfg.lookup("sharelog.data_dir"), ts, uids); + std::shared_ptr sldumper = newShareLogDumper( + chainType, cfg.lookup("sharelog.data_dir"), ts, uids); sldumper->dump2stdout(); google::ShutdownGoogleLogging(); @@ -289,15 +346,20 @@ int main(int argc, char **argv) { // re-run someday's share bin log ////////////////////////////////////////////////////////////////////////////// if (optDate != 0) { - const string tsStr = Strings::Format("%04d-%02d-%02d 00:00:00", - optDate/10000, - optDate/100 % 100, optDate % 
100); + const string tsStr = Strings::Format( + "%04d-%02d-%02d 00:00:00", + optDate / 10000, + optDate / 100 % 100, + optDate % 100); const time_t ts = str2time(tsStr.c_str(), "%F %T"); - std::shared_ptr slparser = newShareLogParser(chainType, - cfg.lookup("sharelog.data_dir"), - ts, *poolDBInfo, - dupShareTrackingHeight); + std::shared_ptr slparser = newShareLogParser( + chainType, + cfg.lookup("sharelog.data_dir"), + ts, + *poolDBInfo, + dupShareTrackingHeight, + acceptStale); do { if (slparser->init() == false) { LOG(ERROR) << "init failure"; @@ -328,21 +390,23 @@ int main(int argc, char **argv) { }*/ signal(SIGTERM, handler); - signal(SIGINT, handler); + signal(SIGINT, handler); int32_t port = 8081; cfg.lookupValue("slparserhttpd.port", port); uint32_t kFlushDBInterval = 20; cfg.lookupValue("slparserhttpd.flush_db_interval", kFlushDBInterval); - gShareLogParserServer = newShareLogParserServer(chainType, - cfg.lookup("sharelog.data_dir"), - cfg.lookup("slparserhttpd.ip"), - port, *poolDBInfo, - kFlushDBInterval, - dupShareTrackingHeight); + gShareLogParserServer = newShareLogParserServer( + chainType, + cfg.lookup("sharelog.data_dir"), + cfg.lookup("slparserhttpd.ip"), + port, + *poolDBInfo, + kFlushDBInterval, + dupShareTrackingHeight, + acceptStale); gShareLogParserServer->run(); - } - catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/slparser/slparser.cfg b/src/slparser/slparser.cfg index b8f6ee70b..dff0c9c72 100644 --- a/src/slparser/slparser.cfg +++ b/src/slparser/slparser.cfg @@ -22,11 +22,11 @@ sharelog = { chain_type = "ETH"; data_dir = "/work/btcpool/data/sharelog"; - # The hard fork Constantinople of Ethereum mainnet has been delayed. - # So set a default height that won't arrive (9999999). - # The user can change the height in the configuration file - # after the fork height is determined. 
- constantinople_height = 9999999; + # Whether stale shares are accepted + accept_stale = false; + + # The hard fork Constantinople of Ethereum mainnet + constantinople_height = 7280000; }; # Used to detect duplicate share attacks on ETH mining. diff --git a/src/sserver/StratumServerMain.cc b/src/sserver/StratumServerMain.cc index 12a226a8b..66300c83b 100644 --- a/src/sserver/StratumServerMain.cc +++ b/src/sserver/StratumServerMain.cc @@ -49,10 +49,8 @@ using namespace libconfig; StratumServer *gStratumServer = nullptr; -void handler(int sig) -{ - if (gStratumServer) - { +void handler(int sig) { + if (gStratumServer) { gStratumServer->stop(); } } @@ -62,21 +60,17 @@ void usage() { fprintf(stderr, "Usage:\tsserver -c \"sserver.cfg\" [-l ]\n"); } -int main(int argc, char **argv) -{ +int main(int argc, char **argv) { char *optLogDir = NULL; char *optConf = NULL; int c; - if (argc <= 1) - { + if (argc <= 1) { usage(); return 1; } - while ((c = getopt(argc, argv, "c:l:h")) != -1) - { - switch (c) - { + while ((c = getopt(argc, argv, "c:l:h")) != -1) { + switch (c) { case 'c': optConf = optarg; break; @@ -100,25 +94,20 @@ int main(int argc, char **argv) // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("sserver"); // Read the file. If there is an error, report it and exit. libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } - catch (const FileIOException &fioex) - { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." 
<< std::endl; return (EXIT_FAILURE); - } - catch (const ParseException &pex) - { + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() << " - " << pex.getError() << std::endl; return (EXIT_FAILURE); @@ -135,9 +124,10 @@ int main(int argc, char **argv) signal(SIGTERM, handler); signal(SIGINT, handler); + // ignore SIGPIPE, avoiding process be killed + signal(SIGPIPE, SIG_IGN); - try - { + try { // check if we are using testnet3 bool isTestnet3 = true; cfg.lookupValue("testnet", isTestnet3); @@ -154,20 +144,19 @@ int main(int argc, char **argv) cfg.lookupValue("sserver.port", port); cfg.lookupValue("sserver.id", serverId); - if (serverId > 0xFFu || serverId == 0) - { + if (serverId > 0xFFu || serverId == 0) { LOG(FATAL) << "invalid server id, range: [1, 255]"; return (EXIT_FAILURE); } - if (cfg.exists("sserver.share_avg_seconds")) - { + if (cfg.exists("sserver.share_avg_seconds")) { cfg.lookupValue("sserver.share_avg_seconds", shareAvgSeconds); } bool isEnableSimulator = false; cfg.lookupValue("sserver.enable_simulator", isEnableSimulator); bool isSubmitInvalidBlock = false; - cfg.lookupValue("sserver.enable_submit_invalid_block", isSubmitInvalidBlock); + cfg.lookupValue( + "sserver.enable_submit_invalid_block", isSubmitInvalidBlock); bool isDevModeEnabled = false; cfg.lookupValue("sserver.enable_dev_mode", isDevModeEnabled); @@ -193,59 +182,61 @@ int main(int argc, char **argv) uint32_t diffAdjustPeriod = 300; cfg.lookupValue("sserver.diff_adjust_period", diffAdjustPeriod); - if (0 == defaultDifficulty || - 0 == maxDifficulty || - 0 == minDifficulty || - 0 == diffAdjustPeriod) - { - LOG(FATAL) << "difficulty settings are not expected: def=" << defaultDifficulty << ", min=" << minDifficulty << ", max=" << maxDifficulty << ", adjustPeriod=" << diffAdjustPeriod; + if (0 == defaultDifficulty || 0 == maxDifficulty || 0 == minDifficulty || + 0 == diffAdjustPeriod) { + LOG(FATAL) << "difficulty settings are not 
expected: def=" + << defaultDifficulty << ", min=" << minDifficulty + << ", max=" << maxDifficulty + << ", adjustPeriod=" << diffAdjustPeriod; return 1; } if ((int32_t)diffAdjustPeriod < (int32_t)shareAvgSeconds) { - LOG(FATAL) << "`diff_adjust_period` should not less than `share_avg_seconds`"; + LOG(FATAL) + << "`diff_adjust_period` should not less than `share_avg_seconds`"; return 1; } - shared_ptr dc = make_shared(defaultDifficulty, maxDifficulty, minDifficulty, shareAvgSeconds, diffAdjustPeriod); + shared_ptr dc = make_shared( + defaultDifficulty, + maxDifficulty, + minDifficulty, + shareAvgSeconds, + diffAdjustPeriod); evthread_use_pthreads(); // new StratumServer - gStratumServer = new StratumServer(cfg.lookup("sserver.ip").c_str(), - (unsigned short)port, - cfg.lookup("kafka.brokers").c_str(), - cfg.lookup("users.list_id_api_url"), - serverId, - fileLastMiningNotifyTime, - isEnableSimulator, - isSubmitInvalidBlock, - isDevModeEnabled, - devFixedDifficulty, - cfg.lookup("sserver.job_topic"), - maxJobDelay, - dc, - cfg.lookup("sserver.solved_share_topic"), - cfg.lookup("sserver.share_topic"), - cfg.lookup("sserver.common_events_topic")); - - if (!gStratumServer->createServer(cfg.lookup("sserver.type"), shareAvgSeconds, cfg)) - { + gStratumServer = new StratumServer( + cfg.lookup("sserver.ip").c_str(), + (unsigned short)port, + cfg.lookup("kafka.brokers").c_str(), + cfg.lookup("users.list_id_api_url"), + serverId, + fileLastMiningNotifyTime, + isEnableSimulator, + isSubmitInvalidBlock, + isDevModeEnabled, + devFixedDifficulty, + cfg.lookup("sserver.job_topic"), + maxJobDelay, + dc, + cfg.lookup("sserver.solved_share_topic"), + cfg.lookup("sserver.share_topic"), + cfg.lookup("sserver.common_events_topic")); + + if (!gStratumServer->createServer( + cfg.lookup("sserver.type"), shareAvgSeconds, cfg)) { LOG(FATAL) << "createServer failed"; return 1; } - if (!gStratumServer->init()) - { + if (!gStratumServer->init()) { LOG(FATAL) << "init failure"; - } - else - { + } 
else { gStratumServer->run(); } delete gStratumServer; - } - catch (std::exception &e) - { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/statshttpd/StatsHttpdMain.cc b/src/statshttpd/StatsHttpdMain.cc index 591e9c79f..8ce33c3bc 100644 --- a/src/statshttpd/StatsHttpdMain.cc +++ b/src/statshttpd/StatsHttpdMain.cc @@ -65,54 +65,107 @@ void handler(int sig) { void usage() { fprintf(stderr, BIN_VERSION_STRING("statshttpd")); - fprintf(stderr, "Usage:\tstatshttpd -c \"statshttpd.cfg\" [-l ]\n"); + fprintf( + stderr, + "Usage:\tstatshttpd -c \"statshttpd.cfg\" [-l ]\n"); } -std::shared_ptr newStatsServer(const string &chainType, const char *kafkaBrokers, - const char *kafkaShareTopic, const char *kafkaCommonEventsTopic, - const string &httpdHost, unsigned short httpdPort, - const MysqlConnectInfo *poolDBInfo, const RedisConnectInfo *redisInfo, - const uint32_t redisConcurrency, const string &redisKeyPrefix, const int redisKeyExpire, - const int redisPublishPolicy, const int redisIndexPolicy, - const time_t kFlushDBInterval, const string &fileLastFlushTime, - const int dupShareTrackingHeight) -{ +std::shared_ptr newStatsServer( + const string &chainType, + const char *kafkaBrokers, + const char *kafkaShareTopic, + const char *kafkaCommonEventsTopic, + const string &httpdHost, + unsigned short httpdPort, + const MysqlConnectInfo *poolDBInfo, + const RedisConnectInfo *redisInfo, + const uint32_t redisConcurrency, + const string &redisKeyPrefix, + const int redisKeyExpire, + const int redisPublishPolicy, + const int redisIndexPolicy, + const time_t kFlushDBInterval, + const string &fileLastFlushTime, + const int dupShareTrackingHeight, + bool acceptStale) { #if defined(CHAIN_TYPE_STR) if (CHAIN_TYPE_STR == chainType) -#else +#else if (false) -#endif +#endif { - return std::make_shared(kafkaBrokers, kafkaShareTopic, kafkaCommonEventsTopic, - httpdHost, httpdPort, poolDBInfo, redisInfo, - redisConcurrency, redisKeyPrefix, 
redisKeyExpire, - redisPublishPolicy, redisIndexPolicy, - kFlushDBInterval, fileLastFlushTime, nullptr); - } - else if (chainType == "ETH") { - return std::make_shared(kafkaBrokers, kafkaShareTopic, kafkaCommonEventsTopic, - httpdHost, httpdPort, poolDBInfo, redisInfo, - redisConcurrency, redisKeyPrefix, redisKeyExpire, - redisPublishPolicy, redisIndexPolicy, - kFlushDBInterval, fileLastFlushTime, - std::make_shared(dupShareTrackingHeight)); - } - else if (chainType == "BTM") { - return std::make_shared(kafkaBrokers, kafkaShareTopic, kafkaCommonEventsTopic, - httpdHost, httpdPort, poolDBInfo, redisInfo, - redisConcurrency, redisKeyPrefix, redisKeyExpire, - redisPublishPolicy, redisIndexPolicy, - kFlushDBInterval, fileLastFlushTime, - std::make_shared(dupShareTrackingHeight)); - } - else if (chainType == "DCR") { - return std::make_shared(kafkaBrokers, kafkaShareTopic, kafkaCommonEventsTopic, - httpdHost, httpdPort, poolDBInfo, redisInfo, - redisConcurrency, redisKeyPrefix, redisKeyExpire, - redisPublishPolicy, redisIndexPolicy, - kFlushDBInterval, fileLastFlushTime, nullptr); - } - else { + return std::make_shared( + kafkaBrokers, + kafkaShareTopic, + kafkaCommonEventsTopic, + httpdHost, + httpdPort, + poolDBInfo, + redisInfo, + redisConcurrency, + redisKeyPrefix, + redisKeyExpire, + redisPublishPolicy, + redisIndexPolicy, + kFlushDBInterval, + fileLastFlushTime, + nullptr, + acceptStale); + } else if (chainType == "ETH") { + return std::make_shared( + kafkaBrokers, + kafkaShareTopic, + kafkaCommonEventsTopic, + httpdHost, + httpdPort, + poolDBInfo, + redisInfo, + redisConcurrency, + redisKeyPrefix, + redisKeyExpire, + redisPublishPolicy, + redisIndexPolicy, + kFlushDBInterval, + fileLastFlushTime, + std::make_shared(dupShareTrackingHeight), + acceptStale); + } else if (chainType == "BTM") { + return std::make_shared( + kafkaBrokers, + kafkaShareTopic, + kafkaCommonEventsTopic, + httpdHost, + httpdPort, + poolDBInfo, + redisInfo, + redisConcurrency, + 
redisKeyPrefix, + redisKeyExpire, + redisPublishPolicy, + redisIndexPolicy, + kFlushDBInterval, + fileLastFlushTime, + std::make_shared(dupShareTrackingHeight), + acceptStale); + } else if (chainType == "DCR") { + return std::make_shared( + kafkaBrokers, + kafkaShareTopic, + kafkaCommonEventsTopic, + httpdHost, + httpdPort, + poolDBInfo, + redisInfo, + redisConcurrency, + redisKeyPrefix, + redisKeyExpire, + redisPublishPolicy, + redisIndexPolicy, + kFlushDBInterval, + fileLastFlushTime, + nullptr, + acceptStale); + } else { LOG(FATAL) << "newStatsServer: unknown chain type " << chainType; return nullptr; } @@ -120,7 +173,7 @@ std::shared_ptr newStatsServer(const string &chainType, const char int main(int argc, char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int c; if (argc <= 1) { @@ -129,15 +182,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:h")) != -1) { switch (c) { - case 'c': - optConf = optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf = optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -150,25 +204,24 @@ int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. - FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; LOG(INFO) << BIN_VERSION_STRING("statshttpd"); // Read the file. If there is an error, report it and exit. 
libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." << std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } // lock cfg file: @@ -180,7 +233,7 @@ int main(int argc, char **argv) { }*/ signal(SIGTERM, handler); - signal(SIGINT, handler); + signal(SIGINT, handler); try { bool useMysql = true; @@ -192,10 +245,12 @@ int main(int argc, char **argv) { if (useMysql) { int32_t poolDBPort = 3306; cfg.lookupValue("pooldb.port", poolDBPort); - poolDBInfo = new MysqlConnectInfo(cfg.lookup("pooldb.host"), poolDBPort, - cfg.lookup("pooldb.username"), - cfg.lookup("pooldb.password"), - cfg.lookup("pooldb.dbname")); + poolDBInfo = new MysqlConnectInfo( + cfg.lookup("pooldb.host"), + poolDBPort, + cfg.lookup("pooldb.username"), + cfg.lookup("pooldb.password"), + cfg.lookup("pooldb.dbname")); } RedisConnectInfo *redisInfo = nullptr; @@ -208,7 +263,8 @@ int main(int argc, char **argv) { if (useRedis) { int32_t redisPort = 6379; cfg.lookupValue("redis.port", redisPort); - redisInfo = new RedisConnectInfo(cfg.lookup("redis.host"), redisPort, cfg.lookup("redis.password")); + redisInfo = new RedisConnectInfo( + cfg.lookup("redis.host"), redisPort, cfg.lookup("redis.password")); cfg.lookupValue("redis.key_prefix", redisKeyPrefix); cfg.lookupValue("redis.key_expire", redisKeyExpire); @@ -216,31 +272,41 @@ int main(int argc, char **argv) { cfg.lookupValue("redis.index_policy", redisIndexPolicy); cfg.lookupValue("redis.concurrency", redisConcurrency); } - + string fileLastFlushTime; int32_t port = 8080; int32_t flushInterval = 20; + bool acceptStale = false; 
int32_t dupShareTrackingHeight = 3; cfg.lookupValue("statshttpd.port", port); cfg.lookupValue("statshttpd.flush_db_interval", flushInterval); cfg.lookupValue("statshttpd.file_last_flush_time", fileLastFlushTime); - cfg.lookupValue("dup_share_checker.tracking_height_number", dupShareTrackingHeight); - gStatsServer = newStatsServer(cfg.lookup("statshttpd.chain_type"), - cfg.lookup("kafka.brokers").c_str(), - cfg.lookup("statshttpd.share_topic").c_str(), - cfg.lookup("statshttpd.common_events_topic").c_str(), - cfg.lookup("statshttpd.ip").c_str(), - (unsigned short)port, poolDBInfo, - redisInfo, redisConcurrency, redisKeyPrefix, - redisKeyExpire, redisPublishPolicy, redisIndexPolicy, - (time_t)flushInterval, fileLastFlushTime, - dupShareTrackingHeight); + cfg.lookupValue("statshttpd.accept_stale", acceptStale); + cfg.lookupValue( + "dup_share_checker.tracking_height_number", dupShareTrackingHeight); + gStatsServer = newStatsServer( + cfg.lookup("statshttpd.chain_type"), + cfg.lookup("kafka.brokers").c_str(), + cfg.lookup("statshttpd.share_topic").c_str(), + cfg.lookup("statshttpd.common_events_topic").c_str(), + cfg.lookup("statshttpd.ip").c_str(), + (unsigned short)port, + poolDBInfo, + redisInfo, + redisConcurrency, + redisKeyPrefix, + redisKeyExpire, + redisPublishPolicy, + redisIndexPolicy, + (time_t)flushInterval, + fileLastFlushTime, + dupShareTrackingHeight, + acceptStale); if (gStatsServer->init()) { - gStatsServer->run(); + gStatsServer->run(); } - } - catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/src/statshttpd/statshttpd.cfg b/src/statshttpd/statshttpd.cfg index b41a62a55..dab7994c8 100644 --- a/src/statshttpd/statshttpd.cfg +++ b/src/statshttpd/statshttpd.cfg @@ -32,6 +32,9 @@ statshttpd = { use_mysql = true; # write mining workers' info to redis use_redis = false; + + # Whether stale shares are accepted + accept_stale = false; }; # Used to detect duplicate share attacks on 
ETH mining. diff --git a/test/TestBitcoinUtils.cc b/test/TestBitcoinUtils.cc index ce649ac1b..7b28cf1c6 100644 --- a/test/TestBitcoinUtils.cc +++ b/test/TestBitcoinUtils.cc @@ -27,14 +27,14 @@ #include "bitcoin/BitcoinUtils.h" - -//////////////////////////////// Block Rewards ///////////////////////////////// +//////////////////////////////// Block Rewards +//////////////////////////////////// TEST(BitcoinUtils, GetBlockReward) { // using mainnet SelectParams(CBaseChainParams::MAIN); auto consensus = Params().GetConsensus(); int64_t reward = 0; - + reward = GetBlockReward(1, consensus); ASSERT_EQ(reward, 5000000000); // 50 BTC @@ -60,73 +60,73 @@ TEST(BitcoinUtils, GetBlockReward) { ASSERT_EQ(reward, 1250000000); // 12.5 BTC reward = GetBlockReward(629999, consensus); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ(reward, 1250000000); // 12.5 BTC - #else - ASSERT_EQ(reward, 100000000); // 1 UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ(reward, 1250000000); // 12.5 BTC +#else + ASSERT_EQ(reward, 100000000); // 1 UBTC +#endif reward = GetBlockReward(630000, consensus); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ(reward, 625000000); // 6.25 BTC - #else - ASSERT_EQ(reward, 100000000); // 1 UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ(reward, 625000000); // 6.25 BTC +#else + ASSERT_EQ(reward, 100000000); // 1 UBTC +#endif reward = GetBlockReward(700000, consensus); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ(reward, 625000000); // 6.25 BTC - #else - ASSERT_EQ(reward, 100000000); // 1 UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ(reward, 625000000); // 6.25 BTC +#else + ASSERT_EQ(reward, 100000000); // 1 UBTC +#endif reward = GetBlockReward(5000000, consensus); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ(reward, 596); // 596 satoshi - #else - ASSERT_EQ(reward, 25000000); // 0.25 UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ(reward, 596); // 596 satoshi +#else + ASSERT_EQ(reward, 25000000); // 0.25 UBTC +#endif reward = GetBlockReward(6719999, consensus); - #ifndef 
CHAIN_TYPE_UBTC - ASSERT_EQ(reward, 2); // 2 satoshi - #else - ASSERT_EQ(reward, 12500000); // 0.125 UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ(reward, 2); // 2 satoshi +#else + ASSERT_EQ(reward, 12500000); // 0.125 UBTC +#endif // The 32th halvings. reward = GetBlockReward(6720000, consensus); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ(reward, 1); // 1 satoshi - #else - ASSERT_EQ(reward, 12500000); // 0.125 UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ(reward, 1); // 1 satoshi +#else + ASSERT_EQ(reward, 12500000); // 0.125 UBTC +#endif reward = GetBlockReward(6929999, consensus); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ(reward, 1); // 1 satoshi - #else - ASSERT_EQ(reward, 12500000); // 0.125 UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ(reward, 1); // 1 satoshi +#else + ASSERT_EQ(reward, 12500000); // 0.125 UBTC +#endif // The 33th and the lastest halvings. reward = GetBlockReward(6930000, consensus); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ(reward, 0); // 0 satoshi - #else - ASSERT_EQ(reward, 12500000); // 0.125 UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ(reward, 0); // 0 satoshi +#else + ASSERT_EQ(reward, 12500000); // 0.125 UBTC +#endif // The 63th halvings (in fact does not exist). // Detects if the calculation method is affected by the int64_t sign bit. // If the method is affected by the sign bit, -2 may be returned. 
reward = GetBlockReward(13300000, consensus); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ(reward, 0); // 0 satoshi - #else - ASSERT_EQ(reward, 1562500); // 0.015625 UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ(reward, 0); // 0 satoshi +#else + ASSERT_EQ(reward, 1562500); // 0.015625 UBTC +#endif reward = GetBlockReward(70000000, consensus); - ASSERT_EQ(reward, 0); // 0 satoshi + ASSERT_EQ(reward, 0); // 0 satoshi } diff --git a/test/TestCommon.cc b/test/TestCommon.cc index 980cb6514..352dd5978 100644 --- a/test/TestCommon.cc +++ b/test/TestCommon.cc @@ -37,41 +37,45 @@ TEST(Common, score2Str) { // 10e-25 - ASSERT_EQ(score2Str(0.0000000000000000000000001), "0.0000000000000000000000001"); - ASSERT_EQ(score2Str(0.0000000000000000000000009), "0.0000000000000000000000009"); - ASSERT_EQ(score2Str(0.000000000000000000000001), "0.0000000000000000000000010"); - ASSERT_EQ(score2Str(0.00000000000000000000001), "0.0000000000000000000000100"); - ASSERT_EQ(score2Str(0.0000000000000000000001), "0.0000000000000000000001000"); + ASSERT_EQ( + score2Str(0.0000000000000000000000001), "0.0000000000000000000000001"); + ASSERT_EQ( + score2Str(0.0000000000000000000000009), "0.0000000000000000000000009"); + ASSERT_EQ( + score2Str(0.000000000000000000000001), "0.0000000000000000000000010"); + ASSERT_EQ( + score2Str(0.00000000000000000000001), "0.0000000000000000000000100"); + ASSERT_EQ(score2Str(0.0000000000000000000001), "0.0000000000000000000001000"); ASSERT_EQ(score2Str(0.000000000000000000001), "0.0000000000000000000010000"); - ASSERT_EQ(score2Str(0.00000000000000000001), "0.0000000000000000000100000"); - ASSERT_EQ(score2Str(0.0000000000000000001), "0.0000000000000000001000000"); - ASSERT_EQ(score2Str(0.000000000000000001), "0.0000000000000000010000000"); + ASSERT_EQ(score2Str(0.00000000000000000001), "0.0000000000000000000100000"); + ASSERT_EQ(score2Str(0.0000000000000000001), "0.0000000000000000001000000"); + ASSERT_EQ(score2Str(0.000000000000000001), "0.0000000000000000010000000"); 
ASSERT_EQ(score2Str(0.00000000000000001), "0.0000000000000000100000000"); - ASSERT_EQ(score2Str(0.0000000000000001), "0.0000000000000001000000000"); - ASSERT_EQ(score2Str(0.000000000000001), "0.0000000000000010000000000"); - ASSERT_EQ(score2Str(0.00000000000001), "0.0000000000000100000000000"); + ASSERT_EQ(score2Str(0.0000000000000001), "0.0000000000000001000000000"); + ASSERT_EQ(score2Str(0.000000000000001), "0.0000000000000010000000000"); + ASSERT_EQ(score2Str(0.00000000000001), "0.0000000000000100000000000"); ASSERT_EQ(score2Str(0.0000000000001), "0.0000000000001000000000000"); - ASSERT_EQ(score2Str(0.000000000001), "0.0000000000010000000000000"); - ASSERT_EQ(score2Str(0.00000000001), "0.0000000000100000000000000"); + ASSERT_EQ(score2Str(0.000000000001), "0.0000000000010000000000000"); + ASSERT_EQ(score2Str(0.00000000001), "0.0000000000100000000000000"); - ASSERT_EQ(score2Str(0.0000000001), "0.000000000100000000000000"); + ASSERT_EQ(score2Str(0.0000000001), "0.000000000100000000000000"); ASSERT_EQ(score2Str(0.000000001), "0.00000000100000000000000"); - ASSERT_EQ(score2Str(0.00000001), "0.0000000100000000000000"); - ASSERT_EQ(score2Str(0.0000001), "0.000000100000000000000"); - ASSERT_EQ(score2Str(0.000001), "0.00000100000000000000"); + ASSERT_EQ(score2Str(0.00000001), "0.0000000100000000000000"); + ASSERT_EQ(score2Str(0.0000001), "0.000000100000000000000"); + ASSERT_EQ(score2Str(0.000001), "0.00000100000000000000"); ASSERT_EQ(score2Str(0.00001), "0.0000100000000000000"); - ASSERT_EQ(score2Str(0.0001), "0.000100000000000000"); - ASSERT_EQ(score2Str(0.001), "0.00100000000000000"); - ASSERT_EQ(score2Str(0.01), "0.0100000000000000"); + ASSERT_EQ(score2Str(0.0001), "0.000100000000000000"); + ASSERT_EQ(score2Str(0.001), "0.00100000000000000"); + ASSERT_EQ(score2Str(0.01), "0.0100000000000000"); ASSERT_EQ(score2Str(0.1), "0.100000000000000"); ASSERT_EQ(score2Str(1.0), "1.00000000000000"); - ASSERT_EQ(score2Str(10.0), "10.0000000000000"); - ASSERT_EQ(score2Str(100.0), 
"100.000000000000"); - ASSERT_EQ(score2Str(1000.0), "1000.00000000000"); + ASSERT_EQ(score2Str(10.0), "10.0000000000000"); + ASSERT_EQ(score2Str(100.0), "100.000000000000"); + ASSERT_EQ(score2Str(1000.0), "1000.00000000000"); ASSERT_EQ(score2Str(10000.0), "10000.0000000000"); - ASSERT_EQ(score2Str(100000.0), "100000.000000000"); - ASSERT_EQ(score2Str(1000000.0), "1000000.00000000"); - ASSERT_EQ(score2Str(10000000.0), "10000000.0000000"); + ASSERT_EQ(score2Str(100000.0), "100000.000000000"); + ASSERT_EQ(score2Str(1000000.0), "1000000.00000000"); + ASSERT_EQ(score2Str(10000000.0), "10000000.0000000"); ASSERT_EQ(score2Str(100000000.0), "100000000.000000"); ASSERT_EQ(score2Str(123412345678.0), "123412345678.00"); @@ -84,53 +88,78 @@ TEST(Common, BitsToTarget) { bits = 0x1b0404cb; BitsToTarget(bits, target); - ASSERT_EQ(target, uint256S("00000000000404CB000000000000000000000000000000000000000000000000")); + ASSERT_EQ( + target, + uint256S( + "00000000000404CB000000000000000000000000000000000000000000000000")); } TEST(Common, DiffToTarget) { uint256 target, t2; DiffToTarget(1, target, false); DiffToTarget(1, t2, true); - ASSERT_EQ(target, uint256S("00000000ffff0000000000000000000000000000000000000000000000000000")); + ASSERT_EQ( + target, + uint256S( + "00000000ffff0000000000000000000000000000000000000000000000000000")); ASSERT_EQ(target, t2); DiffToTarget(2, target, false); DiffToTarget(2, t2, true); - ASSERT_EQ(target, uint256S("000000007fff8000000000000000000000000000000000000000000000000000")); + ASSERT_EQ( + target, + uint256S( + "000000007fff8000000000000000000000000000000000000000000000000000")); ASSERT_EQ(target, t2); DiffToTarget(1 << 10, target, false); DiffToTarget(1 << 10, t2, true); - ASSERT_EQ(target, uint256S("00000000003fffc0000000000000000000000000000000000000000000000000")); + ASSERT_EQ( + target, + uint256S( + "00000000003fffc0000000000000000000000000000000000000000000000000")); ASSERT_EQ(target, t2); DiffToTarget(1 << 20, target, false); 
DiffToTarget(1 << 20, t2, true); - ASSERT_EQ(target, uint256S("0000000000000ffff00000000000000000000000000000000000000000000000")); + ASSERT_EQ( + target, + uint256S( + "0000000000000ffff00000000000000000000000000000000000000000000000")); ASSERT_EQ(target, t2); DiffToTarget(1 << 30, target, false); DiffToTarget(1 << 30, t2, true); - ASSERT_EQ(target, uint256S("0000000000000003fffc00000000000000000000000000000000000000000000")); + ASSERT_EQ( + target, + uint256S( + "0000000000000003fffc00000000000000000000000000000000000000000000")); ASSERT_EQ(target, t2); DiffToTarget(1ll << 63, target, false); DiffToTarget(1ll << 63, t2, true); - ASSERT_EQ(target, uint256S("000000000000000000000001fffe000000000000000000000000000000000000")); + ASSERT_EQ( + target, + uint256S( + "000000000000000000000001fffe000000000000000000000000000000000000")); ASSERT_EQ(target, t2); } TEST(Common, uint256) { uint256 u1, u2; - u1 = uint256S("00000000000000000392381eb1be66cd8ef9e2143a0e13488875b3e1649a3dc9"); - u2 = uint256S("00000000000000000392381eb1be66cd8ef9e2143a0e13488875b3e1649a3dc9"); + u1 = uint256S( + "00000000000000000392381eb1be66cd8ef9e2143a0e13488875b3e1649a3dc9"); + u2 = uint256S( + "00000000000000000392381eb1be66cd8ef9e2143a0e13488875b3e1649a3dc9"); ASSERT_EQ(UintToArith256(u1) == UintToArith256(u2), true); ASSERT_EQ(UintToArith256(u1) >= UintToArith256(u2), true); - ASSERT_EQ(UintToArith256(u1) < UintToArith256(u2), false); + ASSERT_EQ(UintToArith256(u1) < UintToArith256(u2), false); - u1 = uint256S("00000000000000000392381eb1be66cd8ef9e2143a0e13488875b3e1649a3dc9"); - u2 = uint256S("000000000000000000cc35a4f0ebd7b5c8165b28d73e6369f49098c1a632d1a9"); + u1 = uint256S( + "00000000000000000392381eb1be66cd8ef9e2143a0e13488875b3e1649a3dc9"); + u2 = uint256S( + "000000000000000000cc35a4f0ebd7b5c8165b28d73e6369f49098c1a632d1a9"); ASSERT_EQ(UintToArith256(u1) > UintToArith256(u2), true); } @@ -138,153 +167,156 @@ TEST(Common, TargetToDiff) { // 
0x00000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF / // 0x00000000000404CB000000000000000000000000000000000000000000000000 // = 16307.669773817162 (pdiff) - ASSERT_EQ(TargetToDiff("0x00000000000404CB000000000000000000000000000000000000000000000000"), 16307ULL); - -// uint256 t; -// DiffToTarget(pow(2, 0), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 1), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 2), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 3), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 4), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 5), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 6), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 7), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 8), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 9), t); -// printf("%s\n", t.ToString().c_str()); -// -// // 1024 -// DiffToTarget(pow(2, 10), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 11), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 12), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 13), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 14), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 15), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 16), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 17), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 18), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 19), t); -// printf("%s\n", t.ToString().c_str()); -// -// // 1,048,576 -// DiffToTarget(pow(2, 20), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 21), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 
22), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 23), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 24), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 25), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 26), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 27), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 28), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 29), t); -// printf("%s\n", t.ToString().c_str()); -// -// // 1,073,741,824 -// DiffToTarget(pow(2, 30), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 31), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 32), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 33), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 34), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 35), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 36), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 37), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 38), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 39), t); -// printf("%s\n", t.ToString().c_str()); -// -// // 1,099,511,627,776 -// DiffToTarget(pow(2, 40), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 41), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 42), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 43), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 44), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 45), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 46), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 47), t); -// printf("%s\n", t.ToString().c_str()); -// 
DiffToTarget(pow(2, 48), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 49), t); -// printf("%s\n", t.ToString().c_str()); -// -// DiffToTarget(pow(2, 50), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 51), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 52), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 53), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 54), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 55), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 56), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 57), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 58), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 59), t); -// printf("%s\n", t.ToString().c_str()); -// -// DiffToTarget(pow(2, 60), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 61), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 62), t); -// printf("%s\n", t.ToString().c_str()); -// DiffToTarget(pow(2, 63), t); -// printf("%s\n", t.ToString().c_str()); + ASSERT_EQ( + TargetToDiff( + "0x00000000000404CB000000000000000000000000000000000000000000000000"), + 16307ULL); + + // uint256 t; + // DiffToTarget(pow(2, 0), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 1), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 2), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 3), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 4), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 5), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 6), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 7), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 8), t); + // printf("%s\n", 
t.ToString().c_str()); + // DiffToTarget(pow(2, 9), t); + // printf("%s\n", t.ToString().c_str()); + // + // // 1024 + // DiffToTarget(pow(2, 10), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 11), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 12), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 13), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 14), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 15), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 16), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 17), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 18), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 19), t); + // printf("%s\n", t.ToString().c_str()); + // + // // 1,048,576 + // DiffToTarget(pow(2, 20), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 21), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 22), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 23), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 24), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 25), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 26), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 27), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 28), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 29), t); + // printf("%s\n", t.ToString().c_str()); + // + // // 1,073,741,824 + // DiffToTarget(pow(2, 30), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 31), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 32), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 33), t); + // printf("%s\n", 
t.ToString().c_str()); + // DiffToTarget(pow(2, 34), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 35), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 36), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 37), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 38), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 39), t); + // printf("%s\n", t.ToString().c_str()); + // + // // 1,099,511,627,776 + // DiffToTarget(pow(2, 40), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 41), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 42), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 43), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 44), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 45), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 46), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 47), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 48), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 49), t); + // printf("%s\n", t.ToString().c_str()); + // + // DiffToTarget(pow(2, 50), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 51), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 52), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 53), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 54), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 55), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 56), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 57), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 58), t); + // printf("%s\n", t.ToString().c_str()); + // 
DiffToTarget(pow(2, 59), t); + // printf("%s\n", t.ToString().c_str()); + // + // DiffToTarget(pow(2, 60), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 61), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 62), t); + // printf("%s\n", t.ToString().c_str()); + // DiffToTarget(pow(2, 63), t); + // printf("%s\n", t.ToString().c_str()); } TEST(Common, BitsToDifficulty) { // 0x1b0404cb: https://en.bitcoin.it/wiki/Difficulty double d; - BitsToDifficulty(0x1b0404cbu, &d); // diff = 16307.420939 + BitsToDifficulty(0x1b0404cbu, &d); // diff = 16307.420939 ASSERT_EQ((uint64_t)(d * 10000.0), 163074209ull); } @@ -292,8 +324,9 @@ TEST(Common, formatDifficulty) { ASSERT_EQ(formatDifficulty(UINT64_MAX), 9223372036854775808ull); // 2^32 = UINT32_MAX + 1 - ASSERT_EQ(formatDifficulty(UINT32_MAX), (uint64_t)UINT32_MAX + 1); - ASSERT_EQ(formatDifficulty((uint64_t)UINT32_MAX + 1), (uint64_t)UINT32_MAX + 1); + ASSERT_EQ(formatDifficulty(UINT32_MAX), (uint64_t)UINT32_MAX + 1); + ASSERT_EQ( + formatDifficulty((uint64_t)UINT32_MAX + 1), (uint64_t)UINT32_MAX + 1); ASSERT_EQ(formatDifficulty(0), 1ULL); ASSERT_EQ(formatDifficulty(1), 1ULL); @@ -302,12 +335,20 @@ TEST(Common, formatDifficulty) { } TEST(Common, Eth_TargetToDifficulty) { - ASSERT_EQ(Eth_TargetToDifficulty("0x000000029794e0c85b08583ac96ea15f8b6f4d6bbcd1ee76326cd948d541eac3"), 0x62c2d313ull); - ASSERT_EQ(Eth_TargetToDifficulty( "000000029794e0c85b08583ac96ea15f8b6f4d6bbcd1ee76326cd948d541eac3"), 0x62c2d313ull); + ASSERT_EQ( + Eth_TargetToDifficulty( + "0x000000029794e0c85b08583ac96ea15f8b6f4d6bbcd1ee76326cd948d541eac3"), + 0x62c2d313ull); + ASSERT_EQ( + Eth_TargetToDifficulty( + "000000029794e0c85b08583ac96ea15f8b6f4d6bbcd1ee76326cd948d541eac3"), + 0x62c2d313ull); } TEST(Common, Eth_DifficultyToTarget) { - ASSERT_EQ(Eth_DifficultyToTarget(0x62c2d313ull), "000000029794e0c85b08583ac96ea15f8b6f4d6bbcd1ee76326cd948d541eac3"); + ASSERT_EQ( + Eth_DifficultyToTarget(0x62c2d313ull), + 
"000000029794e0c85b08583ac96ea15f8b6f4d6bbcd1ee76326cd948d541eac3"); } TEST(Common, Bytom_TargetCompactAndDifficulty) { @@ -334,5 +375,4 @@ TEST(Common, Bytom_TargetCompactAndDifficulty) { uint64_t diff = Bytom_TargetCompactToDifficulty(targetCompact); EXPECT_EQ(20UL, diff); } - } diff --git a/test/TestMain.cc b/test/TestMain.cc index 0c08d1d20..5d1191059 100644 --- a/test/TestMain.cc +++ b/test/TestMain.cc @@ -54,7 +54,7 @@ void handler(int sig) { } } -typedef char * CString; +typedef char *CString; int main(int argc, char **argv) { signal(SIGSEGV, handler); @@ -68,19 +68,17 @@ int main(int argc, char **argv) { FLAGS_colorlogtostderr = true; LOG(INFO) << BIN_VERSION_STRING("unittest"); - - CString * newArgv = new CString [argc]; + + CString *newArgv = new CString[argc]; memcpy(newArgv, argv, argc * sizeof(CString)); string testname = "--gtest_filter="; if (argc == 2 && newArgv[1][0] != '-') { testname.append(newArgv[1]); - newArgv[1] = (char*)testname.c_str(); + newArgv[1] = (char *)testname.c_str(); } testing::InitGoogleTest(&argc, newArgv); - + int ret = RUN_ALL_TESTS(); - delete [] newArgv; + delete[] newArgv; return ret; } - - diff --git a/test/TestSimulatorMiner.cc b/test/TestSimulatorMiner.cc index 8ef7fbee0..8b6bfd8fd 100644 --- a/test/TestSimulatorMiner.cc +++ b/test/TestSimulatorMiner.cc @@ -40,15 +40,14 @@ TEST(SIMULATOR, miner) { // config file name: simulator.cfg const char *conf = "simulator.cfg"; libconfig::Config cfg; - try - { + try { cfg.readFile(conf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file: " << conf << std::endl; return; - } catch(const ParseException &pex) { + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; + << " - " << pex.getError() << std::endl; return; } @@ -69,7 +68,9 @@ TEST(SIMULATOR, miner) { // req: mining.subscribe { - sbuf = 
"{\"id\":1,\"method\":\"mining.subscribe\",\"params\":[\"__simulator__/0.1\"]}\n"; + sbuf = + "{\"id\":1,\"method\":\"mining.subscribe\",\"params\":[\"__simulator__/" + "0.1\"]}\n"; // send 1 byte each time, this will trigger read events several times for (size_t i = 0; i < sbuf.size(); i++) { conn.send(sbuf.substr(i, 1)); @@ -83,7 +84,7 @@ TEST(SIMULATOR, miner) { conn.getLine(line); ASSERT_TRUE(JsonNode::parse(line.data(), line.data() + line.size(), jnode)); jresult = jnode["result"]; - jerror = jnode["error"]; + jerror = jnode["error"]; ASSERT_EQ(jerror.type(), Utilities::JS::type::Null); auto resArr = jresult.array(); @@ -99,15 +100,16 @@ TEST(SIMULATOR, miner) { // req: mining.authorize { - sbuf = Strings::Format("{\"id\": 1, \"method\": \"mining.authorize\"," - "\"params\": [\"\%s.simulator_test\", \"\"]}\n", - userName.c_str()); + sbuf = Strings::Format( + "{\"id\": 1, \"method\": \"mining.authorize\"," + "\"params\": [\"\%s.simulator_test\", \"\"]}\n", + userName.c_str()); conn.send(sbuf); conn.getLine(line); ASSERT_TRUE(JsonNode::parse(line.data(), line.data() + line.size(), jnode)); jresult = jnode["result"]; - jerror = jnode["error"]; + jerror = jnode["error"]; ASSERT_TRUE(jresult.boolean()); ASSERT_EQ(jerror.type(), Utilities::JS::type::Null); } @@ -142,18 +144,21 @@ TEST(SIMULATOR, miner) { // req: mining.submit { - sbuf = Strings::Format("{\"params\": [\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" - ",\"id\":4,\"method\": \"mining.submit\"}\n", - userName.c_str(), - latestJobId.c_str(), - extraNonce2, - nTime, nNonce); + sbuf = Strings::Format( + "{\"params\": " + "[\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" + ",\"id\":4,\"method\": \"mining.submit\"}\n", + userName.c_str(), + latestJobId.c_str(), + extraNonce2, + nTime, + nNonce); conn.send(sbuf); conn.getLine(line); ASSERT_TRUE(JsonNode::parse(line.data(), line.data() + line.size(), jnode)); jresult = jnode["result"]; - jerror = jnode["error"]; + jerror = 
jnode["error"]; ASSERT_TRUE(jresult.boolean()); ASSERT_TRUE(jerror.type() == Utilities::JS::type::Null); } @@ -161,12 +166,15 @@ TEST(SIMULATOR, miner) { // req: mining.submit, duplicate // {"id":4,"result":null,"error":(22,"Duplicate share",null)} { - sbuf = Strings::Format("{\"params\": [\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" - ",\"id\":4,\"method\": \"mining.submit\"}\n", - userName.c_str(), - latestJobId.c_str(), - extraNonce2, - nTime, nNonce); + sbuf = Strings::Format( + "{\"params\": " + "[\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" + ",\"id\":4,\"method\": \"mining.submit\"}\n", + userName.c_str(), + latestJobId.c_str(), + extraNonce2, + nTime, + nNonce); conn.send(sbuf); conn.getLine(line); ASSERT_TRUE(JsonNode::parse(line.data(), line.data() + line.size(), jnode)); @@ -177,12 +185,15 @@ TEST(SIMULATOR, miner) { // {"id":4,"result":null,"error":(31,"Time too old",null)} { nTime = time(nullptr) - 86400; - sbuf = Strings::Format("{\"params\": [\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" - ",\"id\":4,\"method\": \"mining.submit\"}\n", - userName.c_str(), - latestJobId.c_str(), - ++extraNonce2, - nTime, nNonce); + sbuf = Strings::Format( + "{\"params\": " + "[\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" + ",\"id\":4,\"method\": \"mining.submit\"}\n", + userName.c_str(), + latestJobId.c_str(), + ++extraNonce2, + nTime, + nNonce); conn.send(sbuf); conn.getLine(line); ASSERT_TRUE(JsonNode::parse(line.data(), line.data() + line.size(), jnode)); @@ -192,13 +203,16 @@ TEST(SIMULATOR, miner) { // req: mining.submit, time too new // {"id":4,"result":null,"error":(32,"Time too new",null)} { - nTime = time(nullptr) + 3600*2; - sbuf = Strings::Format("{\"params\": [\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" - ",\"id\":4,\"method\": \"mining.submit\"}\n", - userName.c_str(), - latestJobId.c_str(), - ++extraNonce2, - nTime, nNonce); + nTime = time(nullptr) + 3600 * 2; + sbuf = 
Strings::Format( + "{\"params\": " + "[\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" + ",\"id\":4,\"method\": \"mining.submit\"}\n", + userName.c_str(), + latestJobId.c_str(), + ++extraNonce2, + nTime, + nNonce); conn.send(sbuf); conn.getLine(line); ASSERT_TRUE(JsonNode::parse(line.data(), line.data() + line.size(), jnode)); @@ -209,19 +223,18 @@ TEST(SIMULATOR, miner) { // {"id":4,"result":null,"error":(21,"Job not found (=stale)",null)} { nTime = time(nullptr); - sbuf = Strings::Format("{\"params\": [\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" - ",\"id\":4,\"method\": \"mining.submit\"}\n", - userName.c_str(), - "s982asd2das", - ++extraNonce2, - nTime, nNonce); + sbuf = Strings::Format( + "{\"params\": " + "[\"%s.simulator_test\",\"%s\",\"%016llx\",\"%08x\",\"%08x\"]" + ",\"id\":4,\"method\": \"mining.submit\"}\n", + userName.c_str(), + "s982asd2das", + ++extraNonce2, + nTime, + nNonce); conn.send(sbuf); conn.getLine(line); ASSERT_TRUE(JsonNode::parse(line.data(), line.data() + line.size(), jnode)); ASSERT_TRUE(jnode["error"].type() != Utilities::JS::type::Null); } - } - - - diff --git a/test/TestStatistics.cc b/test/TestStatistics.cc index d75d07b8a..f7a21593e 100644 --- a/test/TestStatistics.cc +++ b/test/TestStatistics.cc @@ -51,7 +51,6 @@ TEST(StatsWindow, clear) { } } - TEST(StatsWindow, sum01) { int windowSize = 60; StatsWindow sw(windowSize); @@ -68,14 +67,14 @@ TEST(StatsWindow, sum01) { ASSERT_EQ(sw.sum(windowSize - 1, i), i * val); } - for (int i = windowSize; i < windowSize*2; i++) { + for (int i = windowSize; i < windowSize * 2; i++) { ASSERT_EQ(sw.sum(i, 1), 0); } - for (int i = windowSize; i < windowSize*2; i++) { + for (int i = windowSize; i < windowSize * 2; i++) { ASSERT_EQ(sw.sum(i, windowSize), (windowSize - (i % windowSize + 1)) * val); } - for (int i = windowSize*2; i < windowSize*3; i++) { + for (int i = windowSize * 2; i < windowSize * 3; i++) { ASSERT_EQ(sw.sum(i, windowSize), 0); } } @@ -96,19 +95,18 @@ 
TEST(StatsWindow, sum02) { ASSERT_EQ(sw.sum(windowSize - 1, i), i * val); } - for (int i = windowSize; i < windowSize*2; i++) { + for (int i = windowSize; i < windowSize * 2; i++) { ASSERT_EQ(sw.sum(i, 1), 0); } - for (int i = windowSize; i < windowSize*2; i++) { + for (int i = windowSize; i < windowSize * 2; i++) { ASSERT_EQ(sw.sum(i, windowSize), (windowSize - (i % windowSize + 1)) * val); } - for (int i = windowSize*2; i < windowSize*3; i++) { + for (int i = windowSize * 2; i < windowSize * 3; i++) { ASSERT_EQ(sw.sum(i, windowSize), 0); } } - TEST(StatsWindow, sum03) { StatsWindow sw(5); sw.insert(0, 1); @@ -143,26 +141,24 @@ TEST(StatsWindow, sum03) { ASSERT_EQ(sw.sum(8, 5), 35); } - TEST(StatsWindow, map) { int windowSize = 10; StatsWindow sw(windowSize); for (int i = 0; i < windowSize; i++) { sw.insert(i, i * 2); } - ASSERT_EQ(sw.sum(windowSize-1), sw.sum(windowSize-1, windowSize)); + ASSERT_EQ(sw.sum(windowSize - 1), sw.sum(windowSize - 1, windowSize)); - int64_t sum = sw.sum(windowSize-1, windowSize); + int64_t sum = sw.sum(windowSize - 1, windowSize); sw.mapDivide(2); - int64_t sum2 = sw.sum(windowSize-1, windowSize); - ASSERT_EQ(sum/2, sum2); + int64_t sum2 = sw.sum(windowSize - 1, windowSize); + ASSERT_EQ(sum / 2, sum2); sw.mapMultiply(2); - int64_t sum3 = sw.sum(windowSize-1, windowSize); + int64_t sum3 = sw.sum(windowSize - 1, windowSize); ASSERT_EQ(sum, sum3); } - //////////////////////////////// ShareStatsDay /////////////////////////////// TEST(ShareStatsDay, ShareStatsDay) { // using mainnet @@ -184,16 +180,16 @@ TEST(ShareStatsDay, ShareStatsDay) { share.set_blkbits(0x1d00ffffu); // accept - for (uint32_t i = 0; i < 24; i++) { // hour idx range: [0, 23] + for (uint32_t i = 0; i < 24; i++) { // hour idx range: [0, 23] share.set_sharediff(shareValue); - stats.processShare(i, share); + stats.processShare(i, share, false); } // reject share.set_status(StratumStatus::REJECT_NO_REASON); for (uint32_t i = 0; i < 24; i++) { 
share.set_sharediff(shareValue); - stats.processShare(i, share); + stats.processShare(i, share, false); } ShareStats ss; @@ -223,17 +219,17 @@ TEST(ShareStatsDay, ShareStatsDay) { share.set_blkbits(0x18050edcu); // accept - for (uint32_t i = 0; i < 24; i++) { // hour idx range: [0, 23] + for (uint32_t i = 0; i < 24; i++) { // hour idx range: [0, 23] share.set_sharediff(shareValue); - stats.processShare(i, share); -// LOG(INFO) << score2Str(share.score()); + stats.processShare(i, share, false); + // LOG(INFO) << score2Str(share.score()); } // reject share.set_status(StratumStatus::REJECT_NO_REASON); for (uint32_t i = 0; i < 24; i++) { share.set_sharediff(shareValue); - stats.processShare(i, share); + stats.processShare(i, share, false); } ShareStats ss; @@ -242,58 +238,59 @@ TEST(ShareStatsDay, ShareStatsDay) { ASSERT_EQ(ss.shareAccept_, shareValue); ASSERT_EQ(ss.shareReject_, shareValue); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ((uint64_t)ss.earn_, 24697859UL); // satoshi - #else - ASSERT_EQ((uint64_t)ss.earn_, 1975828UL); // satoshi, only for UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ((uint64_t)ss.earn_, 24697859UL); // satoshi +#else + ASSERT_EQ((uint64_t)ss.earn_, 1975828UL); // satoshi, only for UBTC +#endif } stats.getShareStatsDay(&ss); ASSERT_EQ(ss.shareAccept_, shareValue * 24); ASSERT_EQ(ss.shareReject_, shareValue * 24); - #ifndef CHAIN_TYPE_UBTC - ASSERT_EQ((uint64_t)ss.earn_, 592748626UL); // satoshi - #else - ASSERT_EQ((uint64_t)ss.earn_, 47419890UL); // satoshi, only for UBTC - #endif +#ifndef CHAIN_TYPE_UBTC + ASSERT_EQ((uint64_t)ss.earn_, 592748626UL); // satoshi +#else + ASSERT_EQ((uint64_t)ss.earn_, 47419890UL); // satoshi, only for UBTC +#endif } } //////////////////////////////// GlobalShare /////////////////////////////// TEST(GlobalShare, GlobalShareEth) { - ShareEth share1; - share1.set_headerhash(0x12345678); - share1.set_nonce(0x87654321); + ShareEth share1; + share1.set_headerhash(0x12345678); + share1.set_nonce(0x87654321); - 
ShareEth share2; - share2.set_headerhash(0x12345678); - share2.set_nonce(0x33333333); + ShareEth share2; + share2.set_headerhash(0x12345678); + share2.set_nonce(0x33333333); - ShareEth share3; - share3.set_headerhash(0x33333333); - share3.set_nonce(0x55555555); + ShareEth share3; + share3.set_headerhash(0x33333333); + share3.set_nonce(0x55555555); - GlobalShareEth a(share1); - GlobalShareEth b(share2); - GlobalShareEth c(share3); - GlobalShareEth d(share3); + GlobalShareEth a(share1); + GlobalShareEth b(share2); + GlobalShareEth c(share3); + GlobalShareEth d(share3); - ASSERT_EQ(a < a, false); + ASSERT_EQ(a < a, false); - ASSERT_EQ(c < d, false); - ASSERT_EQ(d < c, false); + ASSERT_EQ(c < d, false); + ASSERT_EQ(d < c, false); - ASSERT_EQ(a < b, false); - ASSERT_EQ(b < a, true); + ASSERT_EQ(a < b, false); + ASSERT_EQ(b < a, true); - ASSERT_EQ(a < c, true); - ASSERT_EQ(c < a, false); + ASSERT_EQ(a < c, true); + ASSERT_EQ(c < a, false); - ASSERT_EQ(b < c, true); - ASSERT_EQ(c < b, false); + ASSERT_EQ(b < c, true); + ASSERT_EQ(c < b, false); } -//////////////////////////////// DuplicateShareChecker /////////////////////////////// +//////////////////////////////// DuplicateShareChecker +////////////////////////////////// TEST(DuplicateShareChecker, DuplicateShareCheckerEth) { // same share { diff --git a/test/TestStratum.cc b/test/TestStratum.cc index c11d90c9e..bea71ffa4 100644 --- a/test/TestStratum.cc +++ b/test/TestStratum.cc @@ -43,7 +43,6 @@ #include - TEST(Stratum, jobId2Time) { uint64_t jobId; @@ -51,7 +50,8 @@ TEST(Stratum, jobId2Time) { // ---------- ---------- // uint32_t uint32_t // const string jobIdStr = Strings::Format("%08x%s", (uint32_t)time(nullptr), - // gbtHash.ToString().substr(0, 8).c_str()); + // gbtHash.ToString().substr(0, + // 8).c_str()); jobId = (1469002809ull << 32) | 0x00000000FFFFFFFFull; ASSERT_EQ(jobId2Time(jobId), 1469002809u); @@ -64,16 +64,20 @@ TEST(Stratum, Share) { ASSERT_EQ(s.isValid(), false); ASSERT_EQ(s.score(), 0); - 
ASSERT_EQ(s.toString(), "share(jobId: 0, ip: 0.0.0.0, userId: 0, workerId: 0, " - "time: 0/1970-01-01 00:00:00, height: 0, blkBits: 00000000/inf, " - "shareDiff: 0, status: 0/Share rejected)"); + ASSERT_EQ( + s.toString(), + "share(jobId: 0, ip: 0.0.0.0, userId: 0, workerId: 0, " + "time: 0/1970-01-01 00:00:00, height: 0, blkBits: 00000000/inf, " + "shareDiff: 0, status: 0/Share rejected)"); IpAddress ip; ip.fromIpv4Int(htonl(167772161)); - s.set_ip(ip.toString()); // 167772161 : 10.0.0.1 - ASSERT_EQ(s.toString(), "share(jobId: 0, ip: 10.0.0.1, userId: 0, workerId: 0, " - "time: 0/1970-01-01 00:00:00, height: 0, blkBits: 00000000/inf, " - "shareDiff: 0, status: 0/Share rejected)"); + s.set_ip(ip.toString()); // 167772161 : 10.0.0.1 + ASSERT_EQ( + s.toString(), + "share(jobId: 0, ip: 10.0.0.1, userId: 0, workerId: 0, " + "time: 0/1970-01-01 00:00:00, height: 0, blkBits: 00000000/inf, " + "shareDiff: 0, status: 0/Share rejected)"); } TEST(Stratum, Share2) { @@ -95,7 +99,9 @@ TEST(Stratum, StratumWorker) { int64_t workerId; ASSERT_EQ(w.getUserName("abcd"), "abcd"); - ASSERT_EQ(w.getUserName("abcdabcdabcdabcdabcdabcdabcd"), "abcdabcdabcdabcdabcdabcdabcd"); + ASSERT_EQ( + w.getUserName("abcdabcdabcdabcdabcdabcdabcd"), + "abcdabcdabcdabcdabcdabcdabcd"); ASSERT_EQ(w.getUserName("abcd."), "abcd"); ASSERT_EQ(w.getUserName("abcd.123"), "abcd"); ASSERT_EQ(w.getUserName("abcd.123.456"), "abcd"); @@ -104,23 +110,25 @@ TEST(Stratum, StratumWorker) { // echo -n '123' |openssl dgst -sha256 -binary |openssl dgst -sha256 // w.setUserIDAndNames(INT32_MAX, "abcd.123"); - ASSERT_EQ(w.fullName_, "abcd.123"); - ASSERT_EQ(w.userId_, INT32_MAX); - ASSERT_EQ(w.userName_, "abcd"); + ASSERT_EQ(w.fullName_, "abcd.123"); + ASSERT_EQ(w.userId_, INT32_MAX); + ASSERT_EQ(w.userName_, "abcd"); ASSERT_EQ(w.workerName_, "123"); - // '123' dsha256 : 5a77d1e9612d350b3734f6282259b7ff0a3f87d62cfef5f35e91a5604c0490a3 - // uint256 : a390044c60a5915ef3f5fe2cd6873f0affb7592228f634370b352d61e9d1775a + // '123' 
dsha256 : + // 5a77d1e9612d350b3734f6282259b7ff0a3f87d62cfef5f35e91a5604c0490a3 + // uint256 : + // a390044c60a5915ef3f5fe2cd6873f0affb7592228f634370b352d61e9d1775a u = strtoull("a390044c60a5915e", nullptr, 16); memcpy((uint8_t *)&workerId, (uint8_t *)&u, 8); ASSERT_EQ(w.workerHashId_, workerId); - w.setUserIDAndNames(0, "abcdefg"); - ASSERT_EQ(w.fullName_, "abcdefg.__default__"); - ASSERT_EQ(w.userId_, 0); - ASSERT_EQ(w.userName_, "abcdefg"); + ASSERT_EQ(w.fullName_, "abcdefg.__default__"); + ASSERT_EQ(w.userId_, 0); + ASSERT_EQ(w.userName_, "abcdefg"); ASSERT_EQ(w.workerName_, "__default__"); - // '__default__' dsha256 : e00f302bc411fde77d954283be6904911742f2ac76c8e79abef5dff4e6a19770 + // '__default__' dsha256 : + // e00f302bc411fde77d954283be6904911742f2ac76c8e79abef5dff4e6a19770 // uint256 : 7097a1e6f4dff5be u = strtoull("7097a1e6f4dff5be", nullptr, 16); memcpy((uint8_t *)&workerId, (uint8_t *)&u, 8); @@ -129,52 +137,100 @@ TEST(Stratum, StratumWorker) { // check allow chars w.setUserIDAndNames(0, "abcdefg.azAZ09-._:|^/"); ASSERT_EQ(w.workerName_, "azAZ09-._:|^/"); - ASSERT_EQ(w.fullName_, "abcdefg.azAZ09-._:|^/"); + ASSERT_EQ(w.fullName_, "abcdefg.azAZ09-._:|^/"); // some of them are bad chars w.setUserIDAndNames(0, "abcdefg.~!@#$%^&*()+={}|[]\\<>?,./"); ASSERT_EQ(w.workerName_, "^|./"); - ASSERT_EQ(w.fullName_, "abcdefg.^|./"); + ASSERT_EQ(w.fullName_, "abcdefg.^|./"); // all bad chars w.setUserIDAndNames(0, "abcdefg.~!@#$%&*()+={}[]\\<>?,"); ASSERT_EQ(w.workerName_, "__default__"); - ASSERT_EQ(w.fullName_, "abcdefg.__default__"); + ASSERT_EQ(w.fullName_, "abcdefg.__default__"); } TEST(JobMaker, BitcoinAddress) { // main net SelectParams(CBaseChainParams::MAIN); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"), true); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("1A1zP1eP5QGefi2DMPPfTL5SLmv7DivfNa"), false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"), + true); + 
ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "1A1zP1eP5QGefi2DMPPfTL5SLmv7DivfNa"), + false); #ifdef CHAIN_TYPE_BTC - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4"), true); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("bc1qw508c6qejxtdg4y5r3zarvary4c5xw7kv8f3t4"), false); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3"), true); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("bc1qrp33g0q5c5txsp8arysrx4k6zdkfs4nde4xj0gdcccefvpysxf3qccfmv3"), false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4"), + true); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "bc1qw508c6qejxtdg4y5r3zarvary4c5xw7kv8f3t4"), + false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3"), + true); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "bc1qrp33g0q5c5txsp8arysrx4k6zdkfs4nde4xj0gdcccefvpysxf3qccfmv3"), + false); #endif #ifdef CHAIN_TYPE_BCH - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("bitcoincash:qp3wjpa3tjlj042z2wv7hahsldgwhwy0rq9sywjpyy"), true); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("bitcoincash:qp3wjpa3tjlj142z2wv7hahsldgwhwy0rq9sywjpyy"), false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "bitcoincash:qp3wjpa3tjlj042z2wv7hahsldgwhwy0rq9sywjpyy"), + true); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "bitcoincash:qp3wjpa3tjlj142z2wv7hahsldgwhwy0rq9sywjpyy"), + false); #endif // test net SelectParams(CBaseChainParams::TESTNET); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"), true); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("myxopLJB19oFtNBdrADD5Z34Aw6P8o9P8U"), false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"), + true); + ASSERT_EQ( + 
BitcoinUtils::IsValidDestinationString( + "myxopLJB19oFtNBdrADD5Z34Aw6P8o9P8U"), + false); #ifdef CHAIN_TYPE_BTC - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx"), true); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("tb1qw508d6qejxtdg6y5r3zarvary0c5xw7kxpjzsx"), false); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7"), true); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("tb1qrp33g0q5c5txsp9arysrx4k6zdkgs4nce4xj0gdcccefvpysxf3q0sl5k7"), false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx"), + true); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "tb1qw508d6qejxtdg6y5r3zarvary0c5xw7kxpjzsx"), + false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7"), + true); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "tb1qrp33g0q5c5txsp9arysrx4k6zdkgs4nce4xj0gdcccefvpysxf3q0sl5k7"), + false); #endif #ifdef CHAIN_TYPE_BCH - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("bchtest:qr99vqygcra4umcz374pzzz7vslrgw50ts58trd220"), true); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("bchtest:qr99vqygcra5umcz374pzzz7vslrgw50ts58trd220"), false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "bchtest:qr99vqygcra4umcz374pzzz7vslrgw50ts58trd220"), + true); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "bchtest:qr99vqygcra5umcz374pzzz7vslrgw50ts58trd220"), + false); #endif } @@ -191,11 +247,23 @@ TEST(Stratum, StratumJobBitcoin) { gbt += " \"proposal\""; gbt += " ],"; gbt += " \"version\": 536870912,"; - gbt += " \"previousblockhash\": \"000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c34\","; + gbt += + " \"previousblockhash\": " + "\"000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c34\","; gbt += " \"transactions\": ["; gbt += " {"; - gbt += " \"data\": 
\"01000000010291939c5ae8191c2e7d4ce8eba7d6616a66482e3200037cb8b8c2d0af45b445000000006a47304402204df709d9e149804e358de4b082e41d8bb21b3c9d347241b728b1362aafcb153602200d06d9b6f2eca899f43dcd62ec2efb2d9ce2e10adf02738bb908420d7db93ede012103cae98ab925e20dd6ae1f76e767e9e99bc47b3844095c68600af9c775104fb36cffffffff0290f1770b000000001976a91400dc5fd62f6ee48eb8ecda749eaec6824a780fdd88aca08601000000000017a914eb65573e5dd52d3d950396ccbe1a47daf8f400338700000000\","; - gbt += " \"hash\": \"bd36bd4fff574b573152e7d4f64adf2bb1c9ab0080a12f8544c351f65aca79ff\","; + gbt += + " \"data\": " + "\"01000000010291939c5ae8191c2e7d4ce8eba7d6616a66482e3200037cb8b8c2d0af" + "45b445000000006a47304402204df709d9e149804e358de4b082e41d8bb21b3c9d3472" + "41b728b1362aafcb153602200d06d9b6f2eca899f43dcd62ec2efb2d9ce2e10adf0273" + "8bb908420d7db93ede012103cae98ab925e20dd6ae1f76e767e9e99bc47b3844095c68" + "600af9c775104fb36cffffffff0290f1770b000000001976a91400dc5fd62f6ee48eb8" + "ecda749eaec6824a780fdd88aca08601000000000017a914eb65573e5dd52d3d950396" + "ccbe1a47daf8f400338700000000\","; + gbt += + " \"hash\": " + "\"bd36bd4fff574b573152e7d4f64adf2bb1c9ab0080a12f8544c351f65aca79ff\","; gbt += " \"depends\": ["; gbt += " ],"; gbt += " \"fee\": 10000,"; @@ -206,8 +274,13 @@ TEST(Stratum, StratumJobBitcoin) { gbt += " \"flags\": \"\""; gbt += " },"; gbt += " \"coinbasevalue\": 312659655,"; - gbt += " \"longpollid\": \"000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c341911\","; - gbt += " \"target\": \"000000000000018ae20000000000000000000000000000000000000000000000\","; + gbt += + " \"longpollid\": " + "\"000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c341911" + "\","; + gbt += + " \"target\": " + "\"000000000000018ae20000000000000000000000000000000000000000000000\","; gbt += " \"mintime\": 1469001544,"; gbt += " \"mutable\": ["; gbt += " \"time\","; @@ -224,10 +297,22 @@ TEST(Stratum, StratumJobBitcoin) { blockVersion = 0; SelectParams(CBaseChainParams::TESTNET); - 
ASSERT_EQ(BitcoinUtils::IsValidDestinationString("myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"), true); - - CTxDestination poolPayoutAddrTestnet = BitcoinUtils::DecodeDestination("myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"); - res = sjob.initFromGbt(gbt.c_str(), poolCoinbaseInfo, poolPayoutAddrTestnet, blockVersion, "", RskWork(), 1, false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"), + true); + + CTxDestination poolPayoutAddrTestnet = + BitcoinUtils::DecodeDestination("myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"); + res = sjob.initFromGbt( + gbt.c_str(), + poolCoinbaseInfo, + poolPayoutAddrTestnet, + blockVersion, + "", + RskWork(), + 1, + false); ASSERT_EQ(res, true); const string jsonStr = sjob.serializeToJson(); @@ -235,34 +320,47 @@ TEST(Stratum, StratumJobBitcoin) { res = sjob2.unserializeFromJson(jsonStr.c_str(), jsonStr.length()); ASSERT_EQ(res, true); - ASSERT_EQ(sjob2.prevHash_, uint256S("000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c34")); - ASSERT_EQ(sjob2.prevHashBeStr_, "03979c349a31fd2dc3fe929586643caabb46c03b532b2e774f2ea23900000000"); + ASSERT_EQ( + sjob2.prevHash_, + uint256S("000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979" + "c34")); + ASSERT_EQ( + sjob2.prevHashBeStr_, + "03979c349a31fd2dc3fe929586643caabb46c03b532b2e774f2ea23900000000"); ASSERT_EQ(sjob2.height_, 898487); // 46 bytes, 5 bytes (timestamp), 9 bytes (poolCoinbaseInfo) - // 02000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1e03b7b50d 0402363d58 2f4254432e434f4d2f - ASSERT_EQ(sjob2.coinbase1_.substr(0, 92), - "02000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1e03b7b50d"); + // 02000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1e03b7b50d + // 0402363d58 2f4254432e434f4d2f + ASSERT_EQ( + sjob2.coinbase1_.substr(0, 92), + "0200000001000000000000000000000000000000000000000000000000000000000000" + "0000ffffffff1e03b7b50d"); 
ASSERT_EQ(sjob2.coinbase1_.substr(102, 18), "2f4254432e434f4d2f"); // 0402363d58 -> 0x583d3602 = 1480406530 = 2016-11-29 16:02:10 - uint32_t ts = (uint32_t)strtoull(sjob2.coinbase1_.substr(94, 8).c_str(), nullptr, 16); + uint32_t ts = + (uint32_t)strtoull(sjob2.coinbase1_.substr(94, 8).c_str(), nullptr, 16); ts = HToBe(ts); ASSERT_EQ(ts == time(nullptr) || ts + 1 == time(nullptr), true); - ASSERT_EQ(sjob2.coinbase2_, - "ffffffff" // sequence - "01" // 1 output - // c7cea21200000000 -> 0000000012a2cec7 -> 312659655 - "c7cea21200000000" - // 0x19 -> 25 bytes - "1976a914ca560088c0fb5e6f028faa11085e643e343a8f5c88ac" - // lock_time - "00000000"); + ASSERT_EQ( + sjob2.coinbase2_, + "ffffffff" // sequence + "01" // 1 output + // c7cea21200000000 -> 0000000012a2cec7 -> 312659655 + "c7cea21200000000" + // 0x19 -> 25 bytes + "1976a914ca560088c0fb5e6f028faa11085e643e343a8f5c88ac" + // lock_time + "00000000"); ASSERT_EQ(sjob2.merkleBranch_.size(), 1U); - ASSERT_EQ(sjob2.merkleBranch_[0], uint256S("bd36bd4fff574b573152e7d4f64adf2bb1c9ab0080a12f8544c351f65aca79ff")); + ASSERT_EQ( + sjob2.merkleBranch_[0], + uint256S("bd36bd4fff574b573152e7d4f64adf2bb1c9ab0080a12f8544c351f65aca7" + "9ff")); ASSERT_EQ(sjob2.nVersion_, 536870912); - ASSERT_EQ(sjob2.nBits_, 436308706U); - ASSERT_EQ(sjob2.nTime_, 1469006933U); - ASSERT_EQ(sjob2.minTime_, 1469001544U); + ASSERT_EQ(sjob2.nBits_, 436308706U); + ASSERT_EQ(sjob2.nTime_, 1469006933U); + ASSERT_EQ(sjob2.minTime_, 1469001544U); ASSERT_EQ(sjob2.coinbaseValue_, 312659655); ASSERT_GE(time(nullptr), jobId2Time(sjob2.jobId_)); } @@ -290,16 +388,37 @@ TEST(Stratum, StratumJobWithWitnessCommitment) { gbt += " \"vbavailable\": {"; gbt += " },"; gbt += " \"vbrequired\": 0,"; - gbt += " \"previousblockhash\": \"0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803772\","; + gbt += + " \"previousblockhash\": " + "\"0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803772\","; gbt += " \"transactions\": ["; gbt += " {"; - gbt += " 
\"data\": \"0100000002449f651247d5c09d3020c30616cb1807c268e2c2346d1de28442b89ef34c976d000000006a47304402203eae3868946a312ba712f9c9a259738fee6e3163b05d206e0f5b6c7980"; - gbt += "161756022017827f248432f7313769f120fb3b7a65137bf93496a1ae7d6a775879fbdfb8cd0121027d7b71dab3bb16582c97fc0ccedeacd8f75ebee62fa9c388290294ee3bc3e935feffffffcbc82a21497f8db"; - gbt += "8d57d054fefea52aba502a074ed984efc81ec2ef211194aa6010000006a47304402207f5462295e52fb4213f1e63802d8fe9ec020ac8b760535800564694ea87566a802205ee01096fc9268eac483136ce08250"; - gbt += "6ac951a7dbc9e4ae24dca07ca2a1fdf2f30121023b86e60ef66fe8ace403a0d77d27c80ba9ba5404ee796c47c03c73748e59d125feffffff0286c35b00000000001976a914ab29f668d284fd2d65cec5f098432"; - gbt += "c4ece01055488ac8093dc14000000001976a914ac19d3fd17710e6b9a331022fe92c693fdf6659588ac8dd70f00\","; - gbt += " \"txid\": \"c284853b65e7887c5fd9b635a932e2e0594d19849b22914a8e6fb180fea0954f\","; - gbt += " \"hash\": \"c284853b65e7887c5fd9b635a932e2e0594d19849b22914a8e6fb180fea0954f\","; + gbt += + " \"data\": " + "\"0100000002449f651247d5c09d3020c30616cb1807c268e2c2346d1de28442b89ef3" + "4c976d000000006a47304402203eae3868946a312ba712f9c9a259738fee6e3163b05d" + "206e0f5b6c7980"; + gbt += + "161756022017827f248432f7313769f120fb3b7a65137bf93496a1ae7d6a775879fbdf" + "b8cd0121027d7b71dab3bb16582c97fc0ccedeacd8f75ebee62fa9c388290294ee3bc3" + "e935feffffffcbc82a21497f8db"; + gbt += + "8d57d054fefea52aba502a074ed984efc81ec2ef211194aa6010000006a47304402207" + "f5462295e52fb4213f1e63802d8fe9ec020ac8b760535800564694ea87566a802205ee" + "01096fc9268eac483136ce08250"; + gbt += + "6ac951a7dbc9e4ae24dca07ca2a1fdf2f30121023b86e60ef66fe8ace403a0d77d27c8" + "0ba9ba5404ee796c47c03c73748e59d125feffffff0286c35b00000000001976a914ab" + "29f668d284fd2d65cec5f098432"; + gbt += + "c4ece01055488ac8093dc14000000001976a914ac19d3fd17710e6b9a331022fe92c69" + "3fdf6659588ac8dd70f00\","; + gbt += + " \"txid\": " + "\"c284853b65e7887c5fd9b635a932e2e0594d19849b22914a8e6fb180fea0954f\","; + gbt += + " 
\"hash\": " + "\"c284853b65e7887c5fd9b635a932e2e0594d19849b22914a8e6fb180fea0954f\","; gbt += " \"depends\": ["; gbt += " ],"; gbt += " \"fee\": 37400,"; @@ -307,13 +426,30 @@ TEST(Stratum, StratumJobWithWitnessCommitment) { gbt += " \"weight\": 1488"; gbt += " },"; gbt += " {"; - gbt += " \"data\": \"0100000001043f5e73755b5c6919b4e361f4cae84c8805452de3df265a6e2d3d71cbcb385501000000da0047304402202b14552521cd689556d2e44d914caf2195da37b80de4f8cd0fad9adf"; - gbt += "7ef768ef022026fcddd992f447c39c48c3ce50c5960e2f086ebad455159ffc3e36a5624af2f501483045022100f2b893e495f41b22cd83df6908c2fa4f917fd7bce9f8da14e6ab362042e11f7d022075bc2451e"; - gbt += "1cf2ae2daec0f109a3aceb6558418863070f5e84c945262018503240147522102632178d046673c9729d828cfee388e121f497707f810c131e0d3fc0fe0bd66d62103a0951ec7d3a9da9de171617026442fcd30"; - gbt += "f34d66100fab539853b43f508787d452aeffffffff0240420f000000000017a9143e9a6b79be836762c8ef591cf16b76af1327ced58790dfdf8c0000000017a9148ce5408cfeaddb7ccb2545ded41ef47810945"; + gbt += + " \"data\": " + "\"0100000001043f5e73755b5c6919b4e361f4cae84c8805452de3df265a6e2d3d71cb" + "cb385501000000da0047304402202b14552521cd689556d2e44d914caf2195da37b80d" + "e4f8cd0fad9adf"; + gbt += + "7ef768ef022026fcddd992f447c39c48c3ce50c5960e2f086ebad455159ffc3e36a562" + "4af2f501483045022100f2b893e495f41b22cd83df6908c2fa4f917fd7bce9f8da14e6" + "ab362042e11f7d022075bc2451e"; + gbt += + "1cf2ae2daec0f109a3aceb6558418863070f5e84c94526201850324014752210263217" + "8d046673c9729d828cfee388e121f497707f810c131e0d3fc0fe0bd66d62103a0951ec" + "7d3a9da9de171617026442fcd30"; + gbt += + "f34d66100fab539853b43f508787d452aeffffffff0240420f000000000017a9143e9a" + "6b79be836762c8ef591cf16b76af1327ced58790dfdf8c0000000017a9148ce5408cfe" + "addb7ccb2545ded41ef47810945"; gbt += "4848700000000\","; - gbt += " \"txid\": \"28b1a5c2f0bb667aea38e760b6d55163abc9be9f1f830d9969edfab902d17a0f\","; - gbt += " \"hash\": \"28b1a5c2f0bb667aea38e760b6d55163abc9be9f1f830d9969edfab902d17a0f\","; + gbt += + " 
\"txid\": " + "\"28b1a5c2f0bb667aea38e760b6d55163abc9be9f1f830d9969edfab902d17a0f\","; + gbt += + " \"hash\": " + "\"28b1a5c2f0bb667aea38e760b6d55163abc9be9f1f830d9969edfab902d17a0f\","; gbt += " \"depends\": ["; gbt += " ],"; gbt += " \"fee\": 20000,"; @@ -321,13 +457,30 @@ TEST(Stratum, StratumJobWithWitnessCommitment) { gbt += " \"weight\": 1332"; gbt += " },"; gbt += " {"; - gbt += " \"data\": \"01000000013faf73481d6b96c2385b9a4300f8974b1b30c34be30000c7dcef11f68662de4501000000db00483045022100f9881f4c867b5545f6d7a730ae26f598107171d0f68b860bd973db"; - gbt += "b855e073a002207b511ead1f8be8a55c542ce5d7e91acfb697c7fa2acd2f322b47f177875bffc901483045022100a37aa9998b9867633ab6484ad08b299de738a86ae997133d827717e7ed73d953022011e3f99"; - gbt += "d1bd1856f6a7dc0bf611de6d1b2efb60c14fc5931ba09da01558757f60147522102632178d046673c9729d828cfee388e121f497707f810c131e0d3fc0fe0bd66d62103a0951ec7d3a9da9de171617026442fcd"; - gbt += "30f34d66100fab539853b43f508787d452aeffffffff0240420f000000000017a9148d57003ecbaa310a365f8422602cc507a702197e87806868a90000000017a9148ce5408cfeaddb7ccb2545ded41ef478109"; + gbt += + " \"data\": " + "\"01000000013faf73481d6b96c2385b9a4300f8974b1b30c34be30000c7dcef11f686" + "62de4501000000db00483045022100f9881f4c867b5545f6d7a730ae26f598107171d0" + "f68b860bd973db"; + gbt += + "b855e073a002207b511ead1f8be8a55c542ce5d7e91acfb697c7fa2acd2f322b47f177" + "875bffc901483045022100a37aa9998b9867633ab6484ad08b299de738a86ae997133d" + "827717e7ed73d953022011e3f99"; + gbt += + "d1bd1856f6a7dc0bf611de6d1b2efb60c14fc5931ba09da01558757f60147522102632" + "178d046673c9729d828cfee388e121f497707f810c131e0d3fc0fe0bd66d62103a0951" + "ec7d3a9da9de171617026442fcd"; + gbt += + "30f34d66100fab539853b43f508787d452aeffffffff0240420f000000000017a9148d" + "57003ecbaa310a365f8422602cc507a702197e87806868a90000000017a9148ce5408c" + "feaddb7ccb2545ded41ef478109"; gbt += "454848700000000\","; - gbt += " \"txid\": \"67878210e268d87b4e6587db8c6e367457cea04820f33f01d626adbe5619b3dd\","; - gbt += 
" \"hash\": \"67878210e268d87b4e6587db8c6e367457cea04820f33f01d626adbe5619b3dd\","; + gbt += + " \"txid\": " + "\"67878210e268d87b4e6587db8c6e367457cea04820f33f01d626adbe5619b3dd\","; + gbt += + " \"hash\": " + "\"67878210e268d87b4e6587db8c6e367457cea04820f33f01d626adbe5619b3dd\","; gbt += " \"depends\": ["; gbt += " ],"; gbt += " \"fee\": 20000,"; @@ -339,8 +492,13 @@ TEST(Stratum, StratumJobWithWitnessCommitment) { gbt += " \"flags\": \"\""; gbt += " },"; gbt += " \"coinbasevalue\": 319367518,"; - gbt += " \"longpollid\": \"0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803772604597\","; - gbt += " \"target\": \"0000000000001714480000000000000000000000000000000000000000000000\","; + gbt += + " \"longpollid\": " + "\"0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e98037726045" + "97\","; + gbt += + " \"target\": " + "\"0000000000001714480000000000000000000000000000000000000000000000\","; gbt += " \"mintime\": 1480831053,"; gbt += " \"mutable\": ["; gbt += " \"time\","; @@ -354,15 +512,30 @@ TEST(Stratum, StratumJobWithWitnessCommitment) { gbt += " \"curtime\": 1480834892,"; gbt += " \"bits\": \"1a171448\","; gbt += " \"height\": 1038222,"; - gbt += " \"default_witness_commitment\": \"6a24aa21a9ed842a6d6672504c2b7abb796fdd7cfbd7262977b71b945452e17fbac69ed22bf8\""; + gbt += + " \"default_witness_commitment\": " + "\"6a24aa21a9ed842a6d6672504c2b7abb796fdd7cfbd7262977b71b945452e17fbac6" + "9ed22bf8\""; gbt += "}}"; blockVersion = 0; SelectParams(CBaseChainParams::TESTNET); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"), true); - - CTxDestination poolPayoutAddrTestnet = BitcoinUtils::DecodeDestination("myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"); - res = sjob.initFromGbt(gbt.c_str(), poolCoinbaseInfo, poolPayoutAddrTestnet, blockVersion, "", RskWork(), 1, false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"), + true); + + CTxDestination poolPayoutAddrTestnet = + 
BitcoinUtils::DecodeDestination("myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"); + res = sjob.initFromGbt( + gbt.c_str(), + poolCoinbaseInfo, + poolPayoutAddrTestnet, + blockVersion, + "", + RskWork(), + 1, + false); ASSERT_EQ(res, true); const string jsonStr = sjob.serializeToJson(); @@ -370,28 +543,35 @@ TEST(Stratum, StratumJobWithWitnessCommitment) { res = sjob2.unserializeFromJson(jsonStr.c_str(), jsonStr.length()); ASSERT_EQ(res, true); - ASSERT_EQ(sjob2.prevHash_, uint256S("0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803772")); - ASSERT_EQ(sjob2.prevHashBeStr_, "e98037722f631f70eeb00c155d52e0f3407654b2e5bda1220000004700000000"); + ASSERT_EQ( + sjob2.prevHash_, + uint256S("0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803" + "772")); + ASSERT_EQ( + sjob2.prevHashBeStr_, + "e98037722f631f70eeb00c155d52e0f3407654b2e5bda1220000004700000000"); ASSERT_EQ(sjob2.height_, 1038222); - ASSERT_EQ(sjob2.coinbase2_, - "ffffffff" // sequence - "02" // 2 outputs - // 5e29091300000000 -> 000000001309295e -> 319367518 - "5e29091300000000" - // 0x19 -> 25 bytes - "1976a914ca560088c0fb5e6f028faa11085e643e343a8f5c88ac" - // - "0000000000000000" - // 0x26 -> 38 bytes - "266a24aa21a9ed842a6d6672504c2b7abb796fdd7cfbd7262977b71b945452e17fbac69ed22bf8" - // lock_time - "00000000"); + ASSERT_EQ( + sjob2.coinbase2_, + "ffffffff" // sequence + "02" // 2 outputs + // 5e29091300000000 -> 000000001309295e -> 319367518 + "5e29091300000000" + // 0x19 -> 25 bytes + "1976a914ca560088c0fb5e6f028faa11085e643e343a8f5c88ac" + // + "0000000000000000" + // 0x26 -> 38 bytes + "266a24aa21a9ed842a6d6672504c2b7abb796fdd7cfbd7262977b71b945452e17fbac6" + "9ed22bf8" + // lock_time + "00000000"); ASSERT_EQ(sjob2.nVersion_, 536870912); - ASSERT_EQ(sjob2.nBits_, 0x1a171448U); - ASSERT_EQ(sjob2.nTime_, 1480834892U); - ASSERT_EQ(sjob2.minTime_, 1480831053U); + ASSERT_EQ(sjob2.nBits_, 0x1a171448U); + ASSERT_EQ(sjob2.nTime_, 1480834892U); + ASSERT_EQ(sjob2.minTime_, 1480831053U); 
ASSERT_EQ(sjob2.coinbaseValue_, 319367518); ASSERT_GE(time(nullptr), jobId2Time(sjob2.jobId_)); } @@ -420,16 +600,37 @@ TEST(Stratum, StratumJobWithSegwitPayoutAddr) { gbt += " \"vbavailable\": {"; gbt += " },"; gbt += " \"vbrequired\": 0,"; - gbt += " \"previousblockhash\": \"0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803772\","; + gbt += + " \"previousblockhash\": " + "\"0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803772\","; gbt += " \"transactions\": ["; gbt += " {"; - gbt += " \"data\": \"0100000002449f651247d5c09d3020c30616cb1807c268e2c2346d1de28442b89ef34c976d000000006a47304402203eae3868946a312ba712f9c9a259738fee6e3163b05d206e0f5b6c7980"; - gbt += "161756022017827f248432f7313769f120fb3b7a65137bf93496a1ae7d6a775879fbdfb8cd0121027d7b71dab3bb16582c97fc0ccedeacd8f75ebee62fa9c388290294ee3bc3e935feffffffcbc82a21497f8db"; - gbt += "8d57d054fefea52aba502a074ed984efc81ec2ef211194aa6010000006a47304402207f5462295e52fb4213f1e63802d8fe9ec020ac8b760535800564694ea87566a802205ee01096fc9268eac483136ce08250"; - gbt += "6ac951a7dbc9e4ae24dca07ca2a1fdf2f30121023b86e60ef66fe8ace403a0d77d27c80ba9ba5404ee796c47c03c73748e59d125feffffff0286c35b00000000001976a914ab29f668d284fd2d65cec5f098432"; - gbt += "c4ece01055488ac8093dc14000000001976a914ac19d3fd17710e6b9a331022fe92c693fdf6659588ac8dd70f00\","; - gbt += " \"txid\": \"c284853b65e7887c5fd9b635a932e2e0594d19849b22914a8e6fb180fea0954f\","; - gbt += " \"hash\": \"c284853b65e7887c5fd9b635a932e2e0594d19849b22914a8e6fb180fea0954f\","; + gbt += + " \"data\": " + "\"0100000002449f651247d5c09d3020c30616cb1807c268e2c2346d1de28442b89ef3" + "4c976d000000006a47304402203eae3868946a312ba712f9c9a259738fee6e3163b05d" + "206e0f5b6c7980"; + gbt += + "161756022017827f248432f7313769f120fb3b7a65137bf93496a1ae7d6a775879fbdf" + "b8cd0121027d7b71dab3bb16582c97fc0ccedeacd8f75ebee62fa9c388290294ee3bc3" + "e935feffffffcbc82a21497f8db"; + gbt += + "8d57d054fefea52aba502a074ed984efc81ec2ef211194aa6010000006a47304402207" + 
"f5462295e52fb4213f1e63802d8fe9ec020ac8b760535800564694ea87566a802205ee" + "01096fc9268eac483136ce08250"; + gbt += + "6ac951a7dbc9e4ae24dca07ca2a1fdf2f30121023b86e60ef66fe8ace403a0d77d27c8" + "0ba9ba5404ee796c47c03c73748e59d125feffffff0286c35b00000000001976a914ab" + "29f668d284fd2d65cec5f098432"; + gbt += + "c4ece01055488ac8093dc14000000001976a914ac19d3fd17710e6b9a331022fe92c69" + "3fdf6659588ac8dd70f00\","; + gbt += + " \"txid\": " + "\"c284853b65e7887c5fd9b635a932e2e0594d19849b22914a8e6fb180fea0954f\","; + gbt += + " \"hash\": " + "\"c284853b65e7887c5fd9b635a932e2e0594d19849b22914a8e6fb180fea0954f\","; gbt += " \"depends\": ["; gbt += " ],"; gbt += " \"fee\": 37400,"; @@ -437,13 +638,30 @@ TEST(Stratum, StratumJobWithSegwitPayoutAddr) { gbt += " \"weight\": 1488"; gbt += " },"; gbt += " {"; - gbt += " \"data\": \"0100000001043f5e73755b5c6919b4e361f4cae84c8805452de3df265a6e2d3d71cbcb385501000000da0047304402202b14552521cd689556d2e44d914caf2195da37b80de4f8cd0fad9adf"; - gbt += "7ef768ef022026fcddd992f447c39c48c3ce50c5960e2f086ebad455159ffc3e36a5624af2f501483045022100f2b893e495f41b22cd83df6908c2fa4f917fd7bce9f8da14e6ab362042e11f7d022075bc2451e"; - gbt += "1cf2ae2daec0f109a3aceb6558418863070f5e84c945262018503240147522102632178d046673c9729d828cfee388e121f497707f810c131e0d3fc0fe0bd66d62103a0951ec7d3a9da9de171617026442fcd30"; - gbt += "f34d66100fab539853b43f508787d452aeffffffff0240420f000000000017a9143e9a6b79be836762c8ef591cf16b76af1327ced58790dfdf8c0000000017a9148ce5408cfeaddb7ccb2545ded41ef47810945"; + gbt += + " \"data\": " + "\"0100000001043f5e73755b5c6919b4e361f4cae84c8805452de3df265a6e2d3d71cb" + "cb385501000000da0047304402202b14552521cd689556d2e44d914caf2195da37b80d" + "e4f8cd0fad9adf"; + gbt += + "7ef768ef022026fcddd992f447c39c48c3ce50c5960e2f086ebad455159ffc3e36a562" + "4af2f501483045022100f2b893e495f41b22cd83df6908c2fa4f917fd7bce9f8da14e6" + "ab362042e11f7d022075bc2451e"; + gbt += + "1cf2ae2daec0f109a3aceb6558418863070f5e84c94526201850324014752210263217" + 
"8d046673c9729d828cfee388e121f497707f810c131e0d3fc0fe0bd66d62103a0951ec" + "7d3a9da9de171617026442fcd30"; + gbt += + "f34d66100fab539853b43f508787d452aeffffffff0240420f000000000017a9143e9a" + "6b79be836762c8ef591cf16b76af1327ced58790dfdf8c0000000017a9148ce5408cfe" + "addb7ccb2545ded41ef47810945"; gbt += "4848700000000\","; - gbt += " \"txid\": \"28b1a5c2f0bb667aea38e760b6d55163abc9be9f1f830d9969edfab902d17a0f\","; - gbt += " \"hash\": \"28b1a5c2f0bb667aea38e760b6d55163abc9be9f1f830d9969edfab902d17a0f\","; + gbt += + " \"txid\": " + "\"28b1a5c2f0bb667aea38e760b6d55163abc9be9f1f830d9969edfab902d17a0f\","; + gbt += + " \"hash\": " + "\"28b1a5c2f0bb667aea38e760b6d55163abc9be9f1f830d9969edfab902d17a0f\","; gbt += " \"depends\": ["; gbt += " ],"; gbt += " \"fee\": 20000,"; @@ -451,13 +669,30 @@ TEST(Stratum, StratumJobWithSegwitPayoutAddr) { gbt += " \"weight\": 1332"; gbt += " },"; gbt += " {"; - gbt += " \"data\": \"01000000013faf73481d6b96c2385b9a4300f8974b1b30c34be30000c7dcef11f68662de4501000000db00483045022100f9881f4c867b5545f6d7a730ae26f598107171d0f68b860bd973db"; - gbt += "b855e073a002207b511ead1f8be8a55c542ce5d7e91acfb697c7fa2acd2f322b47f177875bffc901483045022100a37aa9998b9867633ab6484ad08b299de738a86ae997133d827717e7ed73d953022011e3f99"; - gbt += "d1bd1856f6a7dc0bf611de6d1b2efb60c14fc5931ba09da01558757f60147522102632178d046673c9729d828cfee388e121f497707f810c131e0d3fc0fe0bd66d62103a0951ec7d3a9da9de171617026442fcd"; - gbt += "30f34d66100fab539853b43f508787d452aeffffffff0240420f000000000017a9148d57003ecbaa310a365f8422602cc507a702197e87806868a90000000017a9148ce5408cfeaddb7ccb2545ded41ef478109"; + gbt += + " \"data\": " + "\"01000000013faf73481d6b96c2385b9a4300f8974b1b30c34be30000c7dcef11f686" + "62de4501000000db00483045022100f9881f4c867b5545f6d7a730ae26f598107171d0" + "f68b860bd973db"; + gbt += + "b855e073a002207b511ead1f8be8a55c542ce5d7e91acfb697c7fa2acd2f322b47f177" + "875bffc901483045022100a37aa9998b9867633ab6484ad08b299de738a86ae997133d" + 
"827717e7ed73d953022011e3f99"; + gbt += + "d1bd1856f6a7dc0bf611de6d1b2efb60c14fc5931ba09da01558757f60147522102632" + "178d046673c9729d828cfee388e121f497707f810c131e0d3fc0fe0bd66d62103a0951" + "ec7d3a9da9de171617026442fcd"; + gbt += + "30f34d66100fab539853b43f508787d452aeffffffff0240420f000000000017a9148d" + "57003ecbaa310a365f8422602cc507a702197e87806868a90000000017a9148ce5408c" + "feaddb7ccb2545ded41ef478109"; gbt += "454848700000000\","; - gbt += " \"txid\": \"67878210e268d87b4e6587db8c6e367457cea04820f33f01d626adbe5619b3dd\","; - gbt += " \"hash\": \"67878210e268d87b4e6587db8c6e367457cea04820f33f01d626adbe5619b3dd\","; + gbt += + " \"txid\": " + "\"67878210e268d87b4e6587db8c6e367457cea04820f33f01d626adbe5619b3dd\","; + gbt += + " \"hash\": " + "\"67878210e268d87b4e6587db8c6e367457cea04820f33f01d626adbe5619b3dd\","; gbt += " \"depends\": ["; gbt += " ],"; gbt += " \"fee\": 20000,"; @@ -469,8 +704,13 @@ TEST(Stratum, StratumJobWithSegwitPayoutAddr) { gbt += " \"flags\": \"\""; gbt += " },"; gbt += " \"coinbasevalue\": 319367518,"; - gbt += " \"longpollid\": \"0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803772604597\","; - gbt += " \"target\": \"0000000000001714480000000000000000000000000000000000000000000000\","; + gbt += + " \"longpollid\": " + "\"0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e98037726045" + "97\","; + gbt += + " \"target\": " + "\"0000000000001714480000000000000000000000000000000000000000000000\","; gbt += " \"mintime\": 1480831053,"; gbt += " \"mutable\": ["; gbt += " \"time\","; @@ -484,14 +724,29 @@ TEST(Stratum, StratumJobWithSegwitPayoutAddr) { gbt += " \"curtime\": 1480834892,"; gbt += " \"bits\": \"1a171448\","; gbt += " \"height\": 1038222,"; - gbt += " \"default_witness_commitment\": \"6a24aa21a9ed842a6d6672504c2b7abb796fdd7cfbd7262977b71b945452e17fbac69ed22bf8\""; + gbt += + " \"default_witness_commitment\": " + "\"6a24aa21a9ed842a6d6672504c2b7abb796fdd7cfbd7262977b71b945452e17fbac6" + "9ed22bf8\""; gbt += "}}"; 
blockVersion = 0; SelectParams(CBaseChainParams::TESTNET); - ASSERT_EQ(BitcoinUtils::IsValidDestinationString("tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7"), true); - CTxDestination poolPayoutAddrTestnet = BitcoinUtils::DecodeDestination("tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7"); - res = sjob.initFromGbt(gbt.c_str(), poolCoinbaseInfo, poolPayoutAddrTestnet, blockVersion, "", RskWork(), 1, false); + ASSERT_EQ( + BitcoinUtils::IsValidDestinationString( + "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7"), + true); + CTxDestination poolPayoutAddrTestnet = BitcoinUtils::DecodeDestination( + "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7"); + res = sjob.initFromGbt( + gbt.c_str(), + poolCoinbaseInfo, + poolPayoutAddrTestnet, + blockVersion, + "", + RskWork(), + 1, + false); ASSERT_EQ(res, true); const string jsonStr = sjob.serializeToJson(); @@ -499,28 +754,35 @@ TEST(Stratum, StratumJobWithSegwitPayoutAddr) { res = sjob2.unserializeFromJson(jsonStr.c_str(), jsonStr.length()); ASSERT_EQ(res, true); - ASSERT_EQ(sjob2.prevHash_, uint256S("0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803772")); - ASSERT_EQ(sjob2.prevHashBeStr_, "e98037722f631f70eeb00c155d52e0f3407654b2e5bda1220000004700000000"); + ASSERT_EQ( + sjob2.prevHash_, + uint256S("0000000000000047e5bda122407654b25d52e0f3eeb00c152f631f70e9803" + "772")); + ASSERT_EQ( + sjob2.prevHashBeStr_, + "e98037722f631f70eeb00c155d52e0f3407654b2e5bda1220000004700000000"); ASSERT_EQ(sjob2.height_, 1038222); - ASSERT_EQ(sjob2.coinbase2_, - "ffffffff" // sequence - "02" // 2 outputs - // 5e29091300000000 -> 000000001309295e -> 319367518 - "5e29091300000000" - // 0x22 -> 34 bytes - "2200201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262" - // - "0000000000000000" - // 0x26 -> 38 bytes - "266a24aa21a9ed842a6d6672504c2b7abb796fdd7cfbd7262977b71b945452e17fbac69ed22bf8" - // lock_time - "00000000"); + ASSERT_EQ( + sjob2.coinbase2_, + 
"ffffffff" // sequence + "02" // 2 outputs + // 5e29091300000000 -> 000000001309295e -> 319367518 + "5e29091300000000" + // 0x22 -> 34 bytes + "2200201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262" + // + "0000000000000000" + // 0x26 -> 38 bytes + "266a24aa21a9ed842a6d6672504c2b7abb796fdd7cfbd7262977b71b945452e17fbac6" + "9ed22bf8" + // lock_time + "00000000"); ASSERT_EQ(sjob2.nVersion_, 536870912); - ASSERT_EQ(sjob2.nBits_, 0x1a171448U); - ASSERT_EQ(sjob2.nTime_, 1480834892U); - ASSERT_EQ(sjob2.minTime_, 1480831053U); + ASSERT_EQ(sjob2.nBits_, 0x1a171448U); + ASSERT_EQ(sjob2.nTime_, 1480834892U); + ASSERT_EQ(sjob2.minTime_, 1480831053U); ASSERT_EQ(sjob2.coinbaseValue_, 319367518); ASSERT_GE(time(nullptr), jobId2Time(sjob2.jobId_)); } @@ -541,11 +803,23 @@ TEST(Stratum, StratumJobWithRskWork) { gbt += " \"proposal\""; gbt += " ],"; gbt += " \"version\": 536870912,"; - gbt += " \"previousblockhash\": \"000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c34\","; + gbt += + " \"previousblockhash\": " + "\"000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c34\","; gbt += " \"transactions\": ["; gbt += " {"; - gbt += " \"data\": \"01000000010291939c5ae8191c2e7d4ce8eba7d6616a66482e3200037cb8b8c2d0af45b445000000006a47304402204df709d9e149804e358de4b082e41d8bb21b3c9d347241b728b1362aafcb153602200d06d9b6f2eca899f43dcd62ec2efb2d9ce2e10adf02738bb908420d7db93ede012103cae98ab925e20dd6ae1f76e767e9e99bc47b3844095c68600af9c775104fb36cffffffff0290f1770b000000001976a91400dc5fd62f6ee48eb8ecda749eaec6824a780fdd88aca08601000000000017a914eb65573e5dd52d3d950396ccbe1a47daf8f400338700000000\","; - gbt += " \"hash\": \"bd36bd4fff574b573152e7d4f64adf2bb1c9ab0080a12f8544c351f65aca79ff\","; + gbt += + " \"data\": " + "\"01000000010291939c5ae8191c2e7d4ce8eba7d6616a66482e3200037cb8b8c2d0af" + "45b445000000006a47304402204df709d9e149804e358de4b082e41d8bb21b3c9d3472" + "41b728b1362aafcb153602200d06d9b6f2eca899f43dcd62ec2efb2d9ce2e10adf0273" + 
"8bb908420d7db93ede012103cae98ab925e20dd6ae1f76e767e9e99bc47b3844095c68" + "600af9c775104fb36cffffffff0290f1770b000000001976a91400dc5fd62f6ee48eb8" + "ecda749eaec6824a780fdd88aca08601000000000017a914eb65573e5dd52d3d950396" + "ccbe1a47daf8f400338700000000\","; + gbt += + " \"hash\": " + "\"bd36bd4fff574b573152e7d4f64adf2bb1c9ab0080a12f8544c351f65aca79ff\","; gbt += " \"depends\": ["; gbt += " ],"; gbt += " \"fee\": 10000,"; @@ -556,8 +830,13 @@ TEST(Stratum, StratumJobWithRskWork) { gbt += " \"flags\": \"\""; gbt += " },"; gbt += " \"coinbasevalue\": 312659655,"; - gbt += " \"longpollid\": \"000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c341911\","; - gbt += " \"target\": \"000000000000018ae20000000000000000000000000000000000000000000000\","; + gbt += + " \"longpollid\": " + "\"000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c341911" + "\","; + gbt += + " \"target\": " + "\"000000000000018ae20000000000000000000000000000000000000000000000\","; gbt += " \"mintime\": 1469001544,"; gbt += " \"mutable\": ["; gbt += " \"time\","; @@ -574,37 +853,63 @@ TEST(Stratum, StratumJobWithRskWork) { uint32_t creationTime = (uint32_t)time(nullptr); string rawgw; - rawgw = Strings::Format("{\"created_at_ts\": %u," - "\"rskdRpcAddress\":\"http://10.0.2.2:4444\"," - "\"rskdRpcUserPwd\":\"user:pass\"," - "\"target\":\"0x5555555555555555555555555555555555555555555555555555555555555555\"," - "\"parentBlockHash\":\"0x13532f616f89e3ac2e0a9ef7363be28e7f2ca39764684995fb30c0d96e664ae4\"," - "\"blockHashForMergedMining\":\"0xe6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae8364604e9f8a15d\"," - "\"feesPaidToMiner\":\"0\"," - "\"notify\":\"true\"}", creationTime); + rawgw = Strings::Format( + "{\"created_at_ts\": %u," + "\"rskdRpcAddress\":\"http://10.0.2.2:4444\"," + "\"rskdRpcUserPwd\":\"user:pass\"," + "\"target\":" + "\"0x5555555555555555555555555555555555555555555555555555555555555555\"" + "," + "\"parentBlockHash\":" + 
"\"0x13532f616f89e3ac2e0a9ef7363be28e7f2ca39764684995fb30c0d96e664ae4\"" + "," + "\"blockHashForMergedMining\":" + "\"0xe6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae8364604e9f8a15d\"" + "," + "\"feesPaidToMiner\":\"0\"," + "\"notify\":\"true\"}", + creationTime); blockVersion = 0; SelectParams(CBaseChainParams::TESTNET); - + bool resInitRskWork = rskWork.initFromGw(rawgw); - + ASSERT_TRUE(resInitRskWork); ASSERT_EQ(rskWork.isInitialized(), true); ASSERT_EQ(rskWork.getCreatedAt(), creationTime); - ASSERT_EQ(rskWork.getBlockHash(), "0xe6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae8364604e9f8a15d"); - ASSERT_EQ(rskWork.getTarget(), "0x5555555555555555555555555555555555555555555555555555555555555555"); + ASSERT_EQ( + rskWork.getBlockHash(), + "0xe6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae8364604e9f8a15d"); + ASSERT_EQ( + rskWork.getTarget(), + "0x5555555555555555555555555555555555555555555555555555555555555555"); ASSERT_EQ(rskWork.getFees(), "0"); ASSERT_EQ(rskWork.getRpcAddress(), "http://10.0.2.2:4444"); ASSERT_EQ(rskWork.getRpcUserPwd(), "user:pass"); ASSERT_EQ(rskWork.getNotifyFlag(), true); - CTxDestination poolPayoutAddrTestnet = BitcoinUtils::DecodeDestination("myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"); - sjob.initFromGbt(gbt.c_str(), poolCoinbaseInfo, poolPayoutAddrTestnet, blockVersion, "", rskWork, 1, true); + CTxDestination poolPayoutAddrTestnet = + BitcoinUtils::DecodeDestination("myxopLJB19oFtNBdrAxD5Z34Aw6P8o9P8U"); + sjob.initFromGbt( + gbt.c_str(), + poolCoinbaseInfo, + poolPayoutAddrTestnet, + blockVersion, + "", + rskWork, + 1, + true); // check rsk required data copied properly to the stratum job - ASSERT_EQ(sjob.blockHashForMergedMining_, "0xe6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae8364604e9f8a15d"); - ASSERT_EQ(sjob.rskNetworkTarget_, uint256S("0x5555555555555555555555555555555555555555555555555555555555555555")); + ASSERT_EQ( + sjob.blockHashForMergedMining_, + "0xe6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae8364604e9f8a15d"); 
+ ASSERT_EQ( + sjob.rskNetworkTarget_, + uint256S("0x55555555555555555555555555555555555555555555555555555555555" + "55555")); ASSERT_EQ(sjob.feesForMiner_, "0"); ASSERT_EQ(sjob.rskdRpcAddress_, "http://10.0.2.2:4444"); ASSERT_EQ(sjob.rskdRpcUserPwd_, "user:pass"); @@ -612,45 +917,63 @@ TEST(Stratum, StratumJobWithRskWork) { // check rsk merged mining tag present in the coinbase // Hex("RSKBLOCK:") = 0x52534b424c4f434b3a - string rskTagHex = "52534b424c4f434b3ae6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae8364604e9f8a15d"; + string rskTagHex = + "52534b424c4f434b3ae6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae836" + "4604e9f8a15d"; size_t rskTagPos = sjob.coinbase2_.find(rskTagHex); ASSERT_NE(rskTagPos, string::npos); - ASSERT_EQ(sjob.prevHash_, uint256S("000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979c34")); - ASSERT_EQ(sjob.prevHashBeStr_, "03979c349a31fd2dc3fe929586643caabb46c03b532b2e774f2ea23900000000"); + ASSERT_EQ( + sjob.prevHash_, + uint256S("000000004f2ea239532b2e77bb46c03b86643caac3fe92959a31fd2d03979" + "c34")); + ASSERT_EQ( + sjob.prevHashBeStr_, + "03979c349a31fd2dc3fe929586643caabb46c03b532b2e774f2ea23900000000"); ASSERT_EQ(sjob.height_, 898487); // 46 bytes, 5 bytes (timestamp), 9 bytes (poolCoinbaseInfo) - // 02000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1e03b7b50d 0402363d58 2f4254432e434f4d2f - ASSERT_EQ(sjob.coinbase1_.substr(0, 92), - "02000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1e03b7b50d"); + // 02000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1e03b7b50d + // 0402363d58 2f4254432e434f4d2f + ASSERT_EQ( + sjob.coinbase1_.substr(0, 92), + "0200000001000000000000000000000000000000000000000000000000000000000000" + "0000ffffffff1e03b7b50d"); ASSERT_EQ(sjob.coinbase1_.substr(102, 18), "2f4254432e434f4d2f"); // 0402363d58 -> 0x583d3602 = 1480406530 = 2016-11-29 16:02:10 - uint32_t ts = 
(uint32_t)strtoull(sjob.coinbase1_.substr(94, 8).c_str(), nullptr, 16); + uint32_t ts = + (uint32_t)strtoull(sjob.coinbase1_.substr(94, 8).c_str(), nullptr, 16); ts = HToBe(ts); ASSERT_EQ(ts == time(nullptr) || ts + 1 == time(nullptr), true); - ASSERT_EQ(sjob.coinbase2_, - "ffffffff" // sequence - "02" // 2 outputs. Rsk tag is stored in an additional CTxOut besides the cb's standard output - - // c7cea21200000000 -> 0000000012a2cec7 -> 312659655 - "c7cea21200000000" - // 0x19 -> 25 bytes of first output script - "1976a914ca560088c0fb5e6f028faa11085e643e343a8f5c88ac" - - // rsk tx out value - "0000000000000000" - // 0x29 = 41 bytes of second output script containing the rsk merged mining tag - "2952534b424c4f434b3ae6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae8364604e9f8a15d" - - // lock_time - "00000000"); + ASSERT_EQ( + sjob.coinbase2_, + "ffffffff" // sequence + "02" // 2 outputs. Rsk tag is stored in an additional CTxOut besides the + // cb's standard output + + // c7cea21200000000 -> 0000000012a2cec7 -> 312659655 + "c7cea21200000000" + // 0x19 -> 25 bytes of first output script + "1976a914ca560088c0fb5e6f028faa11085e643e343a8f5c88ac" + + // rsk tx out value + "0000000000000000" + // 0x29 = 41 bytes of second output script containing the rsk merged + // mining tag + "2952534b424c4f434b3ae6b0a8e84e0ce68471ca28db4f51b71139b0ab78ae1c3e0ae8" + "364604e9f8a15d" + + // lock_time + "00000000"); ASSERT_EQ(sjob.merkleBranch_.size(), 1U); - ASSERT_EQ(sjob.merkleBranch_[0], uint256S("bd36bd4fff574b573152e7d4f64adf2bb1c9ab0080a12f8544c351f65aca79ff")); + ASSERT_EQ( + sjob.merkleBranch_[0], + uint256S("bd36bd4fff574b573152e7d4f64adf2bb1c9ab0080a12f8544c351f65aca7" + "9ff")); ASSERT_EQ(sjob.nVersion_, 536870912); - ASSERT_EQ(sjob.nBits_, 436308706U); - ASSERT_EQ(sjob.nTime_, 1469006933U); - ASSERT_EQ(sjob.minTime_, 1469001544U); + ASSERT_EQ(sjob.nBits_, 436308706U); + ASSERT_EQ(sjob.nTime_, 1469006933U); + ASSERT_EQ(sjob.minTime_, 1469001544U); 
ASSERT_EQ(sjob.coinbaseValue_, 312659655); ASSERT_GE(time(nullptr), jobId2Time(sjob.jobId_)); } diff --git a/test/TestStratumServer.cc b/test/TestStratumServer.cc index c25c7eea5..0082f9ca9 100644 --- a/test/TestStratumServer.cc +++ b/test/TestStratumServer.cc @@ -39,7 +39,6 @@ TEST(StratumServer, SessionIDManager24Bits) { // KafkaProducer a("", "", 0); // a.produce("", 0); - SessionIDManagerT<24> m(0xFFu); uint32_t j, sessionID; @@ -104,7 +103,7 @@ TEST(StratumServer, SessionIDManager16BitsWithInterval) { uint32_t j, sessionID; // fill all session ids - // Use std::vector because the compile time of + // Use std::vector because the compile time of // std::bitset<0x100000000ull> is too long. { std::vector ids(0x100000000ull); @@ -185,42 +184,71 @@ TEST(StratumServer, SessionIDManager8Bits) { #endif // #ifndef WORK_WITH_STRATUM_SWITCHER TEST(StratumServerBitcoin, CheckShare) { - string sjobJson = "{\"jobId\":6645522065066147329,\"gbtHash\":\"d349be274f007c2e1ee773b33bd21ef43d2615c089b7c5460b66584881a10683\"," - "\"prevHash\":\"00000000000000000019d1d9c84df0ecc23e549b86644ad47cb92570a26b12a5\",\"prevHashBeStr\":\"a26b12a57cb9257086644ad4c23e" - "549bc84df0ec0019d1d90000000000000000\",\"height\":558201,\"coinbase1\":\"020000000100000000000000000000000000000000000000000000000" - "00000000000000000ffffffff4b03798408041ba3395c612f4254432e434f4d2ffabe6d6dc807e51bd76025d65ccad2ba8ba1e9fba5f09118b6b55a348638cc17b" - "14e3909080000005fb54ad0\",\"coinbase2\":\"ffffffff036734ec4a0000000016001497cfc76442fe717f2a3f0cc9c175f7561b6619970000000000000000" - "266a24aa21a9ed40cbdaa98da815640f815b938df95bffe0775d8078771bc47ed4f43ac4e30b0600000000000000002952534b424c4f434b3a9ad45fdcc194d788" - "895f3ad389b583ea327f826353f7edf6b168db038372cb2700000000\",\"merkleBranch\":\"53146311555e15816f4549a893ff2eb50e60741ecccb2996bafd" - "dcf4ee008d5ac504967e375b2522af2be8411b1b032dda0e700c2e8913d869533256ff30caccea4ba404b68e625cfd3237e07e8deddb342690b08314d2638b5272" - 
"b74ab12fa3b3812908cd6bef999dea979875ba2730615be08b480e4b6f7b878000510a778c557f44bc3f21813d138d25530df85a89a38e2d2827f758ebc68a62e8" - "225933a5af086e72d9a65fd9be526648e8bcf74271308d9d273425b47bd12db075e841ba703f4c8a20be62d036958278b16f214d7fcd35c46a9f9fb1910618fa9e" - "029d3f96518aae34efbdabfbfbc055bffe891d93edbc7539ae9c0a22a35e87d5ccb033b89976cbb624af024b53c6a02309cb838eb285ecf675b801f1dd7f2d5c92" - "4cb1491731c28bea800b12b94bb4f70502a40559c8edb5f73b906ba8e814f10e852ef87365a49346c4b7361b75e38f1d9b96f028880227b7186a0b114e170b170b" - "47\",\"nVersion\":536870912,\"nBits\":389159077,\"nTime\":1547281171,\"minTime\":1547277926,\"coinbaseValue\":1256993895,\"witness" - "Commitment\":\"6a24aa21a9ed40cbdaa98da815640f815b938df95bffe0775d8078771bc47ed4f43ac4e30b06\",\"nmcBlockHash\":\"c807e51bd76025d65" - "ccad2ba8ba1e9fba5f09118b6b55a348638cc17b14e3909\",\"nmcBits\":402868319,\"nmcHeight\":433937,\"nmcRpcAddr\":\"http://127.0.0.1:899" - "9\",\"nmcRpcUserpass\":\"user:pass\",\"rskBlockHashForMergedMining\":\"0x9ad45fdcc194d788895f3ad389b583ea327f826353f7edf6b168db038" - "372cb27\",\"rskNetworkTarget\":\"0x00000000000000001386e3444eba74f8a750a71a75ed0b7fecdfd282a8cef091\",\"rskFeesForMiner\":\"0\",\"" - "rskdRpcAddress\":\"http://127.0.0.1:4444\",\"rskdRpcUserPwd\":\"user:pass\",\"isRskCleanJob\":true}"; - + string sjobJson = + "{\"jobId\":6645522065066147329,\"gbtHash\":" + "\"d349be274f007c2e1ee773b33bd21ef43d2615c089b7c5460b66584881a10683\"," + "\"prevHash\":" + "\"00000000000000000019d1d9c84df0ecc23e549b86644ad47cb92570a26b12a5\"," + "\"prevHashBeStr\":\"a26b12a57cb9257086644ad4c23e" + "549bc84df0ec0019d1d90000000000000000\",\"height\":558201,\"coinbase1\":" + "\"020000000100000000000000000000000000000000000000000000000" + "00000000000000000ffffffff4b03798408041ba3395c612f4254432e434f4d2ffabe6d6" + "dc807e51bd76025d65ccad2ba8ba1e9fba5f09118b6b55a348638cc17b" + "14e3909080000005fb54ad0\",\"coinbase2\":" + 
"\"ffffffff036734ec4a0000000016001497cfc76442fe717f2a3f0cc9c175f7561b6619" + "970000000000000000" + "266a24aa21a9ed40cbdaa98da815640f815b938df95bffe0775d8078771bc47ed4f43ac4" + "e30b0600000000000000002952534b424c4f434b3a9ad45fdcc194d788" + "895f3ad389b583ea327f826353f7edf6b168db038372cb2700000000\"," + "\"merkleBranch\":\"53146311555e15816f4549a893ff2eb50e60741ecccb2996bafd" + "dcf4ee008d5ac504967e375b2522af2be8411b1b032dda0e700c2e8913d869533256ff30" + "caccea4ba404b68e625cfd3237e07e8deddb342690b08314d2638b5272" + "b74ab12fa3b3812908cd6bef999dea979875ba2730615be08b480e4b6f7b878000510a77" + "8c557f44bc3f21813d138d25530df85a89a38e2d2827f758ebc68a62e8" + "225933a5af086e72d9a65fd9be526648e8bcf74271308d9d273425b47bd12db075e841ba" + "703f4c8a20be62d036958278b16f214d7fcd35c46a9f9fb1910618fa9e" + "029d3f96518aae34efbdabfbfbc055bffe891d93edbc7539ae9c0a22a35e87d5ccb033b8" + "9976cbb624af024b53c6a02309cb838eb285ecf675b801f1dd7f2d5c92" + "4cb1491731c28bea800b12b94bb4f70502a40559c8edb5f73b906ba8e814f10e852ef873" + "65a49346c4b7361b75e38f1d9b96f028880227b7186a0b114e170b170b" + "47\",\"nVersion\":536870912,\"nBits\":389159077,\"nTime\":1547281171," + "\"minTime\":1547277926,\"coinbaseValue\":1256993895,\"witness" + "Commitment\":" + "\"6a24aa21a9ed40cbdaa98da815640f815b938df95bffe0775d8078771bc47ed4f43ac4" + "e30b06\",\"nmcBlockHash\":\"c807e51bd76025d65" + "ccad2ba8ba1e9fba5f09118b6b55a348638cc17b14e3909\",\"nmcBits\":402868319," + "\"nmcHeight\":433937,\"nmcRpcAddr\":\"http://127.0.0.1:899" + "9\",\"nmcRpcUserpass\":\"user:pass\",\"rskBlockHashForMergedMining\":" + "\"0x9ad45fdcc194d788895f3ad389b583ea327f826353f7edf6b168db038" + "372cb27\",\"rskNetworkTarget\":" + "\"0x00000000000000001386e3444eba74f8a750a71a75ed0b7fecdfd282a8cef091\"," + "\"rskFeesForMiner\":\"0\",\"" + "rskdRpcAddress\":\"http://" + "127.0.0.1:4444\",\"rskdRpcUserPwd\":\"user:pass\",\"isRskCleanJob\":" + "true}"; + auto sjob = std::make_shared(); sjob->unserializeFromJson(sjobJson.c_str(), sjobJson.size()); 
StratumJobExBitcoin exjob(sjob, true); - + CBlockHeader header; std::vector coinbaseBin; exjob.generateBlockHeader( - &header, &coinbaseBin, - 0xfe0000c3u, "260103fe60004690", - sjob->merkleBranch_, sjob->prevHash_, - sjob->nBits_, sjob->nVersion_, - 0x5c39a313u, 0x07ba7929u, - 0x00013f00u - ); - - uint256 blkHash = uint256S("1028e53e8145994a9ebe4f39eb6a7e3fd4036f2f21a05a5a696e8ac6d0829ef4"); + &header, + &coinbaseBin, + 0xfe0000c3u, + "260103fe60004690", + sjob->merkleBranch_, + sjob->prevHash_, + sjob->nBits_, + sjob->nVersion_, + 0x5c39a313u, + 0x07ba7929u, + 0x00013f00u); + + uint256 blkHash = uint256S( + "1028e53e8145994a9ebe4f39eb6a7e3fd4036f2f21a05a5a696e8ac6d0829ef4"); ASSERT_EQ(blkHash, header.GetHash()); } diff --git a/test/TestStratumSession.cc b/test/TestStratumSession.cc index 64aba4269..fb8fee788 100644 --- a/test/TestStratumSession.cc +++ b/test/TestStratumSession.cc @@ -37,32 +37,31 @@ using namespace std; using namespace testing; TEST(StratumSession, LocalShare) { - LocalShare ls1(0xFFFFFFFFFFFFFFFFULL, - 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); + LocalShare ls1(0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); { - LocalShare ls2(0xFFFFFFFFFFFFFFFEULL, - 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); + LocalShare ls2( + 0xFFFFFFFFFFFFFFFEULL, 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); ASSERT_EQ(ls2 < ls1, true); } { - LocalShare ls2(0xFFFFFFFFFFFFFFFFULL, - 0xFFFFFFFEU, 0xFFFFFFFFU, 0xFFFFFFFFU); + LocalShare ls2( + 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFEU, 0xFFFFFFFFU, 0xFFFFFFFFU); ASSERT_EQ(ls2 < ls1, true); } { - LocalShare ls2(0xFFFFFFFFFFFFFFFFULL, - 0xFFFFFFFFU, 0xFFFFFFFEU, 0xFFFFFFFFU); + LocalShare ls2( + 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFU, 0xFFFFFFFEU, 0xFFFFFFFFU); ASSERT_EQ(ls2 < ls1, true); } { - LocalShare ls2(0xFFFFFFFFFFFFFFFFULL, - 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFEU); + LocalShare ls2( + 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFEU); ASSERT_EQ(ls2 < ls1, true); } { - LocalShare ls2(0xFFFFFFFFFFFFFFFFULL, - 
0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); + LocalShare ls2( + 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); ASSERT_EQ(ls2 < ls1, false); ASSERT_EQ(ls2 < ls2, false); } @@ -78,13 +77,13 @@ TEST(StratumSession, LocalJob) { LocalJob lj(0); { - LocalShare ls1(0xFFFFFFFFFFFFFFFFULL, - 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); + LocalShare ls1( + 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); ASSERT_EQ(lj.addLocalShare(ls1), true); } { - LocalShare ls1(0xFFFFFFFFFFFFFFFFULL, - 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); + LocalShare ls1( + 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFU, 0xFFFFFFFFU, 0xFFFFFFFFU); ASSERT_EQ(lj.addLocalShare(ls1), false); } { @@ -99,31 +98,36 @@ TEST(StratumSession, LocalJob) { class StratumSessionMock : public IStratumSession { public: - MOCK_METHOD3(addWorker, void (const string &, const string &, int64_t)); - MOCK_METHOD3(createMiner, unique_ptr (const string &, const string &, int64_t)); - MOCK_CONST_METHOD1(decodeSessionId, uint16_t (const string &)); + MOCK_METHOD3(addWorker, void(const string &, const string &, int64_t)); + MOCK_METHOD3( + createMiner, + unique_ptr(const string &, const string &, int64_t)); + MOCK_CONST_METHOD1(decodeSessionId, uint16_t(const string &)); MOCK_METHOD0(getDispatcher, StratumMessageDispatcher &()); - MOCK_METHOD1(responseTrue, void (const string &)); - MOCK_METHOD2(responseError, void (const string &, int)); + MOCK_METHOD1(responseTrue, void(const string &)); + MOCK_METHOD2(responseError, void(const string &, int)); MOCK_METHOD2(sendData, void(const char *, size_t)); MOCK_METHOD1(sendData, void(const string &)); - MOCK_METHOD2(sendSetDifficulty, void (LocalJob &, uint64_t)); + MOCK_METHOD2(sendSetDifficulty, void(LocalJob &, uint64_t)); }; class StratumMinerMock : public StratumMiner { public: - StratumMinerMock(IStratumSession &session, - const DiffController &diffController, - const string &clientAgent, - const string &workerName, - int64_t workerId) - : StratumMiner(session, 
diffController, clientAgent, workerName, workerId) { + StratumMinerMock( + IStratumSession &session, + const DiffController &diffController, + const string &clientAgent, + const string &workerName, + int64_t workerId) + : StratumMiner(session, diffController, clientAgent, workerName, workerId) { } - MOCK_METHOD4(handleRequest, void (const string &, const string &, const JsonNode &, const JsonNode &)); - MOCK_METHOD1(handleExMessage, void (const string &)); - MOCK_METHOD1(addLocalJob, uint64_t (LocalJob &)); - MOCK_METHOD1(removeLocalJob, void (LocalJob &)); + MOCK_METHOD4( + handleRequest, + void(const string &, const string &, const JsonNode &, const JsonNode &)); + MOCK_METHOD1(handleExMessage, void(const string &)); + MOCK_METHOD1(addLocalJob, uint64_t(LocalJob &)); + MOCK_METHOD1(removeLocalJob, void(LocalJob &)); }; static DiffController diffController(0x4000, 0x4000000000000000, 0x2, 10, 3000); @@ -132,12 +136,14 @@ TEST(StratumSession, StratumClientAgentHandler_RegisterWorker) { StratumSessionMock connection; StratumMessageAgentDispatcher agent(connection, diffController); - // | magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | worker_name | + // | magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | + // worker_name | string exMessage; const string clientAgent = "cgminer\"1'"; - const string workerName = "bitkevin.testcase"; + const string workerName = "bitkevin.testcase"; const uint16_t sessionId = StratumMessageEx::AGENT_MAX_SESSION_ID; - exMessage.resize(1+1+2+2 + clientAgent.length() + 1 + workerName.length() + 1); + exMessage.resize( + 1 + 1 + 2 + 2 + clientAgent.length() + 1 + workerName.length() + 1); uint8_t *p = (uint8_t *)exMessage.data(); @@ -174,12 +180,14 @@ TEST(StratumSession, StratumClientAgentHandler_RegisterWorker2) { StratumSessionMock connection; StratumMessageAgentDispatcher agent(connection, diffController); - // | magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | worker_name | + // | 
magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | + // worker_name | string exMessage; const string clientAgent = "\"'"; const string workerName = "a.b"; const uint16_t sessionId = 0; - exMessage.resize(1+1+2+2 + clientAgent.length() + 1 + workerName.length() + 1, 0); + exMessage.resize( + 1 + 1 + 2 + 2 + clientAgent.length() + 1 + workerName.length() + 1, 0); uint8_t *p = (uint8_t *)exMessage.data(); @@ -216,12 +224,14 @@ TEST(StratumSession, StratumClientAgentHandler_RegisterWorker3) { StratumSessionMock connection; StratumMessageAgentDispatcher agent(connection, diffController); - // | magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | worker_name | + // | magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | + // worker_name | string exMessage; const string clientAgent; const string workerName; const uint16_t sessionId = 0; - exMessage.resize(1+1+2+2 + clientAgent.length() + 1 + workerName.length() + 1, 0); + exMessage.resize( + 1 + 1 + 2 + 2 + clientAgent.length() + 1 + workerName.length() + 1, 0); uint8_t *p = (uint8_t *)exMessage.data(); @@ -258,10 +268,11 @@ TEST(StratumSession, StratumClientAgentHandler_RegisterWorker4) { StratumSessionMock session; StratumMessageAgentDispatcher agent(session, diffController); - // | magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | worker_name | + // | magic_number(1) | cmd(1) | len (2) | session_id(2) | clientAgent | + // worker_name | string exMessage; const uint16_t sessionId = StratumMessageEx::AGENT_MAX_SESSION_ID; - exMessage.resize(1+1+2+2 + 1 + 1, 0); + exMessage.resize(1 + 1 + 2 + 2 + 1 + 1, 0); uint8_t *p = (uint8_t *)exMessage.data(); @@ -335,7 +346,7 @@ TEST(StratumSession, StratumClientAgentHandler_RegisterWorker4) { // // exMessage.resize(exMessage.size() + 1); - (*(uint16_t *)(exMessage.data() + 2))++; // len++ + (*(uint16_t *)(exMessage.data() + 2))++; // len++ exMessage[exMessage.size() - 1] = 'n'; exMessage[exMessage.size() - 2] = '\0'; @@ 
-367,14 +378,15 @@ TEST(StratumSession, StratumClientAgentHandler_SubmitShare) { // // SUBMIT_SHARE / SUBMIT_SHARE_WITH_TIME: - // | magic_number(1) | cmd(1) | len (2) | jobId (uint8_t) | session_id (uint16_t) | - // | extra_nonce2 (uint32_t) | nNonce (uint32_t) | [nTime (uint32_t) |] + // | magic_number(1) | cmd(1) | len (2) | jobId (uint8_t) | session_id + // (uint16_t) | | extra_nonce2 (uint32_t) | nNonce (uint32_t) | [nTime + // (uint32_t) |] // const string jobId = "9"; const uint16_t sessionId = StratumMessageEx::AGENT_MAX_SESSION_ID; string exMessage; - exMessage.resize(1+1+2+1+2+4+4, 0); + exMessage.resize(1 + 1 + 2 + 1 + 2 + 4 + 4, 0); uint8_t *p = (uint8_t *)exMessage.data(); @@ -390,10 +402,10 @@ TEST(StratumSession, StratumClientAgentHandler_SubmitShare) { *(uint16_t *)p = sessionId; p += 2; // extra_nonce2 - *(uint32_t *)p = 0x12345678; // 305419896 + *(uint32_t *)p = 0x12345678; // 305419896 p += 4; // nonce - *(uint32_t *)p = 0x90abcdef; // 2427178479 + *(uint32_t *)p = 0x90abcdef; // 2427178479 p += 4; ASSERT_EQ((size_t)(p - (uint8_t *)exMessage.data()), exMessage.size()); @@ -403,11 +415,14 @@ TEST(StratumSession, StratumClientAgentHandler_SubmitShare) { string workerName = "__default__"; auto workerId = StratumWorker::calcWorkerId(workerName); auto session = new StratumMinerMock(connection, dc, "", workerName, workerId); - EXPECT_CALL(connection, createMiner("", workerName, workerId)).WillOnce(Return(ByMove(unique_ptr(session)))); + EXPECT_CALL(connection, createMiner("", workerName, workerId)) + .WillOnce(Return(ByMove(unique_ptr(session)))); EXPECT_CALL(connection, addWorker("", workerName, workerId)).Times(1); - EXPECT_CALL(connection, decodeSessionId(exMessage)).WillOnce(Return(sessionId)); + EXPECT_CALL(connection, decodeSessionId(exMessage)) + .WillOnce(Return(sessionId)); EXPECT_CALL(*session, handleExMessage(exMessage)).Times(1); - agent.registerWorker(sessionId, "", "__default__", StratumWorker::calcWorkerId("__default__")); + 
agent.registerWorker( + sessionId, "", "__default__", StratumWorker::calcWorkerId("__default__")); agent.handleExMessage(exMessage); // please check ouput log } @@ -418,14 +433,15 @@ TEST(StratumSession, StratumClientAgentHandler_SubmitShare_with_time) { // // SUBMIT_SHARE / SUBMIT_SHARE_WITH_TIME: - // | magic_number(1) | cmd(1) | len (2) | jobId (uint8_t) | session_id (uint16_t) | - // | extra_nonce2 (uint32_t) | nNonce (uint32_t) | [nTime (uint32_t) |] + // | magic_number(1) | cmd(1) | len (2) | jobId (uint8_t) | session_id + // (uint16_t) | | extra_nonce2 (uint32_t) | nNonce (uint32_t) | [nTime + // (uint32_t) |] // const string jobId = "9"; const uint16_t sessionId = StratumMessageEx::AGENT_MAX_SESSION_ID; string exMessage; - exMessage.resize(1+1+2+1+2+4+4+4, 0); + exMessage.resize(1 + 1 + 2 + 1 + 2 + 4 + 4 + 4, 0); uint8_t *p = (uint8_t *)exMessage.data(); @@ -441,13 +457,13 @@ TEST(StratumSession, StratumClientAgentHandler_SubmitShare_with_time) { *(uint16_t *)p = sessionId; p += 2; // extra_nonce2 - *(uint32_t *)p = 0x12345678u; // 305419896 + *(uint32_t *)p = 0x12345678u; // 305419896 p += 4; // nonce - *(uint32_t *)p = 0xFFabcdefu; // 4289449455 + *(uint32_t *)p = 0xFFabcdefu; // 4289449455 p += 4; // time - *(uint32_t *)p = 0xcdef90abu; // 3455029419 + *(uint32_t *)p = 0xcdef90abu; // 3455029419 p += 4; ASSERT_EQ((size_t)(p - (uint8_t *)exMessage.data()), exMessage.size()); @@ -457,11 +473,14 @@ TEST(StratumSession, StratumClientAgentHandler_SubmitShare_with_time) { string workerName = "__default__"; auto workerId = StratumWorker::calcWorkerId(workerName); auto session = new StratumMinerMock(connection, dc, "", workerName, workerId); - EXPECT_CALL(connection, createMiner("", workerName, workerId)).WillOnce(Return(ByMove(unique_ptr(session)))); + EXPECT_CALL(connection, createMiner("", workerName, workerId)) + .WillOnce(Return(ByMove(unique_ptr(session)))); EXPECT_CALL(connection, addWorker("", workerName, workerId)).Times(1); - EXPECT_CALL(connection, 
decodeSessionId(exMessage)).WillOnce(Return(sessionId)); + EXPECT_CALL(connection, decodeSessionId(exMessage)) + .WillOnce(Return(sessionId)); EXPECT_CALL(*session, handleExMessage(exMessage)).Times(1); - agent.registerWorker(sessionId, "", "__default__", StratumWorker::calcWorkerId("__default__")); + agent.registerWorker( + sessionId, "", "__default__", StratumWorker::calcWorkerId("__default__")); agent.handleExMessage(exMessage); // please check ouput log } @@ -494,9 +513,11 @@ TEST(StratumSession, StratumClientAgentHandler_UNREGISTER_WORKER) { string workerName = "__default__"; auto workerId = StratumWorker::calcWorkerId(workerName); auto session = new StratumMinerMock(connection, dc, "", workerName, workerId); - EXPECT_CALL(connection, createMiner("", workerName, workerId)).WillOnce(Return(ByMove(unique_ptr(session)))); + EXPECT_CALL(connection, createMiner("", workerName, workerId)) + .WillOnce(Return(ByMove(unique_ptr(session)))); EXPECT_CALL(connection, addWorker("", workerName, workerId)).Times(1); - agent.registerWorker(sessionId, "", "__default__", StratumWorker::calcWorkerId("__default__")); + agent.registerWorker( + sessionId, "", "__default__", StratumWorker::calcWorkerId("__default__")); agent.handleExMessage(exMessage); // please check ouput log } @@ -505,24 +526,25 @@ TEST(StratumSession, StratumClientAgentHandler) { StratumSessionMock connection; StratumMessageAgentDispatcher agent(connection, diffController); - map > diffSessionIds; + map> diffSessionIds; string data; // // CMD_MINING_SET_DIFF: - // | magic_number(1) | cmd(1) | len (2) | diff_2_exp(1) | count(2) | session_id (2) ... | + // | magic_number(1) | cmd(1) | len (2) | diff_2_exp(1) | count(2) | + // session_id (2) ... 
| // { // diff: 1, session_id: 0 diffSessionIds[1].push_back(0); - agent.getSetDiffCommand(diffSessionIds, data); + agent.getSetDiffCommand(diffSessionIds, data); uint8_t *p = (uint8_t *)data.data(); ASSERT_EQ(data.length(), 9U); - ASSERT_EQ(*(uint8_t *)(p+ 4), 1); // diff_2exp - ASSERT_EQ(*(uint16_t *)(p+ 5), 1); // count - ASSERT_EQ(*(uint16_t *)(p+ 7), 0); // first session id + ASSERT_EQ(*(uint8_t *)(p + 4), 1); // diff_2exp + ASSERT_EQ(*(uint16_t *)(p + 5), 1); // count + ASSERT_EQ(*(uint16_t *)(p + 7), 0); // first session id } { @@ -535,38 +557,38 @@ TEST(StratumSession, StratumClientAgentHandler) { agent.getSetDiffCommand(diffSessionIds, data); // 65535 = 32764 + 32764 + 7 - size_t l1 = 1+1+2+1+2+ 32764 * 2; - size_t l2 = 1+1+2+1+2+ 32764 * 2; - size_t l3 = 1+1+2+1+2+ 7 * 2; + size_t l1 = 1 + 1 + 2 + 1 + 2 + 32764 * 2; + size_t l2 = 1 + 1 + 2 + 1 + 2 + 32764 * 2; + size_t l3 = 1 + 1 + 2 + 1 + 2 + 7 * 2; ASSERT_EQ(data.length(), l1 + l2 + l3); uint8_t *p = (uint8_t *)data.data(); - ASSERT_EQ(*p, StratumMessageEx::CMD_MAGIC_NUMBER); - ASSERT_EQ(*(p+l1), StratumMessageEx::CMD_MAGIC_NUMBER); - ASSERT_EQ(*(p+l1+l2), StratumMessageEx::CMD_MAGIC_NUMBER); + ASSERT_EQ(*p, StratumMessageEx::CMD_MAGIC_NUMBER); + ASSERT_EQ(*(p + l1), StratumMessageEx::CMD_MAGIC_NUMBER); + ASSERT_EQ(*(p + l1 + l2), StratumMessageEx::CMD_MAGIC_NUMBER); // check length - ASSERT_EQ(*(uint16_t *)(p+2), l1); - ASSERT_EQ(*(uint16_t *)(p+2+l1), l2); - ASSERT_EQ(*(uint16_t *)(p+2+l1+l2), l3); + ASSERT_EQ(*(uint16_t *)(p + 2), l1); + ASSERT_EQ(*(uint16_t *)(p + 2 + l1), l2); + ASSERT_EQ(*(uint16_t *)(p + 2 + l1 + l2), l3); // check diff - ASSERT_EQ(*(uint8_t *)(p+4), 63); - ASSERT_EQ(*(uint8_t *)(p+4+l1), 63); - ASSERT_EQ(*(uint8_t *)(p+4+l1+l2), 63); + ASSERT_EQ(*(uint8_t *)(p + 4), 63); + ASSERT_EQ(*(uint8_t *)(p + 4 + l1), 63); + ASSERT_EQ(*(uint8_t *)(p + 4 + l1 + l2), 63); // check count - ASSERT_EQ(*(uint16_t *)(p+5), 32764); - ASSERT_EQ(*(uint16_t *)(p+5+l1), 32764); - ASSERT_EQ(*(uint16_t 
*)(p+5+l1+l2), 7); + ASSERT_EQ(*(uint16_t *)(p + 5), 32764); + ASSERT_EQ(*(uint16_t *)(p + 5 + l1), 32764); + ASSERT_EQ(*(uint16_t *)(p + 5 + l1 + l2), 7); // check first session id - ASSERT_EQ(*(uint16_t *)(p+7), 0); - ASSERT_EQ(*(uint16_t *)(p+7+l1), 32764); - ASSERT_EQ(*(uint16_t *)(p+7+l1+l2), 32764 + 32764); + ASSERT_EQ(*(uint16_t *)(p + 7), 0); + ASSERT_EQ(*(uint16_t *)(p + 7 + l1), 32764); + ASSERT_EQ(*(uint16_t *)(p + 7 + l1 + l2), 32764 + 32764); // last session id - ASSERT_EQ(*(uint16_t *)(p+l1+l2+l3-2), 65535 - 1); + ASSERT_EQ(*(uint16_t *)(p + l1 + l2 + l3 - 2), 65535 - 1); } } @@ -577,11 +599,11 @@ TEST(StratumSession, SetDiff) { string password = "d=1024"; uint64_t d = 0u, md = 0u; - vector arr; // key=value,key=value + vector arr; // key=value,key=value split(arr, password, is_any_of(",")); for (auto it = arr.begin(); it != arr.end(); it++) { - vector arr2; // key,value + vector arr2; // key,value split(arr2, *it, is_any_of("=")); if (arr2.size() != 2 || arr2[1].empty()) { continue; @@ -590,8 +612,7 @@ TEST(StratumSession, SetDiff) { if (arr2[0] == "d") { // 'd' : start difficulty d = strtoull(arr2[1].c_str(), nullptr, 10); - } - else if (arr2[0] == "md") { + } else if (arr2[0] == "md") { // 'md' : minimum difficulty md = strtoull(arr2[1].c_str(), nullptr, 10); } @@ -605,11 +626,11 @@ TEST(StratumSession, SetDiff) { string password = "md=2048"; uint64_t d = 0u, md = 0u; - vector arr; // key=value,key=value + vector arr; // key=value,key=value split(arr, password, is_any_of(",")); for (auto it = arr.begin(); it != arr.end(); it++) { - vector arr2; // key,value + vector arr2; // key,value split(arr2, *it, is_any_of("=")); if (arr2.size() != 2 || arr2[1].empty()) { continue; @@ -618,8 +639,7 @@ TEST(StratumSession, SetDiff) { if (arr2[0] == "d") { // 'd' : start difficulty d = strtoull(arr2[1].c_str(), nullptr, 10); - } - else if (arr2[0] == "md") { + } else if (arr2[0] == "md") { // 'md' : minimum difficulty md = strtoull(arr2[1].c_str(), nullptr, 10); 
} @@ -633,11 +653,11 @@ TEST(StratumSession, SetDiff) { string password = "d=1024,md=2048"; uint64_t d = 0u, md = 0u; - vector arr; // key=value,key=value + vector arr; // key=value,key=value split(arr, password, is_any_of(",")); for (auto it = arr.begin(); it != arr.end(); it++) { - vector arr2; // key,value + vector arr2; // key,value split(arr2, *it, is_any_of("=")); if (arr2.size() != 2 || arr2[1].empty()) { continue; @@ -646,27 +666,25 @@ TEST(StratumSession, SetDiff) { if (arr2[0] == "d") { // 'd' : start difficulty d = strtoull(arr2[1].c_str(), nullptr, 10); - } - else if (arr2[0] == "md") { + } else if (arr2[0] == "md") { // 'md' : minimum difficulty md = strtoull(arr2[1].c_str(), nullptr, 10); } } - ASSERT_EQ(d, 1024u); + ASSERT_EQ(d, 1024u); ASSERT_EQ(md, 2048u); } - { string password = "d=1025,md=2500"; uint64_t d = 0u, md = 0u; - vector arr; // key=value,key=value + vector arr; // key=value,key=value split(arr, password, is_any_of(",")); for (auto it = arr.begin(); it != arr.end(); it++) { - vector arr2; // key,value + vector arr2; // key,value split(arr2, *it, is_any_of("=")); if (arr2.size() != 2 || arr2[1].empty()) { continue; @@ -675,16 +693,15 @@ TEST(StratumSession, SetDiff) { if (arr2[0] == "d") { // 'd' : start difficulty d = strtoull(arr2[1].c_str(), nullptr, 10); - } - else if (arr2[0] == "md") { + } else if (arr2[0] == "md") { // 'md' : minimum difficulty md = strtoull(arr2[1].c_str(), nullptr, 10); } } // set min diff first - //if (md >= DiffController::kMinDiff_) { - if (md >= 64) { + // if (md >= DiffController::kMinDiff_) { + if (md >= 64) { // diff must be 2^N double i = 1; while ((uint64_t)exp2(i) < md) { @@ -696,16 +713,16 @@ TEST(StratumSession, SetDiff) { } // than set current diff - //if (d >= DiffController::kMinDiff_) { - if (d >= 64) { + // if (d >= DiffController::kMinDiff_) { + if (d >= 64) { // diff must be 2^N double i = 1; while ((uint64_t)exp2(i) < d) { i++; } d = (uint64_t)exp2(i); - - ASSERT_EQ(d, 2048u); + + ASSERT_EQ(d, 
2048u); } } } diff --git a/test/TestUtils.cc b/test/TestUtils.cc index 458312031..c8e08d0f3 100644 --- a/test/TestUtils.cc +++ b/test/TestUtils.cc @@ -67,24 +67,28 @@ TEST(Utils, share2HashrateG) { // if diff = 1, every 10 secons per share, // hashrate will 0.429497 Ghs ~ 429 Mhs h = share2HashrateG(1, 10); - ASSERT_EQ((int64_t)(h*1000), 429); + ASSERT_EQ((int64_t)(h * 1000), 429); } -TEST(Utils, Bin2Hex) -{ - const vector bin = {(char)0xF0, (char)0xFA, (char)0x6E, (char)0xCD, (char)0xCD, (char)0xCD, (char)0xCD, (char)0xCD}; - const string rightHex = "f0fa6ecdcdcdcdcd"; - const string rightHexReverse = "cdcdcdcdcd6efaf0"; - - string result1; - Bin2Hex(bin, result1); - EXPECT_EQ(result1, rightHex); - EXPECT_NE(result1, rightHexReverse); - - string result2; - Bin2HexR(bin, result2); - EXPECT_EQ(result2, rightHexReverse); - EXPECT_NE(result2, rightHex); - - +TEST(Utils, Bin2Hex) { + const vector bin = {(char)0xF0, + (char)0xFA, + (char)0x6E, + (char)0xCD, + (char)0xCD, + (char)0xCD, + (char)0xCD, + (char)0xCD}; + const string rightHex = "f0fa6ecdcdcdcdcd"; + const string rightHexReverse = "cdcdcdcdcd6efaf0"; + + string result1; + Bin2Hex(bin, result1); + EXPECT_EQ(result1, rightHex); + EXPECT_NE(result1, rightHexReverse); + + string result2; + Bin2HexR(bin, result2); + EXPECT_EQ(result2, rightHexReverse); + EXPECT_NE(result2, rightHex); } \ No newline at end of file diff --git a/tools/bitcoin/gbtsync/TestGbtSync/testdataoperationmanager_common.h b/tools/bitcoin/gbtsync/TestGbtSync/testdataoperationmanager_common.h index bf8cf9272..d306d10f9 100644 --- a/tools/bitcoin/gbtsync/TestGbtSync/testdataoperationmanager_common.h +++ b/tools/bitcoin/gbtsync/TestGbtSync/testdataoperationmanager_common.h @@ -4,11 +4,16 @@ #include "gtest/gtest.h" #include "gbtsync/gbtsync.h" - -void CheckReadyToLoad(DataOperationManagerBase& manager); -void CheckContent(DataOperationManagerBase& manager, const std::string& id, const std::vector& expectedValue); -void 
CheckContent(DataOperationManagerBase& manager, const std::string& id, const std::string& expectedValueStr); -void CheckContents(DataOperationManagerBase& manager); -void CheckSaveDelete(DataOperationManagerBase& manager); +void CheckReadyToLoad(DataOperationManagerBase &manager); +void CheckContent( + DataOperationManagerBase &manager, + const std::string &id, + const std::vector &expectedValue); +void CheckContent( + DataOperationManagerBase &manager, + const std::string &id, + const std::string &expectedValueStr); +void CheckContents(DataOperationManagerBase &manager); +void CheckSaveDelete(DataOperationManagerBase &manager); #endif // _TEST_DATA_OPERATION_MANAGER_COMMON_H_ \ No newline at end of file diff --git a/tools/bitcoin/gbtsync/datamanager.h b/tools/bitcoin/gbtsync/datamanager.h index 5dd03cc66..17adb9ad4 100644 --- a/tools/bitcoin/gbtsync/datamanager.h +++ b/tools/bitcoin/gbtsync/datamanager.h @@ -9,63 +9,49 @@ class DataHandler; class DataOperationManagerBase; -class DataManager -{ +class DataManager { public: - using AddAndRemoveDataListPair = std::pair, std::vector>; + using AddAndRemoveDataListPair = + std::pair, std::vector>; - DataManager(DataOperationManagerBase* operationManager); - // store data persistently and keep info in the cache - bool AddData(std::string id, std::vector&& data); - // remove data from persistent storage and cache - bool RemoveData(const std::string& id); - - // list files from drive - // return new detected filenames - AddAndRemoveDataListPair DiffDataHandles(bool updateCache = true); + DataManager(DataOperationManagerBase *operationManager); + // store data persistently and keep info in the cache + bool AddData(std::string id, std::vector &&data); + // remove data from persistent storage and cache + bool RemoveData(const std::string &id); - void SetName(std::string name) - { - m_Name = std::move(name); - } + // list files from drive + // return new detected filenames + AddAndRemoveDataListPair DiffDataHandles(bool 
updateCache = true); - const std::string& GetName() const - { - return m_Name; - } + void SetName(std::string name) { m_Name = std::move(name); } - const std::unordered_map>& GetDataHandlers() const - { - return m_DataHandlers; - } + const std::string &GetName() const { return m_Name; } - std::shared_ptr GetDataHandler(const std::string& id) const - { - auto iter = m_DataHandlers.find(id); - if(iter != m_DataHandlers.end()) - { - return iter->second; - } - return std::shared_ptr(); - } + const std::unordered_map> & + GetDataHandlers() const { + return m_DataHandlers; + } - void EnableSyncDelete(bool b) - { - m_syncDelete = b; - } - - bool IsSyncDelete() const - { - return m_syncDelete; + std::shared_ptr GetDataHandler(const std::string &id) const { + auto iter = m_DataHandlers.find(id); + if (iter != m_DataHandlers.end()) { + return iter->second; } + return std::shared_ptr(); + } + + void EnableSyncDelete(bool b) { m_syncDelete = b; } + + bool IsSyncDelete() const { return m_syncDelete; } - void ClearLoadedData(); + void ClearLoadedData(); private: - std::unique_ptr m_FileOperationManager; - std::unordered_map> m_DataHandlers; - std::string m_Name; - bool m_syncDelete; + std::unique_ptr m_FileOperationManager; + std::unordered_map> m_DataHandlers; + std::string m_Name; + bool m_syncDelete; }; #endif // _DATA_MANAGER_H_ \ No newline at end of file diff --git a/tools/bitcoin/gbtsync/dataoperation_base.h b/tools/bitcoin/gbtsync/dataoperation_base.h index 8fe057682..9f956a27f 100644 --- a/tools/bitcoin/gbtsync/dataoperation_base.h +++ b/tools/bitcoin/gbtsync/dataoperation_base.h @@ -5,58 +5,55 @@ #include #include -class DataHandlerLoadOperationBase -{ +class DataHandlerLoadOperationBase { public: - virtual ~DataHandlerLoadOperationBase() = default; - virtual std::string Id() const = 0; - virtual bool DoLoad(std::vector& outData) = 0; + virtual ~DataHandlerLoadOperationBase() = default; + virtual std::string Id() const = 0; + virtual bool DoLoad(std::vector &outData) = 
0; }; -class DataHandler -{ +class DataHandler { public: - DataHandler(DataHandlerLoadOperationBase* loadOperation); - DataHandler(DataHandlerLoadOperationBase* loadOperation, std::vector&& data); - - bool Load(); - void Unload(); - bool IsLoaded() const; - std::vector GiveupData() - { - std::vector newContainer; - newContainer = std::move(m_Data); - Unload(); - return newContainer; - } - const std::vector& Data() const - { - return m_Data; - } - - const std::vector& GetData() - { - if(!IsLoaded()) - Load(); - return m_Data; - } + DataHandler(DataHandlerLoadOperationBase *loadOperation); + DataHandler( + DataHandlerLoadOperationBase *loadOperation, std::vector &&data); + + bool Load(); + void Unload(); + bool IsLoaded() const; + std::vector GiveupData() { + std::vector newContainer; + newContainer = std::move(m_Data); + Unload(); + return newContainer; + } + const std::vector &Data() const { return m_Data; } + + const std::vector &GetData() { + if (!IsLoaded()) + Load(); + return m_Data; + } private: - - std::unique_ptr m_LoadOperation; - std::vector m_Data; - bool m_Loaded; + std::unique_ptr m_LoadOperation; + std::vector m_Data; + bool m_Loaded; }; -class DataOperationManagerBase -{ +class DataOperationManagerBase { public: - virtual ~DataOperationManagerBase() = default; - virtual std::unique_ptr GetDataHandler(std::string id) const = 0; - virtual bool GetDataList(std::vector& out, std::regex regex = std::regex(".*"), bool checkNotation = false) = 0; - virtual std::unique_ptr StoreData(std::string id, std::vector&& data, bool forceOverwrite = false)= 0; - virtual bool DeleteData(const std::string& id) = 0; + virtual ~DataOperationManagerBase() = default; + virtual std::unique_ptr GetDataHandler(std::string id) const = 0; + virtual bool GetDataList( + std::vector &out, + std::regex regex = std::regex(".*"), + bool checkNotation = false) = 0; + virtual std::unique_ptr StoreData( + std::string id, + std::vector &&data, + bool forceOverwrite = false) = 0; + virtual 
bool DeleteData(const std::string &id) = 0; }; - #endif diff --git a/tools/bitcoin/gbtsync/dataoperation_file.h b/tools/bitcoin/gbtsync/dataoperation_file.h index 3c5975ae0..3c318cdb0 100644 --- a/tools/bitcoin/gbtsync/dataoperation_file.h +++ b/tools/bitcoin/gbtsync/dataoperation_file.h @@ -3,67 +3,57 @@ #include "dataoperation_base.h" -class DataHandlerLoadOperationFile : public DataHandlerLoadOperationBase -{ +class DataHandlerLoadOperationFile : public DataHandlerLoadOperationBase { public: - bool DoLoad(std::vector& outData) override; - const std::string& GetFilename() const - { - return m_Filename; - } - std::string Id() const override - { - return GetFilename(); - } + bool DoLoad(std::vector &outData) override; + const std::string &GetFilename() const { return m_Filename; } + std::string Id() const override { return GetFilename(); } private: - friend class FileDataOperationManager; - DataHandlerLoadOperationFile(std::string filename, int startOffset, int dataSize); + friend class FileDataOperationManager; + DataHandlerLoadOperationFile( + std::string filename, int startOffset, int dataSize); - std::string m_Filename; - int m_StartOffset; - int m_DataSize; + std::string m_Filename; + int m_StartOffset; + int m_DataSize; }; -class FileDataOperationManager : public DataOperationManagerBase -{ +class FileDataOperationManager : public DataOperationManagerBase { public: + FileDataOperationManager( + std::string path, + std::vector filePrefix, + std::vector filePosfix, + std::string trashPath); - FileDataOperationManager(std::string path, std::vector filePrefix, std::vector filePosfix, std::string trashPath); + std::unique_ptr GetDataHandler(std::string id) const override; + bool GetDataList( + std::vector &out, + std::regex regex = std::regex(".*"), + bool checkNotation = false) override; + std::unique_ptr StoreData( + std::string id, + std::vector &&data, + bool forceOverwrite = false) override; + bool DeleteData(const std::string &id) override; - std::unique_ptr 
GetDataHandler(std::string id) const override; - bool GetDataList(std::vector& out, std::regex regex = std::regex(".*"), bool checkNotation = false) override; - std::unique_ptr StoreData(std::string id, std::vector&& data, bool forceOverwrite = false) override; - bool DeleteData(const std::string& id) override; + int GetFileDataSize(const std::string &filename) const; + bool IsFileReadyToLoad(const std::string &filename) const; - int GetFileDataSize(const std::string& filename) const; - bool IsFileReadyToLoad(const std::string& filename) const; + const std::string &GetPath() const { return m_DirPath; } - const std::string& GetPath() const - { - return m_DirPath; - } + const std::string &GetTrashPath() const { return m_DirTrashPath; } - const std::string& GetTrashPath() const - { - return m_DirTrashPath; - } + const std::vector &GetFilePrefix() const { return m_FilePrefix; } - const std::vector& GetFilePrefix() const - { - return m_FilePrefix; - } - - const std::vector& GetFilePostfix() const - { - return m_FilePostfix; - } + const std::vector &GetFilePostfix() const { return m_FilePostfix; } private: - std::vector m_FilePrefix; - std::vector m_FilePostfix; - std::string m_DirPath; - std::string m_DirTrashPath; + std::vector m_FilePrefix; + std::vector m_FilePostfix; + std::string m_DirPath; + std::string m_DirTrashPath; }; #endif // _FILE_OPERATIONS_H_ \ No newline at end of file diff --git a/tools/bitcoin/gbtsync/dataoperation_mysql.h b/tools/bitcoin/gbtsync/dataoperation_mysql.h index bebf43381..cfc0787fa 100644 --- a/tools/bitcoin/gbtsync/dataoperation_mysql.h +++ b/tools/bitcoin/gbtsync/dataoperation_mysql.h @@ -4,72 +4,72 @@ #include "dataoperation_base.h" #include -struct StatementCloser -{ - StatementCloser(MYSQL_STMT* s) - : statement(s) - { - } +struct StatementCloser { + StatementCloser(MYSQL_STMT *s) + : statement(s) {} - ~StatementCloser() - { - if(statement) - { - mysql_stmt_close(statement); - } + ~StatementCloser() { + if (statement) { + 
mysql_stmt_close(statement); } + } - MYSQL_STMT* statement; + MYSQL_STMT *statement; }; - -class DataHandlerLoadOperationMysql : public DataHandlerLoadOperationBase -{ +class DataHandlerLoadOperationMysql : public DataHandlerLoadOperationBase { public: - ~DataHandlerLoadOperationMysql(); - bool DoLoad(std::vector& outData) override; - std::string Id() const override - { - return m_ID; - } + ~DataHandlerLoadOperationMysql(); + bool DoLoad(std::vector &outData) override; + std::string Id() const override { return m_ID; } private: - friend class MysqlDataOperationManager; - DataHandlerLoadOperationMysql(MYSQL* const * connection, std::string id, std::string tableName); - - MYSQL* const * m_Connection; - std::string m_ID; - std::string m_TableName; - MYSQL_BIND m_BindParam[1]; + friend class MysqlDataOperationManager; + DataHandlerLoadOperationMysql( + MYSQL *const *connection, std::string id, std::string tableName); + + MYSQL *const *m_Connection; + std::string m_ID; + std::string m_TableName; + MYSQL_BIND m_BindParam[1]; }; -class MysqlDataOperationManager : public DataOperationManagerBase -{ +class MysqlDataOperationManager : public DataOperationManagerBase { public: + MysqlDataOperationManager( + std::string server, + std::string username, + std::string password, + std::string dbname, + std::string tablename, + int port = 0); + ~MysqlDataOperationManager(); + + std::unique_ptr GetDataHandler(std::string id) const override; + bool GetDataList( + std::vector &out, + std::regex regex = std::regex(".*"), + bool checkNotation = false) override; + std::unique_ptr StoreData( + std::string id, + std::vector &&data, + bool forceOverwrite = false) override; + bool DeleteData(const std::string &id) override; + + bool IsExists(const std::string &id) const; - MysqlDataOperationManager(std::string server, std::string username, std::string password - , std::string dbname, std::string tablename, int port = 0); - ~MysqlDataOperationManager(); - - std::unique_ptr 
GetDataHandler(std::string id) const override; - bool GetDataList(std::vector& out, std::regex regex = std::regex(".*"), bool checkNotation = false) override; - std::unique_ptr StoreData(std::string id, std::vector&& data, bool forceOverwrite = false) override; - bool DeleteData(const std::string& id) override; - - bool IsExists(const std::string& id) const; private: - bool InitConnection(); - bool ValidateConnection(); - - std::string m_Server; - std::string m_Username; - std::string m_Password; - std::string m_Database; - std::string m_TableName; - int m_Port; + bool InitConnection(); + bool ValidateConnection(); - MYSQL* m_Connection; + std::string m_Server; + std::string m_Username; + std::string m_Password; + std::string m_Database; + std::string m_TableName; + int m_Port; + MYSQL *m_Connection; }; #endif // _MYSQL_OPERATIONS_H_ \ No newline at end of file diff --git a/tools/bitcoin/gbtsync/gbtsync.h b/tools/bitcoin/gbtsync/gbtsync.h index ca5f74732..b93179e69 100644 --- a/tools/bitcoin/gbtsync/gbtsync.h +++ b/tools/bitcoin/gbtsync/gbtsync.h @@ -12,38 +12,40 @@ #include - using namespace std; - -class SyncWorker -{ +class SyncWorker { public: - void DoDiffs(); + void DoDiffs(); - std::unique_ptr dataManager; - std::chrono::duration diffPeriod; + std::unique_ptr dataManager; + std::chrono::duration diffPeriod; - DataManager::AddAndRemoveDataListPair diffResult; - std::chrono::time_point lastEndDiff; + DataManager::AddAndRemoveDataListPair diffResult; + std::chrono::time_point lastEndDiff; }; -class SyncManager -{ +class SyncManager { public: - SyncManager(); - void Run(); - void AddWorker(DataManager* dataManager, std::chrono::duration diffPeriod); - void Stop(); + SyncManager(); + void Run(); + void + AddWorker(DataManager *dataManager, std::chrono::duration diffPeriod); + void Stop(); private: - void DoInit(); - void DoDiffs(); - void DoSync(); - void Sync(DataManager::AddAndRemoveDataListPair& diffResult, DataManager& sourceManager, DataManager& destManager, 
bool giveupData); + void DoInit(); + void DoDiffs(); + void DoSync(); + void Sync( + DataManager::AddAndRemoveDataListPair &diffResult, + DataManager &sourceManager, + DataManager &destManager, + bool giveupData); + private: - std::vector> m_Workers; - bool m_KeepRun; + std::vector> m_Workers; + bool m_KeepRun; }; #endif \ No newline at end of file diff --git a/tools/kafka_repeater/main.cc b/tools/kafka_repeater/main.cc index 2cce1bfe8..1bd4717d6 100644 --- a/tools/kafka_repeater/main.cc +++ b/tools/kafka_repeater/main.cc @@ -52,15 +52,15 @@ void handler(int sig) { } void usage() { - fprintf(stderr, "Usage:\n\tkafka_repeater -c \"kafka_repeater.cfg\" -l \"log_kafka_repeater\"\n"); + fprintf( + stderr, + "Usage:\n\tkafka_repeater -c \"kafka_repeater.cfg\" -l " + "\"log_kafka_repeater\"\n"); } -template -void readFromSetting(const S &setting, - const string &key, - V &value, - bool optional = false) -{ +template +void readFromSetting( + const S &setting, const string &key, V &value, bool optional = false) { if (!setting.lookupValue(key, value) && !optional) { LOG(FATAL) << "config section missing key: " << key; } @@ -68,7 +68,7 @@ void readFromSetting(const S &setting, int main(int argc, char **argv) { char *optLogDir = NULL; - char *optConf = NULL; + char *optConf = NULL; int c; if (argc <= 1) { @@ -77,15 +77,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "c:l:h")) != -1) { switch (c) { - case 'c': - optConf = optarg; - break; - case 'l': - optLogDir = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'c': + optConf = optarg; + break; + case 'l': + optLogDir = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -98,23 +99,22 @@ int main(int argc, char **argv) { } // Log messages at a level >= this flag are automatically sent to // stderr in addition to log files. 
- FLAGS_stderrthreshold = 3; // 3: FATAL - FLAGS_max_log_size = 100; // max log file size 100 MB - FLAGS_logbuflevel = -1; // don't buffer logs + FLAGS_stderrthreshold = 3; // 3: FATAL + FLAGS_max_log_size = 100; // max log file size 100 MB + FLAGS_logbuflevel = -1; // don't buffer logs FLAGS_stop_logging_if_full_disk = true; // Read the file. If there is an error, report it and exit. libconfig::Config cfg; - try - { + try { cfg.readFile(optConf); - } catch(const FileIOException &fioex) { + } catch (const FileIOException &fioex) { std::cerr << "I/O error while reading file." << std::endl; - return(EXIT_FAILURE); - } catch(const ParseException &pex) { + return (EXIT_FAILURE); + } catch (const ParseException &pex) { std::cerr << "Parse error at " << pex.getFile() << ":" << pex.getLine() - << " - " << pex.getError() << std::endl; - return(EXIT_FAILURE); + << " - " << pex.getError() << std::endl; + return (EXIT_FAILURE); } // lock cfg file: @@ -126,7 +126,7 @@ int main(int argc, char **argv) { }*/ signal(SIGTERM, handler); - signal(SIGINT, handler); + signal(SIGINT, handler); try { bool enableShareConvBtcV2ToV1 = false; @@ -136,83 +136,112 @@ int main(int argc, char **argv) { bool enableMessageHexPrinter = false; int repeatedNumberDisplayInterval = 10; - readFromSetting(cfg, "share_convertor.bitcoin_v2_to_v1", enableShareConvBtcV2ToV1, true); - readFromSetting(cfg, "share_diff_changer.bitcoin_v1", enableShareDiffChangerBtcV1, true); - readFromSetting(cfg, "share_diff_changer.bitcoin_v2_to_v1", enableShareDiffChangerBtcV2ToV1, true); - readFromSetting(cfg, "share_printer.bitcoin_v1", enableSharePrinterBtcV1, true); - readFromSetting(cfg, "message_printer.print_hex", enableMessageHexPrinter, true); - readFromSetting(cfg, "log.repeated_number_display_interval", repeatedNumberDisplayInterval, true); - + readFromSetting( + cfg, + "share_convertor.bitcoin_v2_to_v1", + enableShareConvBtcV2ToV1, + true); + readFromSetting( + cfg, + "share_diff_changer.bitcoin_v1", + 
enableShareDiffChangerBtcV1, + true); + readFromSetting( + cfg, + "share_diff_changer.bitcoin_v2_to_v1", + enableShareDiffChangerBtcV2ToV1, + true); + readFromSetting( + cfg, "share_printer.bitcoin_v1", enableSharePrinterBtcV1, true); + readFromSetting( + cfg, "message_printer.print_hex", enableMessageHexPrinter, true); + readFromSetting( + cfg, + "log.repeated_number_display_interval", + repeatedNumberDisplayInterval, + true); if (enableShareConvBtcV2ToV1) { gKafkaRepeater = new ShareConvertorBitcoinV2ToV1( - cfg.lookup("kafka.in_brokers"), cfg.lookup("kafka.in_topic"), cfg.lookup("kafka.in_group_id"), - cfg.lookup("kafka.out_brokers"), cfg.lookup("kafka.out_topic") - ); - } - else if (enableShareDiffChangerBtcV1) { + cfg.lookup("kafka.in_brokers"), + cfg.lookup("kafka.in_topic"), + cfg.lookup("kafka.in_group_id"), + cfg.lookup("kafka.out_brokers"), + cfg.lookup("kafka.out_topic")); + } else if (enableShareDiffChangerBtcV1) { gKafkaRepeater = new ShareDiffChangerBitcoinV1( - cfg.lookup("kafka.in_brokers"), cfg.lookup("kafka.in_topic"), cfg.lookup("kafka.in_group_id"), - cfg.lookup("kafka.out_brokers"), cfg.lookup("kafka.out_topic") - ); + cfg.lookup("kafka.in_brokers"), + cfg.lookup("kafka.in_topic"), + cfg.lookup("kafka.in_group_id"), + cfg.lookup("kafka.out_brokers"), + cfg.lookup("kafka.out_topic")); int jobTimeOffset = 30; - readFromSetting(cfg, "share_diff_changer.job_time_offset", jobTimeOffset, true); - - if (!dynamic_cast(gKafkaRepeater)->initStratumJobConsumer( - cfg.lookup("share_diff_changer.job_brokers"), cfg.lookup("share_diff_changer.job_topic"), - cfg.lookup("share_diff_changer.job_group_id"), jobTimeOffset - )) { + readFromSetting( + cfg, "share_diff_changer.job_time_offset", jobTimeOffset, true); + + if (!dynamic_cast(gKafkaRepeater) + ->initStratumJobConsumer( + cfg.lookup("share_diff_changer.job_brokers"), + cfg.lookup("share_diff_changer.job_topic"), + cfg.lookup("share_diff_changer.job_group_id"), + jobTimeOffset)) { LOG(FATAL) << "kafka 
repeater init failed"; return 1; } - } - else if (enableShareDiffChangerBtcV2ToV1) { + } else if (enableShareDiffChangerBtcV2ToV1) { gKafkaRepeater = new ShareDiffChangerBitcoinV2ToV1( - cfg.lookup("kafka.in_brokers"), cfg.lookup("kafka.in_topic"), cfg.lookup("kafka.in_group_id"), - cfg.lookup("kafka.out_brokers"), cfg.lookup("kafka.out_topic") - ); + cfg.lookup("kafka.in_brokers"), + cfg.lookup("kafka.in_topic"), + cfg.lookup("kafka.in_group_id"), + cfg.lookup("kafka.out_brokers"), + cfg.lookup("kafka.out_topic")); int jobTimeOffset = 30; - readFromSetting(cfg, "share_diff_changer.job_time_offset", jobTimeOffset, true); - - if (!dynamic_cast(gKafkaRepeater)->initStratumJobConsumer( - cfg.lookup("share_diff_changer.job_brokers"), cfg.lookup("share_diff_changer.job_topic"), - cfg.lookup("share_diff_changer.job_group_id"), jobTimeOffset - )) { + readFromSetting( + cfg, "share_diff_changer.job_time_offset", jobTimeOffset, true); + + if (!dynamic_cast(gKafkaRepeater) + ->initStratumJobConsumer( + cfg.lookup("share_diff_changer.job_brokers"), + cfg.lookup("share_diff_changer.job_topic"), + cfg.lookup("share_diff_changer.job_group_id"), + jobTimeOffset)) { LOG(FATAL) << "kafka repeater init failed"; return 1; } - } - else if (enableSharePrinterBtcV1) { + } else if (enableSharePrinterBtcV1) { gKafkaRepeater = new SharePrinterBitcoinV1( - cfg.lookup("kafka.in_brokers"), cfg.lookup("kafka.in_topic"), cfg.lookup("kafka.in_group_id"), - cfg.lookup("kafka.out_brokers"), cfg.lookup("kafka.out_topic") - ); - } - else if (enableMessageHexPrinter) { + cfg.lookup("kafka.in_brokers"), + cfg.lookup("kafka.in_topic"), + cfg.lookup("kafka.in_group_id"), + cfg.lookup("kafka.out_brokers"), + cfg.lookup("kafka.out_topic")); + } else if (enableMessageHexPrinter) { gKafkaRepeater = new MessagePrinter( - cfg.lookup("kafka.in_brokers"), cfg.lookup("kafka.in_topic"), cfg.lookup("kafka.in_group_id"), - cfg.lookup("kafka.out_brokers"), cfg.lookup("kafka.out_topic") - ); - } - else { + 
cfg.lookup("kafka.in_brokers"), + cfg.lookup("kafka.in_topic"), + cfg.lookup("kafka.in_group_id"), + cfg.lookup("kafka.out_brokers"), + cfg.lookup("kafka.out_topic")); + } else { gKafkaRepeater = new KafkaRepeater( - cfg.lookup("kafka.in_brokers"), cfg.lookup("kafka.in_topic"), cfg.lookup("kafka.in_group_id"), - cfg.lookup("kafka.out_brokers"), cfg.lookup("kafka.out_topic") - ); + cfg.lookup("kafka.in_brokers"), + cfg.lookup("kafka.in_topic"), + cfg.lookup("kafka.in_group_id"), + cfg.lookup("kafka.out_brokers"), + cfg.lookup("kafka.out_topic")); } - if (!gKafkaRepeater->init()) { LOG(FATAL) << "kafka repeater init failed"; return 1; } - gKafkaRepeater->runMessageNumberDisplayThread(repeatedNumberDisplayInterval); + gKafkaRepeater->runMessageNumberDisplayThread( + repeatedNumberDisplayInterval); gKafkaRepeater->run(); - } - catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/tools/share_convertor/main.cc b/tools/share_convertor/main.cc index f776767df..6ebdb5d96 100644 --- a/tools/share_convertor/main.cc +++ b/tools/share_convertor/main.cc @@ -41,12 +41,15 @@ using namespace std; using namespace libconfig; void usage() { - fprintf(stderr, "Usage:\n\tshare_convertor -i \"\" -o \"\"\n"); + fprintf( + stderr, + "Usage:\n\tshare_convertor -i \"\" -o " + "\"\"\n"); } int main(int argc, char **argv) { char *inFile = NULL; - char *outFile = NULL; + char *outFile = NULL; int c; if (argc <= 1) { @@ -55,15 +58,16 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "i:o:h")) != -1) { switch (c) { - case 'i': - inFile = optarg; - break; - case 'o': - outFile = optarg; - break; - case 'h': default: - usage(); - exit(0); + case 'i': + inFile = optarg; + break; + case 'o': + outFile = optarg; + break; + case 'h': + default: + usage(); + exit(0); } } @@ -78,10 +82,11 @@ int main(int argc, char **argv) { } if (outFile == NULL) { - LOG(FATAL) << "missing output file (-o \"\")"; + LOG(FATAL) + 
<< "missing output file (-o \"\")"; return 1; } - + // open file (auto-detecting compression format or non-compression) LOG(INFO) << "open input file: " << inFile; zstr::ifstream in(inFile, std::ios::binary); @@ -109,8 +114,7 @@ int main(int argc, char **argv) { in.read((char *)&v2, sizeof(v2)); } LOG(INFO) << "completed."; - } - catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; } diff --git a/tools/sharelog_to_parquet/main.cc b/tools/sharelog_to_parquet/main.cc index d433533eb..28f86da29 100644 --- a/tools/sharelog_to_parquet/main.cc +++ b/tools/sharelog_to_parquet/main.cc @@ -51,11 +51,14 @@ using parquet::Type; using parquet::schema::GroupNode; using parquet::schema::PrimitiveNode; - const size_t DEFAULT_NUM_ROWS_PER_ROW_GROUP = 1000000; void usage() { - fprintf(stderr, "Usage:\n\tsharelog_to_parquet -i \"\" -o \"\" [-n %ld]\n", DEFAULT_NUM_ROWS_PER_ROW_GROUP); + fprintf( + stderr, + "Usage:\n\tsharelog_to_parquet -i \"\" -o " + "\"\" [-n %ld]\n", + DEFAULT_NUM_ROWS_PER_ROW_GROUP); } class ParquetWriter { @@ -95,77 +98,99 @@ class ParquetWriter { auto rgWriter = fileWriter->AppendRowGroup(); // job_id - auto jobIdWriter = static_cast(rgWriter->NextColumn()); - for (share=sharesBegin; share!=sharesEnd; share++) { - jobIdWriter->WriteBatch(1, nullptr, nullptr, (int64_t*)&(share->jobId_)); + auto jobIdWriter = + static_cast(rgWriter->NextColumn()); + for (share = sharesBegin; share != sharesEnd; share++) { + jobIdWriter->WriteBatch(1, nullptr, nullptr, (int64_t *)&(share->jobId_)); } // worker_id - auto workerIdWriter = static_cast(rgWriter->NextColumn()); - for (share=sharesBegin; share!=sharesEnd; share++) { - workerIdWriter->WriteBatch(1, nullptr, nullptr, (int64_t*)&(share->workerHashId_)); + auto workerIdWriter = + static_cast(rgWriter->NextColumn()); + for (share = sharesBegin; share != sharesEnd; share++) { + workerIdWriter->WriteBatch( + 1, nullptr, nullptr, (int64_t *)&(share->workerHashId_)); } 
// ip_long - auto ipLongWriter = static_cast(rgWriter->NextColumn()); - for (share=sharesBegin; share!=sharesEnd; share++) { - ipLongWriter->WriteBatch(1, nullptr, nullptr, (int32_t*)&(share->ip_)); + auto ipLongWriter = + static_cast(rgWriter->NextColumn()); + for (share = sharesBegin; share != sharesEnd; share++) { + ipLongWriter->WriteBatch(1, nullptr, nullptr, (int32_t *)&(share->ip_)); } // user_id - auto userIdWriter = static_cast(rgWriter->NextColumn()); - for (share=sharesBegin; share!=sharesEnd; share++) { - userIdWriter->WriteBatch(1, nullptr, nullptr, (int32_t*)&(share->userId_)); + auto userIdWriter = + static_cast(rgWriter->NextColumn()); + for (share = sharesBegin; share != sharesEnd; share++) { + userIdWriter->WriteBatch( + 1, nullptr, nullptr, (int32_t *)&(share->userId_)); } // share_diff - auto shareDiffWriter = static_cast(rgWriter->NextColumn()); - for (share=sharesBegin; share!=sharesEnd; share++) { - shareDiffWriter->WriteBatch(1, nullptr, nullptr, (int64_t*)&(share->share_)); + auto shareDiffWriter = + static_cast(rgWriter->NextColumn()); + for (share = sharesBegin; share != sharesEnd; share++) { + shareDiffWriter->WriteBatch( + 1, nullptr, nullptr, (int64_t *)&(share->share_)); } // timestamp - auto timestampWriter = static_cast(rgWriter->NextColumn()); - for (share=sharesBegin; share!=sharesEnd; share++) { + auto timestampWriter = + static_cast(rgWriter->NextColumn()); + for (share = sharesBegin; share != sharesEnd; share++) { int64_t timeMillis = share->timestamp_ * 1000; timestampWriter->WriteBatch(1, nullptr, nullptr, &timeMillis); } // block_bits - auto blkBitsWriter = static_cast(rgWriter->NextColumn()); - for (share=sharesBegin; share!=sharesEnd; share++) { - blkBitsWriter->WriteBatch(1, nullptr, nullptr, (int32_t*)&(share->blkBits_)); + auto blkBitsWriter = + static_cast(rgWriter->NextColumn()); + for (share = sharesBegin; share != sharesEnd; share++) { + blkBitsWriter->WriteBatch( + 1, nullptr, nullptr, (int32_t 
*)&(share->blkBits_)); } // result - auto resultWriter = static_cast(rgWriter->NextColumn()); - for (share=sharesBegin; share!=sharesEnd; share++) { - resultWriter->WriteBatch(1, nullptr, nullptr, (int32_t*)&(share->result_)); + auto resultWriter = + static_cast(rgWriter->NextColumn()); + for (share = sharesBegin; share != sharesEnd; share++) { + resultWriter->WriteBatch( + 1, nullptr, nullptr, (int32_t *)&(share->result_)); } - + // Save current RowGroup rgWriter->Close(); } - ~ParquetWriter() { - fileWriter->Close(); - } + ~ParquetWriter() { fileWriter->Close(); } protected: std::shared_ptr setupSchema() { parquet::schema::NodeVector fields; - fields.push_back(PrimitiveNode::Make("job_id", Repetition::REQUIRED, Type::INT64, LogicalType::INT_64)); - fields.push_back(PrimitiveNode::Make("worker_id", Repetition::REQUIRED, Type::INT64, LogicalType::INT_64)); - fields.push_back(PrimitiveNode::Make("ip_long", Repetition::REQUIRED, Type::INT32, LogicalType::INT_32)); - fields.push_back(PrimitiveNode::Make("user_id", Repetition::REQUIRED, Type::INT32, LogicalType::INT_32)); - fields.push_back(PrimitiveNode::Make("share_diff", Repetition::REQUIRED, Type::INT64, LogicalType::INT_64)); - fields.push_back(PrimitiveNode::Make("timestamp", Repetition::REQUIRED, Type::INT64, LogicalType::TIMESTAMP_MILLIS)); - fields.push_back(PrimitiveNode::Make("block_bits", Repetition::REQUIRED, Type::INT32, LogicalType::INT_32)); - fields.push_back(PrimitiveNode::Make("result", Repetition::REQUIRED, Type::INT32, LogicalType::INT_32)); - - // Create a GroupNode named 'share_bitcoin_v1' using the primitive nodes defined above - // This GroupNode is the root node of the schema tree + fields.push_back(PrimitiveNode::Make( + "job_id", Repetition::REQUIRED, Type::INT64, LogicalType::INT_64)); + fields.push_back(PrimitiveNode::Make( + "worker_id", Repetition::REQUIRED, Type::INT64, LogicalType::INT_64)); + fields.push_back(PrimitiveNode::Make( + "ip_long", Repetition::REQUIRED, Type::INT32, 
LogicalType::INT_32)); + fields.push_back(PrimitiveNode::Make( + "user_id", Repetition::REQUIRED, Type::INT32, LogicalType::INT_32)); + fields.push_back(PrimitiveNode::Make( + "share_diff", Repetition::REQUIRED, Type::INT64, LogicalType::INT_64)); + fields.push_back(PrimitiveNode::Make( + "timestamp", + Repetition::REQUIRED, + Type::INT64, + LogicalType::TIMESTAMP_MILLIS)); + fields.push_back(PrimitiveNode::Make( + "block_bits", Repetition::REQUIRED, Type::INT32, LogicalType::INT_32)); + fields.push_back(PrimitiveNode::Make( + "result", Repetition::REQUIRED, Type::INT32, LogicalType::INT_32)); + + // Create a GroupNode named 'share_bitcoin_v1' using the primitive nodes + // defined above This GroupNode is the root node of the schema tree return std::static_pointer_cast( GroupNode::Make("share_bitcoin_v1", Repetition::REQUIRED, fields)); } @@ -173,7 +198,7 @@ class ParquetWriter { int main(int argc, char **argv) { char *inFile = NULL; - char *outFile = NULL; + char *outFile = NULL; size_t batchShareNum = DEFAULT_NUM_ROWS_PER_ROW_GROUP; int c; @@ -183,18 +208,19 @@ int main(int argc, char **argv) { } while ((c = getopt(argc, argv, "i:o:n:h")) != -1) { switch (c) { - case 'i': - inFile = optarg; - break; - case 'o': - outFile = optarg; - break; - case 'n': - batchShareNum = strtoull(optarg, nullptr, 10); - break; - case 'h': default: - usage(); - exit(0); + case 'i': + inFile = optarg; + break; + case 'o': + outFile = optarg; + break; + case 'n': + batchShareNum = strtoull(optarg, nullptr, 10); + break; + case 'h': + default: + usage(); + exit(0); } } @@ -212,7 +238,7 @@ int main(int argc, char **argv) { LOG(FATAL) << "missing output file (-o \"\")"; return 1; } - + // open file (auto-detecting compression format or non-compression) LOG(INFO) << "open input file: " << inFile; zstr::ifstream in(inFile, std::ios::binary); @@ -225,7 +251,8 @@ int main(int argc, char **argv) { ParquetWriter out; auto stat = out.open(outFile); if (!stat.ok()) { - LOG(FATAL) << "open output 
file " << outFile << " failed: " << stat.message(); + LOG(FATAL) << "open output file " << outFile + << " failed: " << stat.message(); return 1; } @@ -247,8 +274,7 @@ int main(int argc, char **argv) { delete[] shares; LOG(INFO) << "completed."; - } - catch (std::exception & e) { + } catch (std::exception &e) { LOG(FATAL) << "exception: " << e.what(); return 1; }