diff --git a/apps/io_tester/io_tester.cc b/apps/io_tester/io_tester.cc
index 10cc5c2cdc..4ca31e3f89 100644
--- a/apps/io_tester/io_tester.cc
+++ b/apps/io_tester/io_tester.cc
@@ -18,6 +18,7 @@
 /*
  * Copyright (C) 2017 ScyllaDB
  */
+#include "seastar/core/shard_id.hh"
 #include
 #include
 #include
@@ -36,6 +37,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -71,7 +73,7 @@ static auto random_seed = std::chrono::duration_cast(
 static thread_local std::default_random_engine random_generator(random_seed);
 
 class context;
-enum class request_type { seqread, seqwrite, randread, randwrite, append, cpu, unlink };
+enum class request_type { seqread, seqwrite, randread, randwrite, append, cpu, unlink, steal };
 
 namespace std {
 
@@ -451,6 +453,7 @@ class class_data {
             { request_type::append , "APPEND" },
             { request_type::cpu , "CPU" },
             { request_type::unlink, "UNLINK" },
+            { request_type::steal, "STEAL" },
         }[_config.type];;
     }
 
@@ -822,6 +825,70 @@ class cpu_class_data : public class_data {
     }
 };
 
+
+static double to_ms(auto d) {
+    return (d / 1ns) / 1000000.;
+}
+
+class steal_class_data : public class_data {
+    using clock = std::chrono::steady_clock;
+
+    static constexpr auto output_interval = 100ms;
+
+    clock::time_point _next_output = clock::now() + output_interval;
+    bool first = true;
+    double _last_true, _last_metric;
+    std::chrono::nanoseconds _last_awake, _last_cpu;
+public:
+    steal_class_data(job_config cfg) : class_data(std::move(cfg)) {}
+
+    future<> do_start(sstring dir, directory_entry_type type) override {
+        return make_ready_future<>();
+    }
+
+    future issue_request(char *buf, io_intent* intent) override {
+        // We do want the execution time to be a busy loop, and not just a bunch of
+        // continuations until our time is up: by doing this we can also simulate the behavior
+        // of I/O continuations in the face of reactor stalls.
+        // fmt::print("request\n");
+
+        auto now = clock::now();
+        if (now >= _next_output) {
+            auto& e = engine();
+
+            auto awake = e.total_awake_time();
+            auto cpu = e.total_cpu_time();
+            auto true_steal = to_ms(awake - cpu);
+            auto metric_steal = to_ms(e.total_steal_time());
+
+            if (!first) {
+                fmt::print("{} steal: {:6.3f} ms awake: {:6.3f} cpu: {:6.3f}\n",
+                    this_shard_id(),
+                    true_steal - _last_true,
+                    to_ms(awake - _last_awake),
+                    to_ms(cpu - _last_cpu)
+                );
+                // fmt::print("metric steal: {:6.3f} ms\n", metric_steal - _last_metric);
+                // fmt::print("metric steal: {:.2}\n", metric_steal - _last_metric);
+            }
+            first = false;
+            _last_true = true_steal;
+            _last_metric = metric_steal;
+            _last_awake = awake;
+            _last_cpu = cpu;
+
+            _next_output = now + output_interval;
+        }
+
+        return make_ready_future(1);
+    }
+
+    virtual void emit_results(YAML::Emitter& out) override {
+        auto throughput = total_data() / total_duration().count();
+        out << YAML::Key << "throughput" << YAML::Value << throughput;
+    }
+};
+
 std::unique_ptr job_config::gen_class_data() {
     if (type == request_type::cpu) {
         return std::make_unique(*this);
@@ -829,9 +896,12 @@ std::unique_ptr job_config::gen_class_data() {
         return std::make_unique(*this);
     } else if ((type == request_type::seqread) || (type == request_type::randread)) {
        return std::make_unique(*this);
+    } else if (type == request_type::steal) {
+        return std::make_unique(*this);
     } else {
         return std::make_unique(*this);
     }
+    throw std::runtime_error("bad type");
 }
 
 /// YAML parsing functions
@@ -908,6 +978,7 @@ struct convert {
             { "randwrite", request_type::randwrite },
             { "append", request_type::append},
             { "cpu", request_type::cpu},
+            { "steal", request_type::steal},
             { "unlink", request_type::unlink },
         };
         auto reqstr = node.as();
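
Note on what the new "steal" job reports: each interval it prints the per-interval delta of (total_awake_time - total_cpu_time), i.e. time the shard's reactor thread was awake but not actually running on a CPU, which it can compare against the reactor's own total_steal_time() metric. The sketch below is a minimal, standalone illustration of that same delta-reporting pattern outside Seastar; it is not part of the patch. It busy-polls like a reactor that never sleeps, approximates "awake" with wall-clock time and "cpu" with the POSIX per-thread CPU clock (CLOCK_THREAD_CPUTIME_ID); the 100ms interval and 50-report cap are arbitrary illustration choices.

#include <chrono>
#include <cstdio>
#include <ctime>

using namespace std::chrono;

// Cumulative CPU time consumed by the calling thread (POSIX per-thread clock).
static nanoseconds thread_cpu_time() {
    timespec ts;
    clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
    return seconds(ts.tv_sec) + nanoseconds(ts.tv_nsec);
}

static double to_ms(nanoseconds d) {
    return duration_cast<duration<double, std::milli>>(d).count();
}

int main() {
    const auto interval = milliseconds(100);
    const auto start = steady_clock::now();
    auto next = start + interval;
    nanoseconds last_awake{0};
    nanoseconds last_cpu = thread_cpu_time();

    for (int reports = 0; reports < 50; ) {
        if (steady_clock::now() < next) {
            continue;   // busy-poll, like a reactor thread that never sleeps
        }
        auto awake = duration_cast<nanoseconds>(steady_clock::now() - start);
        auto cpu = thread_cpu_time();
        // Steal for this interval: time the thread was awake (wall clock)
        // minus time it was actually scheduled on a CPU.
        std::printf("steal: %6.3f ms awake: %6.3f cpu: %6.3f\n",
                    to_ms((awake - last_awake) - (cpu - last_cpu)),
                    to_ms(awake - last_awake),
                    to_ms(cpu - last_cpu));
        last_awake = awake;
        last_cpu = cpu;
        next = steady_clock::now() + interval;
        ++reports;
    }
}

Run it on an otherwise idle machine and the steal column stays near zero; oversubscribe the host (or run it inside a throttled VM) and the printed steal per interval grows, which is the effect the steal_class_data workload is meant to expose per shard.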