From 4bd4ae656eaca7504aec82d305d2228905444e19 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Thu, 30 Jul 2015 17:45:49 -0400
Subject: [PATCH 01/13] Allow secondary location for intercepts.

Allow the configuration to specify alternative locations for intercept
files, instead of assuming that they are all located in
riak_test/intercepts; this allows projects to keep related intercepts
locally.
---
 src/rt_intercept.erl | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/rt_intercept.erl b/src/rt_intercept.erl
index abb5890f8..e3e6461fc 100644
--- a/src/rt_intercept.erl
+++ b/src/rt_intercept.erl
@@ -25,11 +25,12 @@ files_to_mods(Files) ->
     [list_to_atom(filename:basename(F, ".erl")) || F <- Files].
 
-default_intercept_path_glob() ->
-    filename:join([rt_local:home_dir(), "intercepts", "*.erl"]).
+default_intercept_path_globs() ->
+    Globs = rt_config:get(intercept_globs, []),
+    [filename:join([rt_local:home_dir(), "intercepts", "*.erl"])] ++ Globs.
 
 intercept_files() ->
-    intercept_files([default_intercept_path_glob()]).
+    intercept_files(default_intercept_path_globs()).
 
 intercept_files(Globs) ->
     lists:concat([filelib:wildcard(Glob) || Glob <- Globs]).
@@ -37,7 +38,7 @@ intercept_files(Globs) ->
 %% @doc Load the intercepts on the nodes under test.
 -spec load_intercepts([node()]) -> ok.
 load_intercepts(Nodes) ->
-    load_intercepts(Nodes, [default_intercept_path_glob()]).
+    load_intercepts(Nodes, default_intercept_path_globs()).
 
 -spec load_intercepts([node()], [string()]) -> ok.
 load_intercepts(Nodes, Globs) ->
@@ -52,7 +53,7 @@ load_intercepts(Nodes, Globs) ->
     end.
 
 load_code(Node) ->
-    load_code(Node, [default_intercept_path_glob()]).
+    load_code(Node, default_intercept_path_glob()).
 
 load_code(Node, Globs) ->
     rt:wait_until_pingable(Node),
@@ -122,7 +123,7 @@ wait_until_loaded(Node, Tries) ->
     end.
 
 are_intercepts_loaded(Node) ->
-    are_intercepts_loaded(Node, [default_intercept_path_glob()]).
+    are_intercepts_loaded(Node, default_intercept_path_globs()).
 
 are_intercepts_loaded(Node, Globs) ->
     Results = [rpc:call(Node, code, is_loaded, [Mod])

From f43fe5d73e208829733a2538f03398b64a5c3e84 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Sat, 8 Aug 2015 00:29:30 -0400
Subject: [PATCH 02/13] Fix typo.

---
 src/rt_intercept.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/rt_intercept.erl b/src/rt_intercept.erl
index e3e6461fc..0175fb7b5 100644
--- a/src/rt_intercept.erl
+++ b/src/rt_intercept.erl
@@ -53,7 +53,7 @@ load_intercepts(Nodes, Globs) ->
     end.
 
 load_code(Node) ->
-    load_code(Node, default_intercept_path_glob()).
+    load_code(Node, default_intercept_path_globs()).
 
 load_code(Node, Globs) ->
     rt:wait_until_pingable(Node),
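For context on the new setting read by rt_config:get(intercept_globs, [])
above: the extra globs come from the riak_test configuration file. A minimal
sketch of a config entry, assuming a hypothetical project checkout at
/home/dev/myproject that keeps its intercepts in a local intercepts/
directory:

    %% .riak_test.config (illustrative values)
    {default, [
        %% Searched in addition to riak_test/intercepts/*.erl; each
        %% entry is expanded with filelib:wildcard/1, so any pattern
        %% that function accepts works here.
        {intercept_globs, ["/home/dev/myproject/intercepts/*.erl"]}
    ]}.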
From 584c509531b9c5907ce5547b80e3f81d38209e60 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Sat, 8 Aug 2015 00:45:46 -0400
Subject: [PATCH 03/13] Rename observer to avoid collision.

---
 src/{observer.erl => rt_observer.erl} | 2 +-
 src/rtperf.erl                        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
 rename src/{observer.erl => rt_observer.erl} (99%)

diff --git a/src/observer.erl b/src/rt_observer.erl
similarity index 99%
rename from src/observer.erl
rename to src/rt_observer.erl
index 4e7ee2d6f..1c7a22230 100644
--- a/src/observer.erl
+++ b/src/rt_observer.erl
@@ -1,4 +1,4 @@
--module(observer).
+-module(rt_observer).
 -compile(export_all).
 
 -record(history, {network,
diff --git a/src/rtperf.erl b/src/rtperf.erl
index 7b46a5360..1f4c90a0f 100644
--- a/src/rtperf.erl
+++ b/src/rtperf.erl
@@ -108,7 +108,7 @@ start_data_collectors(Nodes) ->
     PrepDir = "/tmp/perf-"++OSPid,
     file:make_dir(PrepDir),
     {ok, Hostname} = inet:gethostname(),
-    P = observer:watch(Nodes, {Hostname, 65001, PrepDir}),
+    P = rt_observer:watch(Nodes, {Hostname, 65001, PrepDir}),
     lager:info("started data collector: ~p", [P]),
     P.

From bf33b2e9173ab5497668cb91c18851c0419c4590 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Sat, 8 Aug 2015 00:45:52 -0400
Subject: [PATCH 04/13] Remove dead symlink.

---
 riak | 1 -
 1 file changed, 1 deletion(-)
 delete mode 120000 riak

diff --git a/riak b/riak
deleted file mode 120000
index a5208ef76..000000000
--- a/riak
+++ /dev/null
@@ -1 +0,0 @@
-riak-2.0
\ No newline at end of file

From f7a428896a9ef03ae4ae3b531492a0848ac016b1 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Sat, 8 Aug 2015 01:04:54 -0400
Subject: [PATCH 05/13] Rename overload to prevent name clash.

---
 tests/overload.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/overload.erl b/tests/overload.erl
index 1e473f73b..833df4bb0 100644
--- a/tests/overload.erl
+++ b/tests/overload.erl
@@ -17,7 +17,7 @@
 %% under the License.
 %%
 %% -------------------------------------------------------------------
--module(overload).
+-module(overload_test).
 -compile(export_all).
 -include_lib("eunit/include/eunit.hrl").

From b3dad450a1b2e497925d07730d78b9b4a5c37e91 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Sat, 8 Aug 2015 01:13:43 -0400
Subject: [PATCH 06/13] Fix error in naming.

---
 tests/{overload.erl => overload_test.erl} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename tests/{overload.erl => overload_test.erl} (100%)

diff --git a/tests/overload.erl b/tests/overload_test.erl
similarity index 100%
rename from tests/overload.erl
rename to tests/overload_test.erl

From 500d87f46cbf5265614a0a550e21abb83ac64ee4 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Sat, 8 Aug 2015 01:22:34 -0400
Subject: [PATCH 07/13] Assume config is stored in current directory.

---
 src/rt_config.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/rt_config.erl b/src/rt_config.erl
index 4a916de63..0aa7c3d52 100644
--- a/src/rt_config.erl
+++ b/src/rt_config.erl
@@ -51,7 +51,7 @@ get_os_env(Var, Default) ->
 
 %% @doc Load the configuration from the specified config file.
 load(Config, undefined) ->
-    load(Config, filename:join([os:getenv("HOME"), ".riak_test.config"]));
+    load(Config, filename:join([os:getenv("PWD"), ".riak_test.config"]));
 load(undefined, ConfigFile) ->
     load_dot_config("default", ConfigFile);
 load(ConfigName, ConfigFile) ->
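A note on patch 07 above: the default config path is now resolved against the
working directory rather than $HOME, so running riak_test from a project
checkout picks up that project's .riak_test.config. A minimal sketch of the
resolution (paths illustrative; this assumes a shell that exports PWD, since
os:getenv("PWD") returns false when the variable is unset and
filename:join/1 would then fail):

    %% In an Erlang shell started from /home/dev/myproject:
    1> filename:join([os:getenv("PWD"), ".riak_test.config"]).
    "/home/dev/myproject/.riak_test.config"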
From a87b390a7f0450533fa0bb8515164f1c4fa60fba Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Mon, 7 Sep 2015 13:09:25 -0700
Subject: [PATCH 08/13] Make Erlang 17 compatible.

---
 rebar.config     | 8 +++++---
 src/rt_cover.erl | 8 +++++++-
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/rebar.config b/rebar.config
index 2ecbeec34..e1dca17d4 100644
--- a/rebar.config
+++ b/rebar.config
@@ -1,17 +1,19 @@
-{require_otp_vsn, "R13B04|R14|R15|R16"}.
 {cover_enabled, true}.
 {edoc_opts, [{preprocess, true}]}.
 %%{edoc_opts, [{doclet, edown_doclet}, {pretty_printer, erl_pp}]}.
 %%{edoc_opts, [{doclet, my_layout}, {pretty_printer, erl_pp}]}.
 %%{edoc_opts, [{layout, my_layout}, {file_suffix, ".xml"}, {pretty_printer, erl_pp}]}.
 
 {erl_opts, [{src_dirs, [src, intercepts, perf]},
-            warnings_as_errors, {parse_transform, lager_transform}]}.
+            {platform_define, "^[0-9]+", namespaced_types},
+            warnings_as_errors,
+            {parse_transform, lager_transform}]}.
+
 {erl_first_files, ["src/rt_intercept_pt.erl"]}.
 {eunit_opts, [verbose]}.
 
 {deps, [
-        {lager, ".*", {git, "git://github.com/basho/lager", {tag, "2.0.3"}}},
+        {lager, "(2.0|2.1|2.2).*", {git, "git://github.com/basho/lager.git", {tag, "2.2.0"}}},
         {getopt, ".*", {git, "git://github.com/jcomellas/getopt", {tag, "v0.4"}}},
         {meck, ".*", {git, "git://github.com/basho/meck.git", {tag, "0.8.2"}}},
         {mapred_verify, ".*", {git, "git://github.com/basho/mapred_verify", {branch, "master"}}},
diff --git a/src/rt_cover.erl b/src/rt_cover.erl
index 5a970039f..d19f5e83f 100644
--- a/src/rt_cover.erl
+++ b/src/rt_cover.erl
@@ -40,6 +40,12 @@
          stop_on_node/1
         ]).
 
+-ifdef(namespaced_types).
+-type cover_dict() :: dict:dict().
+-else.
+-type cover_dict() :: dict().
+-endif.
+
 -define(COVER_SERVER, cover_server).
 
 -record(cover_info, {module :: atom(),
@@ -280,7 +286,7 @@ acc_cov(CovList) when is_list(CovList) ->
         end,
     lists:foldl(AddCov, {0, 0}, CovList).
 
--spec group_by_app(ModCovList:: [#cover_info{}], Mod2App :: dict()) ->
+-spec group_by_app(ModCovList:: [#cover_info{}], Mod2App :: cover_dict()) ->
                           [{string(), number(), [#cover_info{}]}].
 group_by_app(ModCovList, Mod2App) ->
     D1 = lists:foldl(fun(ModCov = #cover_info{module=Mod}, Acc) ->

From 9bfd18ffa6edb763bea9cd00463b3f7b2f54c026 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Mon, 7 Sep 2015 16:39:09 -0700
Subject: [PATCH 09/13] Remove Riak specific tests.

---
 tests/always_fail_test.erl             |    7 -
 tests/always_pass_test.erl             |    8 -
 tests/basic_command_line.erl           |  168 ---
 tests/bucket_props_roundtrip.erl       |  100 --
 tests/bucket_props_validation.erl      |  182 ---
 tests/bucket_types.erl                 |  509 -------
 tests/cause_bdp.erl                    |   31 -
 tests/cluster_meta_basic.erl           |  175 ---
 tests/cluster_meta_proxy_server.erl    |  123 --
 tests/cluster_meta_rmr.erl             |  227 ---
 tests/cuttlefish_configuration.erl     |   24 -
 tests/ensemble_basic.erl               |   31 -
 tests/ensemble_basic2.erl              |   76 -
 tests/ensemble_basic3.erl              |   97 --
 tests/ensemble_basic4.erl              |   67 -
 tests/ensemble_byzantine.erl           |  304 ----
 tests/ensemble_interleave.erl          |  101 --
 tests/ensemble_remove_node.erl         |   90 --
 tests/ensemble_remove_node2.erl        |  106 --
 tests/ensemble_ring_changes.erl        |  153 --
 tests/ensemble_start_without_aae.erl   |   34 -
 tests/ensemble_sync.erl                |  214 ---
 tests/ensemble_util.erl                |  160 ---
 tests/ensemble_vnode_crash.erl         |  108 --
 tests/gh_riak_core_154.erl             |   57 -
 tests/gh_riak_core_155.erl             |   69 -
 tests/gh_riak_core_176.erl             |   82 --
 tests/gh_riak_kv_765.erl               |  123 --
 tests/hooks.erl                        |   83 --
 tests/http_bucket_types.erl            |  438 ------
 tests/http_security.erl                |  579 --------
 tests/jmx_verify.erl                   |  237 ----
 tests/kv679_dataloss.erl               |  159 ---
 tests/kv679_tombstone.erl              |  224 ---
 tests/kv679_tombstone2.erl             |  158 ---
 tests/kv679_uid.erl                    |   55 -
 tests/loaded_upgrade.erl               |  237 ----
 tests/loaded_upgrade_worker_sup.erl    |  248 ----
 tests/mapred_basic_compat.erl          |  262 ----
 tests/mapred_buffer_prereduce.erl      |   75 -
 tests/mapred_dead_pipe.erl             |  130 --
 tests/mapred_http_errors.erl           |   97 --
 tests/mapred_javascript.erl            |  151 --
 tests/mapred_notfound_failover.erl     |  230 ---
 tests/mapred_search_switch.erl         |  303 ----
 tests/mapred_verify_rt.erl             |   55 -
 tests/overload_proxy.erl               |  157 ---
 tests/overload_test.erl                |  501 -------
 tests/partition_repair.erl             |  337 -----
 tests/pb_cipher_suites.erl             |  227 ---
 tests/pb_security.erl                  |  819 -----------
tests/pipe_verify_basics.erl | 138 -- tests/pipe_verify_examples.erl | 60 - tests/pipe_verify_exceptions.erl | 545 ------- tests/pipe_verify_handoff.erl | 263 ---- tests/pipe_verify_handoff_blocking.erl | 237 ---- .../pipe_verify_restart_input_forwarding.erl | 178 --- tests/pipe_verify_sink_types.erl | 202 --- tests/post_generate_key.erl | 99 -- tests/pr_pw.erl | 148 -- tests/repl_aae_fullsync.erl | 611 -------- tests/repl_aae_fullsync_bench.erl | 73 - tests/repl_aae_fullsync_bt.erl | 303 ---- tests/repl_aae_fullsync_custom_n.erl | 113 -- tests/repl_aae_fullsync_util.erl | 80 -- tests/repl_bucket_types.erl | 469 ------- tests/repl_cancel_fullsync.erl | 147 -- tests/repl_consistent_object_filter.erl | 156 --- tests/repl_fs_bench.erl | 156 --- tests/repl_fs_stat_caching.erl | 109 -- tests/repl_handoff_deadlock_aae.erl | 35 - tests/repl_handoff_deadlock_common.erl | 151 -- tests/repl_handoff_deadlock_keylist.erl | 35 - tests/repl_location_failures.erl | 108 -- tests/repl_rt_cascading_rtq.erl | 185 --- tests/repl_rt_heartbeat.erl | 255 ---- tests/repl_rt_overload.erl | 190 --- tests/repl_rt_pending.erl | 249 ---- tests/repl_util.erl | 637 --------- tests/replication.erl | 706 ---------- tests/replication2.erl | 770 ---------- tests/replication2_connections.erl | 306 ---- tests/replication2_console_tests.erl | 123 -- tests/replication2_dirty.erl | 259 ---- tests/replication2_fsschedule.erl | 220 --- tests/replication2_pg.erl | 1009 ------------- tests/replication2_rt_sink_connection.erl | 123 -- tests/replication2_ssl.erl | 390 ------ tests/replication2_upgrade.erl | 105 -- tests/replication_object_reformat.erl | 239 ---- tests/replication_ssl.erl | 263 ---- tests/replication_stats.erl | 108 -- tests/replication_upgrade.erl | 96 -- tests/riak667_mixed.erl | 334 ----- tests/riak667_safe.erl | 102 -- tests/riak_admin_console_tests.erl | 246 ---- tests/riak_control.erl | 261 ---- tests/riak_control_authentication.erl | 229 --- tests/riak_rex.erl | 59 - tests/riaknostic_rt.erl | 108 -- tests/rolling_capabilities.erl | 86 -- tests/rpc_output.erl | 84 -- tests/rt_basic_test.erl | 29 - tests/rt_cascading.erl | 1246 ----------------- tests/secondary_index_tests.erl | 332 ----- tests/sibling_explosion.erl | 90 -- tests/test_cluster.erl | 29 - tests/verify_2i_aae.erl | 228 --- tests/verify_2i_limit.erl | 126 -- tests/verify_2i_mixed_cluster.erl | 75 - tests/verify_2i_returnterms.erl | 76 - tests/verify_2i_stream.erl | 68 - tests/verify_2i_timeout.erl | 69 - tests/verify_aae.erl | 304 ---- tests/verify_api_timeouts.erl | 245 ---- tests/verify_asis_put.erl | 40 - tests/verify_backup_restore.erl | 292 ---- tests/verify_basic_upgrade.erl | 48 - tests/verify_bdp_event_handler.erl | 133 -- tests/verify_bitcask_tombstone2_upgrade.erl | 91 -- tests/verify_build_cluster.erl | 107 -- tests/verify_busy_dist_port.erl | 109 -- tests/verify_capabilities.erl | 267 ---- tests/verify_claimant.erl | 79 -- tests/verify_cluster_converge.erl | 15 - tests/verify_commit_hooks.erl | 76 - tests/verify_conditional_postcommit.erl | 80 -- tests/verify_corruption_filtering.erl | 121 -- tests/verify_counter_capability.erl | 84 -- tests/verify_counter_converge.erl | 111 -- tests/verify_counter_repl.erl | 132 -- tests/verify_crdt_capability.erl | 101 -- tests/verify_cs_bucket.erl | 84 -- tests/verify_down.erl | 68 - tests/verify_dt_context.erl | 222 --- tests/verify_dt_converge.erl | 340 ----- tests/verify_dt_upgrade.erl | 91 -- tests/verify_dvv_repl.erl | 170 --- tests/verify_dynamic_ring.erl | 213 --- tests/verify_handoff.erl | 146 
-- tests/verify_handoff_mixed.erl | 186 --- tests/verify_handoff_write_once.erl | 207 --- tests/verify_kv_health_check.erl | 63 - tests/verify_leave.erl | 59 - tests/verify_link_walk_urls.erl | 137 -- tests/verify_listkeys.erl | 250 ---- tests/verify_listkeys_eqcfsm.erl | 246 ---- tests/verify_membackend.erl | 313 ----- tests/verify_mr_prereduce_node_down.erl | 93 -- tests/verify_no_writes_on_read.erl | 49 - tests/verify_object_limits.erl | 130 -- tests/verify_reset_bucket_props.erl | 79 -- tests/verify_riak_lager.erl | 53 - tests/verify_riak_object_reformat.erl | 64 - tests/verify_riak_stats.erl | 826 ----------- tests/verify_search.erl | 68 - tests/verify_secondary_index_reformat.erl | 121 -- tests/verify_snmp.erl | 97 -- tests/verify_snmp_repl.erl | 99 -- tests/verify_staged_clustering.erl | 183 --- tests/verify_tick_change.erl | 85 -- tests/verify_vclock.erl | 204 --- tests/verify_vclock_encoding_upgrade.erl | 45 - tests/verify_write_once.erl | 333 ----- tests/vnode_util.erl | 80 -- tests/yz_core_properties_create_unload.erl | 160 --- tests/yz_crdt.erl | 119 -- tests/yz_default_bucket_type_upgrade.erl | 94 -- tests/yz_ensemble.erl | 117 -- tests/yz_extractors.erl | 191 --- tests/yz_handoff.erl | 208 --- tests/yz_schema_change_reset.erl | 315 ----- 172 files changed, 32757 deletions(-) delete mode 100644 tests/always_fail_test.erl delete mode 100644 tests/always_pass_test.erl delete mode 100644 tests/basic_command_line.erl delete mode 100644 tests/bucket_props_roundtrip.erl delete mode 100644 tests/bucket_props_validation.erl delete mode 100644 tests/bucket_types.erl delete mode 100644 tests/cause_bdp.erl delete mode 100644 tests/cluster_meta_basic.erl delete mode 100644 tests/cluster_meta_proxy_server.erl delete mode 100644 tests/cluster_meta_rmr.erl delete mode 100644 tests/cuttlefish_configuration.erl delete mode 100644 tests/ensemble_basic.erl delete mode 100644 tests/ensemble_basic2.erl delete mode 100644 tests/ensemble_basic3.erl delete mode 100644 tests/ensemble_basic4.erl delete mode 100644 tests/ensemble_byzantine.erl delete mode 100644 tests/ensemble_interleave.erl delete mode 100644 tests/ensemble_remove_node.erl delete mode 100644 tests/ensemble_remove_node2.erl delete mode 100644 tests/ensemble_ring_changes.erl delete mode 100644 tests/ensemble_start_without_aae.erl delete mode 100644 tests/ensemble_sync.erl delete mode 100644 tests/ensemble_util.erl delete mode 100644 tests/ensemble_vnode_crash.erl delete mode 100644 tests/gh_riak_core_154.erl delete mode 100644 tests/gh_riak_core_155.erl delete mode 100644 tests/gh_riak_core_176.erl delete mode 100644 tests/gh_riak_kv_765.erl delete mode 100644 tests/hooks.erl delete mode 100644 tests/http_bucket_types.erl delete mode 100644 tests/http_security.erl delete mode 100644 tests/jmx_verify.erl delete mode 100644 tests/kv679_dataloss.erl delete mode 100644 tests/kv679_tombstone.erl delete mode 100644 tests/kv679_tombstone2.erl delete mode 100644 tests/kv679_uid.erl delete mode 100644 tests/loaded_upgrade.erl delete mode 100644 tests/loaded_upgrade_worker_sup.erl delete mode 100644 tests/mapred_basic_compat.erl delete mode 100644 tests/mapred_buffer_prereduce.erl delete mode 100644 tests/mapred_dead_pipe.erl delete mode 100644 tests/mapred_http_errors.erl delete mode 100644 tests/mapred_javascript.erl delete mode 100644 tests/mapred_notfound_failover.erl delete mode 100644 tests/mapred_search_switch.erl delete mode 100644 tests/mapred_verify_rt.erl delete mode 100644 tests/overload_proxy.erl delete mode 100644 
tests/overload_test.erl delete mode 100644 tests/partition_repair.erl delete mode 100644 tests/pb_cipher_suites.erl delete mode 100644 tests/pb_security.erl delete mode 100644 tests/pipe_verify_basics.erl delete mode 100644 tests/pipe_verify_examples.erl delete mode 100644 tests/pipe_verify_exceptions.erl delete mode 100644 tests/pipe_verify_handoff.erl delete mode 100644 tests/pipe_verify_handoff_blocking.erl delete mode 100644 tests/pipe_verify_restart_input_forwarding.erl delete mode 100644 tests/pipe_verify_sink_types.erl delete mode 100644 tests/post_generate_key.erl delete mode 100644 tests/pr_pw.erl delete mode 100644 tests/repl_aae_fullsync.erl delete mode 100644 tests/repl_aae_fullsync_bench.erl delete mode 100644 tests/repl_aae_fullsync_bt.erl delete mode 100644 tests/repl_aae_fullsync_custom_n.erl delete mode 100644 tests/repl_aae_fullsync_util.erl delete mode 100644 tests/repl_bucket_types.erl delete mode 100644 tests/repl_cancel_fullsync.erl delete mode 100644 tests/repl_consistent_object_filter.erl delete mode 100644 tests/repl_fs_bench.erl delete mode 100644 tests/repl_fs_stat_caching.erl delete mode 100644 tests/repl_handoff_deadlock_aae.erl delete mode 100644 tests/repl_handoff_deadlock_common.erl delete mode 100644 tests/repl_handoff_deadlock_keylist.erl delete mode 100644 tests/repl_location_failures.erl delete mode 100644 tests/repl_rt_cascading_rtq.erl delete mode 100644 tests/repl_rt_heartbeat.erl delete mode 100644 tests/repl_rt_overload.erl delete mode 100644 tests/repl_rt_pending.erl delete mode 100644 tests/repl_util.erl delete mode 100644 tests/replication.erl delete mode 100644 tests/replication2.erl delete mode 100644 tests/replication2_connections.erl delete mode 100644 tests/replication2_console_tests.erl delete mode 100644 tests/replication2_dirty.erl delete mode 100644 tests/replication2_fsschedule.erl delete mode 100644 tests/replication2_pg.erl delete mode 100644 tests/replication2_rt_sink_connection.erl delete mode 100644 tests/replication2_ssl.erl delete mode 100644 tests/replication2_upgrade.erl delete mode 100644 tests/replication_object_reformat.erl delete mode 100644 tests/replication_ssl.erl delete mode 100644 tests/replication_stats.erl delete mode 100644 tests/replication_upgrade.erl delete mode 100644 tests/riak667_mixed.erl delete mode 100644 tests/riak667_safe.erl delete mode 100644 tests/riak_admin_console_tests.erl delete mode 100644 tests/riak_control.erl delete mode 100644 tests/riak_control_authentication.erl delete mode 100644 tests/riak_rex.erl delete mode 100644 tests/riaknostic_rt.erl delete mode 100644 tests/rolling_capabilities.erl delete mode 100644 tests/rpc_output.erl delete mode 100644 tests/rt_basic_test.erl delete mode 100644 tests/rt_cascading.erl delete mode 100644 tests/secondary_index_tests.erl delete mode 100644 tests/sibling_explosion.erl delete mode 100644 tests/test_cluster.erl delete mode 100644 tests/verify_2i_aae.erl delete mode 100644 tests/verify_2i_limit.erl delete mode 100644 tests/verify_2i_mixed_cluster.erl delete mode 100644 tests/verify_2i_returnterms.erl delete mode 100644 tests/verify_2i_stream.erl delete mode 100644 tests/verify_2i_timeout.erl delete mode 100644 tests/verify_aae.erl delete mode 100644 tests/verify_api_timeouts.erl delete mode 100644 tests/verify_asis_put.erl delete mode 100644 tests/verify_backup_restore.erl delete mode 100644 tests/verify_basic_upgrade.erl delete mode 100644 tests/verify_bdp_event_handler.erl delete mode 100644 tests/verify_bitcask_tombstone2_upgrade.erl delete mode 
100644 tests/verify_build_cluster.erl delete mode 100644 tests/verify_busy_dist_port.erl delete mode 100644 tests/verify_capabilities.erl delete mode 100644 tests/verify_claimant.erl delete mode 100644 tests/verify_cluster_converge.erl delete mode 100644 tests/verify_commit_hooks.erl delete mode 100644 tests/verify_conditional_postcommit.erl delete mode 100644 tests/verify_corruption_filtering.erl delete mode 100644 tests/verify_counter_capability.erl delete mode 100644 tests/verify_counter_converge.erl delete mode 100644 tests/verify_counter_repl.erl delete mode 100644 tests/verify_crdt_capability.erl delete mode 100644 tests/verify_cs_bucket.erl delete mode 100644 tests/verify_down.erl delete mode 100644 tests/verify_dt_context.erl delete mode 100644 tests/verify_dt_converge.erl delete mode 100644 tests/verify_dt_upgrade.erl delete mode 100644 tests/verify_dvv_repl.erl delete mode 100644 tests/verify_dynamic_ring.erl delete mode 100644 tests/verify_handoff.erl delete mode 100644 tests/verify_handoff_mixed.erl delete mode 100644 tests/verify_handoff_write_once.erl delete mode 100644 tests/verify_kv_health_check.erl delete mode 100644 tests/verify_leave.erl delete mode 100644 tests/verify_link_walk_urls.erl delete mode 100644 tests/verify_listkeys.erl delete mode 100644 tests/verify_listkeys_eqcfsm.erl delete mode 100644 tests/verify_membackend.erl delete mode 100644 tests/verify_mr_prereduce_node_down.erl delete mode 100644 tests/verify_no_writes_on_read.erl delete mode 100644 tests/verify_object_limits.erl delete mode 100644 tests/verify_reset_bucket_props.erl delete mode 100644 tests/verify_riak_lager.erl delete mode 100644 tests/verify_riak_object_reformat.erl delete mode 100644 tests/verify_riak_stats.erl delete mode 100644 tests/verify_search.erl delete mode 100644 tests/verify_secondary_index_reformat.erl delete mode 100644 tests/verify_snmp.erl delete mode 100644 tests/verify_snmp_repl.erl delete mode 100644 tests/verify_staged_clustering.erl delete mode 100644 tests/verify_tick_change.erl delete mode 100644 tests/verify_vclock.erl delete mode 100644 tests/verify_vclock_encoding_upgrade.erl delete mode 100644 tests/verify_write_once.erl delete mode 100644 tests/vnode_util.erl delete mode 100644 tests/yz_core_properties_create_unload.erl delete mode 100644 tests/yz_crdt.erl delete mode 100644 tests/yz_default_bucket_type_upgrade.erl delete mode 100644 tests/yz_ensemble.erl delete mode 100644 tests/yz_extractors.erl delete mode 100644 tests/yz_handoff.erl delete mode 100644 tests/yz_schema_change_reset.erl diff --git a/tests/always_fail_test.erl b/tests/always_fail_test.erl deleted file mode 100644 index 39d6e341a..000000000 --- a/tests/always_fail_test.erl +++ /dev/null @@ -1,7 +0,0 @@ -%% @doc A test that always returns `fail'. --module(always_fail_test). --export([confirm/0]). - --spec confirm() -> pass | fail. -confirm() -> - fail. diff --git a/tests/always_pass_test.erl b/tests/always_pass_test.erl deleted file mode 100644 index e71c9645c..000000000 --- a/tests/always_pass_test.erl +++ /dev/null @@ -1,8 +0,0 @@ -%% @doc A test that always returns `pass'. --module(always_pass_test). --behavior(riak_test). --export([confirm/0]). - --spec confirm() -> pass | fail. -confirm() -> - pass. 
diff --git a/tests/basic_command_line.erl b/tests/basic_command_line.erl deleted file mode 100644 index 958a5455e..000000000 --- a/tests/basic_command_line.erl +++ /dev/null @@ -1,168 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(basic_command_line). --include_lib("eunit/include/eunit.hrl"). - --behavior(riak_test). --compile(export_all). --export([confirm/0]). - -confirm() -> - - %% Deploy a node to test against - lager:info("Deploy node to test command line"), - [Node] = rt:deploy_nodes(1), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), - - %% Verify node-up behavior - ping_up_test(Node), - attach_direct_up_test(Node), - status_up_test(Node), - console_up_test(Node), - start_up_test(Node), - getpid_up_test(Node), - - %% Stop the node, Verify node-down behavior - stop_test(Node), - ping_down_test(Node), - attach_down_test(Node), - attach_direct_down_test(Node), - status_down_test(Node), - console_test(Node), - start_test(Node), - getpid_down_test(Node), - - pass. - -console_up_test(Node) -> - lager:info("Node is already up, `riak console` should fail"), - {ok, ConsoleFail} = rt:riak(Node, ["console"]), - ?assert(rt:str(ConsoleFail, "Node is already running")), - ok. - -console_test(Node) -> - %% Make sure the cluster will start up with /usr/sbin/riak console, then quit - lager:info("Testing riak console on ~s", [Node]), - - %% Stop node, to test console working - rt:console(Node, [{expect, "\(abort with ^G\)"}, - {send, "riak_core_ring_manager:get_my_ring()."}, - {expect, "dict,"}, - {send, "q()."}, - {expect, "ok"}]), - rt:wait_until_unpingable(Node), - ok. - -start_up_test(Node) -> - %% Try starting again and check you get the node is already running message - lager:info("Testing riak start now will return 'already running'"), - {ok, StartOut} = rt:riak(Node, ["start"]), - ?assert(rt:str(StartOut, "Node is already running!")), - ok. - - -start_test(Node) -> - %% Test starting with /bin/riak start - lager:info("Testing riak start works on ~s", [Node]), - - {ok, StartPass} = rt:riak(Node, ["start"]), - lager:info("StartPass: ~p", [StartPass]), - ?assert(StartPass =:= "" orelse string:str(StartPass, "WARNING") =/= 0), - rt:stop_and_wait(Node), - ok. - -stop_test(Node) -> - ?assert(rt:is_pingable(Node)), - - {ok, "ok\n"} = rt:riak(Node, "stop"), - - ?assertNot(rt:is_pingable(Node)), - ok. - -ping_up_test(Node) -> - - %% check /usr/sbin/riak ping - lager:info("Testing riak ping on ~s", [Node]), - - %% ping / pong - %% rt:start_and_wait(Node), - lager:info("Node up, should ping"), - {ok, PongOut} = rt:riak(Node, ["ping"]), - ?assert(rt:str(PongOut, "pong")), - ok. 
- -ping_down_test(Node) -> - %% ping / pang - lager:info("Node down, should pang"), - {ok, PangOut} = rt:riak(Node, ["ping"]), - ?assert(rt:str(PangOut, "not responding to pings")), - ok. - -attach_down_test(Node) -> - lager:info("Testing riak attach while down"), - {ok, AttachOut} = rt:riak(Node, ["attach"]), - ?assert(rt:str(AttachOut, "Node is not running!")), - ok. - -attach_direct_up_test(Node) -> - lager:info("Testing riak attach-direct"), - - rt:attach_direct(Node, [{expect, "\(^D to exit\)"}, - {send, "riak_core_ring_manager:get_my_ring()."}, - {expect, "dict,"}, - {send, [4]}]), %% 4 = Ctrl + D - ok. - -attach_direct_down_test(Node) -> - lager:info("Testing riak attach-direct while down"), - {ok, AttachOut} = rt:riak(Node, ["attach-direct"]), - ?assert(rt:str(AttachOut, "Node is not running!")), - ok. - -status_up_test(Node) -> - lager:info("Test riak-admin status on ~s", [Node]), - - {ok, {ExitCode, StatusOut}} = rt:admin(Node, ["status"], [return_exit_code]), - io:format("Result of status: ~s", [StatusOut]), - ?assertEqual(0, ExitCode), - ?assert(rt:str(StatusOut, "1-minute stats")), - ?assert(rt:str(StatusOut, "kernel_version")), - - ok. - -status_down_test(Node) -> - lager:info("Test riak-admin status while down"), - {ok, {ExitCode, StatusOut}} = rt:admin(Node, ["status"], [return_exit_code]), - ?assertEqual(1, ExitCode), - ?assert(rt:str(StatusOut, "Node is not running!")), - ok. - -getpid_up_test(Node) -> - lager:info("Test riak getpid on ~s", [Node]), - {ok, PidOut} = rt:riak(Node, ["getpid"]), - ?assertNot(rt:str(PidOut, "")), - ?assert(rt:str(PidOut, rpc:call(Node, os, getpid, []))), - ok. - -getpid_down_test(Node) -> - lager:info("Test riak getpid fails on ~s", [Node]), - {ok, PidOut} = rt:riak(Node, ["getpid"]), - ?assert(rt:str(PidOut, "Node is not running!")), - ok. diff --git a/tests/bucket_props_roundtrip.erl b/tests/bucket_props_roundtrip.erl deleted file mode 100644 index ef2fe5c84..000000000 --- a/tests/bucket_props_roundtrip.erl +++ /dev/null @@ -1,100 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(bucket_props_roundtrip). --behaviour(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"pbc_props_verify">>). --define(COMMIT_HOOK, {struct, [{<<"mod">>, <<"foo">>}, {<<"fun">>, <<"bar">>}]}). --define(CHASHFUN, {riak_core_util, chash_bucketonly_keyfun}). --define(LINKFUN, {modfun, raw_link_walker, mapreduce_linkfun}). 
--define(PROPS, - [ - {allow_mult, true, false}, - {backend, <<"custom">>, <<"other">>}, - {basic_quorum, true, false}, - {big_vclock, 100, 50}, - {chash_keyfun, ?CHASHFUN, {riak_core_util, chash_std_keyfun}}, - {dw, 0, quorum}, - {last_write_wins, true, false}, - {linkfun, ?LINKFUN, {modfun, riak_kv_wm_link_walker, mapreduce_linkfun}}, - {n_val, 2, 3}, - {notfound_ok, false, true}, - {old_vclock, 10000, 86400}, - {postcommit, [?COMMIT_HOOK], []}, - {pr, 2, 0}, - {precommit, [?COMMIT_HOOK], []}, - {pw, all, 0}, - {r, all, quorum}, - {repl, realtime, false}, - {rw, 1, quorum}, - {search, true, false}, - {small_vclock, 10, 50}, - {w, one, quorum}, - {young_vclock, 0, 20} - ]). - -confirm() -> - [Node] = Nodes = rt:build_cluster(1), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - - [ check_prop_set_and_get(Node, Prop, FirstVal, SecondVal) || - {Prop, FirstVal, SecondVal} <- ?PROPS ], - - pass. - -check_prop_set_and_get(Node, Prop, One, Two) -> - lager:info("-------- Testing roundtrip for property '~p' ---------", [Prop]), - HTTP = rt:httpc(Node), - PBC = rt:pbc(Node), - lager:info("HTTP set = ~p", [One]), - http_set_property(HTTP, Prop, One), - lager:info("PBC get should == ~p", [One]), - ?assertEqual(One, pbc_get_property(PBC, Prop)), - - lager:info("PBC set = ~p", [Two]), - pbc_set_property(PBC, Prop, Two), - lager:info("HTTP get should = ~p", [Two]), - ?assertEqual(Two, http_get_property(HTTP, Prop)), - ok. - - -http_set_property(Client, Prop, Value) -> - rhc:set_bucket(Client, ?BUCKET, [{Prop, Value}]). - -http_get_property(Client, Prop) -> - {ok, Props} = rhc:get_bucket(Client, ?BUCKET), - case lists:keyfind(Prop, 1, Props) of - {Prop, Value} -> - Value; - _ -> undefined - end. - -pbc_set_property(Client, Prop, Value) -> - riakc_pb_socket:set_bucket(Client, ?BUCKET, [{Prop, Value}]). - -pbc_get_property(Client, Prop) -> - {ok, Props} = riakc_pb_socket:get_bucket(Client, ?BUCKET), - case lists:keyfind(Prop, 1, Props) of - {Prop, Value} -> - Value; - _ -> undefined - end. diff --git a/tests/bucket_props_validation.erl b/tests/bucket_props_validation.erl deleted file mode 100644 index 2786418dc..000000000 --- a/tests/bucket_props_validation.erl +++ /dev/null @@ -1,182 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%%% @copyright (C) 2013, Basho Technologies -%%% @doc -%%% riak_test for bucket validation -%%% @end - --module(bucket_props_validation). - --behavior(riak_test). - --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). 
- -confirm() -> - [Node] = rt:build_cluster(1), - - Connections = get_connections(Node), - Buckets = {druuid:v4_str(), druuid:v4_str()}, - - DefaultProps = default_props(), - ValidProps = valid_props(), - - %% Check we are starting in a default state - verify_props(Connections, Buckets, DefaultProps), - - %% Verify attempting to set invalid properties results in the - %% expected errors - verify_props_errors(set_props(Connections, Buckets, invalid_props())), - - %% Verify no props were harmed in the making of this request - verify_props(Connections, Buckets, DefaultProps), - - %% Set valid properties and verify they are present when - %% retreiving the bucket properties - ?assertEqual({ok, ok}, set_props(Connections, Buckets, ValidProps)), - verify_props(Connections, Buckets, ValidProps), - - close_connections(Connections), - pass. - -get_connections(Node) -> - {rt:httpc(Node), rt:pbc(Node)}. - -close_connections({_Http, PBC}) -> - riakc_pb_socket:stop(PBC). - -get_props({Http, PBC}, {HttpBucket, PbcBucket}) -> - {ok, PbcProps} = riakc_pb_socket:get_bucket(PBC, PbcBucket), - {ok, HttpProps} = rhc:get_bucket(Http, HttpBucket), - {HttpProps, PbcProps}. - -set_props({Http, PBC}, {HttpBucket, PbcBucket}, Props) -> - HttpRes = rhc:set_bucket(Http, HttpBucket, Props), - PbcRes = try riakc_pb_socket:set_bucket(PBC, PbcBucket, Props) of - NormalRes -> - NormalRes - catch - Error:Reason -> - {Error, Reason} - end, - {HttpRes, PbcRes}. - -verify_props(Connections, Buckets, Expected) -> - {HttpProps, PbcProps} = get_props(Connections, Buckets), - ?assert(sets:is_subset(sets:from_list(Expected), sets:from_list(HttpProps))), - ?assert(sets:is_subset(sets:from_list(Expected), sets:from_list(PbcProps))). -verify_props_errors({HttpResult, PBCResult}) -> - verify_errors(http_errors(HttpResult)), - ?assertEqual({error, function_clause}, PBCResult). - -http_errors(Result) -> - ?assertMatch({error, _}, Result), - {error, {ok, "400", _H, Errors0}} = Result, - {struct, Errors} = mochijson2:decode(Errors0), - Errors. - -verify_errors(Errors) -> - ?assertEqual(13, length(Errors)), - [?assert(verify_error(binary_to_existing_atom(Prop, latin1), - binary_to_atom(Message, latin1))) || {Prop, Message} <- Errors]. - -verify_error(allow_mult, not_boolean) -> - true; -verify_error(basic_quorum, not_boolean) -> - true; -verify_error(last_write_wins, not_boolean) -> - true; -verify_error(notfound_ok, not_boolean) -> - true; -verify_error(big_vclock, not_integer) -> - true; -verify_error(n_val, not_integer) -> - true; -verify_error(old_vclock, not_integer) -> - true; -verify_error(small_vclock, not_integer) -> - true; -verify_error(young_vclock, not_integer) -> - true; -verify_error(Quorum, not_valid_quorum) when Quorum =:= dw; - Quorum =:= pw; - Quorum =:= pr; - Quorum =:= r; - Quorum =:= rw; - Quorum =:= w -> - true; -verify_error(_, _) -> - false. - -default_props() -> - [{allow_mult,false}, - {basic_quorum,false}, - {big_vclock,50}, - {chash_keyfun,{riak_core_util,chash_std_keyfun}}, - {dw,quorum}, - {last_write_wins,false}, - {linkfun,{modfun,riak_kv_wm_link_walker,mapreduce_linkfun}}, - {n_val,3}, - {notfound_ok,true}, - {old_vclock,86400}, - {postcommit,[]}, - {pr,0}, - {precommit,[]}, - {pw,0}, - {r,quorum}, - {rw,quorum}, - {small_vclock,50}, - {w,quorum}, - {young_vclock,20}]. 
- -invalid_props() -> - [{allow_mult, elephant}, - {basic_quorum,fasle}, - {big_vclock, 89.90}, - {dw, qurum}, - {last_write_wins, 90}, - {n_val,<<"3">>}, - {notfound_ok,<<"truish">>}, - {old_vclock, -9890}, - {pr, -90}, - {pw, "seventeen"}, - {r, <<"florum">>}, - {rw, -9090929288989898398.9090093923232}, - {small_vclock, wibble}, - {w, "a horse a horse my kingdom"}, - {young_vclock, "12"}]. - -valid_props() -> - [{allow_mult, true}, - {basic_quorum, true}, - {big_vclock, 90}, - {dw, one}, - {last_write_wins, false}, - {n_val, 4}, - {notfound_ok, false}, - {old_vclock, 9090909}, - {pr, all}, - {pw, quorum}, - {r, 4}, - {rw, 1}, - {small_vclock, 22}, - {w, all}, - {young_vclock, 18}]. diff --git a/tests/bucket_types.erl b/tests/bucket_types.erl deleted file mode 100644 index 1715cee82..000000000 --- a/tests/bucket_types.erl +++ /dev/null @@ -1,509 +0,0 @@ --module(bucket_types). - --behavior(riak_test). --export([confirm/0, mapred_modfun/3, mapred_modfun_type/3]). - --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - application:start(inets), - lager:info("Deploy some nodes"), - Nodes = rt:build_cluster(4, [], [ - {riak_core, [{default_bucket_props, - [{n_val, 2}]}]}]), - Node = hd(Nodes), - - RMD = riak_test_runner:metadata(), - HaveIndexes = case proplists:get_value(backend, RMD) of - undefined -> false; %% default is da 'cask - bitcask -> false; - _ -> true - end, - - PB = rt:pbc(Node), - - lager:info("default type get/put test"), - %% write explicitly to the default type - riakc_pb_socket:put(PB, riakc_obj:new({<<"default">>, <<"bucket">>}, - <<"key">>, <<"value">>)), - - %% read from the default bucket implicitly - {ok, O1} = riakc_pb_socket:get(PB, <<"bucket">>, <<"key">>), - %% read from the default bucket explicitly - {ok, O2} = riakc_pb_socket:get(PB, {<<"default">>, <<"bucket">>}, <<"key">>), - - %% same object, but slightly different presentation - ?assertEqual(riakc_obj:key(O1), riakc_obj:key(O2)), - ?assertEqual(riakc_obj:get_value(O1), riakc_obj:get_value(O2)), - ?assertEqual(riakc_obj:only_bucket(O1), riakc_obj:only_bucket(O2)), - ?assertEqual(riakc_obj:vclock(O1), riakc_obj:vclock(O2)), - ?assertEqual(undefined, riakc_obj:bucket_type(O1)), - ?assertEqual(<<"default">>, riakc_obj:bucket_type(O2)), - - %% write implicitly to the default bucket - riakc_pb_socket:put(PB, riakc_obj:update_value(O1, <<"newvalue">>)), - - %% read from the default bucket explicitly - {ok, O3} = riakc_pb_socket:get(PB, {<<"default">>, <<"bucket">>}, <<"key">>), - - ?assertEqual(<<"newvalue">>, riakc_obj:get_value(O3)), - - lager:info("list_keys test"), - %% list keys - ?assertEqual({ok, [<<"key">>]}, riakc_pb_socket:list_keys(PB, <<"bucket">>)), - ?assertEqual({ok, [<<"key">>]}, riakc_pb_socket:list_keys(PB, {<<"default">>, - <<"bucket">>})), - lager:info("list_buckets test"), - %% list buckets - ?assertEqual({ok, [<<"bucket">>]}, riakc_pb_socket:list_buckets(PB)), - ?assertEqual({ok, [<<"bucket">>]}, riakc_pb_socket:list_buckets(PB, <<"default">>)), - - lager:info("default type delete test"), - %% delete explicitly via the default bucket - ok = riakc_pb_socket:delete(PB, {<<"default">>, <<"bucket">>}, <<"key">>), - - %% read from the default bucket implicitly - {error, notfound} = riakc_pb_socket:get(PB, <<"bucket">>, <<"key">>), - %% read from the default bucket explicitly - {error, notfound} = riakc_pb_socket:get(PB, {<<"default">>, <<"bucket">>}, <<"key">>), - - %% write it again - riakc_pb_socket:put(PB, riakc_obj:new({<<"default">>, <<"bucket">>}, - <<"key">>, <<"newestvalue">>)), - - 
{ok, O4} = riakc_pb_socket:get(PB, {<<"default">>, <<"bucket">>}, <<"key">>), - - %% delete explicitly via the default bucket - ok = riakc_pb_socket:delete_obj(PB, O4), - - %% read from the default bucket implicitly - {error, notfound} = riakc_pb_socket:get(PB, <<"bucket">>, <<"key">>), - %% read from the default bucket explicitly - {error, notfound} = riakc_pb_socket:get(PB, {<<"default">>, <<"bucket">>}, <<"key">>), - - ?assertEqual(ok, rt:wait_until(fun() -> - rt:pbc_really_deleted(PB, - <<"bucket">>, - [<<"key">>]) - end)), - - %% now there should be no buckets or keys to be listed... - %% - %% list keys - ?assertEqual({ok, []}, riakc_pb_socket:list_keys(PB, <<"bucket">>)), - ?assertEqual({ok, []}, riakc_pb_socket:list_keys(PB, {<<"default">>, - <<"bucket">>})), - %% list buckets - ?assertEqual({ok, []}, riakc_pb_socket:list_buckets(PB)), - ?assertEqual({ok, []}, riakc_pb_socket:list_buckets(PB, <<"default">>)), - - - lager:info("custom type get/put test"), - Type = <<"mytype">>, - TypeProps = [{n_val, 3}], - lager:info("Create bucket type ~p, wait for propagation", [Type]), - rt:create_and_activate_bucket_type(Node, Type, TypeProps), - rt:wait_until_bucket_type_status(Type, active, Nodes), - rt:wait_until_bucket_props(Nodes, {Type, <<"bucket">>}, TypeProps), - - lager:info("doing put"), - riakc_pb_socket:put(PB, riakc_obj:new({Type, <<"bucket">>}, - <<"key">>, <<"newestvalue">>)), - - lager:info("custom type list_keys test"), - ?assertEqual({ok, []}, riakc_pb_socket:list_keys(PB, <<"bucket">>)), - ?assertEqual({ok, [<<"key">>]}, riakc_pb_socket:list_keys(PB, {Type, - <<"bucket">>})), - lager:info("doing get"), - {ok, O5} = riakc_pb_socket:get(PB, {Type, <<"bucket">>}, <<"key">>), - - ?assertEqual(<<"newestvalue">>, riakc_obj:get_value(O5)), - - lager:info("doing get"), - %% this type is NOT aliased to the default buckey - {error, notfound} = riakc_pb_socket:get(PB, <<"bucket">>, <<"key">>), - - lager:info("custom type list_buckets test"), - %% list buckets - ?assertEqual({ok, []}, riakc_pb_socket:list_buckets(PB)), - ?assertEqual({ok, [<<"bucket">>]}, riakc_pb_socket:list_buckets(PB, Type)), - - %%% Beginning of UTF-8 test - - lager:info("UTF-8 type get/put test"), - %% こんにちは - konnichiwa (Japanese) - UnicodeType = unicode:characters_to_binary([12371,12435,12395,12385,12399], utf8), - %% سلام - Salam (Arabic) - UnicodeBucket = unicode:characters_to_binary([1587,1604,1575,1605], utf8), - lager:info("Create bucket type, wait for propagation"), - rt:create_and_activate_bucket_type(Node, UnicodeType, TypeProps), - rt:wait_until_bucket_type_status(UnicodeType, active, Nodes), - rt:wait_until_bucket_props(Nodes, {UnicodeType, UnicodeBucket}, TypeProps), - - lager:info("doing put"), - riakc_pb_socket:put(PB, riakc_obj:new({UnicodeType, UnicodeBucket}, - <<"key">>, <<"yetanothervalue">>)), - - lager:info("custom type list_keys test"), - ?assertEqual({ok, [<<"key">>]}, riakc_pb_socket:list_keys(PB, - {UnicodeType, - UnicodeBucket})), - lager:info("doing get"), - {ok, O6} = riakc_pb_socket:get(PB, {UnicodeType, UnicodeBucket}, <<"key">>), - - ?assertEqual(<<"yetanothervalue">>, riakc_obj:get_value(O6)), - - lager:info("custom type list_buckets test"), - %% list buckets - ?assertEqual({ok, [UnicodeBucket]}, riakc_pb_socket:list_buckets(PB, UnicodeType)), - - %%% End of UTF-8 test - - lager:info("bucket properties tests"), - riakc_pb_socket:set_bucket(PB, {<<"default">>, <<"mybucket">>}, - [{n_val, 5}]), - {ok, BProps} = riakc_pb_socket:get_bucket(PB, <<"mybucket">>), - ?assertEqual(5, 
proplists:get_value(n_val, BProps)), - - riakc_pb_socket:reset_bucket(PB, {<<"default">>, <<"mybucket">>}), - - {ok, BProps1} = riakc_pb_socket:get_bucket(PB, <<"mybucket">>), - ?assertEqual(2, proplists:get_value(n_val, BProps1)), - - riakc_pb_socket:set_bucket(PB, {Type, <<"mybucket">>}, - [{n_val, 5}]), - {ok, BProps2} = riakc_pb_socket:get_bucket(PB, <<"mybucket">>), - %% the default in the app.config is set to 2... - ?assertEqual(2, proplists:get_value(n_val, BProps2)), - - {ok, BProps3} = riakc_pb_socket:get_bucket(PB, {Type, - <<"mybucket">>}), - ?assertEqual(5, proplists:get_value(n_val, BProps3)), - - %% Check our unicode brethren - riakc_pb_socket:set_bucket(PB, {UnicodeType, UnicodeBucket}, - [{n_val, 4}]), - {ok, UBProps1} = riakc_pb_socket:get_bucket(PB, {UnicodeType, - UnicodeBucket}), - ?assertEqual(4, proplists:get_value(n_val, UBProps1)), - - riakc_pb_socket:reset_bucket(PB, {Type, <<"mybucket">>}), - - {ok, BProps4} = riakc_pb_socket:get_bucket(PB, {Type, - <<"mybucket">>}), - ?assertEqual(3, proplists:get_value(n_val, BProps4)), - - riakc_pb_socket:reset_bucket(PB, {UnicodeType, UnicodeBucket}), - - {ok, UBProps2} = riakc_pb_socket:get_bucket(PB, {UnicodeType, - UnicodeBucket}), - - ?assertEqual(3, proplists:get_value(n_val, UBProps2)), - - {error, NTGR} = riakc_pb_socket:get_bucket(PB, {<<"nonexistent">>, <<"mybucket">>}), - - lager:info("GOT ERROR ~s", [NTGR]), - - ?assertMatch(<<"No bucket-type named 'nonexistent'", _/binary>>, NTGR), - - {error, NTSR} = riakc_pb_socket:set_bucket(PB, {<<"nonexistent">>, <<"mybucket">>}, [{n_val, 3}]), - - lager:info("GOT ERROR ~s", [NTSR]), - - ?assertMatch(<<"No bucket-type named 'nonexistent'", _/binary>>, NTSR), - - lager:info("bucket type properties test"), - - riakc_pb_socket:set_bucket_type(PB, Type, - [{n_val, 5}]), - - {ok, BProps5} = riakc_pb_socket:get_bucket_type(PB, Type), - - ?assertEqual(5, proplists:get_value(n_val, BProps5)), - - %% check that the bucket inherits from its type - {ok, BProps6} = riakc_pb_socket:get_bucket(PB, {Type, - <<"mybucket">>}), - ?assertEqual(5, proplists:get_value(n_val, BProps6)), - - riakc_pb_socket:set_bucket_type(PB, Type, [{n_val, 3}]), - - {ok, BProps7} = riakc_pb_socket:get_bucket_type(PB, Type), - - ?assertEqual(3, proplists:get_value(n_val, BProps7)), - - %% Repeat type checks for unicode type/bucket - - riakc_pb_socket:set_bucket_type(PB, UnicodeType, - [{n_val, 5}]), - - {ok, UBProps3} = riakc_pb_socket:get_bucket_type(PB, UnicodeType), - - ?assertEqual(5, proplists:get_value(n_val, UBProps3)), - - %% check that the bucket inherits from its type - {ok, UBProps4} = riakc_pb_socket:get_bucket(PB, {UnicodeType, - UnicodeBucket}), - - ?assertEqual(5, proplists:get_value(n_val, UBProps4)), - - riakc_pb_socket:set_bucket_type(PB, UnicodeType, [{n_val, 3}]), - - {ok, UBProps5} = riakc_pb_socket:get_bucket_type(PB, UnicodeType), - - ?assertEqual(3, proplists:get_value(n_val, UBProps5)), - - %% make sure a regular bucket under the default type reflects app.config - {ok, BProps8} = riakc_pb_socket:get_bucket(PB, {<<"default">>, - <<"mybucket">>}), - ?assertEqual(2, proplists:get_value(n_val, BProps8)), - - %% make sure the type we previously created is NOT affected - {ok, BProps9} = riakc_pb_socket:get_bucket_type(PB, Type), - - ?assertEqual(3, proplists:get_value(n_val, BProps9)), - - %% make sure a bucket under that type is also not affected - {ok, BProps10} = riakc_pb_socket:get_bucket(PB, {Type, - <<"mybucket">>}), - ?assertEqual(3, proplists:get_value(n_val, BProps10)), - - %% make sure 
a newly created type is not affected either - %% create a new type - Type2 = <<"mynewtype">>, - rt:create_and_activate_bucket_type(Node, Type2, []), - rt:wait_until_bucket_type_status(Type2, active, Nodes), - - {ok, BProps11} = riakc_pb_socket:get_bucket_type(PB, Type2), - - ?assertEqual(3, proplists:get_value(n_val, BProps11)), - - %% 2i tests - - case HaveIndexes of - false -> ok; - true -> - Obj01 = riakc_obj:new(<<"test">>, <<"JRD">>, <<"John Robert Doe, 25">>), - Obj02 = riakc_obj:new({Type, <<"test">>}, <<"JRD">>, <<"Jane Rachel Doe, 21">>), - - Obj1 = riakc_obj:update_metadata(Obj01, - riakc_obj:set_secondary_index( - riakc_obj:get_update_metadata(Obj01), - [{{integer_index, "age"}, - [25]},{{binary_index, "name"}, - [<<"John">>, <<"Robert">> - ,<<"Doe">>]}])), - - Obj2 = riakc_obj:update_metadata(Obj02, - riakc_obj:set_secondary_index( - riakc_obj:get_update_metadata(Obj02), - [{{integer_index, "age"}, - [21]},{{binary_index, "name"}, - [<<"Jane">>, <<"Rachel">> - ,<<"Doe">>]}])), - - riakc_pb_socket:put(PB, Obj1), - riakc_pb_socket:put(PB, Obj2), - - ?assertMatch({ok, {index_results_v1, [<<"JRD">>], _, _}}, riakc_pb_socket:get_index(PB, <<"test">>, - {binary_index, - "name"}, - <<"John">>)), - - ?assertMatch({ok, {index_results_v1, [], _, _}}, riakc_pb_socket:get_index(PB, <<"test">>, - {binary_index, - "name"}, - <<"Jane">>)), - - ?assertMatch({ok, {index_results_v1, [<<"JRD">>], _, _}}, riakc_pb_socket:get_index(PB, - {Type, - <<"test">>}, - {binary_index, - "name"}, - <<"Jane">>)), - - %% wild stab at the undocumented cs_bucket_fold - {ok, ReqID} = riakc_pb_socket:cs_bucket_fold(PB, <<"test">>, []), - accumulate(ReqID), - - {ok, ReqID2} = riakc_pb_socket:cs_bucket_fold(PB, {Type, - <<"test">>}, []), - accumulate(ReqID2), - ok - end, - - - Store = fun(Bucket, {K,V, BI, II}) -> - O=riakc_obj:new(Bucket, K), - MD=riakc_obj:add_secondary_index(dict:new(), - {{binary_index, "b_idx"}, - [BI]}), - MD2=riakc_obj:add_secondary_index(MD, {{integer_index, - "i_idx"}, [II]}), - OTwo=riakc_obj:update_metadata(O,MD2), - lager:info("storing ~p", [OTwo]), - riakc_pb_socket:put(PB,riakc_obj:update_value(OTwo, V, "application/json")) - end, - - [Store(<<"MRbucket">>, KV) || KV <- [ - {<<"foo">>, <<"2">>, <<"a">>, 4}, - {<<"bar">>, <<"3">>, <<"b">>, 7}, - {<<"baz">>, <<"4">>, <<"a">>, 4}]], - - ?assertEqual({ok, [{1, [9]}]}, - riakc_pb_socket:mapred_bucket(PB, <<"MRbucket">>, - [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, - undefined, true}])), - - [Store({Type, <<"MRbucket">>}, KV) || KV <- [ - {<<"foo">>, <<"2">>, <<"a">>, 4}, - {<<"bar">>, <<"3">>, <<"b">>, 7}, - {<<"baz">>, <<"4">>, <<"a">>, 4}, - {<<"bam">>, <<"5">>, <<"a">>, 3}]], - - ?assertEqual({ok, [{0, [<<"2">>]}]}, - riakc_pb_socket:mapred(PB, {{Type, <<"MRbucket">>}, - [[<<"starts_with">>, <<"f">>]]}, - [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, true}])), - - ?assertEqual({ok, [{1, [14]}]}, - riakc_pb_socket:mapred_bucket(PB, {Type, <<"MRbucket">>}, - [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, - undefined, true}])), - - ?assertEqual({ok, [{1, [3]}]}, - riakc_pb_socket:mapred(PB, - [{<<"MRbucket">>, <<"foo">>}, - {<<"MRbucket">>, <<"bar">>}, - {<<"MRbucket">>, <<"baz">>}], - [{map, {jsanon, <<"function (v) { return [1]; }">>}, - undefined, false}, - {reduce, {jsanon, - <<"function(v) { - total = v.reduce( - function(prev,curr,idx,array) { - return prev+curr; - }, 0); - return [total]; - }">>}, - 
undefined, true}])), - - ?assertEqual({ok, [{1, [4]}]}, - riakc_pb_socket:mapred(PB, - [{{{Type, <<"MRbucket">>}, <<"foo">>}, - undefined}, - {{{Type, <<"MRbucket">>}, <<"bar">>}, - undefined}, - {{{Type, <<"MRbucket">>}, <<"baz">>}, - undefined}, - {{{Type, <<"MRbucket">>}, <<"bam">>}, - undefined}], - [{map, {jsanon, <<"function (v) { return [1]; }">>}, - undefined, false}, - {reduce, {jsanon, - <<"function(v) { - total = v.reduce( - function(prev,curr,idx,array) { - return prev+curr; - }, 0); - return [total]; - }">>}, - undefined, true}])), - - case HaveIndexes of - false -> ok; - true -> - {ok, [{1, Results}]} = riakc_pb_socket:mapred(PB, - {index,<<"MRbucket">>,{integer_index, - "i_idx"},3,5}, - [{map, {modfun, riak_kv_mapreduce, - map_object_value}, - undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, - reduce_set_union}, - undefined, true}]), - ?assertEqual([<<"2">>, <<"4">>], lists:sort(Results)), - - {ok, [{1, Results1}]} = riakc_pb_socket:mapred(PB, - {index,{Type, - <<"MRbucket">>},{integer_index, - "i_idx"},3,5}, - [{map, {modfun, riak_kv_mapreduce, - map_object_value}, - undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, - reduce_set_union}, - undefined, true}]), - ?assertEqual([<<"2">>, <<"4">>, <<"5">>], lists:sort(Results1)), - - {ok, [{1, Results2}]} = riakc_pb_socket:mapred(PB, - {index,<<"MRbucket">>,{binary_index, - "b_idx"}, <<"a">>}, - [{map, {modfun, riak_kv_mapreduce, - map_object_value}, - undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, - reduce_set_union}, - undefined, true}]), - ?assertEqual([<<"2">>, <<"4">>], lists:sort(Results2)), - - {ok, [{1, Results3}]} = riakc_pb_socket:mapred(PB, - {index,{Type, - <<"MRbucket">>},{binary_index, - "b_idx"}, <<"a">>}, - [{map, {modfun, riak_kv_mapreduce, - map_object_value}, - undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, - reduce_set_union}, - undefined, true}]), - ?assertEqual([<<"2">>, <<"4">>, <<"5">>], lists:sort(Results3)), - ok - end, - - %% load this module on all the nodes - ok = rt:load_modules_on_nodes([?MODULE], Nodes), - - %% do a modfun mapred using the function from this module - ?assertEqual({ok, [{1, [2]}]}, - riakc_pb_socket:mapred_bucket(PB, {modfun, ?MODULE, - mapred_modfun, []}, - [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, - undefined, true}])), - - %% do a modfun mapred using the function from this module - ?assertEqual({ok, [{1, [5]}]}, - riakc_pb_socket:mapred_bucket(PB, {modfun, ?MODULE, - mapred_modfun_type, []}, - [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, - undefined, true}])), - - riakc_pb_socket:stop(PB), - pass. - -accumulate(ReqID) -> - receive - {ReqID, {done, _}} -> - ok; - {ReqID, Msg} -> - lager:info("got ~p", [Msg]), - accumulate(ReqID) - end. - -mapred_modfun(Pipe, Args, _Timeout) -> - lager:info("Args for mapred modfun are ~p", [Args]), - riak_pipe:queue_work(Pipe, {{<<"MRbucket">>, <<"foo">>}, {struct, []}}), - riak_pipe:eoi(Pipe). - -mapred_modfun_type(Pipe, Args, _Timeout) -> - lager:info("Args for mapred modfun are ~p", [Args]), - riak_pipe:queue_work(Pipe, {{{<<"mytype">>, <<"MRbucket">>}, <<"bam">>}, {struct, []}}), - riak_pipe:eoi(Pipe). 
diff --git a/tests/cause_bdp.erl b/tests/cause_bdp.erl deleted file mode 100644 index 66ea8fe74..000000000 --- a/tests/cause_bdp.erl +++ /dev/null @@ -1,31 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% cause_bdp - helper module used by verify_busy_dist_port -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(cause_bdp). --compile(export_all). - -spam_nodes(TargetNodes) -> - [[spawn(?MODULE, spam, [N]) || _ <- lists:seq(1,1000*200)] || N <- TargetNodes]. - -spam(Node) -> - timer:sleep(random:uniform(100)), - catch rpc:call(Node, erlang, whereis, [rex]). diff --git a/tests/cluster_meta_basic.erl b/tests/cluster_meta_basic.erl deleted file mode 100644 index b659ec107..000000000 --- a/tests/cluster_meta_basic.erl +++ /dev/null @@ -1,175 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(cluster_meta_basic). --behavior(riak_test). --export([confirm/0, object_count/2]). --include_lib("eunit/include/eunit.hrl"). - --define(PREFIX1, {a, b}). --define(PREFIX2, {fold, prefix}). --define(KEY1, key1). --define(KEY2, key2). --define(VAL1, val1). --define(VAL2, val2). - -confirm() -> - Nodes = rt:build_cluster(5), - ok = test_fold_full_prefix(Nodes), - ok = test_metadata_conflicts(Nodes), - ok = test_writes_after_partial_cluster_failure(Nodes), - pass. - -%% 1. write a key and wait until it propagates around the cluster -%% 2. stop the immediate eager peers of the node that performed the write -%% 3. perform an update of the key from the same node and wait until it reaches all alive nodes -%% 4.
bring up stopped nodes and ensure that either lazily queued messages or anti-entropy repair -%% propagates the key to all nodes in the cluster -test_writes_after_partial_cluster_failure([N1 | _]=Nodes) -> - lager:info("testing writes after partial cluster failure"), - metadata_put(N1, ?PREFIX1, ?KEY1, ?VAL1), - wait_until_metadata_value(Nodes, ?PREFIX1, ?KEY1, ?VAL1), - print_tree(N1, Nodes), - - StopNodes = eager_peers(N1, N1), - AliveNodes = Nodes -- StopNodes, - lager:info("stopping nodes: ~p remaining nodes: ~p", [StopNodes, AliveNodes]), - [rt:stop(N) || N <- StopNodes], - - metadata_put(N1, ?PREFIX1, ?KEY1, ?VAL2), - wait_until_metadata_value(AliveNodes, ?PREFIX1, ?KEY1, ?VAL2), - - lager:info("bring stopped nodes back up: ~p", [StopNodes]), - [rt:start(N) || N <- StopNodes], - wait_until_metadata_value(Nodes, ?PREFIX1, ?KEY1, ?VAL2), - ok. - -%% 1. write several keys to a prefix, fold over them accumulating a list -%% 2. ensure list of keys and values match those written to prefix -test_fold_full_prefix([N1 | _]=Nodes) -> - rt:load_modules_on_nodes([?MODULE], Nodes), - lager:info("testing prefix (~p) fold on ~p", [?PREFIX2, N1]), - KeysAndVals = [{I, I} || I <- lists:seq(1, 10)], - [metadata_put(N1, ?PREFIX2, K, V) || {K, V} <- KeysAndVals], - %% we don't use a resolver but shouldn't have conflicts either, so assume that in - %% head of fold function - FoldRes = [{K, V} || {K, [V]} <- metadata_to_list(N1, ?PREFIX2)], - SortedRes = lists:ukeysort(1, FoldRes), - ?assertEqual(KeysAndVals, SortedRes), - ok. - -test_metadata_conflicts([N1, N2 | _]=Nodes) -> - rt:load_modules_on_nodes([?MODULE], Nodes), - lager:info("testing conflicting writes to a key"), - write_conflicting(N1, N2, ?PREFIX1, ?KEY2, ?VAL1, ?VAL2), - - %% assert that we still have siblings since write_conflicting uses allow_put=false - lager:info("checking object count after resolve on get w/o put"), - ?assertEqual(2, rpc:call(N1, ?MODULE, object_count, [?PREFIX1, ?KEY2])), - ?assertEqual(2, rpc:call(N2, ?MODULE, object_count, [?PREFIX1, ?KEY2])), - - %% iterate over the values and ensure we can resolve w/o doing a put - ?assertEqual([{?KEY2, lists:usort([?VAL1, ?VAL2])}], - metadata_to_list(N1, ?PREFIX1, [{allow_put, false}])), - ?assertEqual([{?KEY2, lists:usort([?VAL1, ?VAL2])}], - metadata_to_list(N2, ?PREFIX1, [{allow_put, false}])), - lager:info("checking object count after resolve on itr_key_values w/o put"), - ?assertEqual(2, rpc:call(N1, ?MODULE, object_count, [?PREFIX1, ?KEY2])), - ?assertEqual(2, rpc:call(N2, ?MODULE, object_count, [?PREFIX1, ?KEY2])), - - %% assert that we no longer have siblings when allow_put=true - lager:info("checking object count after resolve on get w/ put"), - wait_until_metadata_value(N1, ?PREFIX1, ?KEY2, - [{resolver, fun list_resolver/2}], - lists:usort([?VAL1, ?VAL2])), - wait_until_metadata_value([N1, N2], ?PREFIX1, ?KEY2, - [{resolver, fun list_resolver/2}, {allow_put, false}], - lists:usort([?VAL1, ?VAL2])), - wait_until_object_count([N1, N2], ?PREFIX1, ?KEY2, 1), - ok. - -write_conflicting(N1, N2, Prefix, Key, Val1, Val2) -> - rpc:call(N1, riak_core_metadata_manager, put, [{Prefix, Key}, undefined, Val1]), - rpc:call(N2, riak_core_metadata_manager, put, [{Prefix, Key}, undefined, Val2]), - wait_until_metadata_value([N1, N2], Prefix, Key, - [{resolver, fun list_resolver/2}, - {allow_put, false}], - lists:usort([Val1, Val2])).
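The resolver and allow_put options exercised in test_metadata_conflicts/1 are the two sibling-handling knobs these helpers pass through to riak_core_metadata:get/3: the resolver fun folds conflicting values into one, and allow_put controls whether the resolved value is written back. A hedged sketch of a read-side resolution with no write-back (the prefix, key, and merge fun here are illustrative, not from the test):

    Merged = riak_core_metadata:get({a, b}, key1,
                                    [{resolver, fun(A, B) -> lists:max([A, B]) end},
                                     {allow_put, false}]).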
- - -object_count(Prefix, Key) -> - Obj = riak_core_metadata_manager:get({Prefix, Key}), - case Obj of - undefined -> 0; - _ -> riak_core_metadata_object:value_count(Obj) - end. - - -list_resolver(X1, X2) when is_list(X2) andalso is_list(X1) -> - lists:usort(X1 ++ X2); -list_resolver(X1, X2) when is_list(X2) -> - lists:usort([X1 | X2]); -list_resolver(X1, X2) when is_list(X1) -> - lists:usort(X1 ++ [X2]); -list_resolver(X1, X2) -> - lists:usort([X1, X2]). - -metadata_to_list(Node, FullPrefix) -> - metadata_to_list(Node, FullPrefix, []). - -metadata_to_list(Node, FullPrefix, Opts) -> - rpc:call(Node, riak_core_metadata, to_list, [FullPrefix, Opts]). - -metadata_put(Node, Prefix, Key, FunOrVal) -> - ok = rpc:call(Node, riak_core_metadata, put, [Prefix, Key, FunOrVal]). - -metadata_get(Node, Prefix, Key, Opts) -> - rpc:call(Node, riak_core_metadata, get, [Prefix, Key, Opts]). - -wait_until_metadata_value(Nodes, Prefix, Key, Val) -> - wait_until_metadata_value(Nodes, Prefix, Key, [], Val). - -wait_until_metadata_value(Nodes, Prefix, Key, Opts, Val) when is_list(Nodes) -> - [wait_until_metadata_value(Node, Prefix, Key, Opts, Val) || Node <- Nodes]; -wait_until_metadata_value(Node, Prefix, Key, Opts, Val) -> - lager:info("wait until {~p, ~p} equals ~p on ~p", [Prefix, Key, Val, Node]), - F = fun() -> - Val =:= metadata_get(Node, Prefix, Key, Opts) - end, - ?assertEqual(ok, rt:wait_until(F)), - ok. - -wait_until_object_count(Nodes, Prefix, Key, Count) when is_list(Nodes) -> - [wait_until_object_count(Node, Prefix, Key, Count) || Node <- Nodes]; -wait_until_object_count(Node, Prefix, Key, Count) -> - lager:info("wait until {~p, ~p} has object count ~p on ~p", [Prefix, Key, Count, Node]), - F = fun() -> - Count =:= rpc:call(Node, ?MODULE, object_count, [Prefix, Key]) - end, - ?assertEqual(ok, rt:wait_until(F)), - ok. - - -eager_peers(Node, Root) -> - {Eagers, _} = rpc:call(Node, riak_core_broadcast, debug_get_peers, [Node, Root]), - Eagers. - -print_tree(Root, Nodes) -> - Tree = rpc:call(Root, riak_core_broadcast, debug_get_tree, [Root, Nodes]), - lager:info("broadcast tree: ~p", [Tree]). diff --git a/tests/cluster_meta_proxy_server.erl b/tests/cluster_meta_proxy_server.erl deleted file mode 100644 index 9455dbfde..000000000 --- a/tests/cluster_meta_proxy_server.erl +++ /dev/null @@ -1,123 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(cluster_meta_proxy_server). - --compile(export_all). - --behaviour(gen_server). - -%% API --export([start_link/0, history/0, is_empty/1]). - -%% gen_server callbacks --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3]). - --record(state, {history :: [any()], - last_msg_ts :: non_neg_integer()}). - --define(SERVER, ?MODULE). 
- -start_link() -> - gen_server:start_link({global, ?SERVER}, ?MODULE, [], []). - -history() -> - gen_server:call({global, ?SERVER}, history, infinity). - -is_empty(ThresholdSecs) -> - gen_server:call({global, ?SERVER}, {is_empty, ThresholdSecs}, infinity). - -init([]) -> - {ok, #state{history = []}}. - -handle_call({is_empty, ThresholdSecs}, _From, State=#state{last_msg_ts=LastMsgTs}) -> - Now = moment(), - Reply = case process_info(self(), message_queue_len) of - {message_queue_len, 0} when (Now - LastMsgTs) > ThresholdSecs -> - true; - {message_queue_len, _} -> - false - end, - {reply, Reply, State}; -handle_call(history, _From, State=#state{history=History}) -> - {reply, lists:reverse(History), State}. - -handle_cast({From, Server, To, Msg}, State) -> -% {Keep, State1} = keep_msg(node_name(From), node_name(To), State), - State1 = add_to_history({From, To, Msg}, State), - gen_server:cast({Server, To}, Msg), -% true -> event_logger:event({dropped, node_name(From), node_name(To), Msg}) end, - {noreply, State1}; -handle_cast(_Msg, State) -> - {noreply, State}. - -%%-------------------------------------------------------------------- -%% Function: handle_info(Info, State) -> {noreply, State} | -%% {noreply, State, Timeout} | -%% {stop, Reason, State} -%% Description: Handling all non call/cast messages -%%-------------------------------------------------------------------- -handle_info(_Info, State) -> - {noreply, State}. - -%%-------------------------------------------------------------------- -%% Function: terminate(Reason, State) -> void() -%% Description: This function is called by a gen_server when it is about to -%% terminate. It should be the opposite of Module:init/1 and do any necessary -%% cleaning up. When it returns, the gen_server terminates with Reason. -%% The return value is ignored. -%%-------------------------------------------------------------------- -terminate(_Reason, _State) -> - ok. - -%%-------------------------------------------------------------------- -%% Func: code_change(OldVsn, State, Extra) -> {ok, NewState} -%% Description: Convert process state when code is changed -%%-------------------------------------------------------------------- -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -%%-------------------------------------------------------------------- -%%% Internal functions -%%-------------------------------------------------------------------- - -add_to_history(Entry, State=#state{history=History}) -> - State#state{history=[Entry | History], last_msg_ts = moment()}. - -moment() -> - calendar:datetime_to_gregorian_seconds(calendar:universal_time()). - - -%% keep_msg(From, To, State) -> -%% Key = {From, To}, -%% Cfg = State#state.config, -%% {Keep, Cfg1} = -%% case proplists:get_value(Key, Cfg, []) of -%% [] -> {true, Cfg}; -%% [{keep, N}|Xs] -> {true, store(Key, [{keep, N - 1} || N > 1] ++ Xs, Cfg)}; -%% [{drop, N}|Xs] -> {false, store(Key, [{drop, N - 1} || N > 1] ++ Xs, Cfg)} -%% end, -%% {Keep, State#state{ config = Cfg1 }}. - -%% store(Key, [], Cfg) -> lists:keydelete(Key, 1, Cfg); -%% store(Key, Xs, Cfg) -> lists:keystore(Key, 1, Cfg, {Key, Xs}). - -%% node_name(Node) -> -%% list_to_atom(hd(string:tokens(atom_to_list(Node),"@"))). diff --git a/tests/cluster_meta_rmr.erl b/tests/cluster_meta_rmr.erl deleted file mode 100644 index bc5fcab11..000000000 --- a/tests/cluster_meta_rmr.erl +++ /dev/null @@ -1,227 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. 
-%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(cluster_meta_rmr). --behavior(riak_test). --export([confirm/0]). - --define(CM_PREFIX, {test, cm}). - -confirm() -> - rt:set_conf(all, [{"ring_size", "128"}]), - Seed = erlang:now(), - lager:info("SEED: ~p", [Seed]), - random:seed(Seed), -% run([10,20,40], 10, 50, 10). - run([5], 1, 5, 1), - pass. - -run(NumNodes, SuperRounds, NumRounds, StableRounds) when is_list(NumNodes) -> - [begin - [begin - lager:info("starting super round ~p: ~p nodes ~p total rounds ~p stable rounds", - [S, N, NumRounds, StableRounds]), - run(N, NumRounds, StableRounds) - end || S <- lists:seq(1, SuperRounds)] - end || N <- NumNodes]. - -run(NumNodes, NumRounds, StableRounds) -> - {ok, Pid} = cluster_meta_proxy_server:start_link(), - unlink(Pid), - AllNodes = setup_nodes(NumNodes), - %% ensures any gossip messages being proxied during initial cluster setup - %% are drained before we proceed -% wait_until_no_messages(), -% {ok, R} = rpc:call(hd(AllNodes), riak_core_ring_manager, get_my_ring, []), -% Nodes = riak_core_ring:active_members(R), -% lager:info("GOSSIP TREE: ~p", [riak_core_util:build_tree(2, Nodes, [cycles])]), - lager:info("running ~p broadcast rounds on nodes: ~p", [NumRounds, AllNodes]), - DownNodes = run_rounds(NumRounds, StableRounds, fun broadcast/2, fun wait_until_broadcast_consistent/2, AllNodes, []), -% lager:info("running ~p gossip rounds on nodes: ~p", [NumRounds, AllNodes]), -% run_rounds(NumRounds, fun gossip/2, fun wait_until_gossip_consistent/2, AllNodes), - calc_stuff(AllNodes, NumNodes, NumRounds), - exit(Pid, kill), - %% start all the down nodes so we can clean them :( - [rt:start(Node) || Node <- DownNodes], - rt:clean_cluster(AllNodes). - -setup_nodes(NumNodes) -> - Nodes = rt:build_cluster(NumNodes), - [begin - ok = rpc:call(Node, application, set_env, [riak_core, broadcast_exchange_timer, 4294967295]), - ok = rpc:call(Node, application, set_env, [riak_core, gossip_limit, {10000000, 4294967295}]), - rt_intercept:add(Node, {riak_core_broadcast, [{{send,2}, global_send}]}) - end || Node <- Nodes], - Nodes. 
- -run_rounds(0, _, _, _, _, DownNodes) -> - DownNodes; -run_rounds(_, _, _, _, [_SenderNode], DownNodes) -> - lager:info("ran out of nodes to shut down"), - DownNodes; -run_rounds(Round, 0, SendFun, ConsistentFun, [SenderNode | OtherNodes]=UpNodes, DownNodes) -> - lager:info("round ~p (unstable): starting", [Round]), - %% get down nodes too just so it prints nicer, debug_get_tree handles them being down - Tree = rpc:call(SenderNode, riak_core_broadcast, debug_get_tree, [SenderNode, UpNodes ++ DownNodes]), - lager:info("round ~p (unstable): tree before sending ~p", [Round, Tree]), - {FailedNode, RemainingNodes} = fail_node(Round, OtherNodes), - NewUpNodes = [SenderNode | RemainingNodes], - SendFun(SenderNode, Round), - lager:info("round: ~p (unstable): waiting until updates have reached all running nodes", [Round]), - try ConsistentFun(NewUpNodes, Round) of - _ -> - run_rounds(Round - 1, 0, SendFun, ConsistentFun, NewUpNodes, [FailedNode | DownNodes]) - catch - _:_ -> - NumDown = length([FailedNode | DownNodes]), - NumUp = length(NewUpNodes), - Total = NumDown + NumUp, - lager:error("round ~p (unstable): consistency check failed w/ ~p down ~p up, total ~p", - [Round, NumDown, NumUp, Total]), - [FailedNode | DownNodes] - end; -run_rounds(Round, StableRound, SendFun, ConsistentFun, [SenderNode | _]=UpNodes, DownNodes) -> - lager:info("round ~p (stable): starting", [Round]), - SendFun(SenderNode, Round), - lager:info("round ~p (stable): waiting until there are no messages left", [Round]), - wait_until_no_messages(), - lager:info("round ~p (stable): waiting until updates have reached all running nodes", [Round]), - ConsistentFun(UpNodes, Round), - run_rounds(Round - 1, StableRound - 1, SendFun, ConsistentFun, UpNodes, DownNodes). - -fail_node(Round, OtherNodes) -> - Failed = lists:nth(random:uniform(length(OtherNodes)), OtherNodes), - lager:info("round: ~p (unstable): shutting down ~p", [Round, Failed]), - rt:stop(Failed), - {Failed, lists:delete(Failed, OtherNodes)}. - -calc_stuff(AllNodes, NumNodes, NumRounds) -> - History = cluster_meta_proxy_server:history(), - %% GossipHistory = [{From, To, element(2, riak_core_ring:get_meta(round, R))} || - %% {From, To, {reconcile_ring, R}} <- History, riak_core_ring:get_meta(round, R) =/= undefined], - %% lager:info("GOSSIP HISTORY:"), - %% [lager:info("~p", [X]) || X <- GossipHistory], - ResultsDict = calc_stuff(AllNodes, History), - ResultsList = lists:reverse(orddict:to_list(ResultsDict)), - ResultsFileName = io_lib:format("results-~p.csv", [NumNodes]), - {ok, ResultsFile} = file:open(ResultsFileName, [write]), - io:format(ResultsFile, "round,broadcastrmr,gossiprmr,broadcastldh,gossipldh~n", []), - [io:format(ResultsFile, "~p,~p,~p,~p,~p~n", [abs(Round - NumRounds), BRMR, GRMR, BLDH, GLDH]) - || {{round, Round}, {{BRMR, GRMR}, {BLDH, GLDH}}} <- ResultsList], - lager:info("NumNodes: ~p NumRounds: ~p RESULTS: ~p", [NumNodes, NumRounds, ResultsList]). - -calc_stuff(AllNodes, History) -> - CountDict = lists:foldl(fun process_message/2, orddict:new(), History), - RMRN = length(AllNodes) - 1, - orddict:fold(fun(TestRound, Info, AccDict) -> - {BroadcastCount, BroadcastLDH} = proplists:get_value(broadcast, Info), - {GossipCount, GossipLDH} = proplists:get_value(gossip, Info, {0, 0}), - BroadcastRMR = (BroadcastCount / RMRN) - 1, - GossipRMR = (GossipCount / RMRN) - 1, - %% add 1 to LDHs since rounds are zero based - orddict:store(TestRound, {{BroadcastRMR, GossipRMR}, {BroadcastLDH+1, GossipLDH+1}}, AccDict) - end, orddict:new(), CountDict). 
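For context, the RMR figures computed above follow the Relative Message Redundancy metric from the epidemic-broadcast literature: RMR = m/(n-1) - 1, where m is the number of payload messages exchanged and n the number of nodes, so 0 means exactly the minimum number of deliveries and higher values mean redundant traffic. calc_stuff/2 computes exactly this, with n-1 precomputed as RMRN; as a standalone sketch:

    %% Relative Message Redundancy: 0 is optimal, higher means redundant sends.
    rmr(MsgCount, NumNodes) ->
        MsgCount / (NumNodes - 1) - 1.

    %% e.g. 4 payload messages across a 5-node cluster: rmr(4, 5) =:= 0.0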
- - -process_message({_From, _To, - {broadcast, {{?CM_PREFIX, {round, TestRound}}, _Ctx}, _Payload, _Mod, BCastRound, _Root, _From}}, - ResultsDict) -> - case orddict:find({round, TestRound}, ResultsDict) of - error -> - orddict:store({round, TestRound}, [{broadcast, {1, BCastRound}}], ResultsDict); - {ok, Info} -> - {CurrentCount, LastBCastRound} = proplists:get_value(broadcast, Info, {1, BCastRound}), - NewInfo = lists:keystore(broadcast, 1, Info, {broadcast, {CurrentCount+1, max(LastBCastRound, BCastRound)}}), - orddict:store({round, TestRound}, NewInfo, ResultsDict) - end; -%process_message({_From, _To, -% {reconcile_ring, OutRing}}, {CountDict, LDHDict}) -> -% CountDict1 = case riak_core_ring:get_meta(round, OutRing) of -% undefined -> CountDict; %% not a gossip message we sent (e.g. ownership change building cluster) -% {ok, Round} -> orddict:update_counter({gossip, Round}, 1, CountDict) -% end, -% {CountDict1, LDHDict}; -process_message({_From, _To, - {reconcile_ring, GossipRound, OutRing}}, ResultsDict) -> - case riak_core_ring:get_meta(round, OutRing) of - undefined -> ResultsDict; - {ok, TestRound} -> - case orddict:find({round, TestRound}, ResultsDict) of - error -> - orddict:store({round, TestRound}, [{gossip, {1, GossipRound}}], ResultsDict); - {ok, Info} -> - {CurrentCount, LastGossipRound} = proplists:get_value(gossip, Info, {1, GossipRound}), - NewInfo = lists:keystore(gossip, 1, Info, {gossip, {CurrentCount+1, max(LastGossipRound, GossipRound)}}), - orddict:store({round, TestRound}, NewInfo, ResultsDict) - end - end; -process_message(_Msg, Acc) -> - Acc. - -broadcast(SenderNode, Round) -> - %% TODO: don't use metadata manager? - Key = mk_key(Round), - Value = mk_value(Round), - ok = rpc:call(SenderNode, riak_core_metadata, put, [?CM_PREFIX, Key, Value]). - -%gossip(SenderNode, Round) -> -% Value = mk_value(Round), -% {ok, _} = rpc:call(SenderNode, riak_core_ring, update_round, [Value]). - -wait_until_no_messages() -> - F = fun() -> - cluster_meta_proxy_server:is_empty(5) - end, - ok = rt:wait_until(F). - -wait_until_broadcast_consistent(Nodes, Round) -> - Key = mk_key(Round), - Value = mk_value(Round), - wait_until_metadata_value(Nodes, Key, Value). - -wait_until_metadata_value(Nodes, Key, Val) when is_list(Nodes) -> - [wait_until_metadata_value(Node, Key, Val) || Node <- Nodes]; -wait_until_metadata_value(Node, Key, Val) -> - F = fun() -> - %% no need to resolve b/c we use a single sender - Val =:= metadata_get(Node, Key) - end, - ok = rt:wait_until(F, 10, 500). - -metadata_get(Node, Key) -> - rpc:call(Node, riak_core_metadata, get, [?CM_PREFIX, Key]). - -%wait_until_gossip_consistent(Nodes, Round) -> -% Value = mk_value(Round), -% wait_until_bucket_value(Nodes, Value). - -%wait_until_bucket_value(Nodes, Val) when is_list(Nodes) -> -% [wait_until_bucket_value(Node, Val) || Node <- Nodes]; -%wait_until_bucket_value(Node, Val) -> -% F = fun() -> -% {ok, Ring} = rpc:call(Node, riak_core_ring_manager, get_my_ring, []), -% {ok, Val} =:= riak_core_ring:get_meta(round, Ring) -% end, -% ok = rt:wait_until(F). - -mk_key(Round) -> - {round, Round}. - -mk_value(Round) -> - Round. diff --git a/tests/cuttlefish_configuration.erl b/tests/cuttlefish_configuration.erl deleted file mode 100644 index 4447bfca4..000000000 --- a/tests/cuttlefish_configuration.erl +++ /dev/null @@ -1,24 +0,0 @@ --module(cuttlefish_configuration). - --behavior(riak_test). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl").
- -confirm() -> - - CuttlefishConf = [ - {"ring_size", "8"}, - {"leveldb.sync_on_write", "on"} - ], - - [Node] = rt:deploy_nodes(1, {cuttlefish, CuttlefishConf}), - {ok, RingSize} = rt:rpc_get_env(Node, [{riak_core, ring_creation_size}]), - ?assertEqual(8, RingSize), - - %% test leveldb sync typo - {ok, LevelDBSync} = rt:rpc_get_env(Node, [{eleveldb, sync}]), - ?assertEqual(true, LevelDBSync), - - - pass. diff --git a/tests/ensemble_basic.erl b/tests/ensemble_basic.erl deleted file mode 100644 index 5e2b308d2..000000000 --- a/tests/ensemble_basic.erl +++ /dev/null @@ -1,31 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(ensemble_basic). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - NumNodes = 5, - NVal = 5, - Config = ensemble_util:fast_config(NVal), - lager:info("Building cluster and waiting for ensemble to stabilize"), - ensemble_util:build_cluster(NumNodes, Config, NVal), - pass. diff --git a/tests/ensemble_basic2.erl b/tests/ensemble_basic2.erl deleted file mode 100644 index dee9e815f..000000000 --- a/tests/ensemble_basic2.erl +++ /dev/null @@ -1,76 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(ensemble_basic2). --export([confirm/0]). --compile({parse_transform, rt_intercept_pt}). --include_lib("eunit/include/eunit.hrl").
- -confirm() -> - NumNodes = 5, - NVal = 5, - Config = ensemble_util:fast_config(NVal), - lager:info("Building cluster and waiting for ensemble to stabilize"), - Nodes = ensemble_util:build_cluster(NumNodes, Config, NVal), - Node = hd(Nodes), - Ensembles = ensemble_util:ensembles(Node), - lager:info("Killing all ensemble leaders"), - ok = ensemble_util:kill_leaders(Node, Ensembles), - ensemble_util:wait_until_stable(Node, NVal), - Peers = [PeerId || {PeerId, _PeerPid} <- ensemble_util:peers(Node)], - lager:info("Verifying peers wait for riak_kv_service"), - Delay = rt_config:get(kv_vnode_delay, 5000), - rt_intercept:add_and_save(Node, {riak_kv_vnode, [{{init, 1}, {[Delay], - fun(Args) -> - timer:sleep(Delay), - riak_kv_vnode_orig:init_orig(Args) - end}}]}), - rt:stop_and_wait(Node), - rt:start(Node), - lager:info("Polling peers while riak_kv starts. We should see none"), - UpNoPeersFun = - fun() -> - PL = ensemble_util:peers(Node), - NodePeers = [P || {P, _} <- PL], - NonRootPeers = [P || P <- NodePeers, element(1, P) /= root], - S = rpc:call(Node, riak_core_node_watcher, services, [Node]), - case S of - L when is_list(L) -> - case lists:member(riak_kv, L) of - true -> - true; - false -> - ?assertEqual([], NonRootPeers) - end; - Err -> - ?assertEqual(ok, {peer_get_error, Err}) - end - end, - rt:wait_until(UpNoPeersFun), - lager:info("Perfect. riak_kv is now up and no peers started before that. " - "Now check they come back up"), - SPeers = lists:sort(Peers), - ?assertEqual(ok, rt:wait_until(fun() -> - L = ensemble_util:peers(Node), - L2 = lists:sort([P || {P, _} <- L]), - SPeers == L2 - end)), - lager:info("All expected peers are back. Life is good"), - pass. diff --git a/tests/ensemble_basic3.erl b/tests/ensemble_basic3.erl deleted file mode 100644 index 50878e797..000000000 --- a/tests/ensemble_basic3.erl +++ /dev/null @@ -1,97 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(ensemble_basic3). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl").
- -confirm() -> - NumNodes = 5, - NVal = 5, - Quorum = NVal div 2 + 1, - Config = ensemble_util:fast_config(NVal), - lager:info("Building cluster and waiting for ensemble to stabilize"), - Nodes = ensemble_util:build_cluster(NumNodes, Config, NVal), - vnode_util:load(Nodes), - Node = hd(Nodes), - Ensembles = ensemble_util:ensembles(Node), - lager:info("Killing all ensemble leaders"), - ok = ensemble_util:kill_leaders(Node, Ensembles), - ensemble_util:wait_until_stable(Node, NVal), - - lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, - [{consistent, true}, {n_val, NVal}]), - ensemble_util:wait_until_stable(Node, NVal), - Bucket = {<<"strong">>, <<"test">>}, - Keys = [<<N:64/integer>> || N <- lists:seq(1,1000)], - - Key1 = hd(Keys), - DocIdx = rpc:call(Node, riak_core_util, chash_std_keyfun, [{Bucket, Key1}]), - PL = rpc:call(Node, riak_core_apl, get_primary_apl, [DocIdx, NVal, riak_kv]), - All = [VN || {VN, _} <- PL], - Other = [VN || {VN={_, Owner}, _} <- PL, - Owner =/= Node], - - Minority = NVal - Quorum, - PartitionedVN = lists:sublist(Other, Minority), - Partitioned = [VNode || {_, VNode} <- PartitionedVN], - MajorityVN = All -- PartitionedVN, - - PBC = rt:pbc(Node), - - lager:info("Partitioning quorum minority: ~p", [Partitioned]), - Part = rt:partition(Nodes -- Partitioned, Partitioned), - ensemble_util:wait_until_stable(Node, Quorum), - - lager:info("Writing ~p consistent keys", [1000]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], - - lager:info("Read keys to verify they exist"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], - - lager:info("Healing partition"), - rt:heal(Part), - - lager:info("Suspending majority vnodes"), - L = [begin - lager:info("Suspending vnode: ~p", [VIdx]), - Pid = vnode_util:suspend_vnode(VNode, VIdx), - {VN, Pid} - end || VN={VIdx, VNode} <- MajorityVN], - L2 = orddict:from_list(L), - - L3 = lists:foldl(fun({VN={VIdx, VNode}, Pid}, Suspended) -> - lager:info("Resuming vnode: ~p", [VIdx]), - vnode_util:resume_vnode(Pid), - ensemble_util:wait_until_stable(Node, Quorum), - lager:info("Re-reading keys"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], - lager:info("Suspending vnode: ~p", [VIdx]), - Pid2 = vnode_util:suspend_vnode(VNode, VIdx), - orddict:store(VN, Pid2, Suspended) - end, orddict:new(), L2), - - lager:info("Resuming all vnodes"), - [vnode_util:resume_vnode(Pid) || {_, Pid} <- L3], - ensemble_util:wait_until_stable(Node, NVal), - lager:info("Re-reading keys"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], - pass. diff --git a/tests/ensemble_basic4.erl b/tests/ensemble_basic4.erl deleted file mode 100644 index 494e21c55..000000000 --- a/tests/ensemble_basic4.erl +++ /dev/null @@ -1,67 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License.
-%% -%% ------------------------------------------------------------------- - --module(ensemble_basic4). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - NumNodes = 5, - NVal = 5, - Quorum = NVal div 2 + 1, - Config = ensemble_util:fast_config(NVal), - lager:info("Building cluster and waiting for ensemble to stabilize"), - Nodes = ensemble_util:build_cluster(NumNodes, Config, NVal), - Node = hd(Nodes), - - lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, - [{consistent, true}, {n_val, NVal}]), - ensemble_util:wait_until_stable(Node, NVal), - Bucket = {<<"strong">>, <<"test">>}, - Keys = [<<N:64/integer>> || N <- lists:seq(1,1000)], - - Key1 = hd(Keys), - DocIdx = rpc:call(Node, riak_core_util, chash_std_keyfun, [{Bucket, Key1}]), - PL = rpc:call(Node, riak_core_apl, get_primary_apl, [DocIdx, NVal, riak_kv]), - Other = [VN || {VN={_, Owner}, _} <- PL, - Owner =/= Node], - - Minority = NVal - Quorum, - PartitionedVN = lists:sublist(Other, Minority), - Partitioned = [VNode || {_, VNode} <- PartitionedVN], - - PBC = rt:pbc(Node), - - lager:info("Partitioning quorum minority: ~p", [Partitioned]), - Part = rt:partition(Nodes -- Partitioned, Partitioned), - rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, [manual]), - ensemble_util:wait_until_stable(Node, Quorum), - - lager:info("Writing ~p consistent keys", [1000]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], - - lager:info("Read keys to verify they exist"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], - - lager:info("Healing partition"), - rt:heal(Part), - - pass. diff --git a/tests/ensemble_byzantine.erl b/tests/ensemble_byzantine.erl deleted file mode 100644 index 0449fc574..000000000 --- a/tests/ensemble_byzantine.erl +++ /dev/null @@ -1,304 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(ensemble_byzantine). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(HARNESS, (rt_config:get(rt_harness))). - --define(NUM_NODES, 8). --define(NVAL, 5).
- -confirm() -> - NumNodes = ?NUM_NODES, - NVal = ?NVAL, - _Quorum = NVal div 2 + 1, - Config = config(), - lager:info("Building cluster and waiting for ensemble to stabilize"), - Nodes = ensemble_util:build_cluster(NumNodes, Config, NVal), - vnode_util:load(Nodes), - Node = hd(Nodes), - ensemble_util:wait_until_stable(Node, NVal), - - create_strong_bucket_type(Node, NVal), - - Bucket = {<<"strong">>, <<"test">>}, - Key = <<"test-key">>, - Val = <<"test-val">>, - - {ok, PL} = get_preflist(Node, Bucket, Key, NVal), - ?assertEqual(NVal, length(PL)), - lager:info("PREFERENCE LIST: ~n ~p", [PL]), - - PBC = rt:pbc(Node), - - normal_write_and_read(PBC, Bucket, Key, Val), - test_lose_one_node_one_partition(PBC, Bucket, Key, Val, PL), - test_lose_all_but_one_partition(PBC, Bucket, Key, Val, PL), - test_lose_minority_synctrees(PBC, Bucket, Key, Val, PL), - test_lose_majority_synctrees(PBC, Bucket, Key, Val, PL), - test_lose_minority_synctrees_one_node_partitioned(PBC, Bucket, Key, Val, - PL, Nodes), - test_lose_all_data_and_trees_except_one_node(PBC, Bucket, Key, Val, PL), - {ok, _NewVal} = test_backup_restore_data_not_trees(Bucket, Key, Val, PL), - test_lose_all_data(PBC, Bucket, Key, PL), - - pass. - -config() -> - [{riak_core, [{default_bucket_props, [{n_val, 5}]}, - {vnode_management_timer, 1000}, - {ring_creation_size, 16}, - {enable_consensus, true}, - {target_n_val, 8}]}]. - -test_lose_minority_synctrees(PBC, Bucket, Key, Val, PL) -> - Minority = minority_vnodes(PL), - assert_lose_synctrees_and_recover(PBC, Bucket, Key, Val, PL, Minority). - -test_lose_majority_synctrees(PBC, Bucket, Key, Val, PL) -> - Majority = majority_vnodes(PL), - assert_lose_synctrees_and_recover(PBC, Bucket, Key, Val, PL, Majority). - -test_lose_minority_synctrees_one_node_partitioned(PBC, Bucket, Key, Val, PL, - Nodes) -> - Minority = minority_vnodes(PL), - {{Idx0, Node0}, primary} = hd(PL), - Ensemble = {kv, Idx0, 5}, - Rest = [Node || {{_, Node}, _} <- lists:sublist(PL, ?NVAL-1)], - - %% Partition off the last node - {{_, PartitionedNode},_} = lists:nth(?NVAL, PL), - PartInfo = rt:partition([PartitionedNode], Nodes -- [PartitionedNode]), - - %% Wipe a minority of nodes - [wipe_tree(Ensemble, Idx, Node) || {{Idx, Node}, _} <- Minority], - kill_peers(Ensemble, Rest), - - %% With a majority of nodes down (minority reboot, 1 partitioned) we - %% shouldn't be able to reach quorum. This is because we now have a majority - %% of untrusted synctrees, and all nodes are not online. - timer:sleep(10000), - {error, <<"timeout">>} = riakc_pb_socket:get(PBC, Bucket, Key, []), - - %% Heal the partition so that we can get quorum - rt:heal(PartInfo), - ensemble_util:wait_until_quorum(Node0, Ensemble), - assert_valid_read(PBC, Bucket, Key, Val). - -test_lose_all_but_one_partition(PBC, Bucket, Key, Val, PL) -> - Wiped = tl(PL), - {{Idx0, Node0}, primary} = hd(PL), - Ensemble = {kv, Idx0, 5}, - lager:info("Wiping Data on Following Vnodes: ~p", [Wiped]), - wipe_partitions(Wiped), - ensemble_util:wait_until_quorum(Node0, Ensemble), - assert_valid_read(PBC, Bucket, Key, Val).
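All of these scenarios lean on the same quorum arithmetic that confirm/0 sets up: with NVal replicas, a quorum is NVal div 2 + 1 peers, so the largest set that can be lost while staying available is NVal minus that quorum. A tiny sketch of the split the tests rely on when deciding how many synctrees or vnodes to destroy:

    quorum(NVal)   -> NVal div 2 + 1.
    minority(NVal) -> NVal - quorum(NVal).

    %% With ?NVAL = 5: quorum(5) =:= 3 and minority(5) =:= 2, matching the
    %% two/three-way preference-list splits in minority_vnodes/1 and
    %% majority_vnodes/1 defined further down.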
- -test_lose_one_node_one_partition(PBC, Bucket, Key, Val, PL) -> - {{Idx0, Node0}, primary} = hd(PL), - Ensemble = {kv, Idx0, 5}, - Leader = ensemble_util:get_leader_pid(Node0, Ensemble), - LeaderNode = node(Leader), - LeaderIdx = get_leader_idx(PL, LeaderNode), - lager:info("Wiping Idx ~p data on LeaderNode ~p", [LeaderIdx, LeaderNode]), - wipe_partition(LeaderIdx, LeaderNode), - ensemble_util:wait_until_quorum(LeaderNode, Ensemble), - assert_valid_read(PBC, Bucket, Key, Val). - -test_lose_all_data_and_trees_except_one_node(PBC, Bucket, Key, Val, PL) -> - Wiped = tl(PL), - {{Idx0, Node0}, primary} = hd(PL), - Ensemble = {kv, Idx0, 5}, - wipe_partitions(Wiped), - wipe_trees(Ensemble, Wiped), - ensemble_util:wait_until_quorum(Node0, Ensemble), - assert_valid_read(PBC, Bucket, Key, Val). - -test_backup_restore_data_not_trees(Bucket, Key, _Val, PL) -> - {{Idx, Node}, primary} = hd(PL), - Ensemble = {kv, Idx, 5}, - stop_nodes(PL), - backup_data(1, PL), - start_nodes(PL), - PBC = rt:pbc(Node), - ensemble_util:wait_until_quorum(Node, Ensemble), - timer:sleep(10000), - - Obj0 = rt:pbc_read(PBC, Bucket, Key), - NewVal = <<"test-val2">>, - Obj = riakc_obj:update_value(Obj0, NewVal), - riakc_pb_socket:put(PBC, Obj), - assert_valid_read(PBC, Bucket, Key, NewVal), - - stop_nodes(PL), - %% Backup the new data. - backup_data(2, PL), - %% Restore old data - restore_data(1, PL), - start_nodes(PL), - PBC1 = rt:pbc(Node), - ensemble_util:wait_until_quorum(Node, Ensemble), - - %% Fail to read the restored data. Trees match newer data than what was - %% restored - assert_failed_read(PBC1, Bucket, Key), - stop_nodes(PL), - - %% Restore New Data that matches trees - restore_data(2, PL), - start_nodes(PL), - PBC2 = rt:pbc(Node), - ensemble_util:wait_until_quorum(Node, Ensemble), - - assert_valid_read(PBC2, Bucket, Key, NewVal), - {ok, NewVal}. - -test_lose_all_data(PBC, Bucket, Key, PL) -> - wipe_partitions(PL), - {error, _}=E = riakc_pb_socket:get(PBC, Bucket, Key, []), - lager:info("All data loss error = ~p", [E]). - -assert_valid_read(PBC, Bucket, Key, Val) -> - ReadFun = fun() -> - Obj = rt:pbc_read(PBC, Bucket, Key), - Val =:= riakc_obj:get_value(Obj) - end, - ?assertEqual(ok, rt:wait_until(ReadFun)). - -assert_failed_read(PBC, Bucket, Key) -> - ?assertMatch({error, _}, riakc_pb_socket:get(PBC, Bucket, Key, [])). - -normal_write_and_read(PBC, Bucket, Key, Val) -> - lager:info("Writing a consistent key"), - ok = rt:pbc_write(PBC, Bucket, Key, Val), - lager:info("Read key to verify it exists"), - assert_valid_read(PBC, Bucket, Key, Val). - -stop_nodes(PL) -> - [rt:stop_and_wait(Node) || {{_, Node}, _} <- PL]. - -start_nodes(PL) -> - [rt:start_and_wait(Node) || {{_, Node}, _} <- PL]. - -data_path(Node) -> - ?HARNESS:node_path(Node) ++ "/data/"++backend_dir(). - -backup_path(Node, N) -> - data_path(Node) ++ integer_to_list(N) ++ ".bak". - -backup_data(N, PL) -> - [backup_node(Node, N) || {{_, Node}, _} <- PL]. - -backup_node(Node, N) -> - Path = data_path(Node), - BackupPath = backup_path(Node, N), - Cmd = "cp -R "++Path++" "++BackupPath, - lager:info("~p", [os:cmd(Cmd)]). - -restore_data(N, PL) -> - [restore_node(Node, N) || {{_, Node}, _} <- PL]. - -restore_node(Node, N) -> - Path = data_path(Node), - BackupPath = backup_path(Node, N), - rm_backend_dir(Node), - Cmd = "mv "++BackupPath++" "++Path, - ?assertEqual([], os:cmd(Cmd)). 
- -assert_lose_synctrees_and_recover(PBC, Bucket, Key, Val, PL, ToLose) -> - {{Idx0, Node0}, primary} = hd(PL), - Ensemble = {kv, Idx0, 5}, - [wipe_tree(Ensemble, Idx, Node) || {{Idx, Node}, _} <- ToLose], - ensemble_util:wait_until_quorum(Node0, Ensemble), - assert_valid_read(PBC, Bucket, Key, Val). - -majority_vnodes(PL) -> - Num = ?NVAL div 2 + 1, - {Majority, _} = lists:split(Num, PL), - Majority. - -minority_vnodes(PL) -> - Num = ?NVAL div 2, - {Minority, _} = lists:split(Num, PL), - Minority. - -get_leader_idx(PL, LeaderNode) -> - [{LeaderIdx, _}] = [{Idx, N} || {{Idx, N}, _} <- PL, N =:= LeaderNode], - LeaderIdx. - -kill_peers(Ensemble, Nodes) -> - Node = hd(Nodes), - {_, [View | _]} = rpc:call(Node, riak_ensemble_manager, get_views, [Ensemble]), - Peers = [P || P={_Id, N} <- View, lists:member(N, Nodes)], - lager:info("Killing Peers: ~p", [Peers]), - Pids = [rpc:call(Node, riak_ensemble_manager, get_peer_pid, - [Ensemble, Peer]) || Peer <- Peers], - [exit(Pid, kill) || Pid <- Pids, Pid =/= undefined]. - -wipe_partitions(PL) -> - [wipe_partition(Idx, Node) || {{Idx, Node}, _} <- PL]. - -wipe_trees(Ensemble, PL) -> - [wipe_tree(Ensemble, Idx, Node) || {{Idx, Node}, _} <- PL]. - -wipe_tree(Ensemble, Idx, Node) -> - rt:clean_data_dir([Node], "ensembles/trees/kv_"++integer_to_list(Idx)), - {_, [View | _]} = rpc:call(Node, riak_ensemble_manager, get_views, [Ensemble]), - [Peer] = [P || P={_Id, N} <- View, Node =:= N], - Pid = rpc:call(Node, riak_ensemble_manager, get_peer_pid, [Ensemble, Peer]), - lager:info("Peer= ~p, Pid = ~p", [Peer, Pid]), - exit(Pid, kill). - -wipe_partition(Idx, Node) -> - rm_partition_dir(Idx, Node), - vnode_util:kill_vnode({Idx, Node}). - -rm_backend_dir(Node) -> - rt:clean_data_dir([Node], backend_dir()). - -rm_partition_dir(Idx, Node) -> - RelativePath = backend_dir() ++ "/" ++ integer_to_list(Idx), - rt:clean_data_dir([Node], RelativePath). - -backend_dir() -> - TestMetaData = riak_test_runner:metadata(), - KVBackend = proplists:get_value(backend, TestMetaData), - backend_dir(KVBackend). - -backend_dir(undefined) -> - %% riak_test defaults to bitcask when undefined - backend_dir(bitcask); -backend_dir(bitcask) -> - "bitcask"; -backend_dir(eleveldb) -> - "leveldb". - -get_preflist(Node, Bucket, Key, NVal) -> - DocIdx = rpc:call(Node, riak_core_util, chash_std_keyfun, [{Bucket, Key}]), - PL = rpc:call(Node, riak_core_apl, get_primary_apl, [DocIdx, NVal, riak_kv]), - {ok, PL}. - -create_strong_bucket_type(Node, NVal) -> - lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, - [{consistent, true}, {n_val, NVal}]), - ensemble_util:wait_until_stable(Node, NVal). diff --git a/tests/ensemble_interleave.erl b/tests/ensemble_interleave.erl deleted file mode 100644 index 5e34659f6..000000000 --- a/tests/ensemble_interleave.erl +++ /dev/null @@ -1,101 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. 
See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% Tests the specific corner case where two ensemble peers become -%% corrupted one after the other. The goal is to provoke the scenario -%% where one of the peers initially trusts the other and syncs with it, -%% but completes the sync after the peer becomes untrusted. -%% -%% Actually hitting this specific interleaving may require multiple runs, -%% but it has been observed and led to the addition of the `check_sync` -%% logic to riak_ensemble/riak_ensemble_peer.erl that verifies a peer is -%% still trustworthy after a peer syncs with it. -%% -%% Without the check_sync addition, this test could incorrectly report -%% {error, notfound} -- e.g. data loss. With the addition, this test -%% should now always pass. - --module(ensemble_interleave). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - NVal = 5, - Quorum = NVal div 2 + 1, - Config = ensemble_util:fast_config(NVal), - lager:info("Building cluster and waiting for ensemble to stabilize"), - Nodes = ensemble_util:build_cluster(8, Config, NVal), - Node = hd(Nodes), - vnode_util:load(Nodes), - - lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, - [{consistent, true}, {n_val, NVal}]), - ensemble_util:wait_until_stable(Node, NVal), - Bucket = {<<"strong">>, <<"test">>}, - Keys = [<<N:64/integer>> || N <- lists:seq(1,1000)], - - Key1 = hd(Keys), - DocIdx = rpc:call(Node, riak_core_util, chash_std_keyfun, [{Bucket, Key1}]), - PL = rpc:call(Node, riak_core_apl, get_primary_apl, [DocIdx, NVal, riak_kv]), - All = [VN || {VN, _} <- PL], - Other = [VN || {VN={_, Owner}, _} <- PL, - Owner =/= Node], - - Minority = NVal - Quorum, - PartitionedVN = lists:sublist(Other, Minority), - Partitioned = [VNode || {_, VNode} <- PartitionedVN], - [KillFirst,KillSecond|Suspend] = All -- PartitionedVN, - - io:format("PL: ~p~n", [PL]), - PBC = rt:pbc(Node), - Options = [{timeout, 500}], - - rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, [manual]), - Part = rt:partition(Nodes -- Partitioned, Partitioned), - ensemble_util:wait_until_stable(Node, Quorum), - - lager:info("Writing ~p consistent keys", [1000]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], - - lager:info("Read keys to verify they exist"), - [rt:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys], - rt:heal(Part), - - [begin - lager:info("Suspending vnode: ~p", [VIdx]), - vnode_util:suspend_vnode(VNode, VIdx) - end || {VIdx, VNode} <- Suspend], - - vnode_util:kill_vnode(KillFirst), - timer:sleep(5000), - vnode_util:kill_vnode(KillSecond), - vnode_util:rebuild_vnode(KillFirst), - rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, [automatic]), - ensemble_util:wait_until_stable(Node, Quorum), - - lager:info("Disabling AAE"), - rpc:multicall(Nodes, riak_kv_entropy_manager, disable, []), - ensemble_util:wait_until_stable(Node, Quorum), - - lager:info("Re-reading keys to verify they exist"), - Expect = [ok, {error, timeout}, {error, <<"timeout">>}, {error, <<"failed">>}], - [rt:pbc_read_check(PBC, Bucket, Key, Expect, Options) || Key <- Keys], - pass.
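The Expect list at the end of the test is the heart of the data-loss check described in the comment block above: after the interleaved corruption, a read may succeed or be temporarily unavailable, but it must never come back notfound. A sketch of the predicate that expectation encodes (hypothetical helper; rt:pbc_read_check/5 above performs the equivalent assertion):

    assert_no_data_loss(PBC, Bucket, Key) ->
        case riakc_pb_socket:get(PBC, Bucket, Key, [{timeout, 500}]) of
            {ok, _Obj}             -> ok;  %% value survived
            {error, timeout}       -> ok;  %% unavailable is acceptable
            {error, <<"timeout">>} -> ok;
            {error, <<"failed">>}  -> ok;
            {error, notfound}      -> erlang:error(data_loss)  %% the bug check_sync guards against
        end.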
diff --git a/tests/ensemble_remove_node.erl b/tests/ensemble_remove_node.erl deleted file mode 100644 index 2d9e8e9e0..000000000 --- a/tests/ensemble_remove_node.erl +++ /dev/null @@ -1,90 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(ensemble_remove_node). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --compile({parse_transform, rt_intercept_pt}). --define(M, riak_kv_ensemble_backend_orig). - -confirm() -> - NumNodes = 3, - NVal = 3, - Config = ensemble_util:fast_config(NVal), - lager:info("Building cluster and waiting for ensemble to stabilize"), - Nodes = ensemble_util:build_cluster(NumNodes, Config, NVal), - [Node, Node2, Node3] = Nodes, - ok = ensemble_util:wait_until_stable(Node, NVal), - lager:info("Store a value in the root ensemble"), - {ok, _} = riak_ensemble_client:kput_once(Node, root, testerooni, - testvalue, 1000), - lager:info("Read value from the root ensemble"), - {ok, _} = riak_ensemble_client:kget(Node, root, testerooni, 1000), - - EnsembleStatusPid = spawn(fun()-> ensemble_status_server([]) end), - rt_intercept:add(Node, {riak_kv_ensemble_backend, [{{maybe_async_update, 2}, - {[EnsembleStatusPid], - fun(Changes, State) -> - case lists:keyfind(del, 1, Changes) of - false -> - ?M:maybe_async_update_orig(Changes, State); - {del, {_, Node}}-> - {ok, Ring} = riak_core_ring_manager:get_raw_ring(), - ExitingMembers = riak_core_ring:members(Ring, [exiting]), - EnsembleStatusPid ! {exiting_members, Node, ExitingMembers}, - ?M:maybe_async_update_orig(Changes, State) - end - end}}]}), - - lager:info("Removing Nodes 2 and 3 from the cluster"), - rt:leave(Node2), - ok = ensemble_util:wait_until_stable(Node, NVal), - rt:leave(Node3), - ok = ensemble_util:wait_until_stable(Node, NVal), - Remaining = Nodes -- [Node2, Node3], - rt:wait_until_nodes_agree_about_ownership(Remaining), - ok = rt:wait_until_unpingable(Node2), - ok = rt:wait_until_unpingable(Node3), - lager:info("Read value from the root ensemble"), - {ok, _Obj} = riak_ensemble_client:kget(Node, root, testerooni, 1000), - Members3 = rpc:call(Node, riak_ensemble_manager, get_members, [root]), - ?assertEqual(1, length(Members3)), - Cluster = rpc:call(Node, riak_ensemble_manager, cluster, []), - ?assertEqual(1, length(Cluster)), - EnsembleStatusPid ! {get_errors, self()}, - ExitingErrors = receive E -> E end, - ?assertEqual(ExitingErrors, []), - pass. - -ensemble_status_server(Errors) -> - receive - {exiting_members, Node, ExitingMembers} -> - case lists:member(Node, ExitingMembers) of - false -> - ensemble_status_server(Errors); - true -> - E = {invalid_exiting_status, Node, ExitingMembers}, - ensemble_status_server([E | Errors]) - end; - {get_errors, From} -> - From ! Errors - end.
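The intercept used above is the general rt_intercept shape this suite relies on: a {Module, [{{Fun, Arity}, {FreeVars, AnonFun}}]} spec swaps the real function for a closure that can both observe the call and delegate to the generated _orig module. Stripped to its skeleton (the reporter pid and message are illustrative, not from the test):

    rt_intercept:add(Node,
        {riak_kv_ensemble_backend,
         [{{maybe_async_update, 2},
           {[ReporterPid],                    %% free variables captured by the fun
            fun(Changes, State) ->
                ReporterPid ! {saw, Changes}, %% let the test observe the call
                %% then fall through to the real implementation
                riak_kv_ensemble_backend_orig:maybe_async_update_orig(Changes, State)
            end}}]}).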
- - diff --git a/tests/ensemble_remove_node2.erl b/tests/ensemble_remove_node2.erl deleted file mode 100644 index 3efaa7ace..000000000 --- a/tests/ensemble_remove_node2.erl +++ /dev/null @@ -1,106 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(ensemble_remove_node2). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --compile({parse_transform, rt_intercept_pt}). --define(M, riak_kv_ensemble_backend_orig). - -confirm() -> - NumNodes = 3, - NVal = 3, - Config = ensemble_util:fast_config(NVal), - lager:info("Building cluster and waiting for ensemble to stabilize"), - Nodes = ensemble_util:build_cluster(NumNodes, Config, NVal), - [Node, Node2, Node3] = Nodes, - ok = ensemble_util:wait_until_stable(Node, NVal), - lager:info("Store a value in the root ensemble"), - {ok, _} = riak_ensemble_client:kput_once(Node, root, testerooni, - testvalue, 1000), - lager:info("Read value from the root ensemble"), - {ok, _} = riak_ensemble_client:kget(Node, root, testerooni, 1000), - - lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, - [{consistent, true}, {n_val, NVal}]), - ensemble_util:wait_until_stable(Node, NVal), - Bucket = {<<"strong">>, <<"test">>}, - Key = <<"testkey">>, - PBC = rt:pbc(Node), - ok = rt:pbc_write(PBC, Bucket, Key, testval), - Val1 = rt:pbc_read(PBC, Bucket, Key), - ?assertEqual(element(1, Val1), riakc_obj), - - %% Don't allow node deletions in riak_ensemble. This should prevent the - %% nodes from ever exiting - rt_intercept:add(Node, {riak_kv_ensemble_backend, [{{maybe_async_update, 2}, - {[], - fun(Changes, State) -> - Changes2 = lists:filter(fun({del, _}) -> false; - (_) -> true - end, Changes), - ?M:maybe_async_update_orig(Changes2, State) - end}}]}), - - lager:info("Removing Nodes 2 and 3 from the cluster"), - rt:leave(Node2), - ok = ensemble_util:wait_until_stable(Node, NVal), - rt:leave(Node3), - ok = ensemble_util:wait_until_stable(Node, NVal), - Remaining = Nodes -- [Node2, Node3], - rt:wait_until_nodes_agree_about_ownership(Remaining), - - %% TODO: How do we wait indefinitely for nodes to never exit here? A 30s - %% sleep?
- timer:sleep(30000), - - %% Nodes should still be in leaving state - {ok, Ring} = rpc:call(Node, riak_core_ring_manager, get_raw_ring, []), - Leaving = lists:usort(riak_core_ring:members(Ring, [leaving])), - ?assertEqual(Leaving, [Node2, Node3]), - - %% We should still be able to read from k/v ensembles, but the nodes should - %% never exit - lager:info("Reading From SC Bucket"), - Val2 = rt:pbc_read(PBC, Bucket, Key), - ?assertEqual(element(1, Val2), riakc_obj), - - ok = ensemble_util:wait_until_stable(Node, NVal), - lager:info("Read value from the root ensemble"), - {ok, _Obj} = riak_ensemble_client:kget(Node, root, testerooni, 1000), - Members3 = rpc:call(Node, riak_ensemble_manager, get_members, [root]), - ?assertEqual(3, length(Members3)), - Cluster = rpc:call(Node, riak_ensemble_manager, cluster, []), - ?assertEqual(3, length(Cluster)), - - lager:info("Removing intercept and waiting until nodes 2/3 shutdown"), - rt_intercept:add(Node, {riak_kv_ensemble_backend, [{{maybe_async_update, 2}, - {[], - fun(Changes, State) -> - ?M:maybe_async_update_orig(Changes, State) - end}}]}), - - ok = rt:wait_until_unpingable(Node2), - ok = rt:wait_until_unpingable(Node3), - rpc:call(Node, riak_core_console, member_status, [[]]), - rpc:call(Node, riak_core_console, ring_status, [[]]), - - pass. diff --git a/tests/ensemble_ring_changes.erl b/tests/ensemble_ring_changes.erl deleted file mode 100644 index 6823d76ee..000000000 --- a/tests/ensemble_ring_changes.erl +++ /dev/null @@ -1,153 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(ensemble_ring_changes). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(NVAL, 3). --define(RING_SIZE, 16). - -config() -> - [{riak_core, [{default_bucket_props, [{n_val, 5}]}, - {vnode_management_timer, 1000}, - {ring_creation_size, ?RING_SIZE}, - {enable_consensus, true}, - {target_n_val, 8}]}]. 
- -confirm() -> - Config = config(), - {ok, Joined, NotJoined} = build_initial_cluster(Config), - Node = hd(Joined), - Bucket = {<<"strong">>, <<"test">>}, - Key = <<"test-key">>, - Val = <<"test-val">>, - create_strong_bucket_type(Node, ?NVAL), - {ok, PL} = get_preflist(Node, Bucket, Key, ?NVAL), - ?assertEqual(?NVAL, length(PL)), - lager:info("PREFERENCE LIST: ~n ~p", [PL]), - {{Idx0, _Node0}, primary} = hd(PL), - Ensemble = {kv, Idx0, 3}, - lager:info("Ensemble = ~p", [Ensemble]), - PBC = rt:pbc(Node), - {ok, Obj} = initial_write(PBC, Bucket, Key, Val), - {ok, _Obj2} = assert_update(PBC, Bucket, Key, Obj, <<"test-val2">>), - Replacements = expand_cluster(Joined, NotJoined), - {_Vsn, [View]} = rpc:call(Node, riak_ensemble_manager, get_views, [Ensemble]), - {_, F1} = hd(lists:reverse(View)), - {_, F2} = hd(tl(lists:reverse(View))), - lager:info("F1= ~p, F2=~p", [F1, F2]), - read_modify_write(PBC, Bucket, Key, <<"test-val2">>, <<"test-val3">>), - [R1, R2] = Replacements, - replace_node(Node, F1, R1), - read_modify_write(PBC, Bucket, Key, <<"test-val3">>, <<"test-val4">>), - force_replace_node(Node, F2, R2), - read_modify_write(PBC, Bucket, Key, <<"test-val4">>, <<"test-val5">>), - pass. - -read_modify_write(PBC, Bucket, Key, Expected, NewVal) -> - Obj = rt:pbc_read(PBC, Bucket, Key), - ?assertEqual(Expected, riakc_obj:get_value(Obj)), - assert_update(PBC, Bucket, Key, Obj, NewVal). - -assert_update(PBC, Bucket, Key, Obj, NewVal) -> - ok = update(PBC, Obj, NewVal), - Obj2 = rt:pbc_read(PBC, Bucket, Key), - ?assertEqual(NewVal, riakc_obj:get_value(Obj2)), - {ok, Obj2}. - -update(PBC, Obj0, NewVal) -> - lager:info("Updating Key with ~p", [NewVal]), - Obj = riakc_obj:update_value(Obj0, NewVal), - riakc_pb_socket:put(PBC, Obj). - -initial_write(PBC, Bucket, Key, Val) -> - %% maps to a riak_ensemble put_once since there is no vclock - lager:info("Writing a consistent key"), - ok = rt:pbc_write(PBC, Bucket, Key, Val), - lager:info("Read key to verify it exists"), - Obj = rt:pbc_read(PBC, Bucket, Key), - ?assertEqual(Val, riakc_obj:get_value(Obj)), - {ok, Obj}. - -get_preflist(Node, Bucket, Key, NVal) -> - DocIdx = rpc:call(Node, riak_core_util, chash_std_keyfun, [{Bucket, Key}]), - PL = rpc:call(Node, riak_core_apl, get_primary_apl, [DocIdx, NVal, riak_kv]), - {ok, PL}. - -create_strong_bucket_type(Node, NVal) -> - lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, - [{consistent, true}, {n_val, NVal}]), - ensemble_util:wait_until_stable(Node, NVal). - -replace_node(Node, OldNode, NewNode) -> - lager:info("Replacing ~p with ~p", [OldNode, NewNode]), - Nodes = [OldNode, NewNode], - rt:staged_join(NewNode, Node), - ?assertEqual(ok, rt:wait_until_ring_converged(Nodes)), - ok = rpc:call(Node, riak_core_claimant, replace, Nodes), - rt:plan_and_commit(Node), - rt:try_nodes_ready(Nodes, 3, 500), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)). - -force_replace_node(Node, OldNode, NewNode) -> - lager:info("Force Replacing ~p with ~p", [OldNode, NewNode]), - Nodes = [OldNode, NewNode], - rt:staged_join(NewNode, Node), - ?assertEqual(ok, rt:wait_until_ring_converged(Nodes)), - ok = rpc:call(Node, riak_core_claimant, force_replace, Nodes), - rt:plan_and_commit(Node), - rt:try_nodes_ready(Nodes, 3, 500), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)). 
- 
-expand_cluster(OldNodes, NewNodes0) ->
- %% Always have 2 replacement nodes
- {NewNodes, Replacements} = lists:split(length(NewNodes0)-2, NewNodes0),
- lager:info("Expanding Cluster from ~p to ~p nodes", [length(OldNodes),
- length(OldNodes) + length(NewNodes)]),
- PNode = hd(OldNodes),
- Nodes = OldNodes ++ NewNodes,
- [rt:staged_join(Node, PNode) || Node <- NewNodes],
- rt:plan_and_commit(PNode),
- rt:try_nodes_ready(Nodes, 3, 500),
- ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)),
- %% Ensure each node owns a portion of the ring
- rt:wait_until_nodes_agree_about_ownership(Nodes),
- ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes)),
- ensemble_util:wait_until_cluster(Nodes),
- ensemble_util:wait_for_membership(PNode),
- ensemble_util:wait_until_stable(PNode, ?NVAL),
- Replacements.
-
-build_initial_cluster(Config) ->
- TotalNodes = 8,
- InitialNodes = 3,
- Nodes = rt:deploy_nodes(TotalNodes, Config),
- Node = hd(Nodes),
- {ToJoin, NotToJoin} = lists:split(InitialNodes, Nodes),
- rt:join_cluster(ToJoin),
- ensemble_util:wait_until_cluster(ToJoin),
- ensemble_util:wait_for_membership(Node),
- ensemble_util:wait_until_stable(Node, InitialNodes),
- vnode_util:load(Nodes),
- ensemble_util:wait_until_stable(Node, InitialNodes),
- {ok, ToJoin, NotToJoin}.
diff --git a/tests/ensemble_start_without_aae.erl b/tests/ensemble_start_without_aae.erl
deleted file mode 100644
index 8a6ad042d..000000000
--- a/tests/ensemble_start_without_aae.erl
+++ /dev/null
@@ -1,34 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013-2014 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
--module(ensemble_start_without_aae).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
-confirm() ->
-
- NumNodes = 5,
- NVal = 5,
-
- Config = ensemble_util:fast_config(NVal, false),
- lager:info("Building cluster with consensus enabled and AAE disabled. Waiting for ensemble to stabilize ..."),
-
- _ = ensemble_util:build_cluster_without_quorum(NumNodes, Config),
- pass.
diff --git a/tests/ensemble_sync.erl b/tests/ensemble_sync.erl
deleted file mode 100644
index db1d19695..000000000
--- a/tests/ensemble_sync.erl
+++ /dev/null
@@ -1,214 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013-2014 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. 
See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(ensemble_sync). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(INTERCEPT_TAB, intercept_leader_tick_counts). - -confirm() -> - NVal = 5, - Config = ensemble_util:fast_config(NVal), - Nodes = ensemble_util:build_cluster(8, Config, NVal), - lists:foreach(fun init_intercepts/1, Nodes), - Node = hd(Nodes), - vnode_util:load(Nodes), - - lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, - [{consistent, true}, {n_val, NVal}]), - ensemble_util:wait_until_stable(Node, NVal), - - ExpectOkay = [ok], - ExpectTimeout = [{error, timeout}, {error, <<"timeout">>}, - {error, <<"failed">>} | ExpectOkay], - ExpectFail = [{error, notfound} | ExpectTimeout], - - Scenarios = [%% corrupted, suspended, valid, empty, bucket, expect - {1, 1, 1, 2, <<"test1">>, ExpectOkay}, - {1, 2, 0, 2, <<"test2">>, ExpectTimeout}, - {2, 1, 0, 2, <<"test3">>, ExpectTimeout}, - {3, 0, 0, 2, <<"test4">>, ExpectFail} - ], - [ok = run_scenario(Nodes, NVal, Scenario) || Scenario <- Scenarios], - pass. - --spec partition(non_neg_integer(), node(), list()) -> {[{non_neg_integer(), node()}], [node()]}. -partition(Minority, ContactNode, PL) -> - AllVnodes = [VN || {VN, _} <- PL], - OtherVnodes = [VN || {VN={_, Owner}, _} <- PL, - Owner =/= ContactNode], - NodeCounts = num_partitions_per_node(OtherVnodes), - PartitionedNodes = minority_nodes(NodeCounts, Minority), - PartitionedVnodes = minority_vnodes(OtherVnodes, PartitionedNodes), - ValidVnodes = AllVnodes -- PartitionedVnodes, - {ValidVnodes, PartitionedNodes}. - -num_partitions_per_node(Other) -> - lists:foldl(fun({_, Node}, Acc) -> - orddict:update_counter(Node, 1, Acc) - end, orddict:new(), Other). - -minority_nodes(NodeCounts, MinoritySize) -> - lists:foldl(fun({Node, Count}, Acc) -> - case Count =:= 1 andalso length(Acc) < MinoritySize of - true -> - [Node | Acc]; - false -> - Acc - end - end, [], NodeCounts). - -minority_vnodes(Vnodes, PartitionedNodes) -> - [VN || {_, Node}=VN <- Vnodes, lists:member(Node, PartitionedNodes)]. 
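The partition helpers above are dense, so a worked example may help; the vnode/node pairs below are made up for illustration, not taken from a real run:

    %% Illustrative walk-through of partition/3.
    %% Suppose Minority = 2, ContactNode = n1, and
    %%   PL = [{{0,n1},primary}, {{1,n2},primary}, {{2,n3},primary},
    %%         {{3,n4},primary}, {{4,n2},primary}].
    %% Then:
    %%   OtherVnodes       = [{1,n2},{2,n3},{3,n4},{4,n2}]  (vnodes not on n1)
    %%   NodeCounts        = [{n2,2},{n3,1},{n4,1}]         (vnodes owned per node)
    %%   PartitionedNodes  = [n4,n3]   (nodes owning exactly one vnode, up to Minority)
    %%   PartitionedVnodes = [{2,n3},{3,n4}]
    %%   ValidVnodes       = [{0,n1},{1,n2},{4,n2}]
    %% Cutting off those two nodes removes exactly two replicas while the
    %% contact node keeps a quorum of vnodes reachable.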
- 
-run_scenario(Nodes, NVal, {NumKill, NumSuspend, NumValid, _, Name, Expect}) ->
- Node = hd(Nodes),
- Quorum = NVal div 2 + 1,
- Minority = NVal - Quorum,
- Bucket = {<<"strong">>, Name},
- Keys = [<<N:64/integer>> || N <- lists:seq(1,1000)],
-
- Key1 = hd(Keys),
- DocIdx = rpc:call(Node, riak_core_util, chash_std_keyfun, [{Bucket, Key1}]),
- PL = rpc:call(Node, riak_core_apl, get_primary_apl, [DocIdx, NVal, riak_kv]),
- {Valid, Partitioned} = partition(Minority, Node, PL),
-
- {KillVN, Valid2} = lists:split(NumKill, Valid),
- {SuspendVN, Valid3} = lists:split(NumSuspend, Valid2),
- {AfterVN, _} = lists:split(NumValid, Valid3),
-
- io:format("PL: ~p~n", [PL]),
- PBC = rt:pbc(Node),
- Options = [{timeout, 2000}],
-
- rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, [manual]),
- Part = rt:partition(Nodes -- Partitioned, Partitioned),
- wait_for_leader_tick_changes(Nodes),
- ensemble_util:wait_until_stable(Node, Quorum),
-
- %% Write data while minority is partitioned
- lager:info("Writing ~p consistent keys", [1000]),
- [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys],
-
- lager:info("Read keys to verify they exist"),
- [rt:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys],
- rt:heal(Part),
-
- %% Suspend desired number of valid vnodes
- S1 = [vnode_util:suspend_vnode(VNode, VIdx) || {VIdx, VNode} <- SuspendVN],
-
- %% Kill/corrupt desired number of valid vnodes
- [vnode_util:kill_vnode(VN) || VN <- KillVN],
- [vnode_util:rebuild_vnode(VN) || VN <- KillVN],
- rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, [automatic]),
- wait_for_leader_tick_changes(Nodes),
- ensemble_util:wait_until_stable(Node, Quorum),
-
- lager:info("Disabling AAE"),
- rpc:multicall(Nodes, riak_kv_entropy_manager, disable, []),
- ensemble_util:wait_until_stable(Node, Quorum),
-
- %% Suspend remaining valid vnodes to ensure data comes from repaired vnodes
- S2 = [vnode_util:suspend_vnode(VNode, VIdx) || {VIdx, VNode} <- AfterVN],
- wait_for_leader_tick_changes(Nodes),
- ensemble_util:wait_until_stable(Node, Quorum),
-
- lager:info("Checking that key results match scenario"),
- [rt:pbc_read_check(PBC, Bucket, Key, Expect, Options) || Key <- Keys],
-
- lager:info("Re-enabling AAE"),
- rpc:multicall(Nodes, riak_kv_entropy_manager, enable, []),
-
- lager:info("Resuming all vnodes"),
- [vnode_util:resume_vnode(Pid) || Pid <- S1 ++ S2],
- wait_for_leader_tick_changes(Nodes),
- ensemble_util:wait_until_stable(Node, NVal),
-
- %% Check that for other than the "all bets are off" failure case,
- %% we can successfully read all keys after all vnodes are available.
- case lists:member({error, notfound}, Expect) of
- true ->
- ok;
- false ->
- lager:info("Re-reading keys to verify they exist"),
- [rt:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys]
- end,
-
- lager:info("Scenario passed"),
- lager:info("-----------------------------------------------------"),
- ok.
-
-%% The following code is used so that we can wait for ensemble leader ticks to fire.
-%% This allows us to fix a kind of race condition that we were dealing with in the
-%% previous version of this test, where we were relying on ensemble_util:wait_until_stable
-%% after making certain changes to the cluster.
-init_intercepts(Node) ->
- make_intercepts_tab(Node),
- rt_intercept:add(Node, {riak_ensemble_peer, [{{leader_tick, 1}, count_leader_ticks}]}).
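init_intercepts/1 above installs a named count_leader_ticks intercept whose source is not part of this diff. Given the {LeaderPid, Count} entries that get_leader_tick_counts_for_node/1 below reads back out of ?INTERCEPT_TAB, a plausible sketch of it is the following (an assumption, not the intercept's actual source; ?M follows the usual intercept convention of pointing at riak_ensemble_peer_orig):

    %% Sketch: count each leader_tick/1 invocation per leader process,
    %% then fall through to the original implementation.
    count_leader_ticks(State) ->
        Tab = intercept_leader_tick_counts,
        case ets:insert_new(Tab, {self(), 1}) of
            true  -> ok;
            false -> ets:update_counter(Tab, self(), 1)
        end,
        ?M:leader_tick_orig(State).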
- -make_intercepts_tab(Node) -> - SupPid = rpc:call(Node, erlang, whereis, [sasl_safe_sup]), - Opts = [named_table, public, set, {heir, SupPid, {}}], - ?INTERCEPT_TAB = rpc:call(Node, ets, new, [?INTERCEPT_TAB, Opts]). - -get_leader_tick_counts(Nodes) -> - AllCounts = [get_leader_tick_counts_for_node(N) || N <- Nodes], - lists:append(AllCounts). - -get_leader_tick_counts_for_node(Node) -> - Ensembles = rpc:call(Node, riak_kv_ensembles, local_ensembles, []), - Leaders = rpc:call(Node, lists, map, [fun riak_ensemble_manager:get_leader_pid/1, Ensembles]), - LocalLeaders = [P || P <- Leaders, node(P) =:= Node], - LookupFun = fun(P) -> - [Res] = rpc:call(Node, ets, lookup, [?INTERCEPT_TAB, P]), - Res - end, - lists:map(LookupFun, LocalLeaders). - -wait_for_leader_tick_changes(Nodes) -> - Counts = get_leader_tick_counts(Nodes), - lists:foreach(fun wait_for_leader_tick_change/1, Counts). - -wait_for_leader_tick_change({Pid, Count}) -> - F = fun() -> leader_tick_count_exceeds(Pid, Count) end, - ?assertEqual(ok, rt:wait_until(F)). - -leader_tick_count_exceeds(Pid, Count) -> - Node = node(Pid), - case rpc:call(Node, ets, lookup, [?INTERCEPT_TAB, Pid]) of - [{Pid, NewCount}] when NewCount > Count -> - true; - Res -> - %% If the count hasn't incremented, it may be because the leader - %% already stepped down, so check for that scenario as well: - case rpc:call(Node, sys, get_state, [Pid]) of - {leading, _} -> - Res; - Res2 = {badrpc, _} -> - {Res, Res2}; - {_, _} -> - %% Would be nice if there was a more explicit way to match - %% this, but if it's not a badrpc and we're not leading, we - %% must be in some other state - true - end - end. diff --git a/tests/ensemble_util.erl b/tests/ensemble_util.erl deleted file mode 100644 index d6f79145b..000000000 --- a/tests/ensemble_util.erl +++ /dev/null @@ -1,160 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(ensemble_util). --compile(export_all). - --define(DEFAULT_RING_SIZE, 16). - --include_lib("eunit/include/eunit.hrl"). - -build_cluster(Num, Config, NVal) -> - Nodes = rt:deploy_nodes(Num, Config), - Node = hd(Nodes), - rt:join_cluster(Nodes), - ensemble_util:wait_until_cluster(Nodes), - ensemble_util:wait_for_membership(Node), - ensemble_util:wait_until_stable(Node, NVal), - Nodes. - -build_cluster_without_quorum(Num, Config) -> - Nodes = rt:deploy_nodes(Num, Config), - SetupLogCaptureFun = fun(Node) -> - rt:setup_log_capture(Node) - end, - lists:map(SetupLogCaptureFun, Nodes), - Node = hd(Nodes), - ok = rpc:call(Node, riak_ensemble_manager, enable, []), - _ = rpc:call(Node, riak_core_ring_manager, force_update, []), - rt:join_cluster(Nodes), - ensemble_util:wait_until_cluster(Nodes), - ensemble_util:wait_for_membership(Node), - Nodes. - -fast_config(NVal) -> - fast_config(NVal, ?DEFAULT_RING_SIZE). 
- -fast_config(Nval, RingSize) when is_integer(RingSize) -> - fast_config(Nval, RingSize, true); -fast_config(Nval, EnableAAE) when is_boolean(EnableAAE) -> - fast_config(Nval, ?DEFAULT_RING_SIZE, EnableAAE). - -fast_config(NVal, RingSize, EnableAAE) -> - [config_aae(EnableAAE), - {riak_core, [{default_bucket_props, [{n_val, NVal}]}, - {vnode_management_timer, 1000}, - {ring_creation_size, RingSize}, - {enable_consensus, true}]}]. - -config_aae(true) -> - {riak_kv, [{anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100}, - {anti_entropy_tick, 100}, - {anti_entropy, {on, []}}, - {anti_entropy_timeout, 5000}, - {storage_backend, riak_kv_memory_backend}]}; -config_aae(false) -> - {riak_kv, [{anti_entropy, {off, []}}]}. - -ensembles(Node) -> - rpc:call(Node, riak_kv_ensembles, ensembles, []). - -get_leader_pid(Node, Ensemble) -> - rpc:call(Node, riak_ensemble_manager, get_leader_pid, [Ensemble]). - -peers(Node) -> - rpc:call(Node, riak_ensemble_peer_sup, peers, []). - -kill_leader(Node, Ensemble) -> - case get_leader_pid(Node, Ensemble) of - undefined -> - ok; - Pid -> - exit(Pid, kill), - ok - end. - -kill_leaders(Node, Ensembles) -> - _ = [kill_leader(Node, Ensemble) || Ensemble <- Ensembles], - ok. - -wait_until_cluster(Nodes) -> - lager:info("Waiting until riak_ensemble cluster includes all nodes"), - Node = hd(Nodes), - F = fun() -> - case rpc:call(Node, riak_ensemble_manager, cluster, []) of - Nodes -> - true; - _ -> - false - end - end, - ?assertEqual(ok, rt:wait_until(F)), - lager:info("....cluster ready"), - ok. - -wait_until_stable(Node, Count) -> - lager:info("Waiting until all ensembles are stable"), - Ensembles = rpc:call(Node, riak_kv_ensembles, ensembles, []), - wait_until_quorum(Node, root), - [wait_until_quorum(Node, Ensemble) || Ensemble <- Ensembles], - [wait_until_quorum_count(Node, Ensemble, Count) || Ensemble <- Ensembles], - lager:info("....all stable"), - ok. - -wait_until_quorum(Node, Ensemble) -> - F = fun() -> - case rpc:call(Node, riak_ensemble_manager, check_quorum, - [Ensemble, 10000]) of - true -> - true; - false -> - lager:info("Not ready: ~p", [Ensemble]), - false - end - end, - ?assertEqual(ok, rt:wait_until(F)). - -wait_until_quorum_count(Node, Ensemble, Want) -> - F = fun() -> - case rpc:call(Node, riak_ensemble_manager, count_quorum, - [Ensemble, 10000]) of - Count when Count >= Want -> - true; - Count -> - lager:info("Count: ~p :: ~p < ~p", [Ensemble, Count, Want]), - false - end - end, - ?assertEqual(ok, rt:wait_until(F)). - -wait_for_membership(Node) -> - lager:info("Waiting until ensemble membership matches ring ownership"), - F = fun() -> - case rpc:call(Node, riak_kv_ensembles, check_membership, []) of - Results when is_list(Results) -> - [] =:= [x || false <- Results]; - _ -> - false - end - end, - ?assertEqual(ok, rt:wait_until(F)), - lager:info("....ownership matches"), - ok. diff --git a/tests/ensemble_vnode_crash.erl b/tests/ensemble_vnode_crash.erl deleted file mode 100644 index 59fbd56e8..000000000 --- a/tests/ensemble_vnode_crash.erl +++ /dev/null @@ -1,108 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013-2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. 
You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
--module(ensemble_vnode_crash).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
--compile({parse_transform, rt_intercept_pt}).
--define(M, riak_kv_ensemble_backend_orig).
-
-confirm() ->
- NumNodes = 5,
- NVal = 5,
- Config = ensemble_util:fast_config(NVal),
- lager:info("Building cluster and waiting for ensemble to stabilize"),
- Nodes = ensemble_util:build_cluster(NumNodes, Config, NVal),
- vnode_util:load(Nodes),
- Node = hd(Nodes),
- ensemble_util:wait_until_stable(Node, NVal),
-
- lager:info("Creating/activating 'strong' bucket type"),
- rt:create_and_activate_bucket_type(Node, <<"strong">>,
- [{consistent, true}, {n_val, NVal}]),
- ensemble_util:wait_until_stable(Node, NVal),
- Bucket = {<<"strong">>, <<"test">>},
- Keys = [<<N:64/integer>> || N <- lists:seq(1,1000)],
-
- Key1 = hd(Keys),
- DocIdx = rpc:call(Node, riak_core_util, chash_std_keyfun, [{Bucket, Key1}]),
- PL = rpc:call(Node, riak_core_apl, get_primary_apl, [DocIdx, NVal, riak_kv]),
- {{Key1Idx, Key1Node}, _} = hd(PL),
-
- PBC = rt:pbc(Node),
-
- lager:info("Writing ~p consistent keys", [1000]),
- [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys],
-
- lager:info("Read keys to verify they exist"),
- [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys],
-
- %% Setting up intercept to ensure that
- %% riak_kv_ensemble_backend:handle_down/4 gets called when a vnode or vnode
- %% proxy crashes for a given key
- lager:info("Adding Intercept for riak_kv_ensemble_backend:handle_down/4"),
- Self = self(),
- rt_intercept:add(Node, {riak_kv_ensemble_backend, [{{handle_down, 4},
- {[Self],
- fun(Ref, Pid, Reason, State) ->
- Self ! {handle_down, Reason},
- ?M:handle_down_orig(Ref, Pid, Reason, State)
- end}}]}),
-
- {ok, VnodePid} = rpc:call(Key1Node, riak_core_vnode_manager, get_vnode_pid,
- [Key1Idx, riak_kv_vnode]),
- lager:info("Killing Vnode ~p for Key1 {~p, ~p}", [VnodePid, Key1Node,
- Key1Idx]),
- true = rpc:call(Key1Node, erlang, exit, [VnodePid, testkill]),
-
- lager:info("Waiting to receive msg indicating downed vnode"),
- Count = wait_for_all_handle_downs(0),
- ?assert(Count > 0),
-
- lager:info("Wait for stable ensembles"),
- ensemble_util:wait_until_stable(Node, NVal),
- lager:info("Re-reading keys"),
- [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys],
-
- lager:info("Killing Vnode Proxy for Key1"),
- Proxy = rpc:call(Key1Node, riak_core_vnode_proxy, reg_name, [riak_kv_vnode,
- Key1Idx]),
- ProxyPid = rpc:call(Key1Node, erlang, whereis, [Proxy]),
- lager:info("Killing Vnode Proxy ~p", [Proxy]),
- true = rpc:call(Key1Node, erlang, exit, [ProxyPid, testkill]),
-
- lager:info("Waiting to receive msg indicating downed vnode proxy:"),
- Count2 = wait_for_all_handle_downs(0),
- ?assert(Count2 > 0),
-
- lager:info("Wait for stable ensembles"),
- ensemble_util:wait_until_stable(Node, NVal),
- lager:info("Re-reading keys"),
- [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys],
-
- pass.
-
-wait_for_all_handle_downs(Count) ->
- receive
- {handle_down, _} ->
- wait_for_all_handle_downs(Count+1)
- after 5000 ->
- Count
- end.
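Both this test and ensemble_ring_changes earlier in the diff hand the same shape to rt_intercept:add/2. Reconstructed from those call sites (not from rt_intercept's own documentation), it is roughly:

    %% {TargetModule, [{{FunctionName, Arity}, Intercept}]}
    %% where Intercept is either the name of a canned intercept function
    %% (e.g. count_leader_ticks above) or {FreeVars, AnonFun}: FreeVars
    %% are closed into the anonymous fun by the rt_intercept_pt parse
    %% transform, and the displaced original remains callable as
    %% TargetModule_orig:FunctionName_orig/Arity, as both tests do here.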
diff --git a/tests/gh_riak_core_154.erl b/tests/gh_riak_core_154.erl deleted file mode 100644 index ff722a483..000000000 --- a/tests/gh_riak_core_154.erl +++ /dev/null @@ -1,57 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% Automated test for issue riak_core#154 -%% Hinted handoff does not occur after a node has been restarted in Riak 1.1 --module(gh_riak_core_154). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - %% Increase handoff concurrency on nodes - NewConfig = [{riak_core, [{handoff_concurrency, 1024}]}], - Nodes = rt:build_cluster(2, NewConfig), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - [Node1, Node2] = Nodes, - - lager:info("Write data while ~p is offline", [Node2]), - rt:stop(Node2), - rt:wait_until_unpingable(Node2), - ?assertEqual([], rt:systest_write(Node1, 1000, 3)), - - lager:info("Verify that ~p is missing data", [Node2]), - rt:start(Node2), - rt:stop(Node1), - rt:wait_until_unpingable(Node1), - ?assertMatch([{_,{error,notfound}}|_], - rt:systest_read(Node2, 1000, 3)), - - lager:info("Restart ~p and wait for handoff to occur", [Node1]), - rt:start(Node1), - rt:wait_for_service(Node1, riak_kv), - rt:wait_until_transfers_complete([Node1]), - - lager:info("Verify that ~p has all data", [Node2]), - rt:stop(Node1), - ?assertEqual([], rt:systest_read(Node2, 1000, 3)), - - lager:info("gh_riak_core_154: passed"), - pass. diff --git a/tests/gh_riak_core_155.erl b/tests/gh_riak_core_155.erl deleted file mode 100644 index 6a6e7da82..000000000 --- a/tests/gh_riak_core_155.erl +++ /dev/null @@ -1,69 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(gh_riak_core_155). --behavior(riak_test). --export([confirm/0]). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). 
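The confirm/0 below installs a named sleep_start_proxies intercept whose body is also outside this diff. Given the surrounding comments about delaying proxy startup, a plausible shape would be the following sketch (assumed, not the actual intercept; the delay value is invented, and ?M would be riak_core_vnode_proxy_sup_orig):

    %% Sketch: hold up proxy startup so incoming gets race ahead of it.
    sleep_start_proxies(Node) ->
        timer:sleep(3000),  %% illustrative delay
        ?M:start_proxies_orig(Node).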
- 
-confirm() ->
- [Node] = rt:build_cluster(1),
-
- %% Generate a valid preflist for our get requests
- rpc:call(Node, riak_core, wait_for_service, [riak_kv]),
- BKey = {<<"bucket">>, <<"value">>},
- DocIdx = riak_core_util:chash_std_keyfun(BKey),
- PL = rpc:call(Node, riak_core_apl, get_apl, [DocIdx, 3, riak_kv]),
-
- lager:info("Adding delayed start to app.config"),
- NewConfig = [{riak_core, [{delayed_start, 1000}]}],
- rt:update_app_config(Node, NewConfig),
-
- %% Restart node, add intercept that delays proxy startup, and issue gets.
- %% Gets will come in before the proxies have started, and should trigger the crash.
- rt:stop_and_wait(Node),
- rt:async_start(Node),
- rt:wait_until_pingable(Node),
- rt_intercept:load_intercepts([Node]),
- rt_intercept:add(Node, {riak_core_vnode_proxy_sup,
- [{{start_proxies,1}, sleep_start_proxies}]}),
-
- lager:info("Installed intercept to delay riak_kv proxy startup"),
- lager:info("Issuing 10000 gets against ~p", [Node]),
- perform_gets(10000, Node, PL, BKey),
-
- lager:info("Verifying ~p has not crashed", [Node]),
- [begin
- ?assertEqual(pong, net_adm:ping(Node)),
- timer:sleep(1000)
- end || _ <- lists:seq(1,10)],
-
- lager:info("Test passed"),
- pass.
-
-perform_gets(Count, Node, PL, BKey) ->
- rpc:call(Node, riak_kv_vnode, get, [PL, BKey, make_ref()]),
- perform_gets2(Count, Node, PL, BKey).
-
-perform_gets2(0, _, _, _) ->
- ok;
-perform_gets2(Count, Node, PL, BKey) ->
- rpc:call(Node, riak_kv_vnode, get, [PL, BKey, make_ref()], 1000),
- perform_gets2(Count - 1, Node, PL, BKey).
diff --git a/tests/gh_riak_core_176.erl b/tests/gh_riak_core_176.erl
deleted file mode 100644
index 00821a615..000000000
--- a/tests/gh_riak_core_176.erl
+++ /dev/null
@@ -1,82 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(gh_riak_core_176).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
- -confirm() -> - Nodes = rt:deploy_nodes(3), - [Node1, Node2, Node3] = Nodes, - Nodes12 = [Node1, Node2], - Nodes123 = Nodes, - - %% Stolen from riak_core_handoff_sender.erl - [_Name,Host] = string:tokens(atom_to_list(Node2), "@"), - - %% Get IP associated with node name - {ok, NodeIP} = inet:getaddr(Host, inet), - - %% Find alternative IP - {ok, IfAddrs} = inet:getifaddrs(), - Addrs = - lists:flatmap( - fun({_If, Props}) -> - [Addr || {addr, Addr} <- Props, - size(Addr) == 4] - end, IfAddrs), - AlternateIP = ip_tuple_to_string(hd(Addrs -- [NodeIP])), - - lager:info("Change ~p handoff_ip from ~p to ~p", - [Node2, NodeIP, AlternateIP]), - NewConfig = [{riak_core, [{handoff_ip, AlternateIP}]}], - rt:update_app_config(Node2, NewConfig), - rt:wait_for_service(Node2, riak_kv), - - lager:info("Write data to the cluster"), - rt:systest_write(Node1, 100), - - lager:info("Join ~p to the cluster and wait for handoff to finish", - [Node2]), - rt:join(Node2, Node1), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes12)), - ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes12)), - rt:wait_until_nodes_agree_about_ownership(Nodes12), - - %% Check 0.0.0.0 address works - lager:info("Change ~p handoff_ip to \"0.0.0.0\"", [Node3]), - rt:update_app_config(Node3, - [{riak_core, [{handoff_ip, "0.0.0.0"}]}]), - - lager:info("Join ~p to the cluster and wait for handoff to finish", - [Node3]), - rt:wait_for_service(Node3, riak_kv), - rt:join(Node3, Node1), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes123)), - ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes123)), - rt:wait_until_nodes_agree_about_ownership(Nodes123), - - lager:info("Test gh_riak_core_176 passed"), - pass. - -ip_tuple_to_string(T) -> - L = tuple_to_list(T), - string:join([integer_to_list(X) || X <- L], "."). - diff --git a/tests/gh_riak_kv_765.erl b/tests/gh_riak_kv_765.erl deleted file mode 100644 index a8f070cdd..000000000 --- a/tests/gh_riak_kv_765.erl +++ /dev/null @@ -1,123 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% This module tests the various AAE additions made in -%% https://github.com/basho/riak_kv/pull/765 - -%% !!! DO NOT ADD TO GIDDYUP -%% -%% This module is not meant to be used as an automated CI test. It -%% exists for development/code review purposes to ensure the changes -%% made in basho/riak_kv#765 work as the pull-request claims. -%% -%% !!! DO NOT ADD TO GIDDYUP - --module(gh_riak_kv_765). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - pass = check_empty_build(), - pass = check_throttle_and_expiration(), - pass. 
- -check_empty_build() -> - Config = [{riak_core, [{vnode_management_timer, 1000}, - {ring_creation_size, 4}]}], - Nodes = rt:build_cluster(1, Config), - Node = hd(Nodes), - timer:sleep(2000), - Self = self(), - spawn(fun() -> - time_build(Node), - Self ! done - end), - Result = receive - done -> pass - after - 10000 -> - lager:info("Failed. Empty AAE trees were not built instantly"), - fail - end, - rt:clean_cluster(Nodes), - Result. - -check_throttle_and_expiration() -> - Config = [{riak_kv, [{anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100}, - {anti_entropy_tick, 1000}, - {anti_entropy, {off, []}}]}, - {riak_core, [{vnode_management_timer, 1000}, - {ring_creation_size, 4}]}], - Nodes = rt:build_cluster(1, Config), - Node = hd(Nodes), - timer:sleep(2000), - - lager:info("Write 1000 keys"), - rt:systest_write(Node, 1000), - enable_aae(Node), - time_build(Node), - Duration1 = rebuild(Node, 30000, 1000), - Duration2 = rebuild(Node, 30000, 5500), - ?assert(Duration2 > (2 * Duration1)), - - %% Test manual expiration - lager:info("Disabling automatic expiration"), - rpc:call(Node, application, set_env, - [riak_kv, anti_entropy_expire, never]), - lager:info("Manually expiring hashtree for partition 0"), - expire_tree(Node, 0), - pass. - -time_build(Node) -> - T0 = erlang:now(), - rt:wait_until_aae_trees_built([Node]), - Duration = timer:now_diff(erlang:now(), T0), - lager:info("Build took ~b us", [Duration]), - Duration. - -rebuild(Node, Limit, Wait) -> - rpc:call(Node, application, set_env, - [riak_kv, anti_entropy_build_throttle, {Limit, Wait}]), - rpc:call(Node, application, set_env, - [riak_kv, anti_entropy_expire, 0]), - timer:sleep(1500), - disable_aae(Node), - rpc:call(Node, ets, delete_all_objects, [ets_riak_kv_entropy]), - enable_aae(Node), - time_build(Node). - -enable_aae(Node) -> - rpc:call(Node, riak_kv_entropy_manager, enable, []). - -disable_aae(Node) -> - rpc:call(Node, riak_kv_entropy_manager, disable, []). - -expire_tree(Node, Partition) -> - Now = erlang:now(), - {ok, Tree} = rpc:call(Node, riak_kv_vnode, hashtree_pid, [Partition]), - rpc:call(Node, riak_kv_index_hashtree, expire, [Tree]), - rt:wait_until(Node, - fun(_) -> - Info = rpc:call(Node, riak_kv_entropy_info, compute_tree_info, []), - {0, Built} = lists:keyfind(0, 1, Info), - Built > Now - end), - ok. diff --git a/tests/hooks.erl b/tests/hooks.erl deleted file mode 100644 index 3b4204276..000000000 --- a/tests/hooks.erl +++ /dev/null @@ -1,83 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2010-2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% -%% Pre/post commit hooks for testing -%% --module(hooks). --compile([export_all]). - -precommit_nop(Obj) -> - Obj. - -precommit_fail(_Obj) -> - fail. - -precommit_failatom(_Obj) -> - {fail, on_purpose}. - -precommit_failstr(_Obj) -> - {fail, "on purpose"}. 
- -precommit_failbin(_Obj) -> - {fail, <<"on purpose">>}. - -precommit_failkey(Obj) -> - case riak_object:key(Obj) of - <<"fail">> -> - fail; - _ -> - Obj - end. - - -set_precommit(Bucket, Hook) when is_atom(Hook) -> - set_precommit(Bucket, atom_to_binary(Hook, latin1)); -set_precommit(Bucket, Hook) when is_list(Hook) -> - set_precommit(Bucket, list_to_binary(Hook)); -set_precommit(Bucket, Hook) -> - {ok,C} = riak:local_client(), - C:set_bucket(Bucket, - [{precommit, [{struct,[{<<"mod">>,<<"hooks">>}, - {<<"fun">>,Hook}]}]}]). -set_hooks() -> - set_precommit(), - set_postcommit(). - -set_precommit() -> - hooks:set_precommit(<<"failatom">>,precommit_failatom), - hooks:set_precommit(<<"failstr">>,precommit_failstr), - hooks:set_precommit(<<"failbin">>,precommit_failbin), - hooks:set_precommit(<<"failkey">>,precommit_failkey). - -set_postcommit() -> - {ok, C} = riak:local_client(), - C:set_bucket(<<"postcommit">>,[{postcommit, [{struct,[{<<"mod">>,<<"hooks">>},{<<"fun">>, <<"postcommit_msg">>}]}]}]). - -postcommit_msg(Obj) -> - Bucket = riak_object:bucket(Obj), - Key = riak_object:key(Obj), - case application:get_env(riak_test, test_pid) of - {ok, RTPid} when is_pid(RTPid) -> - RTPid ! {wrote, Bucket, Key}; - _ -> - error_logger:error_msg("No riak_test pid to send the postcommit to!") - end, - ok. diff --git a/tests/http_bucket_types.erl b/tests/http_bucket_types.erl deleted file mode 100644 index bc56b9d11..000000000 --- a/tests/http_bucket_types.erl +++ /dev/null @@ -1,438 +0,0 @@ --module(http_bucket_types). - --behavior(riak_test). --export([confirm/0, mapred_modfun/3, mapred_modfun_type/3]). - --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --define(WAIT(E), ?assertEqual(ok, rt:wait_until(fun() -> (E) end))). 
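Since the ?WAIT macro above does the heavy lifting for every eventually-consistent assertion in the test below, one expansion written out may help:

    %% ?WAIT({ok, []} == rhc:list_buckets(RHC)) expands to:
    ?assertEqual(ok, rt:wait_until(fun() ->
                                       ({ok, []} == rhc:list_buckets(RHC))
                                   end)).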
- 
-confirm() ->
- application:start(ibrowse),
- lager:info("Deploy some nodes"),
- Nodes = rt:build_cluster(4, [], [
- {riak_core, [{default_bucket_props,
- [{n_val, 2}]}]}]),
- Node = hd(Nodes),
-
- RMD = riak_test_runner:metadata(),
- HaveIndexes = case proplists:get_value(backend, RMD) of
- undefined -> false; %% default is da 'cask
- bitcask -> false;
- _ -> true
- end,
-
- RHC = rt:httpc(Node),
- lager:info("default type get/put test"),
- %% write explicitly to the default type
- ok = rhc:put(RHC, riakc_obj:new({<<"default">>, <<"bucket">>},
- <<"key">>, <<"value">>)),
-
- %% read from the default bucket implicitly
- {ok, O1} = rhc:get(RHC, <<"bucket">>, <<"key">>),
- %% read from the default bucket explicitly
- {ok, O2} = rhc:get(RHC, {<<"default">>, <<"bucket">>}, <<"key">>),
-
- %% same object, but slightly different presentation
- ?assertEqual(riakc_obj:key(O1), riakc_obj:key(O2)),
- ?assertEqual(riakc_obj:get_value(O1), riakc_obj:get_value(O2)),
- ?assertEqual(riakc_obj:only_bucket(O1), riakc_obj:only_bucket(O2)),
- ?assertEqual(riakc_obj:vclock(O1), riakc_obj:vclock(O2)),
- ?assertEqual(undefined, riakc_obj:bucket_type(O1)),
- ?assertEqual(<<"default">>, riakc_obj:bucket_type(O2)),
-
- %% write implicitly to the default bucket
- ok = rhc:put(RHC, riakc_obj:update_value(O1, <<"newvalue">>)),
-
- %% read from the default bucket explicitly
- {ok, O3} = rhc:get(RHC, {<<"default">>, <<"bucket">>}, <<"key">>),
-
- ?assertEqual(<<"newvalue">>, riakc_obj:get_value(O3)),
-
- lager:info("list_keys test"),
- %% list keys
- ?WAIT({ok, [<<"key">>]} == rhc:list_keys(RHC, <<"bucket">>)),
- ?WAIT({ok, [<<"key">>]} == rhc:list_keys(RHC, {<<"default">>, <<"bucket">>})),
-
- lager:info("list_buckets test"),
- %% list buckets
- ?WAIT({ok, [<<"bucket">>]} == rhc:list_buckets(RHC)),
- ?WAIT({ok, [<<"bucket">>]} == rhc:list_buckets(RHC, <<"default">>)),
-
- timer:sleep(5000),
- lager:info("default type delete test"),
- %% delete explicitly via the default bucket
- ok = rhc:delete_obj(RHC, O3),
-
- %% read from the default bucket implicitly
- {error, {notfound, VC}} = rhc:get(RHC, <<"bucket">>, <<"key">>, [deletedvclock]),
- %% read from the default bucket explicitly
- {error, {notfound, VC}} = rhc:get(RHC, {<<"default">>, <<"bucket">>}, <<"key">>,
- [deletedvclock]),
-
- %% write it again, being nice to siblings
- O3a = riakc_obj:new({<<"default">>, <<"bucket">>},
- <<"key">>, <<"newestvalue">>),
- ok = rhc:put(RHC, riakc_obj:set_vclock(O3a, VC)),
-
- {ok, O4} = rhc:get(RHC, {<<"default">>, <<"bucket">>}, <<"key">>,
- [deletedvclock]),
-
- %% delete explicitly via the default bucket
- ok = rhc:delete_obj(RHC, O4),
-
- %% read from the default bucket implicitly
- {error, notfound} = rhc:get(RHC, <<"bucket">>, <<"key">>),
- %% read from the default bucket explicitly
- {error, notfound} = rhc:get(RHC, {<<"default">>, <<"bucket">>}, <<"key">>),
-
- timer:sleep(5000), %% wait for delete_mode 3s to expire
-
- %% now there should be no buckets or keys to be listed...
- %%
- %% list keys
- ?WAIT({ok, []} == rhc:list_keys(RHC, <<"bucket">>)),
- ?WAIT({ok, []} == rhc:list_keys(RHC, {<<"default">>, <<"bucket">>})),
-
- %% list buckets
- ?WAIT({ok, []} == rhc:list_buckets(RHC)),
- ?WAIT({ok, []} == rhc:list_buckets(RHC, <<"default">>)),
-
- lager:info("custom type get/put test"),
- %% create a new type
- ok = rt:create_and_activate_bucket_type(Node, <<"mytype">>, [{n_val,3}]),
-
- %% allow cluster metadata some time to propagate
- timer:sleep(1000),
-
- lager:info("doing put"),
- ok = rhc:put(RHC, riakc_obj:new({<<"mytype">>, <<"bucket">>},
- <<"key">>, <<"newestvalue">>)),
-
- lager:info("doing get"),
- {ok, O5} = rhc:get(RHC, {<<"mytype">>, <<"bucket">>}, <<"key">>),
-
- ?assertEqual(<<"newestvalue">>, riakc_obj:get_value(O5)),
-
- lager:info("doing get"),
- %% this type is NOT aliased to the default bucket
- {error, notfound} = rhc:get(RHC, <<"bucket">>, <<"key">>),
-
- lager:info("custom type list_keys test"),
- ?WAIT({ok, []} == rhc:list_keys(RHC, <<"bucket">>)),
- ?WAIT({ok, [<<"key">>]} == rhc:list_keys(RHC, {<<"mytype">>, <<"bucket">>})),
-
- lager:info("custom type list_buckets test"),
- %% list buckets
- ?WAIT({ok, []} == rhc:list_buckets(RHC)),
- ?WAIT({ok, [<<"bucket">>]} == rhc:list_buckets(RHC, <<"mytype">>)),
-
- lager:info("UTF-8 type get/put test"),
- %% こんにちは - konnichiwa (Japanese)
- UnicodeTypeBin = unicode:characters_to_binary([12371,12435,12395,12385,12399], utf8),
- %% سلام - Salam (Arabic)
- UnicodeBucketBin = unicode:characters_to_binary([1587,1604,1575,1605], utf8),
-
- UCBBin = {UnicodeTypeBin, UnicodeBucketBin},
-
- ok = rt:create_and_activate_bucket_type(Node, UnicodeTypeBin, [{n_val,3}]),
-
- lager:info("doing put"),
- ok = rhc:put(RHC, riakc_obj:new(UCBBin,
- <<"key">>, <<"unicode">>)),
-
- lager:info("doing get"),
- {ok, O6} = rhc:get(RHC, UCBBin, <<"key">>),
-
- ?assertEqual(<<"unicode">>, riakc_obj:get_value(O6)),
-
- lager:info("unicode type list_keys test"),
- ?WAIT({ok, [<<"key">>]} == rhc:list_keys(RHC, UCBBin)),
-
- lager:info("unicode type list_buckets test"),
- %% list buckets
-
- %% This is a rather awkward representation, but it's what rhc is
- %% currently giving us. Curl gives us
- %% {"buckets":["\u0633\u0644\u0627\u0645"]} to illustrate where
- %% the values are coming from, and those are indeed the correct
- %% hexadecimal values for the UTF-8 representation of the bucket
- %% name
- ?WAIT({ok, [<<"0633064406270645">>]} == rhc:list_buckets(RHC, UnicodeTypeBin)),
-
- lager:info("bucket properties tests"),
- rhc:set_bucket(RHC, {<<"default">>, <<"mybucket">>},
- [{n_val, 5}]),
- {ok, BProps} = rhc:get_bucket(RHC, <<"mybucket">>),
- ?assertEqual(5, proplists:get_value(n_val, BProps)),
-
- rhc:reset_bucket(RHC, {<<"default">>, <<"mybucket">>}),
-
- {ok, BProps1} = rhc:get_bucket(RHC, <<"mybucket">>),
- ?assertEqual(2, proplists:get_value(n_val, BProps1)),
-
- rhc:set_bucket(RHC, {<<"mytype">>, <<"mybucket">>},
- [{n_val, 5}]),
- {ok, BProps2} = rhc:get_bucket(RHC, <<"mybucket">>),
- %% the default in the app.config is set to 2... 
- ?assertEqual(2, proplists:get_value(n_val, BProps2)),
-
- {ok, BProps3} = rhc:get_bucket(RHC, {<<"mytype">>,
- <<"mybucket">>}),
- ?assertEqual(5, proplists:get_value(n_val, BProps3)),
-
- rhc:reset_bucket(RHC, {<<"mytype">>, <<"mybucket">>}),
-
- {ok, BProps4} = rhc:get_bucket(RHC, {<<"mytype">>,
- <<"mybucket">>}),
- ?assertEqual(3, proplists:get_value(n_val, BProps4)),
-
- lager:info("bucket type properties test"),
-
- rhc:set_bucket_type(RHC, <<"mytype">>,
- [{n_val, 5}]),
-
- {ok, BProps5} = rhc:get_bucket_type(RHC, <<"mytype">>),
-
- ?assertEqual(5, proplists:get_value(n_val, BProps5)),
-
- %% check that the bucket inherits from its type
- {ok, BProps6} = rhc:get_bucket(RHC, {<<"mytype">>,
- <<"mybucket">>}),
- ?assertEqual(5, proplists:get_value(n_val, BProps6)),
-
- rhc:set_bucket_type(RHC, <<"mytype">>, [{n_val, 3}]),
-
- {ok, BProps7} = rhc:get_bucket_type(RHC, <<"mytype">>),
-
- ?assertEqual(3, proplists:get_value(n_val, BProps7)),
-
- %% make sure a regular bucket under the default type reflects app.config
- {ok, BProps8} = rhc:get_bucket(RHC, {<<"default">>,
- <<"mybucket">>}),
- ?assertEqual(2, proplists:get_value(n_val, BProps8)),
-
- %% make sure the type we previously created is NOT affected
- {ok, BProps9} = rhc:get_bucket_type(RHC, <<"mytype">>),
-
- ?assertEqual(3, proplists:get_value(n_val, BProps9)),
-
- %% make sure a bucket under that type is also not affected
- {ok, BProps10} = rhc:get_bucket(RHC, {<<"mytype">>,
- <<"mybucket">>}),
- ?assertEqual(3, proplists:get_value(n_val, BProps10)),
-
- %% make sure a newly created type is not affected either
- %% create a new type
- ok = rt:create_and_activate_bucket_type(Node, <<"mynewtype">>, []),
- %% allow cluster metadata some time to propagate
- timer:sleep(1000),
-
- {ok, BProps11} = rhc:get_bucket_type(RHC, <<"mynewtype">>),
-
- ?assertEqual(3, proplists:get_value(n_val, BProps11)),
-
- %% 2i tests
-
- case HaveIndexes of
- false -> ok;
- true ->
- Obj01 = riakc_obj:new(<<"test">>, <<"JRD">>, <<"John Robert Doe, 25">>),
- Obj02 = riakc_obj:new({<<"mytype">>, <<"test">>}, <<"JRD">>, <<"Jane Rachel Doe, 21">>),
-
- Obj1 = riakc_obj:update_metadata(Obj01,
- riakc_obj:set_secondary_index(
- riakc_obj:get_update_metadata(Obj01),
- [{{integer_index, "age"},
- [25]},{{binary_index, "name"},
- [<<"John">>, <<"Robert">>
- ,<<"Doe">>]}])),
-
- Obj2 = riakc_obj:update_metadata(Obj02,
- riakc_obj:set_secondary_index(
- riakc_obj:get_update_metadata(Obj02),
- [{{integer_index, "age"},
- [21]},{{binary_index, "name"},
- [<<"Jane">>, <<"Rachel">>
- ,<<"Doe">>]}])),
-
- ok = rhc:put(RHC, Obj1),
- ok = rhc:put(RHC, Obj2),
-
- ?assertMatch({ok, {index_results_v1, [<<"JRD">>], _, _}}, rhc:get_index(RHC, <<"test">>,
- {binary_index,
- "name"},
- <<"John">>)),
-
- ?assertMatch({ok, {index_results_v1, [], _, _}}, rhc:get_index(RHC, <<"test">>,
- {binary_index,
- "name"},
- <<"Jane">>)),
-
- ?assertMatch({ok, {index_results_v1, [<<"JRD">>], _, _}}, rhc:get_index(RHC,
- {<<"mytype">>,
- <<"test">>},
- {binary_index,
- "name"},
- <<"Jane">>))
- end,
-
-
- Store = fun(Bucket, {K,V, BI, II}) ->
- O=riakc_obj:new(Bucket, K),
- MD=riakc_obj:add_secondary_index(dict:new(),
- {{binary_index, "b_idx"},
- [BI]}),
- MD2=riakc_obj:add_secondary_index(MD, {{integer_index,
- "i_idx"}, [II]}),
- OTwo=riakc_obj:update_metadata(O,MD2),
- ok = rhc:put(RHC,riakc_obj:update_value(OTwo, V, "application/json"))
- end,
-
- [Store(<<"MRbucket">>, KV) || KV <- [
- {<<"foo">>, <<"2">>, <<"a">>, 4},
- {<<"bar">>, <<"3">>, <<"b">>, 7},
- {<<"baz">>, <<"4">>, 
<<"a">>, 4}]], - - ?assertEqual({ok, [{1, [9]}]}, - rhc:mapred_bucket(RHC, <<"MRbucket">>, - [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, - undefined, true}])), - - [Store({<<"mytype">>, <<"MRbucket">>}, KV) || KV <- [ - {<<"foo">>, <<"2">>, <<"a">>, 4}, - {<<"bar">>, <<"3">>, <<"b">>, 7}, - {<<"baz">>, <<"4">>, <<"a">>, 4}, - {<<"bam">>, <<"5">>, <<"a">>, 3}]], - - ?assertEqual({ok, [{1, [14]}]}, - rhc:mapred_bucket(RHC, {<<"mytype">>, <<"MRbucket">>}, - [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, - undefined, true}])), - - ?assertEqual({ok, [{1, [3]}]}, - rhc:mapred(RHC, - [{<<"MRbucket">>, <<"foo">>}, - {<<"MRbucket">>, <<"bar">>}, - {<<"MRbucket">>, <<"baz">>}], - [{map, {jsanon, <<"function (v) { return [1]; }">>}, - undefined, false}, - {reduce, {jsanon, - <<"function(v) { - total = v.reduce( - function(prev,curr,idx,array) { - return prev+curr; - }, 0); - return [total]; - }">>}, - undefined, true}])), - - ?assertEqual({ok, [{1, [4]}]}, - rhc:mapred(RHC, - [{{{<<"mytype">>, <<"MRbucket">>}, <<"foo">>}, - undefined}, - {{{<<"mytype">>, <<"MRbucket">>}, <<"bar">>}, - undefined}, - {{{<<"mytype">>, <<"MRbucket">>}, <<"baz">>}, - undefined}, - {{{<<"mytype">>, <<"MRbucket">>}, <<"bam">>}, - undefined}], - [{map, {jsanon, <<"function (v) { return [1]; }">>}, - undefined, false}, - {reduce, {jsanon, - <<"function(v) { - total = v.reduce( - function(prev,curr,idx,array) { - return prev+curr; - }, 0); - return [total]; - }">>}, - undefined, true}])), - - case HaveIndexes of - false -> ok; - true -> - {ok, [{1, Results}]} = rhc:mapred(RHC, - {index,<<"MRbucket">>,{integer_index, - "i_idx"},3,5}, - [{map, {modfun, riak_kv_mapreduce, - map_object_value}, - undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, - reduce_set_union}, - undefined, true}]), - ?assertEqual([<<"2">>, <<"4">>], lists:sort(Results)), - - {ok, [{1, Results1}]} = rhc:mapred(RHC, - {index,{<<"mytype">>, - <<"MRbucket">>},{integer_index, - "i_idx"},3,5}, - [{map, {modfun, riak_kv_mapreduce, - map_object_value}, - undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, - reduce_set_union}, - undefined, true}]), - ?assertEqual([<<"2">>, <<"4">>, <<"5">>], lists:sort(Results1)), - - {ok, [{1, Results2}]} = rhc:mapred(RHC, - {index,<<"MRbucket">>,{binary_index, - "b_idx"}, <<"a">>}, - [{map, {modfun, riak_kv_mapreduce, - map_object_value}, - undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, - reduce_set_union}, - undefined, true}]), - ?assertEqual([<<"2">>, <<"4">>], lists:sort(Results2)), - - {ok, [{1, Results3}]} = rhc:mapred(RHC, - {index,{<<"mytype">>, - <<"MRbucket">>},{binary_index, - "b_idx"}, <<"a">>}, - [{map, {modfun, riak_kv_mapreduce, - map_object_value}, - undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, - reduce_set_union}, - undefined, true}]), - ?assertEqual([<<"2">>, <<"4">>, <<"5">>], lists:sort(Results3)), - ok - end, - - %% load this module on all the nodes - ok = rt:load_modules_on_nodes([?MODULE], Nodes), - - %% do a modfun mapred using the function from this module - ?assertEqual({ok, [{1, [2]}]}, - rhc:mapred_bucket(RHC, {modfun, ?MODULE, - mapred_modfun, []}, - [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, - undefined, true}])), - - %% do a modfun mapred using the function from this module - ?assertEqual({ok, [{1, [5]}]}, - rhc:mapred_bucket(RHC, {modfun, ?MODULE, - mapred_modfun_type, []}, - [{map, 
{jsfun, <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, - undefined, true}])), - pass. - -mapred_modfun(Pipe, Args, _Timeout) -> - lager:info("Args for mapred modfun are ~p", [Args]), - riak_pipe:queue_work(Pipe, {{<<"MRbucket">>, <<"foo">>}, {struct, []}}), - riak_pipe:eoi(Pipe). - -mapred_modfun_type(Pipe, Args, _Timeout) -> - lager:info("Args for mapred modfun are ~p", [Args]), - riak_pipe:queue_work(Pipe, {{{<<"mytype">>, <<"MRbucket">>}, <<"bam">>}, {struct, []}}), - riak_pipe:eoi(Pipe). diff --git a/tests/http_security.erl b/tests/http_security.erl deleted file mode 100644 index 8bf09ddb6..000000000 --- a/tests/http_security.erl +++ /dev/null @@ -1,579 +0,0 @@ --module(http_security). - --behavior(riak_test). --export([confirm/0]). - --export([map_object_value/3, reduce_set_union/2, mapred_modfun_input/3]). - --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --define(assertDenied(Op), ?assertMatch({error, {forbidden, _}}, Op)). - -confirm() -> - application:start(crypto), - application:start(asn1), - application:start(public_key), - application:start(ssl), - application:start(ibrowse), - io:format("turning on tracing"), - ibrowse:trace_on(), - - CertDir = rt_config:get(rt_scratch_dir) ++ "/http_certs", - - %% make a bunch of crypto keys - make_certs:rootCA(CertDir, "rootCA"), - make_certs:endusers(CertDir, "rootCA", ["site3.basho.com", "site4.basho.com"]), - - - lager:info("Deploy some nodes"), - PrivDir = rt:priv_dir(), - Conf = [ - {riak_core, [ - {default_bucket_props, [{allow_mult, true}]}, - {ssl, [ - {certfile, filename:join([CertDir, - "site3.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site3.basho.com/key.pem"])}, - {cacertfile, filename:join([CertDir, "site3.basho.com/cacerts.pem"])} - ]} - ]}, - {riak_search, [ - {enabled, true} - ]} - ], - Nodes = rt:build_cluster(4, Conf), - Node = hd(Nodes), - %% enable security on the cluster - ok = rpc:call(Node, riak_core_console, security_enable, [[]]), - enable_ssl(Node), - %%[enable_ssl(N) || N <- Nodes], - {ok, [{IP0, Port0}]} = rpc:call(Node, application, get_env, - [riak_api, http]), - {ok, [{IP, Port}]} = rpc:call(Node, application, get_env, - [riak_api, https]), - - MD = riak_test_runner:metadata(), - HaveIndexes = case proplists:get_value(backend, MD) of - undefined -> false; %% default is da 'cask - bitcask -> false; - _ -> true - end, - - lager:info("Checking non-SSL results in error"), - %% connections over regular HTTP get told to go elsewhere - C0 = rhc:create(IP0, Port0, "riak", []), - ?assertMatch({error, {ok, "426", _, _}}, rhc:ping(C0)), - - lager:info("Checking SSL demands authentication"), - C1 = rhc:create(IP, Port, "riak", [{is_ssl, true}]), - ?assertMatch({error, {ok, "401", _, _}}, rhc:ping(C1)), - - lager:info("Checking that unknown user demands reauth"), - C2 = rhc:create(IP, Port, "riak", [{is_ssl, true}, {credentials, - "user", - "pass"}]), - ?assertMatch({error, {ok, "401", _, _}}, rhc:ping(C2)), - - %% Store this in a variable so once Riak supports utf-8 usernames - %% via HTTP(s) we can test it with just one change - Username = "user", - - lager:info("Creating user"), - %% grant the user credentials - ok = rpc:call(Node, riak_core_console, add_user, [[Username, "password=password"]]), - - lager:info("Setting trust mode on user"), - %% trust anyone from this host - MyIP = case IP0 of - "127.0.0.1" -> IP0; - _ -> - {ok,Hostname} = inet:gethostname(), - {ok,A0} = inet:getaddr(Hostname, inet), - inet:ntoa(A0) - 
end,
- ok = rpc:call(Node, riak_core_console, add_source, [[Username,
- MyIP++"/32",
- "trust"]]),
-
- lager:info("Checking that credentials are ignored in trust mode"),
- %% invalid credentials should be ignored in trust mode
- C3 = rhc:create(IP, Port, "riak", [{is_ssl, true}, {credentials,
- Username,
- "pass"}]),
- ?assertEqual(ok, rhc:ping(C3)),
-
- lager:info("Setting password mode on user"),
- %% require password from our IP
- ok = rpc:call(Node, riak_core_console, add_source, [[Username,
- MyIP++"/32",
- "password"]]),
-
- lager:info("Checking that incorrect password demands reauth"),
- %% invalid credentials should be rejected in password mode
- C4 = rhc:create(IP, Port, "riak", [{is_ssl, true}, {credentials,
- Username,
- "pass"}]),
- ?assertMatch({error, {ok, "401", _, _}}, rhc:ping(C4)),
-
- lager:info("Checking that correct password is successful"),
- %% valid credentials should be accepted in password mode
- C5 = rhc:create(IP, Port, "riak", [{is_ssl, true}, {credentials,
- Username,
- "password"}]),
-
- ?assertEqual(ok, rhc:ping(C5)),
-
- lager:info("verifying the peer certificate rejects mismatch with server cert"),
- %% verifying the peer certificate rejects mismatch with server cert
- C6 = rhc:create(IP, Port, "riak", [{is_ssl, true},
- {credentials, Username, "password"},
- {ssl_options, [
- {cacertfile, filename:join([PrivDir,
- "certs/cacert.org/ca/root.crt"])},
- {verify, verify_peer},
- {reuse_sessions, false}
- ]}
- ]),
-
- ?assertMatch({error,{conn_failed,{error,_}}}, rhc:ping(C6)),
-
- lager:info("verifying the peer certificate should work if the cert is valid"),
- %% verifying the peer certificate should work if the cert is valid
- C7 = rhc:create(IP, Port, "riak", [{is_ssl, true},
- {credentials, Username, "password"},
- {ssl_options, [
- {cacertfile, filename:join([CertDir,
- "rootCA/cert.pem"])},
- {verify, verify_peer},
- {reuse_sessions, false}
- ]}
- ]),
-
- ?assertEqual(ok, rhc:ping(C7)),
-
- lager:info("verifying that user cannot get/put without grants"),
- ?assertMatch({error, {ok, "403", _, _}}, rhc:get(C7, <<"hello">>,
- <<"world">>)),
-
- Object = riakc_obj:new(<<"hello">>, <<"world">>, <<"howareyou">>,
- <<"text/plain">>),
-
- ?assertMatch({error, {ok, "403", _, _}}, rhc:put(C7, Object)),
-
- lager:info("Granting riak_kv.get, checking get works but put doesn't"),
- ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.get", "on",
- "default", "hello", "to", Username]]),
-
- %% key is not present
- ?assertMatch({error, notfound}, rhc:get(C7, <<"hello">>,
- <<"world">>)),
-
- ?assertMatch({error, {ok, "403", _, _}}, rhc:put(C7, Object)),
-
- lager:info("Granting riak_kv.put, checking put works and roundtrips with get"),
- ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.put", "on",
- "default", "hello", "to", Username]]),
-
- %% NOW we can put
- ?assertEqual(ok, rhc:put(C7, Object)),
-
- {ok, O} = rhc:get(C7, <<"hello">>, <<"world">>),
- ?assertEqual(<<"hello">>, riakc_obj:bucket(O)),
- ?assertEqual(<<"world">>, riakc_obj:key(O)),
- ?assertEqual(<<"howareyou">>, riakc_obj:get_value(O)),
-
- lager:info("Checking that delete is disallowed"),
- %% delete
- ?assertMatch({error, {ok, "403", _, _}}, rhc:delete(C7, <<"hello">>,
- <<"world">>)),
-
- lager:info("Checking that delete for non-existing key is disallowed"),
- ?assertMatch({error, {ok, "403", _, _}}, rhc:delete(C7, <<"hello">>,
- <<"_xxboguskey">>)),
-
- lager:info("Granting riak_kv.delete, checking that delete succeeds"),
- ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.delete", 
"on", - "default", "hello", "to", Username]]), - ?assertEqual(ok, rhc:delete(C7, <<"hello">>, - <<"world">>)), - - %% key is deleted - ?assertMatch({error, notfound}, rhc:get(C7, <<"hello">>, - <<"world">>)), - - %% write it back for list_buckets later - ?assertEqual(ok, rhc:put(C7, Object)), - - lager:info("Checking that delete for non-existing key is allowed"), - ?assertMatch({error, {ok, "404", _, _}}, rhc:delete(C7, <<"hello">>, - <<"_xxboguskey">>)), - - - %% slam the door in the user's face - lager:info("Revoking get/put/delete, checking that get/put/delete are disallowed"), - ok = rpc:call(Node, riak_core_console, revoke, - [["riak_kv.put,riak_kv.get,riak_kv.delete", "on", - "default", "hello", "from", Username]]), - - ?assertMatch({error, {ok, "403", _, _}}, rhc:get(C7, <<"hello">>, - <<"world">>)), - - ?assertMatch({error, {ok, "403", _, _}}, rhc:put(C7, Object)), - - %% list buckets - lager:info("Checking that list buckets is disallowed"), - ?assertMatch({error, {"403", _}}, rhc:list_buckets(C7)), - - lager:info("Granting riak_kv.list_buckets, checking that list_buckets succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.list_buckets", "on", - "default", "to", Username]]), - ?assertMatch({ok, [<<"hello">>]}, rhc:list_buckets(C7)), - - %% list keys - lager:info("Checking that list keys is disallowed"), - ?assertMatch({error, {"403", _}}, rhc:list_keys(C7, <<"hello">>)), - - lager:info("Granting riak_kv.list_keys, checking that list_keys succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.list_keys", "on", - "default", "to", Username]]), - - ?assertMatch({ok, [<<"world">>]}, rhc:list_keys(C7, <<"hello">>)), - - lager:info("Revoking list_keys"), - ok = rpc:call(Node, riak_core_console, revoke, [["riak_kv.list_keys", "on", - "default", "from", Username]]), - - %% list keys with bucket type - rt:create_and_activate_bucket_type(Node, <<"list-keys-test">>, []), - - lager:info("Checking that list keys on a bucket-type is disallowed"), - ?assertMatch({error, {"403", _}}, rhc:list_keys(C7, {<<"list-keys-test">>, <<"hello">>})), - - lager:info("Granting riak_kv.list_keys on the bucket type, checking that list_keys succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.list_keys", "on", - "list-keys-test", "to", Username]]), - ?assertMatch({ok, []}, rhc:list_keys(C7, {<<"list-keys-test">>, <<"hello">>})), - - lager:info("Checking that get_bucket is disallowed"), - ?assertMatch({error, {ok, "403", _, _}}, rhc:get_bucket(C7, <<"hello">>)), - - lager:info("Granting riak_core.get_bucket, checking that get_bucket succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_core.get_bucket", "on", - "default", "hello", "to", Username]]), - - ?assertEqual(3, proplists:get_value(n_val, element(2, rhc:get_bucket(C7, - <<"hello">>)))), - - lager:info("Checking that reset_bucket is disallowed"), - ?assertMatch({error, {ok, "403", _, _}}, rhc:reset_bucket(C7, <<"hello">>)), - - lager:info("Checking that set_bucket is disallowed"), - ?assertMatch({error, {ok, "403", _, _}}, rhc:set_bucket(C7, <<"hello">>, - [{n_val, 5}])), - - lager:info("Granting set_bucket, checking that set_bucket succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_core.set_bucket", "on", - "default", "hello", "to", Username]]), - - ?assertEqual(ok, rhc:set_bucket(C7, <<"hello">>, - [{n_val, 5}])), - - ?assertEqual(5, proplists:get_value(n_val, element(2, rhc:get_bucket(C7, - <<"hello">>)))), - - %% 2i - case HaveIndexes of - false -> ok; - true -> - %% 2i 
permission test - lager:info("Checking 2i is disallowed"), - ?assertMatch({error, {"403", _}}, - rhc:get_index(C7, <<"hello">>, - {binary_index, - "name"}, - <<"John">>)), - - lager:info("Granting 2i permissions, checking that results come back"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.index", "on", - "default", "to", Username]]), - - %% don't actually have any indexes - ?assertMatch({ok, ?INDEX_RESULTS{}}, - rhc:get_index(C7, <<"hello">>, - {binary_index, - "name"}, - <<"John">>)), - - lager:info("Checking that 2i on a bucket-type is disallowed"), - ?assertMatch({error, {"403", _}}, - rhc:get_index(C7, {<<"list-keys-test">>, - <<"hello">>}, {binary_index, "name"}, <<"John">>)), - - lager:info("Granting riak_kv.index on the bucket type, checking that get_index succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.index", "on", - "list-keys-test", "to", Username]]), - ?assertMatch({ok, ?INDEX_RESULTS{}}, - rhc:get_index(C7, {<<"list-keys-test">>, - <<"hello">>}, {binary_index, "name"}, <<"John">>)), - - ok - end, - - %% counters - - %% grant get/put again - lager:info("Granting get/put for counters, checking value and increment"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.get,riak_kv.put", "on", - "default", "hello", "to", Username]]), - - - ?assertMatch({error, {ok, "404", _, _}}, rhc:counter_val(C7, <<"hello">>, - <<"numberofpies">>)), - - ok = rhc:counter_incr(C7, <<"hello">>, - <<"numberofpies">>, 5), - - ?assertEqual({ok, 5}, rhc:counter_val(C7, <<"hello">>, - <<"numberofpies">>)), - - %% revoke get - lager:info("Revoking get, checking that value fails but increment succeeds"), - ok = rpc:call(Node, riak_core_console, revoke, - [["riak_kv.get", "on", "default", "hello", "from", Username]]), - - ?assertMatch({error, {ok, "403", _, _}}, rhc:counter_val(C7, <<"hello">>, - <<"numberofpies">>)), - ok = rhc:counter_incr(C7, <<"hello">>, - <<"numberofpies">>, 5), - - %% revoke put - lager:info("Revoking put, checking that increment fails"), - ok = rpc:call(Node, riak_core_console, revoke, - [["riak_kv.put", "on", "default", "hello", "from", Username]]), - - ?assertMatch({error, {ok, "403", _, _}}, rhc:counter_incr(C7, <<"hello">>, - <<"numberofpies">>, 5)), - - %% mapred tests - lager:info("Checking that full-bucket mapred is disallowed"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.put", "on", - "default", "MR", "to", Username]]), - - - ok = rhc:put(C7, riakc_obj:new(<<"MR">>, <<"lobster_roll">>, <<"16">>, - <<"text/plain">>)), - - ok = rhc:put(C7, riakc_obj:new(<<"MR">>, <<"pickle_plate">>, <<"9">>, - <<"text/plain">>)), - - ok = rhc:put(C7, riakc_obj:new(<<"MR">>, <<"pimms_cup">>, <<"8">>, - <<"text/plain">>)), - - ?assertMatch({error, {"403", _}}, - rhc:mapred_bucket(C7, <<"MR">>, [{map, {jsfun, - <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, undefined, - true}])), - - lager:info("Granting list-keys, asserting full-bucket mapred is still disallowed"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.list_keys", "on", - "default", "MR", "to", Username]]), - - ?assertMatch({error, {"403", _}}, - rhc:mapred_bucket(C7, <<"MR">>, [{map, {jsfun, - <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, undefined, - true}])), - - lager:info("Granting mapreduce, checking that job succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.mapreduce", "on", - "default", "MR", "to", Username]]), - - ?assertEqual({ok, [{1, [33]}]}, - 
rhc:mapred_bucket(C7, <<"MR">>, [{map, {jsfun, - <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, undefined, - true}])), - - %% load this module on all the nodes - ok = rt:load_modules_on_nodes([?MODULE], Nodes), - - lager:info("checking erlang mapreduce works"), - ?assertMatch({ok, [{1, _}]}, - rhc:mapred_bucket(C7, <<"MR">>, [{map, {modfun, - riak_kv_mapreduce, - map_object_value}, undefined, false}, - {reduce, {modfun, - riak_kv_mapreduce, - reduce_set_union}, undefined, - true}])), - - lager:info("checking that insecure input modfun fails"), - ?assertMatch({error, _}, - rhc:mapred_bucket(C7, {modfun, ?MODULE, mapred_modfun_input, - []}, [{map, {modfun, - riak_kv_mapreduce, - map_object_value}, undefined, false}, - {reduce, {modfun, - riak_kv_mapreduce, - reduce_set_union}, undefined, - true}])), - - lager:info("checking that insecure query modfuns fail"), - ?assertMatch({error, _}, - rhc:mapred_bucket(C7, <<"MR">>, [{map, {modfun, - ?MODULE, - map_object_value}, undefined, false}, - {reduce, {modfun, - ?MODULE, - reduce_set_union}, undefined, - true}])), - - lager:info("whitelisting module path"), - {?MODULE, _ModBin, ModFile} = code:get_object_code(?MODULE), - ok = rpc:call(Node, application, set_env, [riak_kv, add_paths, [filename:dirname(ModFile)]]), - - lager:info("checking that insecure input modfun fails when whitelisted but" - " lacking permissions"), - ?assertMatch({error, {"403", _}}, - rhc:mapred_bucket(C7, {modfun, ?MODULE, mapred_modfun_input, - []}, [{map, {modfun, - riak_kv_mapreduce, - map_object_value}, undefined, false}, - {reduce, {modfun, - riak_kv_mapreduce, - reduce_set_union}, undefined, - true}])), - - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.mapreduce", "on", - "any", "to", Username]]), - - lager:info("checking that insecure input modfun works when whitelisted and" - " has permissions"), - ?assertMatch({ok, _}, - rhc:mapred_bucket(C7, {modfun, ?MODULE, mapred_modfun_input, - []}, [{map, {modfun, - riak_kv_mapreduce, - map_object_value}, undefined, false}, - {reduce, {modfun, - riak_kv_mapreduce, - reduce_set_union}, undefined, - true}])), - - ok = rpc:call(Node, riak_core_console, revoke, [["riak_kv.mapreduce", "on", - "any", "from", Username]]), - - lager:info("checking that insecure query modfuns works when whitelisted"), - ?assertMatch({ok, _}, - rhc:mapred_bucket(C7, <<"MR">>, [{map, {modfun, - ?MODULE, - map_object_value}, undefined, false}, - {reduce, {modfun, - ?MODULE, - reduce_set_union}, undefined, - true}])), - - - lager:info("Revoking list-keys, checking that full-bucket mapred fails"), - ok = rpc:call(Node, riak_core_console, revoke, [["riak_kv.list_keys", "on", - "default", "MR", "from", Username]]), - - ?assertMatch({error, {"403", _}}, - rhc:mapred_bucket(C7, <<"MR">>, [{map, {jsfun, - <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, undefined, - true}])), - - crdt_tests(Nodes, C7), - - URL = lists:flatten(io_lib:format("https://~s:~b", [IP, Port])), - - lager:info("checking link walking fails because it is deprecated"), - - ?assertMatch({ok, "403", _, <<"Link walking is deprecated", _/binary>>}, - ibrowse:send_req(URL ++ "/riak/hb/first/_,_,_", [], get, - [], [{response_format, binary}, {is_ssl, true}, - {ssl_options, [ - {cacertfile, filename:join([CertDir, - "rootCA/cert.pem"])}, - {verify, verify_peer}, - {reuse_sessions, false}]}])), - - lager:info("checking search 1.0 403s because search won't allow" - "connections with security enabled"), - - 
?assertMatch({ok, "403", _, <<"Riak Search 1.0 is deprecated", _/binary>>}, - ibrowse:send_req(URL ++ "/solr/index/select?q=foo:bar&wt=json", [], get, - [], [{response_format, binary}, {is_ssl, true}, - {ssl_options, [ - {cacertfile, filename:join([CertDir, - "rootCA/cert.pem"])}, - {verify, verify_peer}, - {reuse_sessions, false}]}])), - pass. - -enable_ssl(Node) -> - [{http, {IP, Port}}|_] = rt:connection_info(Node), - rt:update_app_config(Node, [{riak_api, [{https, [{IP, - Port+1000}]}]}]), - rt:wait_until_pingable(Node), - rt:wait_for_service(Node, riak_kv). - -map_object_value(RiakObject, A, B) -> - riak_kv_mapreduce:map_object_value(RiakObject, A, B). - -reduce_set_union(List, A) -> - riak_kv_mapreduce:reduce_set_union(List, A). - -mapred_modfun_input(Pipe, _Args, _Timeout) -> - riak_pipe:queue_work(Pipe, {{<<"MR">>, <<"lobster_roll">>}, {struct, []}}), - riak_pipe:eoi(Pipe). - -crdt_tests([Node|_]=Nodes, RHC) -> - Username = "user", - - lager:info("Creating bucket types for CRDTs"), - Types = [{<<"counters">>, counter, riakc_counter:to_op(riakc_counter:increment(5, riakc_counter:new()))}, - {<<"sets">>, set, riakc_set:to_op(riakc_set:add_element(<<"foo">>, riakc_set:new()))}], - [ begin - rt:create_and_activate_bucket_type(Node, BType, [{allow_mult, true}, {datatype, DType}]), - rt:wait_until_bucket_type_status(BType, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, BType) - end || {BType, DType, _Op} <- Types ], - - lager:info("Checking that CRDT fetch is denied"), - - [ ?assertDenied(rhc:fetch_type(RHC, {BType, <<"bucket">>}, <<"key">>)) - || {BType, _, _} <- Types], - - lager:info("Granting CRDT riak_kv.get, checking that fetches succeed"), - - [ grant(Node, ["riak_kv.get", "on", binary_to_list(Type), "to", Username]) || {Type, _, _} <- Types ], - - [ ?assertEqual({error, {notfound, DType}}, - (rhc:fetch_type(RHC, {BType, <<"bucket">>}, <<"key">>))) || - {BType, DType, _, _} <- Types], - - lager:info("Checking that CRDT update is denied"), - - [ ?assertDenied(rhc:update_type(RHC, {BType, <<"bucket">>}, <<"key">>, Op)) - || {BType, _, Op} <- Types], - - - lager:info("Granting CRDT riak_kv.put, checking that updates succeed"), - - [ grant(Node, ["riak_kv.put", "on", binary_to_list(Type), "to", Username]) || {Type, _, _} <- Types ], - - [?assertEqual(ok, (rhc:update_type(RHC, {BType, <<"bucket">>}, <<"key">>, Op))) - || {BType, _, Op} <- Types], - - ok. - -grant(Node, Args) -> - ok = rpc:call(Node, riak_core_console, grant, [Args]). diff --git a/tests/jmx_verify.erl b/tests/jmx_verify.erl deleted file mode 100644 index 731295623..000000000 --- a/tests/jmx_verify.erl +++ /dev/null @@ -1,237 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc Tests if the java VM can be started and polled. 
A config option -%% 'jmx_verify_wait' (or JMX_VERIFY_WAIT if set via os env) delays the -%% initial check of the jvm by that many milliseconds. It defaults to -%% 3000, though most faster machines can set it to 0. --module(jmx_verify). --behavior(riak_test). --export([confirm/0, test_supervision/0]). --include_lib("eunit/include/eunit.hrl"). - --prereq("java"). - -%% You should have curl installed locally to do this. -confirm() -> - test_supervision(), - - test_application_stop(), - - JMXPort = 41111, - Config = [{riak_jmx, [{enabled, true}, {port, JMXPort}]}], - Nodes = rt:deploy_nodes(1, Config), - [Node1] = Nodes, - ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), - - [{http, {IP, _Port}}|_] = rt:connection_info(Node1), - - JMXDumpCmd = jmx_dump_cmd(IP, JMXPort), - - WaitTimeout = case rt_config:config_or_os_env(jmx_verify_wait, 3000) of - N when is_integer(N) -> - N; - N when is_list(N) -> - list_to_integer(N) - end, - timer:sleep(WaitTimeout), - JMX1 = jmx_dump(JMXDumpCmd), - - %% make sure a set of stats have valid values - verify_nz(JMX1, [<<"cpu_nprocs">>, - <<"mem_total">>, - <<"mem_allocated">>, - <<"ring_creation_size">>]), - - lager:info("perform 5 x PUT and a GET to increment the stats"), - lager:info("as the stat system only does calcs for > 5 readings"), - - C = rt:httpc(Node1), - [rt:httpc_write(C, <<"systest">>, <<X>>, <<"12345">>) || X <- lists:seq(1, 5)], - [rt:httpc_read(C, <<"systest">>, <<X>>) || X <- lists:seq(1, 5)], - - JMX2 = jmx_dump(JMXDumpCmd), - %% make sure the stats that were supposed to increment did - verify_inc(JMX1, JMX2, [{<<"node_gets">>, 10}, - {<<"node_puts">>, 5}, - {<<"node_gets_total">>, 10}, - {<<"node_puts_total">>, 5}, - {<<"vnode_gets">>, 30}, - {<<"vnode_puts">>, 15}, - {<<"vnode_gets_total">>, 30}, - {<<"vnode_puts_total">>, 15}]), - - %% verify that fsm times were tallied - verify_nz(JMX2, [<<"node_get_fsm_time_mean">>, - <<"node_get_fsm_time_median">>, - <<"node_get_fsm_time_95">>, - <<"node_get_fsm_time_99">>, - <<"node_get_fsm_time_100">>, - <<"node_put_fsm_time_mean">>, - <<"node_put_fsm_time_median">>, - <<"node_put_fsm_time_95">>, - <<"node_put_fsm_time_99">>, - <<"node_put_fsm_time_100">>]), - - lager:info("Make PBC Connection"), - Pid = rt:pbc(Node1), - - JMX3 = jmx_dump(JMXDumpCmd), - rt:systest_write(Node1, 1), - %% make sure the stats that were supposed to increment did - verify_inc(JMX2, JMX3, [{<<"pbc_connects_total">>, 1}, - {<<"pbc_connects">>, 1}, - {<<"pbc_active">>, 1}]), - - lager:info("Force Read Repair"), - rt:pbc_write(Pid, <<"testbucket">>, <<"1">>, <<"blah!">>), - rt:pbc_set_bucket_prop(Pid, <<"testbucket">>, [{n_val, 4}]), - - JMX4 = jmx_dump(JMXDumpCmd), - - verify_inc(JMX3, JMX4, [{<<"read_repairs_total">>, 0}, - {<<"read_repairs">>, 0}]), - - _Value = rt:pbc_read(Pid, <<"testbucket">>, <<"1">>), - - %%Stats5 = get_stats(Node1), - JMX5 = jmx_dump(JMXDumpCmd), - verify_inc(JMX3, JMX5, [{<<"read_repairs_total">>, 1}, - {<<"read_repairs">>, 1}]), - pass.
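
For reference, the jmx_verify_wait option documented above is read with rt_config:config_or_os_env/2, so it can be supplied either in the riak_test config file or through the JMX_VERIFY_WAIT environment variable. A minimal sketch of a config entry, assuming the usual {ConfigName, Proplist} layout of .riak_test.config; the value shown is illustrative:

    {default, [
        %% milliseconds to wait before the first JMX dump;
        %% fast machines can set this to 0
        {jmx_verify_wait, 0}
    ]}.
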
- -test_supervision() -> - JMXPort = 41111, - Config = [{riak_jmx, [{enabled, true}, {port, JMXPort}]}], - [Node|[]] = rt:deploy_nodes(1, Config), - timer:sleep(20000), - case net_adm:ping(Node) of - pang -> - lager:error("riak_jmx crash able to crash riak node"), - ?assertEqual("riak_jmx crash able to crash riak node", true); - _ -> - yay - end, - - %% Let's make sure the thing's restarting as planned - lager:info("calling riak_jmx:stop() to reset retry counters"), - rpc:call(Node, riak_jmx, stop, ["stopping for test purposes"]), - - lager:info("loading lager backend on node"), - rt:load_modules_on_nodes([riak_test_lager_backend], [Node]), - ok = rpc:call(Node, gen_event, add_handler, [lager_event, riak_test_lager_backend, [info, false]]), - ok = rpc:call(Node, lager, set_loglevel, [riak_test_lager_backend, info]), - - lager:info("Now we're capturing logs on the node, let's start jmx"), - lager:info("calling riak_jmx:start() to get these retries started"), - rpc:call(Node, riak_jmx, start, []), - - lager:info("It can fail, it can fail 10 times"), - - rt:wait_until(retry_check_fun(Node)), - rt:stop(Node), - ok_ok. - -retry_check_fun(Node) -> - fun() -> - Logs = rpc:call(Node, riak_test_lager_backend, get_logs, []), - 10 =:= lists:foldl(log_fold_fun(), 0, Logs) - end. - -log_fold_fun() -> - fun(Log, Sum) -> - try case re:run(Log, "JMX server monitor .* exited with code .*\. Retry #.*", []) of - {match, _} -> 1 + Sum; - _ -> Sum - end - catch - Err:Reason -> - lager:error("jmx supervision re:run failed w/ ~p: ~p", [Err, Reason]), - Sum - end - end. - -test_application_stop() -> - lager:info("Testing application:stop()"), - JMXPort = 41111, - Config = [{riak_jmx, [{enabled, true}, {port, JMXPort}]}], - Nodes = rt:deploy_nodes(1, Config), - [Node] = Nodes, - ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), - - %% Let's make sure the java process is alive! - lager:info("checking for riak_jmx.jar running."), - rt:wait_until(Node, fun(_N) -> - try case re:run(rpc:call(Node, os, cmd, ["ps -Af"]), "riak_jmx.jar", []) of - nomatch -> false; - _ -> true - end - catch - Err:Reason -> - lager:error("jmx stop re:run failed w/ ~p: ~p", [Err, Reason]), - false - end - end), - - rpc:call(Node, riak_jmx, stop, ["Stopping riak_jmx"]), - timer:sleep(20000), - case net_adm:ping(Node) of - pang -> - lager:error("riak_jmx stop takes down riak node"), - ?assertEqual("riak_jmx stop takes down riak node", true); - _ -> - yay - end, - - %% Let's make sure the java process is dead! - lager:info("checking for riak_jmx.jar not running."), - - ?assertEqual(nomatch, re:run(rpc:call(Node, os, cmd, ["ps -Af"]), "riak_jmx.jar", [])), - - rt:stop(Node). - -verify_inc(Prev, Props, Keys) -> - [begin - Old = proplists:get_value(Key, Prev, 0), - New = proplists:get_value(Key, Props, 0), - lager:info("~s: ~p -> ~p (expected ~p)", [Key, Old, New, Old + Inc]), - ?assertEqual({Key, New}, {Key, (Old + Inc)}) - end || {Key, Inc} <- Keys]. - -verify_nz(Props, Keys) -> - [?assertNotEqual({Key, proplists:get_value(Key,Props,0)}, {Key, 0}) || Key <- Keys]. - -jmx_jar_path() -> - %% Find riak_jmx.jar - DepsPath = rt:get_deps(), - Deps = string:tokens(os:cmd("ls " ++ DepsPath), "\n"), - [RiakJMX] = lists:filter(fun(X) -> string:str(X, "riak_jmx") == 1 end, Deps), - filename:join([DepsPath, RiakJMX, "priv", "riak_jmx.jar"]). - -jmx_dump_cmd(IP, Port) -> - io_lib:format("java -cp ~s com.basho.riak.jmx.Dump ~s ~p", - [jmx_jar_path(), IP, Port]). 
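
jmx_dump_cmd/2 above simply interpolates the jar path, host, and port into a fixed java invocation; evaluated in an Erlang shell it would produce something like the following (the deps path is illustrative, since jmx_jar_path/0 resolves it from rt:get_deps()):

    1> lists:flatten(jmx_dump_cmd("127.0.0.1", 41111)).
    "java -cp deps/riak_jmx/priv/riak_jmx.jar com.basho.riak.jmx.Dump 127.0.0.1 41111"
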
- -jmx_dump(Cmd) -> - timer:sleep(40000), %% JMX only updates every 30seconds - lager:info("Dumping JMX stats using command ~s", [Cmd]), - Output = string:strip(os:cmd(Cmd), both, $\n), - JSONOutput = mochijson2:decode(Output), - [ {Key, Value} || {struct, [{Key, Value}]} <- JSONOutput]. diff --git a/tests/kv679_dataloss.erl b/tests/kv679_dataloss.erl deleted file mode 100644 index 500314bac..000000000 --- a/tests/kv679_dataloss.erl +++ /dev/null @@ -1,159 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%%% @copyright (C) 2014, Basho Technologies -%%% @doc -%%% riak_test for kv679 lost clock flavour. -%%% -%%% issue kv679 is a possible dataloss issue, it's basically caused by -%%% the fact that per key logical clocks can go backwards in time in -%%% certain situations. The situation under test here is as follows: -%%% Create value, write N times -%%% Fail to locally read value on coordinate (corruption, error, solar flare) -%%% write new value (new value, new clock!) -%%% replicate new value -%%% replicas drop write as clock is dominated -%%% read repair removes value. Data loss. -%%% @end - --module(kv679_dataloss). --behavior(riak_test). --compile([export_all]). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"kv679">>). --define(KEY, <<"test">>). 
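
confirm/0 below inspects the per-key logical clock several times with the same decode-and-log sequence; a small hypothetical helper capturing that pattern, shown only to make the clock checks easier to follow (every call in it appears verbatim in the test):

    %% Decode an object's opaque vclock on the Riak node and log it.
    dump_vclock(Node, Obj) ->
        Encoded = riakc_obj:vclock(Obj),
        Clock = rpc:call(Node, riak_object, decode_vclock, [Encoded]),
        lager:info("VC ~p~n", [Clock]),
        Clock.
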
- -confirm() -> - Conf = [ - {riak_kv, [{anti_entropy, {off, []}}]}, - {riak_core, [{default_bucket_props, [{allow_mult, true}, - {dvv_enabled, true}]}]}, - {bitcask, [{sync_strategy, o_sync}, {io_mode, nif}]}], - - [Node] = rt:deploy_nodes(1, Conf), - Client = rt:pbc(Node), - riakc_pb_socket:set_options(Client, [queue_if_disconnected]), - - %% Get preflist for key - %% assuming that the head of the preflist on a single node cluster - %% will always coordinate the writes - PL = kv679_tombstone:get_preflist(Node), - - %% Write key some times - write_key(Client, <<"phil">>, []), - - {ok, Bod} = write_key(Client, <<"bob">>, [return_body]), - - lager:info("wrote value <>"), - - VCE0 = riakc_obj:vclock(Bod), - VC0 = rpc:call(Node, riak_object, decode_vclock, [VCE0]), - lager:info("VC ~p~n", [VC0]), - - - %% delete the local data for Key - delete_datadir(hd(PL)), - - {ok, Bod2} = write_key(Client, <<"jon">>, [return_body]), - - VCE1 = riakc_obj:vclock(Bod2), - VC1 = rpc:call(Node, riak_object, decode_vclock, [VCE1]), - lager:info("VC ~p~n", [VC1]), - - - lager:info("wrote value <>"), - - %% At this point, two puts with empty contexts should be siblings - %% due to the data loss at the coordinator we lose the second - %% write - - Res = riakc_pb_socket:get(Client, ?BUCKET, ?KEY, []), - - ?assertMatch({ok, _}, Res), - {ok, O} = Res, - - VCE = riakc_obj:vclock(O), - VC = rpc:call(Node, riak_object, decode_vclock, [VCE]), - lager:info("VC ~p~n", [VC]), - - ?assertEqual([<<"bob">>, <<"jon">>, <<"phil">>], lists:sort(riakc_obj:get_values(O))), - - pass. - -write_key(Client, Val, Opts) -> - write_object(Client, riakc_obj:new(?BUCKET, ?KEY, Val), Opts). - -write_object(Client, Object, Opts) -> - riakc_pb_socket:put(Client, Object, Opts). - -delete_datadir({{Idx, Node}, Type}) -> - lager:info("deleting backend data dir for ~p ~p on ~p", - [Idx, Node, Type]), - %% Get default backend - Backend = rpc:call(Node, app_helper, get_env, [riak_kv, storage_backend]), - - %% get name from mod - BackendName = backend_name_from_mod(Backend), - %% get data root for type - DataRoot = rpc:call(Node, app_helper, get_env, [BackendName, data_root]), - %% get datadir from Idx - Path = filename:join([rtdev:relpath(current), - "dev", - "dev"++ integer_to_list(rtdev:node_id(Node)), - DataRoot, - integer_to_list(Idx)]), - lager:info("Path ~p~n", [Path]), - - %% stop node - rt:stop_and_wait(Node), - - del_dir(Path), - rt:start_and_wait(Node), - rt:wait_for_service(Node, riak_kv). - -backend_name_from_mod(riak_kv_bitcask_backend) -> - bitcask; -backend_name_from_mod(riak_kv_eleveldb_backend) -> - eleveldb. - -del_dir(Dir) -> - lists:foreach(fun(D) -> - ok = file:del_dir(D) - end, del_all_files([Dir], [])). - -del_all_files([], EmptyDirs) -> - EmptyDirs; -del_all_files([Dir | T], EmptyDirs) -> - {ok, FilesInDir} = file:list_dir(Dir), - {Files, Dirs} = lists:foldl(fun(F, {Fs, Ds}) -> - Path = Dir ++ "/" ++ F, - case filelib:is_dir(Path) of - true -> - {Fs, [Path | Ds]}; - false -> - {[Path | Fs], Ds} - end - end, {[],[]}, FilesInDir), - lists:foreach(fun(F) -> - ok = file:delete(F) - end, Files), - del_all_files(T ++ Dirs, [Dir | EmptyDirs]). diff --git a/tests/kv679_tombstone.erl b/tests/kv679_tombstone.erl deleted file mode 100644 index 690e4570e..000000000 --- a/tests/kv679_tombstone.erl +++ /dev/null @@ -1,224 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2014 Basho Technologies, Inc. 
-%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%%% @copyright (C) 2014, Basho Technologies -%%% @doc -%%% riak_test for kv679 doomstone flavour. -%%% -%%% issue kv679 is a possible dataloss issue, it's basically caused by -%%% the fact that per key logical clocks can go backwards in time in -%%% certain situations. The situation under test here is as follows: -%%% Create value -%%% Delete value (write tombstone (including one fallback) reap tombstone from primaries only) -%%% write new value -%%% fallback hands off and the tombstone dominates the new value. -%%% @end - --module(kv679_tombstone). --behavior(riak_test). --compile([export_all]). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"kv679">>). --define(KEY, <<"test">>). - -confirm() -> - Config = [{riak_core, [{ring_creation_size, 8}, - {vnode_management_timer, 1000}, - {handoff_concurrency, 100}, - {vnode_inactivity_timeout, 1000}]}], - - Nodes = rt:build_cluster(4, Config), - - Clients=[P1, _P2, _P3, _P4] = create_pb_clients(Nodes), - - %% Get preflist for key - PL = get_preflist(hd(Nodes)), - - CoordClient = coordinating_client(Clients, PL), - - %% Write key some times - write_key(CoordClient, [<<"bob">>, <<"phil">>, <<"pete">>]), - - lager:info("wrote key thrice"), - - %% %% take a node that is a primary down - {NewPL, DeadPrimary, _} = kill_primary(PL), - - lager:info("killed a primary"), - - %% %% This way a tombstone finds its way to a fallback, to later - %% %% wreak DOOM!!!! - Client = up_client(DeadPrimary, Clients), - delete_key(Client), - - lager:info("deleted key, and have tombstone response"), - - %% %% Take down the fallback - {NewPL2, DeadFallback, _DeadPartition} = kill_fallback(NewPL), - - %% %% Bring the primary back up - _PL3 = start_node(DeadPrimary, NewPL2), - - lager:info("killed the fallback, and restored the primary"), - - %% %% wait for reaping maybe read for vclock ts and wait until there - %% %% is not one - UpClient = up_client(DeadFallback, Clients), - read_it_and_reap(UpClient), - - lager:info("read repaired, and reaped the tombstone"), - - %% %% write the key again, this will start a new clock, a clock - %% that is in the past of that lingering tombstone. We use the - %% same node to get the same clock. - write_key(CoordClient, [<<"jon">>]), - - %% %% bring up that fallback, and wait for it to hand off - start_fallback_and_wait_for_handoff(DeadFallback), - - %% Read twice, just in case (repair, reap.) - Res1 = read_key(P1), - - lager:info("TS? ~p~n", [Res1]), - Res2 = read_key(P1), - - lager:info("res ~p", [Res2]), - - ?assertMatch({ok, _}, Res2), - {ok, Obj} = Res2, - ?assertEqual(<<"jon">>, riakc_obj:get_value(Obj)), - - pass. - -%%% Client/Key ops -create_pb_clients(Nodes) -> - [begin - C = rt:pbc(N), - riakc_pb_socket:set_options(C, [queue_if_disconnected]), - {N, C} - end || N <- Nodes]. 
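
The helpers below operate on two shapes: the {Node, Client} pairs returned by create_pb_clients/1 above, and the annotated preflist returned by get_preflist/1, a list of {{Index, Node}, primary | fallback} entries. For example (node names and indices illustrative, indices abbreviated):

    %% Clients  = [{'dev1@127.0.0.1', Pid1}, {'dev2@127.0.0.1', Pid2}, ...]
    %% Preflist = [{{0,        'dev1@127.0.0.1'}, primary},
    %%             {{22835..., 'dev2@127.0.0.1'}, primary},
    %%             {{45671..., 'dev4@127.0.0.1'}, fallback}]
    %% coordinating_client/2 keyfinds the first primary in the preflist;
    %% kill_and_wait/2 keytakes entries by the primary/fallback tag.
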
- -coordinating_client(Clients, Preflist) -> - {{_, FirstPrimary}, primary} = lists:keyfind(primary, 2, Preflist), - lists:keyfind(FirstPrimary, 1, Clients). - -up_client(DeadNode, Clients) -> - {value, _, LiveClients} = lists:keytake(DeadNode, 1, Clients), - hd(LiveClients). - -write_key(Client, Vals) -> - write_key(Client, Vals, []). - -write_key(_, [], _Opts) -> - ok; -write_key(Client, [Val | Rest], Opts) -> - ok = write_key(Client, Val, Opts), - ok = write_key(Client, Rest, Opts); -write_key({_, Client}, Val, Opts) when is_binary(Val) -> - Object = case riakc_pb_socket:get(Client, ?BUCKET, ?KEY, []) of - {ok, O1} -> - lager:info("writing existing!"), - riakc_obj:update_value(O1, Val); - _ -> - lager:info("writing new!"), - riakc_obj:new(?BUCKET, ?KEY, Val) - end, - riakc_pb_socket:put(Client, Object, Opts). - -read_key({_, Client}) -> - riakc_pb_socket:get(Client, ?BUCKET, ?KEY, []). - -delete_key({_, Client}) -> - riakc_pb_socket:delete(Client, ?BUCKET, ?KEY), - - rt:wait_until(fun() -> - case riakc_pb_socket:get(Client, ?BUCKET, ?KEY, [deletedvclock]) of - {error, notfound, VC} -> - lager:info("TSVC ~p~n", [VC]), - true; - Res -> - lager:info("no ts yet: ~p~n", [Res]), - false - end - end). - -read_it_and_reap({_, Client}) -> - rt:wait_until(fun() -> - case riakc_pb_socket:get(Client, ?BUCKET, ?KEY, [deletedvclock]) of - {error, notfound} -> - true; - Res -> - lager:info("not reaped ts yet: ~p~n", [Res]), - false - end - end). - -%%% Node ops - -start_node(Node, Preflist) -> - rt:start_and_wait(Node), - wait_for_new_pl(Preflist, Node). - -get_preflist(Node) -> - Chash = rpc:call(Node, riak_core_util, chash_key, [{?BUCKET, ?KEY}]), - UpNodes = rpc:call(Node, riak_core_node_watcher, nodes, [riak_kv]), - PL = rpc:call(Node, riak_core_apl, get_apl_ann, [Chash, 3, UpNodes]), - PL. - -kill_primary(Preflist) -> - kill_and_wait(Preflist, primary). - -kill_fallback(Preflist) -> - kill_and_wait(Preflist, fallback). - -kill_and_wait(Preflist, Type) -> - case lists:keytake(Type, 2, Preflist) of - false -> - erlang:error(no_nodes_of_type, [Type, Preflist]); - {value, {{Idx, Node}, Type}, PL2} -> - kill_node(Node), - lager:info("killed ~p~n", [Node]), - [{{_, N2}, _}|_] = PL2, - {wait_for_new_pl(Preflist, N2), Node, Idx} - end. - -kill_node(Node) -> - rt:stop_and_wait(Node). - -wait_for_new_pl(PL, Node) -> - rt:wait_until(fun() -> - NewPL = get_preflist(Node), - lager:info("new ~p~n old ~p~nNode ~p~n", [NewPL, PL, Node]), - NewPL /= PL - end), - get_preflist(Node). - -start_fallback_and_wait_for_handoff(DeadFallback) -> - %% Below is random voodoo shit as I have no idea how to _KNOW_ that handoff has happened - %% whatver, it takes 2 minutes, force_handoff? 2 minutes son! - rt:start_and_wait(DeadFallback), - rpc:call(DeadFallback, riak_core_vnode_manager, force_handoffs, []), - rt:wait_until_transfers_complete([DeadFallback]). - - - - diff --git a/tests/kv679_tombstone2.erl b/tests/kv679_tombstone2.erl deleted file mode 100644 index 3135717e1..000000000 --- a/tests/kv679_tombstone2.erl +++ /dev/null @@ -1,158 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. 
You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%%% @copyright (C) 2014, Basho Technologies -%%% @doc -%%% riak_test for kv679 another doomstone flavour. -%%% -%%% issue kv679 is a possible dataloss issue, it's basically caused by -%%% the fact that per key logical clocks can go backwards in time in -%%% certain situations. The situation under test here is as follows: -%%% Create value. Delete value (write tombstone reap tombstone from -%%% all but one crashed primary). Write new value. Crashed primary -%%% comes back, read repair tombstone dominates the new value. This -%%% test depends on things happening inside a certain time limit, so -%%% technically it is not determenistic. If you think of a better way, -%%% please let me know. -%%% @end - --module(kv679_tombstone2). --behavior(riak_test). --compile([export_all]). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"kv679">>). --define(KEY, <<"test">>). - -confirm() -> - Config = [{riak_kv, [{delete_mode, 10000}]}, %% 10 seconds to reap. - {riak_core, [{ring_creation_size, 8}, - {vnode_management_timer, 1000}, - {handoff_concurrency, 100}, - {vnode_inactivity_timeout, 1000}]}], - - %% 4 'cos I want a perfect preflist - Nodes = rt:build_cluster(4, Config), - - Clients = kv679_tombstone:create_pb_clients(Nodes), - - %% Get preflist for key - PL = kv679_tombstone:get_preflist(hd(Nodes)), - - ?assert(perfect_preflist(PL)), - - %% Patsy is the primary node that will take a fall, where the - %% lingering doomstone will stay - {CoordClient, Patsy} = get_coord_client_and_patsy(Clients, PL), - - lager:info("CoordClient ~p~nPatsy ~p~n", [CoordClient, Patsy]), - - %% Write key some times - kv679_tombstone:write_key(CoordClient, [<<"bob">>, <<"phil">>, <<"pete">>]), - - dump_clock(CoordClient), - - lager:info("wrote key thrice"), - - delete_key(CoordClient), - - lager:info("deleted key"), - - %% kill the patsy, must happen before the reap - rt:brutal_kill(Patsy), - - lager:info("killed the patsy"), - - %% A time to reap wait for the up nodes to reap, can't use - %% kv679_tombstone:read_it_and_reap - timer:sleep(15000), - - lager:info("tombstone (should be) reaped"), - - %% %% write the key again, this will start a new clock, a clock - %% that is in the past of that un-reaped primary tombstone. We use the - %% same node to get the same clock. 
- kv679_tombstone:write_key(CoordClient, [<<"jon">>]), - - dump_clock(CoordClient), - - %% %% Bring the patsy back up, and wait until the preflists are as - %% before - rt:start_and_wait(Patsy), - - rt:wait_until(fun() -> - PL2 = kv679_tombstone:get_preflist(Patsy), - PL == PL2 - end), - - %% Read a few times, just in case (repair, then reap, etc) - Res = [begin - timer:sleep(100), - {I, kv679_tombstone:read_key(CoordClient)} - end || I <- lists:seq(1, 5)], - - lager:info("res ~p", [Res]), - - First = hd(lists:dropwhile(fun({_I, {ok, _}}) -> false; - (_) -> true end, - Res)), - - lager:info("res ~p", [First]), - - %% The last result - {_, Res2} = hd(lists:reverse(Res)), - - ?assertMatch({ok, _}, Res2), - {ok, Obj} = Res2, - ?assertEqual(<<"jon">>, riakc_obj:get_value(Obj)), - - pass. - - -perfect_preflist(PL) -> - %% N=3 primaries, each on a unique node - length(lists:usort([Node || {{_Idx, Node}, Type} <- PL, - Type == primary])) == 3. - -get_coord_client_and_patsy(Clients, PL) -> - {CoordNode, _}=CoordClient=kv679_tombstone:coordinating_client(Clients, PL), - PL2 = [Node || {{_Idx, Node}, Type} <- PL, - Type == primary, - Node /= CoordNode], - {CoordClient, hd(lists:reverse(PL2))}. - -delete_key({_, Client}) -> - {ok, Obj} = riakc_pb_socket:get(Client, ?BUCKET, ?KEY), - riakc_pb_socket:delete_obj(Client, Obj). - -dump_clock({Node, Client}) -> - case riakc_pb_socket:get(Client, ?BUCKET, ?KEY) of - {ok, O} -> - VCE = riakc_obj:vclock(O), - VC = rpc:call(Node, riak_object, decode_vclock, [VCE]), - lager:info("VC ~p~n", [VC]), - NodeId = erlang:crc32(term_to_binary(Node)), - Id = <>, - lager:info("Coord Node ID ~p~n", [Id]); - Res -> - lager:info("no clock in ~p~n", [Res]) - end. - diff --git a/tests/kv679_uid.erl b/tests/kv679_uid.erl deleted file mode 100644 index 6a285413e..000000000 --- a/tests/kv679_uid.erl +++ /dev/null @@ -1,55 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%%% @copyright (C) 2014, Basho Technologies -%%% @doc -%%% riak_test for kv679-ish bug were vnodes on same node get same ID -%%% -%%% @end - --module(kv679_uid). --behavior(riak_test). --compile([export_all]). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"kv679">>). --define(KEY, <<"test">>). - -confirm() -> - [Node] = rt:deploy_nodes(1), - PL = kv679_tombstone:get_preflist(Node), - - %% Get vnodeids for each primary - PartitionIdMap = get_vnodeids(PL, Node), - - lager:info("ids = ~p", [PartitionIdMap]), - %% assert each is unique - {_Idxes, VnodeIds} = lists:unzip(PartitionIdMap), - ?assertEqual(3,length(PartitionIdMap)), - ?assertEqual(3, length(lists:usort(VnodeIds))), - - pass. 
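
get_vnodeids/2 below pulls the vnodeid property out of riak_kv_vnode:vnode_status/1 for each primary, giving one {Index, VnodeId} pair per partition; the two assertions above amount to a uniqueness check over the ids, i.e. (a restatement for clarity, not code from the original test):

    all_unique(Ids) ->
        length(Ids) =:= length(lists:usort(Ids)).
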
- -get_vnodeids(PLAnn, Node) -> - PL = [{Idx, N} || {{Idx, N}, Type} <- PLAnn, - Type == primary], - Statuses = rpc:call(Node, riak_kv_vnode, vnode_status, [PL]), - [{Idx, proplists:get_value(vnodeid, Status)} || {Idx, Status} <- Statuses]. diff --git a/tests/loaded_upgrade.erl b/tests/loaded_upgrade.erl deleted file mode 100644 index 9dd09fbdc..000000000 --- a/tests/loaded_upgrade.erl +++ /dev/null @@ -1,237 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(loaded_upgrade). - --include_lib("eunit/include/eunit.hrl"). - --export([confirm/0]). - --export([kv_valgen/1, bucket/1, erlang_mr/0, int_to_key/1]). - --define(TIME_BETWEEN_UPGRADES, 120). %% Seconds! - -confirm() -> - - case whereis(loaded_upgrade) of - undefined -> meh; - _ -> unregister(loaded_upgrade) - end, - register(loaded_upgrade, self()), - %% Build Cluster - TestMetaData = riak_test_runner:metadata(), - %% Only run 2i for level - Backend = proplists:get_value(backend, TestMetaData), - OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - - Config = [{riak_search, [{enabled, true}]}, {riak_pipe, [{worker_limit, 200}]}], - NumNodes = 4, - Vsns = [{OldVsn, Config} || _ <- lists:seq(1,NumNodes)], - Nodes = rt:build_cluster(Vsns), - - seed_cluster(Nodes), - - %% Now we have a cluster! - %% Let's spawn workers against it. - Concurrent = rt_config:get(load_workers, 10), - - Sups = [{rt_worker_sup:start_link([{concurrent, Concurrent}, - {node, Node}, - {backend, Backend}, - {version, OldVsn}, - {report_pid, self()}]), Node} || Node <- Nodes], - - upgrade_recv_loop(), - - [begin - exit(Sup, normal), - lager:info("Upgrading ~p", [Node]), - rt:upgrade(Node, current), - rt:wait_for_service(Node, [riak_search,riak_kv,riak_pipe]), - {ok, NewSup} = rt_worker_sup:start_link([{concurrent, Concurrent}, - {node, Node}, - {backend, Backend}, - {version, current}, - {report_pid, self()}]), - _NodeMon = init_node_monitor(Node, NewSup, self()), - upgrade_recv_loop() - end || {{ok, Sup}, Node} <- Sups], - pass. - -upgrade_recv_loop() -> - {SMega, SSec, SMicro} = os:timestamp(), - EndSecs = SSec + ?TIME_BETWEEN_UPGRADES, - EndTime = case EndSecs > 1000000 of - true -> - {SMega + 1, EndSecs - 1000000, SMicro}; - _ -> - {SMega, EndSecs, SMicro} - end, - upgrade_recv_loop(EndTime). 
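
The end-time arithmetic above exists because os:timestamp() returns {MegaSecs, Secs, MicroSecs}, so adding ?TIME_BETWEEN_UPGRADES seconds has to carry into MegaSecs once Secs passes 1,000,000. A compact equivalent, included only to make the carry explicit:

    %% e.g. add_seconds({1440, 999950, 0}, 120) -> {1441, 70, 0}
    add_seconds({Mega, Sec, Micro}, N) ->
        Total = Sec + N,
        case Total >= 1000000 of
            true  -> {Mega + 1, Total - 1000000, Micro};
            false -> {Mega, Total, Micro}
        end.
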
- -%% TODO: Collect error message counts in ets table -upgrade_recv_loop(EndTime) -> - Now = os:timestamp(), - case Now > EndTime of - true -> - lager:info("Done waiting 'cause ~p > ~p", [Now, EndTime]); - _ -> - receive - {mapred, Node, bad_result} -> - ?assertEqual(true, {mapred, Node, bad_result}); - {kv, Node, not_equal} -> - ?assertEqual(true, {kv, Node, bad_result}); - {kv, Node, {notfound, Key}} -> - ?assertEqual(true, {kv, Node, {notfound, Key}}); - {listkeys, Node, not_equal} -> - ?assertEqual(true, {listkeys, Node, not_equal}); - {search, Node, bad_result} -> - ?assertEqual(true, {search, Node, bad_result}); - Msg -> - lager:debug("Received Mesg ~p", [Msg]), - upgrade_recv_loop(EndTime) - after timer:now_diff(EndTime, Now) div 1000 -> - lager:info("Done waiting 'cause ~p is up", [?TIME_BETWEEN_UPGRADES]) - end - end. - -seed_cluster(Nodes=[Node1|_]) -> - lager:info("Seeding Cluster"), - - %% For List Keys - lager:info("Writing 100 keys to ~p", [Node1]), - rt:systest_write(Node1, 100, 3), - ?assertEqual([], rt:systest_read(Node1, 100, 1)), - - seed(Node1, 0, 100, fun(Key) -> - Bin = iolist_to_binary(io_lib:format("~p", [Key])), - riakc_obj:new(<<"objects">>, Bin, Bin) - end), - - %% For KV - kv_seed(Node1), - - %% for 2i - twoi_seed(Node1), - - %% for mapred - mr_seed(Node1), - - %% For MC Serch - rt:enable_search_hook(Node1, bucket(search)), - rt:wait_until_ring_converged(Nodes), - seed_search(Node1). - -%% Buckets -bucket(kv) -> <<"utest">>; -bucket(twoi) -> <<"2ibuquot">>; -bucket(mapred) -> <<"bryanitbs">>; -bucket(search) -> <<"scotts_spam">>. - -seed_search(Node) -> - Pid = rt:pbc(Node), - SpamDir = rt_config:get(spam_dir), - Files = case SpamDir of - undefined -> undefined; - _ -> filelib:wildcard(SpamDir ++ "/*") - end, - seed_search(Pid, Files), - riakc_pb_socket:stop(Pid). - -seed_search(_Pid, []) -> ok; -seed_search(Pid, [File|Files]) -> - Key = list_to_binary(filename:basename(File)), - rt:pbc_put_file(Pid, bucket(search), Key, File), - seed_search(Pid, Files). - -kv_seed(Node) -> - ValFun = fun(Key) -> - riakc_obj:new(bucket(kv), iolist_to_binary(io_lib:format("~p", [Key])), kv_valgen(Key)) - end, - seed(Node, 0, 7999, ValFun). - -kv_valgen(Key) -> - term_to_binary(lists:seq(0, Key)). - -int_to_key(KInt) -> - list_to_binary(["", integer_to_list(KInt)]). - -%% Every 2i seeded object will have indexes -%% int_plusone -> [Key + 1, Key + 10000] -%% bin_plustwo -> [<<"Key + 2">>] -twoi_seed(Node) -> - ValFun = fun(Key) -> - Obj = riakc_obj:new(bucket(twoi), iolist_to_binary(io_lib:format("~p", [Key])), kv_valgen(Key)), - MD1 = riakc_obj:get_update_metadata(Obj), - MD2 = riakc_obj:set_secondary_index(MD1, [ - {{integer_index, "plusone"}, [Key + 1, Key + 10000]}, - {{binary_index, "plustwo"}, [int_to_key(Key + 2)]} - ]), - riakc_obj:update_metadata(Obj, MD2) - end, - seed(Node, 0, 7999, ValFun). - -erlang_mr() -> - [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, false}, - {reduce, {modfun, riak_kv_mapreduce, reduce_count_inputs}, none, true}]. - -mr_seed(Node) -> - %% to be used along with sequential_int keygen to populate known - %% mapreduce set - ValFun = fun(Key) -> - Value = iolist_to_binary(io_lib:format("~p", [Key])), - riakc_obj:new(bucket(mapred), Value, Value) - end, - seed(Node, 0, 9999, ValFun). - -seed(Node, Start, End, ValFun) -> - PBC = rt:pbc(Node), - - [ begin - Obj = ValFun(Key), - riakc_pb_socket:put(PBC, Obj, [{w,3}]) - end || Key <- lists:seq(Start, End)], - - riakc_pb_socket:stop(PBC). 
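
Working through twoi_seed/1 above for a concrete key makes the index layout plain; for Key = 5 the seeded object carries (derived directly from the code and the comment above it):

    %% {{integer_index, "plusone"}, [6, 10005]}    %% Key + 1, Key + 10000
    %% {{binary_index,  "plustwo"}, [<<"7">>]}     %% int_to_key(Key + 2)
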
- -%% =================================================================== -%% Monitor nodes after they upgrade -%% =================================================================== -init_node_monitor(Node, Sup, TestProc) -> - spawn_link(fun() -> node_monitor(Node, Sup, TestProc) end). - -node_monitor(Node, Sup, TestProc) -> - lager:info("Monitoring node ~p to make sure it stays up.", [Node]), - erlang:process_flag(trap_exit, true), - erlang:monitor_node(Node, true), - node_monitor_loop(Node, Sup, TestProc). - -node_monitor_loop(Node, Sup, TestProc) -> - receive - {nodedown, Node} -> - lager:error("Node ~p exited after upgrade!", [Node]), - exit(Sup, normal), - ?assertEqual(nodeup, {nodedown, Node}); - {'EXIT', TestProc, _} -> - erlang:monitor_node(Node, false), - ok; - Other -> - lager:warn("Node monitor for ~p got unknown message ~p", [Node, Other]), - node_monitor_loop(Node, Sup, TestProc) - end. diff --git a/tests/loaded_upgrade_worker_sup.erl b/tests/loaded_upgrade_worker_sup.erl deleted file mode 100644 index 3e62c440c..000000000 --- a/tests/loaded_upgrade_worker_sup.erl +++ /dev/null @@ -1,248 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(loaded_upgrade_worker_sup). - --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --behavior(supervisor). - -%% API --export([assert_equal/2]). - --export([list_keys_tester/5, kv_tester/5, mapred_tester/5, - twoi_tester/5, search_tester/5, tester_start_link/4]). - --export([init/1]). --export([start_link/5]). - -%% Helper macro for declaring children of supervisor --define(CHILD(Name, FunName, Node, Vsn, ReportPid), { - list_to_atom(atom_to_list(Name) ++ "_" ++ atom_to_list(FunName)), - { ?MODULE, - tester_start_link, - [FunName, Node, Vsn, ReportPid]}, - permanent, 5000, worker, [?MODULE]}). - -start_link(Name, Node, Backend, Vsn, ReportPid) -> - supervisor:start_link(?MODULE, [Name, Node, Backend, Vsn, ReportPid]). - -init([Name, Node, Backend, Vsn, ReportPid]) -> - rt:wait_for_service(Node, [riak_search,riak_kv,riak_pipe]), - - ChildSpecs1 = [ - ?CHILD(Name, FunName, Node, Vsn, ReportPid) - || FunName <- [list_keys_tester, kv_tester, search_tester]], - - ChildSpecs = case Backend of - eleveldb -> - [?CHILD(Name, twoi_tester, Node, Vsn, ReportPid) | ChildSpecs1]; - _ -> ChildSpecs1 - end, - {ok, {{one_for_one, 1000, 60}, ChildSpecs}}. - - -%%%=================================================================== -%%% Internal functions -%%%=================================================================== - -tester_start_link(Function, Node, Vsn, ReportPid) -> - {ok, spawn_link(?MODULE, Function, [Node, 0, undefined, Vsn, ReportPid])}. 
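
For clarity, the ?CHILD macro above yields one permanent worker per tester function; ?CHILD(node1, kv_tester, Node, Vsn, Pid) expands to roughly:

    {node1_kv_tester,
     {loaded_upgrade_worker_sup, tester_start_link,
      [kv_tester, Node, Vsn, Pid]},
     permanent, 5000, worker, [loaded_upgrade_worker_sup]}
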
- -list_keys_tester(Node, Count, Pid, Vsn, ReportPid) -> - PBC = pb_pid_recycler(Pid, Node), - case riakc_pb_socket:list_keys(PBC, <<"objects">>) of - {ok, Keys} -> - ActualKeys = lists:usort(Keys), - ExpectedKeys = lists:usort([loaded_upgrade:int_to_key(K) || K <- lists:seq(0, 100)]), - case assert_equal(ExpectedKeys, ActualKeys) of - true -> cool; - _ -> ReportPid ! {listkeys, Node, not_equal} - end; - {error, timeout} -> - ReportPid ! {listkeys, Node, timeout}; - {error, {timeout, _}} -> - ReportPid ! {listkeys, Node, timeout}; - Unexpected -> - ReportPid ! {listkeys, Node, Unexpected} - end, - list_keys_tester(Node, Count + 1, PBC, Vsn, ReportPid). - -kv_tester(Node, Count, Pid, Vsn, ReportPid) -> - PBC = pb_pid_recycler(Pid, Node), - Key = Count rem 8000, - case riakc_pb_socket:get(PBC, loaded_upgrade:bucket(kv), loaded_upgrade:int_to_key(Key)) of - {ok, Val} -> - case loaded_upgrade:kv_valgen(Key) == riakc_obj:get_value(Val) of - true -> cool; - _ -> ReportPid ! {kv, Node, not_equal} - end; - {error, disconnected} -> - ok; - {error, notfound} -> - ReportPid ! {kv, Node, {notfound, Key}}; - Unexpected -> - ReportPid ! {kv, Node, Unexpected} - end, - kv_tester(Node, Count + 1, PBC, Vsn, ReportPid). - -mapred_tester(Node, Count, Pid, Vsn, ReportPid) -> - PBC = pb_pid_recycler(Pid, Node), - case riakc_pb_socket:mapred(PBC, loaded_upgrade:bucket(mapred), loaded_upgrade:erlang_mr()) of - {ok, [{1, [10000]}]} -> - ok; - {ok, R} -> - lager:warning("Bad MR result: ~p", [R]), - ReportPid ! {mapred, Node, bad_result}; - {error, disconnected} -> - ok; - %% Finkmaster Flex says timeouts are ok - {error, timeout} -> - ok; - {error, {timeout, _}} -> - ok; - {error, <<"{\"phase\":\"listkeys\",\"error\":\"{badmatch,{'EXIT',noproc}}", _/binary>>} -> - ok; - {error, <<"{\"phase\":\"listkeys\",\"error\":\"{badmatch,{'EXIT',timeout}}", _/binary>>} -> - ok; - {error, <<"{\"phase\":0,\"error\":\"badarg", _/binary>>} -> - ok; - {error, <<"{\"phase\":0,\"error\":\"[preflist_exhausted]", _/binary>>} -> - ok; - {error, <<"{\"phase\":0,\"error\":\"{badmatch,{'EXIT',timeout}}", _/binary>>} -> - ok; - {error, <<"{\"phase\":\"listkeys\",\"error\":\"function_clause\",\"input\":\"{cover,", _/binary>>} -> - ok; - {error, <<"{\"phase\":\"listkeys\",\"error\":\"badarg\",\"input\":\"{cover,", _/binary>>} -> - ok; - {error, <<"Error processing stream message: exit:{ucs,{bad_utf8_character_code}}:[{xmerl_ucs,", _/binary>>} -> - ok; - {error, <<"{\"phase\":0,\"error\":\"[{vnode_down,{shutdown,{gen_fsm,sync_send_event,", _/binary>>} -> - ok; - {error, <<"{\"phase\":0,\"error\":\"[{vnode_down,noproc}]", _/binary>>} -> - ok; - Unexpected -> - ReportPid ! {mapred, Node, Unexpected} - end, - mapred_tester(Node, Count + 1, PBC, Vsn, ReportPid). 
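
The happy-path match in mapred_tester/5 follows from the seed data in loaded_upgrade:

    %% Why {ok, [{1, [10000]}]}: mr_seed/1 writes keys 0..9999 into
    %% bucket(mapred), and the reduce_count_inputs phase of erlang_mr/0
    %% (phase index 1, keep = true) counts those 10000 inputs.
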
- -twoi_tester(Node, 0, undefined, legacy, ReportPid) -> - lager:warning("Legacy nodes do not have 2i load applied"), - twoi_tester(Node, 1, undefined, legacy, ReportPid); -twoi_tester(Node, Count, Pid, legacy, ReportPid) -> - twoi_tester(Node, Count + 1, Pid, legacy, ReportPid); -twoi_tester(Node, Count, Pid, Vsn, ReportPid) -> - PBC = pb_pid_recycler(Pid, Node), - Key = Count rem 8000, - ExpectedKeys = [loaded_upgrade:int_to_key(Key)], - case { - riakc_pb_socket:get_index( - PBC, - loaded_upgrade:bucket(twoi), - {binary_index, "plustwo"}, - loaded_upgrade:int_to_key(Key + 2)), - riakc_pb_socket:get_index( - PBC, - loaded_upgrade:bucket(twoi), - {integer_index, "plusone"}, - Key + 1) - } of - {{ok, ?INDEX_RESULTS{keys=BinKeys}}, {ok, ?INDEX_RESULTS{keys=IntKeys}}} -> - case {assert_equal(ExpectedKeys, BinKeys), assert_equal(ExpectedKeys, IntKeys)} of - {true, true} -> cool; - {false, false} -> - ReportPid ! {twoi, Node, bolth_no_match}; - {false, true} -> - ReportPid ! {twoi, Node, bin_no_match}; - {true, false} -> - ReportPid ! {twoi, Node, int_no_match} - end; - {{error, Reason}, _} -> - ReportPid ! {twoi, Node, {error, Reason}}; - {_, {error, Reason}} -> - ReportPid ! {twoi, Node, {error, Reason}}; - Unexpected -> - ReportPid ! {twoi, Node, Unexpected} - end, - twoi_tester(Node, Count + 1, PBC, Vsn, ReportPid). - -search_tester(Node, Count, Pid, Vsn, ReportPid) -> - PBC = pb_pid_recycler(Pid, Node), - {Term, Size} = search_check(Count), - case riakc_pb_socket:search(PBC, loaded_upgrade:bucket(search), Term) of - {ok, Result} -> - case Size == Result#search_results.num_found of - true -> ok; - _ -> - lager:warning("Bad search result: ~p Expected: ~p", [Result#search_results.num_found, Size]), - ReportPid ! {search, Node, bad_result} - end; - {error, disconnected} -> - %% oh well, reconnect - ok; - - {error, <<"Error processing incoming message: throw:{timeout,range_loop}:[{riak_search_backend", _/binary>>} -> - case rt:is_mixed_cluster(Node) of - true -> - ok; - _ -> - ReportPid ! {search, Node, {timeout, range_loop}} - end; - - {error,<<"Error processing incoming message: error:{case_clause,", _/binary>>} -> - %% although it doesn't say so, this is the infamous badfun - case rt:is_mixed_cluster(Node) of - true -> - ok; - _ -> - ReportPid ! {search, Node, {error, badfun}} - end; - Unexpected -> - ReportPid ! {search, Node, Unexpected} - end, - search_tester(Node, Count + 1, PBC, Vsn, ReportPid). - -search_check(Count) -> - case Count rem 6 of - 0 -> { <<"mx.example.net">>, 187}; - 1 -> { <<"ZiaSun">>, 1}; - 2 -> { <<"headaches">>, 4}; - 3 -> { <<"YALSP">>, 3}; - 4 -> { <<"mister">>, 0}; - 5 -> { <<"prohibiting">>, 5} - end. - -assert_equal(Expected, Actual) -> - case Expected -- Actual of - [] -> ok; - Diff -> lager:info("Expected -- Actual: ~p", [Diff]) - end, - Actual == Expected. - -pb_pid_recycler(undefined, Node) -> - rt:pbc(Node); -pb_pid_recycler(Pid, Node) -> - case riakc_pb_socket:is_connected(Pid) of - true -> - Pid; - _ -> - riakc_pb_socket:stop(Pid), - rt:pbc(Node) - end. diff --git a/tests/mapred_basic_compat.erl b/tests/mapred_basic_compat.erl deleted file mode 100644 index 0aa0f202f..000000000 --- a/tests/mapred_basic_compat.erl +++ /dev/null @@ -1,262 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. 
You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify some MapReduce internals. -%% -%% This test used to be in riak_kv's test/mapred_test.erl. It was -%% called `compat_basic1_test_'. It has been moved here to avoid the -%% fragile setup and teardown stages that frequently broke eunit -%% testing. --module(mapred_basic_compat). --behavior(riak_test). --export([ - %% riak_test api - confirm/0, - - %% test helpers - inputs_gen_seq/3, - inputs_gen_bkeys_1/3 - ]). --compile([export_all]). %% because we call ?MODULE:TestName --include_lib("eunit/include/eunit.hrl"). - --define(INTS_BUCKET, <<"foonum">>). --define(LINK_BUCKET, <<"link bucket">>). --define(BUCKET_TYPE, <<"mytype">>). - -confirm() -> - Nodes = rt:build_cluster(3), - - [Node1|_] = Nodes, - %% create a new type - rt:create_and_activate_bucket_type(Node1, ?BUCKET_TYPE, [{n_val, 3}]), - rt:wait_until_bucket_type_status(?BUCKET_TYPE, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, ?BUCKET_TYPE), - - load_test_data(Nodes), - rt:load_modules_on_nodes([?MODULE], Nodes), - - [ begin - lager:info("Running test ~p", [T]), - ?MODULE:T(Nodes) - end - || T <- [empty_query, - reduce_zero_inputs, - keep_both, - keep_neither, - keep_first_only, - keep_second_only, - explicity_rereduce, - error_not_found_propagation, - basic_link, - link_not_found, - keydata, - key_filters, - map_output_with_btype, - modfun_generator1, - modfun_generator2] ], - - pass. - -load_test_data([Node|_]) -> - %% creates foonum/1..5 - this is what populates ?INTS_BUCKET - lager:info("Filling INTS_BUCKET (~s)", [?INTS_BUCKET]), - ok = rpc:call(Node, riak_kv_mrc_pipe, example_setup, []), - - lager:info("Adding Link object"), - Obj = riakc_obj:new(?LINK_BUCKET, - <<"yo">>, - <<"link val">>, - "text/plain"), - MD = riakc_obj:add_link( - riakc_obj:get_update_metadata(Obj), - [{<<"link 1">>, [{?LINK_BUCKET, <<"nokey-1">>}]}, - {<<"link 2">>, [{?LINK_BUCKET, <<"nokey-2">>}]}]), - - C = rt:pbc(Node), - ok = riakc_pb_socket:put(C, riakc_obj:update_metadata(Obj, MD)), - - %% Some bucket type entries {mytype,foonum}/bar{1..10} - [begin - K = list_to_binary("bar"++integer_to_list(N)), - V = list_to_binary(integer_to_list(N)), - O = riakc_obj:new({?BUCKET_TYPE, ?INTS_BUCKET}, K, V), - riakc_pb_socket:put(C, O) - end || N <- lists:seq(1,10)], - riakc_pb_socket:stop(C). - -rpcmr(Node, Inputs, Query) -> - rpc:call(Node, riak_kv_mrc_pipe, mapred, [Inputs, Query]). - -%% @doc This will trigger a traversal of IntsBucket, but because the -%% query is empty, the MapReduce will traverse the bucket and send -%% BKeys down the pipe. -empty_query([Node|_]) -> - {ok, BKeys} = rpcmr(Node, ?INTS_BUCKET, []), - ?assertEqual(5, length(BKeys)), - ?assertEqual({?INTS_BUCKET, <<"bar1">>}, hd(lists:sort(BKeys))). - -%% @doc AZ 479: Reduce with zero inputs -> call reduce once w/empty list -reduce_zero_inputs([Node|_]) -> - Spec = [{reduce, {modfun, riak_kv_mapreduce, reduce_sum}, none, true}], - ?assertEqual({ok, [0]}, rpcmr(Node, [], Spec)). 
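
As the comment above notes, a reduce phase with zero inputs still calls the reduce function once with an empty list, and summing nothing yields zero, hence the [0]. In riak_kv_mapreduce terms, and assuming the usual (Values, Arg) reduce-function signature, that is:

    %% riak_kv_mapreduce:reduce_sum([], none) =:= [0]
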
- -%% @doc Basic compatibility: keep both stages -keep_both([Node|_]) -> - Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, true}, - {reduce, {modfun, riak_kv_mapreduce, reduce_sum}, none, true}], - {ok, [MapRs, ReduceRs]} = rpcmr(Node, ?INTS_BUCKET, Spec), - ?assertEqual(5, length(MapRs)), - ?assertEqual([15], ReduceRs). - -%% @doc Basic compat: keep neither stages -> no output -keep_neither([Node|_]) -> - Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, false}, - {reduce, {modfun, riak_kv_mapreduce, reduce_sum}, none, false}], - %% "Crazy" semantics: if only 1 keeper stage, then - %% return List instead of [List]. - ?assertEqual({ok, []}, rpcmr(Node, ?INTS_BUCKET, Spec)). - -%% @doc Basic compat: keep first stage only, want 'crazy' result", -keep_first_only([Node|_]) -> - Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, true}, - {reduce, {modfun, riak_kv_mapreduce, reduce_sum}, none, false}], - %% "Crazy" semantics: if only 1 keeper stage, then - %% return List instead of [List]. - {ok, MapRs} = rpcmr(Node, ?INTS_BUCKET, Spec), - ?assertEqual(5, length(MapRs)). - -%% @doc Basic compat: keep second stage only, want 'crazy' result -keep_second_only([Node|_]) -> - Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, false}, - {reduce, {modfun, riak_kv_mapreduce, reduce_sum}, none, true}], - %% "Crazy" semantics: if only 1 keeper stage, then - %% return List instead of [List]. - ?assertEqual({ok, [15]}, rpcmr(Node, ?INTS_BUCKET, Spec)). - -%% @doc Explicit rereduce -explicity_rereduce([Node|_]) -> - RedSpec = {reduce, {modfun, riak_kv_mapreduce, reduce_sum}, none, true}, - Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, true}] - ++ lists:duplicate(5, RedSpec), - ?assertMatch({ok, [_, [15],[15],[15],[15],[15]]}, - rpcmr(Node, ?INTS_BUCKET, Spec)). - -%% @doc Make certain that {error, not_found} goes down the pipe from a -%% map phase. -error_not_found_propagation([Node|_]) -> - Inputs = [{<<"no-such-bucket">>, <<"no-such-key!">>}], - Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value}, - {struct,[{<<"sub">>,[<<"0">>]}]}, false}, - {reduce, {modfun, riak_kv_mapreduce, reduce_string_to_integer}, - none,true}], - ?assertEqual({ok, [0]}, rpcmr(Node, Inputs, Spec)), - B = {?BUCKET_TYPE, ?INTS_BUCKET}, - Inputs2 = [{{B, <<"nokey">>}, undefined}], - Spec2 = [{map, {modfun, riak_kv_mapreduce, map_object_value}, - {struct,[{<<"sub">>,[<<"0">>]}]}, false}, - {reduce, {modfun, riak_kv_mapreduce, reduce_string_to_integer}, - none,true}], - ?assertEqual({ok, [0]}, rpcmr(Node, Inputs2, Spec2)). - -%% @doc A map phase outputting a 4 tuple can feed objects to another map phase -map_output_with_btype([Node|_]) -> - %% Translates from regular bucket to bucket type one - Inputs = ?INTS_BUCKET, - Spec = [{map, {jsanon, <<"function(o){return[[o.bucket,o.key,null,\"mytype\"]];}">>}, undefined, false}, - {map, {modfun, riak_kv_mapreduce, map_object_value}, undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, reduce_string_to_integer}, undefined, false}, - {reduce, {modfun, riak_kv_mapreduce, reduce_sort}, undefined, true} - ], - ?assertEqual({{ok, lists:seq(1,5)}, {Inputs, Spec}}, - {rpcmr(Node, Inputs, Spec), {Inputs, Spec}}). - -%% @doc Basic link phase -basic_link([Node|_]) -> - Spec = [{link, '_', <<"link 1">>, true}], - ?assertEqual({ok, [ [?LINK_BUCKET, <<"nokey-1">>, <<"link 1">>] ]}, - rpcmr(Node, ?LINK_BUCKET, Spec)). 
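
A link phase spec is {link, Bucket, Tag, Keep}, with '_' acting as a wildcard, so the spec above follows only links tagged <<"link 1">> regardless of bucket, and each matched link is emitted as a [Bucket, Key, Tag] triple, exactly as asserted:

    %% {link, '_', <<"link 1">>, true}
    %%   -> [[<<"link bucket">>, <<"nokey-1">>, <<"link 1">>]]
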
-
-%% @doc Link phase + notfound
-link_not_found([Node|_]) ->
-    Inputs = [{<<"no">>, K} || K <- [<<"no1">>, <<"no2">>]],
-    Spec = [{link, '_', '_', true}],
-    ?assertEqual({ok, []}, rpcmr(Node, Inputs, Spec)).
-
-%% @doc KeyData
-keydata([Node|_]) ->
-    UnMap = fun(O, undefined, _) ->
-                    [{riak_object:bucket(O),
-                      riak_object:key(O)}];
-               (O, KeyData, _) ->
-                    [{{riak_object:bucket(O),
-                       riak_object:key(O)},
-                      KeyData}]
-            end,
-    Normalize = fun({{B,K},D}) -> {{B,K},D};
-                   ({B,K}) -> {B,K};
-                   ([B,K]) -> {B,K};
-                   ([B,K,D]) -> {{B,K},D}
-                end,
-    Spec = [{map, {qfun, UnMap}, none, true}],
-    Inputs = [{?INTS_BUCKET, <<"bar1">>},
-              {{?INTS_BUCKET, <<"bar2">>}, <<"keydata works">>},
-              [?INTS_BUCKET, <<"bar3">>],
-              [?INTS_BUCKET, <<"bar4">>, <<"keydata still works">>]],
-    {ok, Results} = rpcmr(Node, Inputs, Spec),
-    SortedNormal = lists:sort([ Normalize(I) || I <- Inputs ]),
-    ?assertEqual(SortedNormal, lists:sort(Results)).
-
-%% @doc Key Filters
-key_filters([Node|_]) ->
-    %% filter should match only "bar4" key
-    Inputs = {?INTS_BUCKET, [[<<"ends_with">>, <<"r4">>]]},
-    Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, true}],
-    ?assertEqual({ok, [4]}, rpcmr(Node, Inputs, Spec)).
-
-%% @doc modfun for inputs generator
-modfun_generator1([Node|_]) ->
-    Inputs = {modfun, ?MODULE, inputs_gen_seq, 6},
-    Spec = [{reduce, {modfun, riak_kv_mapreduce, reduce_sum},none,true}],
-    ?assertEqual({ok, [21]}, rpcmr(Node, Inputs, Spec)).
-
-%% @doc runs on riak node
-inputs_gen_seq(Pipe, Max, _Timeout) ->
-    [riak_pipe:queue_work(Pipe, X) || X <- lists:seq(1, Max)],
-    riak_pipe:eoi(Pipe),
-    ok.
-
-%% @doc modfun for inputs generator: make BKeys for conventional phases
-modfun_generator2([Node|_]) ->
-    Inputs = {modfun, ?MODULE, inputs_gen_bkeys_1, {?INTS_BUCKET, 1, 5}},
-    Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value},
-             none, false},
-            {reduce, {modfun, riak_kv_mapreduce, reduce_string_to_integer},
-             none,false},
-            {reduce, {modfun, riak_kv_mapreduce, reduce_sum},
-             none,true}],
-    ?assertEqual({ok, [15]}, rpcmr(Node, Inputs, Spec)).
-
-%% @doc runs on riak node
-inputs_gen_bkeys_1(Pipe, {Bucket, Start, End}, _Timeout) ->
-    BKeys = [{Bucket, list_to_binary("bar"++integer_to_list(X))} ||
-                X <- lists:seq(Start, End)],
-    [riak_pipe:queue_work(Pipe, BK) || BK <- BKeys],
-    riak_pipe:eoi(Pipe),
-    ok.
diff --git a/tests/mapred_buffer_prereduce.erl b/tests/mapred_buffer_prereduce.erl
deleted file mode 100644
index 9356dbc34..000000000
--- a/tests/mapred_buffer_prereduce.erl
+++ /dev/null
@@ -1,75 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%% @doc Verify some MapReduce internals.
-%%
-%% This test used to be in riak_kv's test/mapred_test.erl. It was
-%% called `compat_buffer_and_prereduce_test_'. It has been moved here
-%% to avoid the fragile setup and teardown stages that frequently
-%% broke eunit testing.
--module(mapred_buffer_prereduce).
--behavior(riak_test).
--export([
-         %% riak_test api
-         confirm/0
-        ]).
--include_lib("eunit/include/eunit.hrl").
-
--define(INTS_BUCKET, <<"foonum">>).
--define(NUM_INTS, 1000).
-
-confirm() ->
-    Nodes = rt:build_cluster(3),
-
-    load_test_data(Nodes),
-
-    [ begin
-          lager:info("Running test ~s (m:~p, r:~p)",
-                     [T, M, R]),
-          test_batch(Nodes, M, R)
-      end
-      || {T, M, R} <- [{"default", none, none},
-                       {"reduce batch 10", none,
-                        [{reduce_phase_batch_size, 10}]},
-                       {"reduce batch 0", none,
-                        [{reduce_phase_batch_size, 0}]},
-                       {"reduce only once", none,
-                        [reduce_phase_only_1]},
-                       {"prereduce batch 7",
-                        [do_prereduce, reduce_phase_only_1],
-                        [{reduce_phase_batch_size, 7}]}] ],
-    pass.
-
-load_test_data([Node|_]) ->
-    %% creates foonum/1..?NUM_INTS - this is what populates ?INTS_BUCKET
-    lager:info("Filling INTS_BUCKET (~s)", [?INTS_BUCKET]),
-    ok = rpc:call(Node, riak_kv_mrc_pipe, example_setup, [?NUM_INTS]).
-
-rpcmr(Node, Inputs, Query) ->
-    rpc:call(Node, riak_kv_mrc_pipe, mapred, [Inputs, Query]).
-
-test_batch([Node|_], MapArg, ReduceArg) ->
-    Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value},
-             MapArg, true},
-            {reduce, {modfun, riak_kv_mapreduce, reduce_sum},
-             ReduceArg, true}],
-    {ok, [MapRs, ReduceRs]} = rpcmr(Node, ?INTS_BUCKET, Spec),
-    ExpectR = (?NUM_INTS * (?NUM_INTS+1)) div 2,
-    ?assertEqual([ExpectR], ReduceRs),
-    ?assertEqual(?NUM_INTS, length(MapRs)).
diff --git a/tests/mapred_dead_pipe.erl b/tests/mapred_dead_pipe.erl
deleted file mode 100644
index b60728d46..000000000
--- a/tests/mapred_dead_pipe.erl
+++ /dev/null
@@ -1,130 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%% @doc Verify some MapReduce internals.
-%%
-%% This test used to be in riak_kv's test/mapred_test.erl. It was
-%% called `dead_pipe_test_'. It has been moved here to avoid the
-%% fragile setup and teardown stages that frequently broke eunit
-%% testing.
--module(mapred_dead_pipe).
--behavior(riak_test).
--export([
-         %% riak_test api
-         confirm/0
-        ]).
--compile([export_all]). %% because we call ?MODULE:TestName
--include_lib("eunit/include/eunit.hrl").
--include("rt_pipe.hrl").
-
--define(INTS_BUCKET, <<"foonum">>).
--define(NUM_INTS, 5).
--define(JS_BUCKET, <<"jsfuns">>).
--define(NOTFOUND_BKEY, {<<"does not">>, <<"exist">>}).
--define(MAP_JS, <<"function(v) { return [v.values[0].data]; }">>).
--define(REDUCE_JS, <<"function(v) {
-                          Sum = function(A, B) { return A+B; };
-                          return [ v.reduce(Sum) ];
-                      }">>).
- -confirm() -> - Nodes = rt:build_cluster(3), - - %% to pick up fake_builder/1 - rt:load_modules_on_nodes([?MODULE], Nodes), - - [ begin - lager:info("Running test ~p", [T]), - ?MODULE:T(Nodes) - end - || T<- [synchronous, - asynchronous] ], - pass. - -%% @doc Start and kill the pipe -start_dead_pipe(Node) -> - Spec = [{map, {modfun, riak_kv_mapreduce, map_object_value}, none, true}], - {{ok, Pipe}, _NumKeeps} = - rpc:call(Node, riak_kv_mrc_pipe, mapred_stream, [Spec]), - riak_pipe:destroy(Pipe), - Pipe. - -%% @doc Verify that sending inputs to a pipe that has already stopped -%% raises an error (synchronous send) -synchronous([Node|_]) -> - Pipe = start_dead_pipe(Node), - {error, Reason} = rpc:call(Node, riak_kv_mrc_pipe,send_inputs, - [Pipe, [{<<"foo">>, <<"bar">>}]]), - %% Each vnode should have received the input, but - %% being unable to find the fitting process, returned - %% `worker_startup_failed` (and probably also printed - %% "fitting was gone before startup") - ?assert(lists:member(worker_startup_failed, Reason)). - -%% @doc Verify that sending inputs to a pipe that has already stopped -%% raises an error (async send) -asynchronous([Node|_]) -> - Pipe = start_dead_pipe(Node), - Shim = erlang:spawn(Node, sender_shim(Pipe, self())), - ShimMon = erlang:monitor(process, Shim), - receive - {sender_death, Error} -> - {error, Reason} = Error; - {'DOWN', ShimMon, process, Shim, Error} -> - Reason = [Error] - end, - %% Each vnode should have received the input, but - %% being unable to find the fitting process, returned - %% `worker_startup_failed` (and probably also printed - %% "fitting was gone before startup") - ?assert(lists:member(worker_startup_failed, Reason)). - -%% @doc runs on riak node; we have to use a shim here because -%% riak_kv_mrc_pipe:send_inputs_async sets up a monitor, which would -%% be owned by the remote RPC process, instead of our test process, as -%% desired, so we'd never see the sender die -sender_shim(Pipe, TestProc) -> - fun() -> - %% this is a hack to make sure that the async sender - %% doesn't die immediately upon linking to the - %% already-dead builder - PipeB = Pipe#pipe{builder=erlang:spawn(fake_builder(self()))}, - {Sender, SenderRef} = riak_kv_mrc_pipe:send_inputs_async( - PipeB, [{<<"foo">>, <<"bar">>}]), - receive - {'DOWN', SenderRef, process, Sender, Error} -> - ok - end, - %% let the fake builder shut down now - PipeB#pipe.builder ! test_over, - %% and send the result back for processing - TestProc ! {sender_death, Error} - end. - -%% @doc runs on riak node -fake_builder(TestProc) -> - fun() -> - Ref = erlang:monitor(process, TestProc), - receive - test_over -> - ok; - {'DOWN',Ref,process,TestProc,_} -> - ok - end - end. diff --git a/tests/mapred_http_errors.erl b/tests/mapred_http_errors.erl deleted file mode 100644 index 5d31f8995..000000000 --- a/tests/mapred_http_errors.erl +++ /dev/null @@ -1,97 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. 
See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify MapReduce returns the right kind of errors. --module(mapred_http_errors). --behavior(riak_test). --export([ - %% riak_test api - confirm/0, - - %% used on riak node - map_never_notfound/3 - ]). --compile([export_all]). %% because we call ?MODULE:TestName --include_lib("eunit/include/eunit.hrl"). - -%% @doc this map function always bails with a function clause error on -%% notfound -map_never_notfound(Object, _, _) when Object /= {error, notfound} -> - [ok]. - -confirm() -> - Nodes = rt:build_cluster(1), - - rt:load_modules_on_nodes([?MODULE], Nodes), - - [ begin - lager:info("Running test ~p", [T]), - ?MODULE:T(Nodes) - end - || T <- [proc_fun_clause, - proc_fun_clause_chunked] ], - - pass. - -httpmr(Node, Inputs, Query) -> - ibrowse:send_req(rt:http_url(Node)++"/mapred", - [{"content-type", "application/json"}], - post, - rhc_mapred:encode_mapred(Inputs, Query), - [{response_format, binary}]). - -httpmr_chunked(Node, Inputs, Query) -> - ibrowse:send_req(rt:http_url(Node)++"/mapred?chunked=true", - [{"content-type", "application/json"}], - post, - rhc_mapred:encode_mapred(Inputs, Query), - [{response_format, binary}]). - -%% @doc test that a simple variety of processing error returns useful -%% JSON details about it -proc_fun_clause([Node|_]) -> - {ok, Code, Headers, Body} = - httpmr(Node, - [{<<"doesnot">>,<<"exist">>}], - [{map, {modfun, ?MODULE, map_never_notfound}, none, true}]), - ?assertEqual("500", Code), - ?assertEqual("application/json", - proplists:get_value("Content-Type", Headers)), - assert_proc_fun_clause_json(Body). - -proc_fun_clause_chunked([Node|_]) -> - {ok, Code, Headers, Body} = - httpmr_chunked( - Node, - [{<<"doesnot">>,<<"exist">>}], - [{map, {modfun, ?MODULE, map_never_notfound}, none, true}]), - ?assertEqual("200", Code), - ?assertMatch("multipart/mixed"++_, - proplists:get_value("Content-Type", Headers)), - assert_proc_fun_clause_json(Body). - -assert_proc_fun_clause_json(Body) -> - {struct, Json} = mochijson2:decode(Body), - ?assertEqual(0, proplists:get_value(<<"phase">>, Json)), - ?assertEqual(<<"function_clause">>, - proplists:get_value(<<"error">>, Json)), - ?assertEqual( - <<"{{error,notfound},{<<\"doesnot\">>,<<\"exist\">>},undefined}">>, - proplists:get_value(<<"input">>, Json)), - ?assert(proplists:is_defined(<<"stack">>, Json)). diff --git a/tests/mapred_javascript.erl b/tests/mapred_javascript.erl deleted file mode 100644 index 5f8d8d502..000000000 --- a/tests/mapred_javascript.erl +++ /dev/null @@ -1,151 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify some MapReduce internals. 
-%% -%% This test used to be in riak_kv's test/mapred_test.erl. It was -%% called `compat_javascript_test_'. It has been moved here to avoid -%% the fragile setup and teardown stages that frequently broke eunit -%% testing. --module(mapred_javascript). --behavior(riak_test). --export([ - %% riak_test api - confirm/0 - ]). --compile([export_all]). %% because we run tests as ?MODULE:T(Nodes) --include_lib("eunit/include/eunit.hrl"). - --define(INTS_BUCKET, <<"foonum">>). --define(NUM_INTS, 5). --define(JS_BUCKET, <<"jsfuns">>). --define(NOTFOUND_BKEY, {<<"does not">>, <<"exist">>}). --define(MAP_JS, <<"function(v) { return [v.values[0].data]; }">>). --define(REDUCE_JS, <<"function(v) { - Sum = function(A, B) { return A+B; }; - return [ v.reduce(Sum) ]; - }">>). - -confirm() -> - Nodes = rt:build_cluster(3), - - load_test_data(Nodes), - - [ begin - lager:info("Running test ~p", [T]), - ?MODULE:T(Nodes) - end - || T<- [jsanon_source, - jsanon_bkey, - jsfun, - js_notfound, - keydata] ], - pass. - -load_test_data([Node|_]) -> - %% creates foonum/1..5 - this is what populates ?INTS_BUCKET - lager:info("Filling INTS_BUCKET (~s)", [?INTS_BUCKET]), - ok = rpc:call(Node, riak_kv_mrc_pipe, example_setup, [?NUM_INTS]), - - lager:info("Adding Javascript source objects"), - Map = riakc_obj:new(?JS_BUCKET, <<"map">>, ?MAP_JS, "text/plain"), - Red = riakc_obj:new(?JS_BUCKET, <<"reduce">>, ?REDUCE_JS, "text/plain"), - - C = rt:pbc(Node), - ok = riakc_pb_socket:put(C, Map), - ok = riakc_pb_socket:put(C, Red), - riakc_pb_socket:stop(C). - - -rpcmr(Node, Inputs, Query) -> - rpc:call(Node, riak_kv_mrc_pipe, mapred, [Inputs, Query]). - -%% @doc map & reduce with jsanon-Source -jsanon_source(Nodes) -> - run_js_test(Nodes, {jsanon, ?MAP_JS}, {jsanon, ?REDUCE_JS}). - -%% @doc map & reduce with jsanon-Bucket/Key -jsanon_bkey(Nodes) -> - run_js_test(Nodes, - {jsanon, {?JS_BUCKET, <<"map">>}}, - {jsanon, {?JS_BUCKET, <<"reduce">>}}). - -%% @doc map & reduce with jsfun -jsfun(Nodes) -> - run_js_test(Nodes, - {jsfun, <<"Riak.mapValues">>}, - {jsfun, <<"Riak.reduceSum">>}). - -run_js_test([Node|_], MapFun, ReduceFun) -> - Spec = [{map, MapFun, <<>>, true}, - {reduce, ReduceFun, <<>>, true}], - {ok, [MapRs, ReduceRs]} = rpcmr(Node, ?INTS_BUCKET, Spec), - ?assertEqual(5, length(MapRs)), - ExpectR = (?NUM_INTS * (?NUM_INTS+1)) div 2, - ?assertEqual([ExpectR], ReduceRs). - -%% @doc objects not found for JS map turn into -%% {not_found, {Bucket, Key}, KeyData} tuples -js_notfound([Node|_]) -> - Spec = [{map, {jsfun, <<"Riak.mapValues">>}, <<>>, true}, - {reduce, - {jsanon, <<"function(v) { - F = function(O) { - if ((O[\"not_found\"] && - O.not_found[\"bucket\"]) || - O[\"mapred_test_pass\"]) - return {mapred_test_pass:1}; - else - return O; - } - return v.map(F); - }">>}, - <<>>, true}], - ?assertEqual({ok, [[{not_found, - ?NOTFOUND_BKEY, - undefined}], - [{struct,[{<<"mapred_test_pass">>,1}]}]]}, - rpcmr(Node, [?NOTFOUND_BKEY], Spec)). 
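The expected reduce result computed in run_js_test/3 above is just the closed-form sum of 1..?NUM_INTS. A quick shell check of that arithmetic, runnable without Riak:

    %% Gauss's formula versus the literal sum, for ?NUM_INTS = 5.
    NumInts = 5,
    ExpectR = (NumInts * (NumInts + 1)) div 2,  %% 15
    ExpectR = lists:sum(lists:seq(1, NumInts)).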
- -keydata([Node|_]) -> - UnMap = <<"function(O, KD) { - R = {b:O.bucket, k:O.key}; - if (KD != \"undefined\") - R.d = KD; - return [R]; - }">>, - Normalize = fun({{B,K},D}) -> {struct, [{<<"b">>, B}, - {<<"k">>, K}, - {<<"d">>, D}]}; - ({B,K}) -> {struct, [{<<"b">>, B}, - {<<"k">>, K}]}; - ([B,K]) -> {struct, [{<<"b">>, B}, - {<<"k">>, K}]}; - ([B,K,D]) -> {struct, [{<<"b">>, B}, - {<<"k">>, K}, - {<<"d">>, D}]} - end, - Spec = [{map, {jsanon, UnMap}, none, true}], - Inputs = [{?INTS_BUCKET, <<"bar1">>}, - {{?INTS_BUCKET, <<"bar2">>}, <<"keydata works">>}, - [?INTS_BUCKET, <<"bar3">>], - [?INTS_BUCKET, <<"bar4">>, <<"keydata still works">>]], - {ok, Results} = rpcmr(Node, Inputs, Spec), - SortedNormal = lists:sort([ Normalize(I) || I <- Inputs ]), - ?assertEqual(SortedNormal, lists:sort(Results)). diff --git a/tests/mapred_notfound_failover.erl b/tests/mapred_notfound_failover.erl deleted file mode 100644 index 8c8e2e424..000000000 --- a/tests/mapred_notfound_failover.erl +++ /dev/null @@ -1,230 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify some MapReduce internals. -%% -%% This test used to be in riak_kv's test/mapred_test.erl. It was -%% called `notfound_failover_test_'. It has been moved here -%% to avoid the fragile setup and teardown stages that frequently -%% broke eunit testing. --module(mapred_notfound_failover). --behavior(riak_test). --export([ - %% riak_test api - confirm/0 - ]). --compile([export_all]). %% because we run ?MODULE:PrepareFun later --include_lib("eunit/include/eunit.hrl"). --include("rt_pipe.hrl"). - --define(INTS_BUCKET, <<"foonum">>). --define(NUM_INTS, 1000). - -confirm() -> - %% we need the volatility of memory, so we can cause a replica - %% notfound by killing a vnode - rt:set_backend(memory), - - Nodes = rt:build_cluster(3), - - %% for our custom reduce phase - rt:load_modules_on_nodes([?MODULE], Nodes), - - load_test_data(Nodes), - - [ begin - lager:info("Running test ~p", [T]), - run_test(Nodes, T) - end - || T <- [actual_notfound, - replica_notfound] ], - pass. - -load_test_data([Node|_]) -> - %% creates foonum/1..?NUM_INTS - this is what populates ?INTS_BUCKET - lager:info("Filling INTS_BUCKET (~s)", [?INTS_BUCKET]), - ok = rpc:call(Node, riak_kv_mrc_pipe, example_setup, [?NUM_INTS]). - -rpcmr(Node, Inputs, Query) -> - rpc:call(Node, riak_kv_mrc_pipe, mapred, [Inputs, Query]). - -%% @doc check the condition that used to bring down a pipe in -%% https://github.com/basho/riak_kv/issues/290 (this version checks it -%% with an actual not-found) -actual_notfound(_Node, _ChashFun, - _MissingBucket, _MissingKey, _MissingValue) -> - ok. 
- -%% @doc check the condition that used to bring down a pipe in -%% https://github.com/basho/riak_kv/issues/290 this version checks -%% with an object that is missing a replica -replica_notfound(Node, {HashMod, HashFun}, - MissingBucket, MissingKey, MissingValue) -> - %% create a value for the "missing" key - Obj = riakc_obj:new(MissingBucket, MissingKey, MissingValue), - C = rt:pbc(Node), - ok = riakc_pb_socket:put(C, Obj, [{w, 3}]), - riakc_pb_socket:stop(C), - %% and now kill the first replica; this will make the vnode local - %% to the kvget pipe fitting return an error (because it's the - %% memory backend), so it will have to look at another kv vnode - Hash = rpc:call(Node, HashMod, HashFun, [{MissingBucket, MissingKey}]), - [{{PrimaryIndex, PrimaryNode},_}] = - rpc:call(Node, riak_core_apl, get_primary_apl, [Hash, 1, riak_kv]), - {ok, VnodePid} = rpc:call(PrimaryNode, - riak_core_vnode_manager, get_vnode_pid, - [PrimaryIndex, riak_kv_vnode]), - exit(VnodePid, kill). - -run_test([Node|_], PrepareFun) -> - QLimit = 3, - WaitRef = make_ref(), - Spec = - [{map, {modfun, riak_kv_mapreduce, map_object_value}, - <<"include_keydata">>, false}, - {reduce, {modfun, ?MODULE, reduce_wait_for_signal}, - [{reduce_phase_batch_size, 1}, {wait, {self(), WaitRef}}], - true}], - %% mapred_plan must happen on riak node to access ring manager - PipeSpec = rpc:call(Node, riak_kv_mrc_pipe, mapred_plan, [Spec]), - %% make it easier to fill - SmallPipeSpec = [ S#fitting_spec{q_limit=QLimit} || S <- PipeSpec ], - {ok, Pipe} = rpc:call(Node, riak_pipe, exec, - [SmallPipeSpec, - [{log, sink}, {trace, [error, queue_full]}, - {sink, rt_pipe:self_sink()}]]), - ExistingKey = {?INTS_BUCKET, <<"bar1">>}, - ChashFun = (hd(SmallPipeSpec))#fitting_spec.chashfun, - {MissingBucket, MissingKey} = - find_adjacent_key(Node, ChashFun, ExistingKey), - - ValueRef = term_to_binary(make_ref()), - %% get the missing bucket/key into the right state - ?MODULE:PrepareFun(Node, ChashFun, MissingBucket, MissingKey, ValueRef), - - %% get main workers spun up - ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, ExistingKey]), - receive {waiting, WaitRef, ReducePid} -> ok end, - - %% reduce is now blocking, fill its queue - [ ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, ExistingKey]) - || _ <- lists:seq(1, QLimit) ], - - {NValMod,NValFun} = (hd(SmallPipeSpec))#fitting_spec.nval, - NVal = rpc:call(Node, NValMod, NValFun, [ExistingKey]), - - %% each of N paths through the primary preflist - [ fill_map_queue(Node, Pipe, QLimit, ExistingKey) - || _ <- lists:seq(1, NVal) ], - - %% check get queue actually full - ExpectedTOs = lists:duplicate(NVal, timeout), - {error, ExpectedTOs} = - rpc:call(Node, riak_pipe, queue_work, [Pipe, ExistingKey, noblock]), - - %% now inject a missing key that would need to - %% failover to the full queue - KeyDataRef = make_ref(), - ok = rpc:call(Node, riak_pipe, queue_work, - [Pipe, {{MissingBucket, MissingKey}, KeyDataRef}]), - %% and watch for it to block in the reduce queue - %% *this* is when pre-patched code would fail: - %% we'll receive an [error] trace from the kvget fitting's - %% failure to forward the bkey along its preflist - ok = consume_queue_full(Pipe, 1), - - %% let the pipe finish - riak_pipe:eoi(Pipe), - ReducePid ! 
{continue, WaitRef}, - - {eoi, Results, Logs} = riak_pipe:collect_results(Pipe), - ExpectVal = case PrepareFun of - actual_notfound -> - %% the object does not exist, but we told the map - %% phase to send on its keydata - check for it - KeyDataRef; - replica_notfound -> - %% the object does exist (but one replica does - %% not), and we should have found it - ValueRef - end, - ?assert(lists:member({1, ExpectVal}, Results)), - %% just to be a little extra cautious, check for - %% other errors - ?assertEqual([], [E || {_,{trace,[error],_}}=E <- Logs]). - -fill_map_queue(Node, Pipe, QLimit, ExistingKey) -> - %% give the map worker one more to block on - ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, ExistingKey, noblock]), - consume_queue_full(Pipe, 1), - %% map is now blocking, fill its queue - [ ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, ExistingKey, noblock]) - || _ <- lists:seq(1, QLimit) ], - %% give the get worker one more to block on - ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, ExistingKey, noblock]), - consume_queue_full(Pipe, {xform_map, 0}), - %% get is now blocking, fill its queue - [ ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, ExistingKey, noblock]) - || _ <- lists:seq(1, QLimit) ], - ok. - -find_adjacent_key(Node, {HashMod, HashFun}, ExistingKey) -> - Hash = rpc:call(Node, HashMod, HashFun, [ExistingKey]), - [ExistingHead|_] = rpc:call(Node, riak_core_apl, get_primary_apl, - [Hash, 2, riak_kv]), - [K|_] = lists:dropwhile( - fun(N) -> - K = {<<"foonum_missing">>, - list_to_binary(integer_to_list(N))}, - KH = rpc:call(Node, HashMod, HashFun, [K]), - [_,Second] = - rpc:call(Node, riak_core_apl, get_primary_apl, - [KH, 2, riak_kv]), - Second /= ExistingHead - end, - lists:seq(1, 1000)), - {<<"foonum_missing">>, list_to_binary(integer_to_list(K))}. - -consume_queue_full(Pipe, FittingName) -> - {log, {FittingName, {trace, [queue_full], _}}} = - riak_pipe:receive_result(Pipe, 5000), - ok. - -reduce_wait_for_signal(Inputs, Args) -> - case get(waited) of - true -> - Inputs; - _ -> - {TestProc, WaitRef} = proplists:get_value(wait, Args), - TestProc ! {waiting, WaitRef, self()}, - receive {continue, WaitRef} -> ok end, - put(waited, true), - Inputs - end. - -wait_until_dead(Pid) when is_pid(Pid) -> - Ref = monitor(process, Pid), - receive - {'DOWN', Ref, process, _Obj, Info} -> - Info - after 10*1000 -> - exit({timeout_waiting_for, Pid}) - end; -wait_until_dead(_) -> - ok. diff --git a/tests/mapred_search_switch.erl b/tests/mapred_search_switch.erl deleted file mode 100644 index 85434e20f..000000000 --- a/tests/mapred_search_switch.erl +++ /dev/null @@ -1,303 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify that "search" MapReduce inputs come from Yokozuna or -%% Riak Search, as the Riak node is configured. 
-%% -%% Without the switch code in place in Riak, this test's failure is -%% confusing: it will fail saying that riak_search was enabled, and it -%% got riak_search mapred results, but it expected not to. This is -%% because the first test also enables yokozuna without explicitly -%% selecting one or the other. The test expects Riak to return an -%% error, but without the switch code in place, Riak happily returns -%% what it has always returned: riak_search mapred results. --module(mapred_search_switch). --behavior(riak_test). --export([ - %% riak_test api - confirm/0 - ]). --compile([export_all]). %% because we run tests as ?MODULE:T(Nodes) --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - -%% name of the riak_kv appenv specifying which search provider to use --define(PROVIDER_KEY, mapred_search). - --record(env, { - nodes, % nodes running - rs_bucket, % bucket configured for riak_search - rs_keyuqs, % keys and their unique data for riak_search - rs_common, % common data for all keys for riak_search - yz_bucket, % \ - yz_keyuqs, % +- ditto, yokozuna bucket 1 - yz_common, % / - yz_index % YZ index separate from bucket name - }). - -confirm() -> - Env = setup_test_env(), - - [ confirm_config(Env, - [{riak_search, [{enabled, RS}]}, - {yokozuna, [{enabled, YZ}]}, - {riak_kv, [{?PROVIDER_KEY, P}]}]) - || RS <- [true, false], - YZ <- [true, false], - P <- [riak_search, yokozuna, undefined] ], - pass. - -setup_test_env() -> - %% must enable both RS and YZ at startup to get test data indexed; - %% nothing extra would be tested by using multiple nodes, so just - %% deploy one to make the test run faster - Nodes = rt:deploy_nodes(1, [{riak_search, [{enabled, true}]}, - {yokozuna, [{enabled, true}]}]), - ok = rt:wait_until_nodes_ready(Nodes), - ok = rt:wait_for_cluster_service(Nodes, riak_search), - ok = rt:wait_for_cluster_service(Nodes, yokozuna), - - {RSBucket, RSKeyAndUniques, RSCommon} = generate_test_data(<<"rs">>), - setup_rs_bucket(Nodes, RSBucket), - load_test_data(Nodes, RSBucket, RSKeyAndUniques, RSCommon), - - {YZBucket, YZKeyAndUniques, YZCommon} = generate_test_data(<<"yz">>), - YZIndex = generate_string(), - lager:info("yz index: ~s", [YZIndex]), - - setup_yz_index(Nodes, YZIndex), - setup_yz_bucket(Nodes, YZBucket, YZIndex), - load_test_data(Nodes, YZBucket, YZKeyAndUniques, YZCommon), - - %% give yokozuna time to auto-commit - YZSleep_ms = 1000, - lager:info("Giving Yokozuna ~bms to auto-commit", [YZSleep_ms]), - timer:sleep(YZSleep_ms), - - #env{ nodes=Nodes, - rs_bucket=RSBucket, - rs_keyuqs=RSKeyAndUniques, - rs_common=RSCommon, - yz_bucket=YZBucket, - yz_keyuqs=YZKeyAndUniques, - yz_common=YZCommon, - yz_index=YZIndex }. - -set_config(#env{nodes=Nodes}, Config) -> - [ [ set_config(Nodes, App, K, V) - || {K, V} <- Props ] - || {App, Props} <- Config ]. - -set_config(Nodes, App, K, V) -> - ?assertEqual( - {lists:duplicate(length(Nodes), ok), []}, - case V of - undefined -> - rpc:multicall(Nodes, application, unset_env, [App, K]); - _ -> - rpc:multicall(Nodes, application, set_env, [App, K, V]) - end). 
- -confirm_config(#env{nodes=Nodes, - rs_bucket=RSBucket, - rs_keyuqs=RSKeyAndUniques, - rs_common=RSCommon, - yz_bucket=YZBucket, - yz_keyuqs=YZKeyAndUniques, - yz_index=YZIndex}=Env, - Config) -> - lager:info("Running Config: ~p", [Config]), - set_config(Env, Config), - - RSBResults = run_bucket_mr(Nodes, RSBucket, RSCommon), - YZBResults = run_bucket_mr(Nodes, YZIndex, <<"*:*">>), - - lager:info("RS Bucket Results: ~p", [RSBResults]), - lager:info("YZ Bucket Results: ~p", [YZBResults]), - - ?assertEqual(expected_riak_search(Config), - got_riak_search(RSBResults, RSBucket, - RSKeyAndUniques)), - ?assertEqual(expected_yokozuna(Config), - got_yokozuna(YZBResults, YZBucket, YZKeyAndUniques)), - %% asking YZ to MR a bucket it hasn't indexed results in error - ?assertEqual(expected_yokozuna(Config) or expected_error(Config), - got_error(RSBResults)), - ?assertEqual(expected_error(Config), - got_error(YZBResults)). - -%% make up random test data, to fight against accidental re-runs, and -%% put it in the test log so we know where to poke when things fail -generate_test_data(System) -> - Bucket = generate_bucket_name(System), - lager:info("~s bucket: ~s", [System, Bucket]), - - Common = generate_string(), - lager:info("~s common: ~s", [System, Common]), - - KeyAndUniques = [ {generate_string(), generate_string()}, - {generate_string(), generate_string()}, - {generate_string(), generate_string()} ], - [ lager:info("~s key/uq: ~s / ~s", [System, Key, Unique]) - || {Key, Unique} <- KeyAndUniques ], - - {Bucket, KeyAndUniques, Common}. - -%% setup riak_search hook -setup_rs_bucket([Node|_], Bucket) -> - lager:info("Setting up riak_search hook"), - C = rt:httpc(Node), - ok = rhc:set_bucket(C, Bucket, [{search, true}]). - -%% setup yokozuna hook/index - bucket name == index name -setup_yz_bucket([Node|_], Bucket, Index) -> - %% attach bucket to index - %% TODO: teach rhc_bucket:httpify_prop/2 `search_index' - BUrl = iburl(Node, ["/buckets/",Bucket,"/props"]), - BHeaders = [{"content-type", "application/json"}], - BProps = mochijson2:encode([{props, {struct, [{search_index, Index}]}}]), - {ok, "204", _, _} = ibrowse:send_req(BUrl, BHeaders, put, BProps). - -setup_yz_index([Node|_]=Cluster, Index) -> - %% create index - IUrl = iburl(Node, index_path(Index)), - {ok, "204", _, _} = ibrowse:send_req(IUrl, [], put), - wait_for_index(Cluster, Index). - -index_path(Index) -> - ["/search/index/",Index]. - -%% if we start writing data too soon, it won't be indexed, so wait -%% until solr has created the index -wait_for_index(Cluster, Index) -> - IsIndexUp = - fun(Node) -> - lager:info("Waiting for index ~s on node ~p", [Index, Node]), - IUrl = iburl(Node, index_path(Index)), - case ibrowse:send_req(IUrl, [], get) of - {ok, "200", _, _} -> true; - _ -> false - end - end, - [?assertEqual(ok, rt:wait_until(Node, IsIndexUp)) || Node <- Cluster]. - -%% ibrowse really wants a list of characters, not a binary, not an iolist -iburl(Node, Path) -> - binary_to_list(list_to_binary([rt:http_url(Node), Path])). - -%% Create a set of keys, all of which have a common term in their -%% value, and each of which has a unique term in its value -load_test_data([Node|_], Bucket, KeyAndUniques, Common) -> - lager:info("Loading test data"), - C = rt:httpc(Node), - [ begin - Value = list_to_binary([Common, " ", Unique]), - ok = rhc:put(C, riakc_obj:new(Bucket, Key, Value, "text/plain")) - end - || {Key, Unique} <- KeyAndUniques ]. 
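The expected_* predicates that follow encode which provider should answer a {search, ...} MapReduce input under each configuration. The same rules restated as a self-contained truth table (a hypothetical helper derived from those predicates, not original code):

    %% RS/YZ are the riak_search/yokozuna enabled flags; P is the value
    %% of the riak_kv mapred_search setting (undefined when unset).
    provider_outcome(false, false, _P)       -> error;        %% neither system on
    provider_outcome(true,  false, _P)       -> riak_search;
    provider_outcome(false, true,  _P)       -> yokozuna;
    provider_outcome(true,  true,  yokozuna) -> yokozuna;     %% explicit choice
    provider_outcome(true,  true,  _P)       -> riak_search.  %% riak_search is the default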
-
-expected_riak_search(Config) ->
-    is_enabled(Config, riak_search)
-    %% if yokozuna is also enabled, riak_search is the default
-        and ( (not is_enabled(Config, yokozuna))
-              or (yokozuna =/= provider(Config)) ).
-
-expected_yokozuna(Config) ->
-    is_enabled(Config, yokozuna)
-    %% if riak_search is also enabled, must explicitly pick yokozuna
-        and ( (not is_enabled(Config, riak_search))
-              or (yokozuna == provider(Config)) ).
-
-expected_error(Config) ->
-    %% must have at least one system on to get results
-    not ( is_enabled(Config, yokozuna)
-          or is_enabled(Config, riak_search) ).
-
-is_enabled(Config, App) ->
-    true == kvc:path([App, enabled], Config).
-
-provider(Config) ->
-    case kvc:path([riak_kv, ?PROVIDER_KEY], Config) of
-        [] -> undefined;
-        Provider -> Provider
-    end.
-
-%% We only check that we got at least one match, and that any matches
-%% we did get are expected, and of the RS format. We don't check that
-%% all known keys are present in the result because we're not
-%% interested in whether RS is working with full correctness.
-got_riak_search(Results, Bucket, KeyAndUniques) ->
-    case Results of
-        {ok, [{0, Matches}]} when Matches /= [] ->
-            IsRS = fun({{B, K},{struct,Props}}) when B == Bucket ->
-                           lists:keymember(K, 1, KeyAndUniques) and
-                               lists:keymember(p, 1, Props) and
-                               lists:keymember(score, 1, Props);
-                      (_) ->
-                           false
-                   end,
-            lists:all(IsRS, Matches);
-        _ ->
-            false
-    end.
-
-%% similar to got_riak_search - just check that we got at least one
-%% result, and that all results are in the expected YZ format - this
-%% test doesn't care if YZ is fulfilling its harvest/yield promises
-got_yokozuna(Results, Bucket, KeyAndUniques) ->
-    case Results of
-        {ok, [{0, Matches}]} when Matches /= [] ->
-            IsYZ = fun({{{<<"default">>, B}, K}, {struct, []}}) when B == Bucket ->
-                           lists:keymember(K, 1, KeyAndUniques);
-                      (_) ->
-                           false
-                   end,
-            lager:info("got_yokozuna: ~p ... ~p", [Matches, KeyAndUniques]),
-            lists:all(IsYZ, Matches);
-        _ ->
-            false
-    end.
-
-%% we don't care what the error is right now, just that it is an error
-%% TODO: actually get a good error message bubbled in these cases
-got_error({error, _}) ->
-    true;
-got_error(_) ->
-    false.
-
-run_bucket_mr([Node|_], Bucket, Common) ->
-    C = rt:pbc(Node),
-    riakc_pb_socket:mapred(
-      C,
-      %% TODO: check {search, Bucket, Common, Filter}
-      %% independently
-      {search, Bucket, Common},
-      []).
-
-%% Prefix is included to make it easy to tell what was set up for
-%% riak_search vs. yokozuna
-generate_bucket_name(Prefix) ->
-    list_to_binary([Prefix,generate_string()]).
-
-generate_string() ->
-    %% stolen from riak_core_util:unique_id_62/0, but using 36 instead
-    %% so as not to have to copy riak_core_util:integer_to_list
-    Rand = crypto:hash(sha, term_to_binary({make_ref(), os:timestamp()})),
-    <<I:160/integer>> = Rand,
-    list_to_binary(integer_to_list(I, 36)).
diff --git a/tests/mapred_verify_rt.erl b/tests/mapred_verify_rt.erl
deleted file mode 100644
index c09a63e5b..000000000
--- a/tests/mapred_verify_rt.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%% @doc Runs the mapred_verify tests from
-%% http://github.com/basho/mapred_verify
-
--module(mapred_verify_rt).
-
--behavior(riak_test).
--export([confirm/0]).
-
--define(NODE_COUNT, 3).
-
-confirm() ->
-    lager:info("Build ~b node cluster", [?NODE_COUNT]),
-    Nodes = rt:build_cluster(?NODE_COUNT),
-
-
-    %% @todo longer term fix is probably one or more of:
-    %% 1) add a mapred_verify section to riak_test.config
-    %% 2) learn to use this "inclextra" bit of rebar to package tests.def
-    %%    in the escript: https://github.com/basho/rebar/blob/master/src/rebar_escripter.erl#L57
-    PrivDir = case code:priv_dir(mapred_verify) of
-                  {error, bad_name} ->
-                      erlang:error("Could not determine priv dir for mapred_verify. Make sure that your riak_test.config contains \"deps\"");
-                  PD -> PD
-              end,
-    MRVProps = [{node, hd(Nodes)},
-                %% don't need 'path' because riak_test does that for us
-                {keycount, 1000},
-                {bodysize, 1},
-                {populate, true},
-                {runjobs, true},
-                {testdef, filename:join(PrivDir, "tests.def")}],
-
-    lager:info("Run mapred_verify"),
-    0 = mapred_verify:do_verification(MRVProps),
-    lager:info("~s: PASS", [atom_to_list(?MODULE)]),
-    pass.
diff --git a/tests/overload_proxy.erl b/tests/overload_proxy.erl
deleted file mode 100644
index 3dd939d53..000000000
--- a/tests/overload_proxy.erl
+++ /dev/null
@@ -1,157 +0,0 @@
-%%%-------------------------------------------------------------------
-%%% @author Jon Anderson <>
-%%% @copyright (C) 2014, Jon Anderson
-%%% @doc
-%%%
-%%% @end
-%%% Created : 18 Jul 2014 by Jon Anderson <>
-%%%-------------------------------------------------------------------
--module(overload_proxy).
-
--behaviour(gen_server).
-%% API
--export([start_link/0, increment_count/0, get_count/0, is_settled/1, stop/0]).
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
-         terminate/2, code_change/3]).
-
--define(SERVER, ?MODULE).
-
--record(state, { get_fsm_count,
-                 last_msg_ts
-               }).
-
-%%%===================================================================
-%%% API
-%%%===================================================================
-
-%%--------------------------------------------------------------------
-%% @doc
-%% Starts the server
-%%
-%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
-%% @end
-%%--------------------------------------------------------------------
-start_link() ->
-    gen_server:start_link({global, ?SERVER}, ?MODULE, [], []).
-
-increment_count() ->
-    %% gen_server:cast/2: casts are asynchronous and take no timeout
-    gen_server:cast({global, ?SERVER}, increment_count).
-
-get_count() ->
-    gen_server:call({global, ?SERVER}, get_count, infinity).
-
-is_settled(ThresholdSecs) ->
-    gen_server:call({global, ?SERVER}, {is_settled, ThresholdSecs}, infinity).
-
-stop() ->
-    gen_server:cast({global, ?SERVER}, stop).
-
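A hypothetical client session against the API above (sketch only; assumes the calling node can resolve the globally registered name):

    {ok, _Pid} = overload_proxy:start_link(),
    overload_proxy:increment_count(),   %% cast: bump the get-fsm counter
    1 = overload_proxy:get_count(),     %% call: read it back
    overload_proxy:stop().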
-%%%===================================================================
-%%% gen_server callbacks
-%%%===================================================================
-
-%%--------------------------------------------------------------------
-%% @private
-%% @doc
-%% Initializes the server
-%%
-%% @spec init(Args) -> {ok, State} |
-%%                     {ok, State, Timeout} |
-%%                     ignore |
-%%                     {stop, Reason}
-%% @end
-%%--------------------------------------------------------------------
-init([]) ->
-    %% seed last_msg_ts so the age arithmetic in is_settled/1 has a baseline
-    {ok, #state{get_fsm_count = 0, last_msg_ts = moment()}}.
-
-%%--------------------------------------------------------------------
-%% @private
-%% @doc
-%% Handling call messages
-%%
-%% @spec handle_call(Request, From, State) ->
-%%                                   {reply, Reply, State} |
-%%                                   {reply, Reply, State, Timeout} |
-%%                                   {noreply, State} |
-%%                                   {noreply, State, Timeout} |
-%%                                   {stop, Reason, Reply, State} |
-%%                                   {stop, Reason, State}
-%% @end
-%%--------------------------------------------------------------------
-handle_call(get_count, _From, State) ->
-    Reply = State#state.get_fsm_count,
-    {reply, Reply, State};
-handle_call({is_settled, ThresholdSecs}, _From, State=#state{last_msg_ts=LastMsgTs}) ->
-    Now = moment(),
-    Reply = case process_info(self(), message_queue_len) of
-                {message_queue_len, 0} when (Now - LastMsgTs) > ThresholdSecs ->
-                    true;
-                {message_queue_len, _} ->
-                    false
-            end,
-    {reply, Reply, State};
-handle_call(_Request, _From, State) ->
-    Reply = ok,
-    {reply, Reply, State}.
-
-%%--------------------------------------------------------------------
-%% @private
-%% @doc
-%% Handling cast messages
-%%
-%% @spec handle_cast(Msg, State) -> {noreply, State} |
-%%                                  {noreply, State, Timeout} |
-%%                                  {stop, Reason, State}
-%% @end
-%%--------------------------------------------------------------------
-handle_cast(increment_count, State) ->
-    %% also stamp last_msg_ts so is_settled/1 can measure quiet time
-    NewState = State#state{get_fsm_count = State#state.get_fsm_count + 1,
-                           last_msg_ts = moment()},
-    {noreply, NewState};
-handle_cast(stop, State) ->
-    {stop, normal, State};
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% @private
-%% @doc
-%% Handling all non call/cast messages
-%%
-%% @spec handle_info(Info, State) -> {noreply, State} |
-%%                                   {noreply, State, Timeout} |
-%%                                   {stop, Reason, State}
-%% @end
-%%--------------------------------------------------------------------
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% @private
-%% @doc
-%% This function is called by a gen_server when it is about to
-%% terminate. It should be the opposite of Module:init/1 and do any
-%% necessary cleaning up. When it returns, the gen_server terminates
-%% with Reason. The return value is ignored.
-%%
-%% @spec terminate(Reason, State) -> void()
-%% @end
-%%--------------------------------------------------------------------
-terminate(_Reason, _State) ->
-    ok.
-
-%%--------------------------------------------------------------------
-%% @private
-%% @doc
-%% Convert process state when code is changed
-%%
-%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
-%% @end
-%%--------------------------------------------------------------------
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-%%%===================================================================
-%%% Internal functions
-%%%===================================================================
-moment() ->
-    calendar:datetime_to_gregorian_seconds(calendar:universal_time()).
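moment/0 above returns wall-clock time as Gregorian seconds, so the quiet-time test in is_settled/1 is plain integer subtraction. For example, runnable in any Erlang shell:

    T0 = calendar:datetime_to_gregorian_seconds(calendar:universal_time()),
    timer:sleep(1100),
    T1 = calendar:datetime_to_gregorian_seconds(calendar:universal_time()),
    true = (T1 - T0) >= 1.   %% at least one full second elapsed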
diff --git a/tests/overload_test.erl b/tests/overload_test.erl deleted file mode 100644 index 833df4bb0..000000000 --- a/tests/overload_test.erl +++ /dev/null @@ -1,501 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(overload_test). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). - --cover_modules([riak_kv_vnode, - riak_kv_ensemble_backend, - riak_core_vnode_proxy]). - --define(NUM_REQUESTS, 200). --define(THRESHOLD, 100). --define(LIST_KEYS_RETRIES, 1000). --define(GET_RETRIES, 1000). --define(BUCKET, <<"test">>). --define(KEY, <<"hotkey">>). - -confirm() -> - Nodes = setup(), - - NormalType = <<"normal_type">>, - ConsistentType = <<"consistent_type">>, - WriteOnceType = <<"write_once_type">>, - - ok = create_bucket_type(Nodes, NormalType, [{n_val, 3}]), - ok = create_bucket_type(Nodes, ConsistentType, [{consistent, true}, {n_val, 5}]), - ok = create_bucket_type(Nodes, WriteOnceType, [{write_once, true}, {n_val, 1}]), - rt:wait_until(ring_manager_check_fun(hd(Nodes))), - - BKV1 = {{NormalType, ?BUCKET}, ?KEY, <<"test">>}, - BKV2 = {{ConsistentType, ?BUCKET}, ?KEY, <<"test">>}, - BKV3 = {{WriteOnceType, ?BUCKET}, ?KEY, <<"test">>}, - - Tests = [test_no_overload_protection, - test_vnode_protection, - test_fsm_protection, - test_cover_queries_overload], - - [begin - lager:info("Starting Test ~p for ~p~n", [Test, BKV]), - ok = erlang:apply(?MODULE, Test, [Nodes, BKV, IsConsistent]) - end || Test <- Tests, - {BKV, IsConsistent} <- [{BKV1, false}, - {BKV2, true}, - {BKV3, false}]], - pass. - - -setup() -> - Config = [{riak_core, [{ring_creation_size, 8}, - {default_bucket_props, [{n_val, 5}]}, - {vnode_management_timer, 1000}, - {enable_health_checks, false}, - {enable_consensus, true}, - {vnode_overload_threshold, undefined}]}, - {riak_kv, [{fsm_limit, undefined}, - {storage_backend, riak_kv_memory_backend}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100}, - {anti_entropy_tick, 100}, - {anti_entropy, {on, []}}, - {anti_entropy_timeout, 5000}]}, - {riak_api, [{pb_backlog, 1024}]}], - ensemble_util:build_cluster(5, Config, 5). - -test_no_overload_protection(_Nodes, _BKV, true) -> - ok; -test_no_overload_protection(Nodes, BKV, ConsistentType) -> - lager:info("Testing with no overload protection"), - ProcFun = build_predicate_gte(test_no_overload_protection, ?NUM_REQUESTS, - "ProcFun", "Procs"), - QueueFun = build_predicate_gte(test_no_overload_protection, ?NUM_REQUESTS, - "QueueFun", "Queue Size"), - verify_test_results(run_test(Nodes, BKV), ConsistentType, ProcFun, QueueFun). 
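The ProcFun/QueueFun arguments used here are just predicate closures over a threshold; build_predicate_gte/4 and build_predicate_lt/4 (defined near the end of this module) wrap the comparison with logging. Stripped of the logging, they reduce to something like this illustrative sketch:

    GTE = fun(Threshold) -> fun(X) -> X >= Threshold end end,
    P = GTE(200),        %% e.g. Threshold = ?NUM_REQUESTS
    true  = P(200),
    false = P(199).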
- -verify_test_results({_NumProcs, QueueLen}, true, _, QueueFun) -> - ?assert(QueueFun(QueueLen)); -verify_test_results({NumProcs, QueueLen}, false, ProcFun, QueueFun) -> - ?assert(ProcFun(NumProcs)), - ?assert(QueueFun(QueueLen)). - -test_vnode_protection(Nodes, BKV, ConsistentType) -> - %% Setting check_interval to one ensures that process_info is called - %% to check the queue length on each vnode send. - %% This allows us to artificially raise vnode queue lengths with dummy - %% messages instead of having to go through the vnode path for coverage - %% query overload testing. - lager:info("Testing with vnode queue protection enabled"), - lager:info("Setting vnode overload threshold to ~b", [?THRESHOLD]), - lager:info("Setting vnode check interval to 1"), - Config = [{riak_core, [{vnode_overload_threshold, ?THRESHOLD}, - {vnode_check_interval, 1}]}], - rt:pmap(fun(Node) -> - rt:update_app_config(Node, Config) - end, Nodes), - ProcFun = build_predicate_lt(test_vnode_protection, (?NUM_REQUESTS+1), "ProcFun", "Procs"), - QueueFun = build_predicate_lt(test_vnode_protection, (?NUM_REQUESTS), "QueueFun", "QueueSize"), - verify_test_results(run_test(Nodes, BKV), ConsistentType, ProcFun, QueueFun), - - [Node1 | _] = Nodes, - CheckInterval = ?THRESHOLD div 2, - Dropped = read_until_success(Node1), - lager:info("Unnecessary dropped requests: ~b", [Dropped]), - ?assert(Dropped =< CheckInterval), - - Victim = get_victim(Node1, BKV), - - lager:info("Suspending vnode proxy for ~p", [Victim]), - Pid = suspend_vnode_proxy(Victim), - ProcFun2 = build_predicate_gte("test_vnode_protection after suspend", - (?NUM_REQUESTS), "ProcFun", "Procs"), - QueueFun2 = build_predicate_lt("test_vnode_protection after suspend", - (?NUM_REQUESTS), "QueueFun", "QueueSize"), - verify_test_results(run_test(Nodes, BKV), ConsistentType, ProcFun2, QueueFun2), - Pid ! resume, - ok. - -%% Don't check on fast path -test_fsm_protection(_, {{<<"write_once_type">>, _}, _, _}, _) -> - ok; -test_fsm_protection(Nodes, BKV, ConsistentType) -> - lager:info("Testing with coordinator protection enabled"), - lager:info("Setting FSM limit to ~b", [?THRESHOLD]), - Config = [{riak_kv, [{fsm_limit, ?THRESHOLD}]}], - rt:pmap(fun(Node) -> - rt:update_app_config(Node, Config) - end, Nodes), - ProcFun = build_predicate_lt(test_fsm_protection, (?NUM_REQUESTS), - "ProcFun", "Procs"), - QueueFun = build_predicate_lt(test_fsm_protection, (?NUM_REQUESTS), - "QueueFun", "QueueSize"), - verify_test_results(run_test(Nodes, BKV), ConsistentType, ProcFun, QueueFun), - ok. 
-
-test_cover_queries_overload(_Nodes, _, true) ->
    ok;
-test_cover_queries_overload(Nodes, _, false) ->
-    lager:info("Testing cover queries with vnode queue protection enabled"),
-    lager:info("Setting vnode overload threshold to ~b", [?THRESHOLD]),
-    lager:info("Setting vnode check interval to 1"),
-
-    Config = [{riak_core, [{vnode_overload_threshold, ?THRESHOLD},
-                           {vnode_request_check_interval, 2},
-                           {vnode_check_interval, 1}]}],
-    rt:pmap(fun(Node) ->
-                    rt:update_app_config(Node, Config)
-            end, Nodes),
-
-    [rt:wait_for_service(Node, riak_kv) || Node <- Nodes],
-    rt:load_modules_on_nodes([?MODULE], Nodes),
-
-    [Node1, Node2, Node3, Node4, Node5] = Nodes,
-    Pids = [begin
-                lager:info("Suspending all kv vnodes on ~p", [N]),
-                suspend_and_overload_all_kv_vnodes(N)
-            end || N <- [Node2, Node3, Node4, Node5]],
-
-    [?assertEqual({error, <<"mailbox_overload">>}, KeysRes) ||
-        KeysRes <- [list_keys(Node1) || _ <- lists:seq(1, 3)]],
-
-    lager:info("list_keys correctly handled overload"),
-
-    [?assertEqual({error, mailbox_overload}, BucketsRes) ||
-        BucketsRes <- [list_buckets(Node1) || _ <- lists:seq(1, 3)]],
-    lager:info("list_buckets correctly handled overload"),
-
-    lager:info("Resuming all kv vnodes"),
-    [resume_all_vnodes(Pid) || Pid <- Pids],
-
-    lager:info("Waiting for vnode queues to empty"),
-    wait_for_all_vnode_queues_empty(Node2).
-
-run_test(Nodes, BKV) ->
-    [Node1 | _RestNodes] = Nodes,
-    rt:wait_for_cluster_service(Nodes, riak_kv),
-    lager:info("Sleeping for 5s to let process count stabilize"),
-    timer:sleep(5000),
-    rt:load_modules_on_nodes([?MODULE], Nodes),
-    overload_proxy:start_link(),
-    rt_intercept:add(Node1, {riak_kv_get_fsm, [{{start_link, 4}, count_start_link_4}]}),
-
-    Victim = get_victim(Node1, BKV),
-    lager:info("Suspending vnode ~p/~p",
-               [element(1, Victim), element(2, Victim)]),
-    Suspended = suspend_vnode(Victim),
-
-    NumProcs1 = overload_proxy:get_count(),
-
-    lager:info("Initial process count on ~p: ~b", [Node1, NumProcs1]),
-    lager:info("Sending ~b read requests", [?NUM_REQUESTS]),
-    write_once(Node1, BKV),
-    Reads = spawn_reads(Node1, BKV, ?NUM_REQUESTS),
-    timer:sleep(5000),
-
-    rt:wait_until(fun() ->
-                          overload_proxy:is_settled(10)
-                  end, 5, 500),
-    NumProcs2 = overload_proxy:get_count(),
-    lager:info("Final process count on ~p: ~b", [Node1, NumProcs2]),
-
-    QueueLen = vnode_queue_len(Victim),
-    lager:info("Final vnode queue length for ~p: ~b",
-               [Victim, QueueLen]),
-
-    resume_vnode(Suspended),
-    rt:wait_until(fun() ->
-                          vnode_queue_len(Victim) =:= 0
-                  end),
-    kill_pids(Reads),
-    overload_proxy:stop(),
-    {NumProcs2 - NumProcs1, QueueLen}.
-
-get_victim(ExcludeNode, {Bucket, Key, _}) ->
-    Hash = riak_core_util:chash_std_keyfun({Bucket, Key}),
-    PL = lists:sublist(riak_core_ring:preflist(Hash, rt:get_ring(ExcludeNode)), 5),
-    hd([IdxNode || {_, Node}=IdxNode <- PL, Node /= ExcludeNode]).
-
-ring_manager_check_fun(Node) ->
-    fun() ->
-            case rpc:call(Node, riak_core_ring_manager, get_chash_bin, []) of
-                {ok, _R} ->
-                    true;
-                _ ->
-                    false
-            end
-    end.
-
-create_bucket_type(Nodes, Type, Props) ->
-    lager:info("Create bucket type ~p, wait for propagation", [Type]),
-    rt:create_and_activate_bucket_type(hd(Nodes), Type, Props),
-    rt:wait_until_bucket_type_status(Type, active, Nodes),
-    rt:wait_until_bucket_props(Nodes, {Type, <<"bucket">>}, Props),
-    ok.
-
-node_overload_check(Pid) ->
-    fun() ->
-            Pid ! {verify_overload, self()},
-            receive
-                true ->
-                    true;
-                _ ->
-                    false
-            end
-    end.
- -list_keys(Node) -> - Pid = rt:pbc(Node, [{auto_reconnect, true}, {queue_if_disconnected, true}]), - Res = riakc_pb_socket:list_keys(Pid, {<<"normal_type">>, ?BUCKET}, infinity), - riakc_pb_socket:stop(Pid), - Res. - -list_buckets(Node) -> - {ok, C} = riak:client_connect(Node), - riak_client:list_buckets(30000, C). - -wait_for_all_vnode_queues_empty(Node) -> - rt:wait_until(Node, fun(N) -> - vnode_queues_empty(N) - end). - -vnode_queues_empty(Node) -> - rpc:call(Node, ?MODULE, remote_vnode_queues_empty, []). - -remote_vnode_queues_empty() -> - lists:all(fun({_, _, Pid}) -> - {message_queue_len, Len} = - process_info(Pid, message_queue_len), - Len =:= 0 - end, riak_core_vnode_manager:all_vnodes()). - -write_once(Node, {Bucket, Key, Value}) -> - lager:info("Writing to node ~p", [Node]), - PBC = rt:pbc(Node, [{auto_reconnect, true}, {queue_if_disconnected, true}]), - rt:pbc_write(PBC, Bucket, Key, Value), - riakc_pb_socket:stop(PBC). - -read_until_success(Node) -> - {ok, C} = riak:client_connect(Node), - read_until_success(C, 0). - -read_until_success(C, Count) -> - case C:get(?BUCKET, ?KEY) of - {error, mailbox_overload} -> - read_until_success(C, Count+1); - _ -> - Count - end. - -spawn_reads(Node, {Bucket, Key, _}, Num) -> - [spawn(fun() -> - PBC = rt:pbc(Node, - [{auto_reconnect, true}, - {queue_if_disconnected, true}]), - rt:wait_until(pb_get_fun(PBC, Bucket, Key), ?GET_RETRIES, ?GET_RETRIES), - %pb_get(PBC, Bucket, Key), - riakc_pb_socket:stop(PBC) - end) || _ <- lists:seq(1, Num)]. - -pb_get_fun(PBC, Bucket, Key) -> - fun() -> - case riakc_pb_socket:get(PBC, Bucket, Key) of - {error, <<"overload">>} -> -% lager:info("overload detected in pb_get, continuing..."), - true; - {error, Type} -> - lager:error("riakc_pb_socket failed with ~p, retrying...", [Type]), - false; - {ok, _Res} -> -% lager:info("riakc_pb_socket:get(~p, ~p, ~p) succeeded, Res:~p", [PBC, Bucket, Key, Res]), - true - end - end. - -pb_get(PBC, Bucket, Key) -> - case riakc_pb_socket:get(PBC, Bucket, Key) of - {error, <<"overload">>} -> - lager:info("overload detected in pb_get, continuing..."); - {error, Type} -> - lager:error("riakc_pb_socket failed with ~p, retrying...", [Type]), - pb_get(PBC, Bucket, Key); - {ok, Res} -> - lager:info("riakc_pb_socket:get(~p, ~p, ~p) succeeded, Res:~p", [PBC, Bucket, Key, Res]) - end. - -kill_pids(Pids) -> - [exit(Pid, kill) || Pid <- Pids]. - -suspend_and_overload_all_kv_vnodes(Node) -> - lager:info("Suspending vnodes on ~p", [Node]), - Pid = rpc:call(Node, ?MODULE, remote_suspend_and_overload, []), - Pid ! {overload, self()}, - receive overloaded -> - Pid - end, - rt:wait_until(node_overload_check(Pid)), - Pid. - -remote_suspend_and_overload() -> - spawn(fun() -> - Vnodes = riak_core_vnode_manager:all_vnodes(), - [begin - lager:info("Suspending vnode pid: ~p~n", [Pid]), - erlang:suspend_process(Pid, []) - end || {riak_kv_vnode, _, Pid} <- Vnodes], - ?MODULE:wait_for_input(Vnodes) - end). - -wait_for_input(Vnodes) -> - receive - {overload, From} -> - [?MODULE:overload(Pid) || - {riak_kv_vnode, _, Pid} <- Vnodes], - From ! overloaded, - wait_for_input(Vnodes); - {verify_overload, From} -> - OverloadCheck = ?MODULE:verify_overload(Vnodes), - From ! OverloadCheck, - wait_for_input(Vnodes); - resume -> - lager:info("Resuming vnodes~n"), - [erlang:resume_process(Pid) || {riak_kv_vnode, _, Pid} - <- Vnodes] - end. 
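The suspension machinery in this module (remote_suspend_and_overload/0 and wait_for_input/1 above, remote_suspend_vnode/1 and remote_suspend_vnode_proxy/1 below) all follows one pattern: a janitor process suspends the target with erlang:suspend_process/2 and resumes it when told to. A minimal standalone version of that pattern (sketch, independent of this module; the janitor must run on the same node as the target):

    suspend_until_resumed(Pid) ->
        spawn(fun() ->
                      erlang:suspend_process(Pid, []),
                      receive resume -> erlang:resume_process(Pid) end
              end).
    %% Usage: Janitor = suspend_until_resumed(Victim), ..., Janitor ! resume.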
- -verify_overload(Vnodes) -> - MessageLists = [element(2, process_info(Pid, messages)) || - {riak_kv_vnode, _, Pid} <- Vnodes], - OverloadMsgCounts = lists:foldl(fun overload_msg_counter/2, [], MessageLists), - lists:all(fun(X) -> X >= ?NUM_REQUESTS end, OverloadMsgCounts). - -overload_msg_counter(Messages, Acc) -> - Count = lists:foldl(fun count_overload_messages/2, 0, Messages), - [Count | Acc]. - -count_overload_messages(Message, Count) -> - case Message of - {set_concurrency_limit, some_lock, 1} -> - Count + 1; - _ -> - Count - end. - -overload(Pid) -> - %% The actual message doesn't matter. This one just has the least - %% side effects. - [Pid ! {set_concurrency_limit, some_lock, 1} || - _ <- lists:seq(1, ?NUM_REQUESTS)]. - -suspend_vnode({Idx, Node}) -> - suspend_vnode(Node, Idx). - -suspend_vnode(Node, Idx) -> - rpc:call(Node, ?MODULE, remote_suspend_vnode, [Idx], infinity). - -remote_suspend_vnode(Idx) -> - spawn(fun() -> - {ok, Pid} = riak_core_vnode_manager:get_vnode_pid(Idx, riak_kv_vnode), - lager:info("Suspending vnode pid: ~p", [Pid]), - erlang:suspend_process(Pid, []), - receive resume -> - erlang:resume_process(Pid) - end - end). - -suspend_vnode_proxy({Idx, Node}) -> - suspend_vnode_proxy(Node, Idx). - -suspend_vnode_proxy(Node, Idx) -> - rpc:call(Node, ?MODULE, remote_suspend_vnode_proxy, [Idx], infinity). - -remote_suspend_vnode_proxy(Idx) -> - spawn(fun() -> - Name = riak_core_vnode_proxy:reg_name(riak_kv_vnode, Idx), - Pid = whereis(Name), - erlang:suspend_process(Pid, []), - receive resume -> - erlang:resume_process(Pid) - end - end). - -resume_all_vnodes(Pid) -> - Pid ! resume. - -resume_vnode(Pid) -> - Pid ! resume. - -process_count(Node) -> - rpc:call(Node, erlang, system_info, [process_count]). - -vnode_queue_len({Idx, Node}) -> - vnode_queue_len(Node, Idx). - -vnode_queue_len(Node, Idx) -> - rpc:call(Node, ?MODULE, remote_vnode_queue, [Idx]). - -dropped_stat(Node) -> - Stats = rpc:call(Node, riak_core_stat, get_stats, []), - proplists:get_value(dropped_vnode_requests_total, Stats). - -get_fsm_active_stat(Node) -> - Stats = rpc:call(Node, riak_kv_stat, get_stats, []), - proplists:get_value(node_get_fsm_active, Stats). - -run_count(Node) -> - timer:sleep(500), - lager:info("fsm count:~p", [get_num_running_gen_fsm(Node)]), - run_count(Node). - -run_queue_len({Idx, Node}) -> - timer:sleep(500), - Len = vnode_queue_len(Node, Idx), - lager:info("queue len on ~p is:~p", [Node, Len]), - run_queue_len({Idx, Node}). - -get_num_running_gen_fsm(Node) -> - Procs = rpc:call(Node, erlang, processes, []), - ProcInfo = [ rpc:call(Node, erlang, process_info, [P]) || P <- Procs, P /= undefined ], - - InitCalls = [ [ proplists:get_value(initial_call, Proc) ] || Proc <- ProcInfo, Proc /= undefined ], - FsmList = [ proplists:lookup(riak_kv_get_fsm, Call) || Call <- InitCalls ], - length(proplists:lookup_all(riak_kv_get_fsm, FsmList)). - -remote_vnode_queue(Idx) -> - {ok, Pid} = riak_core_vnode_manager:get_vnode_pid(Idx, riak_kv_vnode), - {message_queue_len, Len} = process_info(Pid, message_queue_len), - Len. - -%% In tests that do not expect work to be shed, we want to confirm that -%% at least ?NUM_REQUESTS (processes|queue entries) are handled. -build_predicate_gte(Test, Metric, Label, ValueLabel) -> - fun (X) -> - lager:info("in test ~p ~p, ~p:~p, expected no overload, Metric:>=~p", - [Test, Label, ValueLabel, X, Metric]), - X >= Metric - end. 
-%% In tests that expect work to be shed due to overload, the success
-%% condition is simply that the number of (fsms|queue entries) is
-%% less than ?NUM_REQUESTS.
-build_predicate_lt(Test, Metric, Label, ValueLabel) ->
-    fun (X) ->
-            lager:info("in test ~p ~p, ~p:~p, expected overload, Metric:<~p",
-                       [Test, Label, ValueLabel, X, Metric]),
-            X < Metric
-    end.
diff --git a/tests/partition_repair.erl b/tests/partition_repair.erl
deleted file mode 100644
index 62ba5806c..000000000
--- a/tests/partition_repair.erl
+++ /dev/null
@@ -1,337 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(partition_repair).
--compile(export_all).
--include_lib("eunit/include/eunit.hrl").
-
--behavior(riak_test).
--export([confirm/0]).
-
--define(FMT(S, L), lists:flatten(io_lib:format(S, L))).
-
-%% @doc This test verifies that partition repair successfully repairs
-%% all data after it has been wiped out by a simulated disk crash.
-confirm() ->
-    SpamDir = rt_config:config_or_os_env(spam_dir),
-    RingSize = list_to_integer(rt_config:config_or_os_env(ring_size, "16")),
-    NVal = rt_config:config_or_os_env(n_val, undefined),
-    TestMetaData = riak_test_runner:metadata(),
-    KVBackend = proplists:get_value(backend, TestMetaData),
-
-    NumNodes = rt_config:config_or_os_env(num_nodes, 4),
-    HOConcurrency = rt_config:config_or_os_env(ho_concurrency, 2),
-    {_KVBackendMod, KVDataDir} = backend_mod_dir(KVBackend),
-    Bucket = <<"scotts_spam">>,
-
-    lager:info("Build a cluster"),
-    lager:info("riak_search enabled: true"),
-    lager:info("ring_creation_size: ~p", [RingSize]),
-    lager:info("n_val: ~p", [NVal]),
-    lager:info("num nodes: ~p", [NumNodes]),
-    lager:info("riak_core handoff_concurrency: ~p", [HOConcurrency]),
-    lager:info("riak_core vnode_management_timer 1000"),
-    Conf = [
-            {riak_core,
-             [
-              {ring_creation_size, RingSize},
-              {handoff_manager_timeout, 1000},
-              {vnode_management_timer, 1000},
-              {handoff_concurrency, HOConcurrency}
-             ]},
-            {riak_search,
-             [
-              {enabled, true}
-             ]},
-            %% @TODO This is only to test whether the test failure happens
-            %% without AAE. The AAE errors found in the logs could be unrelated
-            {riak_kv,
-             [
-              {anti_entropy, {off, []}}
-             ]
-            }
-            %% {lager,
-            %%  [{handlers,
-            %%    [{lager_file_backend,
-            %%      [{"./log/console.log",debug,10485760,"$D0",5}]}]}]}
-           ],
-
-    Nodes = rt:build_cluster(NumNodes, Conf),
-
-    case NVal of
-        undefined ->
-            ok;
-        _ ->
-            lager:info("Set n_val to ~p", [NVal]),
-            set_search_schema_nval(Bucket, NVal)
-    end,
-
-    lager:info("Enable search hook"),
-    rt:enable_search_hook(hd(Nodes), Bucket),
-
-    lager:info("Insert Scott's spam emails"),
-    Pbc = rt:pbc(hd(Nodes)),
-    rt:pbc_put_dir(Pbc, Bucket, SpamDir),
-
-    lager:info("Stash ITFs for each partition"),
-    %% @todo Should riak_test guarantee that the scratch pad is clean instead?
-    ?assertCmd("rm -rf " ++ base_stash_path()),
-    %% need to load the module so riak can see the fold fun
-    rt:load_modules_on_nodes([?MODULE], Nodes),
-    Ring = rt:get_ring(hd(Nodes)),
-    Owners = riak_core_ring:all_owners(Ring),
-    [stash_data(riak_search, Owner) || Owner <- Owners],
-
-    lager:info("Stash KV data for each partition"),
-    [stash_data(riak_kv, Owner) || Owner <- Owners],
-
-    lager:info("Emulate data loss for riak_search, repair, verify correct data"),
-    [kill_repair_verify(Owner, "merge_index", riak_search) || Owner <- Owners],
-
-    %% TODO: parameterize backend
-    lager:info("Emulate data loss for riak_kv, repair, verify correct data"),
-    [kill_repair_verify(Owner, KVDataDir, riak_kv) || Owner <- Owners],
-
-    lager:info("TEST PASSED"),
-    pass.
-
-kill_repair_verify({Partition, Node}, DataSuffix, Service) ->
-    StashPath = stash_path(Service, Partition),
-    {ok, [Stash]} = file:consult(StashPath),
-    ExpectToVerify = dict:size(Stash),
-    VNodeName = list_to_atom(atom_to_list(Service) ++ "_vnode"),
-
-    %% kill the partition data
-    Path = DataSuffix ++ "/" ++ integer_to_list(Partition),
-    lager:info("Killing data for ~p on ~p at ~s", [Partition, Node, Path]),
-    rt:clean_data_dir([Node], Path),
-
-    %% force restart of vnode since some data is kept in memory
-    lager:info("Restarting ~p vnode for ~p on ~p", [Service, Partition, Node]),
-    {ok, Pid} = rpc:call(Node, riak_core_vnode_manager, get_vnode_pid,
-                         [Partition, VNodeName]),
-    ?assert(rpc:call(Node, erlang, exit, [Pid, kill_for_test])),
-
-    rt:wait_until(Node, fun(N) -> not(rpc:call(N, erlang, is_process_alive, [Pid])) end),
-
-    lager:info("Verify data is missing"),
-    ?assertEqual(0, count_data(Service, {Partition, Node})),
-
-    %% repair the partition, ignore return for now
-    lager:info("Invoking repair for ~p on ~p", [Partition, Node]),
-    %% TODO: Don't ignore return, check version of Riak and if greater
-    %% or equal to 1.x then expect OK.
-    Return =
-        case Service of
-            riak_kv ->
-                rpc:call(Node, riak_kv_vnode, repair, [Partition]);
-            riak_search ->
-                rpc:call(Node, riak_search_vnode, repair, [Partition])
-        end,
-
-    %% Kill sending vnode to verify HO sender is killed
-    %% {ok, [{KPart, KNode}|_]} = Return,
-    %% {ok, NewPid} = rpc:call(KNode, riak_core_vnode_manager, get_vnode_pid,
-    %%                         [KPart, VNodeName]),
-    %% lager:info("killing src pid: ~p/~p ~p", [KNode, KPart, NewPid]),
-    %% KR = rpc:call(KNode, erlang, exit, [NewPid, kill]),
-    %% lager:info("result of kill: ~p", [KR]),
-    %% timer:sleep(1000),
-    %% ?assertNot(rpc:call(KNode, erlang, is_process_alive, [NewPid])),
-
-
-    lager:info("return value of repair_index ~p", [Return]),
-    lager:info("Wait for repair to finish"),
-    wait_for_repair(Service, {Partition, Node}, 30),
-
-    lager:info("Verify ~p on ~p is fully repaired", [Partition, Node]),
-    Data2 = get_data(Service, {Partition, Node}),
-    {Verified, NotFound} = dict:fold(verify(Service, Data2), {0, []}, Stash),
-    case NotFound of
-        [] -> ok;
-        _ ->
-            NF = StashPath ++ ".notfound",
-            lager:info("Some data not found, writing that to ~s", [NF]),
-            ?assertEqual(ok, file:write_file(NF, io_lib:format("~p.", [NotFound])))
-    end,
-    %% NOTE: If the following assert fails then check the .notfound
-    %% file written above...it contains all postings that were in the
-    %% stash that weren't found after the repair.
- ?assertEqual({Service, ExpectToVerify}, {Service, Verified}), - - {ok, [{BeforeP, _BeforeOwner}=B, _, {AfterP, _AfterOwner}=A]} = Return, - lager:info("Verify before src partition ~p still has data", [B]), - StashPathB = stash_path(Service, BeforeP), - {ok, [StashB]} = file:consult(StashPathB), - ExpectToVerifyB = dict:size(StashB), - BeforeData = get_data(Service, B), - {VerifiedB, NotFoundB} = dict:fold(verify(Service, BeforeData), {0, []}, StashB), - case NotFoundB of - [] -> ok; - _ -> - NFB = StashPathB ++ ".notfound", - ?assertEqual(ok, file:write_file(NFB, io_lib:format("~p.", [NotFoundB]))), - throw({src_partition_missing_data, NFB}) - end, - ?assertEqual(ExpectToVerifyB, VerifiedB), - - lager:info("Verify after src partition ~p still has data", [A]), - StashPathA = stash_path(Service, AfterP), - {ok, [StashA]} = file:consult(StashPathA), - ExpectToVerifyA = dict:size(StashA), - AfterData = get_data(Service, A), - {VerifiedA, NotFoundA} = dict:fold(verify(Service, AfterData), {0, []}, StashA), - case NotFoundA of - [] -> ok; - _ -> - NFA = StashPathA ++ ".notfound", - ?assertEqual(ok, file:write_file(NFA, io_lib:format("~p.", [NotFoundA]))), - throw({src_partition_missing_data, NFA}) - end, - ?assertEqual(ExpectToVerifyA, VerifiedA). - - -verify(riak_kv, DataAfterRepair) -> - fun(BKey, StashedValue, {Verified, NotFound}) -> - StashedData={BKey, StashedValue}, - case dict:find(BKey, DataAfterRepair) of - error -> {Verified, [StashedData|NotFound]}; - {ok, Value} -> - if Value == StashedValue -> {Verified+1, NotFound}; - true -> {Verified, [StashedData|NotFound]} - end - end - end; - -verify(riak_search, PostingsAfterRepair) -> - fun(IFT, StashedPostings, {Verified, NotFound}) -> - StashedPosting={IFT, StashedPostings}, - case dict:find(IFT, PostingsAfterRepair) of - error -> {Verified, [StashedPosting|NotFound]}; - {ok, RepairedPostings} -> - case lists:all(fun is_true/1, - [lists:member(P, RepairedPostings) - || P <- StashedPostings]) of - true -> {Verified+1, NotFound}; - false -> {Verified, [StashedPosting|NotFound]} - end - end - end. - -is_true(X) -> - X == true. - -count_data(Service, {Partition, Node}) -> - dict:size(get_data(Service, {Partition, Node})). - -get_data(Service, {Partition, Node}) -> - VMaster = - case Service of - riak_kv -> riak_kv_vnode_master; - riak_search -> riak_search_vnode_master - end, - %% TODO: add compile time support for riak_test - Req = - case Service of - riak_kv -> - {riak_core_fold_req_v1, fun stash_kv/3, dict:new()}; - riak_search -> - {riak_core_fold_req_v1, fun stash_search/3, dict:new()} - end, - Data = riak_core_vnode_master:sync_command({Partition, Node}, - Req, - VMaster, - rt_config:get(rt_max_wait_time)), - Data. - -stash_data(Service, {Partition, Node}) -> - File = stash_path(Service, Partition), - ?assertEqual(ok, filelib:ensure_dir(File)), - lager:info("Stashing ~p/~p at ~p to ~p", [Service, Partition, Node, File]), - Postings = get_data(Service, {Partition, Node}), - ?assertEqual(ok, file:write_file(File, io_lib:format("~p.", [Postings]))). - -stash_kv(Key, Value, Stash) -> - dict:store(Key, Value, Stash). - -stash_search({_I,{_F,_T}}=K, _Postings=V, Stash) -> - dict:append_list(K, V, Stash). - -base_stash_path() -> - rt_config:get(rt_scratch_dir) ++ "/dev/data_stash/". - -stash_path(Service, Partition) -> - base_stash_path() ++ atom_to_list(Service) ++ "/" ++ integer_to_list(Partition) ++ ".stash". - -file_list(Dir) -> - filelib:wildcard(Dir ++ "/*"). 
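%% [Editor's note - tiny self-contained illustration, not part of the
%% patch.] verify/2 above returns a fold fun over the stashed dict,
%% counting entries whose value survived the repair and collecting the
%% ones that did not:
demo_verify() ->
    Stash = dict:from_list([{k1, v1}, {k2, v2}]),
    AfterRepair = dict:from_list([{k1, v1}]),   %% k2 was "lost"
    {Verified, NotFound} =
        dict:fold(verify(riak_kv, AfterRepair), {0, []}, Stash),
    {1, [{k2, v2}]} = {Verified, NotFound}.
%% [End editor's note]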
-
-wait_for_repair(_, _, 0) ->
-    throw(wait_for_repair_max_tries);
-wait_for_repair(Service, {Partition, Node}, Tries) ->
-    Reply =
-        case Service of
-            riak_kv ->
-                rpc:call(Node, riak_kv_vnode, repair_status, [Partition]);
-            riak_search ->
-                rpc:call(Node, riak_search_vnode, repair_status, [Partition])
-        end,
-    case Reply of
-        not_found -> ok;
-        in_progress ->
-            timer:sleep(timer:seconds(1)),
-            wait_for_repair(Service, {Partition, Node}, Tries - 1)
-    end.
-
-data_path(Node, Suffix, Partition) ->
-    [Name, _] = string:tokens(atom_to_list(Node), "@"),
-    Base = rt_config:get('rtdev_path.current') ++ "/dev/" ++ Name ++ "/data",
-    Base ++ "/" ++ Suffix ++ "/" ++ integer_to_list(Partition).
-
-backend_mod_dir(undefined) ->
-    %% riak_test defaults to bitcask when undefined
-    backend_mod_dir(bitcask);
-backend_mod_dir(bitcask) ->
-    {riak_kv_bitcask_backend, "bitcask"};
-backend_mod_dir(eleveldb) ->
-    {riak_kv_eleveldb_backend, "leveldb"}.
-
-
--spec set_search_schema_nval(binary(), pos_integer()) -> ok.
-set_search_schema_nval(Bucket, NVal) ->
-    %% TODO: Search currently offers no easy way to programmatically
-    %% change a schema and save it. This is because the external and
-    %% internal formats of the schema are different. The parser reads
-    %% the external format and an internal representation is created
-    %% which is then stored/accessed via `riak_search_config'. Rather
-    %% than allowing the internal format to be modified and set you
-    %% must send the update in the external format.
-    BucketStr = binary_to_list(Bucket),
-    SearchCmd = ?FMT("~s/dev/dev1/bin/search-cmd", [rt_config:get('rtdev_path.current')]),
-    GetSchema = ?FMT("~s show-schema ~s > current-schema",
-                     [SearchCmd, BucketStr]),
-    ModifyNVal = ?FMT("sed -E 's/n_val, [0-9]+/n_val, ~s/' "
-                      "current-schema > new-schema",
-                      [NVal]),
-    SetSchema = ?FMT("~s set-schema ~s new-schema", [SearchCmd, BucketStr]),
-    ClearCache = ?FMT("~s clear-schema-cache", [SearchCmd]),
-    ?assertCmd(GetSchema),
-    ?assertCmd(ModifyNVal),
-    ?assertCmd(SetSchema),
-    ?assertCmd(ClearCache).
diff --git a/tests/pb_cipher_suites.erl b/tests/pb_cipher_suites.erl
deleted file mode 100644
index 5618c89e2..000000000
--- a/tests/pb_cipher_suites.erl
+++ /dev/null
@@ -1,227 +0,0 @@
--module(pb_cipher_suites).
-
--behavior(riak_test).
--export([confirm/0]).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("riakc/include/riakc.hrl").
-
--define(assertDenied(Op), ?assertMatch({error, <<"Permission",_/binary>>}, Op)).
-
-confirm() ->
-    application:start(crypto),
-    application:start(asn1),
-    application:start(public_key),
-    application:start(ssl),
-    application:start(inets),
-
-    CertDir = rt_config:get(rt_scratch_dir) ++ "/pb_cipher_suites_certs",
-
-    %% make a bunch of crypto keys
-    make_certs:rootCA(CertDir, "rootCA"),
-    make_certs:intermediateCA(CertDir, "intCA", "rootCA"),
-    make_certs:intermediateCA(CertDir, "revokedCA", "rootCA"),
-    make_certs:endusers(CertDir, "intCA", ["site1.basho.com", "site2.basho.com"]),
-    make_certs:endusers(CertDir, "rootCA", ["site3.basho.com", "site4.basho.com", "site5.basho.com"]),
-    make_certs:enduser(CertDir, "revokedCA", "site6.basho.com"),
-    make_certs:revoke(CertDir, "rootCA", "site5.basho.com"),
-    make_certs:revoke(CertDir, "rootCA", "revokedCA"),
-
-    %% start an HTTP server to serve the CRLs
-    %%
-    %% NB: we use the 'stand_alone' option to link the server to the
-    %% test process, so it exits when the test process exits.
-    {ok, _HTTPPid} = inets:start(httpd, [{port, 8000}, {server_name, "localhost"},
-                                         {server_root, "/tmp"},
-                                         {document_root, CertDir},
-                                         {modules, [mod_get]}], stand_alone),
-
-    lager:info("Deploy some nodes"),
-    Conf = [{riak_core, [
-                {ssl, [
-                    {certfile, filename:join([CertDir,"site3.basho.com/cert.pem"])},
-                    {keyfile, filename:join([CertDir, "site3.basho.com/key.pem"])},
-                    {cacertfile, filename:join([CertDir, "site3.basho.com/cacerts.pem"])}
-                    ]}
-                ]},
-            {riak_search, [
-                           {enabled, true}
-                          ]}
-           ],
-
-    Nodes = rt:build_cluster(4, Conf),
-    Node = hd(Nodes),
-    %% enable security on the cluster
-    ok = rpc:call(Node, riak_core_console, security_enable, [[]]),
-
-
-    [_, {pb, {"127.0.0.1", Port}}] = rt:connection_info(Node),
-
-    lager:info("Creating user"),
-    %% grant the user credentials
-    ok = rpc:call(Node, riak_core_console, add_user, [["user", "password=password"]]),
-
-    lager:info("Setting password mode on user"),
-    %% require password on localhost
-    ok = rpc:call(Node, riak_core_console, add_source, [["user", "127.0.0.1/32",
-                                                         "password"]]),
-
-    CipherList = "AES256-SHA256:RC4-SHA",
-    %% set a simple default cipher list, one good one and one shitty one
-    rpc:call(Node, riak_core_security, set_ciphers,
-             [CipherList]),
-
-    [AES, RC4] = ParsedCiphers = [begin
-                %% this includes the pseudo random function, which apparently
-                %% we don't want
-                {A, B, C, _D} = ssl_cipher:suite_definition(E),
-                {A, B, C}
-            end ||
-            E <- element(1,
-                         riak_core_ssl_util:parse_ciphers(CipherList))],
-
-    lager:info("Check that the server's preference for AES256-SHA256"
-               " is honored"),
-    ?assertEqual({ok, {'tlsv1.2', AES}},
-                 pb_connection_info(Port,
-                                    [{credentials, "user",
-                                      "password"}, {cacertfile,
-                                                    filename:join([CertDir,
-                                                                   "rootCA/cert.pem"])},
-                                     {ssl_opts, [{ciphers,
-                                                  lists:reverse(ParsedCiphers)}]}
-                                    ])),
-
-    lager:info("disabling honor_cipher_order"),
-    rpc:call(Node, application, set_env, [riak_api, honor_cipher_order,
-                                          false]),
-
-    lager:info("Check that the client's preference for RC4-SHA"
-               " is honored"),
-    ?assertEqual({ok, {'tlsv1.2', RC4}},
-                 pb_connection_info(Port,
-                                    [{credentials, "user",
-                                      "password"}, {cacertfile,
-                                                    filename:join([CertDir,
                                                                   "rootCA/cert.pem"])},
-                                     {ssl_opts, [{ciphers,
-                                                  lists:reverse(ParsedCiphers)}]}
-                                    ])),
-
-    lager:info("check that connections trying to use tls 1.1 fail"),
-    ?assertError({badmatch, _},
-                 pb_connection_info(Port,
-                                    [{credentials, "user",
-                                      "password"}, {cacertfile,
-                                                    filename:join([CertDir,
-                                                                   "rootCA/cert.pem"])},
-                                     {ssl_opts, [{versions, ['tlsv1.1']}]}
-                                    ])),
-
-    lager:info("check that connections trying to use tls 1.0 fail"),
-    ?assertError({badmatch, _},
-                 pb_connection_info(Port,
-                                    [{credentials, "user",
-                                      "password"}, {cacertfile,
-                                                    filename:join([CertDir,
-                                                                   "rootCA/cert.pem"])},
-                                     {ssl_opts, [{versions, ['tlsv1']}]}
-                                    ])),
-    lager:info("check that connections trying to use ssl 3.0 fail"),
-    ?assertError({badmatch, _},
-                 pb_connection_info(Port,
-                                    [{credentials, "user",
-                                      "password"}, {cacertfile,
-                                                    filename:join([CertDir,
-                                                                   "rootCA/cert.pem"])},
-                                     {ssl_opts, [{versions, ['sslv3']}]}
-                                    ])),
-
-    lager:info("Enable ssl 3.0, tls 1.0 and tls 1.1 and disable tls 1.2"),
-    rpc:call(Node, application, set_env, [riak_api, tls_protocols,
-                                          [sslv3, tlsv1, 'tlsv1.1']]),
-
-    lager:info("check that connections trying to use tls 1.2 fail"),
-    ?assertError({badmatch, _},
-                 pb_connection_info(Port,
-                                    [{credentials, "user",
-                                      "password"}, {cacertfile,
-                                                    filename:join([CertDir,
-                                                                   "rootCA/cert.pem"])},
-                                     {ssl_opts, [{versions, ['tlsv1.2']}]}
-                                    ])),
-
-    
lager:info("check tls 1.1 works"), - ?assertMatch({ok, {'tlsv1.1', _}}, - pb_connection_info(Port, - [{credentials, "user", - "password"}, {cacertfile, - filename:join([CertDir, - "rootCA/cert.pem"])}, - {ssl_opts, [{versions, ['tlsv1.1']}]} - ])), - - lager:info("check tls 1.0 works"), - ?assertMatch({ok, {'tlsv1', _}}, - pb_connection_info(Port, - [{credentials, "user", - "password"}, {cacertfile, - filename:join([CertDir, - "rootCA/cert.pem"])}, - {ssl_opts, [{versions, ['tlsv1']}]} - ])), - - lager:info("Reset tls protocols back to the default"), - rpc:call(Node, application, set_env, [riak_api, tls_protocols, - ['tlsv1.2']]), - - lager:info("checking CRLs are checked for client certificates by" - " default"), - - ok = rpc:call(Node, riak_core_console, add_user, [["site5.basho.com"]]), - - %% require certificate auth on localhost - ok = rpc:call(Node, riak_core_console, add_source, [["site5.basho.com", - "127.0.0.1/32", - "certificate"]]), - - lager:info("Checking revoked certificates are denied"), - ?assertMatch({error, {tcp, _Reason}}, riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "site5.basho.com", - "password"}, - {cacertfile, filename:join([CertDir, "rootCA/cert.pem"])}, - {certfile, filename:join([CertDir, "site5.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, "site5.basho.com/key.pem"])} - ])), - - lager:info("Disable CRL checking"), - rpc:call(Node, application, set_env, [riak_api, check_crl, - false]), - - lager:info("Checking revoked certificates are allowed"), - {ok, PB} = riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "site5.basho.com", - ""}, - {cacertfile, filename:join([CertDir, "rootCA/cert.pem"])}, - {certfile, filename:join([CertDir, "site5.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, "site5.basho.com/key.pem"])} - ]), - ?assertEqual(pong, riakc_pb_socket:ping(PB)), - riakc_pb_socket:stop(PB), - pass. - -pb_get_socket(PB) -> - %% XXX this peeks into the pb_socket internal state and plucks out the - %% socket. If the internal representation ever changes, this will break. - element(6, sys:get_state(PB)). - -pb_connection_info(Port, Config) -> - {ok, PB} = riakc_pb_socket:start("127.0.0.1", Port, Config), - ?assertEqual(pong, riakc_pb_socket:ping(PB)), - - ConnInfo = ssl:connection_info(pb_get_socket(PB)), - - riakc_pb_socket:stop(PB), - ConnInfo. - - diff --git a/tests/pb_security.erl b/tests/pb_security.erl deleted file mode 100644 index 3b06d1d7a..000000000 --- a/tests/pb_security.erl +++ /dev/null @@ -1,819 +0,0 @@ --module(pb_security). - --behavior(riak_test). --export([confirm/0]). - --export([map_object_value/3, reduce_set_union/2, mapred_modfun_input/3]). - --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --define(assertDenied(Op), ?assertMatch({error, <<"Permission",_/binary>>}, Op)). 
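%% [Editor's note - illustration, not part of the patch.] The macro
%% above simply pattern-matches the error riak returns on a failed
%% permission check, so
%%     ?assertDenied(riakc_pb_socket:get(PB, <<"b">>, <<"k">>))
%% expands to
%%     ?assertMatch({error, <<"Permission", _/binary>>},
%%                  riakc_pb_socket:get(PB, <<"b">>, <<"k">>)).
%% [End editor's note]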
-
-confirm() ->
-    application:start(crypto),
-    application:start(asn1),
-    application:start(public_key),
-    application:start(ssl),
-    application:start(inets),
-
-    CertDir = rt_config:get(rt_scratch_dir) ++ "/pb_security_certs",
-
-    %% make a bunch of crypto keys
-    make_certs:rootCA(CertDir, "rootCA"),
-    make_certs:intermediateCA(CertDir, "intCA", "rootCA"),
-    make_certs:intermediateCA(CertDir, "revokedCA", "rootCA"),
-    make_certs:endusers(CertDir, "intCA", ["site1.basho.com", "site2.basho.com"]),
-    make_certs:endusers(CertDir, "rootCA", ["site3.basho.com", "site4.basho.com", "site5.basho.com"]),
-    make_certs:enduser(CertDir, "revokedCA", "site6.basho.com"),
-    make_certs:revoke(CertDir, "rootCA", "site5.basho.com"),
-    make_certs:revoke(CertDir, "rootCA", "revokedCA"),
-
-    %% use a leaf certificate as a CA certificate and make a totally bogus new leaf certificate
-    make_certs:create_ca_dir(CertDir, "site1.basho.com", make_certs:ca_cnf("site1.basho.com")),
-    file:copy(filename:join(CertDir, "site1.basho.com/key.pem"), filename:join(CertDir, "site1.basho.com/private/key.pem")),
-    make_certs:enduser(CertDir, "site1.basho.com", "site7.basho.com"),
-    file:copy(filename:join([CertDir, "site1.basho.com", "cacerts.pem"]), filename:join(CertDir, "site7.basho.com/cacerts.pem")),
-    {ok, Bin} = file:read_file(filename:join(CertDir, "site1.basho.com/cert.pem")),
-    {ok, FD} = file:open(filename:join(CertDir, "site7.basho.com/cacerts.pem"), [append]),
-    file:write(FD, ["\n", Bin]),
-    file:close(FD),
-    make_certs:gencrl(CertDir, "site1.basho.com"),
-
-    %% start an HTTP server to serve the CRLs
-    %%
-    %% NB: we use the 'stand_alone' option to link the server to the
-    %% test process, so it exits when the test process exits.
-    {ok, _HTTPPid} = inets:start(httpd, [{port, 8000}, {server_name, "localhost"},
-                                         {server_root, "/tmp"},
-                                         {document_root, CertDir},
-                                         {modules, [mod_get]}], stand_alone),
-
-    lager:info("Deploy some nodes"),
-    PrivDir = rt:priv_dir(),
-    Conf = [
-            {riak_core, [
-                         {default_bucket_props, [{allow_mult, true}]},
-                         {ssl, [
-                                {certfile, filename:join([CertDir,"site3.basho.com/cert.pem"])},
-                                {keyfile, filename:join([CertDir, "site3.basho.com/key.pem"])},
-                                {cacertfile, filename:join([CertDir, "site3.basho.com/cacerts.pem"])}
-                               ]}
-                        ]},
-            {riak_search, [
-                           {enabled, true}
-                          ]}
-           ],
-
-    MD = riak_test_runner:metadata(),
-    HaveIndexes = case proplists:get_value(backend, MD) of
-                      undefined -> false; %% default is da 'cask
-                      bitcask -> false;
-                      _ -> true
-                  end,
-
-    Nodes = rt:build_cluster(4, Conf),
-    Node = hd(Nodes),
-    %% enable security on the cluster
-    ok = rpc:call(Node, riak_core_console, security_enable, [[]]),
-
-    [_, {pb, {"127.0.0.1", Port}}] = rt:connection_info(Node),
-
-    lager:info("Checking non-SSL results in error"),
-    %% can connect without credentials, but not do anything
-    {ok, PB0} = riakc_pb_socket:start("127.0.0.1", Port,
-                                      []),
-    ?assertEqual({error, <<"Security is enabled, please STARTTLS first">>},
-                 riakc_pb_socket:ping(PB0)),
-
-    riakc_pb_socket:stop(PB0),
-
-    %% Hindi in Devanagari : हिन्दी
-    Username = [2361,2367,2344,2381,2342,2368],
-    UsernameBin = unicode:characters_to_binary(Username, utf8, utf8),
-
-    lager:info("Checking SSL requires peer cert validation"),
-    %% can't connect without specifying cacert to validate the server
-    ?assertMatch({error, _}, riakc_pb_socket:start("127.0.0.1", Port,
-                                                   [{credentials, UsernameBin,
-                                                     "pass"}])),
-
-    lager:info("Checking that authentication is required"),
-    %% invalid credentials should be invalid
-    ?assertEqual({error, {tcp,
<<"Authentication failed">>}}, riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, UsernameBin, - "pass"}, {cacertfile, - filename:join([CertDir, "rootCA/cert.pem"])}])), - - lager:info("Creating user"), - %% grant the user credentials - ok = rpc:call(Node, riak_core_console, add_user, [[Username, "password=password"]]), - - lager:info("Setting trust mode on user"), - %% trust 'user' on localhost - ok = rpc:call(Node, riak_core_console, add_source, [[Username, "127.0.0.1/32", - "trust"]]), - - lager:info("Checking that credentials are ignored in trust mode"), - %% invalid credentials should be ignored in trust mode - {ok, PB1} = riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, UsernameBin, - "pass"}, {cacertfile, - filename:join([CertDir, "rootCA/cert.pem"])}]), - ?assertEqual(pong, riakc_pb_socket:ping(PB1)), - riakc_pb_socket:stop(PB1), - - lager:info("Setting password mode on user"), - %% require password on localhost - ok = rpc:call(Node, riak_core_console, add_source, [[Username, "127.0.0.1/32", - "password"]]), - - lager:info("Checking that incorrect password fails auth"), - %% invalid credentials should be invalid - ?assertEqual({error, {tcp, <<"Authentication failed">>}}, riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, UsernameBin, - "pass"}, {cacertfile, - filename:join([CertDir, "rootCA/cert.pem"])}])), - - lager:info("Checking that correct password is successful"), - %% valid credentials should be valid - {ok, PB2} = riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, UsernameBin, - "password"}, {cacertfile, - filename:join([CertDir, "rootCA/cert.pem"])}]), - ?assertEqual(pong, riakc_pb_socket:ping(PB2)), - riakc_pb_socket:stop(PB2), - - lager:info("Creating a certificate-authenticated user"), - %% grant the user credential - ok = rpc:call(Node, riak_core_console, add_user, [["site4.basho.com"]]), - - %% require certificate auth on localhost - ok = rpc:call(Node, riak_core_console, add_source, [["site4.basho.com", - "127.0.0.1/32", - "certificate"]]), - - lager:info("Checking certificate authentication"), - %% valid credentials should be valid - {ok, PB3} = riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "site4.basho.com", - "password"}, - {cacertfile, filename:join([CertDir, "site4.basho.com/cacerts.pem"])}, - {certfile, filename:join([CertDir, "site4.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, "site4.basho.com/key.pem"])} - ]), - ?assertEqual(pong, riakc_pb_socket:ping(PB3)), - riakc_pb_socket:stop(PB3), - - lager:info("Creating another cert-auth user"), - %% grant the user credential - ok = rpc:call(Node, riak_core_console, add_user, [["site5.basho.com"]]), - - %% require certificate auth on localhost - ok = rpc:call(Node, riak_core_console, add_source, [["site5.basho.com", - "127.0.0.1/32", - "certificate"]]), - - lager:info("Checking auth with mismatched user/cert fails"), - %% authing with mismatched user should fail - ?assertEqual({error, {tcp, <<"Authentication failed">>}}, riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "site5.basho.com", - "password"}, - {cacertfile, filename:join([CertDir, "rootCA/cert.pem"])}, - {certfile, filename:join([CertDir, "site4.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, "site4.basho.com/key.pem"])} - ])), - - lager:info("Checking revoked certificates are denied"), - ?assertMatch({error, {tcp, _Reason}}, riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "site5.basho.com", - "password"}, - {cacertfile, filename:join([CertDir, "rootCA/cert.pem"])}, - 
{certfile, filename:join([CertDir, "site5.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, "site5.basho.com/key.pem"])} - ])), - - lager:info("Checking auth with non-peer certificate fails"), - %% authing with non-peer certificate should fail - ?assertMatch({error, {tcp, _Reason}}, riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "site5.basho.com", - "password"}, - {cacertfile, filename:join([PrivDir, - "certs/CA/rootCA/cert.pem"])}, - {certfile, filename:join([PrivDir, - "certs/cacert.org/ca-cert.pem"])}, - {keyfile, filename:join([PrivDir, - "certs/cacert.org/ca-key.pem"])} - ])), - - lager:info("cert from intermediate CA should work"), - %% grant the user credential - ok = rpc:call(Node, riak_core_console, add_user, [["site1.basho.com"]]), - - %% require certificate auth on localhost - ok = rpc:call(Node, riak_core_console, add_source, [["site1.basho.com", - "127.0.0.1/32", - "certificate"]]), - - {ok, PB4} = riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "site1.basho.com", "password"}, - {cacertfile, filename:join([CertDir, "site1.basho.com/cacerts.pem"])}, - {certfile, filename:join([CertDir, "site1.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, "site1.basho.com/key.pem"])} - ]), - - ?assertEqual(pong, riakc_pb_socket:ping(PB4)), - riakc_pb_socket:stop(PB4), - - lager:info("checking certificates from a revoked CA are denied"), - %% grant the user credential - ok = rpc:call(Node, riak_core_console, add_user, [["site6.basho.com"]]), - - %% require certificate auth on localhost - ok = rpc:call(Node, riak_core_console, add_source, [["site6.basho.com", - "127.0.0.1/32", - "certificate"]]), - - ?assertMatch({error, {tcp, _Reason}}, riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "site6.basho.com", "password"}, - {cacertfile, filename:join([CertDir, "site6.basho.com/cacerts.pem"])}, - {certfile, filename:join([CertDir, "site6.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, "site6.basho.com/key.pem"])} - ])), - - lager:info("checking a certificate signed by a leaf CA is not honored"), - %% grant the user credential - ok = rpc:call(Node, riak_core_console, add_user, [["site7.basho.com"]]), - - %% require certificate auth on localhost - ok = rpc:call(Node, riak_core_console, add_source, [["site7.basho.com", - "127.0.0.1/32", - "certificate"]]), - - ?assertMatch({error, {tcp, _Reason}}, riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "site7.basho.com", "password"}, - {cacertfile, filename:join([CertDir, "site7.basho.com/cacerts.pem"])}, - {certfile, filename:join([CertDir, "site7.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, "site7.basho.com/key.pem"])} - ])), - - %% time to actually do some stuff - {ok, PB} = riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, UsernameBin, "password"}, - {cacertfile, - filename:join([CertDir, "rootCA/cert.pem"])}]), - ?assertEqual(pong, riakc_pb_socket:ping(PB)), - - lager:info("verifying that user cannot get/put without grants"), - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:get(PB, - <<"hello">>, - <<"world">>)), - - lager:info("Granting riak_kv.get, checking get works but put doesn't"), - grant(Node, ["riak_kv.get", "on", "default", "hello", "to", Username]), - - ?assertMatch({error, notfound}, riakc_pb_socket:get(PB, <<"hello">>, - <<"world">>)), - - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:put(PB, - riakc_obj:new(<<"hello">>, <<"world">>, - <<"howareyou">>))), - - lager:info("Granting riak_kv.put, checking put 
works and roundtrips with get"), - grant(Node, ["riak_kv.put", "on", "default", "hello", "to", Username]), - - ?assertEqual(ok, - riakc_pb_socket:put(PB, - riakc_obj:new(<<"hello">>, <<"world">>, - <<"1">>, "application/json"))), - - ?assertMatch({ok, _Obj}, riakc_pb_socket:get(PB, <<"hello">>, - <<"world">>)), - - %% 1.4 counters - %% - grant(Node, ["riak_kv.put,riak_kv.get", "on", "default", "counters", "to", Username]), - %% ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.put,riak_kv.get", "on", - %% "default", "counters", "to", Username]]), - - - lager:info("Checking that counters work on resources that have get/put permitted"), - ?assertEqual({error, notfound}, riakc_pb_socket:counter_val(PB, - <<"counters">>, - <<"numberofpies">>)), - ok = riakc_pb_socket:counter_incr(PB, <<"counters">>, - <<"numberofpies">>, 5), - ?assertEqual({ok, 5}, riakc_pb_socket:counter_val(PB, <<"counters">>, - <<"numberofpies">>)), - - lager:info("Revoking get, checking that counter_val fails"), - %% revoke get - ok = rpc:call(Node, riak_core_console, revoke, - [["riak_kv.get", "on", "default", "counters", "from", Username]]), - - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:counter_val(PB, <<"counters">>, - <<"numberofpies">>)), - ok = riakc_pb_socket:counter_incr(PB, <<"counters">>, - <<"numberofpies">>, 5), - - lager:info("Revoking put, checking that counter_incr fails"), - %% revoke put - ok = rpc:call(Node, riak_core_console, revoke, - [["riak_kv.put", "on", "default", "counters", "from", Username]]), - - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:counter_incr(PB, <<"counters">>, - <<"numberofpies">>, 5)), - - - lager:info("Revoking get/put, checking that get/put are disallowed"), - ok = rpc:call(Node, riak_core_console, revoke, [["riak_kv.get,riak_kv.put", "on", - "default", "hello", "from", Username]]), - - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:get(PB, - <<"hello">>, - <<"world">>)), - - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:put(PB, - riakc_obj:new(<<"hello">>, <<"world">>, - <<"howareyou">>))), - - %% try the 'any' grant - lager:info("Granting get on ANY, checking user can fetch any bucket/key"), - grant(Node, ["riak_kv.get", "on", "any", "to", Username]), - - ?assertMatch({ok, _Obj}, riakc_pb_socket:get(PB, <<"hello">>, - <<"world">>)), - - lager:info("Revoking ANY permission, checking fetch fails"), - ok = rpc:call(Node, riak_core_console, revoke, [["riak_kv.get", "on", - "any", "from", Username]]), - - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:get(PB, - <<"hello">>, - <<"world">>)), - - %% list keys - lager:info("Checking that list keys is disallowed"), - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:list_keys(PB, <<"hello">>)), - - lager:info("Granting riak_kv.list_keys, checking that list_keys succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.list_keys", "on", - "default", "hello", "to", Username]]), - - ?assertMatch({ok, [<<"world">>]}, riakc_pb_socket:list_keys(PB, <<"hello">>)), - - lager:info("Checking that list buckets is disallowed"), - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:list_buckets(PB)), - - lager:info("Granting riak_kv.list_buckets, checking that list_buckets succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.list_buckets", "on", - "default", "to", Username]]), - - {ok, BList} = riakc_pb_socket:list_buckets(PB), - ?assertEqual([<<"counters">>, <<"hello">>], 
                 lists:sort(BList)),
-
-    %% still need mapreduce permission
-    lager:info("Checking that full-bucket mapred is disallowed"),
-    ?assertMatch({error, <<"Permission", _/binary>>},
-                 riakc_pb_socket:mapred_bucket(PB, <<"hello">>,
-                                               [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false},
-                                                {reduce, {jsfun,
-                                                          <<"Riak.reduceSum">>},
-                                                 undefined, true}])),
-
-    lager:info("Granting mapreduce, checking that job succeeds"),
-    ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.mapreduce", "on",
-                                                    "default", "to", Username]]),
-
-    ?assertEqual({ok, [{1, [1]}]},
-                 riakc_pb_socket:mapred_bucket(PB, <<"hello">>,
-                                               [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false},
-                                                {reduce, {jsfun,
-                                                          <<"Riak.reduceSum">>},
-                                                 undefined, true}])),
-
-    lager:info("checking mapreduce with a whitelisted modfun works"),
-    ?assertEqual({ok, [{1, [<<"1">>]}]},
-                 riakc_pb_socket:mapred_bucket(PB, <<"hello">>,
-                                               [{map, {modfun, riak_kv_mapreduce,
-                                                       map_object_value}, undefined, false},
-                                                {reduce, {modfun,
-                                                          riak_kv_mapreduce,
-                                                          reduce_set_union},
-                                                 undefined, true}])),
-
-    %% load this module on all the nodes
-    ok = rt:load_modules_on_nodes([?MODULE], Nodes),
-
-    lager:info("checking mapreduce with an insecure modfun input fails"),
-    ?assertMatch({error, <<"{inputs,{insecure_module_path",_/binary>>},
-                 riakc_pb_socket:mapred_bucket(PB, {modfun, ?MODULE,
-                                                    mapred_modfun_input, []},
-                                               [{map, {modfun, riak_kv_mapreduce,
-                                                       map_object_value}, undefined, false},
-                                                {reduce, {modfun,
-                                                          riak_kv_mapreduce,
-                                                          reduce_set_union},
-                                                 undefined, true}])),
-
-    lager:info("checking mapreduce with an insecure modfun phase fails"),
-    ?assertMatch({error, <<"{query,{insecure_module_path",_/binary>>},
-                 riakc_pb_socket:mapred_bucket(PB, <<"hello">>,
-                                               [{map, {modfun, ?MODULE,
-                                                       map_object_value}, undefined, false},
-                                                {reduce, {modfun,
-                                                          ?MODULE,
-                                                          reduce_set_union},
-                                                 undefined, true}])),
-
-    lager:info("whitelisting module path"),
-    {?MODULE, _ModBin, ModFile} = code:get_object_code(?MODULE),
-    ok = rpc:call(Node, application, set_env, [riak_kv, add_paths, [filename:dirname(ModFile)]]),
-
-    lager:info("checking mapreduce with an insecure modfun input fails when"
-               " whitelisted but lacking permissions"),
-    ?assertMatch({error, <<"Permission",_/binary>>},
-                 riakc_pb_socket:mapred_bucket(PB, {modfun, ?MODULE,
-                                                    mapred_modfun_input, []},
-                                               [{map, {modfun, riak_kv_mapreduce,
-                                                       map_object_value}, undefined, false},
-                                                {reduce, {modfun,
-                                                          riak_kv_mapreduce,
-                                                          reduce_set_union},
-                                                 undefined, true}])),
-
-    ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.mapreduce", "on",
-                                                    "any", "to", Username]]),
-    ?assertEqual({ok, [{1, [<<"1">>]}]},
-                 riakc_pb_socket:mapred_bucket(PB, {modfun, ?MODULE,
-                                                    mapred_modfun_input, []},
-                                               [{map, {modfun, riak_kv_mapreduce,
-                                                       map_object_value}, undefined, false},
-                                                {reduce, {modfun,
-                                                          riak_kv_mapreduce,
-                                                          reduce_set_union},
-                                                 undefined, true}])),
-
-    ok = rpc:call(Node, riak_core_console, revoke, [["riak_kv.mapreduce", "on",
-                                                    "any", "from", Username]]),
-
-    lager:info("checking mapreduce with an insecure modfun phase works when"
-               " whitelisted"),
-    ?assertEqual({ok, [{1, [<<"1">>]}]},
-                 riakc_pb_socket:mapred_bucket(PB, <<"hello">>,
-                                               [{map, {modfun, ?MODULE,
-                                                       map_object_value}, undefined, false},
-                                                {reduce, {modfun,
-                                                          ?MODULE,
-                                                          reduce_set_union},
-                                                 undefined, true}])),
-
-
-
-    lager:info("link walking should fail with a deprecation error"),
-    ?assertMatch({error, _}, riakc_pb_socket:mapred(PB, [{<<"lists">>, <<"mine">>}],
-                                                    [{link, <<"items">>, '_', true}])),
-
-    %% revoke only the list_keys permission
- lager:info("Revoking list-keys, checking that full-bucket mapred fails"), - ok = rpc:call(Node, riak_core_console, revoke, [["riak_kv.list_keys", "on", - "default", "hello", "from", Username]]), - - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:mapred_bucket(PB, <<"hello">>, - [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false}, - {reduce, {jsfun, - <<"Riak.reduceSum">>}, - undefined, true}])), - - case HaveIndexes of - false -> ok; - true -> - %% 2i permission test - lager:info("Checking 2i is disallowed"), - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:get_index(PB, <<"hello">>, - {binary_index, - "name"}, - <<"John">>)), - - lager:info("Granting 2i permissions, checking that results come back"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.index", "on", - "default", "to", Username]]), - - %% don't actually have any indexes - ?assertMatch({ok, ?INDEX_RESULTS{keys=[]}}, - riakc_pb_socket:get_index(PB, <<"hello">>, - {binary_index, - "name"}, - <<"John">>)), - ok - end, - - %% get/set bprops - lager:info("Checking that get_bucket is disallowed"), - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:get_bucket(PB, <<"mybucket">>)), - - lager:info("Granting riak_core.get_bucket, checking that get_bucket succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_core.get_bucket", "on", - "default", "mybucket", "to", Username]]), - - ?assertEqual(3, proplists:get_value(n_val, element(2, - riakc_pb_socket:get_bucket(PB, - <<"mybucket">>)))), - - lager:info("Checking that set_bucket is disallowed"), - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:set_bucket(PB, <<"mybucket">>, [{n_val, 5}])), - - lager:info("Granting set_bucket, checking that set_bucket succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_core.set_bucket", "on", - "default", "mybucket", "to", Username]]), - ?assertEqual(ok, - riakc_pb_socket:set_bucket(PB, <<"mybucket">>, [{n_val, 5}])), - - ?assertEqual(5, proplists:get_value(n_val, element(2, - riakc_pb_socket:get_bucket(PB, - <<"mybucket">>)))), - - %%%%%%%%%%%% - %%% bucket type tests - %%%%%%%%%%%% - - %% create a new type - rt:create_and_activate_bucket_type(Node, <<"mytype">>, [{n_val, 3}]), - rt:wait_until_bucket_type_status(<<"mytype">>, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, <<"mytype">>), - - lager:info("Checking that get on a new bucket type is disallowed"), - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:get(PB, - {<<"mytype">>, - <<"hello">>}, - <<"world">>)), - - lager:info("Granting get on the new bucket type, checking that it succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.get", "on", - "mytype", "hello", "to", Username]]), - - ?assertMatch({error, notfound}, riakc_pb_socket:get(PB, {<<"mytype">>, - <<"hello">>}, - <<"world">>)), - - lager:info("Checking that permisisons are unchanged on the default bucket type"), - %% can't read from the default bucket, though - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:get(PB, - <<"hello">>, - <<"world">>)), - - lager:info("Checking that put on the new bucket type is disallowed"), - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:put(PB, - riakc_obj:new({<<"mytype">>, <<"hello">>}, <<"world">>, - <<"howareyou">>))), - - lager:info("Granting put on a bucket in the new bucket type, checking that it succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.put", "on", - "mytype", 
"hello", "to", Username]]), - - ?assertEqual(ok, - riakc_pb_socket:put(PB, - riakc_obj:new({<<"mytype">>, <<"hello">>}, <<"world">>, - <<"howareyou">>))), - - ?assertEqual(ok, - riakc_pb_socket:put(PB, - riakc_obj:new({<<"mytype">>, - <<"hello">>}, <<"drnick">>, - <<"Hi, everybody">>))), - - ?assertMatch({ok, _Obj}, riakc_pb_socket:get(PB, {<<"mytype">>, - <<"hello">>}, - <<"world">>)), - - lager:info("Revoking get/put on the new bucket type, checking that they fail"), - ok = rpc:call(Node, riak_core_console, revoke, [["riak_kv.get,riak_kv.put", "on", - "mytype", "hello", "from", Username]]), - - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:get(PB, - {<<"mytype">>, - <<"hello">>}, - <<"world">>)), - - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:put(PB, - riakc_obj:new({<<"mytype">>, <<"hello">>}, <<"world">>, - <<"howareyou">>))), - - lager:info("Checking that list keys is disallowed on the new bucket type"), - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:list_keys(PB, {<<"mytype">>, <<"hello">>})), - - lager:info("Granting list keys on a bucket in the new type, checking that it works"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.list_keys", "on", - "mytype", "hello", "to", Username]]), - - ?assertEqual([<<"drnick">>, <<"world">>], lists:sort(element(2, riakc_pb_socket:list_keys(PB, - {<<"mytype">>, - <<"hello">>})))), - - lager:info("Creating another bucket type"), - %% create a new type - rt:create_and_activate_bucket_type(Node, <<"mytype2">>, [{allow_mult, true}]), - rt:wait_until_bucket_type_status(<<"mytype2">>, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, <<"mytype2">>), - - lager:info("Checking that get on the new type is disallowed"), - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:get(PB, - {<<"mytype2">>, - <<"hello">>}, - <<"world">>)), - - lager:info("Granting get/put on all buckets in the new type, checking that get/put works"), - %% do a wildcard grant - ok = rpc:call(Node, riak_core_console, grant, - [["riak_kv.get,riak_kv.put", "on", - "mytype2", "to", Username]]), - - ?assertMatch({error, notfound}, riakc_pb_socket:get(PB, {<<"mytype2">>, - <<"hello">>}, - <<"world">>)), - - ?assertEqual(ok, - riakc_pb_socket:put(PB, - riakc_obj:new({<<"mytype2">>, - <<"embiggen">>}, - <<"the noblest heart">>, - <<"true">>))), - - ?assertEqual(ok, - riakc_pb_socket:put(PB, - riakc_obj:new({<<"mytype2">>, - <<"cromulent">>}, - <<"perfectly">>, - <<"true">>))), - - lager:info("Checking that list buckets is disallowed on the new type"), - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:list_buckets(PB, <<"mytype2">>)), - - lager:info("Granting list buckets on the new type, checking that it succeeds"), - ok = rpc:call(Node, riak_core_console, grant, [["riak_kv.list_buckets", "on", - "mytype2", "to", Username]]), - - ?assertMatch([<<"cromulent">>, <<"embiggen">>], lists:sort(element(2, - riakc_pb_socket:list_buckets(PB, - <<"mytype2">>)))), - - - %% get/set bucket type props - - lager:info("Checking that get/set bucket-type properties are disallowed"), - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:set_bucket_type(PB, <<"mytype2">>, - [{n_val, 5}])), - - ?assertMatch({error, <<"Permission", _/binary>>}, - riakc_pb_socket:get_bucket_type(PB, <<"mytype2">>)), - - lager:info("Granting get on bucket type props, checking it succeeds and put still fails"), - ok = rpc:call(Node, riak_core_console, grant, - [["riak_core.get_bucket_type", "on", 
"mytype2", "to", Username]]), - - ?assertEqual(3, proplists:get_value(n_val, - element(2, riakc_pb_socket:get_bucket_type(PB, - <<"mytype2">>)))), - - ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:set_bucket_type(PB, <<"mytype2">>, - [{n_val, 5}])), - - lager:info("Granting set on bucket type props, checking it succeeds"), - ok = rpc:call(Node, riak_core_console, grant, - [["riak_core.set_bucket_type", "on", "mytype2", "to", Username]]), - - riakc_pb_socket:set_bucket_type(PB, <<"mytype2">>, [{n_val, 5}]), - - ?assertEqual(5, proplists:get_value(n_val, - element(2, riakc_pb_socket:get_bucket_type(PB, - <<"mytype2">>)))), - - riakc_pb_socket:set_bucket_type(PB, <<"mytype2">>, [{n_val, 3}]), - - crdt_tests(Nodes, PB), - - riakc_pb_socket:stop(PB), - - group_test(Node, Port, CertDir). - -group_test(Node, Port, CertDir) -> - %%%%%%%%%%%%%%%% - %% test groups - %%%%%%%%%%%%%%%% - - lager:info("Creating a new group"), - %% create a new group - ok = rpc:call(Node, riak_core_console, add_group, [["group"]]), - - lager:info("Creating a user in the group"), - %% create a new user in that group - ok = rpc:call(Node, riak_core_console, add_user, [["myuser", "groups=group"]]), - - - lager:info("Granting get/put/delete on a bucket type to the group, checking those requests work"), - - %% do a wildcard grant - grant(Node,["riak_kv.get,riak_kv.put,riak_kv.delete", "on", "mytype2", - "to", "group"]), - - %% trust 'myuser' on localhost - ok = rpc:call(Node, riak_core_console, add_source, [["myuser", "127.0.0.1/32", - "trust"]]), - - {ok, PB} = riakc_pb_socket:start("127.0.0.1", Port, - [{credentials, "myuser", "password"}, - {cacertfile, - filename:join([CertDir, "rootCA/cert.pem"])}]), - - ?assertMatch({error, notfound}, (riakc_pb_socket:get(PB, {<<"mytype2">>, - <<"hello">>}, - <<"world">>))), - ?assertEqual(ok, - (riakc_pb_socket:put(PB, - riakc_obj:new({<<"mytype2">>, <<"hello">>}, <<"world">>, - <<"howareyou">>)))), - - {ok, Obj} = riakc_pb_socket:get(PB, {<<"mytype2">>, - <<"hello">>}, - <<"world">>), - riakc_pb_socket:delete_obj(PB, Obj), - - ?assertMatch({error, notfound}, (riakc_pb_socket:get(PB, {<<"mytype2">>, - <<"hello">>}, - <<"world">>))), - - lager:info("riak search should not be running with security enabled"), - ?assertMatch({error, <<"Riak Search 1.0 is deprecated", _/binary>>}, - riakc_pb_socket:search(PB, <<"index">>, <<"foo:bar">>)), - - riakc_pb_socket:stop(PB), - pass. - -grant(Node, Args) -> - ok = rpc:call(Node, riak_core_console, grant, [Args]). 
- -crdt_tests([Node|_]=Nodes, PB) -> - Username = [2361,2367,2344,2381,2342,2368], - - %% rt:create_and_activate - lager:info("Creating bucket types for CRDTs"), - - Types = [{<<"counters">>, counter, riakc_counter:to_op(riakc_counter:increment(5, riakc_counter:new()))}, - {<<"sets">>, set, riakc_set:to_op(riakc_set:add_element(<<"foo">>, riakc_set:new()))}, - {<<"maps">>, map, riakc_map:to_op(riakc_map:update({<<"bar">>, counter}, fun(In) -> riakc_counter:increment(In) end, riakc_map:new()))}], - [ begin - rt:create_and_activate_bucket_type(Node, BType, [{allow_mult, true}, {datatype, DType}]), - rt:wait_until_bucket_type_status(BType, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, BType) - end || {BType, DType, _Op} <- Types ], - - lager:info("Checking that CRDT fetch is denied"), - - [ ?assertDenied(riakc_pb_socket:fetch_type(PB, {BType, <<"bucket">>}, <<"key">>)) - || {BType, _, _} <- Types ], - - lager:info("Granting CRDT riak_kv.get, checking that fetches succeed"), - - [ grant(Node, ["riak_kv.get", "on", binary_to_list(Type), "to", Username]) || {Type, _, _} <- Types ], - - [ ?assertEqual({error, {notfound, DType}}, - riakc_pb_socket:fetch_type(PB, {BType, <<"bucket">>}, <<"key">>)) || - {BType, DType, _} <- Types ], - - lager:info("Checking that CRDT update is denied"), - - [ ?assertDenied(riakc_pb_socket:update_type(PB, {BType, <<"bucket">>}, <<"key">>, Op)) - || {BType, _, Op} <- Types ], - - - lager:info("Granting CRDT riak_kv.put, checking that updates succeed"), - - [ grant(Node, ["riak_kv.put", "on", binary_to_list(Type), "to", Username]) || {Type, _, _} <- Types ], - - [ ?assertEqual(ok, riakc_pb_socket:update_type(PB, {BType, <<"bucket">>}, <<"key">>, Op)) - || {BType, _, Op} <- Types ], - - ok. - -map_object_value(RiakObject, A, B) -> - riak_kv_mapreduce:map_object_value(RiakObject, A, B). - -reduce_set_union(List, A) -> - riak_kv_mapreduce:reduce_set_union(List, A). - -mapred_modfun_input(Pipe, _Args, _Timeout) -> - riak_pipe:queue_work(Pipe, {{<<"hello">>, <<"world">>}, {struct, []}}), - riak_pipe:eoi(Pipe). diff --git a/tests/pipe_verify_basics.erl b/tests/pipe_verify_basics.erl deleted file mode 100644 index 67ea4f5e7..000000000 --- a/tests/pipe_verify_basics.erl +++ /dev/null @@ -1,138 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify some basic things about riak_pipe. -%% -%% Important: this test loads this module on each Riak node, such that -%% it can reference its functions in pipe workers. -%% -%% These tests used to be known as riak_pipe:basic_test_/0. - --module(pipe_verify_basics). - --export([ - %% riak_test's entry - confirm/0 - ]). - --include_lib("eunit/include/eunit.hrl"). - --define(NODE_COUNT, 3). - -%% local copy of riak_pipe.hrl --include("rt_pipe.hrl"). 
-
-confirm() ->
-    lager:info("Build ~b node cluster", [?NODE_COUNT]),
-    Nodes = rt:build_cluster(?NODE_COUNT),
-
-    [rt:wait_for_service(Node, riak_pipe) || Node <- Nodes],
-
-    rt:load_modules_on_nodes([?MODULE], Nodes),
-
-    verify_order(Nodes),
-    verify_trace_filtering(Nodes),
-    verify_recursive_countdown_1(Nodes),
-    verify_recursive_countdown_2(Nodes),
-
-    rt_pipe:assert_no_zombies(Nodes),
-
-    lager:info("~s: PASS", [atom_to_list(?MODULE)]),
-    pass.
-
-%% @doc generic driver used as a riak_pipe:generic_transform
-%% argument. Sends the input '1', then sends eoi.
-order_fun(Pipe) ->
-    ok = riak_pipe:queue_work(Pipe, 1),
-    riak_pipe:eoi(Pipe).
-
-%% @doc generic driver used as a riak_pipe:generic_transform
-%% argument. Causes a fitting to pass its input, multiplied by two,
-%% as output.
-mult_by_2(X) ->
-    2 * X.
-
-verify_order([RN|_]) ->
-    lager:info("Verifying fittings operate in order"),
-
-    AllLog = [{log, sink}, {trace, all}],
-    {eoi, Res, Trace} =
-        rpc:call(RN, riak_pipe, generic_transform,
-                 [fun mult_by_2/1, fun order_fun/1, AllLog, 5]),
-
-    ?assertMatch([{_, 32}], Res),
-    ?assertEqual(0, length(rt_pipe:extract_trace_errors(Trace))),
-    Qed = rt_pipe:extract_queued(Trace),
-    %% NOTE: The msg to the sink doesn't appear in Trace
-    ?assertEqual([1,2,4,8,16], [X || {_, X} <- Qed]).
-
-verify_trace_filtering([RN|_]) ->
-    lager:info("Verify that trace messages are filtered"),
-    {eoi, _Res, Trace1} =
-        rpc:call(RN, riak_pipe, generic_transform,
-                 [fun mult_by_2/1, fun order_fun/1,
-                  [{log,sink}, {trace, [eoi]}], 5]),
-    {eoi, _Res, Trace2} =
-        rpc:call(RN, riak_pipe, generic_transform,
-                 [fun mult_by_2/1, fun order_fun/1,
-                  [{log,sink}, {trace, all}], 5]),
-    %% Looking too deeply into the format of the trace
-    %% messages, since they haven't gelled yet, is madness.
-    ?assert(length(Trace1) < length(Trace2)).
-
-verify_recursive_countdown_1([RN|_]) ->
-    lager:info("Verify recurse_input"),
-    Spec = [#fitting_spec{name=counter,
-                          module=riak_pipe_w_rec_countdown}],
-    Opts = [{sink, rt_pipe:self_sink()}],
-    {ok, Pipe} = rpc:call(RN, riak_pipe, exec, [Spec, Opts]),
-    ok = rpc:call(RN, riak_pipe, queue_work, [Pipe, 3]),
-    riak_pipe:eoi(Pipe),
-    {eoi, Res, []} = riak_pipe:collect_results(Pipe),
-    ?assertEqual([{counter,0},{counter,1},{counter,2},{counter,3}], Res).
-
-verify_recursive_countdown_2([RN|_]) ->
-    lager:info("Verify nondeterministic recurse_input"),
-    verify_recursive_countdown_2(RN, 10).
-
-verify_recursive_countdown_2(RN, Retries) when Retries > 0 ->
-    Spec = [#fitting_spec{name=counter,
-                          module=riak_pipe_w_rec_countdown,
-                          arg=testeoi}],
-    Options = [{sink, rt_pipe:self_sink()},{trace,[restart]},{log,sink}],
-    {ok, Pipe} = rpc:call(RN, riak_pipe, exec, [Spec, Options]),
-    ok = rpc:call(RN, riak_pipe, queue_work, [Pipe, 3]),
-    riak_pipe:eoi(Pipe),
-    {eoi, Res, Trc} = riak_pipe:collect_results(Pipe),
-    ?assertEqual([{counter,0},{counter,0},{counter,0},
-                  {counter,1},{counter,2},{counter,3}],
-                 Res),
-    case Trc of
-        [{counter,{trace,[restart],{vnode,{restart,_}}}}] ->
-            ok;
-        [] ->
-            lager:info("recursive countdown test #2 did not"
-                       " trigger the done/eoi race it tests."
-                       " Retries left: ~b", [Retries-1]),
-            verify_recursive_countdown_2(RN, Retries-1)
-    end;
-verify_recursive_countdown_2(_, _) ->
-    lager:warning("recursive countdown test #2 did not"
-                  " trigger the done/eoi race it tests."
-                  " Consider re-running.").
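%% [Editor's note - worked arithmetic for the assertions in
%% verify_order/1 above, not part of the patch.] order_fun/1 feeds the
%% single input 1 into a pipe of five mult_by_2/1 fittings, so the
%% successive queued inputs are 1, 2, 4, 8, 16 and the sink receives
%% 2^5 = 32, matching ?assertMatch([{_, 32}], Res).
%% [End editor's note]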
diff --git a/tests/pipe_verify_examples.erl b/tests/pipe_verify_examples.erl deleted file mode 100644 index 79aeb07df..000000000 --- a/tests/pipe_verify_examples.erl +++ /dev/null @@ -1,60 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify that the riak_pipe example use functions work. - --module(pipe_verify_examples). - --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(NODE_COUNT, 3). - -confirm() -> - lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), - - [rt:wait_for_service(Node, riak_pipe) || Node <- Nodes], - - verify_example(Nodes), - verify_example_transform(Nodes), - verify_example_reduce(Nodes), - - rt_pipe:assert_no_zombies(Nodes), - - lager:info("~s: PASS", [atom_to_list(?MODULE)]), - pass. - -verify_example([RN|_]) -> - lager:info("Run riak_pipe:example/0"), - ?assertMatch({eoi, [{empty_pass, "hello"}], _Trc}, - rpc:call(RN, riak_pipe, example, [])). - -verify_example_transform([RN|_]) -> - lager:info("Run riak_pipe:example_transform/0"), - ?assertEqual({eoi, [{"generic transform", 55}], []}, - rpc:call(RN, riak_pipe, example_transform, [])). - -verify_example_reduce([RN|_]) -> - lager:info("Run riak_pipe:example_reduce/0"), - {eoi, Res, []} = rpc:call(RN, riak_pipe, example_reduce, []), - ?assertEqual([{"sum reduce", {a, [55]}}, - {"sum reduce", {b, [155]}}], - lists:sort(Res)). diff --git a/tests/pipe_verify_exceptions.erl b/tests/pipe_verify_exceptions.erl deleted file mode 100644 index fd0b42c59..000000000 --- a/tests/pipe_verify_exceptions.erl +++ /dev/null @@ -1,545 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify some exceptional cases in riak_pipe. -%% -%% Important: this test loads this module and {@link rt_pipe} on each -%% Riak node, such that it can reference their functions in pipe -%% workers. -%% -%% These tests used to be known as riak_pipe:exception_test_/0. - --module(pipe_verify_exceptions). - --export([ - %% riak_test's entry - confirm/0 - ]). 
- --include_lib("eunit/include/eunit.hrl"). - -%% local copy of riak_pipe.hrl --include("rt_pipe.hrl"). - --define(NODE_COUNT, 3). - --define(ERR_LOG, [{log, sink}, {trace, [error]}]). --define(ALL_LOG, [{log, sink}, {trace, all}]). - -%% @doc riak_test callback -confirm() -> - lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), - - [rt:wait_for_service(Node, riak_pipe) || Node <- Nodes], - - rt:load_modules_on_nodes([?MODULE, rt_pipe], Nodes), - - verify_xbad1(Nodes), - verify_xbad2(Nodes), - verify_tail_worker_crash(Nodes), - verify_vnode_crash(Nodes), - verify_head_fitting_crash(Nodes), - verify_middle_fitting_normal(Nodes), - verify_middle_fitting_crash(Nodes), - verify_tail_fitting_crash(Nodes), - verify_worker_init_exit(Nodes), - verify_worker_init_badreturn(Nodes), - verify_worker_limit_one(Nodes), - verify_worker_limit_multiple(Nodes), - verify_under_worker_limit_one(Nodes), - verify_queue_limit(Nodes), - verify_vnode_death(Nodes), - verify_restart_after_eoi(Nodes), - - rt_pipe:assert_no_zombies(Nodes), - - lager:info("~s: PASS", [atom_to_list(?MODULE)]), - pass. - -%%% TESTS - -xbad1(Pipe) -> - ok = riak_pipe:queue_work(Pipe, [1, 2, 3]), - ok = riak_pipe:queue_work(Pipe, [4, 5, 6]), - ok = riak_pipe:queue_work(Pipe, [7, 8, bummer]), - ok = riak_pipe:queue_work(Pipe, [10, 11, 12]), - riak_pipe:eoi(Pipe). - -verify_xbad1([RN|_]) -> - lager:info("Verify correct error message from worker (xbad1)"), - - {eoi, Res, Trace} = - rpc:call(RN, riak_pipe, generic_transform, - [fun lists:sum/1, fun xbad1/1, ?ERR_LOG, 1]), - - %% three of the workers will succeed (the ones that receive only - %% integers in their lists) - ?assertMatch([{_, 6}, {_, 15}, {_, 33}], lists:sort(Res)), - - %% the one that received the atom 'bummer' will fail with a - %% 'badarith' error - [{_, {trace, [error], {error, Ps}}}] = Trace, - ?assertEqual(error, proplists:get_value(type, Ps)), - ?assertEqual(badarith, proplists:get_value(error, Ps)), - ?assertEqual([7, 8, bummer], proplists:get_value(input, Ps)). - -xbad2(Pipe) -> - [ok = riak_pipe:queue_work(Pipe, N) || N <- lists:seq(0,2)], - ok = riak_pipe:queue_work(Pipe, 500), - exit({success_so_far, riak_pipe:collect_results(Pipe, 100)}). - -verify_xbad2([RN|_]) -> - lager:info("Verify work done before crashing without eoi (xbad2)"), - - %% we get a badrpc because the code exits, but it includes the - %% test data we want - {badrpc, {'EXIT', {success_so_far, {timeout, Res, Trace}}}} = - rpc:call(RN, riak_pipe, generic_transform, - [fun rt_pipe:decr_or_crash/1, fun xbad2/1, ?ERR_LOG, 3]), - - %% 3 fittings, send 0, 1, 2, 500; crash before eoi - ?assertMatch([{_, 497}], Res), - ?assertEqual(3, length(rt_pipe:extract_trace_errors(Trace))). - -tail_worker_crash(Pipe) -> - ok = riak_pipe:queue_work(Pipe, 100), - timer:sleep(100), - ok = riak_pipe:queue_work(Pipe, 1), - riak_pipe:eoi(Pipe). - -verify_tail_worker_crash([RN|_]) -> - lager:info("Verify work done before tail worker crash"), - - {eoi, Res, Trace} = - rpc:call(RN, riak_pipe, generic_transform, - [fun rt_pipe:decr_or_crash/1, - fun tail_worker_crash/1, - ?ERR_LOG, - 2]), - - %% first input is 100 - %% first worker decrements & passes on 99 - %% second worker decrements & passes on 98 - ?assertMatch([{_, 98}], Res), - - %% second input is 1 - %% first worker decrements & passes on 0 - %% second worker decrements & explodes - ?assertEqual(1, length(rt_pipe:extract_trace_errors(Trace))). 
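A note on the {badrpc, ...} matches above: drivers like xbad2/1 end by
exiting with {success_so_far, ...} instead of sending eoi, and rpc:call/4
reports a remote exit as {badrpc, {'EXIT', Reason}}, so the partial
results ride back inside the apparent failure. A hedged sketch of the
unwrapping, using only shapes that appear in this file:

    %% Collect whatever a deliberately-crashing driver gathered
    %% before it exited.
    partial_results(Node, MapFun, Driver, PipeLen) ->
        {badrpc, {'EXIT', {success_so_far, {timeout, Res, Trace}}}} =
            rpc:call(Node, riak_pipe, generic_transform,
                     [MapFun, Driver, [{log, sink}, {trace, [error]}],
                      PipeLen]),
        {Res, Trace}.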
- -vnode_crash(Pipe) -> - ok = riak_pipe:queue_work(Pipe, 100), - %% give time for input to be processed - timer:sleep(100), - rt_pipe:kill_all_pipe_vnodes(), - %% give time for vnodes to actually die - timer:sleep(100), - riak_pipe:eoi(Pipe). - -verify_vnode_crash([RN|_]) -> - lager:info("Verify eoi still flows through after vnodes crash"), - {eoi, Res, Trace} = - rpc:call(RN, riak_pipe, generic_transform, - [fun rt_pipe:decr_or_crash/1, - fun vnode_crash/1, - ?ERR_LOG, - 2]), - ?assertMatch([{_, 98}], Res), - ?assertEqual(0, length(rt_pipe:extract_trace_errors(Trace))). - -head_fitting_crash(Pipe) -> - ok = riak_pipe:queue_work(Pipe, [1, 2, 3]), - [{_, Head}|_] = Pipe#pipe.fittings, - rt_pipe:crash_fitting(Head), - {error, [worker_startup_failed]} = - riak_pipe:queue_work(Pipe, [4, 5, 6]), - %% Again, just for fun ... still fails - {error, [worker_startup_failed]} = - riak_pipe:queue_work(Pipe, [4, 5, 6]), - exit({success_so_far, riak_pipe:collect_results(Pipe, 100)}). - -verify_head_fitting_crash([RN|_]) -> - lager:info("Verify errors during head fitting crash"), - - %% we get a badrpc because the code exits, but it includes the - %% test data we want - {badrpc, {'EXIT', {success_so_far, {timeout, Res, Trace}}}} = - rpc:call(RN, riak_pipe, generic_transform, - [fun lists:sum/1, fun head_fitting_crash/1, ?ERR_LOG, 1]), - - %% the first input, [1,2,3], gets through - ?assertMatch([{_, 6}], Res), - %% but we get an exit trace, and no more outputs afterward - ?assertEqual(1, length(rt_pipe:extract_fitting_died_errors(Trace))). - -middle_fitting_normal(Pipe) -> - ok = riak_pipe:queue_work(Pipe, 20), - timer:sleep(100), - FittingPids = [ P || {_, #fitting{pid=P}} <- Pipe#pipe.fittings], - - %% exercise riak_pipe_fitting:workers/1. There's a single worker - %% on vnode 0, because riak_pipe:generic_transform uses - %% chash=riak_pipe:zero_fun - {ok,[0]} = riak_pipe_fitting:workers(hd(FittingPids)), - - %% send fitting bogus messages - fitting should ignore because - %% they're not known - gen_fsm:send_event(hd(FittingPids), bogus_message), - {error, unknown} = - gen_fsm:sync_send_event(hd(FittingPids), bogus_message), - gen_fsm:sync_send_all_state_event(hd(FittingPids), bogus_message), - hd(FittingPids) ! bogus_message, - - %% send bogus done message - fitting should ignore it because - %% 'asdf' is not a working vnode pid - [{_, Head}|_] = Pipe#pipe.fittings, - MyRef = Head#fitting.ref, - ok = gen_fsm:sync_send_event(hd(FittingPids), {done, MyRef, asdf}), - - %% kill fittings in the middle - Third = lists:nth(3, FittingPids), - rt_pipe:crash_fitting(Third, fun() -> exit(normal) end), - Fourth = lists:nth(4, FittingPids), - rt_pipe:crash_fitting(Fourth, fun() -> exit(normal) end), - - %% This message will be lost in the middle of the - %% pipe, but we'll be able to notice it via - %% extract_trace_errors/1. - ok = riak_pipe:queue_work(Pipe, 30), - exit({success_so_far, riak_pipe:collect_results(Pipe, 100)}). 
- -verify_middle_fitting_normal([RN|_]) -> - lager:info("Verify middle fitting normal"), - - {badrpc, {'EXIT', {success_so_far, {timeout, Res, Trace}}}} = - rpc:call(RN, riak_pipe, generic_transform, - [fun rt_pipe:decr_or_crash/1, - fun middle_fitting_normal/1, - ?ERR_LOG, - 5]), - - %% first input of 20 should have made it to the end, decremented - %% by 1 at each of 5 fittings - ?assertMatch([{_, 15}], Res), - - %% fittings 3 and 4 were killed - ?assertEqual(2, length(rt_pipe:extract_fitting_died_errors(Trace))), - - %% second input, of 30, should generate errors as it reaches the - %% killed third fitting - ?assertEqual(1, length(rt_pipe:extract_trace_errors(Trace))). - -middle_fitting_crash(Pipe) -> - ok = riak_pipe:queue_work(Pipe, 20), - %% wait for input to likely be processed - timer:sleep(100), - - %% watch the builder to avoid a race later - Builder = Pipe#pipe.builder, - BuilderMonitor = erlang:monitor(process, Builder), - - FittingPids = [ P || {_, #fitting{pid=P}} <- Pipe#pipe.fittings ], - Third = lists:nth(3, FittingPids), - rt_pipe:crash_fitting(Third), - Fourth = lists:nth(4, FittingPids), - rt_pipe:crash_fitting(Fourth), - - %% avoid racing w/pipeline shutdown - receive - {'DOWN', BuilderMonitor, process, Builder, _} -> ok - after 5000 -> - lager:warning("timed out waiting for builder to exit"), - demonitor(BuilderMonitor, [flush]) - end, - - %% first fitting should also be dead - {error,[worker_startup_failed]} = riak_pipe:queue_work(Pipe, 30), - %% this eoi will have no effect if the test is running correctly - riak_pipe:eoi(Pipe), - exit({success_so_far, riak_pipe:collect_results(Pipe, 100)}). - -verify_middle_fitting_crash([RN|_]) -> - lager:info("Verify pipe tears down when a fitting crashes (middle)"), - - {badrpc, {'EXIT', {success_so_far, {timeout, Res, Trace}}}} = - rpc:call(RN, riak_pipe, generic_transform, - [fun rt_pipe:decr_or_crash/1, - fun middle_fitting_crash/1, - ?ERR_LOG, - 5]), - - %% first input, 20, makes it through, decremented once at each of - %% five fittings - ?assertMatch([{_, 15}], Res), - - %% all fittings die because their peers die - ?assertEqual(5, length(rt_pipe:extract_fitting_died_errors(Trace))), - - %% no errors are generated, though, because the pipe is gone - %% before the second input is sent - ?assertEqual(0, length(rt_pipe:extract_trace_errors(Trace))). - -%% TODO: It isn't clear to me if TailFittingCrash is -%% really any different than MiddleFittingCrash. I'm -%% trying to exercise the patch in commit cb0447f3c46 -%% but am not having much luck. {sigh} -tail_fitting_crash(Pipe) -> - ok = riak_pipe:queue_work(Pipe, 20), - timer:sleep(100), - FittingPids = [ P || {_, #fitting{pid=P}} <- Pipe#pipe.fittings ], - Last = lists:last(FittingPids), - rt_pipe:crash_fitting(Last), - %% try to avoid racing w/pipeline shutdown - timer:sleep(100), - {error,[worker_startup_failed]} = riak_pipe:queue_work(Pipe, 30), - riak_pipe:eoi(Pipe), - exit({success_so_far, riak_pipe:collect_results(Pipe, 100)}). 
- -verify_tail_fitting_crash([RN|_]) -> - lager:info("Verify pipe tears down when a fitting crashes (tail)"), - - {badrpc, {'EXIT', {success_so_far, {timeout, Res, Trace}}}} = - rpc:call(RN, riak_pipe, generic_transform, - [fun rt_pipe:decr_or_crash/1, - fun tail_fitting_crash/1, - ?ERR_LOG, - 5]), - - %% first input, 20, makes it through, decremented once at each of - %% five fittings - ?assertMatch([{_, 15}], Res), - - %% all fittings die because their peers die - ?assertEqual(5, length(rt_pipe:extract_fitting_died_errors(Trace))), - - %% no errors are generated, though, because the pipe is gone - %% before the second input is sent - ?assertEqual(0, length(rt_pipe:extract_trace_errors(Trace))). - -verify_worker_init_exit([RN|_]) -> - lager:info("Verify error on worker startup failure (init_exit)"), - Spec = [#fitting_spec{name="init crash", - module=riak_pipe_w_crash, - arg=init_exit, - chashfun=follow}], - Opts = [{sink, rt_pipe:self_sink()}|?ERR_LOG], - {ok, Pipe} = rpc:call(RN, riak_pipe, exec, [Spec, Opts]), - {error, [worker_startup_failed]} = - rpc:call(RN, riak_pipe, queue_work, [Pipe, x]), - ok = riak_pipe:eoi(Pipe), - ?assertEqual({eoi, [], []}, riak_pipe:collect_results(Pipe)). - -verify_worker_init_badreturn([RN|_]) -> - lager:info("Verify error on worker startup failure (init_badreturn)"), - Spec = [#fitting_spec{name="init crash", - module=riak_pipe_w_crash, - arg=init_badreturn, - chashfun=follow}], - Opts = [{sink, rt_pipe:self_sink()}|?ERR_LOG], - {ok, Pipe} = rpc:call(RN, riak_pipe, exec, [Spec, Opts]), - {error, [worker_startup_failed]} = - rpc:call(RN, riak_pipe, queue_work, [Pipe, x]), - ok = riak_pipe:eoi(Pipe), - ?assertEqual({eoi, [], []}, riak_pipe:collect_results(Pipe)). - -send_1_100(Pipe) -> - ok = riak_pipe:queue_work(Pipe, 100), - %% Sleep so that we don't have workers being shutdown before - %% the above work item gets to the end of the pipe. - timer:sleep(100), - riak_pipe:eoi(Pipe). - -verify_worker_limit_one([RN|_]) -> - lager:info("Verify worker limit for one pipe"), - PipeLen = 90, - {eoi, Res, Trace} = - rpc:call(RN, riak_pipe, generic_transform, - [fun rt_pipe:decr_or_crash/1, - fun send_1_100/1, - ?ALL_LOG, - PipeLen]), - ?assertEqual([], Res), - Started = rt_pipe:extract_init_started(Trace), - ?assertEqual(PipeLen, length(Started)), - [Ps] = rt_pipe:extract_trace_errors(Trace), % exactly one error! - ?assertEqual({badmatch,{error,[worker_limit_reached]}}, - proplists:get_value(error, Ps)). 
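verify_worker_init_exit/1 and verify_worker_init_badreturn/1 use the
exec/queue/eoi/collect sequence that recurs throughout this suite; a
condensed sketch of that skeleton (the helper name is illustrative, the
calls are exactly the ones above):

    run_pipe_once(Node, Spec, Input) ->
        Opts = [{sink, rt_pipe:self_sink()}],
        {ok, Pipe} = rpc:call(Node, riak_pipe, exec, [Spec, Opts]),
        ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, Input]),
        riak_pipe:eoi(Pipe),
        %% returns {eoi, Results, Trace} once every fitting reports done
        riak_pipe:collect_results(Pipe, 1000).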
-
-verify_worker_limit_multiple([RN|_]) ->
-    lager:info("Verify worker limit for multiple pipes"),
-    PipeLen = 90,
-    Spec = lists:duplicate(
-             PipeLen,
-             #fitting_spec{name="worker limit mult pipes",
-                           module=riak_pipe_w_xform,
-                           arg=fun rt_pipe:xform_or_crash/3,
-                           %% force all workers onto one vnode
-                           chashfun={riak_pipe, zero_part}}),
-    {ok, Pipe1} = rpc:call(RN, riak_pipe, exec,
-                           [Spec, [{sink, rt_pipe:self_sink()}|?ALL_LOG]]),
-    {ok, Pipe2} = rpc:call(RN, riak_pipe, exec,
-                           [Spec, [{sink, rt_pipe:self_sink()}|?ALL_LOG]]),
-    ok = rpc:call(RN, riak_pipe, queue_work, [Pipe1, 100]),
-    %% plenty of time to start all workers
-    timer:sleep(100),
-    %% at the worker limit, we can't start even the first worker for Pipe2
-    ?assertEqual({error, [worker_limit_reached]},
-                 rpc:call(RN, riak_pipe, queue_work, [Pipe2, 100])),
-    {timeout, [], Trace1} = riak_pipe:collect_results(Pipe1, 500),
-    {timeout, [], Trace2} = riak_pipe:collect_results(Pipe2, 500),
-    %% exactly one error: the 65th worker will fail to start
-    ?assertMatch([_], rt_pipe:extract_trace_errors(Trace1)),
-    ?assertEqual([], rt_pipe:extract_queued(Trace2)),
-    %% cleanup before next test
-    riak_pipe:destroy(Pipe1),
-    riak_pipe:destroy(Pipe2).
-
-verify_under_worker_limit_one([RN|_]) ->
-    lager:info("Verify that many workers + many fittings still under limit"),
-
-    %% 20 * Ring size > worker limit, if indeed the worker
-    %% limit were enforced per node instead of per vnode.
-    PipeLen = 20,
-    Spec = lists:duplicate(
-             PipeLen,
-             #fitting_spec{name="foo",
-                           module=riak_pipe_w_xform,
-                           arg=fun rt_pipe:xform_or_crash/3}),
-    {ok, Pipe1} = rpc:call(RN, riak_pipe, exec,
-                           [Spec, [{sink, rt_pipe:self_sink()}|?ALL_LOG]]),
-    [ok = rpc:call(RN, riak_pipe, queue_work, [Pipe1, X]) ||
-        X <- lists:seq(101, 200)],
-    riak_pipe:eoi(Pipe1),
-    {eoi, Res, Trace1} = riak_pipe:collect_results(Pipe1, 500),
-    %% all inputs make it through
-    ?assertEqual(100, length(Res)),
-    %% no errors
-    ?assertEqual([], rt_pipe:extract_trace_errors(Trace1)).
-
-sleep1fun(X) ->
-    timer:sleep(1),
-    X.
-
-send_100_100(Pipe) ->
-    [ok = riak_pipe:queue_work(Pipe, 100) ||
-        _ <- lists:seq(1,100)],
-    %% Sleep so that we don't have workers being shut down before
-    %% the above work items get to the end of the pipe.
-    timer:sleep(100),
-    riak_pipe:eoi(Pipe).
-
-verify_queue_limit([RN|_]) ->
-    lager:info("Verify queue size limits are enforced"),
-    verify_queue_limit(RN, 10).
-
-verify_queue_limit(RN, Retries) when Retries > 0 ->
-    {eoi, Res, Trace} =
-        rpc:call(RN, riak_pipe, generic_transform,
-                 [fun sleep1fun/1,
-                  fun send_100_100/1,
-                  ?ALL_LOG, 1]),
-
-    %% all inputs make it through, after blocking
-    ?assertEqual(100, length(Res)),
-
-    %% we get as many unblocking messages as blocking messages
-    Full = length(rt_pipe:extract_queue_full(Trace)),
-    NoLongerFull = length(rt_pipe:extract_unblocking(Trace)),
-    ?assertEqual(Full, NoLongerFull),
-
-    %% Full is a count, so compare against 0 (not []), and actually
-    %% retry when the queues never filled
-    case Full of
-        0 ->
-            lager:info("Queues were never full; Retries left: ~b",
-                       [Retries-1]),
-            verify_queue_limit(RN, Retries-1);
-        _ ->
-            ok
-    end;
-verify_queue_limit(_, _) ->
-    lager:warning("Queues were never full; Consider re-running.").
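verify_queue_limit/2 exercises the blocking path: queue_work/2 simply
parks the caller while a worker queue is full, which is why the trace
carries paired queue_full/unblocking messages rather than errors. The
nonblocking variant makes the limit directly observable; a sketch,
assuming the queue_work/3 noblock shape used by fill_queue/4 in
pipe_verify_handoff_blocking later in this patch:

    %% Returns full | ok depending on whether the target queue had room.
    probe_queue(Node, Pipe, Input) ->
        case rpc:call(Node, riak_pipe, queue_work,
                      [Pipe, Input, noblock]) of
            {error, [timeout]} -> full;
            ok -> ok
        end.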
-
-verify_vnode_death([RN|_]) ->
-    lager:info("Verify a vnode death does not kill the pipe"),
-
-    {ok, Pipe} =
-        rpc:call(RN, riak_pipe, exec,
-                 [[#fitting_spec{name=vnode_death_test,
-                                 module=riak_pipe_w_crash}],
-                  [{sink, rt_pipe:self_sink()}]]),
-    %% this should kill the vnode such that it never
-    %% responds to the enqueue request
-    rpc:call(RN, riak_pipe, queue_work, [Pipe, vnode_killer]),
-    riak_pipe:eoi(Pipe),
-    {eoi, Res, []} = riak_pipe:collect_results(Pipe),
-    ?assertEqual([], Res).
-
-%% workers restarted because of recursive inputs should
-%% not increase the "fail" counter
-%%
-%% methodology: send an input to partition A and
-%% immediately send eoi; have A send a recursive input to
-%% partition B; have B send a recursive input to C;
-%% finally have C send a recursive input back to A
-%%
-%% this flow should give worker A time to start shutting
-%% down, but not to finish, resulting in an input in its
-%% queue after it completes its done/1 function
-verify_restart_after_eoi([RN|_]) ->
-    lager:info("Verify worker restart via recursive inputs after eoi"),
-
-    Inputs = [0, 1, 2, 0],
-    ChashFun = fun([Head|_]) ->
-                       chash:key_of(Head)
-               end,
-    Spec = [#fitting_spec{name=restarter,
-                          module=riak_pipe_w_crash,
-                          arg={recurse_done_pause, 500},
-                          chashfun=ChashFun}],
-
-    %% just make sure we are bouncing between partitions
-    {ok, R} = rpc:call(RN, riak_core_ring_manager, get_my_ring, []),
-    ?assert(riak_core_ring:preflist(
-              ChashFun(Inputs), R) /=
-                riak_core_ring:preflist(
-                  ChashFun(tl(Inputs)), R)),
-    ?assert(riak_core_ring:preflist(
-              ChashFun(Inputs), R) /=
-                riak_core_ring:preflist(
-                  ChashFun(tl(tl(Inputs))), R)),
-
-    {ok, Pipe} =
-        rpc:call(RN, riak_pipe, exec,
-                 [Spec,
-                  [{sink, rt_pipe:self_sink()},
-                   {log, sink},
-                   {trace, [error, done, restart]}]]),
-    ok = rpc:call(RN, riak_pipe, queue_work, [Pipe, Inputs]),
-    riak_pipe:eoi(Pipe),
-    {eoi, [], Trace} = riak_pipe:collect_results(Pipe),
-
-    %% no error traces -- the error will say
-    %% {reason, normal} if the worker received new
-    %% inputs while shutting down due to eoi
-    ?assertEqual([], rt_pipe:extract_trace_errors(Trace)),
-
-    %% A should have restarted, but not counted failure
-    [Restarted] = rt_pipe:extract_restart(Trace),
-    Dones = rt_pipe:extract_vnode_done(Trace),
-    RestartStats = proplists:get_value(Restarted, Dones),
-    ?assertEqual(0, proplists:get_value(failures, RestartStats)).
diff --git a/tests/pipe_verify_handoff.erl b/tests/pipe_verify_handoff.erl
deleted file mode 100644
index ac578e5a8..000000000
--- a/tests/pipe_verify_handoff.erl
+++ /dev/null
@@ -1,263 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%% @doc Verify handoff between riak_pipe vnodes.
-%%
-%% Important: this test loads this module and {@link rt_pipe} on each
-%% Riak node, such that it can reference their functions in pipe
-%% workers.
-%%
-%% This test is a largely rewritten version of riak_pipe:limits_test_/0.
-%%
-%% Strategy:
-%% 1. start one node
-%% 2. start pipes on that node
-%% 3. put inputs in the pipe
-%% 3a. use a worker that waits for a signal from the test process
-%%     before processing its input, so that we can ensure active
-%%     workers and queue contents
-%% 4. start and join second node
-%% 5. wait for agreement on owners
-%% 6. add more inputs, to start some workers on second node
-%% 7. give the signal to the workers to process things
-%% 8. count archive commands/etc.
-
--module(pipe_verify_handoff).
-
--export([
-         %% riak_test's entry
-         confirm/0,
-
-         %% test machinery
-         runner_wait/1,
-         collector/0
-        ]).
-
--include_lib("eunit/include/eunit.hrl").
-
-%% local copy of riak_pipe.hrl
--include("rt_pipe.hrl").
-
--define(NODE_COUNT, 2).
--define(ALL_LOG, [{log, sink}, {trace, all}]).
-
-%% @doc riak_test callback
-confirm() ->
-    lager:info("Start ~b nodes", [?NODE_COUNT]),
-    NodeDefs = lists:duplicate(?NODE_COUNT, {current, default}),
-    Services = [riak_pipe],
-    [Primary,Secondary] = Nodes = rt:deploy_nodes(NodeDefs, Services),
-    %% Ensure each node owns 100% of its own ring
-    [?assertEqual([Node], rt:owners_according_to(Node)) || Node <- Nodes],
-
-    lager:info("Load useful modules"),
-    rt:load_modules_on_nodes([?MODULE, rt_pipe], Nodes),
-
-    lager:info("Start run coordinator"),
-    Runner = spawn_link(?MODULE, runner_wait, [[]]),
-
-    P1Spec = [#fitting_spec{name="p1handoff",
-                            module=riak_pipe_w_xform,
-                            arg=pause_until_signal(Runner)}],
-    P2Spec = [#fitting_spec{name="p2handoff",
-                            module=riak_pipe_w_xform,
-                            arg=pause_until_signal(Runner)}],
-
-    lager:info("Start two pipes on Primary"),
-    {ok, Pipe1} =
-        rpc:call(Primary, riak_pipe, exec,
-                 [P1Spec, [{sink, rt_pipe:self_sink()}|?ALL_LOG]]),
-    {ok, Pipe2} =
-        rpc:call(Primary, riak_pipe, exec,
-                 [P2Spec, [{sink, rt_pipe:self_sink()}|?ALL_LOG]]),
-
-    lager:info("Send some inputs to both pipes"),
-    [ok = rpc:call(Primary, riak_pipe, queue_work, [Pipe1, X]) ||
-        X <- lists:seq(1, 20)],
-    [ok = rpc:call(Primary, riak_pipe, queue_work, [Pipe2, X]) ||
-        X <- lists:seq(101, 120)],
-
-    P1Status1 = pipe_status(Primary, Pipe1),
-    P2Status1 = pipe_status(Primary, Pipe2),
-
-    lager:info("Start and register intercept log collector"),
-    Collector = spawn_link(Primary, ?MODULE, collector, []),
-    rpc:call(Primary, erlang, register, [riak_test_collector, Collector]),
-
-    lager:info("Install pipe vnode intercept"),
-    Intercept = {riak_pipe_vnode,
-                 [{{handle_handoff_command,3}, log_handoff_command}]},
-    ok = rt_intercept:add(Primary, Intercept),
-
-    lager:info("Join Secondary to Primary"),
-    %% Give Secondary a chance to start and Primary to notice it.
-    rt:join(Secondary, Primary),
-    rt:wait_until_no_pending_changes(Nodes),
-    rt:wait_until_nodes_agree_about_ownership(Nodes),
-
-    lager:info("Unpause workers"),
-    Runner ! 
go, - - ok = rt:wait_until_transfers_complete(Nodes), - - lager:info("Add more inputs to Pipe2"), - [ok = rpc:call(Primary, riak_pipe, queue_work, [Pipe2, X]) || - X <- lists:seq(121, 140)], - - %% transfers completing takes so long that the pipe is extremely - %% likely to have finished all of its inputs by now - - P1Status2 = pipe_status(Primary, Pipe1), - P2Status2 = pipe_status(Primary, Pipe2), - - lager:info("Send eoi and collect results"), - riak_pipe:eoi(Pipe1), - riak_pipe:eoi(Pipe2), - {eoi, Out1, Trace1} = riak_pipe:collect_results(Pipe1, 1000), - {eoi, Out2, Trace2} = riak_pipe:collect_results(Pipe2, 1000), - - %% no errors on either pipe, all items make it through; if these - %% are wrong, we dropped things somewhere - ?assertEqual([], rt_pipe:extract_trace_errors(Trace1)), - ?assertEqual(20, length(Out1)), - ?assertEqual([], rt_pipe:extract_trace_errors(Trace2)), - ?assertEqual(40, length(Out2)), - - %% VM trace verification - timer:sleep(1000), - lager:info("Collect intercept log"), - PTraces = get_collection(Collector), - - %% time to compare things - - P1PrimaryWorkers1 = partitions_on_node(Primary, P1Status1), - P1SecondaryWorkers2 = partitions_on_node(Secondary, P1Status2), - P2PrimaryWorkers1 = partitions_on_node(Primary, P2Status1), - P2SecondaryWorkers2 = partitions_on_node(Secondary, P2Status2), - - %% workers moved - P1MovedPrimaryToSecondary = ordsets:intersection( - ordsets:from_list(P1PrimaryWorkers1), - ordsets:from_list(P1SecondaryWorkers2)), - P2MovedPrimaryToSecondary = ordsets:intersection( - ordsets:from_list(P2PrimaryWorkers1), - ordsets:from_list(P2SecondaryWorkers2)), - %% vnodes moved - AllMovedPrimaryToSecondary = ordsets:union( - P1MovedPrimaryToSecondary, - P2MovedPrimaryToSecondary), - - PFoldReqs = [X || X <- PTraces, - %% it would be really nice to import ?FOLD_REQ - %% from riak_core_vnode.hrl - (X == riak_core_fold_req_v1 orelse - X == riak_core_fold_req_v2)], - PArchives = [X || cmd_archive=X <- PTraces], - - %% number of active vnodes migrating from Primary to Secondary, - %% should be one fold per move, otherwise inputs were directed - %% incorrectly after transfers settled - ?assertEqual(length(AllMovedPrimaryToSecondary), - length(PFoldReqs)), - - %% number of workers migrating from Secondary to Primary, should - %% be one archive per move, otherwise inputs were directed - %% incorrectly after transfers settled - ?assertEqual(length(P1MovedPrimaryToSecondary) - +length(P2MovedPrimaryToSecondary), - length(PArchives)), - - case ordsets:intersection(ordsets:from_list(P1PrimaryWorkers1), - ordsets:from_list(P2PrimaryWorkers1)) of - [] -> - lager:warning("Multiple archives in a single fold was not tested"); - _ -> - ok - end, - - rt_pipe:assert_no_zombies(Nodes), - - lager:info("~s: PASS", [atom_to_list(?MODULE)]), - pass. - -%%% Run pausing bits - -%% @doc Create a worker function that asks the specified process for -%% permission before sending its input as output. -pause_until_signal(Runner) -> - fun(I, P, D) -> - Runner ! {wait, self()}, - receive go -> - riak_pipe_vnode_worker:send_output(I, P, D) - end - end. - -%% @doc Phase one of worker-pausing process: just collect requests, -%% waiting for overall signal to allow processing to happen. -runner_wait(Waiting) -> - receive - go -> - [ W ! go || W <- Waiting ], - runner_go(); - {wait, W} -> - runner_wait([W|Waiting]) - end. - -%% @doc Phase two of worker-pausing process: just let workers do their -%% processing as soon as they ask. -runner_go() -> - receive - {wait, W} -> - W ! 
go, - runner_go() - end. - -%%% Status filtering bits - -%% @doc Dig through a riak_pipe:status/1 response to determine which -%% partitions are on the given node. -partitions_on_node(Node, PipeStatus) -> - [proplists:get_value(partition, W) - || W <- PipeStatus, - Node == proplists:get_value(node, W)]. - -%% @doc Call riak_pipe:status/1 on the given node, and extract the -%% status list from it. It is expected that the given pipe has exactly -%% one fitting. -pipe_status(Node, Pipe) -> - [{_Name, Status}] = rpc:call(Node, riak_pipe, status, [Pipe]), - Status. - -%% @doc entry point for collector process -collector() -> - collector([]). -collector(Acc) -> - receive - {send_collection, Ref, Pid} -> - Pid ! {collection, Ref, lists:reverse(Acc)}; - Any -> - collector([Any|Acc]) - end. - -get_collection(Collector) -> - Ref = make_ref(), - Collector ! {send_collection, Ref, self()}, - receive {collection, Ref, Collection} -> - Collection - end. diff --git a/tests/pipe_verify_handoff_blocking.erl b/tests/pipe_verify_handoff_blocking.erl deleted file mode 100644 index 4c48bdabc..000000000 --- a/tests/pipe_verify_handoff_blocking.erl +++ /dev/null @@ -1,237 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify handoff of the blocking queue between riak_pipe vnodes. -%% -%% Important: this test loads this module and {@link rt_pipe} on each -%% Riak node, such that it can reference their functions in pipe -%% workers. -%% -%% This test is similar to pipe_verify_handoff, but specifically tests -%% handoff while input-senders are waiting for responses. Mostly this -%% is tested in the case where the input is put into a pipe worker's -%% blocking queue, but because message timing is not tightly -%% controlled, it may also trigger other cases. -%% -%% While an input is in-flight to the vnode, or in a worker's blocking -%% queue, the sender of that input is waiting on a response, while -%% also monitoring the vnode. During handoff, the vnode that should -%% respond to the sender will change, but the monitoring does not, -%% currently. This is a bug. -%% -%% The testing strategy is to block workers while filling their queues -%% to the point they begin blocking, then add a node to the cluster -%% and watch their handoff progress. The slightly tricky point is that -%% we have to allow workers to process some inputs, or they won't -%% handoff (because the vnode waits for them to be between inputs to -%% archive), but we don't want them to process so many inputs that -%% they consume their blocking queues before handing off. - -%% Please Note: Under rare circumstances, this test may fail with a -%% "{badmatch,{error,[{vnode_down,noproc}]}}' error. This is not a -%% failure of this test but rather a side effect of a race condition -%% in riak_core_vnode_proxy. 
It manifests because the
-%% test is attempting to send a command to a vnode that is in fact
-%% down; monitoring a vnode only works by issuing a command and
-%% getting back a pid. In some instances, get_vnode_pid fails because
-%% the vnode shutdown is queued in the mailbox ahead of the monitor
-%% request. Unfortunately, the fix would require a fundamental shift
-%% in the architecture of riak_core, which at the time of this writing
-%% is not feasible for this rare failure case.
--module(pipe_verify_handoff_blocking).
-
--export([
-         %% riak_test's entry
-         confirm/0,
-
-         %% test machinery
-         runner_wait/1,
-         queue_filler/3
-        ]).
-
--include_lib("eunit/include/eunit.hrl").
-
-%% local copy of riak_pipe.hrl
--include("rt_pipe.hrl").
-
--define(NODE_COUNT, 2).
--define(ALL_LOG, [{log, sink}, {trace, all}]).
--define(FILLER_COUNT, 5).
-
-%% @doc riak_test callback
-confirm() ->
-    %% static list of inputs, so we keep hitting the same partitions
-    Inputs = lists:seq(1, 20),
-
-    lager:info("Start ~b nodes", [?NODE_COUNT]),
-    NodeDefs = lists:duplicate(?NODE_COUNT, {current, default}),
-    Services = [riak_pipe],
-    [Primary,Secondary] = Nodes = rt:deploy_nodes(NodeDefs, Services),
-    %% Ensure each node owns 100% of its own ring
-    [?assertEqual([Node], rt:owners_according_to(Node)) || Node <- Nodes],
-
-    lager:info("Load useful modules"),
-    rt:load_modules_on_nodes([?MODULE, rt_pipe], Nodes),
-
-    lager:info("Start run coordinator"),
-    Runner = spawn_link(?MODULE, runner_wait, [[]]),
-
-    Spec = [#fitting_spec{name="blockhandoff",
-                          module=riak_pipe_w_xform,
-                          arg=pause_until_signal(Runner)}],
-
-    lager:info("Start pipe on Primary"),
-    {ok, Pipe} =
-        rpc:call(Primary, riak_pipe, exec,
-                 [Spec, [{sink, rt_pipe:self_sink()}|?ALL_LOG]]),
-
-    InitialInputCount = fill_queues(Primary, Pipe, Inputs),
-    Fillers = keep_queues_full(Primary, Pipe, Inputs),
-
-    _Status1 = pipe_status(Primary, Pipe),
-
-    lager:info("Join Secondary to Primary"),
-    %% Give Secondary a chance to start and Primary to notice it.
-    rt:join(Secondary, Primary),
-    rt:wait_until_no_pending_changes(Nodes),
-    rt:wait_until_nodes_agree_about_ownership(Nodes),
-
-    lager:info("Unpause workers"),
-    Runner ! go,
-
-    ok = rt:wait_until_transfers_complete(Nodes),
-
-    FillerInputCount = stop_fillers(Fillers),
-
-    %% if we make it this far, then no filler ever saw the vnode_down
-    %% error message; otherwise badmatches in queue_filler/4 will have
-    %% halted the test
-
-    _Status2 = pipe_status(Primary, Pipe),
-
-    lager:info("Send eoi and collect results"),
-    riak_pipe:eoi(Pipe),
-    {eoi, Out, Trace} = riak_pipe:collect_results(Pipe, 1000),
-
-    %% no errors, and all items make it through; if these
-    %% are wrong, we dropped things somewhere
-    ?assertEqual([], rt_pipe:extract_trace_errors(Trace)),
-    ?assertEqual(InitialInputCount+FillerInputCount, length(Out)),
-
-    rt_pipe:assert_no_zombies(Nodes),
-
-    lager:info("~s: PASS", [atom_to_list(?MODULE)]),
-    pass.
-
-%%% queue filling
-
-%% @doc fill pipe vnode queues by repeatedly sending each input in the
-%% input list until the queue reports timeout.
-fill_queues(Node, Pipe, Inputs) ->
-    lists:sum([ fill_queue(Node, Pipe, I, 0) || I <- Inputs ]).
-
-%% @doc fill one vnode queue by repeatedly sending the same input
-%% until it reports timeout
-fill_queue(Node, Pipe, Input, Count) ->
-    case rpc:call(Node, riak_pipe, queue_work, [Pipe, Input, noblock]) of
-        {error, [timeout]} ->
-            %% This queue is now full
-            Count;
-        ok ->
-            %% not full yet; add more
-            fill_queue(Node, Pipe, Input, Count+1)
-    end.
- -%% @doc spawn workers that will keep sending inputs to the pipe in -%% order to keep the queues full to test handoff of blocking queues -keep_queues_full(Node, Pipe, Inputs) -> - [ spawn_link(?MODULE, queue_filler, [Node, Pipe, shuffle(Inputs)]) - || _ <- lists:seq(1, ?FILLER_COUNT) ]. - -%% @doc Send each element of Inputs into Pipe, until told not to -queue_filler(Node, Pipe, Inputs) -> - %% putting Inputs in a queue means we don't have to track out - %% progress through them separately - queue_filler(Node, Pipe, queue:from_list(Inputs), 0). - -queue_filler(Node, Pipe, Inputs, Count) -> - receive - {stop, Owner} -> Owner ! {done, Count} - after 0 -> - {{value, I}, Q} = queue:out(Inputs), - ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, I]), - queue_filler(Node, Pipe, queue:in(I, Q), Count+1) - end. - -%% @doc tell all fillers to stop and collect and sum their send counts -stop_fillers(Fillers) -> - lists:sum([ receive {done, Count} -> Count end - || _ <- [ F ! {stop, self()} || F <- Fillers ] ]). - -%%% Run pausing bits - -%% @doc Create a worker function that asks the specified process for -%% permission before sending its input as output. -pause_until_signal(Runner) -> - fun(I, P, D) -> - Runner ! {wait, self()}, - receive go -> - riak_pipe_vnode_worker:send_output(I, P, D) - end - end. - -%% @doc Phase one of worker-pausing process: just collect requests, -%% waiting for overall signal to allow processing to happen. -runner_wait(Waiting) -> - receive - go -> - [ W ! go || W <- Waiting ], - runner_go(); - {wait, W} -> - runner_wait([W|Waiting]) - end. - -%% @doc Phase two of worker-pausing process: just let workers do their -%% processing as soon as they ask. -runner_go() -> - receive - {wait, W} -> - W ! go, - runner_go() - end. - -%%% Status filtering bits - -%% @doc Call riak_pipe:status/1 on the given node, and extract the -%% status list from it. It is expected that the given pipe has exactly -%% one fitting. -pipe_status(Node, Pipe) -> - [{_Name, Status}] = rpc:call(Node, riak_pipe, status, [Pipe]), - Status. - -%% @doc Shuffle the elements of a list. (Thanks Micah) -shuffle([]) -> - []; -shuffle([E]) -> - [E]; -shuffle(List) -> - Max = length(List), - Keyed = [{random:uniform(Max), E} || E <- List], - Sorted = lists:sort(Keyed), - [N || {_, N} <- Sorted]. diff --git a/tests/pipe_verify_restart_input_forwarding.erl b/tests/pipe_verify_restart_input_forwarding.erl deleted file mode 100644 index 1df9301ac..000000000 --- a/tests/pipe_verify_restart_input_forwarding.erl +++ /dev/null @@ -1,178 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify that inputs are forwarded properly if a pipe worker -%% fails to restart. 
-%% -%% Important: this test loads this module and {@link rt_pipe} on each -%% Riak node, such that it can reference their functions in pipe -%% workers. -%% -%% IMPORTANT: this test must be run on a ONE-node cluster, because -%% riak_pipe_w_crash uses ETS to determine a "restart" situation, and -%% sets the fitting process as the heir of the table, so it survives -%% the worker's restart -%% -%% These tests used to be a component of riak_pipe:exception_test_/0. - --module(pipe_verify_restart_input_forwarding). - --export([ - %% riak_test's entry - confirm/0 - ]). - --include_lib("eunit/include/eunit.hrl"). - -%% local copy of riak_pipe.hrl --include("rt_pipe.hrl"). - -%% must be 1 for verify_worker_restart_failure_input_forwarding --define(NODE_COUNT, 1). - --define(ERR_LOG, [{log, sink}, {trace, [error]}]). --define(ALL_LOG, [{log, sink}, {trace, all}]). - -%% @doc riak_test callback -confirm() -> - lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), - - [rt:wait_for_service(Node, riak_pipe) || Node <- Nodes], - - rt:load_modules_on_nodes([?MODULE, rt_pipe], Nodes), - - verify_worker_restart_failure_input_forwarding(Nodes), - - rt_pipe:assert_no_zombies(Nodes), - - lager:info("~s: PASS", [atom_to_list(?MODULE)]), - pass. - -verify_worker_restart_failure_input_forwarding([RN]) -> - lager:info("Verify input forwarding after worker restart failure"), - - %% make a worker fail, and then also fail to restart, and check - %% that the input that killed it generates a processing error, - %% while the inputs that were queued for it get sent to another - %% vnode - Spec = [#fitting_spec{name=restarter, - module=riak_pipe_w_crash, - arg=init_restartfail, - %% use nval=2 to get some failover - nval=2}], - Opts = [{sink, rt_pipe:self_sink()}, - {log, sink}, - {trace,[error,restart,restart_fail,queue]}], - {ok, Pipe} = rpc:call(RN, riak_pipe, exec, [Spec, Opts]), - - Inputs1 = lists:seq(0,127), - Inputs2 = lists:seq(128,255), - Inputs3 = lists:seq(256,383), - - %% sleep, send more inputs - - %% this should make one of the riak_pipe_w_crash workers die with - %% unprocessed items in its queue, and then also deliver a few - %% more inputs to that worker, which will be immediately - %% redirected to an alternate vnode - - %% send many inputs, send crash, send more inputs - [ok = rpc:call(RN, riak_pipe, queue_work, [Pipe, N]) || N <- Inputs1], - ok = rpc:call(RN, riak_pipe, queue_work, [Pipe, init_restartfail]), - [ok = rpc:call(RN, riak_pipe, queue_work, [Pipe, N]) || N <- Inputs2], - %% one worker should now have both the crashing input and a valid - %% input following it waiting in its queue - the test is whether - %% or not that valid input following the crash gets redirected - %% correctly - - %% wait for the worker to crash, then send more input at it - %% - the test is whether the new inputs are redirected correctly - timer:sleep(2000), - [ok = rpc:call(RN, riak_pipe, queue_work, [Pipe, N]) || N <- Inputs3], - - %% flush the pipe - ok = riak_pipe:eoi(Pipe), - {eoi, Results, Trace} = riak_pipe:collect_results(Pipe), - - %% all results should have completed correctly - ?assertEqual(length(Inputs1++Inputs2++Inputs3), length(Results)), - - %% There should be one trace errors: - %% - the processing error (worker crash) - Errors = rt_pipe:extract_trace_errors(Trace), - ?assertEqual(1, length(Errors)), - ?assert(is_list(hd(Errors))), - ?assertMatch(init_restartfail, proplists:get_value(input, hd(Errors))), - Restarter = proplists:get_value(partition, hd(Errors)), - 
%% ... and also one message about the worker - %% restart failure - ?assertMatch([Restarter], rt_pipe:extract_restart_fail(Trace)), - - Queued = rt_pipe:extract_queued(Trace), - - %% find out who caught the restartfail - Restarted = [ P || {P, init_restartfail} <- Queued ], - ?assertMatch([Restarter], Restarted), - - %% what input arrived after the crashing input, - %% but before the crash? - {_PreCrashIn, PostCrashIn0} = - lists:splitwith(fun is_integer/1, - [ I || {P,I} <- Queued, P == Restarter]), - %% drop actual crash input - PostCrashIn = tl(PostCrashIn0), - %% make sure the input was actually enqueued - %% before the crash (otherwise test was bogus) - ?assert(length(PostCrashIn) > 0), - - %% so where did the post-crash inputs end up? - ReQueued = lists:map( - fun(I) -> - Re = [ P || {P,X} <- Queued, - X == I, - P /= Restarter ], - ?assertMatch([_Part], Re), - hd(Re) - end, - PostCrashIn), - ?assertMatch([_Requeuer], lists:usort(ReQueued)), - [Requeuer|_] = ReQueued, - - %% finally, did the inputs destined for the crashed worker that - %% were sent *after* the worker crashed, also get forwarded to the - %% correct location? - Destined = lists:filter( - fun(I) -> - [{P,_}] = rpc:call(RN, riak_core_apl, get_apl, - [chash:key_of(I), 1, riak_pipe]), - P == Restarter - end, - Inputs3), - Forwarded = lists:map( - fun(I) -> - [Part] = [P || {P,X} <- Queued, X == I], - Part - end, - Destined), - ?assertMatch([_Forward], lists:usort(Forwarded)), - [Forward|_] = Forwarded, - - %% consistent hash means this should be the same - ?assertEqual(Requeuer, Forward). diff --git a/tests/pipe_verify_sink_types.erl b/tests/pipe_verify_sink_types.erl deleted file mode 100644 index 32b42d50c..000000000 --- a/tests/pipe_verify_sink_types.erl +++ /dev/null @@ -1,202 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify different riak_pipe sink types. -%% -%% These tests used to be known as riak_pipe:sink_types_test_/0 - --module(pipe_verify_sink_types). - --export([ - %% riak_test's entry - confirm/0 - ]). - --include_lib("eunit/include/eunit.hrl"). - -%% local copy of riak_pipe.hrl --include("rt_pipe.hrl"). - --define(NODE_COUNT, 3). --define(ALL_LOG, [{log, sink}, {trace, all}]). - -%% @doc riak_test callback -confirm() -> - lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), - - [rt:wait_for_service(Node, riak_pipe) || Node <- Nodes], - - verify_raw(Nodes), - verify_fsm(Nodes), - verify_fsm_timeout(Nodes), - verify_fsm_sync_period(Nodes), - verify_fsm_infinity_sync_period(Nodes), - verify_invalid_type(Nodes), - - rt_pipe:assert_no_zombies(Nodes), - - lager:info("~s: PASS", [atom_to_list(?MODULE)]), - pass. 
- -%%% TESTS - -%% @doc the basics: 'raw' is the default with nothing specified (so -%% all other tests should have covered it), but try specifying it -%% explicitly here -verify_raw([RN|_]) -> - lager:info("Verify explicit 'raw' sink type"), - Spec = [#fitting_spec{name=r, - module=riak_pipe_w_pass}], - Opts = [{sink_type, raw},{sink, rt_pipe:self_sink()}], - {ok, P} = rpc:call(RN, riak_pipe, exec, [Spec, Opts]), - rpc:call(RN, riak_pipe, queue_work, [P, 1]), - riak_pipe:eoi(P), - Result = riak_pipe:collect_results(P, 1000), - ?assertEqual({eoi, [{r, 1}], []}, Result). - -%% @doc rt_pipe_test_sink *only* accepts results delivered as -%% gen_fsm events that are tagged as sync vs async -verify_fsm([RN|_]) -> - lager:info("Verify 'fsm' sink type"), - PipeRef = make_ref(), - {ok, SinkPid} = rt_pipe_sink_fsm:start_link(PipeRef), - Spec = [#fitting_spec{name=fs, - module=riak_pipe_w_pass}], - Sink = #fitting{pid=SinkPid, ref=PipeRef}, - Opts = [{sink, Sink}, {sink_type, {fsm, 0, 5000}}], - {ok, P} = rpc:call(RN, riak_pipe, exec, [Spec, Opts]), - rpc:call(RN, riak_pipe, queue_work, [P, {sync, 1}]), - riak_pipe:eoi(P), - Result = rt_pipe_sink_fsm:get_results(SinkPid), - ?assertEqual({eoi, [{fs, {sync, 1}}], []}, Result). - -%% @doc purposefully disable acking one output, to trigger the timeout -%% on the gen_fsm:sync_send_event -verify_fsm_timeout([RN|_]) -> - lager:info("Verify sink fsm timeout"), - PipeRef = make_ref(), - SinkOpts = [{skip_ack, [{fst,{sync, 2}}]}], - {ok, SinkPid} = rt_pipe_sink_fsm:start_link( - PipeRef, SinkOpts), - Spec = [#fitting_spec{name=fst, - module=riak_pipe_w_pass}], - Sink = #fitting{pid=SinkPid, ref=PipeRef}, - Opts = [{log, sink}, - {trace, [error]}, - {sink, Sink}, - %% a short timeout, to fit eunit - {sink_type, {fsm, 0, 1000}}], - {ok, P} = rpc:call(RN, riak_pipe, exec, [Spec, Opts]), - rpc:call(RN, riak_pipe, queue_work, [P, {sync, 1}]), - rpc:call(RN, riak_pipe, queue_work, [P, {sync, 2}]), - rpc:call(RN, riak_pipe, queue_work, [P, {sync, 3}]), - riak_pipe:eoi(P), - {eoi, Results, Logs} = - rt_pipe_sink_fsm:get_results(SinkPid), - - %% make sure that all results did make it to the sink - ?assertEqual([{fst, {sync, 1}}, {fst, {sync, 2}}, {fst, {sync, 3}}], - lists:sort(Results)), - %% but that we also logged an error... - [{fst,{trace,[error],{error,Props}}}] = Logs, - %% ...about the input "2"... - ?assertEqual({sync, 2}, - proplists:get_value(input, Props)), - %% ...timing out on its way to the sink - ?assertEqual({badmatch,{error,timeout}}, - proplists:get_value(error, Props)). 
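verify_fsm/1 and verify_fsm_timeout/1 pin down the {fsm, Period, Timeout}
sink type: results arrive as gen_fsm events, and a result delivered
synchronously waits up to Timeout for the sink's ack. A condensed sketch
of wiring such a sink, reusing the rt_pipe_sink_fsm helper and the option
shapes from the tests above:

    exec_with_fsm_sink(Node, Spec) ->
        PipeRef = make_ref(),
        {ok, SinkPid} = rt_pipe_sink_fsm:start_link(PipeRef),
        Sink = #fitting{pid=SinkPid, ref=PipeRef},
        %% period 0 appears to deliver every result synchronously,
        %% with a 5s ack timeout (see verify_fsm's test data)
        Opts = [{sink, Sink}, {sink_type, {fsm, 0, 5000}}],
        {ok, Pipe} = rpc:call(Node, riak_pipe, exec, [Spec, Opts]),
        {Pipe, SinkPid}.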
- -%% @doc make sure that the sink messages are sent synchronously on the -%% Period, and asynchronously otherwise -verify_fsm_sync_period([RN|_]) -> - lager:info("Verify fsm sink sync period"), - PipeRef = make_ref(), - {ok, SinkPid} = rt_pipe_sink_fsm:start_link(PipeRef, []), - %% force a single worker, to make it easy to test the sync period - Spec = [#fitting_spec{name=fst, - module=riak_pipe_w_pass, - chashfun={riak_pipe, zero_part}}], - Sink = #fitting{pid=SinkPid, ref=PipeRef}, - Opts = [{log, sink}, - {trace, [error]}, - {sink, Sink}, - {sink_type, {fsm, 2, 1000}}], - {ok, P} = rpc:call(RN, riak_pipe, exec, [Spec, Opts]), - rpc:call(RN, riak_pipe, queue_work, [P, {sync, 1}]), - rpc:call(RN, riak_pipe, queue_work, [P, {async, 2}]), - rpc:call(RN, riak_pipe, queue_work, [P, {async, 3}]), - rpc:call(RN, riak_pipe, queue_work, [P, {sync, 4}]), - riak_pipe:eoi(P), - {eoi, Results, []} = - rt_pipe_sink_fsm:get_results(SinkPid), - - %% make sure that all results did make it to the sink - %% ('async' sorts before 'sync') - ?assertEqual([{fst, {async, 2}}, - {fst, {async, 3}}, - {fst, {sync, 1}}, - {fst, {sync, 4}}], - lists:sort(Results)). - -%% @doc infinite period means sink results are always delivered -%% asynchronously -verify_fsm_infinity_sync_period([RN|_]) -> - PipeRef = make_ref(), - {ok, SinkPid} = rt_pipe_sink_fsm:start_link(PipeRef, []), - %% force a single worker, to make it easy to test the sync period - Spec = [#fitting_spec{name=fst, - module=riak_pipe_w_pass, - chashfun={riak_pipe, zero_part}}], - Sink = #fitting{pid=SinkPid, ref=PipeRef}, - Opts = [{log, sink}, - {trace, [error]}, - {sink, Sink}, - {sink_type, {fsm, infinity, 1000}}], - {ok, P} = rpc:call(RN, riak_pipe, exec, [Spec, Opts]), - rpc:call(RN, riak_pipe, queue_work, [P, {async, 1}]), - rpc:call(RN, riak_pipe, queue_work, [P, {async, 2}]), - rpc:call(RN, riak_pipe, queue_work, [P, {async, 3}]), - rpc:call(RN, riak_pipe, queue_work, [P, {async, 4}]), - riak_pipe:eoi(P), - {eoi, Results, []} = - rt_pipe_sink_fsm:get_results(SinkPid), - - %% make sure that all results did make it to the sink - ?assertEqual([{fst, {async, 1}}, - {fst, {async, 2}}, - {fst, {async, 3}}, - {fst, {async, 4}}], - lists:sort(Results)). - -%% @doc ensure behavior is predictable when an unknown sink type is -%% specified -verify_invalid_type([RN|_]) -> - Spec = [#fitting_spec{module=riak_pipe_w_pass}], - case rpc:call(RN, riak_pipe, exec, - [Spec, [{sink_type, invalid}]]) of - {invalid_sink_type, {sink_type, invalid}} -> - %% hooray! the correct error - ok; - {ok, P} -> - %% if we made it here, the test failed; kill the pipe and - %% blow up - riak_pipe:destroy(P), - ?assert(false) - end. diff --git a/tests/post_generate_key.erl b/tests/post_generate_key.erl deleted file mode 100644 index b8197caa9..000000000 --- a/tests/post_generate_key.erl +++ /dev/null @@ -1,99 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. 
See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% @doc Verify that POSTing to a bucket URL generates a key for an -%% object correctly. --module(post_generate_key). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - Nodes = rt:build_cluster(1), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - - [Base|_] = rt:http_url(Nodes), - - Bucket = "post_generate_key", - OldPostUrl = old_url(Base, Bucket), - NewPostUrl = new_url(Base, Bucket), - - OldPostResult = post(OldPostUrl), - NewPostResult = post(NewPostUrl), - - ?assert(post_was_successful(OldPostResult)), - ?assert(post_was_successful(NewPostResult)), - - OldLocation = location_header(OldPostResult), - NewLocation = location_header(NewPostResult), - ?assert(is_old_url(OldLocation)), - ?assert(is_new_url(NewLocation)), - - OldGetResult = get_url(Base++OldLocation), - NewGetResult = get_url(Base++NewLocation), - - ?assert(get_was_successful(OldGetResult)), - ?assert(get_was_successful(NewGetResult)), - - pass. - -old_url(Base, Bucket) -> - Base++"/riak/"++Bucket. - -new_url(Base, Bucket) -> - Base++"/buckets/"++Bucket++"/keys". - -post(Url) -> - ibrowse:send_req(Url, [{"content-type", "text/plain"}], - post, "foobar"). - -get_url(Url) -> - ibrowse:send_req(Url, [{"accept", "text/plain"}], get). - -post_was_successful({ok, "201", _, _}) -> true; -post_was_successful(Other) -> - lager:warning("That's not a 201: ~p", [Other]), - false. - -location_header({ok, _, Headers, _}) -> - proplists:get_value("Location", Headers). - -is_old_url(Url) -> - case re:run(Url, "^/riak/") of - {match, _} -> - true; - nomatch -> - lager:warning("That's not an old url: ~s", [Url]), - false - end. - -is_new_url(Url) -> - case re:run(Url, "^/buckets/.*/keys/") of - {match, _} -> - true; - nomatch -> - lager:warning("That's not a new url: ~s", [Url]), - false - end. - - -get_was_successful({ok, "200", _, _}) -> true; -get_was_successful(Other) -> - lager:warning("That's not a 200: ~p", [Other]), - false. diff --git a/tests/pr_pw.erl b/tests/pr_pw.erl deleted file mode 100644 index 15b5a59a7..000000000 --- a/tests/pr_pw.erl +++ /dev/null @@ -1,148 +0,0 @@ --module(pr_pw). - --behavior(riak_test). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). 
- -confirm() -> - application:start(inets), - lager:info("Deploy some nodes"), - Nodes = rt:build_cluster(4), - - %% calculate the preflist for foo/bar - {ok, Ring} = rpc:call(hd(Nodes), riak_core_ring_manager, get_my_ring, []), - UpNodes = rpc:call(hd(Nodes), riak_core_node_watcher, nodes, [riak_kv]), - DocIdx = rpc:call(hd(Nodes), riak_core_util, chash_key, [{<<"foo">>, - <<"bar">>}]), - N = 3, - Preflist2 = riak_core_apl:get_apl_ann(DocIdx, N, Ring, UpNodes), - lager:info("Preflist is ~p", [Preflist2]), - PLNodes = [Node || {{_Index, Node}, _Status} <- Preflist2], - lager:info("Nodes in preflist ~p", [PLNodes]), - [SafeNode] = Nodes -- PLNodes, - lager:info("Node not involved in this preflist ~p", [SafeNode]), - %% connect to the only node in the preflist we won't break, to avoid - %% random put forwarding - {ok, C} = riak:client_connect(hd(PLNodes)), - NodeUrl = rt:http_url(hd(PLNodes)), - UrlFun=fun(Key, Value, Params) -> - lists:flatten(io_lib:format("~s/riak/~s/~s~s", - [NodeUrl, Key, Value, Params])) - end, - - Obj = riak_object:new(<<"foo">>, <<"bar">>, <<42:32/integer>>), - ?assertEqual(ok, C:put(Obj, [{pw, all}])), - ?assertMatch({ok, _}, C:get(<<"foo">>, <<"bar">>, [{pr, all}])), - - %% check pr/pw can't be violated - ?assertEqual({error, {pw_val_violation, evil}}, C:put(Obj, [{pw, evil}])), - ?assertEqual({error, {pr_val_violation, evil}}, C:get(<<"foo">>, <<"bar">>, - [{pr, evil}])), - - ?assertMatch({ok, {{_, 400, _}, _, "pr query parameter must be"++_}}, - httpc:request(get, {UrlFun(<<"foo">>, <<"bar">>, <<"?pr=evil">>), []}, [], [])), - - ?assertMatch({ok, {{_, 400, _}, _, "pw query parameter must be"++_}}, - httpc:request(put, {UrlFun(<<"foo">>, <<"bar">>, - <<"?pw=evil">>), [], "text/plain", <<42:32/integer>>}, [], [])), - - %% install an intercept to emulate a node that kernel paniced or - %% something where it can take some time for the node_watcher to spot the - %% downed node - {{Index, Node}, _} = lists:last(Preflist2), - make_intercepts_tab(Node, Index), - rt_intercept:add(Node, {riak_kv_vnode, [{{do_get,4}, drop_do_get}, - {{do_put, 7}, drop_do_put}]}), - lager:info("disabling do_get for index ~p on ~p", [Index, Node]), - rt:log_to_nodes(Nodes, "disabling do_get for index ~p on ~p", [Index, Node]), - timer:sleep(100), - - %% one vnode will never return, so we get timeouts - ?assertEqual({error, timeout}, - C:get(<<"foo">>, <<"bar">>, [{pr, all}])), - ?assertEqual({error, timeout}, C:put(Obj, [{pw, all}])), - - %% we can still meet quorum, though - ?assertEqual(ok, C:put(Obj, [{pw, quorum}])), - ?assertMatch({ok, _}, - C:get(<<"foo">>, <<"bar">>, [{pr, quorum}])), - - rt:stop_and_wait(Node), - - %% there's now a fallback in the preflist, so PR/PW won't be satisfied - %% anymore - ?assertEqual({error, {pr_val_unsatisfied, 3, 2}}, - C:get(<<"foo">>, <<"bar">>, [{pr, all}])), - ?assertEqual({error, {pw_val_unsatisfied, 3, 2}}, C:put(Obj, [{pw, all}])), - - ?assertMatch({ok, {{_, 503, _}, _, "PR-value unsatisfied: 2/3\n"}}, - httpc:request(get, {UrlFun(<<"foo">>, <<"bar">>, <<"?pr=all">>), []}, [], [])), - - ?assertMatch({ok, {{_, 503, _}, _, "PW-value unsatisfied: 2/3\n"}}, - httpc:request(put, {UrlFun(<<"foo">>, <<"bar">>, - <<"?pw=all">>), [], "text/plain", <<42:32/integer>>}, [], [])), - - %% emulate another node failure - {{Index2, Node2}, _} = lists:nth(2, Preflist2), - make_intercepts_tab(Node2, Index2), - rt_intercept:add(Node2, {riak_kv_vnode, [{{do_get,4}, drop_do_get}, - {{do_put, 7}, drop_do_put}]}), - lager:info("disabling do_get for index ~p on ~p", 
[Index2, Node2]), - rt:log_to_nodes(Nodes, "disabling do_get for index ~p on ~p", [Index2, Node2]), - timer:sleep(100), - - %% can't even meet quorum now - ?assertEqual({error, timeout}, - C:get(<<"foo">>, <<"bar">>, [{pr, quorum}])), - ?assertEqual({error, timeout}, C:put(Obj, [{pw, quorum}])), - - %% restart the node - rt:start_and_wait(Node), - rt:wait_for_service(Node, riak_kv), - - %% we can make quorum again - ?assertEqual(ok, C:put(Obj, [{pw, quorum}])), - ?assertMatch({ok, _}, C:get(<<"foo">>, <<"bar">>, [{pr, quorum}])), - %% intercepts still in force on second node, so we'll get timeouts - ?assertEqual({error, timeout}, - C:get(<<"foo">>, <<"bar">>, [{pr, all}])), - ?assertEqual({error, timeout}, C:put(Obj, [{pw, all}])), - - %% reboot the node - rt:stop_and_wait(Node2), - rt:start_and_wait(Node2), - rt:wait_for_service(Node2, riak_kv), - - %% everything is happy again - ?assertEqual(ok, C:put(Obj, [{pw, all}])), - ?assertMatch({ok, _}, C:get(<<"foo">>, <<"bar">>, [{pr, all}])), - - %% make a vnode start to fail puts - make_intercepts_tab(Node2, Index2), - rt_intercept:add(Node2, {riak_kv_vnode, [{{do_put, 7}, error_do_put}]}), - lager:info("failing do_put for index ~p on ~p", [Index2, Node2]), - rt:log_to_nodes(Nodes, "failing do_put for index ~p on ~p", [Index2, Node2]), - timer:sleep(100), - - %% there's now a failing vnode in the preflist, so PW/DW won't be satisfied - %% anymore - ?assertEqual({error, {pw_val_unsatisfied, 3, 2}}, C:put(Obj, [{pw, all}])), - ?assertEqual({error, {dw_val_unsatisfied, 3, 2}}, C:put(Obj, [{dw, all}])), - - ?assertMatch({ok, {{_, 503, _}, _, "PW-value unsatisfied: 2/3\n"}}, - httpc:request(put, {UrlFun(<<"foo">>, <<"bar">>, - <<"?pw=all">>), [], "text/plain", <<42:32/integer>>}, [], [])), - ?assertMatch({ok, {{_, 503, _}, _, "DW-value unsatisfied: 2/3\n"}}, - httpc:request(put, {UrlFun(<<"foo">>, <<"bar">>, - <<"?dw=all">>), [], "text/plain", <<42:32/integer>>}, [], [])), - pass. - -make_intercepts_tab(Node, Partition) -> - SupPid = rpc:call(Node, erlang, whereis, [sasl_safe_sup]), - intercepts_tab = rpc:call(Node, ets, new, [intercepts_tab, [named_table, - public, set, {heir, SupPid, {}}]]), - true = rpc:call(Node, ets, insert, [intercepts_tab, {drop_do_get_partitions, - [Partition]}]), - true = rpc:call(Node, ets, insert, [intercepts_tab, {drop_do_put_partitions, - [Partition]}]). diff --git a/tests/repl_aae_fullsync.erl b/tests/repl_aae_fullsync.erl deleted file mode 100644 index a04e86012..000000000 --- a/tests/repl_aae_fullsync.erl +++ /dev/null @@ -1,611 +0,0 @@ -%% @doc -%% This module implements a riak_test to exercise the Active -%% Anti-Entropy Fullsync replication. It sets up two clusters, runs a -%% fullsync over all partitions, and verifies the missing keys were -%% replicated to the sink cluster. - --module(repl_aae_fullsync). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(TEST_BUCKET, <<"repl-aae-fullsync-systest_a">>). --define(NUM_KEYS, 1000). - --define(CONF(Retries), [ - {riak_core, - [ - {ring_creation_size, 8}, - {default_bucket_props, [{n_val, 1}]} - ] - }, - {riak_kv, - [ - %% Specify fast building of AAE trees - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_strategy, aae}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_soft_retries, 10}, - {max_fssource_retries, Retries} - ]} - ]). 
-
-confirm() ->
-    difference_test(),
-    deadlock_test(),
-    simple_test(),
-    bidirectional_test(),
-    dual_test(),
-    pass.
-
-simple_test() ->
-    %% Deploy 6 nodes.
-    Nodes = rt:deploy_nodes(6, ?CONF(5), [riak_kv, riak_repl]),
-
-    %% Break up the 6 nodes into two clusters.
-    {ANodes, BNodes} = lists:split(3, Nodes),
-
-    lager:info("ANodes: ~p", [ANodes]),
-    lager:info("BNodes: ~p", [BNodes]),
-
-    lager:info("Building two clusters."),
-    [repl_util:make_cluster(N) || N <- [ANodes, BNodes]],
-
-    AFirst = hd(ANodes),
-    BFirst = hd(BNodes),
-
-    lager:info("Naming clusters."),
-    repl_util:name_cluster(AFirst, "A"),
-    repl_util:name_cluster(BFirst, "B"),
-
-    lager:info("Waiting for convergence."),
-    rt:wait_until_ring_converged(ANodes),
-    rt:wait_until_ring_converged(BNodes),
-
-    lager:info("Waiting for transfers to complete."),
-    rt:wait_until_transfers_complete(ANodes),
-    rt:wait_until_transfers_complete(BNodes),
-
-    lager:info("Get leaders."),
-    LeaderA = get_leader(AFirst),
-    LeaderB = get_leader(BFirst),
-
-    lager:info("Finding connection manager ports."),
-    BPort = get_port(LeaderB),
-
-    lager:info("Connecting cluster A to B"),
-    connect_cluster(LeaderA, BPort, "B"),
-
-    %% Write keys prior to fullsync.
-    write_to_cluster(AFirst, 1, ?NUM_KEYS),
-
-    %% Read keys prior to fullsync.
-    read_from_cluster(BFirst, 1, ?NUM_KEYS, ?NUM_KEYS),
-
-    %% Wait for trees to compute.
-    rt:wait_until_aae_trees_built(ANodes),
-    rt:wait_until_aae_trees_built(BNodes),
-
-    lager:info("Test fullsync from cluster A leader ~p to cluster B",
-               [LeaderA]),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    TargetA = hd(ANodes -- [LeaderA]),
-    TargetB = hd(BNodes),
-
-    %% Flush AAE trees to disk.
-    perform_sacrifice(AFirst),
-
-    %% Validate replication from A -> B is fault-tolerant regardless of
-    %% errors occurring on the source or destination.
-    validate_intercepted_fullsync(TargetA, LeaderA, "B"),
-    validate_intercepted_fullsync(TargetB, LeaderA, "B"),
-
-    %% Verify data is replicated from A -> B successfully once the
-    %% intercepts are removed.
-    validate_completed_fullsync(LeaderA, BFirst, "B", 1, ?NUM_KEYS),
-
-    rt:clean_cluster(Nodes),
-
-    pass.
-
-dual_test() ->
-    %% Deploy 6 nodes.
-    Nodes = rt:deploy_nodes(6, ?CONF(infinity), [riak_kv, riak_repl]),
-
-    %% Break up the 6 nodes into three clusters.
-    {ANodes, Rest} = lists:split(2, Nodes),
-    {BNodes, CNodes} = lists:split(2, Rest),
-
-    lager:info("ANodes: ~p", [ANodes]),
-    lager:info("BNodes: ~p", [BNodes]),
-    lager:info("CNodes: ~p", [CNodes]),
-
-    lager:info("Building three clusters."),
-    [repl_util:make_cluster(N) || N <- [ANodes, BNodes, CNodes]],
-
-    AFirst = hd(ANodes),
-    BFirst = hd(BNodes),
-    CFirst = hd(CNodes),
-
-    lager:info("Naming clusters."),
-    repl_util:name_cluster(AFirst, "A"),
-    repl_util:name_cluster(BFirst, "B"),
-    repl_util:name_cluster(CFirst, "C"),
-
-    lager:info("Waiting for convergence."),
-    rt:wait_until_ring_converged(ANodes),
-    rt:wait_until_ring_converged(BNodes),
-    rt:wait_until_ring_converged(CNodes),
-
-    lager:info("Waiting for transfers to complete."),
-    rt:wait_until_transfers_complete(ANodes),
-    rt:wait_until_transfers_complete(BNodes),
-    rt:wait_until_transfers_complete(CNodes),
-
-    lager:info("Get leaders."),
-    LeaderA = get_leader(AFirst),
-    LeaderB = get_leader(BFirst),
-    LeaderC = get_leader(CFirst),
-
-    lager:info("Finding connection manager ports."),
-    APort = get_port(LeaderA),
-    BPort = get_port(LeaderB),
-    CPort = get_port(LeaderC),
-
-    lager:info("Connecting all clusters into fully connected topology."),
-    connect_cluster(LeaderA, BPort, "B"),
-    connect_cluster(LeaderA, CPort, "C"),
-    connect_cluster(LeaderB, APort, "A"),
-    connect_cluster(LeaderB, CPort, "C"),
-    connect_cluster(LeaderC, APort, "A"),
-    connect_cluster(LeaderC, BPort, "B"),
-
-    %% Write keys to cluster A, verify B and C do not have them.
-    write_to_cluster(AFirst, 1, ?NUM_KEYS),
-    read_from_cluster(BFirst, 1, ?NUM_KEYS, ?NUM_KEYS),
-    read_from_cluster(CFirst, 1, ?NUM_KEYS, ?NUM_KEYS),
-
-    %% Enable fullsync from A to B.
-    lager:info("Enabling fullsync from A to B"),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    %% Enable fullsync from A to C.
-    lager:info("Enabling fullsync from A to C"),
-    repl_util:enable_fullsync(LeaderA, "C"),
-    rt:wait_until_ring_converged(ANodes),
-
-    %% Wait for trees to compute.
-    rt:wait_until_aae_trees_built(ANodes),
-    rt:wait_until_aae_trees_built(BNodes),
-    rt:wait_until_aae_trees_built(CNodes),
-
-    %% Flush AAE trees to disk.
-    perform_sacrifice(AFirst),
-
-    %% Verify data is replicated from A -> B successfully
-    validate_completed_fullsync(LeaderA, BFirst, "B", 1, ?NUM_KEYS),
-
-    %% Verify data is replicated from A -> C successfully
-    validate_completed_fullsync(LeaderA, CFirst, "C", 1, ?NUM_KEYS),
-
-    write_to_cluster(AFirst,
-                     ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS),
-    read_from_cluster(BFirst,
-                      ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS, ?NUM_KEYS),
-    read_from_cluster(CFirst,
-                      ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS, ?NUM_KEYS),
-
-    %% Verify that duelling fullsyncs eventually complete
-    {Time, _} = timer:tc(repl_util,
-                         start_and_wait_until_fullsync_complete,
-                         [LeaderA]),
-
-    read_from_cluster(BFirst, ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS, 0),
-    read_from_cluster(CFirst, ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS, 0),
-    lager:info("Fullsync A->B and A->C completed in ~p seconds",
-               [Time/1000/1000]),
-
-    pass.
-
-bidirectional_test() ->
-    %% Deploy 6 nodes.
-    Nodes = rt:deploy_nodes(6, ?CONF(5), [riak_kv, riak_repl]),
-
-    %% Break up the 6 nodes into two clusters.
-    {ANodes, BNodes} = lists:split(3, Nodes),
-
-    lager:info("ANodes: ~p", [ANodes]),
-    lager:info("BNodes: ~p", [BNodes]),
-
-    lager:info("Building two clusters."),
-    [repl_util:make_cluster(N) || N <- [ANodes, BNodes]],
-
-    AFirst = hd(ANodes),
-    BFirst = hd(BNodes),
-
-    lager:info("Naming clusters."),
-    repl_util:name_cluster(AFirst, "A"),
-    repl_util:name_cluster(BFirst, "B"),
-
-    lager:info("Waiting for convergence."),
-    rt:wait_until_ring_converged(ANodes),
-    rt:wait_until_ring_converged(BNodes),
-
-    lager:info("Waiting for transfers to complete."),
-    rt:wait_until_transfers_complete(ANodes),
-    rt:wait_until_transfers_complete(BNodes),
-
-    lager:info("Get leaders."),
-    LeaderA = get_leader(AFirst),
-    LeaderB = get_leader(BFirst),
-
-    lager:info("Finding connection manager ports."),
-    APort = get_port(LeaderA),
-    BPort = get_port(LeaderB),
-
-    lager:info("Connecting cluster A to B"),
-    connect_cluster(LeaderA, BPort, "B"),
-
-    lager:info("Connecting cluster B to A"),
-    connect_cluster(LeaderB, APort, "A"),
-
-    %% Write keys to cluster A, verify B does not have them.
-    write_to_cluster(AFirst, 1, ?NUM_KEYS),
-    read_from_cluster(BFirst, 1, ?NUM_KEYS, ?NUM_KEYS),
-
-    %% Enable fullsync from A to B.
-    lager:info("Enabling fullsync from A to B"),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    %% Enable fullsync from B to A.
-    lager:info("Enabling fullsync from B to A"),
-    repl_util:enable_fullsync(LeaderB, "A"),
-    rt:wait_until_ring_converged(BNodes),
-
-    %% Flush AAE trees to disk.
-    perform_sacrifice(AFirst),
-
-    %% Wait for trees to compute.
-    rt:wait_until_aae_trees_built(ANodes),
-
-    %% Verify A replicated to B.
-    validate_completed_fullsync(LeaderA, BFirst, "B", 1, ?NUM_KEYS),
-
-    %% Write keys to cluster B, verify A does not have them.
-    write_to_cluster(BFirst, ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS),
-    read_from_cluster(AFirst, ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS, ?NUM_KEYS),
-
-    %% Flush AAE trees to disk.
-    perform_sacrifice(BFirst),
-
-    %% Wait for trees to compute.
-    rt:wait_until_aae_trees_built(BNodes),
-
-    %% Verify B replicated to A.
-    validate_completed_fullsync(LeaderB, AFirst, "A", ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS),
-
-    %% Clean.
-    rt:clean_cluster(Nodes),
-
-    pass.
-
-difference_test() ->
-    %% Deploy 6 nodes.
-    Nodes = rt:deploy_nodes(6, ?CONF(5), [riak_kv, riak_repl]),
-
-    %% Break up the 6 nodes into two clusters.
-    {ANodes, BNodes} = lists:split(3, Nodes),
-
-    lager:info("ANodes: ~p", [ANodes]),
-    lager:info("BNodes: ~p", [BNodes]),
-
-    lager:info("Building two clusters."),
-    [repl_util:make_cluster(N) || N <- [ANodes, BNodes]],
-
-    AFirst = hd(ANodes),
-    BFirst = hd(BNodes),
-
-    lager:info("Naming clusters."),
-    repl_util:name_cluster(AFirst, "A"),
-    repl_util:name_cluster(BFirst, "B"),
-
-    lager:info("Waiting for convergence."),
-    rt:wait_until_ring_converged(ANodes),
-    rt:wait_until_ring_converged(BNodes),
-
-    lager:info("Waiting for transfers to complete."),
-    rt:wait_until_transfers_complete(ANodes),
-    rt:wait_until_transfers_complete(BNodes),
-
-    lager:info("Get leaders."),
-    LeaderA = get_leader(AFirst),
-    LeaderB = get_leader(BFirst),
-
-    lager:info("Finding connection manager ports."),
-    BPort = get_port(LeaderB),
-
-    lager:info("Connecting cluster A to B"),
-    connect_cluster(LeaderA, BPort, "B"),
-
-    %% Get PBC connections.
-    APBC = rt:pbc(LeaderA),
-    BPBC = rt:pbc(LeaderB),
-
-    %% Write key.
-    ok = riakc_pb_socket:put(APBC,
-                             riakc_obj:new(<<"foo">>, <<"bar">>,
-                                           <<"baz">>),
-                             [{timeout, 4000}]),
-
-    %% Wait for trees to compute.
-    rt:wait_until_aae_trees_built(ANodes),
-    rt:wait_until_aae_trees_built(BNodes),
-
-    lager:info("Test fullsync from cluster A leader ~p to cluster B",
-               [LeaderA]),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    %% Flush AAE trees to disk.
-    perform_sacrifice(AFirst),
-
-    %% Wait for fullsync.
-    {Time1, _} = timer:tc(repl_util,
-                          start_and_wait_until_fullsync_complete,
-                          [LeaderA, "B"]),
-    lager:info("Fullsync completed in ~p seconds", [Time1/1000/1000]),
-
-    %% Read key from after fullsync.
-    {ok, O1} = riakc_pb_socket:get(BPBC, <<"foo">>, <<"bar">>,
-                                   [{timeout, 4000}]),
-    ?assertEqual(<<"baz">>, riakc_obj:get_value(O1)),
-
-    %% Put, generate sibling.
-    ok = riakc_pb_socket:put(APBC,
-                             riakc_obj:new(<<"foo">>, <<"bar">>,
-                                           <<"baz2">>),
-                             [{timeout, 4000}]),
-
-    %% Wait for fullsync.
-    {Time2, _} = timer:tc(repl_util,
-                          start_and_wait_until_fullsync_complete,
-                          [LeaderA, "B"]),
-    lager:info("Fullsync completed in ~p seconds", [Time2/1000/1000]),
-
-    %% Read key from after fullsync.
-    {ok, O2} = riakc_pb_socket:get(BPBC, <<"foo">>, <<"bar">>,
-                                   [{timeout, 4000}]),
-    ?assertEqual([<<"baz">>, <<"baz2">>], lists:sort(riakc_obj:get_values(O2))),
-
-    rt:clean_cluster(Nodes),
-
-    pass.
-
-deadlock_test() ->
-    %% Deploy 6 nodes.
-    Nodes = rt:deploy_nodes(6, ?CONF(5), [riak_kv, riak_repl]),
-
-    %% Break up the 6 nodes into two clusters.
-    {ANodes, BNodes} = lists:split(3, Nodes),
-
-    lager:info("ANodes: ~p", [ANodes]),
-    lager:info("BNodes: ~p", [BNodes]),
-
-    lager:info("Building two clusters."),
-    [repl_util:make_cluster(N) || N <- [ANodes, BNodes]],
-
-    AFirst = hd(ANodes),
-    BFirst = hd(BNodes),
-
-    lager:info("Naming clusters."),
-    repl_util:name_cluster(AFirst, "A"),
-    repl_util:name_cluster(BFirst, "B"),
-
-    lager:info("Waiting for convergence."),
-    rt:wait_until_ring_converged(ANodes),
-    rt:wait_until_ring_converged(BNodes),
-
-    lager:info("Waiting for transfers to complete."),
-    rt:wait_until_transfers_complete(ANodes),
-    rt:wait_until_transfers_complete(BNodes),
-
-    lager:info("Get leaders."),
-    LeaderA = get_leader(AFirst),
-    LeaderB = get_leader(BFirst),
-
-    lager:info("Finding connection manager ports."),
-    BPort = get_port(LeaderB),
-
-    lager:info("Connecting cluster A to B"),
-    connect_cluster(LeaderA, BPort, "B"),
-
-    %% Add intercept for delayed comparison of hashtrees.
-    Intercept = {riak_kv_index_hashtree, [{{compare, 4}, delayed_compare}]},
-    [ok = rt_intercept:add(Target, Intercept) || Target <- ANodes],
-
-    %% Wait for trees to compute.
-    rt:wait_until_aae_trees_built(ANodes),
-    rt:wait_until_aae_trees_built(BNodes),
-
-    lager:info("Test fullsync from cluster A leader ~p to cluster B",
-               [LeaderA]),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    %% Start fullsync.
-    lager:info("Starting fullsync to cluster B."),
-    rpc:call(LeaderA, riak_repl_console, fullsync, [["start", "B"]]),
-
-    %% Wait for fullsync to initialize and the AAE repl processes to
-    %% stall from the suspended intercepts.
-    %% TODO: What can be done better here?
-    timer:sleep(25000),
-
-    %% Attempt to get status from the fscoordinator.
-    Result = rpc:call(LeaderA, riak_repl2_fscoordinator, status, [], 500),
-    lager:info("Status result: ~p", [Result]),
-    ?assertNotEqual({badrpc, timeout}, Result),
-
-    rt:clean_cluster(Nodes),
-
-    pass.
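%% Editorial note (not part of the original patch): the 500 ms rpc timeout
%% above is the real assertion of deadlock_test/0 -- if the fscoordinator
%% gen_server were deadlocked behind the suspended hashtree compares, the
%% status call would return {badrpc, timeout} and the ?assertNotEqual
%% would fail the test.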
-
-%% @doc Required for 1.4+ Riak, write sacrificial keys to force AAE
-%% trees to flush to disk.
-perform_sacrifice(Node) ->
-    ?assertEqual([], repl_util:do_write(Node, 1, 2000,
-                                        <<"sacrificial">>, 1)).
-
-%% @doc Validate fullsync completed and all keys are available.
-validate_completed_fullsync(ReplicationLeader,
-                            DestinationNode,
-                            DestinationCluster,
-                            Start,
-                            End) ->
-    ok = check_fullsync(ReplicationLeader, DestinationCluster, 0),
-    lager:info("Verify: Reading ~p keys repl'd from A(~p) to ~p(~p)",
-               [?NUM_KEYS, ReplicationLeader,
-                DestinationCluster, DestinationNode]),
-    ?assertEqual(0,
-                 repl_util:wait_for_reads(DestinationNode,
-                                          Start,
-                                          End,
-                                          ?TEST_BUCKET,
-                                          1)).
-
-%% @doc Assert we can perform one fullsync cycle, and that the number of
-%% expected failures is correct.
-check_fullsync(Node, Cluster, ExpectedFailures) ->
-    {Time, _} = timer:tc(repl_util,
-                         start_and_wait_until_fullsync_complete,
-                         [Node, Cluster]),
-    lager:info("Fullsync completed in ~p seconds", [Time/1000/1000]),
-
-    Status = rpc:call(Node, riak_repl_console, status, [quiet]),
-
-    Props = case proplists:get_value(fullsync_coordinator, Status) of
-                [{_Name, Props0}] ->
-                    Props0;
-                Multiple ->
-                    {_Name, Props0} = lists:keyfind(Cluster, 1, Multiple),
-                    Props0
-            end,
-
-    %% check that the expected number of partitions failed to sync
-    ?assertEqual(ExpectedFailures,
-                 proplists:get_value(error_exits, Props)),
-
-    ok.
-
-%% @doc Validate fullsync handles errors for all possible intercept
-%% combinations.
-validate_intercepted_fullsync(InterceptTarget,
-                              ReplicationLeader,
-                              ReplicationCluster) ->
-    NumIndices = length(rpc:call(InterceptTarget,
-                                 riak_core_ring,
-                                 my_indices,
-                                 [rt:get_ring(InterceptTarget)])),
-    lager:info("~p owns ~p indices",
-               [InterceptTarget, NumIndices]),
-
-    %% Before enabling fullsync, ensure trees on one source node return
-    %% not_built to defer fullsync process.
-    validate_intercepted_fullsync(InterceptTarget,
-                                  {riak_kv_index_hashtree,
-                                   [{{get_lock, 2}, not_built}]},
-                                  ReplicationLeader,
-                                  ReplicationCluster,
-                                  NumIndices),
-
-    %% Before enabling fullsync, ensure trees on one source node return
-    %% already_locked to defer fullsync process.
-    validate_intercepted_fullsync(InterceptTarget,
-                                  {riak_kv_index_hashtree,
-                                   [{{get_lock, 2}, already_locked}]},
-                                  ReplicationLeader,
-                                  ReplicationCluster,
-                                  NumIndices),
-
-    %% Emulate in progress ownership transfers.
-    validate_intercepted_fullsync(InterceptTarget,
-                                  {riak_kv_vnode,
-                                   [{{hashtree_pid, 1}, wrong_node}]},
-                                  ReplicationLeader,
-                                  ReplicationCluster,
-                                  NumIndices).
-
-%% @doc Add an intercept on a target node to simulate a given failure
-%% mode, and then enable fullsync replication and verify it completes
-%% a full cycle. Subsequently, reboot the node.
-validate_intercepted_fullsync(InterceptTarget,
-                              Intercept,
-                              ReplicationLeader,
-                              ReplicationCluster,
-                              NumIndices) ->
-    lager:info("Validating intercept ~p on ~p.",
-               [Intercept, InterceptTarget]),
-
-    %% Add intercept.
-    ok = rt_intercept:add(InterceptTarget, Intercept),
-
-    %% Verify fullsync.
-    ok = check_fullsync(ReplicationLeader,
-                        ReplicationCluster,
-                        NumIndices),
-
-    %% Reboot node.
-    rt:stop_and_wait(InterceptTarget),
-    rt:start_and_wait(InterceptTarget),
-
-    %% Wait for riak_kv and riak_repl to initialize.
-    rt:wait_for_service(InterceptTarget, riak_kv),
-    rt:wait_for_service(InterceptTarget, riak_repl),
-
-    %% Wait until AAE trees are computed on the rebooted node.
-    rt:wait_until_aae_trees_built([InterceptTarget]).
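%% Editorial note (not part of the original patch): the intercept tuples
%% threaded through the helpers above all share the shape
%% {TargetModule, [{{Function, Arity}, InterceptName}]}. A minimal
%% standalone sketch, borrowing the delayed_compare intercept that
%% deadlock_test/0 installs (Node is assumed to be any node under test):
%%
%%   Intercept = {riak_kv_index_hashtree, [{{compare, 4}, delayed_compare}]},
%%   ok = rt_intercept:add(Node, Intercept),
%%   %% ...exercise the failure mode; rebooting the node, as
%%   %% validate_intercepted_fullsync/5 does, removes the intercept again.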
- -%% @doc Given a node, find the port that the cluster manager is -%% listening on. -get_port(Node) -> - {ok, {_IP, Port}} = rpc:call(Node, - application, - get_env, - [riak_core, cluster_mgr]), - Port. - -%% @doc Given a node, find out who the current replication leader in its -%% cluster is. -get_leader(Node) -> - rpc:call(Node, riak_core_cluster_mgr, get_leader, []). - -%% @doc Connect two clusters using a given name. -connect_cluster(Source, Port, Name) -> - lager:info("Connecting ~p to ~p for cluster ~p.", - [Source, Port, Name]), - repl_util:connect_cluster(Source, "127.0.0.1", Port), - ?assertEqual(ok, repl_util:wait_for_connection(Source, Name)). - -%% @doc Write a series of keys and ensure they are all written. -write_to_cluster(Node, Start, End) -> - lager:info("Writing ~p keys to node ~p.", [End - Start, Node]), - ?assertEqual([], - repl_util:do_write(Node, Start, End, ?TEST_BUCKET, 1)). - -%% @doc Read from cluster a series of keys, asserting a certain number -%% of errors. -read_from_cluster(Node, Start, End, Errors) -> - lager:info("Reading ~p keys from node ~p.", [End - Start, Node]), - Res2 = rt:systest_read(Node, Start, End, ?TEST_BUCKET, 1), - ?assertEqual(Errors, length(Res2)). diff --git a/tests/repl_aae_fullsync_bench.erl b/tests/repl_aae_fullsync_bench.erl deleted file mode 100644 index 6c3a9377a..000000000 --- a/tests/repl_aae_fullsync_bench.erl +++ /dev/null @@ -1,73 +0,0 @@ -%% @doc -%% This module implements a riak_test to exercise the Active Anti-Entropy Fullsync replication. -%% It sets up two clusters, runs a fullsync over all partitions, and verifies the missing keys -%% were replicated to the sink cluster. - --module(repl_aae_fullsync_bench). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - NumNodesWanted = 6, %% total number of nodes needed - ClusterASize = 3, %% how many to allocate to cluster A - NumKeysAOnly = 5000, %% how many keys on A that are missing on B - NumKeysBoth = 45000, %% number of common keys on both A and B - Conf = [ %% riak configuration - {riak_kv, - [ - %% Specify fast building of AAE trees - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_strategy, aae}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled} - ]} - ], - - %% build clusters - {ANodes, BNodes} = repl_aae_fullsync_util:make_clusters(NumNodesWanted, ClusterASize, Conf), - - %% run test - aae_fs_test(NumKeysAOnly, NumKeysBoth, ANodes, BNodes), - pass. 
-
-aae_fs_test(NumKeysAOnly, NumKeysBoth, ANodes, BNodes) ->
-    %% populate them with data
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) ||
-                               <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-    repl_aae_fullsync_util:prepare_cluster_data(TestBucket, NumKeysAOnly, NumKeysBoth, ANodes, BNodes),
-
-    AFirst = hd(ANodes),
-    BFirst = hd(BNodes),
-    AllNodes = ANodes ++ BNodes,
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-
-    %%---------------------------------------------------------
-    %% TEST: fullsync, check that non-RT'd keys get repl'd to B
-    %% keys: 1..NumKeysAOnly
-    %%---------------------------------------------------------
-
-    rt:log_to_nodes(AllNodes, "Test fullsync from cluster A leader ~p to cluster B", [LeaderA]),
-    lager:info("Test fullsync from cluster A leader ~p to cluster B", [LeaderA]),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-    {Time,_} = timer:tc(repl_util,start_and_wait_until_fullsync_complete,[LeaderA]),
-    lager:info("Fullsync completed in ~p seconds", [Time/1000/1000]),
-
-    %% verify data is replicated to B
-    NumKeysToVerify = min(1000, NumKeysAOnly),
-    rt:log_to_nodes(AllNodes, "Verify: Reading ~p keys repl'd from A(~p) to B(~p)",
-                    [NumKeysToVerify, LeaderA, BFirst]),
-    lager:info("Verify: Reading ~p keys repl'd from A(~p) to B(~p)",
-               [NumKeysToVerify, LeaderA, BFirst]),
-    ?assertEqual(0, repl_util:wait_for_reads(BFirst, 1, NumKeysToVerify, TestBucket, 2)),
-
-    ok.
-
diff --git a/tests/repl_aae_fullsync_bt.erl b/tests/repl_aae_fullsync_bt.erl
deleted file mode 100644
index 96c537003..000000000
--- a/tests/repl_aae_fullsync_bt.erl
+++ /dev/null
@@ -1,303 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% -------------------------------------------------------------------
--module(repl_aae_fullsync_bt).
--behaviour(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(ENSURE_READ_ITERATIONS, 5).
--define(ENSURE_READ_INTERVAL, 1000).
-
-%% Replication Bucket Types test
-%%
-
-setup(Type) ->
-    rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]),
-
-    {LeaderA, LeaderB, ANodes, BNodes} = ClusterNodes = make_clusters(Type),
-
-    PBA = rt:pbc(LeaderA),
-    PBB = rt:pbc(LeaderB),
-
-    {DefinedType, UndefType} = Types = {<<"working_type">>, <<"undefined_type">>},
-
-    rt:create_and_activate_bucket_type(LeaderA,
-                                       DefinedType,
-                                       [{n_val, 3}, {allow_mult, false}]),
-    rt:wait_until_bucket_type_status(DefinedType, active, ANodes),
-    rt:wait_until_bucket_type_visible(ANodes, DefinedType),
-
-    case Type of
-        current ->
-            rt:create_and_activate_bucket_type(LeaderB,
-                                               DefinedType,
-                                               [{n_val, 3}, {allow_mult, false}]),
-            rt:wait_until_bucket_type_status(DefinedType, active, BNodes),
-            rt:wait_until_bucket_type_visible(BNodes, DefinedType);
-        mixed ->
-            ok;
-        aae ->
-            rt:create_and_activate_bucket_type(LeaderB,
-                                               DefinedType,
-                                               [{n_val, 3}, {allow_mult, false}]),
-            rt:wait_until_bucket_type_status(DefinedType, active, BNodes),
-            rt:wait_until_bucket_type_visible(BNodes, DefinedType)
-    end,
-
-    rt:create_and_activate_bucket_type(LeaderA,
-                                       UndefType,
-                                       [{n_val, 3}, {allow_mult, false}]),
-    rt:wait_until_bucket_type_status(UndefType, active, ANodes),
-    rt:wait_until_bucket_type_visible(ANodes, UndefType),
-
-    connect_clusters(LeaderA, LeaderB),
-    {ClusterNodes, Types, PBA, PBB}.
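%% Editorial note (not part of the original patch): bucket type creation
%% propagates asynchronously via cluster metadata, which is why every
%% create_and_activate_bucket_type/3 call in setup/1 above is paired with
%% wait_until_bucket_type_status/3 and wait_until_bucket_type_visible/2
%% before any typed put is attempted.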
- -cleanup({ClusterNodes, _Types, PBA, PBB}, CleanCluster) -> - riakc_pb_socket:stop(PBA), - riakc_pb_socket:stop(PBB), - {_, _, ANodes, BNodes} = ClusterNodes, - case CleanCluster of - true -> - rt:clean_cluster(ANodes ++ BNodes); - false -> - ok - end. - -%% @doc riak_test entry point -confirm() -> - AAESetupData = setup(aae), - aae_fullsync_test(AAESetupData), - cleanup(AAESetupData, true), - pass. - -aae_fullsync_test({ClusterNodes, BucketTypes, PBA, PBB}) -> - {LeaderA, LeaderB, ANodes, BNodes} = ClusterNodes, - {DefinedType, UndefType} = BucketTypes, - - lager:info("Enabling AAE fullsync between ~p and ~p", [LeaderA, LeaderB]), - enable_fullsync(LeaderA, ANodes), - - Bin = <<"data data data">>, - Key = <<"key">>, - Bucket = <<"fullsync-kicked">>, - DefaultObj = riakc_obj:new(Bucket, Key, Bin), - lager:info("doing untyped put on A, bucket:~p", [Bucket]), - riakc_pb_socket:put(PBA, DefaultObj, [{w,3}]), - - BucketTyped = {DefinedType, <<"fullsync-typekicked">>}, - KeyTyped = <<"keytyped">>, - ObjTyped = riakc_obj:new(BucketTyped, KeyTyped, Bin), - - lager:info("doing typed put on A, bucket:~p", [BucketTyped]), - riakc_pb_socket:put(PBA, ObjTyped, [{w,3}]), - - UndefBucketTyped = {UndefType, <<"fullsync-badtype">>}, - UndefKeyTyped = <<"badkeytyped">>, - UndefObjTyped = riakc_obj:new(UndefBucketTyped, UndefKeyTyped, Bin), - - lager:info("doing typed put on A where type is not " - "defined on B, bucket:~p", - [UndefBucketTyped]), - - riakc_pb_socket:put(PBA, UndefObjTyped, [{w,3}]), - - lager:info("waiting for AAE trees to build on all nodes"), - rt:wait_until_aae_trees_built(ANodes), - rt:wait_until_aae_trees_built(BNodes), - - perform_sacrifice(LeaderA), - - {SyncTime1, _} = timer:tc(repl_util, - start_and_wait_until_fullsync_complete, - [LeaderA]), - - lager:info("AAE Fullsync completed in ~p seconds", [SyncTime1/1000/1000]), - - ReadResult1 = riakc_pb_socket:get(PBB, Bucket, Key), - ReadResult2 = riakc_pb_socket:get(PBB, BucketTyped, KeyTyped), - ReadResult3 = riakc_pb_socket:get(PBB, UndefBucketTyped, UndefKeyTyped), - - ?assertMatch({ok, _}, ReadResult1), - ?assertMatch({ok, _}, ReadResult2), - ?assertMatch({error, _}, ReadResult3), - - {ok, ReadObj1} = ReadResult1, - {ok, ReadObj2} = ReadResult2, - - ?assertEqual(Bin, riakc_obj:get_value(ReadObj1)), - ?assertEqual(Bin, riakc_obj:get_value(ReadObj2)), - ?assertEqual({error, <<"no_type">>}, ReadResult3), - - DefaultProps = get_current_bucket_props(BNodes, DefinedType), - ?assertEqual({n_val, 3}, lists:keyfind(n_val, 1, DefaultProps)), - - update_props(DefinedType, [{n_val, 1}], LeaderB, BNodes), - ok = rt:wait_until(fun() -> - UpdatedProps = get_current_bucket_props(BNodes, DefinedType), - {n_val, 1} =:= lists:keyfind(n_val, 1, UpdatedProps) - end), - - UnequalObjBin = <<"unequal props val">>, - UnequalPropsObj = riakc_obj:new(BucketTyped, KeyTyped, UnequalObjBin), - lager:info("doing put of typed bucket on A where bucket properties (n_val 3 versus n_val 1) are not equal on B"), - riakc_pb_socket:put(PBA, UnequalPropsObj, [{w,3}]), - - {SyncTime2, _} = timer:tc(repl_util, - start_and_wait_until_fullsync_complete, - [LeaderA]), - - lager:info("AAE Fullsync completed in ~p seconds", [SyncTime2/1000/1000]), - - lager:info("checking to ensure the bucket contents were not updated."), - ensure_bucket_not_updated(PBB, BucketTyped, KeyTyped, Bin). - -%% @doc Turn on fullsync replication on the cluster lead by LeaderA. -%% The clusters must already have been named and connected. 
-enable_fullsync(LeaderA, ANodes) -> - repl_util:enable_fullsync(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes). - -%% @doc Connect two clusters for replication using their respective leader nodes. -connect_clusters(LeaderA, LeaderB) -> - Port = repl_util:get_port(LeaderB), - lager:info("connect cluster A:~p to B on port ~p", [LeaderA, Port]), - repl_util:connect_cluster(LeaderA, "127.0.0.1", Port), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")). - -cluster_conf_aae() -> - [ - {riak_core, - [ - {ring_creation_size, 8} - ] - }, - {riak_kv, - [ - %% Specify fast building of AAE trees - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_strategy, aae}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_soft_retries, 10}, - {max_fssource_retries, infinity} - ]} - ]. - -cluster_conf() -> - [ - {riak_repl, - [ - %% turn off fullsync - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_cluster, 20}, - {max_fssource_node, 20}, - {max_fssink_node, 20}, - {rtq_max_bytes, 1048576} - ]} - ]. - -deploy_nodes(NumNodes, current) -> - rt:deploy_nodes(NumNodes, cluster_conf(), [riak_kv, riak_repl]); -deploy_nodes(NumNodes, aae) -> - rt:deploy_nodes(NumNodes, cluster_conf_aae(), [riak_kv, riak_repl]); -deploy_nodes(_, mixed) -> - Conf = cluster_conf(), - rt:deploy_nodes([{current, Conf}, {previous, Conf}], [riak_kv, riak_repl]). - -%% @doc Create two clusters of 1 node each and connect them for replication: -%% Cluster "A" -> cluster "B" -make_clusters(Type) -> - NumNodes = rt_config:get(num_nodes, 2), - ClusterASize = rt_config:get(cluster_a_size, 1), - - lager:info("Deploy ~p nodes", [NumNodes]), - Nodes = deploy_nodes(NumNodes, Type), - {ANodes, BNodes} = lists:split(ClusterASize, Nodes), - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - - %% Name the clusters - repl_util:name_cluster(AFirst, "A"), - repl_util:name_cluster(BFirst, "B"), - - lager:info("Waiting for convergence."), - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - lager:info("Waiting for transfers to complete."), - rt:wait_until_transfers_complete(ANodes), - rt:wait_until_transfers_complete(BNodes), - - %% get the leader for the first cluster - lager:info("waiting for leader to converge on cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - - %% get the leader for the second cluster - lager:info("waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - - ALeader = repl_util:get_leader(hd(ANodes)), - BLeader = repl_util:get_leader(hd(BNodes)), - - lager:info("ALeader: ~p BLeader: ~p", [ALeader, BLeader]), - {ALeader, BLeader, ANodes, BNodes}. - -ensure_bucket_not_updated(Pid, Bucket, Key, Bin) -> - Results = [ value_unchanged(Pid, Bucket, Key, Bin) || _I <- lists:seq(1, ?ENSURE_READ_ITERATIONS)], - ?assertEqual(false, lists:member(false, Results)). - -value_unchanged(Pid, Bucket, Key, Bin) -> - case riakc_pb_socket:get(Pid, Bucket, Key) of - {error, E} -> - lager:info("Got error:~p from get on cluster B", [E]), - false; - {ok, Res} -> - ?assertEqual(Bin, riakc_obj:get_value(Res)), - true - end, - timer:sleep(?ENSURE_READ_INTERVAL). 
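%% Editorial note (not part of the original patch): proving that a value
%% was NOT replicated cannot be done with a single read, since the unwanted
%% update may simply not have arrived yet; ensure_bucket_not_updated/4
%% therefore re-reads ?ENSURE_READ_ITERATIONS times, sleeping
%% ?ENSURE_READ_INTERVAL ms between attempts, before declaring success.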
-
-update_props(Type, Updates, Node, Nodes) ->
-    lager:info("Setting bucket properties ~p for bucket type ~p on node ~p",
-               [Updates, Type, Node]),
-    rpc:call(Node, riak_core_bucket_type, update, [Type, Updates]),
-    rt:wait_until_ring_converged(Nodes),
-
-    get_current_bucket_props(Nodes, Type).
-
-%% fetch bucket properties via rpc
-%% from a node or a list of nodes (the last node in the list is used)
-get_current_bucket_props(Nodes, Type) when is_list(Nodes) ->
-    Node = lists:nth(length(Nodes), Nodes),
-    get_current_bucket_props(Node, Type);
-get_current_bucket_props(Node, Type) when is_atom(Node) ->
-    rpc:call(Node,
-             riak_core_bucket_type,
-             get,
-             [Type]).
-
-%% @doc Required for 1.4+ Riak, write sacrificial keys to force AAE
-%% trees to flush to disk.
-perform_sacrifice(Node) ->
-    ?assertEqual([], repl_util:do_write(Node, 1, 2000,
-                                        <<"sacrificial">>, 1)).
diff --git a/tests/repl_aae_fullsync_custom_n.erl b/tests/repl_aae_fullsync_custom_n.erl
deleted file mode 100644
index a8294bcd2..000000000
--- a/tests/repl_aae_fullsync_custom_n.erl
+++ /dev/null
@@ -1,113 +0,0 @@
-%% @doc
-%% This module implements a riak_test to exercise the Active Anti-Entropy Fullsync replication.
-%% It sets up two clusters, runs a fullsync over all partitions, and verifies the missing keys
-%% were replicated to the sink cluster.
-%%
-%% Specifically, this test sets a custom N-value on the test bucket on the source cluster, which
-%% is going to break the AAE fullsync. We don't yet handle AAE fullsync when bucket N values differ
-%% between the two clusters. "not_responsible" is returned during the hashtree compare, and the
-%% fullsync source module should restart the connection using the keylist strategy.
-
--module(repl_aae_fullsync_custom_n).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
-confirm() ->
-    NumNodesWanted = 6,     %% total number of nodes needed
-    ClusterASize = 3,       %% how many to allocate to cluster A
-    NumKeysAOnly = 10000,   %% how many keys on A that are missing on B
-    NumKeysBoth = 10000,    %% number of common keys on both A and B
-    Conf = [                %% riak configuration
-            {riak_core,
-             [
-              {ring_creation_size, 8},
-              {default_bucket_props, [{n_val, 1}]}
-             ]
-            },
-            {riak_kv,
-             [
-              %% Specify fast building of AAE trees
-              {anti_entropy, {on, []}},
-              {anti_entropy_build_limit, {100, 1000}},
-              {anti_entropy_concurrency, 100}
-             ]
-            },
-            {riak_repl,
-             [
-              {fullsync_strategy, aae},
-              {fullsync_on_connect, false},
-              {fullsync_interval, disabled}
-             ]}
-           ],
-
-    %% build clusters
-    {ANodes, BNodes} = repl_aae_fullsync_util:make_clusters(NumNodesWanted, ClusterASize, Conf),
-
-    %% run test
-    aae_fs_test(NumKeysAOnly, NumKeysBoth, ANodes, BNodes),
-    pass.
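%% Editorial note (not part of the original patch): AAE hashtrees are kept
%% per {partition, n_val} pair, so when the source and sink disagree on a
%% bucket's n_val the sink's trees cannot answer for those keys; the
%% exchange reports not_responsible and the source is expected to fall back
%% to the keylist strategy, which is the behaviour this test pins down.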
-
-aae_fs_test(NumKeysAOnly, NumKeysBoth, ANodes, BNodes) ->
-    AFirst = hd(ANodes),
-    BFirst = hd(BNodes),
-    AllNodes = ANodes ++ BNodes,
-
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) ||
-                               <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-
-    %% Set a different bucket N value between the two clusters
-    NewProps = [{n_val, 2}],
-    DefaultProps = get_current_bucket_props(BNodes, TestBucket),
-    lager:info("Setting custom bucket n_val = ~p on node ~p", [2, AFirst]),
-    update_props(DefaultProps, NewProps, AFirst, ANodes, TestBucket),
-
-    %% populate them with data
-    repl_aae_fullsync_util:prepare_cluster_data(TestBucket, NumKeysAOnly, NumKeysBoth, ANodes, BNodes),
-
-    %%---------------------------------------------------------
-    %% TEST: fullsync, check that non-RT'd keys get repl'd to B
-    %% keys: 1..NumKeysAOnly
-    %%---------------------------------------------------------
-
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-
-    rt:log_to_nodes(AllNodes, "Test fullsync from cluster A leader ~p to cluster B", [LeaderA]),
-    lager:info("Test fullsync from cluster A leader ~p to cluster B", [LeaderA]),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    %% Start fullsync and wait for it to finish.
-    {Time,_} = timer:tc(repl_util,start_and_wait_until_fullsync_complete,[LeaderA]),
-    lager:info("Fullsync completed in ~p seconds", [Time/1000/1000]),
-
-    %% verify data is replicated to B
-    rt:log_to_nodes(AllNodes, "Verify: Reading ~p keys repl'd from A(~p) to B(~p)",
-                    [NumKeysAOnly, LeaderA, BFirst]),
-    lager:info("Verify: Reading ~p keys repl'd from A(~p) to B(~p)",
-               [NumKeysAOnly, LeaderA, BFirst]),
-    ?assertEqual(0, repl_util:wait_for_reads(BFirst, 1, NumKeysAOnly,
-                                             TestBucket, 1)),
-
-    ok.
-
-update_props(DefaultProps, NewProps, Node, Nodes, Bucket) ->
-    lager:info("Setting bucket properties ~p for bucket ~p on node ~p",
-               [NewProps, Bucket, Node]),
-    rpc:call(Node, riak_core_bucket, set_bucket, [Bucket, NewProps]),
-    rt:wait_until_ring_converged(Nodes),
-
-    UpdatedProps = get_current_bucket_props(Nodes, Bucket),
-    ?assertNotEqual(DefaultProps, UpdatedProps).
-
-%% fetch bucket properties via rpc
-%% from a node or a list of nodes (the last node in the list is used)
-get_current_bucket_props(Nodes, Bucket) when is_list(Nodes) ->
-    Node = lists:nth(length(Nodes), Nodes),
-    get_current_bucket_props(Node, Bucket);
-get_current_bucket_props(Node, Bucket) when is_atom(Node) ->
-    rpc:call(Node,
-             riak_core_bucket,
-             get_bucket,
-             [Bucket]).
diff --git a/tests/repl_aae_fullsync_util.erl b/tests/repl_aae_fullsync_util.erl
deleted file mode 100644
index 33714c77e..000000000
--- a/tests/repl_aae_fullsync_util.erl
+++ /dev/null
@@ -1,80 +0,0 @@
-%% @doc
-%% This module implements a riak_test to exercise the Active Anti-Entropy Fullsync replication.
-%% It sets up two clusters and starts a single fullsync worker for a single AAE tree.
--module(repl_aae_fullsync_util).
--export([make_clusters/3,
-         prepare_cluster_data/5]).
--include_lib("eunit/include/eunit.hrl").
-
--import(rt, [deploy_nodes/3,
-             join/2,
-             log_to_nodes/2,
-             log_to_nodes/3]).
- -make_clusters(NumNodesWanted, ClusterSize, Conf) -> - NumNodes = rt_config:get(num_nodes, NumNodesWanted), - ClusterASize = rt_config:get(cluster_a_size, ClusterSize), - lager:info("Deploy ~p nodes", [NumNodes]), - Nodes = deploy_nodes(NumNodes, Conf, [riak_kv, riak_repl]), - - {ANodes, BNodes} = lists:split(ClusterASize, Nodes), - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - {ANodes, BNodes}. - -prepare_cluster_data(TestBucket, NumKeysAOnly, _NumKeysBoth, [AFirst|_] = ANodes, [BFirst|_] = BNodes) -> - AllNodes = ANodes ++ BNodes, - log_to_nodes(AllNodes, "Starting AAE Fullsync test"), - - %% clusters are not connected, connect them - - repl_util:name_cluster(AFirst, "A"), - repl_util:name_cluster(BFirst, "B"), - - %% we'll need to wait for cluster names before continuing - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - lager:info("waiting for leader to converge on cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - lager:info("waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - - %% get the leader for the first cluster - LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []), - - {ok, {_IP, Port}} = rpc:call(BFirst, application, get_env, - [riak_core, cluster_mgr]), - - lager:info("connect cluster A:~p to B on port ~p", [LeaderA, Port]), - repl_util:connect_cluster(LeaderA, "127.0.0.1", Port), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - - %% make sure we are connected - lager:info("Wait for cluster connection A:~p -> B:~p:~p", [LeaderA, BFirst, Port]), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - - %%--------------------------------------------------- - %% TEST: write data, NOT replicated by RT or fullsync - %% keys: 1..NumKeysAOnly - %%--------------------------------------------------- - - lager:info("Writing ~p keys to A(~p)", [NumKeysAOnly, AFirst]), - ?assertEqual([], repl_util:do_write(AFirst, 1, NumKeysAOnly, TestBucket, 2)), - - %% check that the keys we wrote initially aren't replicated yet, because - %% we've disabled fullsync_on_connect - lager:info("Check keys written before repl was connected are not present"), - Res2 = rt:systest_read(BFirst, 1, NumKeysAOnly, TestBucket, 1, <<>>, true), - ?assertEqual(NumKeysAOnly, length(Res2)), - - %% wait for the AAE trees to be built so that we don't get a not_built error - rt:wait_until_aae_trees_built(ANodes), - rt:wait_until_aae_trees_built(BNodes), - ok. diff --git a/tests/repl_bucket_types.erl b/tests/repl_bucket_types.erl deleted file mode 100644 index aacec149b..000000000 --- a/tests/repl_bucket_types.erl +++ /dev/null @@ -1,469 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% ------------------------------------------------------------------- --module(repl_bucket_types). --behaviour(riak_test). --export([confirm/0]). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). - --define(ENSURE_READ_ITERATIONS, 5). --define(ENSURE_READ_INTERVAL, 1000). 
- -%% Replication Bucket Types test -%% - -setup(Type) -> - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), - - {LeaderA, LeaderB, ANodes, BNodes} = ClusterNodes = make_clusters(Type), - - PBA = rt:pbc(LeaderA), - PBB = rt:pbc(LeaderB), - - {DefinedType, UndefType} = Types = {<<"working_type">>, <<"undefined_type">>}, - - rt:create_and_activate_bucket_type(LeaderA, - DefinedType, - [{n_val, 3}, {allow_mult, false}]), - rt:wait_until_bucket_type_status(DefinedType, active, ANodes), - rt:wait_until_bucket_type_visible(ANodes, DefinedType), - - case Type of - current -> - rt:create_and_activate_bucket_type(LeaderB, - DefinedType, - [{n_val, 3}, {allow_mult, false}]), - rt:wait_until_bucket_type_status(DefinedType, active, BNodes), - rt:wait_until_bucket_type_visible(BNodes, DefinedType); - mixed -> - ok - end, - - rt:create_and_activate_bucket_type(LeaderA, - UndefType, - [{n_val, 3}, {allow_mult, false}]), - rt:wait_until_bucket_type_status(UndefType, active, ANodes), - rt:wait_until_bucket_type_visible(ANodes, UndefType), - - connect_clusters(LeaderA, LeaderB), - {ClusterNodes, Types, PBA, PBB}. - -cleanup({ClusterNodes, _Types, PBA, PBB}, CleanCluster) -> - riakc_pb_socket:stop(PBA), - riakc_pb_socket:stop(PBB), - {_, _, ANodes, BNodes} = ClusterNodes, - case CleanCluster of - true -> - rt:clean_cluster(ANodes ++ BNodes); - false -> - ok - end. - -%% @doc riak_test entry point -confirm() -> - %% Test two clusters of the current version - SetupData = setup(current), - realtime_test(SetupData), - fullsync_test(SetupData), - cleanup(SetupData, true), - - %% Test a cluster of the current version replicating to a cluster - %% of the previous version - MixedSetupData = setup(mixed), - realtime_mixed_version_test(MixedSetupData), - fullsync_mixed_version_test(MixedSetupData), - cleanup(MixedSetupData, false), - pass. 
- -realtime_test({ClusterNodes, BucketTypes, PBA, PBB}) -> - {LeaderA, LeaderB, ANodes, BNodes} = ClusterNodes, - {DefinedType, UndefType} = BucketTypes, - - %% Enable RT replication from cluster "A" to cluster "B" - lager:info("Enabling realtime between ~p and ~p", [LeaderA, LeaderB]), - enable_rt(LeaderA, ANodes), - - Bin = <<"data data data">>, - Key = <<"key">>, - Bucket = <<"kicked">>, - DefaultObj = riakc_obj:new(Bucket, Key, Bin), - lager:info("doing untyped put on A, bucket:~p", [Bucket]), - riakc_pb_socket:put(PBA, DefaultObj, [{w,3}]), - - UntypedWait = make_pbget_fun(PBB, Bucket, Key, Bin), - ?assertEqual(ok, rt:wait_until(UntypedWait)), - - BucketTyped = {DefinedType, <<"typekicked">>}, - KeyTyped = <<"keytyped">>, - ObjTyped = riakc_obj:new(BucketTyped, KeyTyped, Bin), - - lager:info("doing typed put on A, bucket:~p", [BucketTyped]), - riakc_pb_socket:put(PBA, ObjTyped, [{w,3}]), - - TypedWait = make_pbget_fun(PBB, BucketTyped, KeyTyped, Bin), - ?assertEqual(ok, rt:wait_until(TypedWait)), - - UndefBucketTyped = {UndefType, <<"badtype">>}, - UndefKeyTyped = <<"badkeytyped">>, - UndefObjTyped = riakc_obj:new(UndefBucketTyped, UndefKeyTyped, Bin), - UndefObjTyped2 = riakc_obj:new(UndefBucketTyped, UndefKeyTyped, <<"data1">>), - - lager:info("doing typed put on A where type is not defined on B, bucket:~p", [UndefBucketTyped]), - riakc_pb_socket:put(PBA, UndefObjTyped, [{w,3}]), - riakc_pb_socket:put(PBA, UndefObjTyped2, [{w,3}]), - - lager:info("waiting for undefined type pb get on B, should get error <<\"no_type\">>"), - - ErrorResult = riakc_pb_socket:get(PBB, UndefBucketTyped, UndefKeyTyped), - ?assertEqual({error, <<"no_type">>}, ErrorResult), - - % checking the rtq had drained on the source cluster - ensure_rtq_drained(ANodes), - - DefaultProps = get_current_bucket_props(BNodes, DefinedType), - ?assertEqual({n_val, 3}, lists:keyfind(n_val, 1, DefaultProps)), - - update_props(DefinedType, [{n_val, 1}], LeaderB, BNodes), - ok = rt:wait_until(fun() -> - UpdatedProps = get_current_bucket_props(BNodes, DefinedType), - {n_val, 1} =:= lists:keyfind(n_val, 1, UpdatedProps) - end), - - UnequalObjBin = <<"unequal props val">>, - UnequalPropsObj = riakc_obj:new(BucketTyped, KeyTyped, UnequalObjBin), - lager:info("doing put of typed bucket on A where bucket properties " - "(n_val 3 versus n_val 1) are not equal on B"), - riakc_pb_socket:put(PBA, UnequalPropsObj, [{w,3}]), - - lager:info("checking to ensure the bucket contents were not updated."), - ensure_bucket_not_updated(PBB, BucketTyped, KeyTyped, Bin), - disable_rt(LeaderA, ANodes), - - update_props(DefinedType, [{n_val, 3}], LeaderB, BNodes), - ok = rt:wait_until(fun() -> - UpdatedProps2 = get_current_bucket_props(BNodes, DefinedType), - {n_val, 3} =:= lists:keyfind(n_val, 1, UpdatedProps2) - end), - - disable_rt(LeaderA, ANodes). 
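%% Editorial note (not part of the original patch): realtime delivery is
%% asynchronous, so the sink-side reads in realtime_test/1 above are wrapped
%% in closures via make_pbget_fun/4 and polled with rt:wait_until/1, which
%% retries a zero-arity fun until it returns true or riak_test's wait
%% timeout expires, rather than asserting on a single immediate read.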
-
-realtime_mixed_version_test({ClusterNodes, BucketTypes, PBA, PBB}) ->
-    {LeaderA, LeaderB, ANodes, _BNodes} = ClusterNodes,
-    {DefinedType, _UndefType} = BucketTypes,
-
-    %% Enable RT replication from cluster "A" to cluster "B"
-    lager:info("Enabling realtime between ~p and ~p", [LeaderA, LeaderB]),
-    enable_rt(LeaderA, ANodes),
-
-    Bin = <<"data data data">>,
-    Key = <<"key">>,
-    Bucket = <<"kicked">>,
-    DefaultObj = riakc_obj:new(Bucket, Key, Bin),
-    lager:info("doing untyped put on A, bucket:~p", [Bucket]),
-    riakc_pb_socket:put(PBA, DefaultObj, [{w,3}]),
-
-    %% make sure we rt replicate a "default" type bucket
-    UntypedWait = make_pbget_fun(PBB, Bucket, Key, Bin),
-    ?assertEqual(ok, rt:wait_until(UntypedWait)),
-
-    DowngradedBucketTyped = {DefinedType, <<"typekicked">>},
-    KeyTyped = <<"keytyped">>,
-    ObjTyped = riakc_obj:new(DowngradedBucketTyped, KeyTyped, Bin),
-
-    lager:info("doing typed put on A with downgraded B, bucket:~p", [DowngradedBucketTyped]),
-    riakc_pb_socket:put(PBA, ObjTyped, [{w,3}]),
-
-    lager:info("checking to ensure the bucket contents were not sent to previous version B."),
-    ensure_bucket_not_sent(PBB, DowngradedBucketTyped, KeyTyped).
-
-fullsync_test({ClusterNodes, BucketTypes, PBA, PBB}) ->
-    {LeaderA, LeaderB, ANodes, BNodes} = ClusterNodes,
-    {DefinedType, UndefType} = BucketTypes,
-
-    %% Enable fullsync replication from cluster "A" to cluster "B"
-    lager:info("Enabling fullsync between ~p and ~p", [LeaderA, LeaderB]),
-    enable_fullsync(LeaderA, ANodes),
-
-    Bin = <<"data data data">>,
-    Key = <<"key">>,
-    Bucket = <<"fullsync-kicked">>,
-    DefaultObj = riakc_obj:new(Bucket, Key, Bin),
-    lager:info("doing untyped put on A, bucket:~p", [Bucket]),
-    riakc_pb_socket:put(PBA, DefaultObj, [{w,3}]),
-
-    BucketTyped = {DefinedType, <<"fullsync-typekicked">>},
-    KeyTyped = <<"keytyped">>,
-    ObjTyped = riakc_obj:new(BucketTyped, KeyTyped, Bin),
-
-    lager:info("doing typed put on A, bucket:~p", [BucketTyped]),
-    riakc_pb_socket:put(PBA, ObjTyped, [{w,3}]),
-
-    UndefBucketTyped = {UndefType, <<"fullsync-badtype">>},
-    UndefKeyTyped = <<"badkeytyped">>,
-    UndefObjTyped = riakc_obj:new(UndefBucketTyped, UndefKeyTyped, Bin),
-
-    lager:info("doing typed put on A where type is not "
-               "defined on B, bucket:~p",
-               [UndefBucketTyped]),
-
-    riakc_pb_socket:put(PBA, UndefObjTyped, [{w,3}]),
-
-    {SyncTime1, _} = timer:tc(repl_util,
-                              start_and_wait_until_fullsync_complete,
-                              [LeaderA]),
-
-    lager:info("Fullsync completed in ~p seconds", [SyncTime1/1000/1000]),
-
-    ReadResult1 = riakc_pb_socket:get(PBB, Bucket, Key),
-    ReadResult2 = riakc_pb_socket:get(PBB, BucketTyped, KeyTyped),
-    ReadResult3 = riakc_pb_socket:get(PBB, UndefBucketTyped, UndefKeyTyped),
-
-    ?assertMatch({ok, _}, ReadResult1),
-    ?assertMatch({ok, _}, ReadResult2),
-    ?assertMatch({error, _}, ReadResult3),
-
-    {ok, ReadObj1} = ReadResult1,
-    {ok, ReadObj2} = ReadResult2,
-
-    ?assertEqual(Bin, riakc_obj:get_value(ReadObj1)),
-    ?assertEqual(Bin, riakc_obj:get_value(ReadObj2)),
-    ?assertEqual({error, <<"no_type">>}, ReadResult3),
-
-    DefaultProps = get_current_bucket_props(BNodes, DefinedType),
-    ?assertEqual({n_val, 3}, lists:keyfind(n_val, 1, DefaultProps)),
-
-    update_props(DefinedType, [{n_val, 1}], LeaderB, BNodes),
-    ok = rt:wait_until(fun() ->
-                           UpdatedProps = get_current_bucket_props(BNodes, DefinedType),
-                           {n_val, 1} =:= lists:keyfind(n_val, 1, UpdatedProps)
-                       end),
-
-    UnequalObjBin = <<"unequal props val">>,
-    UnequalPropsObj = riakc_obj:new(BucketTyped, KeyTyped, UnequalObjBin),
-    lager:info("doing put of typed bucket on A where bucket properties (n_val 3 versus n_val 1) are not equal on B"),
-    riakc_pb_socket:put(PBA, UnequalPropsObj, [{w,3}]),
-
-    {SyncTime2, _} = timer:tc(repl_util,
-                              start_and_wait_until_fullsync_complete,
-                              [LeaderA]),
-
-    lager:info("Fullsync completed in ~p seconds", [SyncTime2/1000/1000]),
-
-    lager:info("checking to ensure the bucket contents were not updated."),
-    ensure_bucket_not_updated(PBB, BucketTyped, KeyTyped, Bin).
-
-fullsync_mixed_version_test({ClusterNodes, BucketTypes, PBA, PBB}) ->
-    {LeaderA, LeaderB, ANodes, _BNodes} = ClusterNodes,
-    {DefinedType, _UndefType} = BucketTypes,
-
-    %% Enable fullsync replication from cluster "A" to cluster "B"
-    lager:info("Enabling fullsync between ~p and ~p", [LeaderA, LeaderB]),
-    enable_fullsync(LeaderA, ANodes),
-
-    Bin = <<"good data">>,
-    Key = <<"key">>,
-    Bucket = <<"fullsync-kicked">>,
-    DefaultObj = riakc_obj:new(Bucket, Key, Bin),
-    lager:info("doing untyped put on A, bucket:~p", [Bucket]),
-    riakc_pb_socket:put(PBA, DefaultObj, [{w,3}]),
-
-    BucketTyped = {DefinedType, Bucket},
-    KeyTyped = <<"keytyped">>,
-    BadBin = <<"overwritten">>,
-    ObjTyped = riakc_obj:new(BucketTyped, KeyTyped, BadBin),
-
-    lager:info("doing typed put on A, bucket:~p", [BucketTyped]),
-    riakc_pb_socket:put(PBA, ObjTyped, [{w,3}]),
-
-    {SyncTime1, _} = timer:tc(repl_util,
-                              start_and_wait_until_fullsync_complete,
-                              [LeaderA]),
-
-    lager:info("Fullsync completed in ~p seconds", [SyncTime1/1000/1000]),
-
-    ReadResult1 = riakc_pb_socket:get(PBB, Bucket, Key),
-    ?assertMatch({ok, _}, ReadResult1),
-
-    %% The following check appears to be the best we can do. If a 2.x source
-    %% sends a typed bucket to the 1.x sink, the put will occur.
-    %% The bucket is undefined to the interfaces, but some parts of it
-    %% appear to be written to the sink node. Since we cannot check using pb,
-    %% here we at least make sure we haven't written over an existing default
-    %% bucket with data from a typed bucket of the same name.
-    ensure_bucket_not_updated(PBB, Bucket, Key, Bin).
-
-%% @doc Turn on Realtime replication on the cluster led by LeaderA.
-%% The clusters must already have been named and connected.
-enable_rt(LeaderA, ANodes) ->
-    repl_util:enable_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    repl_util:start_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes).
-
-%% @doc Turn off Realtime replication on the cluster led by LeaderA.
-disable_rt(LeaderA, ANodes) ->
-    repl_util:disable_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    repl_util:stop_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes).
-
-%% @doc Turn on fullsync replication on the cluster led by LeaderA.
-%% The clusters must already have been named and connected.
-enable_fullsync(LeaderA, ANodes) ->
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes).
-
-%% @doc Connect two clusters using a given name.
-connect_cluster(Source, Port, Name) ->
-    lager:info("Connecting ~p to ~p for cluster ~p.",
-               [Source, Port, Name]),
-    repl_util:connect_cluster(Source, "127.0.0.1", Port),
-    ?assertEqual(ok, repl_util:wait_for_connection(Source, Name)).
-
-%% @doc Connect two clusters for replication using their respective leader nodes.
-connect_clusters(LeaderA, LeaderB) ->
-    Port = repl_util:get_port(LeaderB),
-    lager:info("connect cluster A:~p to B on port ~p", [LeaderA, Port]),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", Port),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")).
- -cluster_conf() -> - [ - {riak_repl, - [ - %% turn off fullsync - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_cluster, 20}, - {max_fssource_node, 20}, - {max_fssink_node, 20}, - {rtq_max_bytes, 1048576} - ]} - ]. - -deploy_nodes(NumNodes, current) -> - rt:deploy_nodes(NumNodes, cluster_conf(), [riak_kv, riak_repl]); -deploy_nodes(_, mixed) -> - Conf = cluster_conf(), - rt:deploy_nodes([{current, Conf}, {previous, Conf}], [riak_kv, riak_repl]). - -%% @doc Create two clusters of 1 node each and connect them for replication: -%% Cluster "A" -> cluster "B" -make_clusters(Type) -> - NumNodes = rt_config:get(num_nodes, 2), - ClusterASize = rt_config:get(cluster_a_size, 1), - - lager:info("Deploy ~p nodes", [NumNodes]), - Nodes = deploy_nodes(NumNodes, Type), - {ANodes, BNodes} = lists:split(ClusterASize, Nodes), - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - - %% Name the clusters - repl_util:name_cluster(AFirst, "A"), - repl_util:name_cluster(BFirst, "B"), - - lager:info("Waiting for convergence."), - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - lager:info("Waiting for transfers to complete."), - rt:wait_until_transfers_complete(ANodes), - rt:wait_until_transfers_complete(BNodes), - - %% get the leader for the first cluster - lager:info("waiting for leader to converge on cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - - %% get the leader for the second cluster - lager:info("waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - - ALeader = repl_util:get_leader(hd(ANodes)), - BLeader = repl_util:get_leader(hd(BNodes)), - - lager:info("ALeader: ~p BLeader: ~p", [ALeader, BLeader]), - {ALeader, BLeader, ANodes, BNodes}. - -make_pbget_fun(Pid, Bucket, Key, Bin) -> - fun() -> - case riakc_pb_socket:get(Pid, Bucket, Key) of - {ok, O6} -> - ?assertEqual(Bin, riakc_obj:get_value(O6)), - true; - _ -> - false - end - end. - -ensure_bucket_not_sent(Pid, Bucket, Key) -> - Results = [ assert_bucket_not_found(Pid, Bucket, Key) || _I <- lists:seq(1, ?ENSURE_READ_ITERATIONS)], - ?assertEqual(false, lists:member(false, Results)). - -ensure_bucket_not_updated(Pid, Bucket, Key, Bin) -> - Results = [ value_unchanged(Pid, Bucket, Key, Bin) || _I <- lists:seq(1, ?ENSURE_READ_ITERATIONS)], - ?assertEqual(false, lists:member(false, Results)). - -value_unchanged(Pid, Bucket, Key, Bin) -> - case riakc_pb_socket:get(Pid, Bucket, Key) of - {error, E} -> - lager:info("Got error:~p from get on cluster B", [E]), - false; - {ok, Res} -> - ?assertEqual(Bin, riakc_obj:get_value(Res)), - true - end, - timer:sleep(?ENSURE_READ_INTERVAL). - - -assert_bucket_not_found(Pid, Bucket, Key) -> - case riakc_pb_socket:get(Pid, Bucket, Key) of - {error, notfound} -> - true; - {error, <<"no_type">>} -> - true; - {ok, Res} -> - lager:error("Found bucket:~p and key:~p on sink when we should not have", [Res, Key]), - false - end. - -update_props(Type, Updates, Node, Nodes) -> - lager:info("Setting bucket properties ~p for bucket type ~p on node ~p", - [Updates, Type, Node]), - rpc:call(Node, riak_core_bucket_type, update, [Type, Updates]), - rt:wait_until_ring_converged(Nodes), - - get_current_bucket_props(Nodes, Type). 
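%% Editorial note (not part of the original patch): as far as we know,
%% riak_core_bucket_type:update/2 only applies to a type that has already
%% been activated, which is why setup/1 activates both test types before
%% update_props/4 mutates n_val here, and why callers poll the properties
%% with rt:wait_until/1 afterwards instead of assuming the change is
%% immediately visible on every node.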
- -%% fetch bucket properties via rpc -%% from a node or a list of nodes (one node is chosen at random) -get_current_bucket_props(Nodes, Type) when is_list(Nodes) -> - Node = lists:nth(length(Nodes), Nodes), - get_current_bucket_props(Node, Type); -get_current_bucket_props(Node, Type) when is_atom(Node) -> - rpc:call(Node, - riak_core_bucket_type, - get, - [Type]). - -ensure_rtq_drained(ANodes) -> - lager:info("making sure the rtq has drained"), - Got = lists:map(fun(Node) -> - [] =:= rpc:call(Node, riak_repl2_rtq, dumpq, []) - end, ANodes), - Expected = [true || _ <- lists:seq(1, length(ANodes))], - ?assertEqual(Expected, Got). diff --git a/tests/repl_cancel_fullsync.erl b/tests/repl_cancel_fullsync.erl deleted file mode 100644 index 03c69b9ef..000000000 --- a/tests/repl_cancel_fullsync.erl +++ /dev/null @@ -1,147 +0,0 @@ --module(repl_cancel_fullsync). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(TEST_BUCKET, - <<"repl-cancel-fullsync-failures-systest_a">>). --define(NUM_KEYS, 1000). - --define(CONF(Retries), [ - {riak_core, - [ - {ring_creation_size, 8}, - {default_bucket_props, [{n_val, 1}]} - ] - }, - {riak_kv, - [ - %% Specify fast building of AAE trees - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_strategy, keylist}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_retries, Retries} - ]} - ]). - -%% @doc Ensure we can cancel a fullsync and restart it. -confirm() -> - rt:set_advanced_conf(all, ?CONF(5)), - - Nodes = [ANodes, BNodes] = rt:build_clusters([3, 3]), - - rt:wait_for_cluster_service(ANodes, riak_repl), - rt:wait_for_cluster_service(BNodes, riak_repl), - - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - - lager:info("Naming clusters."), - repl_util:name_cluster(AFirst, "A"), - repl_util:name_cluster(BFirst, "B"), - - lager:info("Waiting for convergence."), - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - lager:info("Waiting for transfers to complete."), - rt:wait_until_transfers_complete(ANodes), - rt:wait_until_transfers_complete(BNodes), - - lager:info("Get leaders."), - LeaderA = repl_util:get_leader(AFirst), - LeaderB = repl_util:get_leader(BFirst), - - lager:info("Finding connection manager ports."), - BPort = repl_util:get_port(LeaderB), - - lager:info("Connecting cluster A to B"), - repl_util:connect_cluster_by_name(LeaderA, BPort, "B"), - - repl_util:write_to_cluster(AFirst, 1, ?NUM_KEYS, ?TEST_BUCKET), - - repl_util:read_from_cluster(BFirst, 1, ?NUM_KEYS, ?TEST_BUCKET, - ?NUM_KEYS), - - lager:info("Test fullsync from cluster A leader ~p to cluster B", - [LeaderA]), - repl_util:enable_fullsync(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - - lager:info("Starting fullsync."), - rt:log_to_nodes(Nodes, "Starting fullsync."), - R1 = rpc:call(LeaderA, riak_repl_console, fullsync, [["start"]]), - ?assertEqual(ok, R1), - repl_util:wait_until_fullsync_started(LeaderA), - lager:info("Fullsync running."), - - %% Get all active keylist server pids - Coordinators = [Pid || {"B", Pid} <- - riak_repl2_fscoordinator_sup:started(LeaderA)], - States = [sys:get_state(P) || P <- Coordinators], - KeylistPids = lists:flatten([element(14, State) || State <- States]), - KLStates = [sys:get_state(Pid) || {Pid, _} <- KeylistPids], - [?assertEqual(state, element(1, State)) || State <- 
KLStates], - - lager:info("Stopping fullsync."), - rt:log_to_nodes(Nodes, "Stopping fullsync."), - R2 = rpc:call(LeaderA, riak_repl_console, fullsync, [["stop"]]), - ?assertEqual(ok, R2), - repl_util:wait_until_fullsync_stopped(LeaderA), - lager:info("Fullsync stopped."), - - %% Give keylist pids time to stop - timer:sleep(500), - %% Ensure keylist pids are actually gone - Exits = [catch sys:get_state(Pid) || {Pid, _} <- KeylistPids], - [?assertMatch({'EXIT', _}, Exit) || Exit <- Exits], - - [{"B", S1}] = rpc:call(LeaderA, riak_repl2_fscoordinator, status, []), - ?assertEqual(true, lists:member({fullsyncs_completed, 0}, S1)), - lager:info("Fullsync not completed."), - - [{"B", S2}] = rpc:call(LeaderA, riak_repl2_fscoordinator, status, []), - ?assertEqual(true, lists:member({in_progress, 0}, S2)), - lager:info("** ~p", [S2]), - - lager:info("Starting fullsync."), - rt:log_to_nodes(Nodes, "Starting fullsync."), - R3 = rpc:call(LeaderA, riak_repl_console, fullsync, [["start"]]), - ?assertEqual(ok, R3), - repl_util:wait_until_fullsync_started(LeaderA), - lager:info("Fullsync running again."), - - Res = rt:wait_until(LeaderA, - fun(_) -> - Status = rpc:call(LeaderA, - riak_repl_console, - status, - [quiet]), - case proplists:get_value(server_fullsyncs, Status) of - 1 -> - true; - _ -> - false - end - end), - ?assertEqual(ok, Res), - repl_util:read_from_cluster(BFirst, 1, ?NUM_KEYS, ?TEST_BUCKET, 0), - [{"B", S3}] = rpc:call(LeaderA, riak_repl2_fscoordinator, status, []), - ?assertEqual(true, lists:member({fullsyncs_completed, 1}, S3)), - lager:info("Fullsync Complete"), - - rt:log_to_nodes(Nodes, "Test completed."), - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), - - pass. diff --git a/tests/repl_consistent_object_filter.erl b/tests/repl_consistent_object_filter.erl deleted file mode 100644 index a75b534cc..000000000 --- a/tests/repl_consistent_object_filter.erl +++ /dev/null @@ -1,156 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% ------------------------------------------------------------------- --module(repl_consistent_object_filter). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -%% Test to verify that replication properly filters consistent bucket -%% types. This is intended to be a temporary state of affairs so this -%% test should have a limited life span. -%% -%% Currently this test only exercises fullsync replication. The write -%% path for consistent objects bypasses the riak_kv postcommit hooks -%% that are the mechanism by which realtime replication works. As a -%% result, no attempt is ever made to replicate consistent objects. 
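The restarted fullsync above is confirmed by polling riak_repl_console:status/1 with rt:wait_until/2 until server_fullsyncs reaches 1. That polling pattern generalizes to any counter in the status proplist; a minimal sketch, with the helper name assumed:

    %% Sketch: poll repl status via RPC until Key reaches Expected,
    %% using the same rt:wait_until pattern as the wait above.
    wait_until_status_value(Node, Key, Expected) ->
        rt:wait_until(Node,
                      fun(_) ->
                              Status = rpc:call(Node, riak_repl_console, status, [quiet]),
                              proplists:get_value(Key, Status) =:= Expected
                      end).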
- -%% @doc riak_test entry point -confirm() -> - %% Start up two >1.3.2 clusters and connect them, - {LeaderA, LeaderB, ANodes, BNodes} = make_clusters(), - - PBA = get_pb_pid(LeaderA), - PBB = get_pb_pid(LeaderB), - - BucketType = <<"consistent_type">>, - - %% Create consistent bucket type on cluster A - rt:create_and_activate_bucket_type(LeaderA, - BucketType, - [{consistent, true}, {n_val, 5}]), - rt:wait_until_bucket_type_status(BucketType, active, ANodes), - rt:wait_until_bucket_type_visible(ANodes, BucketType), - - %% Create consistent bucket type on cluster B - rt:create_and_activate_bucket_type(LeaderB, - BucketType, - [{consistent, true}, {n_val, 5}]), - rt:wait_until_bucket_type_status(BucketType, active, BNodes), - rt:wait_until_bucket_type_visible(BNodes, BucketType), - - connect_clusters(LeaderA, LeaderB), - - %% Create two riak objects and execute consistent put of those - %% objects - Bucket = <<"unclebucket">>, - Key1 = <<"Maizy">>, - Key2 = <<"Miles">>, - Bin1 = <<"Take this quarter, go downtown, and have a rat gnaw that thing off your face! Good day to you, madam.">>, - Bin2 = <<"My Uncle was micro waving our socks and the dog threw up on the couch for an hour.">>, - Obj1 = riakc_obj:new({BucketType, Bucket}, Key1, Bin1), - Obj2 = riakc_obj:new({BucketType, Bucket}, Key2, Bin2), - lager:info("doing 2 consistent puts on A, bucket:~p", [Bucket]), - ok = riakc_pb_socket:put(PBA, Obj1), - ok = riakc_pb_socket:put(PBA, Obj2), - - %% Enable fullsync and wait for it to complete - repl_util:enable_fullsync(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - - {Time, _} = timer:tc(repl_util, start_and_wait_until_fullsync_complete, [LeaderA]), - lager:info("Fullsync completed in ~p seconds", [Time/1000/1000]), - - %% Attempt to read the objects from cluster B to verify they have - %% not been replicated via realtime replication - BReadRes3 = riakc_pb_socket:get(PBB, {BucketType, Bucket}, Key1), - BReadRes4 = riakc_pb_socket:get(PBB, {BucketType, Bucket}, Key2), - - ?assertEqual({error, notfound}, BReadRes3), - ?assertEqual({error, notfound}, BReadRes4), - - riakc_pb_socket:stop(PBA), - riakc_pb_socket:stop(PBB), - pass. - -%% @doc Connect two clusters for replication using their respective leader nodes. -connect_clusters(LeaderA, LeaderB) -> - {ok, {_IP, Port}} = rpc:call(LeaderB, application, get_env, - [riak_core, cluster_mgr]), - lager:info("connect cluster A:~p to B on port ~p", [LeaderA, Port]), - repl_util:connect_cluster(LeaderA, "127.0.0.1", Port), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")). 
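Typed buckets are addressed as {Type, Bucket} tuples throughout this test. A condensed sketch of the behavior the assertions above verify, with client pids, bucket, and key purely illustrative: a consistent put succeeds on the source while the same typed key stays notfound on the sink, because consistent data is filtered from replication:

    %% Sketch: PBA and PBB are riakc_pb_socket pids for source and sink.
    consistent_filter_example(PBA, PBB) ->
        Obj = riakc_obj:new({<<"consistent_type">>, <<"b">>}, <<"k">>, <<"v">>),
        ok = riakc_pb_socket:put(PBA, Obj),
        %% Even after fullsync, the sink never sees the consistent object.
        {error, notfound} = riakc_pb_socket:get(PBB, {<<"consistent_type">>, <<"b">>}, <<"k">>).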
- -%% @doc Create two clusters of 1 node each and connect them for replication: -%% Cluster "A" -> cluster "B" -make_clusters() -> - NumNodes = rt_config:get(num_nodes, 6), - ClusterASize = rt_config:get(cluster_a_size, 3), - NVal = 5, - - lager:info("Deploy ~p nodes", [NumNodes]), - Conf = ensemble_util:fast_config(NVal) ++ - [ - {riak_repl, - [ - %% turn off fullsync - {fullsync_on_connect, false}, - {max_fssource_node, 2}, - {max_fssink_node, 2}, - {max_fssource_cluster, 5}, - {max_fssource_retries, 5} - ]} - ], - - Nodes = rt:deploy_nodes(NumNodes, Conf, [riak_kv, riak_repl]), - {ANodes, BNodes} = lists:split(ClusterASize, Nodes), - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - - ensemble_util:wait_until_stable(AFirst, NVal), - ensemble_util:wait_until_stable(BFirst, NVal), - - %% get the leader for the first cluster - lager:info("waiting for leader to converge on cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - - %% get the leader for the second cluster - lager:info("waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - - %% Name the clusters - repl_util:name_cluster(AFirst, "A"), - rt:wait_until_ring_converged(ANodes), - - repl_util:name_cluster(BFirst, "B"), - rt:wait_until_ring_converged(BNodes), - - ?assertEqual(true, rpc:call(AFirst, riak_ensemble_manager, enabled, [])), - ensemble_util:wait_until_cluster(ANodes), - ensemble_util:wait_for_membership(AFirst), - ensemble_util:wait_until_stable(AFirst, NVal), - - ?assertEqual(true, rpc:call(BFirst, riak_ensemble_manager, enabled, [])), - ensemble_util:wait_until_cluster(BNodes), - ensemble_util:wait_for_membership(BFirst), - ensemble_util:wait_until_stable(BFirst, NVal), - - LeaderA = repl_util:get_leader(AFirst), - LeaderB = repl_util:get_leader(BFirst), - - {LeaderA, LeaderB, ANodes, BNodes}. - -get_pb_pid(Leader) -> - {ok, [{IP, PortA}] } = rpc:call(Leader, application, get_env, [riak_api, pb]), - {ok, Pid} = riakc_pb_socket:start_link(IP, PortA, []), - Pid. diff --git a/tests/repl_fs_bench.erl b/tests/repl_fs_bench.erl deleted file mode 100644 index 657a25aa6..000000000 --- a/tests/repl_fs_bench.erl +++ /dev/null @@ -1,156 +0,0 @@ --module(repl_fs_bench). - --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(DIFF_NUM_KEYS, 10). --define(FULL_NUM_KEYS, 100). --define(TEST_BUCKET, <<"repl_bench">>). - --define(HARNESS, (rt_config:get(rt_harness))). - --define(CONF(Strategy), [ - {riak_core, - [ - {ring_creation_size, 8}, - {default_bucket_props, [{n_val, 1}, {allow_mult, false}]} - ] - }, - {riak_kv, - [ - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_strategy, Strategy}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_retries, infinity}, - {max_fssource_cluster, 1}, - {max_fssource_node, 1}, - {max_fssink_node, 1} - ]} - ]). 
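The latency levels exercised below are injected with rt_intercept. For orientation, an intercept for the aae case might look roughly like this (a hypothetical sketch following the usual riak_test intercept conventions; the real modules live under intercepts/ and their exact contents may differ):

    %% Hypothetical sketch of a delaying intercept. rt_intercept renames
    %% the target module to riak_repl_aae_source_orig and routes
    %% get_reply/1 here, which sleeps briefly before calling through.
    -module(riak_repl_aae_source_intercepts).
    -include("intercept.hrl").
    -export([delayed_get_reply/1]).
    -define(M, riak_repl_aae_source_orig).

    delayed_get_reply(State) ->
        timer:sleep(10),
        ?M:get_reply_orig(State).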
- -confirm() -> - {E1, F1, D1, N1} = fullsync_test(keylist, 0), - {E2, F2, D2, N2} = fullsync_test(keylist, 10), - {E3, F3, D3, N3} = fullsync_test(keylist, 100), - - {E4, F4, D4, N4} = fullsync_test(aae, 0), - {E5, F5, D5, N5} = fullsync_test(aae, 10), - {E6, F6, D6, N6} = fullsync_test(aae, 100), - - lager:info("Keylist Empty: ~pms ~pms ~pms", [E1 / 1000, E2 / 1000, E3 / 1000]), - lager:info("Keylist Full: ~pms ~pms ~pms", [F1 / 1000, F2 / 1000, F3 / 1000]), - lager:info("Keylist Diff: ~pms ~pms ~pms", [D1 / 1000, D2 / 1000, D3 / 1000]), - lager:info("Keylist None: ~pms ~pms ~pms", [N1 / 1000, N2 / 1000, N3 / 1000]), - - lager:info("AAE Empty: ~pms ~pms ~pms", [E4 / 1000, E5 / 1000, E6 / 1000]), - lager:info("AAE Full: ~pms ~pms ~pms", [F4 / 1000, F5 / 1000, F6 / 1000]), - lager:info("AAE Diff: ~pms ~pms ~pms", [D4 / 1000, D5 / 1000, D6 / 1000]), - lager:info("AAE None: ~pms ~pms ~pms", [N4 / 1000, N5 / 1000, N6 / 1000]), - - pass. - -%% @doc Perform a fullsync, with given latency injected via intercept -%% and return times for each fullsync time. -fullsync_test(Strategy, Latency) -> - rt:set_advanced_conf(all, ?CONF(Strategy)), - - [ANodes, BNodes] = rt:build_clusters([3, 3]), - - rt:wait_for_cluster_service(ANodes, riak_repl), - rt:wait_for_cluster_service(BNodes, riak_repl), - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - - [rt_intercept:load_code(Node) || Node <- ANodes], - - case {Strategy, Latency} of - {aae, 10} -> - [rt_intercept:add(Node, - {riak_repl_aae_source, - [{{get_reply, 1}, delayed_get_reply}]}) - || Node <- ANodes], - ok; - {keylist, 10} -> - [rt_intercept:add(Node, - {riak_repl2_fssource, - [{{handle_info, 2}, slow_handle_info}]}) - || Node <- ANodes], - ok; - {aae, 100} -> - [rt_intercept:add(Node, - {riak_repl_aae_source, - [{{get_reply, 1}, really_delayed_get_reply}]}) - || Node <- ANodes], - ok; - {keylist, 100} -> - [rt_intercept:add(Node, - {riak_repl2_fssource, - [{{handle_info, 2}, really_slow_handle_info}]}) - || Node <- ANodes], - ok; - _ -> - ok - end, - - repl_util:name_cluster(AFirst, "A"), - repl_util:name_cluster(BFirst, "B"), - - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - - LeaderA = rpc:call(AFirst, - riak_core_cluster_mgr, get_leader, []), - - {ok, {IP, Port}} = rpc:call(BFirst, - application, get_env, [riak_core, cluster_mgr]), - - repl_util:connect_cluster(LeaderA, IP, Port), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - - repl_util:enable_fullsync(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - - %% Perform fullsync of an empty cluster. - rt:wait_until_aae_trees_built(ANodes ++ BNodes), - {EmptyTime, _} = timer:tc(repl_util, - start_and_wait_until_fullsync_complete, - [LeaderA]), - - %% Write keys and perform fullsync. - repl_util:write_to_cluster(AFirst, 0, ?FULL_NUM_KEYS, ?TEST_BUCKET), - rt:wait_until_aae_trees_built(ANodes ++ BNodes), - {FullTime, _} = timer:tc(repl_util, - start_and_wait_until_fullsync_complete, - [LeaderA]), - - %% Rewrite first 10% keys and perform fullsync. - repl_util:write_to_cluster(AFirst, 0, ?DIFF_NUM_KEYS, ?TEST_BUCKET), - rt:wait_until_aae_trees_built(ANodes ++ BNodes), - {DiffTime, _} = timer:tc(repl_util, - start_and_wait_until_fullsync_complete, - [LeaderA]), - - %% Write no keys, and perform the fullsync. 
- rt:wait_until_aae_trees_built(ANodes ++ BNodes), - {NoneTime, _} = timer:tc(repl_util, - start_and_wait_until_fullsync_complete, - [LeaderA]), - - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), - - {EmptyTime, FullTime, DiffTime, NoneTime}. diff --git a/tests/repl_fs_stat_caching.erl b/tests/repl_fs_stat_caching.erl deleted file mode 100644 index 6f187ef49..000000000 --- a/tests/repl_fs_stat_caching.erl +++ /dev/null @@ -1,109 +0,0 @@ -%% @doc Tests to ensure a stalling or blocking fssource process does not -%% cause status call to timeout. Useful for only 2.0 and up (and up is -%% a regression test). --module(repl_fs_stat_caching). --behavior(riak_test). - --include_lib("eunit/include/eunit.hrl"). --define(TEST_BUCKET, <<"repl_fs_stat_caching">>). - --export([confirm/0]). - -confirm() -> - {{SrcLead, SrcCluster}, {SinkLead, _SinkCluster}} = setup(), - SinkPort = repl_util:get_cluster_mgr_port(SinkLead), - repl_util:connect_cluster(SrcLead, "127.0.0.1", SinkPort), - - lager:info("Loading source cluster"), - [] = repl_util:do_write(SrcLead, 1, 1000, ?TEST_BUCKET, 1), - - repl_util:enable_fullsync(SrcLead, "sink"), - rpc:call(SrcLead, riak_repl_console, fullsync, [["start", "sink"]]), - - % and now, the actual test. - % find a random fssource, suspend it, and then ensure we can get a - % status. - {ok, Suspended} = suspend_an_fs_source(SrcCluster), - lager:info("Suspended: ~p", [Suspended]), - {ok, Status} = rt:riak_repl(SrcLead, "status"), - FailLine = "RPC to '" ++ atom_to_list(SrcLead) ++ "' failed: timeout\n", - ?assertNotEqual(FailLine, Status), - - true = rpc:block_call(node(Suspended), erlang, resume_process, [Suspended]), - - pass. - -setup() -> - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), - NodeCount = rt_config:get(num_nodes, 6), - - lager:info("Deploy ~p nodes", [NodeCount]), - Nodes = rt:deploy_nodes(NodeCount, cluster_conf(), [riak_kv, riak_repl]), - SplitSize = NodeCount div 2, - {SourceNodes, SinkNodes} = lists:split(SplitSize, Nodes), - - lager:info("making cluster Source from ~p", [SourceNodes]), - repl_util:make_cluster(SourceNodes), - - lager:info("making cluster Sink from ~p", [SinkNodes]), - repl_util:make_cluster(SinkNodes), - - SrcHead = hd(SourceNodes), - SinkHead = hd(SinkNodes), - repl_util:name_cluster(SrcHead, "source"), - repl_util:name_cluster(SinkHead, "sink"), - - rt:wait_until_ring_converged(SourceNodes), - rt:wait_until_ring_converged(SinkNodes), - - rt:wait_until_transfers_complete(SourceNodes), - rt:wait_until_transfers_complete(SinkNodes), - - ok = repl_util:wait_until_leader_converge(SourceNodes), - ok = repl_util:wait_until_leader_converge(SinkNodes), - - SourceLead = repl_util:get_leader(SrcHead), - SinkLead = repl_util:get_leader(SinkHead), - - {{SourceLead, SourceNodes}, {SinkLead, SinkNodes}}. - -cluster_conf() -> - [ - {riak_repl, [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_cluster, 3}, - {max_fssource_node, 1}, - {max_fssink_node, 20}, - {rtq_max_bytes, 1048576} - ]} - ]. - -suspend_an_fs_source([]) -> - {error, no_nodes}; - -suspend_an_fs_source(Nodes) -> - suspend_an_fs_source(Nodes, 10000). - -suspend_an_fs_source([_Node | _Tail], 0) -> - {error, tries_ran_out}; - -suspend_an_fs_source([Node | Tail], TriesLeft) -> - Pids = rpc:call(Node, riak_repl2_fssource_sup, enabled, []), - case maybe_suspend_an_fs_source(Node, Pids) of - false -> - suspend_an_fs_source(Tail ++ [Node], TriesLeft - 1); - Pid -> - {ok, Pid} - end. 
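The suspension above relies on rpc:block_call/4 so that erlang:suspend_process/1 has taken effect before the test proceeds. A minimal sketch of the suspend/resume pairing (helper name assumed):

    %% Sketch: suspend a remote process, run Fun while it is stuck, then
    %% resume it, mirroring the suspend/resume calls in the test above.
    with_suspended(Node, Pid, Fun) ->
        true = rpc:block_call(Node, erlang, suspend_process, [Pid]),
        try
            Fun()
        after
            true = rpc:block_call(Node, erlang, resume_process, [Pid])
        end.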
- -maybe_suspend_an_fs_source(_Node, []) -> - false; - -maybe_suspend_an_fs_source(Node, [{_Remote, Pid} | Tail]) -> - case rpc:block_call(Node, erlang, suspend_process, [Pid]) of - false -> - maybe_suspend_an_fs_source(Node, Tail); - true -> - Pid - end. diff --git a/tests/repl_handoff_deadlock_aae.erl b/tests/repl_handoff_deadlock_aae.erl deleted file mode 100644 index c4885c5fb..000000000 --- a/tests/repl_handoff_deadlock_aae.erl +++ /dev/null @@ -1,35 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2015 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%%% @copyright (C) 2015, Basho Technologies -%%% @doc -%%% riak_test for timing deadlock on riak_repl2_keylist_server:diff_bloom and riak_repl_aae_source:finish_sending_differences -%%% which can occur when the owner VNode is handed off after the original monitor (MonRef) is created. Neither function -%%% handles the {'DOWN', MonRef, process, VNodePid, normal} case (which happens when the node exits after handoff. -%%% Note that this uses code from verify_counter_repl as its tests, as that seemed to be able to evoke the issue, even without intercepts -%%% -%%% Also tests fixes for riak_repl2_fscoordinator which used to cache the owner of an index at replication start, but those could change with handoff. -%%% @end --module(repl_handoff_deadlock_aae). --behavior(riak_test). --export([confirm/0]). - -confirm() -> - repl_handoff_deadlock_common:confirm(aae). \ No newline at end of file diff --git a/tests/repl_handoff_deadlock_common.erl b/tests/repl_handoff_deadlock_common.erl deleted file mode 100644 index 8a9ce9dee..000000000 --- a/tests/repl_handoff_deadlock_common.erl +++ /dev/null @@ -1,151 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2015 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%%% @copyright (C) 2015, Basho Technologies -%%% @doc -%%% riak_test for timing deadlock on riak_repl2_keylist_server:diff_bloom and riak_repl_aae_source:finish_sending_differences -%%% which can occur when the owner VNode is handed off after the original monitor (MonRef) is created. 
Neither function -%%% handles the {'DOWN', MonRef, process, VNodePid, normal} case (which happens when the node exits after handoff. -%%% Note that this uses code from verify_counter_repl as its tests, as that seemed to be able to evoke the issue, even without intercepts -%%% -%%% Also tests fixes for riak_repl2_fscoordinator which used to cache the owner of an index at replication start, but those could change with handoff. -%%% @end --module(repl_handoff_deadlock_common). --export([confirm/1]). --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"counter-bucket">>). --define(KEY, <<"counter-key">>). - --define(CONF(Strategy), [ - {riak_kv, - [ - %% Specify fast building of AAE trees - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_strategy, Strategy}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - %% Force usage of Bloom to invoke race - {fullsync_direct_percentage_limit, 0}, - {fullsync_direct_limit, 1} - - ]} -]). - -confirm(Strategy) -> - - inets:start(), - lager:info("Testing fullsync handoff deadlock with strategy ~p~n", [Strategy]), - {ClusterA, ClusterB} = make_clusters(Strategy), - - %% Simulate stop of 1/10th of vnodes before fold begins to provoke deadlock - [ - rt_intercept:add(Node, {riak_core_vnode_master, - [{{command_return_vnode, 4}, - stop_vnode_after_bloom_fold_request_succeeds}]}) - || {_, Node} <- ClusterA ++ ClusterB], - - %% Write the data to both sides of the cluster - write_data(hd(ClusterA), 1, 1000), - write_data(hd(ClusterB), 1001, 1000), - - %% let the repl flow - repl_power_activate(ClusterA, ClusterB), - verify_data(hd(ClusterA), 2000), - verify_data(hd(ClusterB), 2000), - pass. - -make_clusters(KeylistOrAae) -> - Nodes = rt:deploy_nodes(6, ?CONF(KeylistOrAae), [riak_kv, riak_repl]), - {ClusterA, ClusterB} = lists:split(3, Nodes), - A = make_cluster(ClusterA, "A"), - B = make_cluster(ClusterB, "B"), - {A, B}. - -make_cluster(Nodes, Name) -> - repl_util:make_cluster(Nodes), - repl_util:name_cluster(hd(Nodes), Name), - repl_util:wait_until_leader_converge(Nodes), - Clients = [ rt:pbc(Node) || Node <- Nodes ], - lists:zip(Clients, Nodes). - -write_data({Client, _Node}, Start, Count) -> - [riakc_pb_socket:put(Client, riakc_obj:new(?BUCKET, integer_to_binary(Num), integer_to_binary(Num))) - || Num <- lists:seq(Start, (Start-1) + Count)]. - -verify_data({Client, Node}, Count) -> - [ - begin - case (riakc_pb_socket:get(Client, ?BUCKET, integer_to_binary(Num), [{notfound_ok, false}])) of - {error, notfound} -> - erlang:error({not_found, lists:flatten(io_lib:format("Could not find ~p in cluster with node ~p", [Num, Node]))}); - {ok, Object} -> - ?assertEqual(riakc_obj:get_value(Object), integer_to_binary(Num)) - end - end - || Num <- lists:seq(1, Count)]. - - -%% Set up bi-directional full sync replication. 
-repl_power_activate(ClusterA, ClusterB) -> - lager:info("repl power...ACTIVATE!"), - LeaderA = get_leader(hd(ClusterA)), - info("got leader A"), - LeaderB = get_leader(hd(ClusterB)), - info("Got leader B"), - MgrPortA = get_mgr_port(hd(ClusterA)), - info("Got manager port A"), - MgrPortB = get_mgr_port(hd(ClusterB)), - info("Got manager port B"), - info("connecting A to B"), - repl_util:connect_cluster(LeaderA, "127.0.0.1", MgrPortB), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - info("A connected to B"), - info("connecting B to A"), - repl_util:connect_cluster(LeaderB, "127.0.0.1", MgrPortA), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderB, "A")), - info("B connected to A"), - info("Enabling Fullsync bi-directional"), - repl_util:enable_fullsync(LeaderA, "B"), - info("Enabled A->B"), - repl_util:enable_fullsync(LeaderB, "A"), - info("Enabled B->A"), - info("Awaiting fullsync completion"), - repl_util:start_and_wait_until_fullsync_complete(LeaderA), - info("A->B complete"), - repl_util:start_and_wait_until_fullsync_complete(LeaderB), - info("B->A complete"). - -get_leader({_, Node}) -> - rpc:call(Node, riak_core_cluster_mgr, get_leader, []). - -get_mgr_port({_, Node}) -> - {ok, {_IP, Port}} = rpc:call(Node, application, get_env, - [riak_core, cluster_mgr]), - Port. - -info(Message) -> - lager:info(Message). diff --git a/tests/repl_handoff_deadlock_keylist.erl b/tests/repl_handoff_deadlock_keylist.erl deleted file mode 100644 index f5ec70316..000000000 --- a/tests/repl_handoff_deadlock_keylist.erl +++ /dev/null @@ -1,35 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2015 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%%% @copyright (C) 2015, Basho Technologies -%%% @doc -%%% riak_test for timing deadlock on riak_repl2_keylist_server:diff_bloom and riak_repl_aae_source:finish_sending_differences -%%% which can occur when the owner VNode is handed off after the original monitor (MonRef) is created. Neither function -%%% handles the {'DOWN', MonRef, process, VNodePid, normal} case (which happens when the node exits after handoff. -%%% Note that this uses code from verify_counter_repl as its tests, as that seemed to be able to evoke the issue, even without intercepts -%%% -%%% Also tests fixes for riak_repl2_fscoordinator which used to cache the owner of an index at replication start, but those could change with handoff. -%%% @end --module(repl_handoff_deadlock_keylist). --behavior(riak_test). --export([confirm/0]). - -confirm() -> - repl_handoff_deadlock_common:confirm(keylist). 
\ No newline at end of file diff --git a/tests/repl_location_failures.erl b/tests/repl_location_failures.erl deleted file mode 100644 index cb74985f5..000000000 --- a/tests/repl_location_failures.erl +++ /dev/null @@ -1,108 +0,0 @@ -%% @doc Verify that location_down messages during replication occur -%% and are handled correctly. - --module(repl_location_failures). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(TEST_BUCKET, <<"repl-location-failures-systest_a">>). --define(NUM_KEYS, 1000). - --define(CONF(Retries), [ - {riak_core, - [ - {ring_creation_size, 8}, - {default_bucket_props, [{n_val, 1}]} - ] - }, - {riak_kv, - [ - %% Specify fast building of AAE trees - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_strategy, keylist}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_reserve_retries, Retries} - ]} - ]). - -confirm() -> - rt:set_advanced_conf(all, ?CONF(5)), - - [ANodes, BNodes] = rt:build_clusters([3, 3]), - - rt:wait_for_cluster_service(ANodes, riak_repl), - rt:wait_for_cluster_service(BNodes, riak_repl), - - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - - lager:info("Naming clusters."), - repl_util:name_cluster(AFirst, "A"), - repl_util:name_cluster(BFirst, "B"), - - lager:info("Waiting for convergence."), - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - lager:info("Waiting for transfers to complete."), - rt:wait_until_transfers_complete(ANodes), - rt:wait_until_transfers_complete(BNodes), - - lager:info("Get leaders."), - LeaderA = repl_util:get_leader(AFirst), - LeaderB = repl_util:get_leader(BFirst), - - lager:info("Finding connection manager ports."), - BPort = repl_util:get_port(LeaderB), - - lager:info("Connecting cluster A to B"), - repl_util:connect_cluster_by_name(LeaderA, BPort, "B"), - - %% Write keys prior to fullsync. - repl_util:write_to_cluster(AFirst, 1, ?NUM_KEYS, ?TEST_BUCKET), - - %% Read keys prior to fullsync. - repl_util:read_from_cluster(BFirst, 1, ?NUM_KEYS, ?TEST_BUCKET, - ?NUM_KEYS), - - lager:info("Test fullsync from cluster A leader ~p to cluster B", - [LeaderA]), - repl_util:enable_fullsync(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - - BIndicies = length(rpc:call(LeaderB, - riak_core_ring, - my_indices, - [rt:get_ring(LeaderB)])), - - lager:warning("BIndicies: ~p", [BIndicies]), - - repl_util:validate_intercepted_fullsync(LeaderB, - {riak_repl2_fs_node_reserver, - [{{handle_call, 3}, - down_reserve}]}, - LeaderA, - "B", - BIndicies), - - - %% Verify data is replicated from A -> B successfully once the - %% intercepts are removed. - repl_util:validate_completed_fullsync(LeaderA, BFirst, "B", 1, - ?NUM_KEYS, ?TEST_BUCKET), - - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), - - pass. diff --git a/tests/repl_rt_cascading_rtq.erl b/tests/repl_rt_cascading_rtq.erl deleted file mode 100644 index 8d31446f6..000000000 --- a/tests/repl_rt_cascading_rtq.erl +++ /dev/null @@ -1,185 +0,0 @@ --module(repl_rt_cascading_rtq). --compile(export_all). - --include_lib("eunit/include/eunit.hrl"). - --define(TEST_BUCKET, <<"rt-cascading-rtq-systest-a">>). 
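The BIndicies computation in repl_location_failures above counts the ring partitions a node owns via riak_core_ring:my_indices/1. As a reusable sketch (helper name assumed):

    %% Sketch: count the ring indices a node owns, mirroring the
    %% BIndicies calculation used to size the intercepted fullsync check.
    owned_index_count(Node) ->
        Ring = rt:get_ring(Node),
        length(rpc:call(Node, riak_core_ring, my_indices, [Ring])).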
-
-setup() ->
-    rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]),
-
-    {SourceLeader, SinkLeaderA, SinkLeaderB, _, _, _} = ClusterNodes = make_clusters(),
-
-    connect_clusters(SourceLeader, SinkLeaderA, "SinkA"),
-    connect_clusters(SourceLeader, SinkLeaderB, "SinkB"),
-    ClusterNodes.
-
-confirm() ->
-    SetupData = setup(),
-    rtq_data_buildup_test(SetupData),
-    pass.
-
-%% This test case is designed to ensure that there is no realtime
-%% queue buildup on sink nodes that do not serve as source nodes for
-%% any other clusters. It constructs a simple topology with a single
-%% source cluster replicating to two sinks. The topology for this test
-%% is as follows:
-%%                 +--------+
-%%                 | Source |
-%%                 +--------+
-%%                   ^    ^
-%%                  /      \
-%%                 V        V
-%%           +-------+    +-------+
-%%           | SinkA |    | SinkB |
-%%           +-------+    +-------+
-rtq_data_buildup_test(ClusterNodes) ->
-    {SourceLeader, SinkLeaderA, SinkLeaderB, SourceNodes, _SinkANodes, _SinkBNodes} = ClusterNodes,
-
-    %% Enable RT replication from the source cluster to "SinkA"
-    lager:info("Enabling realtime between ~p and ~p", [SourceLeader, SinkLeaderA]),
-    enable_rt(SourceLeader, SourceNodes, "SinkA"),
-    %% Enable RT replication from the source cluster to "SinkB"
-    lager:info("Enabling realtime between ~p and ~p", [SourceLeader, SinkLeaderB]),
-    enable_rt(SourceLeader, SourceNodes, "SinkB"),
-
-    %% Get the baseline byte count for the rtq for each sink cluster
-    SinkAInitialQueueSize = rtq_bytes(SinkLeaderA),
-    SinkBInitialQueueSize = rtq_bytes(SinkLeaderB),
-
-    %% Write keys to the source cluster
-    KeyCount = 1001,
-    write_to_cluster(SourceLeader, 1, KeyCount),
-    read_from_cluster(SinkLeaderA, 1, KeyCount, 0),
-    read_from_cluster(SinkLeaderB, 1, KeyCount, 0),
-
-    %% Verify the rt queue is still at the initial size for both sink clusters
-    ?assertEqual(SinkAInitialQueueSize, rtq_bytes(SinkLeaderA)),
-    ?assertEqual(SinkBInitialQueueSize, rtq_bytes(SinkLeaderB)).
-
-rtq_bytes(Node) ->
-    RtqStatus = rpc:call(Node, riak_repl2_rtq, status, []),
-    proplists:get_value(bytes, RtqStatus).
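rtq_bytes/1 above reads the bytes field from riak_repl2_rtq:status/0, so the no-buildup check reduces to sampling it around a workload. A minimal sketch (helper name assumed; WorkFun is any zero-arity fun that performs the writes):

    %% Sketch: assert that a workload leaves a sink's realtime queue
    %% size unchanged.
    assert_no_rtq_growth(SinkNode, WorkFun) ->
        Before = rtq_bytes(SinkNode),
        WorkFun(),
        ?assertEqual(Before, rtq_bytes(SinkNode)).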
- -make_clusters() -> - NodeCount = rt_config:get(num_nodes, 6), - lager:info("Deploy ~p nodes", [NodeCount]), - Nodes = deploy_nodes(NodeCount, true), - - {SourceNodes, SinkNodes} = lists:split(2, Nodes), - {SinkANodes, SinkBNodes} = lists:split(2, SinkNodes), - lager:info("SinkANodes: ~p", [SinkANodes]), - lager:info("SinkBNodes: ~p", [SinkBNodes]), - - lager:info("Build source cluster"), - repl_util:make_cluster(SourceNodes), - - lager:info("Build sink cluster A"), - repl_util:make_cluster(SinkANodes), - - lager:info("Build sink cluster B"), - repl_util:make_cluster(SinkBNodes), - - SourceFirst = hd(SourceNodes), - AFirst = hd(SinkANodes), - BFirst = hd(SinkBNodes), - - %% Name the clusters - repl_util:name_cluster(SourceFirst, "Source"), - repl_util:name_cluster(AFirst, "SinkA"), - repl_util:name_cluster(BFirst, "SinkB"), - - lager:info("Waiting for convergence."), - rt:wait_until_ring_converged(SourceNodes), - rt:wait_until_ring_converged(SinkANodes), - rt:wait_until_ring_converged(SinkBNodes), - - lager:info("Waiting for transfers to complete."), - rt:wait_until_transfers_complete(SourceNodes), - rt:wait_until_transfers_complete(SinkANodes), - rt:wait_until_transfers_complete(SinkBNodes), - - %% get the leader for the source cluster - lager:info("waiting for leader to converge on the source cluster"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(SourceNodes)), - - %% get the leader for the first sink cluster - lager:info("waiting for leader to converge on sink cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(SinkANodes)), - - %% get the leader for the second cluster - lager:info("waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(SinkBNodes)), - - SourceLeader = repl_util:get_leader(SourceFirst), - ALeader = repl_util:get_leader(AFirst), - BLeader = repl_util:get_leader(BFirst), - - %% Uncomment the following 2 lines to verify that pre-2.0 versions - %% of Riak behave as expected if cascading writes are disabled for - %% the sink clusters. - %% disable_cascading(ALeader, SinkANodes), - %% disable_cascading(BLeader, SinkBNodes), - - lager:info("Source Leader: ~p SinkALeader: ~p SinkBLeader: ~p", [SourceLeader, ALeader, BLeader]), - {SourceLeader, ALeader, BLeader, SourceNodes, SinkANodes, SinkBNodes}. - -%% @doc Connect two clusters using a given name. -connect_cluster(Source, Port, Name) -> - lager:info("Connecting ~p to ~p for cluster ~p.", - [Source, Port, Name]), - repl_util:connect_cluster(Source, "127.0.0.1", Port), - ?assertEqual(ok, repl_util:wait_for_connection(Source, Name)). - -%% @doc Connect two clusters for replication using their respective leader nodes. -connect_clusters(SourceLeader, SinkLeader, SinkName) -> - SinkPort = repl_util:get_port(SinkLeader), - lager:info("connect source cluster to ~p on port ~p", [SinkName, SinkPort]), - repl_util:connect_cluster(SourceLeader, "127.0.0.1", SinkPort), - ?assertEqual(ok, repl_util:wait_for_connection(SourceLeader, SinkName)). - -cluster_conf(_CascadingWrites) -> - [ - {riak_repl, - [ - %% turn off fullsync - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_cluster, 20}, - {max_fssource_node, 20}, - {max_fssink_node, 20}, - {rtq_max_bytes, 1048576} - ]} - ]. - -deploy_nodes(NumNodes, true) -> - rt:deploy_nodes(NumNodes, cluster_conf(always), [riak_kv, riak_repl]); -deploy_nodes(NumNodes, false) -> - rt:deploy_nodes(NumNodes, cluster_conf(never), [riak_kv, riak_repl]). 
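Cascading writes are toggled through the realtime_cascades console command, as the disable_cascading/2 helper below shows; the enabling counterpart would presumably pass "always" (a sketch, name assumed, based on the "always"/"never" values used in this file):

    %% Sketch of the inverse of disable_cascading/2.
    enable_cascading(Leader, Nodes) ->
        rpc:call(Leader, riak_repl_console, realtime_cascades, [["always"]]),
        rt:wait_until_ring_converged(Nodes).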
-
-%% @doc Turn on Realtime replication on the cluster led by LeaderA.
-%%      The clusters must already have been named and connected.
-enable_rt(SourceLeader, SourceNodes, SinkName) ->
-    repl_util:enable_realtime(SourceLeader, SinkName),
-    rt:wait_until_ring_converged(SourceNodes),
-
-    repl_util:start_realtime(SourceLeader, SinkName),
-    rt:wait_until_ring_converged(SourceNodes).
-
-%% @doc Turn off cascading realtime writes on the cluster led by Leader.
-disable_cascading(Leader, Nodes) ->
-    rpc:call(Leader, riak_repl_console, realtime_cascades, [["never"]]),
-    rt:wait_until_ring_converged(Nodes).
-
-%% @doc Write a series of keys and ensure they are all written.
-write_to_cluster(Node, Start, End) ->
-    lager:info("Writing ~p keys to node ~p.", [End - Start, Node]),
-    ?assertEqual([],
-                 repl_util:do_write(Node, Start, End, ?TEST_BUCKET, 1)).
-
-%% @doc Read from cluster a series of keys, asserting a certain number
-%%      of errors.
-read_from_cluster(Node, Start, End, Errors) ->
-    lager:info("Reading ~p keys from node ~p.", [End - Start, Node]),
-    Res2 = rt:systest_read(Node, Start, End, ?TEST_BUCKET, 1),
-    ?assertEqual(Errors, length(Res2)).
diff --git a/tests/repl_rt_heartbeat.erl b/tests/repl_rt_heartbeat.erl
deleted file mode 100644
index 7306404a1..000000000
--- a/tests/repl_rt_heartbeat.erl
+++ /dev/null
@@ -1,255 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% -------------------------------------------------------------------
--module(repl_rt_heartbeat).
--behaviour(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(RPC_TIMEOUT, 5000).
--define(HB_TIMEOUT, 2).
--define(HB_INTERVAL, 1).
-
-%% Replication Realtime Heartbeat test
-%% Valid for EE version 1.3.2 and up
-%%
-%% If both sides of an RT replication connection support it, a heartbeat
-%% message is sent from the RT Source to the RT Sink every
-%% {riak_repl, rt_heartbeat_interval}, which defaults to 15s. If
-%% a response is not received within {riak_repl, rt_heartbeat_timeout},
-%% which also defaults to 15s, then the source connection exits and will
-%% be re-established by the supervisor.
-%%
-%% RT Heartbeat messages are supported between EE releases 1.3.2 and up.
-%%
-%% Test:
-%% -----
-%% Change the heartbeat_interval and heartbeat_timeout to 2 seconds,
-%% Start up two >1.3.2 clusters and connect them,
-%% Enable RT replication,
-%% Write some objects to the source cluster (A),
-%% Verify they got to the sink cluster (B),
-%% Verify that heartbeats are being acknowledged by the sink (B) back to source (A),
-%% Interrupt the connection so that packets cannot flow from A -> B,
-%% Verify that the connection is restarted after the heartbeat_timeout period,
-%% Verify that heartbeats are being acknowledged by the sink (B) back to source (A),
-%% Write some objects to the source cluster (A),
-%% Verify they got to the sink cluster (B),
-%% Have a cold beverage.
-
-%% @doc riak_test entry point
-confirm() ->
-    %% Start up two >1.3.2 clusters and connect them,
-    {LeaderA, LeaderB, ANodes, _BNodes} = make_connected_clusters(),
-
-    %% load intercepts. See ../intercepts/riak_repl_rt_intercepts.erl
-    load_intercepts(LeaderA),
-    load_intercepts(LeaderB),
-
-    %% Enable RT replication from cluster "A" to cluster "B"
-    enable_rt(LeaderA, ANodes),
-
-    %% Verify that heartbeats are being acknowledged by the sink (B) back to source (A)
-    ?assertEqual(verify_heartbeat_messages(LeaderA), true),
-
-    %% Verify RT repl of objects
-    verify_rt(LeaderA, LeaderB),
-
-    %% Cause heartbeat messages to not be delivered, but remember the current
-    %% Pid of the RT connection. It should change after we stop heartbeats
-    %% because the RT connection will restart if all goes well.
-    RTConnPid1 = get_rt_conn_pid(LeaderA),
-    lager:info("Suspending HB"),
-    suspend_heartbeat_messages(LeaderA),
-
-    %% sleep longer than the HB timeout interval to force re-connection;
-    %% and give it time to restart the RT connection. Wait an extra 2 seconds.
-    timer:sleep(timer:seconds(?HB_TIMEOUT) + 2000),
-
-    %% Verify that RT connection has restarted by noting that its Pid has changed
-    RTConnPid2 = get_rt_conn_pid(LeaderA),
-    ?assertNotEqual(RTConnPid1, RTConnPid2),
-
-    %% Verify that heartbeats are not being ack'd
-    rt:log_to_nodes([LeaderA], "Verify suspended HB"),
-    ?assertEqual(verify_heartbeat_messages(LeaderA), false),
-
-    %% Resume heartbeat messages from source and allow some time to ack back.
-    %% Wait one second longer than the timeout
-    rt:log_to_nodes([LeaderA], "Resuming HB"),
-    resume_heartbeat_messages(LeaderA),
-    timer:sleep(timer:seconds(?HB_TIMEOUT) + 1000),
-
-    %% Verify that heartbeats are being acknowledged by the sink (B) back to source (A)
-    rt:log_to_nodes([LeaderA], "Verify resumed HB"),
-    ?assertEqual(verify_heartbeat_messages(LeaderA), true),
-
-    %% Verify RT repl of objects
-    verify_rt(LeaderA, LeaderB),
-
-    verify_hb_noresponse(LeaderA, LeaderB),
-
-    pass.
-
-%% @doc Turn on Realtime replication on the cluster led by LeaderA.
-%%      The clusters must already have been named and connected.
-enable_rt(LeaderA, ANodes) ->
-    repl_util:enable_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    repl_util:start_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes).
-
-%% @doc Verify that RealTime replication is functioning correctly by
-%%      writing some objects to cluster A and checking they can be
-%%      read from cluster B. Each call creates a new bucket so that
-%%      verification can be tested multiple times independently.
-verify_rt(LeaderA, LeaderB) ->
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) ||
-        <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <>,
-    First = 101,
-    Last = 200,
-
-    %% Write some objects to the source cluster (A),
-    lager:info("Writing ~p keys to ~p, which should RT repl to ~p",
-               [Last-First+1, LeaderA, LeaderB]),
-    ?assertEqual([], repl_util:do_write(LeaderA, First, Last, TestBucket, 2)),
-
-    %% verify data is replicated to B
-    lager:info("Reading ~p keys written from ~p", [Last-First+1, LeaderB]),
-    ?assertEqual(0, repl_util:wait_for_reads(LeaderB, First, Last, TestBucket, 2)).
-
-verify_hb_noresponse(LeaderA, LeaderB) ->
-    lager:info("Testing heartbeats with no responses, should not crash"),
-
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) ||
-        <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <>,
-    First = 1,
-    Last = 20000,
-
-    %% Suspend HB responses from the sink, write some data, then stop writing;
-    %% the HB timeout should not crash the node
-    suspend_heartbeat_responses(LeaderB),
-
-    %% Write some objects to the source cluster (A),
-    lager:info("Writing ~p keys to ~p, which should RT repl to ~p",
-               [Last-First+1, LeaderA, LeaderB]),
-    ?assertEqual([], repl_util:do_write(LeaderA, First, Last, TestBucket, 2)),
-
-    %% verify data is replicated to B
-    lager:info("Reading ~p keys written from ~p", [Last-First+1, LeaderB]),
-    timer:sleep(timer:seconds(?HB_TIMEOUT) + 1000),
-    ?assertEqual(0, repl_util:wait_for_reads(LeaderB, First, Last, TestBucket, 2)).
-
-%% @doc Connect two clusters for replication using their respective leader nodes.
-connect_clusters(LeaderA, LeaderB) ->
-    {ok, {_IP, Port}} = rpc:call(LeaderB, application, get_env,
-                                 [riak_core, cluster_mgr]),
-    lager:info("connect cluster A:~p to B on port ~p", [LeaderA, Port]),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", Port),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")).
-
-%% @doc Create two clusters of 3 nodes each and connect them for replication:
-%%      Cluster "A" -> cluster "B"
-make_connected_clusters() ->
-    NumNodes = rt_config:get(num_nodes, 6),
-    ClusterASize = rt_config:get(cluster_a_size, 3),
-
-    lager:info("Deploy ~p nodes", [NumNodes]),
-    Conf = [
-            {riak_repl,
-             [
-              %% turn off fullsync
-              {fullsync_on_connect, false},
-              {fullsync_interval, disabled},
-              %% override defaults for RT heartbeat so that we
-              %% can see faults sooner and have a quicker test.
-              {rt_heartbeat_interval, ?HB_TIMEOUT},
-              {rt_heartbeat_timeout, ?HB_TIMEOUT}
-             ]}
-           ],
-
-    Nodes = rt:deploy_nodes(NumNodes, Conf, [riak_kv, riak_repl]),
-
-    {ANodes, BNodes} = lists:split(ClusterASize, Nodes),
-    lager:info("ANodes: ~p", [ANodes]),
-    lager:info("BNodes: ~p", [BNodes]),
-
-    lager:info("Build cluster A"),
-    repl_util:make_cluster(ANodes),
-
-    lager:info("Build cluster B"),
-    repl_util:make_cluster(BNodes),
-
-    %% get the leader for the first cluster
-    lager:info("waiting for leader to converge on cluster A"),
-    ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)),
-    AFirst = hd(ANodes),
-
-    %% get the leader for the second cluster
-    lager:info("waiting for leader to converge on cluster B"),
-    ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)),
-    BFirst = hd(BNodes),
-
-    %% Name the clusters
-    repl_util:name_cluster(AFirst, "A"),
-    rt:wait_until_ring_converged(ANodes),
-
-    repl_util:name_cluster(BFirst, "B"),
-    rt:wait_until_ring_converged(BNodes),
-
-    %% Connect for replication
-    connect_clusters(AFirst, BFirst),
-
-    {AFirst, BFirst, ANodes, BNodes}.
-
-%% @doc Load intercepts file from ../intercepts/riak_repl2_rtsource_helper_intercepts.erl
-load_intercepts(Node) ->
-    rt_intercept:load_code(Node).
-
-%% @doc Suspend heartbeats from the source node
-suspend_heartbeat_messages(Node) ->
-    %% disable forwarding of the heartbeat function call
-    lager:info("Suspend sending of heartbeats from node ~p", [Node]),
-    rt_intercept:add(Node, {riak_repl2_rtsource_helper,
-                            [{{send_heartbeat, 1}, drop_send_heartbeat}]}).
-
-%% @doc Resume heartbeats from the source node
-resume_heartbeat_messages(Node) ->
-    %% enable forwarding of the heartbeat function call
-    lager:info("Resume sending of heartbeats from node ~p", [Node]),
-    rt_intercept:add(Node, {riak_repl2_rtsource_helper,
-                            [{{send_heartbeat, 1}, forward_send_heartbeat}]}).
-
-suspend_heartbeat_responses(Node) ->
-    lager:info("Suspending sending of heartbeat responses from node ~p", [Node]),
-    rt_intercept:add(Node, {riak_repl2_rtsink_conn,
                            [{{send_heartbeat, 2}, drop_send_heartbeat_resp}]}).
-
-%% @doc Get the Pid of the first RT source connection on Node
-get_rt_conn_pid(Node) ->
-    [{_Remote, Pid}|Rest] = rpc:call(Node, riak_repl2_rtsource_conn_sup, enabled, []),
-    case Rest of
-        [] -> ok;
-        RR -> lager:info("Other connections: ~p", [RR])
-    end,
-    Pid.
-
-%% @doc Verify that heartbeat messages are being ack'd from the RT sink back to source Node
-verify_heartbeat_messages(Node) ->
-    lager:info("Verify heartbeats"),
-    Pid = get_rt_conn_pid(Node),
-    Status = rpc:call(Node, riak_repl2_rtsource_conn, status, [Pid], ?RPC_TIMEOUT),
-    HBRTT = proplists:get_value(hb_rtt, Status),
-    case HBRTT of
-        undefined ->
-            false;
-        RTT ->
-            is_integer(RTT)
-    end.
diff --git a/tests/repl_rt_overload.erl b/tests/repl_rt_overload.erl
deleted file mode 100644
index 1440d5b1e..000000000
--- a/tests/repl_rt_overload.erl
+++ /dev/null
@@ -1,190 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% -------------------------------------------------------------------
--module(repl_rt_overload).
--behaviour(riak_test).
--export([confirm/0, check_size/1, slow_trim_q/1]).
--include_lib("eunit/include/eunit.hrl").
-
--define(RTSINK_MAX_WORKERS, 1).
--define(RTSINK_MAX_PENDING, 1).
-
-%% Replication Realtime Overload test
-%%
-%% This attempts to push the rtq to the point of overload, so that
-%% the overload control will flip on (when the rtq mailbox gets over 2k)
-%% and then off again. This should cause some overload_drops, though
-%% traffic should recover once this clears.
-%%
-%% This test makes use of riak_repl2_rtq_intercepts.erl in order to slow
-%% the trim_q call on the first iteration (causing the mailbox backlog),
-%% then clear that condition and allow traffic to recover.
-%%
-
-%% @doc riak_test entry point
-confirm() ->
-    %% Start up two >1.3.2 clusters and connect them,
-    {LeaderA, LeaderB, ANodes, _BNodes} = make_connected_clusters(),
-
-    %% load intercepts. See ../intercepts/riak_repl_rt_intercepts.erl
-    load_intercepts(LeaderB),
-
-    %% Enable RT replication from cluster "A" to cluster "B"
-    lager:info("Enabling realtime between ~p and ~p", [LeaderA, LeaderB]),
-    enable_rt(LeaderA, ANodes),
-
-    %% Verify RT repl of objects
-    verify_rt(LeaderA, LeaderB),
-
-    lager:info("Slowing trim_q calls on leader A"),
-    slow_trim_q(LeaderA),
-
-    check_rtq_msg_q(LeaderA),
-
-    verify_overload_writes(LeaderA, LeaderB),
-
-    pass.
-
-%% @doc Turn on Realtime replication on the cluster led by LeaderA.
-%%      The clusters must already have been named and connected.
-enable_rt(LeaderA, ANodes) ->
-    repl_util:enable_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    repl_util:start_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes).
-
-%% @doc Verify that RealTime replication is functioning correctly by
-%%      writing some objects to cluster A and checking they can be
-%%      read from cluster B. Each call creates a new bucket so that
-%%      verification can be tested multiple times independently.
-verify_rt(LeaderA, LeaderB) ->
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) ||
-        <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <>,
-    First = 101,
-    Last = 200,
-
-    %% Write some objects to the source cluster (A),
-    lager:info("Writing ~p keys to ~p, which should RT repl to ~p",
-               [Last-First+1, LeaderA, LeaderB]),
-    ?assertEqual([], repl_util:do_write(LeaderA, First, Last, TestBucket, 2)),
-
-    %% verify data is replicated to B
-    lager:info("Reading ~p keys written from ~p", [Last-First+1, LeaderB]),
-    ?assertEqual(0, repl_util:wait_for_reads(LeaderB, First, Last, TestBucket, 2)).
-
-verify_overload_writes(LeaderA, LeaderB) ->
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) ||
-        <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <>,
-    First = 1,
-    Last = 10000,
-
-    %% Write some objects to the source cluster (A),
-    lager:info("Writing ~p keys to ~p, which should replicate to ~p",
-               [Last-First+1, LeaderA, LeaderB]),
-    ?assertEqual([], repl_util:do_write(LeaderA, First, Last, TestBucket, 2)),
-
-    lager:info("Reading ~p keys from ~p", [Last-First+1, LeaderB]),
-    ReadErrors = rt:systest_read(LeaderB, First, Last, TestBucket, 2),
-
-    lager:info("systest_read saw ~p errors", [length(ReadErrors)]),
-
-    Status = rpc:call(LeaderA, riak_repl2_rtq, status, []),
-    {_, OverloadDrops} = lists:keyfind(overload_drops, 1, Status),
-
-    lager:info("overload_drops: ~p", [OverloadDrops]),
-
-    % If there are overload_drops, overload has done its job
-    ?assert(OverloadDrops > 0).
-
-%% @doc Connect two clusters for replication using their respective leader nodes.
-connect_clusters(LeaderA, LeaderB) ->
-    {ok, {_IP, Port}} = rpc:call(LeaderB, application, get_env,
-                                 [riak_core, cluster_mgr]),
-    lager:info("connect cluster A:~p to B on port ~p", [LeaderA, Port]),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", Port),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")).
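verify_overload_writes/2 above keys off the overload_drops counter in riak_repl2_rtq:status/0. Extracted as a small helper, the lookup would look like this (a sketch, name assumed):

    %% Sketch: read the overload_drops counter asserted on above.
    overload_drops(Node) ->
        Status = rpc:call(Node, riak_repl2_rtq, status, []),
        {overload_drops, Drops} = lists:keyfind(overload_drops, 1, Status),
        Drops.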
-
-%% @doc Create two clusters of 1 node each and connect them for replication:
-%%      Cluster "A" -> cluster "B"
-make_connected_clusters() ->
-    NumNodes = rt_config:get(num_nodes, 2),
-    ClusterASize = rt_config:get(cluster_a_size, 1),
-
-    lager:info("Deploy ~p nodes", [NumNodes]),
-    Conf = [
-            {riak_repl,
-             [
-              %% turn off fullsync
-              {fullsync_on_connect, false},
-              {fullsync_interval, disabled},
-              {rtq_max_bytes, 1048576},
-              {rtsink_max_workers, ?RTSINK_MAX_WORKERS},
-              {rtsink_max_pending, ?RTSINK_MAX_PENDING}
-             ]}
-           ],
-
-    Nodes = rt:deploy_nodes(NumNodes, Conf, [riak_kv, riak_repl]),
-    {ANodes, BNodes} = lists:split(ClusterASize, Nodes),
-    lager:info("ANodes: ~p", [ANodes]),
-    lager:info("BNodes: ~p", [BNodes]),
-
-    lager:info("Build cluster A"),
-    repl_util:make_cluster(ANodes),
-
-    lager:info("Build cluster B"),
-    repl_util:make_cluster(BNodes),
-
-    %% get the leader for the first cluster
-    lager:info("waiting for leader to converge on cluster A"),
-    ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)),
-    AFirst = hd(ANodes),
-
-    %% get the leader for the second cluster
-    lager:info("waiting for leader to converge on cluster B"),
-    ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)),
-    BFirst = hd(BNodes),
-
-    %% Name the clusters
-    repl_util:name_cluster(AFirst, "A"),
-    rt:wait_until_ring_converged(ANodes),
-
-    repl_util:name_cluster(BFirst, "B"),
-    rt:wait_until_ring_converged(BNodes),
-
-    %% Connect for replication
-    connect_clusters(AFirst, BFirst),
-
-    {AFirst, BFirst, ANodes, BNodes}.
-
-%% @doc Load intercepts file from ../intercepts/riak_repl2_rtq_intercepts.erl
-load_intercepts(Node) ->
-    rt_intercept:load_code(Node).
-
-%% @doc Slow down handle_info (write calls)
-% slow_write_calls(Node) ->
-%     %% disable forwarding of the heartbeat function call
-%     lager:info("Slowing down sink do_write calls on ~p", [Node]),
-%     rt_intercept:add(Node, {riak_repl2_rtsink_conn,
-%                             [{{handle_info, 2}, slow_handle_info}]}).
-
-slow_trim_q(Node) ->
-    lager:info("Slowing down trim_q calls on ~p", [Node]),
-    rt_intercept:add(Node, {riak_repl2_rtq,
-                            [{{trim_q, 1}, slow_trim_q}]}).
-
-check_rtq_msg_q(Node) ->
-    Pid = spawn(?MODULE, check_size, [Node]),
-    Pid.
-
-check_size(Node) ->
-    Pid = rpc:call(Node, erlang, whereis, [riak_repl2_rtq]),
-    Len = rpc:call(Node, erlang, process_info, [Pid, message_queue_len]),
-    io:format("mailbox size of riak_repl2_rtq: ~p~n", [Len]),
-
-    timer:sleep(2000),
-    check_size(Node).
diff --git a/tests/repl_rt_pending.erl b/tests/repl_rt_pending.erl
deleted file mode 100644
index 24754389a..000000000
--- a/tests/repl_rt_pending.erl
+++ /dev/null
@@ -1,249 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% -------------------------------------------------------------------
--module(repl_rt_pending).
--behaviour(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(RPC_TIMEOUT, 5000).
--define(HB_TIMEOUT, 2000).
-
-%% Replication Realtime Heartbeat test
-%% Valid for EE version 1.3.2 and up
-%%
-%% If both sides of an RT replication connection support it, a heartbeat
-%% message is sent from the RT Source to the RT Sink every
-%% {riak_repl, rt_heartbeat_interval}, which defaults to 15s. If
-%% a response is not received within {riak_repl, rt_heartbeat_timeout},
-%% which also defaults to 15s, then the source connection exits and will
-%% be re-established by the supervisor.
-%%
-%% RT Heartbeat messages are supported between EE releases 1.3.2 and up.
-%%
-%% Test:
-%% -----
-%% Change the heartbeat_interval and heartbeat_timeout to 2 seconds,
-%% Start up two >1.3.2 clusters and connect them,
-%% Enable RT replication,
-%% Write some objects to the source cluster (A),
-%% Verify they got to the sink cluster (B),
-%% Verify that heartbeats are being acknowledged by the sink (B) back to source (A),
-%% Interrupt the connection so that packets cannot flow from A -> B,
-%% Verify that the connection is restarted after the heartbeat_timeout period,
-%% Verify that heartbeats are being acknowledged by the sink (B) back to source (A),
-%% Write some objects to the source cluster (A),
-%% Verify they got to the sink cluster (B),
-%% Have a cold beverage.
-
-%% @doc riak_test entry point
-confirm() ->
-    %% Start up two >1.3.2 clusters and connect them,
-    {LeaderA, LeaderB, ANodes, BNodes} = make_connected_clusters(),
-
-    %% load intercepts. See ../intercepts/riak_repl_rt_intercepts.erl
-    load_intercepts(LeaderA),
-
-    %% Enable RT replication from cluster "A" to cluster "B"
-    enable_bi_rt(LeaderA, ANodes, LeaderB, BNodes),
-
-    %% Verify that heartbeats are being acknowledged by the sink (B) back to source (A)
-    %%?assertEqual(verify_heartbeat_messages(LeaderA), true),
-
-    %% Verify RT repl of objects
-    write_n_keys(LeaderA, LeaderB, 1, 10000),
-
-    write_n_keys(LeaderB, LeaderA, 10001, 20000),
-
-    RTQStatus = rpc:call(LeaderA, riak_repl2_rtq, status, []),
-
-    Consumers = proplists:get_value(consumers, RTQStatus),
-    case proplists:get_value("B", Consumers) of
-        undefined ->
-            [];
-        Consumer ->
-            Unacked = proplists:get_value(unacked, Consumer, 0),
-            lager:info("unacked: ~p", [Unacked]),
-            ?assertEqual(0, Unacked)
-    end,
-    %% Cause heartbeat messages to not be delivered, but remember the current
-    %% Pid of the RT connection. It should change after we stop heartbeats
-    %% because the RT connection will restart if all goes well.
-    %RTConnPid1 = get_rt_conn_pid(LeaderA),
-    %lager:info("Suspending HB"),
-    %suspend_heartbeat_messages(LeaderA),
-
-    %% sleep longer than the HB timeout interval to force re-connection;
-    %% and give it time to restart the RT connection. Wait an extra 2 seconds.
-    %timer:sleep(?HB_TIMEOUT + 2000),
-
-    %% Verify that RT connection has restarted by noting that its Pid has changed
-    %RTConnPid2 = get_rt_conn_pid(LeaderA),
-    %?assertNotEqual(RTConnPid1, RTConnPid2),
-
-    %% Verify that heartbeats are not being ack'd
-    %rt:log_to_nodes([LeaderA], "Verify suspended HB"),
-    %?assertEqual(verify_heartbeat_messages(LeaderA), false),
-
-    %% Resume heartbeat messages from source and allow some time to ack back.
-    %% Wait one second longer than the timeout
-    %rt:log_to_nodes([LeaderA], "Resuming HB"),
-    %resume_heartbeat_messages(LeaderA),
-    %timer:sleep(?HB_TIMEOUT + 1000),
-
-    %% Verify that heartbeats are being acknowledged by the sink (B) back to source (A)
-    %rt:log_to_nodes([LeaderA], "Verify resumed HB"),
-    %?assertEqual(verify_heartbeat_messages(LeaderA), true),
-
-    %% Verify RT repl of objects
-    %verify_rt(LeaderA, LeaderB),
-
-    pass.
-
-%% @doc Turn on bi-directional Realtime replication between the clusters
-%%      led by LeaderA and LeaderB.
-%%      The clusters must already have been named and connected.
-enable_bi_rt(LeaderA, ANodes, LeaderB, BNodes) ->
-    repl_util:enable_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    repl_util:start_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    repl_util:enable_realtime(LeaderB, "A"),
-    rt:wait_until_ring_converged(BNodes),
-
-    repl_util:start_realtime(LeaderB, "A"),
-    rt:wait_until_ring_converged(BNodes).
-
-
-%% @doc Verify that RealTime replication is functioning correctly by
-%%      writing some objects to cluster A and checking they can be
-%%      read from cluster B. Each call creates a new bucket so that
-%%      verification can be tested multiple times independently.
-write_n_keys(Source, Destination, M, N) ->
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) ||
-        <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <>,
-    First = M,
-    Last = N,
-
-    %% Write some objects to the source cluster (A),
-    lager:info("Writing ~p keys to ~p, which should RT repl to ~p",
-               [Last-First+1, Source, Destination]),
-    ?assertEqual([], repl_util:do_write(Source, First, Last, TestBucket, 2)),
-
-    %% verify data is replicated to B
-    lager:info("Reading ~p keys written from ~p", [Last-First+1, Destination]),
-    ?assertEqual(0, repl_util:wait_for_reads(Destination, First, Last, TestBucket, 2)).
-
-%% @doc Connect two clusters for replication using their respective leader nodes.
-connect_clusters(LeaderA, LeaderB) ->
-    {ok, {_IP, PortB}} = rpc:call(LeaderB, application, get_env,
-                                  [riak_core, cluster_mgr]),
-    lager:info("connect cluster A:~p to B on port ~p", [LeaderA, PortB]),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", PortB),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-
-    {ok, {_IP, PortA}} = rpc:call(LeaderA, application, get_env,
-                                  [riak_core, cluster_mgr]),
-    lager:info("connect cluster B:~p to A on port ~p", [LeaderB, PortA]),
-    repl_util:connect_cluster(LeaderB, "127.0.0.1", PortA),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderB, "A")).
-
-
-
-%% @doc Create two clusters of 3 nodes each and connect them for replication:
-%%      Cluster "A" -> cluster "B"
-make_connected_clusters() ->
-    NumNodes = rt_config:get(num_nodes, 6),
-    ClusterASize = rt_config:get(cluster_a_size, 3),
-
-    lager:info("Deploy ~p nodes", [NumNodes]),
-    Conf = [
-            {riak_repl,
-             [
-              %% turn off fullsync
-              {fullsync_on_connect, false},
-              {fullsync_interval, disabled},
-              %% override defaults for RT heartbeat so that we
-              %% can see faults sooner and have a quicker test.
- {rt_heartbeat_interval, ?HB_TIMEOUT}, - {rt_heartbeat_timeout, ?HB_TIMEOUT} - ]} - ], - - Nodes = rt:deploy_nodes(NumNodes, Conf, [riak_kv, riak_repl]), - - {ANodes, BNodes} = lists:split(ClusterASize, Nodes), - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - - %% get the leader for the first cluster - lager:info("waiting for leader to converge on cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - AFirst = hd(ANodes), - - %% get the leader for the second cluster - lager:info("waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - BFirst = hd(BNodes), - - %% Name the clusters - repl_util:name_cluster(AFirst, "A"), - rt:wait_until_ring_converged(ANodes), - - repl_util:name_cluster(BFirst, "B"), - rt:wait_until_ring_converged(BNodes), - - %% Connect for replication - connect_clusters(AFirst, BFirst), - - {AFirst, BFirst, ANodes, BNodes}. - -%% @doc Load intercepts file from ../intercepts/riak_repl2_rtsource_helper_intercepts.erl -load_intercepts(Node) -> - rt_intercept:load_code(Node). - -%% @doc Suspend heartbeats from the source node -%suspend_heartbeat_messages(Node) -> - %% disable forwarding of the heartbeat function call -% lager:info("Suspend sending of heartbeats from node ~p", [Node]), -% rt_intercept:add(Node, {riak_repl2_rtsource_helper, -% [{{send_heartbeat, 1}, drop_send_heartbeat}]}). - -%% @doc Resume heartbeats from the source node -%resume_heartbeat_messages(Node) -> - %% enable forwarding of the heartbeat function call -% lager:info("Resume sending of heartbeats from node ~p", [Node]), -% rt_intercept:add(Node, {riak_repl2_rtsource_helper, -% [{{send_heartbeat, 1}, forward_send_heartbeat}]}). - -%% @doc Get the Pid of the first RT source connection on Node -%get_rt_conn_pid(Node) -> -% [{_Remote, Pid}|Rest] = rpc:call(Node, riak_repl2_rtsource_conn_sup, enabled, []), -% case Rest of -% [] -> ok; -% RR -> lager:info("Other connections: ~p", [RR]) -% end, -% Pid. - -%% @doc Verify that heartbeat messages are being ack'd from the RT sink back to source Node -%verify_heartbeat_messages(Node) -> -% lager:info("Verify heartbeats"), -% Pid = get_rt_conn_pid(Node), -% Status = rpc:call(Node, riak_repl2_rtsource_conn, status, [Pid], ?RPC_TIMEOUT), -% HBRTT = proplists:get_value(hb_rtt, Status), -% case HBRTT of -% undefined -> -% false; -% RTT -> -% is_integer(RTT) -% end. diff --git a/tests/repl_util.erl b/tests/repl_util.erl deleted file mode 100644 index a7be23f35..000000000 --- a/tests/repl_util.erl +++ /dev/null @@ -1,637 +0,0 @@ --module(repl_util). 
--export([make_cluster/1, - name_cluster/2, - node_has_version/2, - nodes_with_version/2, - nodes_all_have_version/2, - wait_until_is_leader/1, - is_leader/1, - wait_until_is_not_leader/1, - wait_until_leader/1, - wait_until_new_leader/2, - wait_until_leader_converge/1, - wait_until_connection/1, - wait_until_no_connection/1, - wait_for_reads/5, - wait_until_fullsync_started/1, - wait_until_fullsync_stopped/1, - start_and_wait_until_fullsync_complete/1, - start_and_wait_until_fullsync_complete/2, - start_and_wait_until_fullsync_complete/3, - start_and_wait_until_fullsync_complete/4, - connect_cluster/3, - disconnect_cluster/2, - wait_for_connection/2, - wait_for_disconnect/2, - wait_for_full_disconnect/1, - wait_until_connection_errors/2, - wait_until_connections_clear/1, - enable_realtime/2, - disable_realtime/2, - enable_fullsync/2, - start_realtime/2, - stop_realtime/2, - stop_fullsync/2, - disable_fullsync/2, - do_write/5, - get_fs_coord_status_item/3, - num_partitions/1, - get_cluster_mgr_port/1, - maybe_reconnect_rt/3, - connect_rt/3, - connect_cluster_by_name/3, - connect_cluster_by_name/4, - get_port/1, - get_leader/1, - write_to_cluster/4, - write_to_cluster/5, - read_from_cluster/5, - read_from_cluster/6, - check_fullsync/3, - validate_completed_fullsync/6, - validate_intercepted_fullsync/5 - ]). --include_lib("eunit/include/eunit.hrl"). - -make_cluster(Nodes) -> - [First|Rest] = Nodes, - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - [rt:wait_for_service(N, riak_kv) || N <- Nodes], - [rt:join(Node, First) || Node <- Rest], - ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes)). - -name_cluster(Node, Name) -> - lager:info("Naming cluster ~p",[Name]), - Res = rpc:call(Node, riak_repl_console, clustername, [[Name]]), - ?assertEqual(ok, Res). - -wait_until_is_leader(Node) -> - lager:info("wait_until_is_leader(~p)", [Node]), - rt:wait_until(Node, fun is_leader/1). - -is_leader(Node) -> - case rpc:call(Node, riak_core_cluster_mgr, get_leader, []) of - {badrpc, Wut} -> - lager:info("Badrpc during is_leader for ~p. Error: ~p", [Node, Wut]), - false; - Leader -> - lager:info("Checking: ~p =:= ~p", [Leader, Node]), - Leader =:= Node - end. - - -wait_until_is_not_leader(Node) -> - lager:info("wait_until_is_not_leader(~p)", [Node]), - rt:wait_until(Node, fun is_not_leader/1). - -is_not_leader(Node) -> - case rpc:call(Node, riak_core_cluster_mgr, get_leader, []) of - {badrpc, Wut} -> - lager:info("Badrpc during is_not leader for ~p. Error: ~p", [Node, Wut]), - false; - Leader -> - lager:info("Checking: ~p =/= ~p", [Leader, Node]), - Leader =/= Node - end. - -wait_until_leader(Node) -> - wait_until_new_leader(Node, undefined). - -wait_until_new_leader(Node, OldLeader) -> - Res = rt:wait_until(Node, - fun(_) -> - Status = rpc:call(Node, riak_core_cluster_mgr, get_leader, []), - case Status of - {badrpc, _} -> - false; - undefined -> - false; - OldLeader -> - false; - _Other -> - true - end - end), - ?assertEqual(ok, Res). - -wait_until_leader_converge([Node|_] = Nodes) -> - rt:wait_until(Node, - fun(_) -> - LeaderResults = - [rpc:call(N, riak_core_cluster_mgr, get_leader, []) || - N <- Nodes], - {Leaders, Errors} = - lists:partition(leader_result_filter_fun(), LeaderResults), - UniqueLeaders = lists:usort(Leaders), - Errors == [] andalso length(UniqueLeaders) == 1 - end). - -leader_result_filter_fun() -> - fun(L) -> - case L of - undefined -> - false; - {badrpc, _} -> - false; - _ -> - true - end - end. 
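
%% The leader helpers above share one shape: poll
%% riak_core_cluster_mgr:get_leader/0 over rpc, treat undefined and
%% {badrpc, _} as transient, and succeed once every node reports the
%% same leader. A minimal standalone sketch of that convergence loop
%% (the retry count and delay arguments are illustrative, not the
%% values rt:wait_until uses):

is_transient(undefined) -> true;
is_transient({badrpc, _}) -> true;
is_transient(_) -> false.

%% Succeeds once every node reports the same non-transient leader.
wait_for_leader_agreement(Nodes, Retries, DelayMs) ->
    Results = [rpc:call(N, riak_core_cluster_mgr, get_leader, []) || N <- Nodes],
    Good = [R || R <- Results, not is_transient(R)],
    case {length(Good) =:= length(Nodes), lists:usort(Good)} of
        {true, [_SingleLeader]} ->
            ok;
        _ when Retries > 0 ->
            timer:sleep(DelayMs),
            wait_for_leader_agreement(Nodes, Retries - 1, DelayMs);
        _ ->
            {error, leaders_did_not_converge}
    end.
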
- -wait_until_connection(Node) -> - rt:wait_until(Node, - fun(_) -> - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - case Status of - {badrpc, _} -> - false; - _ -> - case proplists:get_value(fullsync_coordinator, Status) of - [] -> - false; - [_C] -> - true; - Conns -> - lager:warning("multiple connections detected: ~p", - [Conns]), - true - end - end - end). %% 40 seconds is enough for repl - -wait_until_no_connection(Node) -> - rt:wait_until(Node, - fun(_) -> - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - case Status of - {badrpc, _} -> - false; - _ -> - case proplists:get_value(connected_clusters, Status) of - [] -> - true; - _ -> - false - end - end - end). %% 40 seconds is enough for repl - -wait_until_fullsync_started(SourceLeader) -> - rt:wait_until(fun() -> - lager:info("Waiting for fullsync to start"), - Coordinators = [Pid || {"B", Pid} <- - riak_repl2_fscoordinator_sup:started(SourceLeader)], - lists:any(fun riak_repl2_fscoordinator:is_running/1, - Coordinators) - end). - -wait_until_fullsync_stopped(SourceLeader) -> - rt:wait_until(fun() -> - lager:info("Waiting for fullsync to stop"), - Coordinators = [Pid || {"B", Pid} <- - riak_repl2_fscoordinator_sup:started(SourceLeader)], - not lists:any(fun riak_repl2_fscoordinator:is_running/1, - Coordinators) - end). - -wait_for_reads(Node, Start, End, Bucket, R) -> - rt:wait_until(Node, - fun(_) -> - Reads = rt:systest_read(Node, Start, End, Bucket, R, <<>>, true), - Reads == [] - end), - Reads = rt:systest_read(Node, Start, End, Bucket, R, <<>>, true), - lager:info("Reads: ~p", [Reads]), - length(Reads). - -get_fs_coord_status_item(Node, SinkName, ItemName) -> - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - FS_CoordProps = proplists:get_value(fullsync_coordinator, Status), - ClusterProps = proplists:get_value(SinkName, FS_CoordProps), - proplists:get_value(ItemName, ClusterProps). - -start_and_wait_until_fullsync_complete(Node) -> - start_and_wait_until_fullsync_complete(Node, undefined). - -start_and_wait_until_fullsync_complete(Node, Cluster) -> - start_and_wait_until_fullsync_complete(Node, Cluster, undefined). - -start_and_wait_until_fullsync_complete(Node, Cluster, NotifyPid) -> - start_and_wait_until_fullsync_complete(Node, Cluster, NotifyPid, 20). - -start_and_wait_until_fullsync_complete(Node, Cluster, NotifyPid, Retries) -> - Status0 = rpc:call(Node, riak_repl_console, status, [quiet]), - Count0 = proplists:get_value(server_fullsyncs, Status0), - Count = fullsync_count(Count0, Status0, Cluster), - - lager:info("Waiting for fullsync count to be ~p", [Count]), - - lager:info("Starting fullsync on: ~p", [Node]), - rpc:call(Node, riak_repl_console, fullsync, [fullsync_start_args(Cluster)]), - - %% sleep because of the old bug where stats will crash if you call it too - %% soon after starting a fullsync - timer:sleep(500), - - %% Send message to process and notify fullsync has began. - fullsync_notify(NotifyPid), - - case rt:wait_until(make_fullsync_wait_fun(Node, Count), 100, 1000) of - ok -> - ok; - _ when Retries > 0 -> - ?assertEqual(ok, wait_until_connection(Node)), - lager:warning("Node failed to fullsync, retrying"), - start_and_wait_until_fullsync_complete(Node, Cluster, NotifyPid, Retries-1) - end, - lager:info("Fullsync on ~p complete", [Node]). 
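
%% A typical call site for start_and_wait_until_fullsync_complete/4,
%% sketched with a hypothetical node name; NotifyPid receives a
%% fullsync_started message once the sync has been kicked off, so the
%% receive below completes as soon as the call returns.

Leader = 'dev1@127.0.0.1',  %% hypothetical source-side leader
repl_util:start_and_wait_until_fullsync_complete(Leader, "B", self(), 20),
receive
    fullsync_started ->
        lager:info("fullsync to sink \"B\" was started and has completed")
after 0 ->
    lager:info("no start notification received")
end.
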
- -fullsync_count(Count, Status, undefined) -> - %% count the # of fullsync enabled clusters - FullSyncClusters = proplists:get_value(fullsync_enabled, Status), - Count + length(string:tokens(FullSyncClusters, ", ")); -fullsync_count(Count, _Status, _Cluster) -> - Count + 1. - -fullsync_start_args(undefined) -> - lager:info("No cluster for fullsync start"), - ["start"]; -fullsync_start_args(Cluster) -> - lager:info("Cluster for fullsync start: ~p", [Cluster]), - ["start", Cluster]. - -fullsync_notify(NotifyPid) when is_pid(NotifyPid) -> - NotifyPid ! fullsync_started; -fullsync_notify(_) -> - ok. - -make_fullsync_wait_fun(Node, Count) -> - fun() -> - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - case Status of - {badrpc, _} -> - false; - _ -> - case proplists:get_value(server_fullsyncs, Status) of - C when C >= Count -> - true; - _ -> - false - end - end - end. - -connect_cluster(Node, IP, Port) -> - Res = rpc:call(Node, riak_repl_console, connect, - [[IP, integer_to_list(Port)]]), - ?assertEqual(ok, Res). - -disconnect_cluster(Node, Name) -> - Res = rpc:call(Node, riak_repl_console, disconnect, - [[Name]]), - ?assertEqual(ok, Res). - -wait_for_connection(Node, Name) -> - rt:wait_until(Node, - fun(_) -> - lager:info("Waiting for repl connection to cluster named ~p on node ~p", [Name, Node]), - case rpc:call(Node, riak_core_cluster_mgr, - get_connections, []) of - {ok, Connections} -> - Conn = [P || {{cluster_by_name, N}, P} <- Connections, N == Name], - case Conn of - [] -> - false; - [Pid] -> - try riak_core_cluster_conn:status(Pid, 2000) of - {Pid, status, _} -> - true; - _ -> - false - catch - _W:_Y -> - Pid ! {self(), status}, - receive - {Pid, status, _} -> - true; - {Pid, connecting, _} -> - false - %% Never wait forever for the response. Allow wait_until to work. - after 2000 -> - false - end - end - end; - _ -> - false - end - end). - -%% @doc Wait for disconnect from this node to the -%% named cluster. -wait_for_disconnect(Node, Name) -> - rt:wait_until(Node, fun(_) -> - lager:info("Attempting to verify disconnect on ~p from ~p.", - [Node, Name]), - try - {ok, Connections} = rpc:call(Node, - riak_core_cluster_mgr, - get_connections, - []), - lager:info("Waiting for sink disconnect on ~p: ~p.", - [Node, Connections]), - Conn = [P || {{cluster_by_name, N}, P} <- Connections, N == Name], - case Conn of - [] -> - true; - _ -> - false - end - catch - _:Error -> - lager:info("Caught error: ~p.", [Error]), - false - end - end). - -%% @doc Wait for full disconnect from all clusters and IP's -wait_for_full_disconnect(Node) -> - rt:wait_until(Node, fun(_) -> - lager:info("Attempting to verify full disconnect on ~p.", - [Node]), - try - {ok, Connections} = rpc:call(Node, - riak_core_cluster_mgr, - get_connections, - []), - lager:info("Waiting for sink disconnect on ~p: ~p.", - [Node, Connections]), - case Connections of - [] -> - true; - _ -> - false - end - catch - _:Error -> - lager:info("Caught error: ~p.", [Error]), - false - end - end). - -%% @doc Wait until canceled connections are cleared -wait_until_connections_clear(Node) -> - rt:wait_until(Node, fun(_) -> - try - Status = rpc:call(Node, - riak_core_connection_mgr, - get_request_states, - []), - lager:info("Waiting for cancelled connections to clear on ~p: ~p.", - [Node, Status]), - case Status of - [] -> - true; - _ -> - false - end - catch - _:Error -> - lager:info("Caught error: ~p.", [Error]), - false - end - end). 
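
%% Every connection check above reduces to filtering
%% riak_core_cluster_mgr:get_connections/0 output for
%% {cluster_by_name, Name} entries. A minimal sketch of that
%% predicate, without the extra status round-trip that
%% wait_for_connection/2 performs on the returned pid:

%% True if Node holds at least one cluster-manager connection entry
%% for the sink cluster Name (does not verify the socket is healthy).
has_connection(Node, Name) ->
    case rpc:call(Node, riak_core_cluster_mgr, get_connections, []) of
        {ok, Connections} ->
            [] =/= [Pid || {{cluster_by_name, N}, Pid} <- Connections,
                           N =:= Name];
        _ ->
            false
    end.
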
- -%% @doc Wait until errors in connection -wait_until_connection_errors(Node, BNode) -> - {ok, {_IP, Port}} = rpc:call(BNode, application, get_env, - [riak_core, cluster_mgr]), - rt:wait_until(Node, fun(_) -> - try - Failures = rpc:call(Node, - riak_core_connection_mgr, - get_connection_errors, - [{"127.0.0.1",Port}]), - lager:info("Waiting for endpoint connection failures on ~p: ~p.", - [Node, Failures]), - case orddict:size(Failures) of - 0 -> - false; - _ -> - true - end - catch - _:Error -> - lager:info("Caught error: ~p.", [Error]), - false - end - end). - -enable_realtime(Node, Cluster) -> - Res = rpc:call(Node, riak_repl_console, realtime, [["enable", Cluster]]), - ?assertEqual(ok, Res). - -disable_realtime(Node, Cluster) -> - Res = rpc:call(Node, riak_repl_console, realtime, [["disable", Cluster]]), - ?assertEqual(ok, Res). - -enable_fullsync(Node, Cluster) -> - Res = rpc:call(Node, riak_repl_console, fullsync, [["enable", Cluster]]), - ?assertEqual(ok, Res). - -disable_fullsync(Node, Cluster) -> - Res = rpc:call(Node, riak_repl_console, fullsync, [["disable", Cluster]]), - ?assertEqual(ok, Res). - -stop_fullsync(Node, Cluster) -> - Res = rpc:call(Node, riak_repl_console, fullsync, [["stop", Cluster]]), - ?assertEqual(ok, Res). - -start_realtime(Node, Cluster) -> - Res = rpc:call(Node, riak_repl_console, realtime, [["start", Cluster]]), - ?assertEqual(ok, Res). - -stop_realtime(Node, Cluster) -> - Res = rpc:call(Node, riak_repl_console, realtime, [["stop", Cluster]]), - ?assertEqual(ok, Res). - -do_write(Node, Start, End, Bucket, W) -> - case rt:systest_write(Node, Start, End, Bucket, W) of - [] -> - []; - Errors -> - lager:warning("~p errors while writing: ~p", - [length(Errors), Errors]), - timer:sleep(1000), - lists:flatten([rt:systest_write(Node, S, S, Bucket, W) || - {S, _Error} <- Errors]) - end. - -%% does the node meet the version requirement? -node_has_version(Node, Version) -> - NodeVersion = rtdev:node_version(rtdev:node_id(Node)), - case NodeVersion of - current -> - %% current always satisfies any version check - true; - _ -> - NodeVersion >= Version - end. - -nodes_with_version(Nodes, Version) -> - [Node || Node <- Nodes, node_has_version(Node, Version)]. - -nodes_all_have_version(Nodes, Version) -> - Nodes == nodes_with_version(Nodes, Version). - -%% Return the number of partitions in the cluster where Node is a member. -num_partitions(Node) -> - {ok, Ring} = rpc:call(Node, riak_core_ring_manager, get_raw_ring, []), - N = riak_core_ring:num_partitions(Ring), - N. - -get_cluster_mgr_port(Node) -> - {ok, {_Ip, Port}} = rpc:call(Node, application, get_env, [riak_core, cluster_mgr]), - Port. - -maybe_reconnect_rt(SourceNode, SinkPort, SinkName) -> - case repl_util:wait_for_connection(SourceNode, SinkName) of - fail -> - connect_rt(SourceNode, SinkPort, SinkName); - Oot -> - Oot - end. - -connect_rt(SourceNode, SinkPort, SinkName) -> - repl_util:connect_cluster(SourceNode, "127.0.0.1", SinkPort), - repl_util:wait_for_connection(SourceNode, SinkName), - repl_util:enable_realtime(SourceNode, SinkName), - repl_util:start_realtime(SourceNode, SinkName). - -%% @doc Connect two clusters using a given name. -connect_cluster_by_name(Source, Port, Name) -> - lager:info("Connecting ~p to ~p for cluster ~p.", - [Source, Port, Name]), - repl_util:connect_cluster(Source, "127.0.0.1", Port), - ?assertEqual(ok, repl_util:wait_for_connection(Source, Name)). - -%% @doc Connect two clusters using a given name. 
-connect_cluster_by_name(Source, Destination, Port, Name) -> - lager:info("Connecting ~p to ~p for cluster ~p.", - [Source, Port, Name]), - repl_util:connect_cluster(Source, Destination, Port), - ?assertEqual(ok, repl_util:wait_for_connection(Source, Name)). - -%% @doc Given a node, find the port that the cluster manager is -%% listening on. -get_port(Node) -> - {ok, {_IP, Port}} = rpc:call(Node, - application, - get_env, - [riak_core, cluster_mgr]), - Port. - -%% @doc Given a node, find out who the current replication leader in its -%% cluster is. -get_leader(Node) -> - rpc:call(Node, riak_core_cluster_mgr, get_leader, []). - -%% @doc Validate fullsync completed and all keys are available. -validate_completed_fullsync(ReplicationLeader, - DestinationNode, - DestinationCluster, - Start, - End, - Bucket) -> - ok = check_fullsync(ReplicationLeader, DestinationCluster, 0), - lager:info("Verify: Reading ~p keys repl'd from A(~p) to ~p(~p)", - [End - Start, ReplicationLeader, - DestinationCluster, DestinationNode]), - ?assertEqual(0, - repl_util:wait_for_reads(DestinationNode, - Start, - End, - Bucket, - 1)). - -%% @doc Write a series of keys and ensure they are all written. -write_to_cluster(Node, Start, End, Bucket) -> - write_to_cluster(Node, Start, End, Bucket, 1). - -%% @doc Write a series of keys and ensure they are all written. -write_to_cluster(Node, Start, End, Bucket, Quorum) -> - lager:info("Writing ~p keys to node ~p.", [End - Start, Node]), - ?assertEqual([], - repl_util:do_write(Node, Start, End, Bucket, Quorum)). - -%% @doc Read from cluster a series of keys, asserting a certain number -%% of errors. -read_from_cluster(Node, Start, End, Bucket, Errors) -> - read_from_cluster(Node, Start, End, Bucket, Errors, 1). - -%% @doc Read from cluster a series of keys, asserting a certain number -%% of errors. -read_from_cluster(Node, Start, End, Bucket, Errors, Quorum) -> - lager:info("Reading ~p keys from node ~p.", [End - Start, Node]), - Res2 = rt:systest_read(Node, Start, End, Bucket, Quorum, <<>>, true), - ?assertEqual(Errors, length(Res2)). - -%% @doc Assert we can perform one fullsync cycle, and that the number of -%% expected failures is correct. -check_fullsync(Node, Cluster, ExpectedFailures) -> - {Time, _} = timer:tc(repl_util, - start_and_wait_until_fullsync_complete, - [Node, Cluster]), - lager:info("Fullsync completed in ~p seconds", [Time/1000/1000]), - - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - - Props = case proplists:get_value(fullsync_coordinator, Status) of - [{_Name, Props0}] -> - Props0; - Multiple -> - {_Name, Props0} = lists:keyfind(Cluster, 1, Multiple), - Props0 - end, - - %% check that the expected number of partitions failed to sync - ErrorExits = proplists:get_value(error_exits, Props), - lager:info("Error exits: ~p", [ErrorExits]), - ?assertEqual(ExpectedFailures, ErrorExits), - - %% check that we retried each of them 5 times - RetryExits = proplists:get_value(retry_exits, Props), - lager:info("Retry exits: ~p", [RetryExits]), - ?assert(RetryExits >= ExpectedFailures * 5), - - ok. - -%% @doc Add an intercept on a target node to simulate a given failure -%% mode, and then enable fullsync replication and verify completes -%% a full cycle. Subsequently reboot the node. -validate_intercepted_fullsync(InterceptTarget, - Intercept, - ReplicationLeader, - ReplicationCluster, - NumIndicies) -> - lager:info("Validating intercept ~p on ~p.", - [Intercept, InterceptTarget]), - - %% Add intercept. 
-    ok = rt_intercept:add(InterceptTarget, Intercept),
-
-    %% Verify fullsync.
-    ok = repl_util:check_fullsync(ReplicationLeader,
-                                  ReplicationCluster,
-                                  NumIndicies),
-
-    %% Reboot node.
-    rt:stop_and_wait(InterceptTarget),
-    rt:start_and_wait(InterceptTarget),
-
-    %% Wait for riak_kv and riak_repl to initialize.
-    rt:wait_for_service(InterceptTarget, riak_kv),
-    rt:wait_for_service(InterceptTarget, riak_repl),
-
-    %% Wait until AAE trees are computed on the rebooted node.
-    rt:wait_until_aae_trees_built([InterceptTarget]).
diff --git a/tests/replication.erl b/tests/replication.erl
deleted file mode 100644
index c2463265c..000000000
--- a/tests/replication.erl
+++ /dev/null
@@ -1,706 +0,0 @@
--module(replication).
--behavior(riak_test).
--export([confirm/0]).
--compile(export_all).
--include_lib("eunit/include/eunit.hrl").
-
-%% export functions shared with other replication tests...
--export([make_bucket/3]).
-
-confirm() ->
-    Conf = [
-            {riak_repl,
-             [
-              {fullsync_on_connect, false},
-              {fullsync_interval, disabled},
-              {diff_batch_size, 10}
-             ]}
-           ],
-    rt:set_advanced_conf(all, Conf),
-    [ANodes, BNodes] = rt:build_clusters([3, 3]),
-    rt:wait_for_cluster_service(ANodes, riak_repl),
-    rt:wait_for_cluster_service(BNodes, riak_repl),
-    replication(ANodes, BNodes, false),
-    pass.
-
-replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
-
-    AllNodes = ANodes ++ BNodes,
-
-    rt:log_to_nodes(AllNodes, "Starting replication test"),
-
-    TestHash = erlang:md5(term_to_binary(os:timestamp())),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-    FullsyncOnly = <<TestHash/binary, "-fullsync_only">>,
-    RealtimeOnly = <<TestHash/binary, "-realtime_only">>,
-    NoRepl = <<TestHash/binary, "-no_repl">>,
-
-    case Connected of
-        false ->
-            %% clusters are not connected, connect them
-
-            %% write some initial data to A
-            lager:info("Writing 100 keys to ~p", [AFirst]),
-            ?assertEqual([], do_write(AFirst, 1, 100, TestBucket, 2)),
-
-            rt:log_to_nodes(AllNodes, "Adding listeners"),
-            %% setup servers/listeners on A
-            Listeners = add_listeners(ANodes),
-            rt:wait_until_ring_converged(ANodes),
-
-            %% verify servers are visible on all nodes
-            verify_listeners(Listeners),
-
-            lager:info("waiting for leader to converge on cluster A"),
-            ?assertEqual(ok, wait_until_leader_converge(ANodes)),
-            lager:info("waiting for leader to converge on cluster B"),
-            ?assertEqual(ok, wait_until_leader_converge(BNodes)),
-
-            %% get the leader for the first cluster
-            LeaderA = rpc:call(AFirst, riak_repl_leader, leader_node, []),
-
-            %% list of listeners not on the leader node
-            NonLeaderListeners = lists:keydelete(LeaderA, 3, Listeners),
-
-            rt:log_to_nodes(AllNodes, "Setup replication sites"),
-            %% setup sites on B
-            %% TODO: make `NumSites' an argument
-            NumSites = 4,
-            {Ip, Port, _} = hd(NonLeaderListeners),
-            add_site(hd(BNodes), {Ip, Port, "site1"}),
-            rt:wait_until_ring_converged(BNodes),
-            FakeListeners = gen_fake_listeners(NumSites-1),
-            add_fake_sites(BNodes, FakeListeners),
-            rt:wait_until_ring_converged(BNodes),
-
-            rt:log_to_nodes(AllNodes, "Verify replication sites"),
-            %% verify sites are distributed on B
-            verify_sites_balanced(NumSites, BNodes),
-
-            wait_until_connection(LeaderA),
-            %% check the listener IPs were all imported into the site
-            wait_for_site_ips(BFirst, "site1", Listeners);
-        _ ->
-            lager:info("waiting for leader to converge on cluster A"),
-            ?assertEqual(ok, wait_until_leader_converge(ANodes)),
-            lager:info("waiting for leader to converge on cluster B"),
-            ?assertEqual(ok, wait_until_leader_converge(BNodes)),
-            %% get the leader for the first cluster
-            LeaderA = rpc:call(AFirst, riak_repl_leader, leader_node, []),
-            
lager:info("Leader on cluster A is ~p", [LeaderA]), - [{Ip, Port, _}|_] = get_listeners(LeaderA) - end, - - rt:log_to_nodes(AllNodes, "Write data to A"), - %% write some data on A - ?assertEqual(ok, wait_until_connection(LeaderA)), - %io:format("~p~n", [rpc:call(LeaderA, riak_repl_console, status, [quiet])]), - lager:info("Writing 100 more keys to ~p", [LeaderA]), - ?assertEqual([], do_write(LeaderA, 101, 200, TestBucket, 2)), - - rt:log_to_nodes(AllNodes, "Verify data received on B"), - %% verify data is replicated to B - lager:info("Reading 100 keys written to ~p from ~p", [LeaderA, BFirst]), - ?assertEqual(0, wait_for_reads(BFirst, 101, 200, TestBucket, 2)), - - case Connected of - false -> - %% check that the keys we wrote initially aren't replicated yet, because - %% we've disabled fullsync_on_connect - lager:info("Check keys written before repl was connected are not present"), - Res2 = rt:systest_read(BFirst, 1, 100, TestBucket, 2), - ?assertEqual(100, length(Res2)), - - start_and_wait_until_fullsync_complete(LeaderA), - - lager:info("Check keys written before repl was connected are present"), - ?assertEqual(0, wait_for_reads(BFirst, 1, 200, TestBucket, 2)); - _ -> - ok - end, - - ASecond = hd(ANodes -- [LeaderA]), - - %% disconnect the other cluster, so realtime doesn't happen - lager:info("disconnect the 2 clusters"), - del_site(BNodes, "site1"), - ?assertEqual(ok, wait_until_no_connection(LeaderA)), - - lager:info("write 2000 keys"), - ?assertEqual([], do_write(ASecond, 50000, 52000, - TestBucket, 2)), - - lager:info("reconnect the 2 clusters"), - add_site(hd(BNodes), {Ip, Port, "site1"}), - ?assertEqual(ok, wait_until_connection(LeaderA)), - - start_and_wait_until_fullsync_complete(LeaderA), - - lager:info("read 2000 keys"), - ?assertEqual(0, wait_for_reads(BFirst, 50000, 52000, TestBucket, 2)), - - %% - %% Failover tests - %% - - rt:log_to_nodes(AllNodes, "Testing master failover: stopping ~p", [LeaderA]), - lager:info("Testing master failover: stopping ~p", [LeaderA]), - rt:stop(LeaderA), - rt:wait_until_unpingable(LeaderA), - wait_until_leader(ASecond), - - LeaderA2 = rpc:call(ASecond, riak_repl_leader, leader_node, []), - - lager:info("New leader is ~p", [LeaderA2]), - - ?assertEqual(ok, wait_until_connection(LeaderA2)), - - lager:info("Writing 100 more keys to ~p now that the old leader is down", - [ASecond]), - - ?assertEqual([], do_write(ASecond, 201, 300, TestBucket, 2)), - - %% verify data is replicated to B - lager:info("Reading 100 keys written to ~p from ~p", [ASecond, BFirst]), - ?assertEqual(0, wait_for_reads(BFirst, 201, 300, TestBucket, 2)), - - %% get the leader for the first cluster - LeaderB = rpc:call(BFirst, riak_repl_leader, leader_node, []), - - lager:info("Testing client failover: stopping ~p", [LeaderB]), - rt:stop(LeaderB), - rt:wait_until_unpingable(LeaderB), - BSecond = hd(BNodes -- [LeaderB]), - wait_until_leader(BSecond), - - LeaderB2 = rpc:call(BSecond, riak_repl_leader, leader_node, []), - - lager:info("New leader is ~p", [LeaderB2]), - - ?assertEqual(ok, wait_until_connection(LeaderA2)), - - lager:info("Writing 100 more keys to ~p now that the old leader is down", - [ASecond]), - - ?assertEqual([], do_write(ASecond, 301, 400, TestBucket, 2)), - - %% verify data is replicated to B - rt:wait_until_pingable(BSecond), - lager:info("Reading 101 keys written to ~p from ~p", [ASecond, BSecond]), - ?assertEqual(0, wait_for_reads(BSecond, 301, 400, TestBucket, 2)), - - %% Testing fullsync with downed nodes - lager:info("Re-running fullsync with ~p and ~p 
down", [LeaderA, LeaderB]), - - start_and_wait_until_fullsync_complete(LeaderA2), - - %% - %% Per-bucket repl settings tests - %% - - lager:info("Restarting down node ~p", [LeaderA]), - rt:start(LeaderA), - rt:wait_until_pingable(LeaderA), - rt:wait_until_no_pending_changes(ANodes), - wait_until_leader_converge(ANodes), - start_and_wait_until_fullsync_complete(LeaderA2), - - case nodes_all_have_version(ANodes, "1.2.2") of - true -> - - lager:info("Starting Joe's Repl Test"), - - %% At this point, realtime sync should still work, but, it doesn't - %% because of a bug in 1.2.1. - %% Check that repl leader is LeaderA - %% Check that LeaderA2 has ceeded socket back to LeaderA - - lager:info("Leader: ~p", [rpc:call(ASecond, riak_repl_leader, leader_node, [])]), - lager:info("LeaderA: ~p", [LeaderA]), - lager:info("LeaderA2: ~p", [LeaderA2]), - - ?assertEqual(ok, wait_until_connection(LeaderA)), - - lager:info("Simulation partition to force leader re-election"), - - OldCookie = rpc:call(LeaderA2, erlang, get_cookie, []), - NewCookie = list_to_atom(lists:reverse(atom_to_list(OldCookie))), - rpc:call(LeaderA2, erlang, set_cookie, [LeaderA2, NewCookie]), - - [ rpc:call(LeaderA2, erlang, disconnect_node, [Node]) || - Node <- ANodes -- [LeaderA2]], - [ rpc:call(Node, erlang, disconnect_node, [LeaderA2]) || - Node <- ANodes -- [LeaderA2]], - - wait_until_new_leader(hd(ANodes -- [LeaderA2]), LeaderA2), - InterimLeader = rpc:call(LeaderA, riak_repl_leader, leader_node, []), - lager:info("Interim leader: ~p", [InterimLeader]), - - rpc:call(LeaderA2, erlang, set_cookie, [LeaderA2, OldCookie]), - - [ rpc:call(LeaderA2, net_adm, ping, [Node]) || - Node <- ANodes -- [LeaderA2]], - [ rpc:call(Node, net_adm, ping, [LeaderA2]) || - Node <- ANodes -- [LeaderA2]], - - %% there's no point in writing anything until the leaders - %% converge, as we can drop writes in the middle of an election - wait_until_leader_converge(ANodes), - - LeaderA3 = rpc:call(ASecond, riak_repl_leader, leader_node, []), - - wait_until_connection(LeaderA3), - - lager:info("Leader: ~p", [LeaderA3]), - lager:info("Writing 2 more keys to ~p", [LeaderA3]), - ?assertEqual([], do_write(LeaderA3, 1301, 1302, TestBucket, 2)), - - %% verify data is replicated to B - lager:info("Reading 2 keys written to ~p from ~p", [LeaderA3, BSecond]), - ?assertEqual(0, wait_for_reads(BSecond, 1301, 1302, TestBucket, 2)), - - lager:info("Finished Joe's Section"), - - lager:info("Nodes restarted"); - _ -> - lager:info("Skipping Joe's Repl Test") - end, - - lager:info("Restarting down node ~p", [LeaderB]), - rt:start(LeaderB), - rt:wait_until_pingable(LeaderB), - - case nodes_all_have_version(ANodes, "1.1.0") of - true -> - - make_bucket(ANodes, NoRepl, [{repl, false}]), - - case nodes_all_have_version(ANodes, "1.2.0") of - true -> - make_bucket(ANodes, RealtimeOnly, [{repl, realtime}]), - make_bucket(ANodes, FullsyncOnly, [{repl, fullsync}]), - - %% disconnect the other cluster, so realtime doesn't happen - lager:info("disconnect the 2 clusters"), - del_site(BNodes, "site1"), - ?assertEqual(ok, wait_until_no_connection(LeaderA)), - - lager:info("write 100 keys to a realtime only bucket"), - ?assertEqual([], do_write(ASecond, 1, 100, - RealtimeOnly, 2)), - - lager:info("reconnect the 2 clusters"), - add_site(LeaderB, {Ip, Port, "site1"}), - ?assertEqual(ok, wait_until_connection(LeaderA)); - _ -> - timer:sleep(1000) - end, - - LeaderA4 = rpc:call(ASecond, riak_repl_leader, leader_node, []), - - lager:info("write 100 keys to a {repl, false} bucket"), - 
?assertEqual([], do_write(ASecond, 1, 100, NoRepl, 2)), - - case nodes_all_have_version(ANodes, "1.2.0") of - true -> - lager:info("write 100 keys to a fullsync only bucket"), - ?assertEqual([], do_write(ASecond, 1, 100, - FullsyncOnly, 2)), - - lager:info("Check the fullsync only bucket didn't replicate the writes"), - Res6 = rt:systest_read(BSecond, 1, 100, FullsyncOnly, 2), - ?assertEqual(100, length(Res6)), - - lager:info("Check the realtime only bucket that was written to offline " - "isn't replicated"), - Res7 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2), - ?assertEqual(100, length(Res7)); - _ -> - timer:sleep(1000) - end, - - lager:info("Check the {repl, false} bucket didn't replicate"), - Res8 = rt:systest_read(BSecond, 1, 100, NoRepl, 2), - ?assertEqual(100, length(Res8)), - - %% do a fullsync, make sure that fullsync_only is replicated, but - %% realtime_only and no_repl aren't - start_and_wait_until_fullsync_complete(LeaderA4), - - case nodes_all_have_version(ANodes, "1.2.0") of - true -> - lager:info("Check fullsync only bucket is now replicated"), - ?assertEqual(0, wait_for_reads(BSecond, 1, 100, - FullsyncOnly, 2)), - - lager:info("Check realtime only bucket didn't replicate"), - Res10 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2), - ?assertEqual(100, length(Res10)), - - - lager:info("Write 100 more keys into realtime only bucket"), - ?assertEqual([], do_write(ASecond, 101, 200, - RealtimeOnly, 2)), - - timer:sleep(5000), - - lager:info("Check the realtime keys replicated"), - ?assertEqual(0, wait_for_reads(BSecond, 101, 200, - RealtimeOnly, 2)), - - lager:info("Check the older keys in the realtime bucket did not replicate"), - Res12 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2), - ?assertEqual(100, length(Res12)); - _ -> - ok - end, - - lager:info("Check {repl, false} bucket didn't replicate"), - Res13 = rt:systest_read(BSecond, 1, 100, NoRepl, 2), - ?assertEqual(100, length(Res13)); - _ -> - ok - end, - - lager:info("Test passed"), - fin. - -verify_sites_balanced(NumSites, BNodes0) -> - Leader = rpc:call(hd(BNodes0), riak_repl_leader, leader_node, []), - case node_has_version(Leader, "1.2.0") of - true -> - BNodes = nodes_with_version(BNodes0, "1.2.0") -- [Leader], - NumNodes = length(BNodes), - case NumNodes of - 0 -> - %% only leader is upgraded, runs clients locally - ?assertEqual(NumSites, client_count(Leader)); - _ -> - NodeCounts = [{Node, client_count(Node)} || Node <- BNodes], - lager:notice("nodecounts ~p", [NodeCounts]), - lager:notice("leader ~p", [Leader]), - Min = NumSites div NumNodes, - [?assert(Count >= Min) || {_Node, Count} <- NodeCounts] - end; - false -> - ok - end. - -%% does the node meet the version requirement? -node_has_version(Node, Version) -> - {_, NodeVersion} = rpc:call(Node, init, script_id, []), - case NodeVersion of - current -> - %% current always satisfies any version check - true; - _ -> - NodeVersion >= Version - end. - -nodes_with_version(Nodes, Version) -> - [Node || Node <- Nodes, node_has_version(Node, Version)]. - -nodes_all_have_version(Nodes, Version) -> - Nodes == nodes_with_version(Nodes, Version). - -client_count(Node) -> - Clients = rpc:call(Node, supervisor, which_children, [riak_repl_client_sup]), - length(Clients). - -gen_fake_listeners(Num) -> - Ports = gen_ports(11000, Num), - IPs = lists:duplicate(Num, "127.0.0.1"), - Nodes = [fake_node(N) || N <- lists:seq(1, Num)], - lists:zip3(IPs, Ports, Nodes). - -fake_node(Num) -> - lists:flatten(io_lib:format("fake~p@127.0.0.1", [Num])). 
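
%% verify_sites_balanced/2 above asserts a floor rather than exact
%% equality: with NumSites sites spread over the upgraded non-leader
%% nodes, each node must host at least NumSites div NumNodes repl
%% clients. The invariant on its own, as a small sketch:

%% NodeCounts is a [{Node, ClientCount}] list; the distribution is
%% accepted when every node carries at least the integer-division floor.
balanced(NodeCounts, NumSites) ->
    Min = NumSites div length(NodeCounts),
    lists:all(fun({_Node, Count}) -> Count >= Min end, NodeCounts).
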
- -add_fake_sites([Node|_], Listeners) -> - [add_site(Node, {IP, Port, fake_site(Port)}) - || {IP, Port, _} <- Listeners]. - -add_site(Node, {IP, Port, Name}) -> - lager:info("Add site ~p ~p:~p at node ~p", [Name, IP, Port, Node]), - Args = [IP, integer_to_list(Port), Name], - Res = rpc:call(Node, riak_repl_console, add_site, [Args]), - ?assertEqual(ok, Res), - timer:sleep(timer:seconds(5)). - -del_site([Node|_]=Nodes, Name) -> - lager:info("Del site ~p at ~p", [Name, Node]), - Res = rpc:call(Node, riak_repl_console, del_site, [[Name]]), - ?assertEqual(ok, Res), - rt:wait_until_ring_converged(Nodes), - timer:sleep(timer:seconds(5)). - -fake_site(Port) -> - lists:flatten(io_lib:format("fake_site_~p", [Port])). - -verify_listeners(Listeners) -> - Strs = [IP ++ ":" ++ integer_to_list(Port) || {IP, Port, _} <- Listeners], - [?assertEqual(ok, verify_listener(Node, Strs)) || {_, _, Node} <- Listeners]. - -verify_listener(Node, Strs) -> - lager:info("Verify listeners ~p ~p", [Node, Strs]), - rt:wait_until(Node, - fun(_) -> - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - lists:all(fun(Str) -> - lists:keymember(Str, 2, Status) - end, Strs) - end). - -add_listeners(Nodes=[FirstNode|_]) -> - Ports = gen_ports(9010, length(Nodes)), - IPs = lists:duplicate(length(Nodes), "127.0.0.1"), - PN = lists:zip3(IPs, Ports, Nodes), - [add_listener(FirstNode, Node, IP, Port) || {IP, Port, Node} <- PN], - timer:sleep(timer:seconds(5)), - PN. - -add_listener(N, Node, IP, Port) -> - lager:info("Adding repl listener to ~p ~s:~p", [Node, IP, Port]), - Args = [[atom_to_list(Node), IP, integer_to_list(Port)]], - Res = rpc:call(N, riak_repl_console, add_listener, Args), - ?assertEqual(ok, Res). - -del_listeners(Node) -> - Listeners = get_listeners(Node), - lists:foreach(fun(Listener={IP, Port, N}) -> - lager:info("deleting listener ~p on ~p", [Listener, Node]), - Res = rpc:call(Node, riak_repl_console, del_listener, - [[atom_to_list(N), IP, integer_to_list(Port)]]), - ?assertEqual(ok, Res) - end, Listeners). - -get_listeners(Node) -> - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - %% *sigh* - [ - begin - NodeName = list_to_atom(string:substr(K, 10)), - [IP, Port] = string:tokens(V, ":"), - {IP, list_to_integer(Port), NodeName} - end || {K, V} <- Status, is_list(K), string:substr(K, 1, 9) == "listener_" - ]. - -gen_ports(Start, Len) -> - lists:seq(Start, Start + Len - 1). - -wait_for_site_ips(Leader, Site, Listeners) -> - rt:wait_until(verify_site_ips_fun(Leader, Site, Listeners)). - -verify_site_ips_fun(Leader, Site, Listeners) -> - fun() -> - Status = rpc:call(Leader, riak_repl_console, status, [quiet]), - Key = lists:flatten([Site, "_ips"]), - IPStr = proplists:get_value(Key, Status), - lager:info("IPSTR: ~p", [IPStr]), - IPs = lists:sort(re:split(IPStr, ", ")), - ExpectedIPs = lists:sort( - [list_to_binary([IP, ":", integer_to_list(Port)]) || {IP, Port, _Node} <- - Listeners]), - lager:info("ExpectedIPs: ~p IPs: ~p", [ExpectedIPs, IPs]), - ExpectedIPs =:= IPs - end. - -make_bucket([Node|_]=Nodes, Name, Args) -> - Res = rpc:call(Node, riak_core_bucket, set_bucket, [Name, Args]), - rt:wait_until_ring_converged(Nodes), - ?assertEqual(ok, Res). - -start_and_wait_until_fullsync_complete(Node) -> - start_and_wait_until_fullsync_complete(Node, 20). 
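
%% get_listeners/1 above recovers structured tuples from flat status
%% entries whose keys look like "listener_<nodename>" and whose values
%% look like "ip:port" (shapes inferred from the string operations it
%% performs). The parse step in isolation:

%% e.g. parse_listener({"listener_dev1@127.0.0.1", "127.0.0.1:9010"})
%%      -> {"127.0.0.1", 9010, 'dev1@127.0.0.1'}
parse_listener({"listener_" ++ NodeName, IpPort}) ->
    [IP, Port] = string:tokens(IpPort, ":"),
    {IP, list_to_integer(Port), list_to_atom(NodeName)}.
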
- -start_and_wait_until_fullsync_complete(Node, Retries) -> - Status0 = rpc:call(Node, riak_repl_console, status, [quiet]), - Count = proplists:get_value(server_fullsyncs, Status0) + 1, - lager:info("waiting for fullsync count to be ~p", [Count]), - - lager:info("Starting fullsync on ~p (~p)", [Node, - rtdev:node_version(rtdev:node_id(Node))]), - rpc:call(Node, riak_repl_console, start_fullsync, [[]]), - - %% sleep because of the old bug where stats will crash if you call it too - %% soon after starting a fullsync - timer:sleep(500), - - case rt:wait_until(make_fullsync_wait_fun(Node, Count), 100, 1000) of - ok -> - ok; - _ when Retries > 0 -> - ?assertEqual(ok, wait_until_connection(Node)), - lager:warning("Node failed to fullsync, retrying"), - start_and_wait_until_fullsync_complete(Node, Retries-1) - end, - lager:info("Fullsync on ~p complete", [Node]). - -make_fullsync_wait_fun(Node, Count) -> - fun() -> - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - case Status of - {badrpc, _} -> - false; - _ -> - case proplists:get_value(server_fullsyncs, Status) of - C when C >= Count -> - true; - _ -> - false - end - end - end. - -wait_until_is_leader(Node) -> - lager:info("wait_until_is_leader(~p)", [Node]), - rt:wait_until(Node, fun is_leader/1). - -is_leader(Node) -> - case rpc:call(Node, riak_repl_leader, leader_node, []) of - {badrpc, _} -> - lager:info("Badrpc"), - false; - Leader -> - lager:info("Checking: ~p =:= ~p", [Leader, Node]), - Leader =:= Node - end. - - -wait_until_is_not_leader(Node) -> - lager:info("wait_until_is_not_leader(~p)", [Node]), - rt:wait_until(Node, fun is_not_leader/1). - -is_not_leader(Node) -> - case rpc:call(Node, riak_repl_leader, leader_node, []) of - {badrpc, _} -> - lager:info("Badrpc"), - false; - Leader -> - lager:info("Checking: ~p =/= ~p", [Leader, Node]), - Leader =/= Node - end. - -wait_until_leader(Node) -> - wait_until_new_leader(Node, undefined). - -wait_until_new_leader(Node, OldLeader) -> - Res = rt:wait_until(Node, - fun(_) -> - case rpc:call(Node, riak_repl_console, status, [quiet]) of - {badrpc, _} -> - false; - Status -> - case proplists:get_value(leader, Status) of - undefined -> - false; - OldLeader -> - false; - _Other -> - true - end - end - end), - ?assertEqual(ok, Res). - -wait_until_leader_converge([Node|_] = Nodes) -> - rt:wait_until(Node, - fun(_) -> - LeaderResults = - [get_leader(rpc:call(N, riak_repl_console, status, [quiet])) || - N <- Nodes], - {Leaders, Errors} = - lists:partition(leader_result_filter_fun(), LeaderResults), - UniqueLeaders = lists:usort(Leaders), - Errors == [] andalso length(UniqueLeaders) == 1 - end). - -get_leader({badrpc, _}=Err) -> - Err; -get_leader(Status) -> - case proplists:get_value(leader, Status) of - undefined -> - false; - L -> - %%lager:info("Leader for ~p is ~p", - %%[N,L]), - L - end. - -leader_result_filter_fun() -> - fun(L) -> - case L of - undefined -> - false; - {badrpc, _} -> - false; - _ -> - true - end - end. - -wait_until_connection(Node) -> - rt:wait_until(Node, - fun(_) -> - case rpc:call(Node, riak_repl_console, status, [quiet]) of - {badrpc, _} -> - false; - Status -> - case proplists:get_value(server_stats, Status) of - [] -> - false; - [{_, _, too_busy}] -> - false; - [_C] -> - true; - Conns -> - lager:warning("multiple connections detected: ~p", - [Conns]), - true - end - end - end). 
%% 40 seconds is enough for repl - -wait_until_no_connection(Node) -> - rt:wait_until(Node, - fun(_) -> - case rpc:call(Node, riak_repl_console, status, [quiet]) of - {badrpc, _} -> - false; - Status -> - case proplists:get_value(server_stats, Status) of - [] -> - true; - [{_, _, too_busy}] -> - false; - [_C] -> - false; - Conns -> - lager:warning("multiple connections detected: ~p", - [Conns]), - false - end - end - end). %% 40 seconds is enough for repl - - -wait_for_reads(Node, Start, End, Bucket, R) -> - rt:wait_until(Node, - fun(_) -> - rt:systest_read(Node, Start, End, Bucket, R) == [] - end), - Reads = rt:systest_read(Node, Start, End, Bucket, R), - lager:info("Reads: ~p", [Reads]), - length(Reads). - -do_write(Node, Start, End, Bucket, W) -> - case rt:systest_write(Node, Start, End, Bucket, W) of - [] -> - []; - Errors -> - lager:warning("~p errors while writing: ~p", - [length(Errors), Errors]), - timer:sleep(1000), - lists:flatten([rt:systest_write(Node, S, S, Bucket, W) || - {S, _Error} <- Errors]) - end. diff --git a/tests/replication2.erl b/tests/replication2.erl deleted file mode 100644 index cc9b07e70..000000000 --- a/tests/replication2.erl +++ /dev/null @@ -1,770 +0,0 @@ --module(replication2). --behavior(riak_test). --export([confirm/0, replication/3]). --include_lib("eunit/include/eunit.hrl"). - --import(rt, [join/2, - log_to_nodes/2, - log_to_nodes/3, - wait_until_nodes_ready/1, - wait_until_no_pending_changes/1]). - -confirm() -> - - NumNodes = rt_config:get(num_nodes, 6), - ClusterASize = rt_config:get(cluster_a_size, 3), - - lager:info("Deploy ~p nodes", [NumNodes]), - Conf = [ - {riak_kv, - [ - %% Specify fast building of AAE trees - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {diff_batch_size, 10} - ]} - ], - - lager:info("Building Clusters A and B"), - [ANodes, BNodes] = rt:build_clusters([{ClusterASize, Conf}, {NumNodes - ClusterASize, Conf}]), - - %lager:info("Skipping all tests"), - replication(ANodes, BNodes, false), - pass. - - -replication(ANodes, BNodes, Connected) -> - - log_to_nodes(ANodes ++ BNodes, "Starting replication2 test"), - lager:info("Connection Status: ~p", [Connected]), - - lager:info("Real Time Full Sync Replication test"), - real_time_replication_test(ANodes, BNodes, Connected), - - lager:info("Disconnected cluster Full Sync test"), - disconnected_cluster_fsync_test(ANodes, BNodes), - - lager:info("Failover tests"), - master_failover_test(ANodes, BNodes), - - lager:info("Network Partition test"), - network_partition_test(ANodes, BNodes), - - lager:info("Bucket Sync tests"), - bucket_sync_test(ANodes, BNodes), - - lager:info("Offline queueing tests"), - offline_queueing_tests(ANodes, BNodes), - - lager:info("Protocol Buffer writes during shutdown test"), - pb_write_during_shutdown(ANodes, BNodes), - - lager:info("HTTP writes during shutdown test"), - http_write_during_shutdown(ANodes, BNodes), - - lager:info("Tests passed"), - - fin. - - -%% @doc Real time replication test -%% Test Cycle: -%% Write some keys with full sync disabled. -%% Write some keys with full sync enabled. -%% Check for keys written prior to full sync. -%% Check all keys. 
-real_time_replication_test([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) ->
-
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) || <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-
-    case Connected of
-        false ->
-            %% Before connecting clusters, write some initial data to Cluster A
-            lager:info("Writing 100 keys to ~p", [AFirst]),
-            ?assertEqual([], repl_util:do_write(AFirst, 1, 100, TestBucket, 2)),
-
-            repl_util:name_cluster(AFirst, "A"),
-            repl_util:name_cluster(BFirst, "B"),
-
-            %% Wait for Cluster naming to converge.
-            rt:wait_until_ring_converged(ANodes),
-            rt:wait_until_ring_converged(BNodes),
-
-            lager:info("Waiting for leader to converge on cluster A"),
-            ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)),
-            lager:info("Waiting for leader to converge on cluster B"),
-            ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)),
-
-            %% Get the leader for the first cluster.
-            LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []), %% Ask Cluster "A" Node 1 who the leader is.
-
-            {ok, {_IP, BFirstPort}} = rpc:call(BFirst, application, get_env, [riak_core, cluster_mgr]),
-
-            lager:info("connect cluster A:~p to B on port ~p", [LeaderA, BFirstPort]),
-            repl_util:connect_cluster(LeaderA, "127.0.0.1", BFirstPort),
-            ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-
-            repl_util:enable_realtime(LeaderA, "B"),
-            rt:wait_until_ring_converged(ANodes),
-            repl_util:start_realtime(LeaderA, "B"),
-            rt:wait_until_ring_converged(ANodes),
-            repl_util:enable_fullsync(LeaderA, "B"),
-            rt:wait_until_ring_converged(ANodes);
-
-        %% Cluster "A" and Cluster "B" are now connected with real time and full sync enabled.
-        _ ->
-            lager:info("Clusters should already be connected"),
-            lager:info("Waiting for leader to converge on cluster A"),
-            ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)),
-            lager:info("Waiting for leader to converge on cluster B"),
-
-            ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)),
-            %% get the leader for the first cluster
-            LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-
-            lager:info("Leader on cluster A is ~p", [LeaderA]),
-            lager:info("BFirst on cluster B is ~p", [BFirst]),
-            {ok, {_IP, BFirstPort}} = rpc:call(BFirst, application, get_env, [riak_core, cluster_mgr]),
-            lager:info("B is ~p with port ~p", [BFirst, BFirstPort])
-    end,
-
-    log_to_nodes(ANodes++BNodes, "Write data to Cluster A, verify replication to Cluster B via realtime"),
-    lager:info("Writing 100 keys to Cluster A-LeaderNode: ~p", [LeaderA]), % This export from Case isn't my favorite.
-    ?assertEqual([], repl_util:do_write(LeaderA, 101, 200, TestBucket, 2)),
-
-    lager:info("Reading 100 keys written to Cluster A-LeaderNode: ~p from Cluster B-Node: ~p", [LeaderA, BFirst]),
-    ?assertEqual(0, repl_util:wait_for_reads(BFirst, 101, 200, TestBucket, 2)),
-
-    case Connected of
-        false ->
-            %% Check that the keys we wrote initially aren't replicated yet as
-            %% fullsync_on_connect is disabled.
-            lager:info("Check keys written before repl was connected are not present"),
-            Res2 = rt:systest_read(BFirst, 1, 100, TestBucket, 2),
-            ?assertEqual(100, length(Res2)),
-
-            log_to_nodes(ANodes++BNodes, "Test fullsync with leader ~p", [LeaderA]),
-            repl_util:start_and_wait_until_fullsync_complete(LeaderA),
-
-            case rpc:call(LeaderA, init, script_id, []) of
-                {"riak", Vsn} when Vsn > "1.4" ->
-                    %% check that the number of successful FS source exits matches the number of partitions
-                    NumExits = repl_util:get_fs_coord_status_item(LeaderA, "B", successful_exits),
-                    NumPartitions = repl_util:num_partitions(LeaderA),
-                    ?assertEqual(NumPartitions, NumExits);
-                _ ->
-                    ok
-            end,
-
-            lager:info("Check keys written before repl was connected are present"),
-            ?assertEqual(0, repl_util:wait_for_reads(BFirst, 1, 200, TestBucket, 2));
-        _ ->
-            ok
-    end.
-
-
-%% @doc Disconnected Clusters Full Sync Test
-%% Test Cycle:
-%% Disconnect Clusters "A" and "B".
-%% Write 2000 keys to Cluster "A".
-%% Reconnect Clusters "A" and "B" and enable real time and full sync.
-%% Read 2000 keys from Cluster "B".
-disconnected_cluster_fsync_test([AFirst|_] = ANodes, [BFirst|_] = BNodes) ->
-
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) || <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-    {ok, {_IP, BFirstPort}} = rpc:call(BFirst, application, get_env, [riak_core, cluster_mgr]),
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-
-    log_to_nodes(ANodes ++ BNodes, "Starting disconnected full sync test"),
-
-    %% Disconnect Cluster B to disable real time sync
-    lager:info("Disconnect the 2 clusters"),
-    repl_util:disable_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-    repl_util:disconnect_cluster(LeaderA, "B"),
-    repl_util:wait_until_no_connection(LeaderA),
-    rt:wait_until_ring_converged(ANodes),
-
-    lager:info("Write 2000 keys"),
-    ?assertEqual([], repl_util:do_write(LeaderA, 50000, 52000, TestBucket, 2)),
-
-    lager:info("Reconnect the 2 clusters"),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", BFirstPort),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-    rt:wait_until_ring_converged(ANodes),
-    repl_util:enable_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-    repl_util:start_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-    ?assertEqual(ok, repl_util:wait_until_connection(LeaderA)),
-
-    repl_util:start_and_wait_until_fullsync_complete(LeaderA),
-
-    lager:info("Read 2000 keys"),
-    ?assertEqual(0, repl_util:wait_for_reads(BFirst, 50000, 52000, TestBucket, 2)).
-
-
-%% @doc Master Failover Test
-%% Test Cycle:
-%% Stop Cluster "A" leader.
-%% Get new Cluster "A" leader.
-%% Write 100 keys to new Cluster "A" leader.
-%% Verify 100 keys are replicated to Cluster "B".
-%% Get Cluster "B" leader and shut it down.
-%% Get new Cluster "B" leader.
-%% Write 100 keys to Cluster "A".
-%% Verify 100 keys are replicated to Cluster "B".
-master_failover_test([AFirst|_] = ANodes, [BFirst|_] = BNodes) ->
-
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) || <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-
-    log_to_nodes(ANodes ++ BNodes, "Failover tests"),
-    log_to_nodes(ANodes ++ BNodes, "Testing master failover: stopping ~p", [LeaderA]),
-
-    lager:info("Testing master failover: stopping ~p", [LeaderA]),
-    rt:stop(LeaderA),
-    rt:wait_until_unpingable(LeaderA),
-    ASecond = hd(ANodes -- [LeaderA]),
-    repl_util:wait_until_leader(ASecond),
-
-    LeaderA2 = rpc:call(ASecond, riak_core_cluster_mgr, get_leader, []),
-
-    lager:info("New leader is ~p", [LeaderA2]),
-    ?assertEqual(ok, repl_util:wait_until_connection(LeaderA2)),
-
-    %% ASecond is the first remaining node once LeaderA is excluded
-    lager:info("Writing 100 more keys to ~p now that the old leader is down", [ASecond]),
-    ?assertEqual([], repl_util:do_write(ASecond, 201, 300, TestBucket, 2)),
-
-    %% Verify data is replicated to Cluster "B"
-    lager:info("Reading 100 keys written to ~p from ~p", [ASecond, BFirst]),
-    ?assertEqual(0, repl_util:wait_for_reads(BFirst, 201, 300, TestBucket, 2)),
-
-    %% Get the leader for Cluster "B"
-    LeaderB = rpc:call(BFirst, riak_core_cluster_mgr, get_leader, []),
-
-    log_to_nodes(ANodes ++ BNodes, "Testing client failover: stopping ~p", [LeaderB]),
-
-    lager:info("Testing client failover: stopping ~p", [LeaderB]),
-    rt:stop(LeaderB),
-    rt:wait_until_unpingable(LeaderB),
-    BSecond = hd(BNodes -- [LeaderB]),
-    repl_util:wait_until_leader(BSecond),
-
-    LeaderB2 = rpc:call(BSecond, riak_core_cluster_mgr, get_leader, []),
-
-    lager:info("New leader is ~p", [LeaderB2]),
-    ?assertEqual(ok, repl_util:wait_until_connection(LeaderA2)),
-
-    lager:info("Writing 100 more keys to ~p now that the old leader is down", [ASecond]),
-    ?assertEqual([], repl_util:do_write(ASecond, 301, 400, TestBucket, 2)),
-
-    %% Verify data is replicated to Cluster B
-    lager:info("Reading 100 keys written to ~p from ~p", [ASecond, BSecond]),
-    ?assertEqual(0, repl_util:wait_for_reads(BSecond, 301, 400, TestBucket, 2)),
-
-    log_to_nodes(ANodes ++ BNodes, "Test fullsync with ~p and ~p down", [LeaderA, LeaderB]),
-    lager:info("Re-running fullsync with ~p and ~p down", [LeaderA, LeaderB]),
-
-    repl_util:start_and_wait_until_fullsync_complete(LeaderA2),
-
-    %% This says test full sync, but there's never a verification step.
-    log_to_nodes(ANodes ++ BNodes, "Test fullsync after restarting ~p", [LeaderA]),
-
-    %% Put everything back to 'normal'.
-    lager:info("Restarting down nodes ~p, ~p", [LeaderA, LeaderB]),
-    rt:start(LeaderA),
-    rt:start(LeaderB),
-    rt:wait_until_pingable(LeaderA),
-    rt:wait_until_pingable(LeaderB),
-    rt:wait_for_service(LeaderA, [riak_kv, riak_repl]),
-    lager:info("Nodes restarted"),
-    repl_util:start_and_wait_until_fullsync_complete(LeaderA2).
-
-
-%% @doc Network Partition Test
-%% Test Cycle:
-%% Take the Cluster "A" leader and swap its cookie for a new cookie.
-%% Disconnect node with new cookie from the rest of the nodes.
-%% Nodes will elect a new leader.
-%% Reset cookie on disconnected node and reconnect.
-%% Write 2 keys to node that was reconnected.
-%% Verify replication of keys to Cluster "B".
-network_partition_test([AFirst|_] = ANodes, [BFirst|_] = BNodes) ->
-
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) || <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-
-    log_to_nodes(ANodes ++ BNodes, "Starting network partition test"),
-
-    %% @todo add stuff
-    %% At this point, realtime sync should still work, but it doesn't because of a bug in 1.2.1
-    %% Check that repl leader is LeaderA
-    %% Check that LeaderA2 has ceded socket back to LeaderA
-
-    %%lager:info("Leader: ~p", [rpc:call(ASecond, riak_core_cluster_mgr, get_leader, [])]),
-    %%lager:info("LeaderA: ~p", [LeaderA]),
-    %%lager:info("LeaderA2: ~p", [LeaderA2]),
-
-    ?assertEqual(ok, repl_util:wait_until_connection(LeaderA)),
-
-    %% Swap cookie on LeaderA to simulate a network partition.
-    OldCookie = rpc:call(LeaderA, erlang, get_cookie, []),
-    NewCookie = list_to_atom(lists:reverse(atom_to_list(OldCookie))),
-    rpc:call(LeaderA, erlang, set_cookie, [LeaderA, NewCookie]),
-
-    [rpc:call(LeaderA, erlang, disconnect_node, [Node]) || Node <- ANodes -- [LeaderA]],
-    [rpc:call(Node, erlang, disconnect_node, [LeaderA]) || Node <- ANodes -- [LeaderA]],
-
-    repl_util:wait_until_new_leader(hd(ANodes -- [LeaderA]), LeaderA),
-    InterimLeader = rpc:call(LeaderA, riak_core_cluster_mgr, get_leader, []),
-    lager:info("Interim leader: ~p", [InterimLeader]),
-
-    rpc:call(LeaderA, erlang, set_cookie, [LeaderA, OldCookie]),
-
-    [rpc:call(LeaderA, net_adm, ping, [Node]) || Node <- ANodes -- [LeaderA]],
-    [rpc:call(Node, net_adm, ping, [LeaderA]) || Node <- ANodes -- [LeaderA]],
-
-    %% There's no point in writing anything until the leaders converge, as we
-    %% can drop writes in the middle of an election
-    repl_util:wait_until_leader_converge(ANodes),
-
-    ASecond = hd(ANodes -- [LeaderA]),
-
-    lager:info("Leader: ~p", [rpc:call(ASecond, riak_core_cluster_mgr, get_leader, [])]),
-    lager:info("Writing 2 more keys to ~p", [LeaderA]),
-    ?assertEqual([], repl_util:do_write(LeaderA, 1301, 1302, TestBucket, 2)),
-
-    %% Verify data is replicated to Cluster "B"
-    lager:info("Reading 2 keys written to ~p from ~p", [LeaderA, BFirst]),
-    ?assertEqual(0, repl_util:wait_for_reads(BFirst, 1301, 1302, TestBucket, 2)),
-
-    log_to_nodes(ANodes ++ BNodes, "Completed network partition test").
-
-
-%% @doc Bucket Sync Test
-%% Test Cycle:
-%% Make 3 buckets on Cluster "A":
-%% -No Replication
-%% -Real Time only
-%% -Full Sync only
-%% Disable real time replication and disconnect Cluster "A" and Cluster "B"
-%% Write 100 keys to real time only bucket.
-%% Reconnect and re-enable real time and full sync between Cluster "A" and Cluster "B"
-%% Write 100 keys to the No Replication bucket
-%% Write 100 keys to the Full Sync only bucket
-%% Verify that Full sync didn't replicate
-%% Verify that real time bucket written to offline didn't replicate
-%% Verify that the No Replication bucket didn't replicate
-%% Restart full sync
-%% Verify full sync bucket replicated
-%% Verify that the Real time keys replicated
-%% Verify that the original real time keys did not replicate
-%% Verify that the No replication bucket didn't replicate.
-bucket_sync_test([AFirst|_] = ANodes, [BFirst|_] = BNodes) -> - - TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) || <> <= erlang:md5(term_to_binary(os:timestamp()))]), - FullsyncOnly = <>, - RealtimeOnly = <>, - NoRepl = <>, - LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []), - {ok, {_IP, BFirstPort}} = rpc:call(BFirst, application, get_env, [riak_core, cluster_mgr]), - - log_to_nodes(ANodes ++ BNodes, "Starting bucket sync test"), - - replication:make_bucket(ANodes, NoRepl, [{repl, false}]), - replication:make_bucket(ANodes, RealtimeOnly, [{repl, realtime}]), - replication:make_bucket(ANodes, FullsyncOnly, [{repl, fullsync}]), - - %% Disable real time and disconnect Cluster "B" to prevent real time replication. - lager:info("Disconnect the 2 clusters"), - repl_util:disable_realtime(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - repl_util:disconnect_cluster(LeaderA, "B"), - repl_util:wait_until_no_connection(LeaderA), - rt:wait_until_ring_converged(ANodes), - - lager:info("Write 100 keys to a real time only bucket"), - ?assertEqual([], repl_util:do_write(AFirst, 1, 100, RealtimeOnly, 2)), - - lager:info("Reconnect Clusters A and B"), - repl_util:connect_cluster(LeaderA, "127.0.0.1", BFirstPort), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - rt:wait_until_ring_converged(ANodes), - repl_util:enable_realtime(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - repl_util:enable_fullsync(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - repl_util:start_realtime(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - ?assertEqual(ok, repl_util:wait_until_connection(LeaderA)), - - log_to_nodes(ANodes ++ BNodes, "Test fullsync and realtime independence"), - - lager:info("write 100 keys to a {repl, false} bucket"), - ?assertEqual([], repl_util:do_write(AFirst, 1, 100, NoRepl, 2)), - - lager:info("write 100 keys to a fullsync only bucket"), - ?assertEqual([], repl_util:do_write(AFirst, 1, 100, FullsyncOnly, 2)), - - lager:info("Check the fullsync only bucket didn't replicate the writes"), - Res6 = rt:systest_read(BFirst, 1, 100, FullsyncOnly, 2), - ?assertEqual(100, length(Res6)), - - lager:info("Check the realtime only bucket that was written to offline isn't replicated"), - Res7 = rt:systest_read(BFirst, 1, 100, RealtimeOnly, 2), - ?assertEqual(100, length(Res7)), - - lager:info("Check the {repl, false} bucket didn't replicate"), - Res8 = rt:systest_read(BFirst, 1, 100, NoRepl, 2), - ?assertEqual(100, length(Res8)), - - %% Do a fullsync, make sure that fullsync_only is replicated, but - %% realtime_only and no_repl aren't - repl_util:start_and_wait_until_fullsync_complete(LeaderA), - - lager:info("Check fullsync only bucket is now replicated"), - ?assertEqual(0, repl_util:wait_for_reads(BFirst, 1, 100, FullsyncOnly, 2)), - - lager:info("Check realtime only bucket didn't replicate"), - Res10 = rt:systest_read(BFirst, 1, 100, RealtimeOnly, 2), - ?assertEqual(100, length(Res10)), - - lager:info("Write 100 more keys into realtime only bucket on ~p", [AFirst]), - ?assertEqual([], repl_util:do_write(AFirst, 101, 200, RealtimeOnly, 2)), - - lager:info("Check the realtime keys replicated"), - ?assertEqual(0, repl_util:wait_for_reads(BFirst, 101, 200, RealtimeOnly, 2)), - - lager:info("Check the older keys in the realtime bucket did not replicate"), - Res12 = rt:systest_read(BFirst, 1, 100, RealtimeOnly, 2), - ?assertEqual(100, length(Res12)), - - lager:info("Check {repl, false} bucket didn't replicate"), - Res13 = 
-    ?assertEqual(100, length(Res13)).
-
-
-%% @doc Offline Queuing Test
-%% Test Cycle:
-%% Stop real time on Cluster "A"
-%% Write 100 keys to leader on Cluster "A"
-%% Restart real time on Cluster "A"
-%% Verify Keys replicated to Cluster "B" after real time was restarted
-%% Stop real time on Cluster "A"
-%% Verify that 100 keys are NOT on Cluster "A"
-%% Write 100 keys to Cluster "A"
-%% Verify that 100 keys were written to Cluster "A"
-%% Verify that 100 keys are NOT on Cluster "B"
-%% Re-enable real time on Cluster "A"
-%% Verify that 100 keys are available on Cluster "B"
-offline_queueing_tests([AFirst|_] = ANodes, [BFirst|_] = BNodes) ->
-
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) || <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-
-    log_to_nodes(ANodes ++ BNodes, "Testing offline realtime queueing"),
-    lager:info("Testing offline realtime queueing"),
-
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-
-    lager:info("Stopping realtime, queue will build"),
-    repl_util:stop_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    lager:info("Writing 100 keys"),
-    ?assertEqual([], repl_util:do_write(LeaderA, 800, 900, TestBucket, 2)),
-
-    lager:info("Starting realtime"),
-    repl_util:start_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-
-    lager:info("Reading keys written while repl was stopped"),
-    ?assertEqual(0, repl_util:wait_for_reads(BFirst, 800, 900, TestBucket, 2)),
-
-    log_to_nodes(ANodes ++ BNodes, "Testing realtime migration on node shutdown"),
-    lager:info("Testing realtime migration on node shutdown"),
-    Target = hd(ANodes -- [LeaderA]),
-
-    lager:info("Stopping realtime, queue will build"),
-    repl_util:stop_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    lager:info("Verifying 100 keys are missing from ~p", [Target]),
-    repl_util:read_from_cluster(Target, 901, 1000, TestBucket, 100),
-
-    lager:info("Writing 100 keys to ~p", [Target]),
-    ?assertEqual([], repl_util:do_write(Target, 901, 1000, TestBucket, 2)),
-
-    lager:info("Verifying 100 keys are read from ~p", [Target]),
-    repl_util:read_from_cluster(Target, 901, 1000, TestBucket, 0),
-
-    lager:info("Verifying 100 keys are missing from ~p", [BFirst]),
-    repl_util:read_from_cluster(BFirst, 901, 1000, TestBucket, 100),
-
-    io:format("queue status: ~p", [rpc:call(Target, riak_repl2_rtq, status, [])]),
-
-    lager:info("Stopping node ~p", [Target]),
-
-    rt:stop(Target),
-    rt:wait_until_unpingable(Target),
-
-    lager:info("Starting realtime"),
-    repl_util:start_realtime(LeaderA, "B"),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-
-    lager:info("Verifying 100 keys are now available on ~p", [BFirst]),
-    ?assertEqual(0, repl_util:wait_for_reads(BFirst, 901, 1000, TestBucket, 2)),
-
-    rt:start(Target),
-    rt:wait_until_pingable(Target),
-    rt:wait_for_service(Target, riak_repl).
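The stop/write/start sequence above is the heart of the realtime queue tests: while realtime is stopped, the source cluster queues every write, and restarting realtime must drain that queue to the sink. A minimal sketch of the cycle, assuming riak_test's rt and repl_util helpers and a sink cluster named "B" (queue_and_drain is a hypothetical name, not part of the suite):

    %% Queue writes while realtime is down, then verify they drain to the sink.
    queue_and_drain(Leader, ANodes, SinkNode, Bucket, Start, End) ->
        repl_util:stop_realtime(Leader, "B"),   %% writes now accumulate in the source queue
        rt:wait_until_ring_converged(ANodes),
        [] = repl_util:do_write(Leader, Start, End, Bucket, 2),
        repl_util:start_realtime(Leader, "B"),  %% reconnect; the queue should drain
        ok = repl_util:wait_for_connection(Leader, "B"),
        %% zero read failures on the sink means every queued object arrived
        0 = repl_util:wait_for_reads(SinkNode, Start, End, Bucket, 2).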
-
-
-%% @doc Protocol Buffer Write During Shutdown
-%% Test Cycle:
-%% Connect to Cluster "A" via PB
-%% Spawn background process to stop a Cluster "A" node
-%% Write 10,000 keys to Cluster "A"
-%% Verify that at least as many write failures are reported on Cluster "A" as read failures on Cluster "B"
-pb_write_during_shutdown([AFirst|_] = ANodes, [BFirst|_] = BNodes) ->
-
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) || <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-
-    log_to_nodes(ANodes ++ BNodes, "Testing protocol buffer writes during shutdown"),
-
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-    Target = hd(ANodes -- [LeaderA]),
-
-    ConnInfo = proplists:get_value(Target, rt:connection_info([Target])),
-    {IP, Port} = proplists:get_value(pb, ConnInfo),
-    lager:info("Connecting to pb socket ~p:~p on ~p", [IP, Port, Target]),
-    PBSock = rt:pbc(Target),
-
-    %% do the stop in the background while we're writing keys
-    spawn(fun() ->
-            timer:sleep(500),
-            lager:info("Stopping node ~p again", [Target]),
-            rt:stop(Target),
-            lager:info("Node stopped")
-          end),
-
-    lager:info("Writing 10,000 keys"),
-    WriteErrors =
-        try
-            pb_write(PBSock, 1000, 11000, TestBucket, 2)
-        catch
-            _:_ ->
-                lager:info("Shutdown timeout caught"),
-                []
-        end,
-    lager:info("Received ~p write failures", [length(WriteErrors)]),
-    timer:sleep(3000),
-    lager:info("Checking number of read failures on secondary cluster"),
-    ReadErrors = rt:systest_read(BFirst, 1000, 11000, TestBucket, 2),
-    lager:info("Received ~p read failures", [length(ReadErrors)]),
-
-    %% Ensure node is down before we try to start it up again.
-    lager:info("pb_write_during_shutdown: Ensure node ~p is down before restart", [Target]),
-    ?assertEqual(ok, rt:wait_until_unpingable(Target)),
-
-    rt:start(Target),
-    rt:wait_until_pingable(Target),
-    rt:wait_for_service(Target, riak_repl),
-    ReadErrors2 = rt:systest_read(Target, 1000, 11000, TestBucket, 2),
-    lager:info("Received ~p read failures on ~p", [length(ReadErrors2), Target]),
-    case length(WriteErrors) >= length(ReadErrors) of
-        true ->
-            ok;
-        false ->
-            lager:error("Received more read errors on ~p: ~p than write errors on ~p: ~p",
-                        [BFirst, length(ReadErrors), Target, length(WriteErrors)]),
-            FailedKeys = lists:foldl(fun({Key, _}, Acc) ->
-                            case lists:keyfind(Key, 1, WriteErrors) of
-                                false ->
-                                    [Key|Acc];
-                                _ ->
-                                    Acc
-                            end
-                          end, [], ReadErrors),
-            lager:info("Failed keys ~p", [FailedKeys]),
-            ?assert(false)
-    end.
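The fold at the end of pb_write_during_shutdown/2 computes, for diagnostics, the keys that could not be read on the sink but were never reported as write failures to the client; any such key is an acked write that was silently lost. The same computation as a standalone list comprehension (unacked_losses is a hypothetical helper name):

    %% Read failures on the sink with no matching reported write failure
    %% on the source; logged when the failure-count check trips.
    unacked_losses(WriteErrors, ReadErrors) ->
        [Key || {Key, _} <- ReadErrors,
                not lists:keymember(Key, 1, WriteErrors)].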
-
-
-%% @doc HTTP Write During Shutdown
-%% Test Cycle:
-%% Connect to Cluster "A" via HTTP
-%% Spawn background process to stop a Cluster "A" node
-%% Write 10,000 keys to Cluster "A"
-%% Verify that at least as many write failures are reported on Cluster "A" as read failures on Cluster "B"
-http_write_during_shutdown([AFirst|_] = ANodes, [BFirst|_] = BNodes) ->
-
-    TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) || <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-
-    log_to_nodes(ANodes ++ BNodes, "Testing http writes during shutdown"),
-
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-    Target = hd(ANodes -- [LeaderA]),
-
-    ConnInfo = proplists:get_value(Target, rt:connection_info([Target])),
-    {IP, Port} = proplists:get_value(http, ConnInfo),
-    lager:info("Connecting to http socket ~p:~p on ~p", [IP, Port, Target]),
-    C = rt:httpc(Target),
-
-    %% do the stop in the background while we're writing keys
-    spawn(fun() ->
-            timer:sleep(500),
-            lager:info("Stopping node ~p again", [Target]),
-            rt:stop(Target),
-            lager:info("Node stopped")
-          end),
-
-    lager:info("Writing 10,000 keys"),
-    WriteErrors =
-        try
-            http_write(C, 12000, 22000, TestBucket, 2)
-        catch
-            _:_ ->
-                lager:info("Shutdown timeout caught"),
-                []
-        end,
-    lager:info("got ~p write failures to ~p", [length(WriteErrors), Target]),
-    timer:sleep(3000),
-    lager:info("Checking number of read failures on secondary cluster node, ~p", [BFirst]),
-    [{_, {IP, Port2}},_] = rt:connection_info(BFirst),
-    C2 = rhc:create("127.0.0.1", Port2, "riak", []),
-    ReadErrors = http_read(C2, 12000, 22000, TestBucket, 2),
-    lager:info("Received ~p read failures from ~p", [length(ReadErrors), BFirst]),
-
-    %% Ensure node is down before we try to start it up again.
-    lager:info("HTTP: write_during_shutdown: Ensure node ~p is down before restart", [Target]),
-    ?assertEqual(ok, rt:wait_until_unpingable(Target)),
-
-    rt:start(Target),
-    rt:wait_until_pingable(Target),
-    rt:wait_for_service(Target, riak_repl),
-    ReadErrors2 = http_read(C, 12000, 22000, TestBucket, 2),
-    lager:info("Received ~p read failures on ~p", [length(ReadErrors2), Target]),
-    case length(WriteErrors) >= length(ReadErrors) of
-        true ->
-            ok;
-        false ->
-            lager:error("Received more read errors on ~p: ~p than write errors on ~p: ~p",
-                        [BFirst, length(ReadErrors), Target, length(WriteErrors)]),
-            FailedKeys = lists:foldl(fun({Key, _}, Acc) ->
-                            case lists:keyfind(Key, 1, WriteErrors) of
-                                false ->
-                                    [Key|Acc];
-                                _ ->
-                                    Acc
-                            end
-                          end, [], ReadErrors),
-            lager:info("Failed keys ~p", [FailedKeys]),
-            ?assert(false)
-    end.
-
-
-client_iterate(_Sock, [], _Bucket, _W, Acc, _Fun, Parent) ->
-    Parent ! {result, self(), Acc},
-    Acc;
-
-
-client_iterate(Sock, [N | NS], Bucket, W, Acc, Fun, Parent) ->
-    NewAcc = try Fun(Sock, Bucket, N, W) of
-        ok ->
-            Acc;
-        Other ->
-            [{N, Other} | Acc]
-    catch
-        What:Why ->
-            [{N, {What, Why}} | Acc]
-    end,
-    client_iterate(Sock, NS, Bucket, W, NewAcc, Fun, Parent).
-
-
-http_write(Sock, Start, End, Bucket, W) ->
-    F = fun(S, B, K, WVal) ->
-            X = list_to_binary(integer_to_list(K)),
-            Obj = riakc_obj:new(B, X, X),
-            rhc:put(S, Obj, [{dw, WVal}])
-        end,
-    Keys = lists:seq(Start, End),
-    Partitions = partition_keys(Keys, 8),
-    Parent = self(),
-    Workers = [spawn_monitor(fun() -> client_iterate(Sock, K, Bucket, W, [], F, Parent) end) || K <- Partitions],
-    collect_results(Workers, []).
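http_write/5 above (and pb_write/5 below) fan a key range out over eight workers and then gather each worker's error accumulator. A self-contained sketch of that spawn_monitor fan-out pattern, with a simpler round-robin shard function (fanout_demo and its function names are hypothetical):

    -module(fanout_demo).
    -export([run/3]).

    %% Apply Fun to every key, sharded round-robin over N workers;
    %% returns the concatenated per-worker result lists.
    run(Fun, Keys, N) ->
        Parent = self(),
        Workers = [spawn_monitor(fun() ->
                       Parent ! {result, self(), [Fun(K) || K <- Shard]}
                   end) || Shard <- shard(Keys, N)],
        collect(Workers, []).

    %% Round-robin the keys into N shards.
    shard(Keys, N) ->
        Indexed = lists:zip(lists:seq(0, length(Keys) - 1), Keys),
        [[K || {I, K} <- Indexed, I rem N =:= R] || R <- lists:seq(0, N - 1)].

    %% Gather results; a 'DOWN' from a worker that never reported simply
    %% drops it, mirroring collect_results/2 in the file above.
    collect([], Acc) ->
        Acc;
    collect(Workers, Acc) ->
        receive
            {result, Pid, Res} ->
                collect(lists:keydelete(Pid, 1, Workers), Res ++ Acc);
            {'DOWN', _Ref, process, Pid, _Reason} ->
                collect(lists:keydelete(Pid, 1, Workers), Acc)
        end.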
-
-
-pb_write(Sock, Start, End, Bucket, W) ->
-    F = fun(S, B, K, WVal) ->
-            Obj = riakc_obj:new(B, <<K:32/integer>>, <<K:32/integer>>),
-            riakc_pb_socket:put(S, Obj, [{dw, WVal}])
-        end,
-    Keys = lists:seq(Start, End),
-    Partitions = partition_keys(Keys, 8),
-    Parent = self(),
-    Workers = [spawn_monitor(fun() -> client_iterate(Sock, K, Bucket, W, [], F, Parent) end) || K <- Partitions],
-    collect_results(Workers, []).
-
-
-http_read(Sock, Start, End, Bucket, R) ->
-    F = fun(S, B, K, RVal) ->
-            X = list_to_binary(integer_to_list(K)),
-            case rhc:get(S, B, X, [{r, RVal}]) of
-                {ok, _} ->
-                    ok;
-                Error ->
-                    Error
-            end
-        end,
-    client_iterate(Sock, lists:seq(Start, End), Bucket, R, [], F, self()).
-
-
-partition_keys(Keys, PC) ->
-    partition_keys(Keys, PC, lists:duplicate(PC, [])).
-
-
-partition_keys([], _, Acc) ->
-    Acc;
-
-
-partition_keys(Keys, PC, Acc) ->
-    In = lists:sublist(Keys, PC),
-    Rest = try lists:nthtail(PC, Keys)
-           catch _:_ -> []
-           end,
-    NewAcc = lists:foldl(fun(K, [H|T]) ->
-                T ++ [[K|H]]
-        end, Acc, In),
-    partition_keys(Rest, PC, NewAcc).
-
-
-collect_results([], Acc) ->
-    Acc;
-
-
-collect_results(Workers, Acc) ->
-    receive
-        {result, Pid, Res} ->
-            collect_results(lists:keydelete(Pid, 1, Workers), Res ++ Acc);
-        {'DOWN', _, _, Pid, _Reason} ->
-            collect_results(lists:keydelete(Pid, 1, Workers), Acc)
-    end.
diff --git a/tests/replication2_connections.erl b/tests/replication2_connections.erl
deleted file mode 100644
index e9419d435..000000000
--- a/tests/replication2_connections.erl
+++ /dev/null
@@ -1,306 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-
--module(replication2_connections).
--behaviour(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(HB_TIMEOUT, 2000).
-
-confirm() ->
-    ?assertEqual(pass, simple_test()),
-    ?assertEqual(pass, disconnect_test()),
-    ?assertEqual(pass, error_cleanup_test()),
-    pass.
-
-simple_test() ->
-    lager:info("Running simple_test()~n"),
-    Conf = [{riak_repl,
-             [
-              %% turn off fullsync
-              {fullsync_on_connect, false},
-              {fullsync_interval, disabled},
-              %% override defaults for RT heartbeat so that we
-              %% can see faults sooner and have a quicker test.
- {rt_heartbeat_interval, ?HB_TIMEOUT}, - {rt_heartbeat_timeout, ?HB_TIMEOUT} - ]}], - - rt:set_advanced_conf(all, Conf), - - [ANodes, BNodes] = rt:build_clusters([3, 3]), - - rt:wait_for_cluster_service(ANodes, riak_repl), - rt:wait_for_cluster_service(BNodes, riak_repl), - - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Waiting for leader to converge on cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - AFirst = hd(ANodes), - - lager:info("Waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - BFirst = hd(BNodes), - - lager:info("Naming A"), - repl_util:name_cluster(AFirst, "A"), - ?assertEqual(ok, rt:wait_until_ring_converged(ANodes)), - - lager:info("Naming B"), - repl_util:name_cluster(BFirst, "B"), - ?assertEqual(ok, rt:wait_until_ring_converged(BNodes)), - - lager:info("Connecting A to B"), - connect_clusters(AFirst, BFirst), - - lager:info("Enabling realtime replication from A to B."), - repl_util:enable_realtime(AFirst, "B"), - ?assertEqual(ok, rt:wait_until_ring_converged(ANodes)), - repl_util:start_realtime(AFirst, "B"), - ?assertEqual(ok, rt:wait_until_ring_converged(ANodes)), - - lager:info("Connecting B to A"), - connect_clusters(BFirst, AFirst), - - lager:info("Enabling realtime replication from B to A."), - repl_util:enable_realtime(BFirst, "A"), - ?assertEqual(ok, rt:wait_until_ring_converged(BNodes)), - repl_util:start_realtime(BFirst, "A"), - ?assertEqual(ok, rt:wait_until_ring_converged(BNodes)), - - lager:info("Verifying connectivity between clusters."), - [verify_connectivity(Node, "B") || Node <- ANodes], - [verify_connectivity(Node, "A") || Node <- BNodes], - - lager:info("Cleaning cluster A"), - rt:clean_cluster(ANodes), - lager:info("Cleaning cluster B"), - rt:clean_cluster(BNodes), - lager:info("Test passed"), - - pass. - -disconnect_test() -> - lager:info("Running disconnect_test()~n"), - Conf = [{riak_repl, - [ - %% turn off fullsync - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - %% override defaults for RT heartbeat so that we - %% can see faults sooner and have a quicker test. 
- {rt_heartbeat_interval, ?HB_TIMEOUT}, - {rt_heartbeat_timeout, ?HB_TIMEOUT} - ]}], - - rt:set_advanced_conf(all, Conf), - - [ANodes, BNodes] = rt:build_clusters([3, 3]), - - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Waiting for leader to converge on cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - AFirst = hd(ANodes), - - lager:info("Waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - BFirst = hd(BNodes), - - lager:info("Naming A"), - repl_util:name_cluster(AFirst, "A"), - ?assertEqual(ok, rt:wait_until_ring_converged(ANodes)), - - lager:info("Naming B"), - repl_util:name_cluster(BFirst, "B"), - ?assertEqual(ok, rt:wait_until_ring_converged(BNodes)), - - lager:info("Connecting A to B"), - connect_clusters(AFirst, BFirst), - - lager:info("Connecting B to A"), - connect_clusters(BFirst, AFirst), - - lager:info("Verifying connectivity between clusters."), - [verify_connectivity(Node, "B") || Node <- ANodes], - [verify_connectivity(Node, "A") || Node <- BNodes], - - lager:info("Disconnect A to B"), - repl_util:disconnect_cluster(AFirst, "B"), - - lager:info("Verifying disconnect from A to B."), - [verify_disconnect(Node, "B") || Node <- ANodes], - - lager:info("Disconnect B to A"), - repl_util:disconnect_cluster(BFirst, "A"), - - lager:info("Verifying disconnect from B to A."), - [verify_disconnect(Node, "A") || Node <- BNodes], - - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), - - pass. - -error_cleanup_test() -> - lager:info("Running error_cleanup_test()~n"), - NumNodes = rt_config:get(num_nodes, 6), - - lager:info("Deploy ~p nodes", [NumNodes]), - Conf = [{riak_repl, - [ - %% turn off fullsync - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - %% override defaults for RT heartbeat so that we - %% can see faults sooner and have a quicker test. 
- {rt_heartbeat_interval, ?HB_TIMEOUT}, - {rt_heartbeat_timeout, ?HB_TIMEOUT}, - %% Set quicker cancellation interval of 5 seconds - {cm_cancellation_interval, 5 * 1000} - ]}], - - rt:set_advanced_conf(all, Conf), - - [ANodes, BNodes] = rt:build_clusters([3, 3]), - - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Waiting for leader to converge on cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - AFirst = hd(ANodes), - - lager:info("Waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - BFirst = hd(BNodes), - - lager:info("Naming A"), - repl_util:name_cluster(AFirst, "A"), - ?assertEqual(ok, rt:wait_until_ring_converged(ANodes)), - - lager:info("Naming B"), - repl_util:name_cluster(BFirst, "B"), - ?assertEqual(ok, rt:wait_until_ring_converged(BNodes)), - - % Insert intercept to cause some errors on connect - lager:info("Adding intercept to cause econnrefused errors"), - Intercept = {riak_core_connection,[{{sync_connect, 2}, return_econnrefused}]}, - [ok = rt_intercept:add(Target, Intercept) || Target <- ANodes], - - lager:info("Connecting A to B"), - connect_clusters(AFirst, BFirst), - - lager:info("Wait until errors in connection_manager status"), - ?assertEqual(ok, repl_util:wait_until_connection_errors(repl_util:get_leader(AFirst), BFirst)), - - lager:info("Disconnect A from B via IP/PORT"), - ?assertEqual(ok, rpc:call(AFirst, riak_repl_console, disconnect,[["127.0.0.1","10046"]])), - - lager:info("Wait until connections clear"), - ?assertEqual(ok, repl_util:wait_until_connections_clear(repl_util:get_leader(AFirst))), - - lager:info("Verify disconnect from A to B"), - [verify_full_disconnect(Node) || Node <- ANodes], - - % Insert intercept to allow connections to occur - lager:info("Adding intercept to allow connections"), - Intercept2 = {riak_core_connection,[{{sync_connect, 2}, sync_connect}]}, - [ok = rt_intercept:add(Target, Intercept2) || Target <- ANodes], - - lager:info("Connecting A to B"), - connect_clusters(AFirst, BFirst), - - lager:info("Verifying connection from A to B"), - [verify_connectivity(Node, "B") || Node <- ANodes], - - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), - - pass. - -%% @doc Verify connectivity between sources and sink. -verify_connectivity(Node, Cluster) -> - print_repl_ring(Node), - ?assertEqual(ok,repl_util:wait_for_connection(Node, Cluster)), - print_repl_ring(Node), - restart_process(Node, riak_core_connection_manager), - ?assertEqual(ok,repl_util:wait_for_connection(Node, Cluster)). - -%% @doc Verify disconnect between Node and sink Cluster. -verify_disconnect(Node, Cluster) -> - print_repl_ring(Node), - ?assertEqual(ok,repl_util:wait_for_disconnect(Node, Cluster)), - print_repl_ring(Node), - restart_process(Node, riak_core_connection_manager), - ?assertEqual(ok,repl_util:wait_for_disconnect(Node, Cluster)). - -%% @doc Verify no connections of any type on Node. -verify_full_disconnect(Node) -> - print_repl_ring(Node), - ?assertEqual(ok,repl_util:wait_for_full_disconnect(Node)), - print_repl_ring(Node), - restart_process(Node, riak_core_connection_manager), - ?assertEqual(ok,repl_util:wait_for_full_disconnect(Node)). - -%% @doc Print the status of the ring. -print_repl_ring(Node) -> - {ok, Ring} = rpc:call(Node, - riak_core_ring_manager, - get_my_ring, - []), - Clusters = rpc:call(Node, - riak_repl_ring, - get_clusters, - [Ring]), - lager:info("REPL ring shows clusters as: ~p", [Clusters]). 
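The intercepts driving error_cleanup_test() use riak_test's {TargetModule, [{{Function, Arity}, InterceptName}]} spec for rt_intercept:add/2. Pulled out of the test for clarity (break_connections and heal_connections are hypothetical wrapper names; the return_econnrefused and sync_connect intercepts are the ones the test already references):

    %% Make every outbound cluster connection fail with econnrefused.
    break_connections(Nodes) ->
        Spec = {riak_core_connection,
                [{{sync_connect, 2}, return_econnrefused}]},
        [ok = rt_intercept:add(N, Spec) || N <- Nodes],
        ok.

    %% Re-point the intercept at the real implementation to heal it.
    heal_connections(Nodes) ->
        Spec = {riak_core_connection,
                [{{sync_connect, 2}, sync_connect}]},
        [ok = rt_intercept:add(N, Spec) || N <- Nodes],
        ok.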
-
-%% @doc Restart a given process by name.
-restart_process(Node, Name) ->
-    lager:info("Restarting ~p on ~p.", [Name, Node]),
-
-    %% Find the process.
-    Pid0 = rpc:call(Node, erlang, whereis, [Name]),
-    lager:info("Found ~p on node ~p at ~p, killing.",
-               [Name, Node, Pid0]),
-
-    %% Kill it.
-    true = rpc:call(Node, erlang, exit, [Pid0, brutal_kill]),
-
-    %% Verify it restarts.
-    rt:wait_until(Node, fun(_) ->
-                lager:info("Waiting for ~p to restart...", [Name]),
-                Pid = rpc:call(Node, erlang, whereis, [Name]),
-                Pid =/= Pid0 andalso Pid =/= undefined
-        end),
-
-    lager:info("Process restarted.").
-
-%% @doc Connect two clusters for replication using their respective
-%% leader nodes.
-connect_clusters(LeaderA, LeaderB) ->
-    {ok, {_IP, Port}} = rpc:call(LeaderB, application, get_env,
-                                 [riak_core, cluster_mgr]),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", Port).
diff --git a/tests/replication2_console_tests.erl b/tests/replication2_console_tests.erl
deleted file mode 100644
index 068aa7cd0..000000000
--- a/tests/replication2_console_tests.erl
+++ /dev/null
@@ -1,123 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2014 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(replication2_console_tests).
--include_lib("eunit/include/eunit.hrl").
-
-%% This test checks to see if the riak-repl *shell script*
-%% communicates its command line args to riak_repl_console
-%% correctly. This test needs to be exercised on all supported
-%% Riak platforms. This test helped fix a problem on Ubuntu
-%% where "riak-repl cascades" failed due to a shift error in
-%% the script. Hopefully, this test will catch similar errors
-%% with future changes to riak-repl.
-%% Note, this test is more about verifying parameter *arity* in
-%% riak_repl_console than verifying all valid combinations
-%% of arguments for each command.
-%%
-%% test flow:
-%% riak_test -> riak-repl (shell script) -> intercept
-%% a) if input received by riak-repl is correct,
-%%    display "pass" to the console. Test will
-%%    pass via assert in check_cmd/2.
-%% b) if input received by riak-repl is unexpected,
-%%    display "fail" to the console; test will fail
-%%    via assert in check_cmd/2
-%% c) if the intercept isn't called, "pass" won't be printed
-%%    to stdout, and the test will fail via assert in check_cmd/2
-
--export([confirm/0]).
- -confirm() -> - %% Deploy a node to test against - lager:info("Deploy node to test riak-repl command line"), - [Node] = rt:deploy_nodes(1, [], [riak_kv, riak_repl]), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), - rt_intercept:add(Node, - {riak_repl_console, - [ - {{clustername,1}, verify_clustername}, - {{modes,1}, verify_modes}, - {{clusterstats,1}, verify_clusterstats}, - {{realtime_cascades,1}, verify_realtime_cascades}, - {{max_fssource_node,1}, verify_max_fssource_node}, - {{max_fssource_cluster,1}, verify_max_fssource_cluster}, - {{max_fssink_node,1}, verify_max_fssink_node}, - {{fullsync,1}, verify_fullsync}, - {{proxy_get,1}, verify_proxy_get}, - {{add_nat_map,1}, verify_add_nat_map}, - {{del_nat_map,1}, verify_del_nat_map}, - {{show_nat_map,1}, verify_show_nat_map}, - {{realtime,1}, verify_realtime}, - {{add_block_provider_redirect,1}, verify_add_block_provider_redirect}, - {{show_block_provider_redirect,1}, verify_show_block_provider_redirect}, - {{delete_block_provider_redirect,1}, verify_delete_block_provider_redirect}, - {{show_local_cluster_id,1}, verify_show_local_cluster_id} - ]}), - - %% test different parameter arities - check_cmd(Node, "clusterstats"), - check_cmd(Node, "clusterstats cluster_mgr"), - check_cmd(Node, "clusterstats 192.168.1.1:5555"), - - check_cmd(Node, "modes"), - check_cmd(Node, "modes mode_repl12"), - check_cmd(Node, "modes mode_repl12 mode_repl13"), - - check_cmd(Node, "clustername"), - check_cmd(Node, "clustername foo"), - - check_cmd(Node, "realtime cascades"), - check_cmd(Node, "realtime cascades always"), - - check_cmd(Node, "fullsync max_fssource_node"), - check_cmd(Node, "fullsync max_fssource_node 99"), - - check_cmd(Node, "fullsync max_fssource_cluster"), - check_cmd(Node, "fullsync max_fssource_cluster 99"), - - check_cmd(Node, "fullsync max_fssink_node"), - check_cmd(Node, "fullsync max_fssink_node 99"), - - check_cmd(Node, "fullsync enable foo"), - check_cmd(Node, "fullsync disable bar"), - - check_cmd(Node, "realtime enable foo"), - check_cmd(Node, "realtime disable bar"), - - check_cmd(Node, "proxy_get enable foo"), - check_cmd(Node, "proxy_get disable bar"), - - check_cmd(Node, "nat-map show"), - check_cmd(Node, "nat-map add 1.2.3.4:4321 192.168.1.1"), - check_cmd(Node, "nat-map del 1.2.3.4:4321 192.168.1.1"), - - check_cmd(Node, "add-block-provider-redirect a b"), - check_cmd(Node, "show-block-provider-redirect a"), - check_cmd(Node, "delete-block-provider-redirect a"), - check_cmd(Node, "show-local-cluster-id"), - - pass. - -check_cmd(Node, Cmd) -> - lager:info("Testing riak-repl ~s on ~s", [Cmd, Node]), - {ok, Out} = rt:riak_repl(Node, [Cmd]), - ?assertEqual("pass", Out). - diff --git a/tests/replication2_dirty.erl b/tests/replication2_dirty.erl deleted file mode 100644 index eeb4f899c..000000000 --- a/tests/replication2_dirty.erl +++ /dev/null @@ -1,259 +0,0 @@ --module(replication2_dirty). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). 
-
-confirm() ->
-    TestHash = erlang:md5(term_to_binary(os:timestamp())),
-    TestBucket = <<TestHash/binary, "-systest_a">>,
-
-    NumNodes = rt_config:get(num_nodes, 6),
-    ClusterASize = rt_config:get(cluster_a_size, 4),
-    lager:info("Deploy ~p nodes", [NumNodes]),
-    Conf = [
-            {riak_repl,
-             [
-              {fullsync_on_connect, false},
-              {fullsync_interval, disabled}
-             ]}
-           ],
-
-    Nodes = rt:deploy_nodes(NumNodes, Conf, [riak_kv, riak_repl]),
-    {[AFirst|_] = ANodes, [BFirst|_] = BNodes} = lists:split(ClusterASize, Nodes),
-
-    AllNodes = ANodes ++ BNodes,
-    rt:log_to_nodes(AllNodes, "Starting replication2_dirty test"),
-
-    lager:info("ANodes: ~p", [ANodes]),
-    lager:info("BNodes: ~p", [BNodes]),
-
-    rt:log_to_nodes(AllNodes, "Building and connecting Clusters"),
-
-    lager:info("Build cluster A"),
-    repl_util:make_cluster(ANodes),
-
-    lager:info("Build cluster B"),
-    repl_util:make_cluster(BNodes),
-
-    repl_util:name_cluster(AFirst, "A"),
-    repl_util:name_cluster(BFirst, "B"),
-    rt:wait_until_ring_converged(ANodes),
-    rt:wait_until_ring_converged(BNodes),
-
-    %% get the leader for the first cluster
-    repl_util:wait_until_leader(AFirst),
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-    %LeaderB = rpc:call(BFirst, riak_core_cluster_mgr, get_leader, []),
-
-    {ok, {_IP, Port}} = rpc:call(BFirst, application, get_env,
-                                 [riak_core, cluster_mgr]),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", Port),
-
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-    repl_util:enable_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-    repl_util:start_realtime(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    rt:wait_until_ring_converged(ANodes),
-
-    % nothing should be dirty initially
-    lager:info("Waiting until all nodes clean"),
-    wait_until_all_nodes_clean(LeaderA),
-
-    rt:log_to_nodes(AllNodes, "Test basic realtime replication from A -> B"),
-
-    %% write some data on A
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-    %io:format("~p~n", [rpc:call(LeaderA, riak_repl_console, status, [quiet])]),
-    lager:info("Writing 2000 more keys to ~p", [LeaderA]),
-    ?assertEqual([], repl_util:do_write(LeaderA, 101, 2000, TestBucket, 2)),
-
-    %% verify data is replicated to B
-    lager:info("Reading 2000 keys written to ~p from ~p", [LeaderA, BFirst]),
-    ?assertEqual(0, repl_util:wait_for_reads(BFirst, 101, 2000, TestBucket, 2)),
-
-    [?assertEqual(0, get_dirty_stat(Node)) || Node <- ANodes],
-    [?assertEqual(0, get_dirty_stat(Node)) || Node <- BNodes],
-    [?assertEqual({0,0}, get_rt_errors(Node)) || Node <- ANodes],
-    [?assertEqual({0,0}, get_rt_errors(Node)) || Node <- BNodes],
-
-    lager:info("Waiting until all nodes clean"),
-    wait_until_all_nodes_clean(LeaderA),
-
-    rt:log_to_nodes(AllNodes, "Verify fullsync after manual dirty flag set"),
-
-    lager:info("Manually setting rt_dirty state"),
-
-    % manually set this for now to simulate source errors
-    Result = rpc:call(LeaderA, riak_repl_stats, rt_source_errors, []),
-    lager:info("Result = ~p", [Result]),
-
-    lager:info("Waiting until dirty"),
-    wait_until_coord_has_dirty(LeaderA),
-
-    lager:info("Starting fullsync"),
-    repl_util:start_and_wait_until_fullsync_complete(LeaderA),
-    lager:info("Wait for all nodes to show up clean"),
-    wait_until_all_nodes_clean(LeaderA),
-
-    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-    rt:log_to_nodes(AllNodes, "Multiple node test"),
-    lager:info("Multiple node test"),
-    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - %% test multiple nodes dirty - [DirtyA , DirtyB | _] = ANodes, - % manually set this for now to simulate source errors - ResultA = rpc:call(DirtyA, riak_repl_stats, rt_source_errors, []), - ResultB = rpc:call(DirtyB, riak_repl_stats, rt_sink_errors, []), - lager:info("Result = ~p", [ResultA]), - lager:info("Result = ~p", [ResultB]), - - lager:info("Waiting until dirty"), - wait_until_coord_has_dirty(DirtyA), - wait_until_coord_has_dirty(DirtyB), - - lager:info("Starting fullsync"), - repl_util:start_and_wait_until_fullsync_complete(LeaderA), - lager:info("Wait for all nodes to show up clean"), - wait_until_all_nodes_clean(LeaderA), - - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - rt:log_to_nodes(AllNodes, "Multiple node test, one failed during fullsync"), - lager:info("Multiple node test, one failed during fullsync"), - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - %% test multiple nodes dirty - [DirtyC , DirtyD | _] = ANodes, - % manually set this for now to simulate source errors - ResultC = rpc:call(DirtyC, riak_repl_stats, rt_source_errors, []), - lager:info("ResultC = ~p", [ResultC]), - - lager:info("Waiting until dirty"), - wait_until_coord_has_dirty(DirtyC), - - lager:info("Starting fullsync"), - spawn(fun() -> - timer:sleep(1000), - lager:info("Marking node as dirty during a fullsync"), - ResultC = rpc:call(DirtyD, riak_repl_stats, rt_source_errors, []), - lager:info("Result = ~p", [ResultC]) - end), - repl_util:start_and_wait_until_fullsync_complete(LeaderA), - - lager:info("Checking to see if C is still clean"), - wait_until_node_clean(DirtyC), - lager:info("Checking to see if D is still dirty"), - wait_until_coord_has_dirty(DirtyD), - - % Clear out all dirty state - %repl_util:start_and_wait_until_fullsync_complete(LeaderA), - - rt:log_to_nodes(AllNodes, "rt_dirty test completed"), - pass. - -get_dirty_stat(Node) -> - Stats = rpc:call(Node, riak_repl_stats, get_stats, []), - %lager:info("RT_DIRTY = ~p", [proplists:get_value(rt_dirty, Stats, -1)]), - proplists:get_value(rt_dirty, Stats, -1). - -get_rt_errors(Node) -> - Stats = rpc:call(Node, riak_repl_stats, get_stats, []), - SourceErrors = proplists:get_value(rt_source_errors, Stats, -1), - SinkErrors = proplists:get_value(rt_sink_errors, Stats, -1), - lager:info("Source errors = ~p, sink errors = ~p", [SourceErrors, SinkErrors]), - {SourceErrors, SinkErrors}. - -wait_until_coord_has_dirty(Node) -> - Res = rt:wait_until(Node, - fun(_) -> - lager:info("Checking dirty for node ~p", [Node]), - Status = rpc:call(Node, riak_repl2_fscoordinator, status, []), - case Status of - {badrpc, _} -> false; - [] -> false; - [{_,Stats}|_Rest] -> - NodeString = proplists:get_value(fullsync_suggested, Stats), - Nodes = string:tokens(NodeString,","), - lager:info("Nodes = ~p",[Nodes]), - lists:member(erlang:atom_to_list(Node), Nodes) - end - end), - ?assertEqual(ok, Res). - -%wait_until_coord_has_any_dirty(SourceLeader) -> -% Res = rt:wait_until(SourceLeader, -% fun(_) -> -% lager:info("Checking for any dirty nodes"), -% Status = rpc:call(SourceLeader, riak_repl2_fscoordinator, status, []), -% case Status of -% {badrpc, _} -> false; -% [] -> false; -% [{_,Stats}|_Rest] -> -% NodeString = proplists:get_value(fullsync_suggested, Stats), -% Nodes = string:tokens(NodeString,","), -% lager:info("Nodes = ~p",[Nodes]), -% length(Nodes) > 0 -% end -% end), -% ?assertEqual(ok, Res). 
-% -%write_until_coord_has_any_dirty(SourceLeader, TestBucket) -> -% Res = rt:wait_until(SourceLeader, -% fun(_) -> -% lager:info("Writing data while checking for any dirty nodes"), -% ?assertEqual([], repl_util:do_write(SourceLeader, 0, 5000, TestBucket, 2)), -% Status = rpc:call(SourceLeader, riak_repl2_fscoordinator, status, []), -% case Status of -% {badrpc, _} -> false; -% [] -> false; -% [{_,Stats}|_Rest] -> -% NodeString = proplists:get_value(fullsync_suggested, Stats), -% Nodes = string:tokens(NodeString,","), -% lager:info("Nodes = ~p",[Nodes]), -% length(Nodes) > 0 -% end -% end), -% ?assertEqual(ok, Res). - - - -%% yeah yeah, copy paste, I know -wait_until_node_clean(Node) -> - Res = rt:wait_until(Node, - fun(_) -> - lager:info("Checking dirty for node ~p", [Node]), - Status = rpc:call(Node, riak_repl2_fscoordinator, status, []), - case Status of - {badrpc, _} -> false; - [] -> false; - [{_,Stats}|_Rest] -> - NodeString = proplists:get_value(fullsync_suggested, Stats), - Nodes = string:tokens(NodeString,","), - lager:info("Nodes = ~p",[Nodes]), - not lists:member(erlang:atom_to_list(Node), Nodes) - end - end), - ?assertEqual(ok, Res). - -wait_until_all_nodes_clean(Leader) -> - Res = rt:wait_until(Leader, - fun(L) -> - lager:info("Checking for all nodes clean"), - Status = rpc:call(L, riak_repl2_fscoordinator, status, []), - case Status of - {badrpc, _} -> false; - [] -> true; - [{_,Stats}|_Rest] -> - NodeString = proplists:get_value(fullsync_suggested, Stats), - Nodes = string:tokens(NodeString,","), - lager:info("Nodes = ~p",[Nodes]), - Nodes == [] - - end - end), - ?assertEqual(ok, Res). - - diff --git a/tests/replication2_fsschedule.erl b/tests/replication2_fsschedule.erl deleted file mode 100644 index 9f455aaec..000000000 --- a/tests/replication2_fsschedule.erl +++ /dev/null @@ -1,220 +0,0 @@ --module(replication2_fsschedule). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). 
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% This tests fullsync scheduling in 1.4+ Advanced Replication
-%% intercept gets called w/ v3 test too, let it
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-setup_repl_clusters(Conf, InterceptSetup) ->
-    NumNodes = 6,
-    lager:info("Deploy ~p nodes", [NumNodes]),
-
-    Nodes = rt:deploy_nodes(NumNodes, Conf, [riak_kv, riak_repl]),
-    InterceptSetup(Nodes),
-
-    lager:info("Nodes = ~p", [Nodes]),
-    {[AFirst|_] = ANodes, Rest} = lists:split(2, Nodes),
-    {[BFirst|_] = BNodes, [CFirst|_] = CNodes} = lists:split(2, Rest),
-
-    %%AllNodes = ANodes ++ BNodes ++ CNodes,
-    rt:log_to_nodes(Nodes, "Starting replication2_fullsync test"),
-
-    lager:info("ANodes: ~p", [ANodes]),
-    lager:info("BNodes: ~p", [BNodes]),
-    lager:info("CNodes: ~p", [CNodes]),
-
-    rt:log_to_nodes(Nodes, "Building and connecting Clusters"),
-
-    lager:info("Build cluster A"),
-    repl_util:make_cluster(ANodes),
-
-    lager:info("Build cluster B"),
-    repl_util:make_cluster(BNodes),
-
-    lager:info("Build cluster C"),
-    repl_util:make_cluster(CNodes),
-
-    repl_util:name_cluster(AFirst, "A"),
-    repl_util:name_cluster(BFirst, "B"),
-    repl_util:name_cluster(CFirst, "C"),
-    rt:wait_until_ring_converged(ANodes),
-    rt:wait_until_ring_converged(BNodes),
-    rt:wait_until_ring_converged(CNodes),
-
-    %% set the fullsync limits higher, so fullsyncs don't take forever
-    [begin
-         rpc:call(N, riak_repl_console, max_fssource_cluster, [["10"]]),
-         rpc:call(N, riak_repl_console, max_fssource_node, [["5"]]),
-         rpc:call(N, riak_repl_console, max_fssink_node, [["5"]])
-     end || N <- [AFirst, BFirst, CFirst]],
-
-    %% get the leader for the first cluster
-    repl_util:wait_until_leader(AFirst),
-    LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []),
-
-    {ok, {_IP, BPort}} = rpc:call(BFirst, application, get_env,
-                                  [riak_core, cluster_mgr]),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", BPort),
-
-    {ok, {_IP, CPort}} = rpc:call(CFirst, application, get_env,
-                                  [riak_core, cluster_mgr]),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", CPort),
-
-
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-    rt:wait_until_ring_converged(ANodes),
-
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "C")),
-    rt:wait_until_ring_converged(ANodes),
-
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    repl_util:enable_fullsync(LeaderA, "C"),
-    rt:wait_until_ring_converged(ANodes),
-    {LeaderA, ANodes, BNodes, CNodes, Nodes}.
-
-
-test_multiple_schedules() ->
-    Conf = [
-            {riak_core, [{ring_creation_size, 4}]},
-            {riak_repl,
-             [
-              {fullsync_on_connect, false},
-              {fullsync_interval, [{"B",1}, {"C",2}]}
-             ]}
-           ],
-    {LeaderA, _ANodes, _BNodes, _CNodes, AllNodes} =
-        setup_repl_clusters(Conf, fun install_v3_intercepts/1),
-    lager:info("Waiting for fullsyncs"),
-    wait_until_fullsyncs(LeaderA, "B", 5),
-    wait_until_fullsyncs(LeaderA, "C", 5),
-    rt:clean_cluster(AllNodes),
-    pass.
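test_multiple_schedules() above and test_single_schedule() below differ only in the shape of riak_repl's fullsync_interval setting, which accepts either a per-sink schedule or a single interval (in minutes) shared by every connected sink:

    %% Per-sink schedule: fullsync to "B" every minute, to "C" every two.
    {riak_repl, [{fullsync_on_connect, false},
                 {fullsync_interval, [{"B", 1}, {"C", 2}]}]}

    %% Shared interval: every sink fullsyncs on the same 99-minute timer.
    {riak_repl, [{fullsync_on_connect, false},
                 {fullsync_interval, 99}]}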
- -test_single_schedule() -> - Conf = [ - {riak_core, [{ring_creation_size, 4}]}, - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, 99} - ]} - ], - {LeaderA, _ANodes, _BNodes, _CNodes, AllNodes} = - setup_repl_clusters(Conf, fun install_v3_intercepts/1), - rt:log_to_nodes(AllNodes, "Test shared fullsync schedule from A -> [B,C]"), - %% let some msgs queue up, doesn't matter how long we wait - lager:info("Waiting for fullsyncs"), - wait_until_fullsyncs(LeaderA, "B", 10), - wait_until_fullsyncs(LeaderA, "C", 10), - rt:clean_cluster(AllNodes), - pass. - -test_mixed_12_13() -> - Conf = [ - {riak_core, [{ring_creation_size, 4}]}, - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, 99} - ]} - ], - {LeaderA, ANodes, BNodes, CNodes, AllNodes} = - setup_repl_clusters(Conf, fun install_mixed_intercepts/1), - - {_AFirst, BFirst, _CFirst} = get_firsts(AllNodes), - - repl_util:wait_until_leader_converge(ANodes), - repl_util:wait_until_leader_converge(BNodes), - repl_util:wait_until_leader_converge(CNodes), - - lager:info("Adding repl listener to cluster A"), - ListenerArgs = [[atom_to_list(LeaderA), "127.0.0.1", "9010"]], - Res = rpc:call(LeaderA, riak_repl_console, add_listener, ListenerArgs), - ?assertEqual(ok, Res), - - lager:info("Adding repl site to cluster B"), - SiteArgs = ["127.0.0.1", "9010", "rtmixed"], - Res = rpc:call(BFirst, riak_repl_console, add_site, [SiteArgs]), - - lager:info("Waiting for v2 repl to catch up. Good time to light up a cold can of Tab."), - wait_until_fullsyncs(LeaderA, "B", 3), - wait_until_fullsyncs(LeaderA, "C", 3), - wait_until_12_fs_complete(LeaderA, 9), - rt:clean_cluster(AllNodes), - pass. - - -confirm() -> - AllTests = [test_mixed_12_13(), test_multiple_schedules(), test_single_schedule()], - case lists:all(fun (Result) -> Result == pass end, AllTests) of - true -> pass; - false -> sadtrombone - end. - -wait_until_fullsyncs(Node, ClusterName, N) -> - Res = rt:wait_until(Node, - fun(_) -> - FS = get_cluster_fullsyncs(Node, ClusterName), - case FS of - {badrpc, _} -> - false; - undefined -> - false; - X when X >= N -> - true; - _ -> - false - end - end), - ?assertEqual(ok, Res). - -wait_until_12_fs_complete(Node, N) -> - rt:wait_until(Node, - fun(_) -> - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - case proplists:get_value(server_fullsyncs, Status) of - C when C >= N -> - true; - _ -> - false - end - end). - -get_firsts(Nodes) -> - {[AFirst|_] = _ANodes, Rest} = lists:split(2, Nodes), - {[BFirst|_] = _BNodes, [CFirst|_] = _CNodes} = lists:split(2, Rest), - {AFirst, BFirst, CFirst}. - -get_cluster_fullsyncs(Node, ClusterName) -> - Status = rpc:call(Node, riak_repl2_fscoordinator, status, []), - case proplists:lookup(ClusterName, Status) of - none -> 0; - {_, ClusterData} -> - case proplists:lookup(fullsyncs_completed, ClusterData) of - none -> 0; - FSC -> FSC - end - end. - -%% skip v2 repl interval checks -install_v3_intercepts(Nodes) -> - [rt_intercept:add(Node, {riak_repl_util, [{{start_fullsync_timer,3}, - interval_check_v3} - ]}) - || Node <- Nodes]. - -%% check v2 + v3 intervals -install_mixed_intercepts(Nodes) -> - [rt_intercept:add(Node, {riak_repl_util, [{{start_fullsync_timer,3}, - interval_check_v3}, - {{schedule_fullsync,1}, - interval_check_v2}]}) - || Node <- Nodes]. - diff --git a/tests/replication2_pg.erl b/tests/replication2_pg.erl deleted file mode 100644 index 6c9345875..000000000 --- a/tests/replication2_pg.erl +++ /dev/null @@ -1,1009 +0,0 @@ --module(replication2_pg). 
--export([confirm/0]). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% Test proxy_get in Default and Advanced mode of 1.3+ repl -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -setup_repl_clusters(Conf) -> - setup_repl_clusters(Conf, false). - -setup_repl_clusters(Conf, SSL) -> - NumNodes = 6, - lager:info("Deploy ~p nodes", [NumNodes]), - - CertDir = rt_config:get(rt_scratch_dir) ++ "/certs", - - %% make a bunch of crypto keys - make_certs:rootCA(CertDir, "rootCA"), - make_certs:intermediateCA(CertDir, "intCA", "rootCA"), - make_certs:endusers(CertDir, "rootCA", ["site3.basho.com", "site4.basho.com"]), - make_certs:endusers(CertDir, "intCA", ["site1.basho.com", "site2.basho.com"]), - - SSLConfig1 = [ - {riak_core, - [ - {ssl_enabled, true}, - {certfile, filename:join([CertDir, - "site1.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site1.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site1.basho.com/cacerts.pem"])} - ]} - ], - - SSLConfig2 = [ - {riak_core, - [ - {ssl_enabled, true}, - {certfile, filename:join([CertDir, - "site2.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site2.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site2.basho.com/cacerts.pem"])} - ]} - ], - - SSLConfig3 = [ - {riak_core, - [ - {ssl_enabled, true}, - {certfile, filename:join([CertDir, - "site3.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site3.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site3.basho.com/cacerts.pem"])} - ]} - ], - - - rt:set_advanced_conf(all, Conf), - Nodes = [ANodes, BNodes, CNodes] = rt:build_clusters([2, 2, 2]), - - rt:wait_for_cluster_service(ANodes, riak_repl), - rt:wait_for_cluster_service(BNodes, riak_repl), - rt:wait_for_cluster_service(CNodes, riak_repl), - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - CFirst = hd(CNodes), - - rt:log_to_nodes(Nodes, "Starting replication2_pg test"), - - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - lager:info("CNodes: ~p", [CNodes]), - - case SSL of - true -> - lager:info("Enabling SSL for this test"), - [rt:update_app_config(N, merge_config(SSLConfig1, Conf)) || - N <- ANodes], - [rt:update_app_config(N, merge_config(SSLConfig2, Conf)) || - N <- BNodes], - [rt:update_app_config(N, merge_config(SSLConfig3, Conf)) || - N <- CNodes]; - _ -> - lager:info("SSL not enabled for this test") - end, - - rt:log_to_nodes(Nodes, "Building and connecting clusters"), - - repl_util:name_cluster(AFirst, "A"), - repl_util:name_cluster(BFirst, "B"), - repl_util:name_cluster(CFirst, "C"), - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - rt:wait_until_ring_converged(CNodes), - - %% get the leader for the first cluster - repl_util:wait_until_leader(AFirst), - LeaderA = rpc:call(AFirst, riak_core_cluster_mgr, get_leader, []), - - {ok, {BIP, BPort}} = rpc:call(BFirst, application, get_env, - [riak_core, cluster_mgr]), - repl_util:connect_cluster(LeaderA, BIP, BPort), - - {ok, {CIP, CPort}} = rpc:call(CFirst, application, get_env, - [riak_core, cluster_mgr]), - repl_util:connect_cluster(LeaderA, CIP, CPort), - - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - rt:wait_until_ring_converged(ANodes), - - ?assertEqual(ok, 
repl_util:wait_for_connection(LeaderA, "C")), - rt:wait_until_ring_converged(ANodes), - - rt:wait_until_transfers_complete(ANodes), - rt:wait_until_transfers_complete(BNodes), - rt:wait_until_transfers_complete(CNodes), - - {LeaderA, ANodes, BNodes, CNodes, Nodes}. - - -make_test_object(Suffix) -> - Bucket = <<"test_bucket">>, - KeyText = "test_key" ++ Suffix, - ValueText = "testdata_" ++ Suffix, - Key = erlang:list_to_binary(KeyText), - Value = erlang:list_to_binary(ValueText), - {Bucket, Key, Value}. - - -test_basic_pg(Mode) -> - test_basic_pg(Mode, false). - -test_basic_pg(Mode, SSL) -> - banner(io_lib:format("test_basic_pg with ~p mode", [Mode]), SSL), - Conf = [ - {riak_repl, - [ - {proxy_get, enabled}, - {fullsync_on_connect, false} - ]} - ], - {LeaderA, ANodes, BNodes, CNodes, AllNodes} = - setup_repl_clusters(Conf, SSL), - rt:log_to_nodes(AllNodes, "Testing basic pg"), - - case Mode of - mode_repl13 -> - ModeRes = rpc:call(LeaderA, riak_repl_console, modes, [["mode_repl13"]]), - lager:info("ModeRes = ~p", [ModeRes]); - mixed -> - lager:info("Using mode_repl12, mode_repl13"), - ok - end, - rt:wait_until_ring_converged(ANodes), - - PGEnableResult = rpc:call(LeaderA, riak_repl_console, proxy_get, [["enable","B"]]), - lager:info("Enabled pg: ~p", [PGEnableResult]), - Status = rpc:call(LeaderA, riak_repl_console, status, [quiet]), - - case proplists:get_value(proxy_get_enabled, Status) of - undefined -> ?assert(false); - EnabledFor -> lager:info("PG enabled for cluster ~p",[EnabledFor]) - end, - - PidA = rt:pbc(LeaderA), - {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), - lager:info("Cluster ID for A = ~p", [CidA]), - - {Bucket, KeyA, ValueA} = make_test_object("a"), - {Bucket, KeyB, ValueB} = make_test_object("b"), - - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), - - _FirstA = hd(ANodes), - FirstB = hd(BNodes), - FirstC = hd(CNodes), - PidB = rt:pbc(FirstB), - lager:info("Connected to cluster B"), - {ok, PGResult} = riak_repl_pb_api:get(PidB,Bucket,KeyA,CidA), - ?assertEqual(ValueA, riakc_obj:get_value(PGResult)), - - rt:log_to_nodes(AllNodes, "Disabling pg on A"), - PGDisableResult = rpc:call(LeaderA, riak_repl_console, proxy_get, [["disable","B"]]), - lager:info("Disable pg ~p", [PGDisableResult]), - Status2 = rpc:call(LeaderA, riak_repl_console, status, [quiet]), - - ?assertEqual([], proplists:get_value(proxy_get_enabled, Status2)), - - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - %% After the clusters are disconnected, see if the object was - %% written locally after the PG - {ok, PG2Value} = riak_repl_pb_api:get(PidB,Bucket,KeyA,CidA), - - ?assertEqual(ValueA, riakc_obj:get_value(PG2Value)), - - %% test an object that wasn't previously "proxy-gotten", it should fail - FailedResult = riak_repl_pb_api:get(PidB,Bucket,KeyB,CidA), - ?assertEqual({error, notfound}, FailedResult), - - PGEnableResult2 = rpc:call(LeaderA, riak_repl_console, proxy_get, [["enable","B"]]), - lager:info("Enabled pg: ~p", [PGEnableResult2]), - Status3 = rpc:call(LeaderA, riak_repl_console, status, [quiet]), - - case proplists:get_value(proxy_get_enabled, Status3) of - undefined -> ?assert(false); - EnabledFor2 -> lager:info("PG enabled for cluster ~p",[EnabledFor2]) - end, - - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - {ok, PGResult2} = riak_repl_pb_api:get(PidB,Bucket,KeyA,CidA), - ?assertEqual(ValueA, riakc_obj:get_value(PGResult2)), - - %% Test with optional n_val and sloppy_quorum Options. 
- %% KeyB is not on C yet. Try via proxy get with above options. - - PGEnableResult3 = rpc:call(LeaderA, riak_repl_console, proxy_get, [["enable","C"]]), - lager:info("Enabled pg: ~p", [PGEnableResult3]), - Status4 = rpc:call(LeaderA, riak_repl_console, status, [quiet]), - - case proplists:get_value(proxy_get_enabled, Status4) of - undefined -> ?assert(false); - EnabledFor3 -> lager:info("PG enabled for cluster ~p",[EnabledFor3]) - end, - - PidC = rt:pbc(FirstC), - - Options = [{n_val, 1}, {sloppy_quorum, false}], - lager:info("Test proxy get from C using options: ~p", [Options]), - PGResult3 = riak_repl_pb_api:get(PidC,Bucket,KeyA,CidA,Options), - % it's ok if the first request fails due to the options, - % try it again without options to see if it passes - RetriableGet = case PGResult3 of - {ok, PGResult3Value} -> - riakc_obj:get_value(PGResult3Value); - {error, notfound} -> - RetryOptions = [{n_val, 1}], - case riak_repl_pb_api:get(PidC,Bucket,KeyA,CidA,RetryOptions) of - {ok, PGResult4Value} -> riakc_obj:get_value(PGResult4Value); - UnknownResult -> UnknownResult - end; - UnknownResult -> - %% welp, we might have been expecting a notfound, but we got - %% something else. - UnknownResult - end, - ?assertEqual(ValueA, RetriableGet), - - verify_topology_change(ANodes, BNodes), - - pass. - -%% test 1.2 replication (aka "Default" repl) -%% Mode is either mode_repl12 or mixed. -%% "mixed" is the default in 1.3: mode_repl12, mode_repl13 -test_12_pg(Mode) -> - test_12_pg(Mode, false). - -test_12_pg(Mode, SSL) -> - banner(io_lib:format("test_12_pg with ~p mode", [Mode]), SSL), - Conf = [ - {riak_repl, - [ - {proxy_get, enabled}, - {fullsync_on_connect, false} - ]} - ], - {LeaderA, ANodes, BNodes, CNodes, AllNodes} = - setup_repl_clusters(Conf, SSL), - - {Bucket, KeyA, ValueA} = make_test_object("a"), - {Bucket, KeyB, ValueB} = make_test_object("b"), - - rt:log_to_nodes(AllNodes, "Test 1.2 proxy_get"), - _FirstA = hd(ANodes), - FirstB = hd(BNodes), - _FirstC = hd(CNodes), - case Mode of - mode_repl12 -> - ModeRes = rpc:call(FirstB, riak_repl_console, modes, [["mode_repl12"]]), - lager:info("ModeRes = ~p", [ModeRes]); - mixed -> - lager:info("Using mode_repl12, mode_repl13"), - ok - end, - [rt:wait_until_ring_converged(Ns) || Ns <- [ANodes, BNodes, CNodes]], - - PidA = rt:pbc(LeaderA), - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), - - {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), - lager:info("Cluster ID for A = ~p", [CidA]), - - LeaderB = rpc:call(FirstB, riak_repl2_leader, leader_node, []), - rt:log_to_nodes([LeaderB], "Trying to use PG while it's disabled"), - PidB = rt:pbc(LeaderB), - ?assertEqual({error, notfound}, - riak_repl_pb_api:get(PidB, Bucket, KeyA, CidA)), - - rt:log_to_nodes([LeaderA], "Adding a listener"), - LeaderAIP = rt:get_ip(LeaderA), - ListenerArgs = [[atom_to_list(LeaderA), LeaderAIP, "5666"]], - Res = rpc:call(LeaderA, riak_repl_console, add_listener, ListenerArgs), - ?assertEqual(ok, Res), - - [rt:wait_until_ring_converged(Ns) || Ns <- [ANodes, BNodes, CNodes]], - - rt:log_to_nodes([FirstB], "Adding a site"), - SiteArgs = [LeaderAIP, "5666", "rtmixed"], - Res = rpc:call(FirstB, riak_repl_console, add_site, [SiteArgs]), - lager:info("Res = ~p", [Res]), - - rt:log_to_nodes(AllNodes, "Waiting until connected"), - wait_until_12_connection(LeaderA), - - [rt:wait_until_ring_converged(Ns) || Ns <- [ANodes, BNodes, CNodes]], - lager:info("Trying proxy_get"), - - LeaderB2 = rpc:call(FirstB, riak_repl2_leader, leader_node, []), - PidB2 = 
rt:pbc(LeaderB2), - {ok, PGResult} = riak_repl_pb_api:get(PidB2, Bucket, KeyB, CidA), - lager:info("PGResult: ~p", [PGResult]), - ?assertEqual(ValueB, riakc_obj:get_value(PGResult)), - - lager:info("Disable repl and wait for clusters to disconnect"), - - rt:log_to_nodes([LeaderA], "Delete listener"), - DelListenerArgs = [[atom_to_list(LeaderA), LeaderAIP, "5666"]], - DelListenerRes = rpc:call(LeaderA, riak_repl_console, del_listener, DelListenerArgs), - ?assertEqual(ok, DelListenerRes), - - [rt:wait_until_ring_converged(Ns) || Ns <- [ANodes, BNodes, CNodes]], - - rt:log_to_nodes([FirstB], "Delete site"), - DelSiteArgs = [LeaderAIP, "5666", "rtmixed"], - DelSiteRes = rpc:call(FirstB, riak_repl_console, add_site, [DelSiteArgs]), - lager:info("Res = ~p", [DelSiteRes]), - - rt:log_to_nodes(AllNodes, "Waiting until disconnected"), - wait_until_12_no_connection(LeaderA), - - [rt:wait_until_ring_converged(Ns) || Ns <- [ANodes, BNodes, CNodes]], - - rt:log_to_nodes(AllNodes, "Trying proxy_get without a connection"), - ?assertEqual({error, notfound}, - riak_repl_pb_api:get(PidB, Bucket, KeyA, CidA)), - pass. - -%% test shutting down nodes in source + sink clusters -test_pg_proxy() -> - test_pg_proxy(false). - -test_pg_proxy(SSL) -> - banner("test_pg_proxy", SSL), - Conf = [ - {riak_repl, - [ - {proxy_get, enabled}, - {fullsync_on_connect, false} - ]} - ], - {LeaderA, ANodes, BNodes, CNodes, AllNodes} = - setup_repl_clusters(Conf, SSL), - rt:log_to_nodes(AllNodes, "Testing pg proxy"), - rt:wait_until_ring_converged(ANodes), - - PGEnableResult = rpc:call(LeaderA, riak_repl_console, proxy_get, [["enable","B"]]), - lager:info("Enabled pg: ~p", [PGEnableResult]), - Status = rpc:call(LeaderA, riak_repl_console, status, [quiet]), - - case proplists:get_value(proxy_get_enabled, Status) of - undefined -> ?assert(false); - EnabledFor -> lager:info("PG enabled for cluster ~p",[EnabledFor]) - end, - - PidA = rt:pbc(LeaderA), - {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), - lager:info("Cluster ID for A = ~p", [CidA]), - - %% Write a new k/v for every PG test, otherwise you'll get a locally written response - {Bucket, KeyA, ValueA} = make_test_object("a"), - {Bucket, KeyB, ValueB} = make_test_object("b"), - {Bucket, KeyC, ValueC} = make_test_object("c"), - {Bucket, KeyD, ValueD} = make_test_object("d"), - - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), - rt:pbc_write(PidA, Bucket, KeyC, ValueC), - rt:pbc_write(PidA, Bucket, KeyD, ValueD), - %% sanity check. You know, like the 10000 tests that autoconf runs - %% before it actually does any work. 
-
-    FirstA = hd(ANodes),
-    FirstB = hd(BNodes),
-    _FirstC = hd(CNodes),
-    PidB = rt:pbc(FirstB),
-    lager:info("Connected to cluster B"),
-    {ok, PGResult} = riak_repl_pb_api:get(PidB,Bucket,KeyA,CidA),
-    ?assertEqual(ValueA, riakc_obj:get_value(PGResult)),
-
-    rt:wait_until_transfers_complete(ANodes),
-    rt:wait_until_transfers_complete(BNodes),
-
-    lager:info("Stopping leader on requester cluster"),
-    PGLeaderB = rpc:call(FirstB, riak_core_cluster_mgr, get_leader, []),
-    rt:log_to_nodes(AllNodes, "Killing leader on requester cluster"),
-    rt:stop(PGLeaderB),
-    [RunningBNode | _ ] = BNodes -- [PGLeaderB],
-    repl_util:wait_until_leader(RunningBNode),
-    PidB2 = rt:pbc(RunningBNode),
-    lager:info("Now trying proxy_get"),
-    ?assertEqual(ok, wait_until_pg(RunningBNode, PidB2, Bucket, KeyC, CidA)),
-    lager:info("If you got here, proxy_get worked after the pg block requesting leader was killed"),
-
-    lager:info("Stopping leader on provider cluster"),
-    PGLeaderA = rpc:call(FirstA, riak_core_cluster_mgr, get_leader, []),
-    rt:stop(PGLeaderA),
-    [RunningANode | _ ] = ANodes -- [PGLeaderA],
-    repl_util:wait_until_leader(RunningANode),
-    ?assertEqual(ok, wait_until_pg(RunningBNode, PidB2, Bucket, KeyD, CidA)),
-    lager:info("If you got here, proxy_get worked after the pg block providing leader was killed"),
-    lager:info("pg_proxy test complete. Time to obtain celebratory cheese sticks."),
-
-    pass.
-
-%% test mapping of cluster from a retired cluster to an active one, repl issue 306
-test_cluster_mapping() ->
-    test_cluster_mapping(false).
-
-test_cluster_mapping(SSL) ->
-    banner("test_cluster_mapping", SSL),
-    Conf = [
-            {riak_repl,
-             [
-              {proxy_get, enabled},
-              {fullsync_on_connect, false}
-             ]}
-           ],
-    {LeaderA, ANodes, BNodes, CNodes, _AllNodes} =
-        setup_repl_clusters(Conf, SSL),
-
-    _FirstA = hd(ANodes),
-    FirstB = hd(BNodes),
-    FirstC = hd(CNodes),
-    LeaderB = rpc:call(FirstB, riak_core_cluster_mgr, get_leader, []),
-    LeaderC = rpc:call(FirstC, riak_core_cluster_mgr, get_leader, []),
-
-    % Cluster C-> connection must be set up for the proxy gets to work
-    % with the cluster ID mapping
-    {ok, {CIP, CPort}} = rpc:call(FirstC, application, get_env,
-                                  [riak_core, cluster_mgr]),
-    repl_util:connect_cluster(LeaderB, CIP, CPort),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderB, "C")),
-
-    % enable A to serve blocks to C
-    PGEnableResultA = rpc:call(LeaderA, riak_repl_console, proxy_get, [["enable","C"]]),
-    % enable B to serve blocks to C
-    PGEnableResultB = rpc:call(LeaderB, riak_repl_console, proxy_get, [["enable","C"]]),
-
-    lager:info("Enabled pg to A:~p", [PGEnableResultA]),
-    lager:info("Enabled pg to B:~p", [PGEnableResultB]),
-
-    StatusA = rpc:call(LeaderA, riak_repl_console, status, [quiet]),
-    case proplists:get_value(proxy_get_enabled, StatusA) of
-        undefined -> ?assert(false);
-        EnabledForA -> lager:info("PG enabled for cluster ~p",[EnabledForA])
-    end,
-    StatusB = rpc:call(LeaderB, riak_repl_console, status, [quiet]),
-    case proplists:get_value(proxy_get_enabled, StatusB) of
-        undefined -> ?assert(false);
-        EnabledForB -> lager:info("PG enabled for cluster ~p",[EnabledForB])
-    end,
-
-    [rt:wait_until_ring_converged(Ns) || Ns <- [ANodes, BNodes, CNodes]],
-
-    PidA = rt:pbc(LeaderA),
-    {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA),
-    lager:info("Cluster ID for A = ~p", [CidA]),
-
-    PidB = rt:pbc(LeaderB),
-    {ok,CidB}=riak_repl_pb_api:get_clusterid(PidB),
-    lager:info("Cluster ID for B = ~p", [CidB]),
-
-    PidC = rt:pbc(LeaderC),
-    {ok,CidC}=riak_repl_pb_api:get_clusterid(PidC),
lager:info("Cluster ID for C = ~p", [CidC]), - - %% Write a new k/v for every PG test, otherwise you'll get a locally written response - {Bucket, KeyA, ValueA} = make_test_object("a"), - {Bucket, KeyB, ValueB} = make_test_object("b"), - {Bucket, KeyC, ValueC} = make_test_object("c"), - {Bucket, KeyD, ValueD} = make_test_object("d"), - - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), - rt:pbc_write(PidA, Bucket, KeyC, ValueC), - rt:pbc_write(PidA, Bucket, KeyD, ValueD), - - - {ok, PGResult} = riak_repl_pb_api:get(PidA,Bucket,KeyA,CidA), - ?assertEqual(ValueA, riakc_obj:get_value(PGResult)), - - % Configure cluster_mapping on C to map cluster_id A -> C - lager:info("Configuring cluster C to map its cluster_id to B's cluster_id"), - %rpc:call(LeaderC, riak_core_metadata, put, [{<<"replication">>, <<"cluster-mapping">>}, CidA, CidB]), - rpc:call(LeaderC, riak_repl_console, add_block_provider_redirect, [[CidA, CidB]]), - Res = rpc:call(LeaderC, riak_core_metadata, get, [{<<"replication">>, <<"cluster-mapping">>}, CidA]), - lager:info("result: ~p", [Res]), - - % full sync from CS Block Provider A to CS Block Provider B - repl_util:enable_fullsync(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - - {Time,_} = timer:tc(repl_util,start_and_wait_until_fullsync_complete,[LeaderA]), - lager:info("Fullsync completed in ~p seconds", [Time/1000/1000]), - - % shut down cluster A - lager:info("Shutting down cluster A"), - [ rt:stop(Node) || Node <- ANodes ], - [ rt:wait_until_unpingable(Node) || Node <- ANodes ], - - rt:wait_until_ring_converged(BNodes), - rt:wait_until_ring_converged(CNodes), - - % proxy-get from cluster C, using A's clusterID - % Should redirect requester C from Cid A, to Cid B, and still - % return the correct value for the Key - {ok, PGResultC} = riak_repl_pb_api:get(PidC, Bucket, KeyC, CidA), - lager:info("PGResultC: ~p", [PGResultC]), - ?assertEqual(ValueC, riakc_obj:get_value(PGResultC)), - - % now delete the redirect and make sure it's gone - rpc:call(LeaderC, riak_repl_console, delete_block_provider_redirect, [[CidA]]), - case rpc:call(LeaderC, riak_core_metadata, get, [{<<"replication">>, <<"cluster-mapping">>}, CidA]) of - undefined -> - lager:info("cluster-mapping no longer found in meta data, after delete, which is expected"); - Match -> - lager:info("cluster mapping ~p still in meta data after delete; problem!", [Match]), - ?assert(false) - end, - pass. - -%% connect source + sink clusters, pg bidirectionally -test_bidirectional_pg() -> - test_bidirectional_pg(false). 
- -test_bidirectional_pg(SSL) -> - banner("test_bidirectional_pg", SSL), - Conf = [ - {riak_repl, - [ - {proxy_get, enabled}, - {fullsync_on_connect, false} - ]} - ], - {LeaderA, ANodes, BNodes, CNodes, AllNodes} = - setup_repl_clusters(Conf, SSL), - rt:log_to_nodes(AllNodes, "Testing bidirectional proxy-get"), - - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - FirstA = hd(ANodes), - FirstB = hd(BNodes), - _FirstC = hd(CNodes), - - LeaderB = rpc:call(FirstB, riak_repl2_leader, leader_node, []), - - {ok, {AIP, APort}} = rpc:call(FirstA, application, get_env, - [riak_core, cluster_mgr]), - repl_util:connect_cluster(LeaderB, AIP, APort), - - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - PGEnableResultA = rpc:call(LeaderA, riak_repl_console, proxy_get, [["enable","B"]]), - PGEnableResultB = rpc:call(LeaderB, riak_repl_console, proxy_get, [["enable","A"]]), - - lager:info("Enabled bidirectional pg: ~p / ~p", [PGEnableResultA, PGEnableResultB]), - StatusA = rpc:call(LeaderA, riak_repl_console, status, [quiet]), - - case proplists:get_value(proxy_get_enabled, StatusA) of - undefined -> ?assert(false); - EnabledForA -> lager:info("PG enabled for cluster ~p",[EnabledForA]) - end, - - StatusB = rpc:call(LeaderB, riak_repl_console, status, [quiet]), - - case proplists:get_value(proxy_get_enabled, StatusB) of - undefined -> ?assert(false); - EnabledForB -> lager:info("PG enabled for cluster ~p",[EnabledForB]) - end, - - PidA = rt:pbc(LeaderA), - PidB = rt:pbc(FirstB), - - {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), - {ok,CidB}=riak_repl_pb_api:get_clusterid(PidB), - lager:info("Cluster ID for A = ~p", [CidA]), - lager:info("Cluster ID for B = ~p", [CidB]), - - {Bucket, KeyA, ValueA} = make_test_object("a"), - {Bucket, KeyB, ValueB} = make_test_object("b"), - - %% write some data to cluster A - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - - %% write some data to cluster B - rt:pbc_write(PidB, Bucket, KeyB, ValueB), - - lager:info("Trying first get"), - wait_until_pg(LeaderB, PidB, Bucket, KeyA, CidA), - lager:info("First get worked"), - - lager:info("Trying second get"), - wait_until_pg(LeaderA, PidA, Bucket, KeyB, CidB), - lager:info("Second get worked"), - - verify_topology_change(ANodes, BNodes), - - pass. - -%% Test multiple sinks against a single source -test_multiple_sink_pg() -> - test_multiple_sink_pg(false).
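%% Every test in this module enables proxy_get the same way: an rpc to
%% riak_repl_console:proxy_get/1 followed by a status check. A hedged
%% consolidation sketch (helper name hypothetical) of that repeated pattern:
enable_pg(Leader, SinkName) ->
    Res = rpc:call(Leader, riak_repl_console, proxy_get,
                   [["enable", SinkName]]),
    lager:info("Enabled pg to ~s: ~p", [SinkName, Res]),
    Status = rpc:call(Leader, riak_repl_console, status, [quiet]),
    %% a missing proxy_get_enabled entry in status means enabling failed
    ?assertNotEqual(undefined,
                    proplists:get_value(proxy_get_enabled, Status)).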
- -test_multiple_sink_pg(SSL) -> - banner("test_multiple_sink_pg", SSL), - Conf = [ - {riak_repl, - [ - {proxy_get, enabled}, - {fullsync_on_connect, false} - ]} - ], - {LeaderA, ANodes, BNodes, CNodes, AllNodes} = - setup_repl_clusters(Conf, SSL), - rt:log_to_nodes(AllNodes, "Testing basic pg"), - - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - rt:wait_until_ring_converged(CNodes), - - PGEnableResultB = rpc:call(LeaderA, riak_repl_console, proxy_get, [["enable","B"]]), - PGEnableResultC = rpc:call(LeaderA, riak_repl_console, proxy_get, [["enable","C"]]), - - lager:info("Enabled pg to B:~p", [PGEnableResultB]), - lager:info("Enabled pg to C:~p", [PGEnableResultC]), - Status = rpc:call(LeaderA, riak_repl_console, status, [quiet]), - - case proplists:get_value(proxy_get_enabled, Status) of - undefined -> ?assert(false); - EnabledForC -> lager:info("PG enabled for cluster ~p",[EnabledForC]) - end, - - PidA = rt:pbc(LeaderA), - {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), - lager:info("Cluster ID for A = ~p", [CidA]), - - {Bucket, KeyA, ValueA} = make_test_object("a"), - {Bucket, KeyB, ValueB} = make_test_object("b"), - - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), - - _FirstA = hd(ANodes), - FirstB = hd(BNodes), - FirstC = hd(CNodes), - - PidB = rt:pbc(FirstB), - PidC = rt:pbc(FirstC), - - {ok, PGResultB} = riak_repl_pb_api:get(PidB,Bucket,KeyA,CidA), - ?assertEqual(ValueA, riakc_obj:get_value(PGResultB)), - - {ok, PGResultC} = riak_repl_pb_api:get(PidC,Bucket,KeyB,CidA), - ?assertEqual(ValueB, riakc_obj:get_value(PGResultC)), - - pass. - -%% test 1.2 + 1.3 repl being used at the same time -test_mixed_pg() -> - test_mixed_pg(false). - -test_mixed_pg(SSL) -> - banner("test_mixed_pg", SSL), - Conf = [ - {riak_repl, - [ - {proxy_get, enabled}, - {fullsync_on_connect, false} - ]} - ], - {LeaderA, ANodes, BNodes, CNodes, AllNodes} = - setup_repl_clusters(Conf, SSL), - rt:log_to_nodes(AllNodes, "Testing basic pg"), - - rt:wait_until_ring_converged(ANodes), - - PGEnableResult = rpc:call(LeaderA, riak_repl_console, proxy_get, [["enable","B"]]), - lager:info("Enabled pg: ~p", [PGEnableResult]), - Status = rpc:call(LeaderA, riak_repl_console, status, [quiet]), - - case proplists:get_value(proxy_get_enabled, Status) of - undefined -> ?assert(false); - EnabledFor -> lager:info("PG enabled for cluster ~p",[EnabledFor]) - end, - - PidA = rt:pbc(LeaderA), - {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), - lager:info("Cluster ID for A = ~p", [CidA]), - - {Bucket, KeyB, ValueB} = make_test_object("b"), - {Bucket, KeyC, ValueC} = make_test_object("c"), - - rt:pbc_write(PidA, Bucket, KeyB, ValueB), - rt:pbc_write(PidA, Bucket, KeyC, ValueC), - - _FirstA = hd(ANodes), - FirstB = hd(BNodes), - FirstC = hd(CNodes), - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - rt:log_to_nodes([LeaderA], "Adding a listener"), - ListenerIP = rt:get_ip(LeaderA), - ListenerArgs = [[atom_to_list(LeaderA), ListenerIP, "5666"]], - Res = rpc:call(LeaderA, riak_repl_console, add_listener, ListenerArgs), - ?assertEqual(ok, Res), - - [rt:wait_until_ring_converged(Ns) || Ns <- [ANodes, BNodes, CNodes]], - - rt:log_to_nodes([FirstC], "Adding a site"), - SiteArgs = [ListenerIP, "5666", "rtmixed"], - Res = rpc:call(FirstC, riak_repl_console, add_site, [SiteArgs]), - lager:info("Res = ~p", [Res]), - - rt:log_to_nodes(AllNodes, "Waiting until connected"), - wait_until_12_connection(LeaderA), - - [rt:wait_until_ring_converged(Ns) || 
Ns <- [ANodes, BNodes, CNodes]], - lager:info("Trying proxy_get"), - - LeaderC = rpc:call(FirstC, riak_repl2_leader, leader_node, []), - PidB = rt:pbc(FirstB), - PidC = rt:pbc(LeaderC), - - {ok, PGResultB} = riak_repl_pb_api:get(PidB, Bucket, KeyB, CidA), - lager:info("PGResultB: ~p", [PGResultB]), - ?assertEqual(ValueB, riakc_obj:get_value(PGResultB)), - - {ok, PGResultC} = riak_repl_pb_api:get(PidC, Bucket, KeyC, CidA), - lager:info("PGResultC: ~p", [PGResultC]), - ?assertEqual(ValueC, riakc_obj:get_value(PGResultC)), - - pass. - - -wait_until_12_connection(Node) -> - rt:wait_until(Node, - fun(_) -> - case rpc:call(Node, riak_repl_console, status, [quiet]) of - {badrpc, _} -> - false; - Status -> - case proplists:get_value(server_stats, Status) of - [] -> - false; - [{_, _, too_busy}] -> - false; - [_C] -> - true; - Conns -> - lager:warning("multiple connections detected: ~p", - [Conns]), - true - end - end - end). %% 40 seconds is enough for repl - -wait_until_12_no_connection(Node) -> - rt:wait_until(Node, - fun(_) -> - case rpc:call(Node, riak_repl_console, status, [quiet]) of - {badrpc, _} -> - false; - Status -> - case proplists:get_value(server_stats, Status) of - undefined -> - true; - [] -> - true; - [{_, _, too_busy}] -> - false; - [_C] -> - false; - Conns -> - lager:warning("multiple connections detected: ~p", - [Conns]), - false - end - end - end). %% 40 seconds is enough for repl - - - -%% these funs allow you to call: -%% riak_test -t replication2_pg:test_basic_pg_mode_repl13 etc -test_basic_pg_mode_repl13() -> - test_basic_pg(mode_repl13). - -test_basic_pg_mode_mixed() -> - test_basic_pg(mixed). - -test_12_pg_mode_repl12() -> - test_12_pg(mode_repl12). - -test_12_pg_mode_repl_mixed() -> - test_12_pg(mixed). - - -test_basic_pg_mode_repl13_ssl() -> - test_basic_pg(mode_repl13, true). - -test_basic_pg_mode_mixed_ssl() -> - test_basic_pg(mixed, true). - -test_12_pg_mode_repl12_ssl() -> - test_12_pg(mode_repl12, true). - -test_12_pg_mode_repl_mixed_ssl() -> - test_12_pg(mixed, true). - -test_mixed_pg_ssl() -> - test_mixed_pg(true). - -test_multiple_sink_pg_ssl() -> - test_multiple_sink_pg(true). - -test_bidirectional_pg_ssl() -> - test_bidirectional_pg(true). - -test_pg_proxy_ssl() -> - test_pg_proxy(true). - -confirm() -> - AllTests = - [ - test_basic_pg_mode_repl13, - test_basic_pg_mode_mixed, - test_12_pg_mode_repl12, - test_12_pg_mode_repl_mixed, - test_mixed_pg, - test_multiple_sink_pg, - test_bidirectional_pg, - test_cluster_mapping, - test_pg_proxy, - test_basic_pg_mode_repl13_ssl, - test_basic_pg_mode_mixed_ssl, - test_12_pg_mode_repl12_ssl, - test_12_pg_mode_repl_mixed_ssl, - test_mixed_pg_ssl, - test_multiple_sink_pg_ssl, - test_bidirectional_pg_ssl, - test_pg_proxy_ssl - - ], - lager:error("run riak_test with -t Mod:test1 -t Mod:test2"), - lager:error("The runnable tests in this module are: ~p", [AllTests]), - %% TODO: The problem with this LC is that it doesn't incorporate any - %% of riak_test's setup/teardown per test. - [?assertEqual(pass, erlang:apply(?MODULE, Test, [])) || Test <- AllTests]. - -banner(T) -> - banner(T, false). - -banner(T, SSL) -> - lager:info("----------------------------------------------"), - lager:info("----------------------------------------------"), - lager:info("~s, SSL ~s",[T, SSL]), - lager:info("----------------------------------------------"), - lager:info("----------------------------------------------"). 
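%% wait_until_12_connection/1 and wait_until_12_no_connection/1 above differ
%% only in which server_stats shapes count as success. A hedged merge sketch
%% (function name hypothetical; warning logs elided; it treats an absent
%% server_stats entry as "not connected", which the connected variant above
%% did not handle explicitly):
wait_until_12_conn_state(Node, WantConnected) ->
    rt:wait_until(Node,
                  fun(_) ->
                      case rpc:call(Node, riak_repl_console, status, [quiet]) of
                          {badrpc, _} ->
                              false;
                          Status ->
                              case proplists:get_value(server_stats, Status) of
                                  [{_, _, too_busy}] -> false; %% transient; keep polling
                                  undefined -> not WantConnected;
                                  []        -> not WantConnected;
                                  _Conns    -> WantConnected
                              end
                      end
                  end).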
- -wait_until_pg(Node, Pid, Bucket, Key, Cid) -> - rt:wait_until(Node, - fun(_) -> - case riak_repl_pb_api:get(Pid,Bucket,Key,Cid) of - {error, notfound} -> - false; - {ok, _Value} -> true; - _ -> false - end - end). - -merge_config(Mixin, Base) -> - lists:ukeymerge(1, lists:keysort(1, Mixin), lists:keysort(1, Base)). - -verify_topology_change(SourceNodes, SinkNodes) -> - lager:info("Verify topology changes don't break proxy_get."), - - %% Get connections - [SourceNode1, _SourceNode2] = SourceNodes, - SourceNode1Pid = rt:pbc(SourceNode1), - [SinkNode1, SinkNode2] = SinkNodes, - SinkNode1Pid = rt:pbc(SinkNode1), - {ok, SourceCid} = riak_repl_pb_api:get_clusterid(SourceNode1Pid), - - %% Write new object to source. - lager:info("Writing key 'before' to the source."), - {Bucket, KeyBefore, ValueBefore} = make_test_object("before"), - rt:pbc_write(SourceNode1Pid, Bucket, KeyBefore, ValueBefore), - - %% Verify proxy_get through the sink works. - lager:info("Verifying key 'before' can be read through the sink."), - {ok, PGResult1} = riak_repl_pb_api:get(SinkNode1Pid, - Bucket, KeyBefore, SourceCid), - ?assertEqual(ValueBefore, riakc_obj:get_value(PGResult1)), - - %% Remove leader from the sink cluster. - SinkLeader = rpc:call(SinkNode1, - riak_repl2_leader, leader_node, []), - - %% Sad this takes 2.5 minutes - lager:info("Removing current leader from the cluster: ~p.", - [SinkLeader]), - rt:leave(SinkLeader), - ?assertEqual(ok, rt:wait_until_unpingable(SinkLeader)), - - %% Wait for everything to restart, and rings to converge. - lager:info("Starting leader node back up and waiting for repl."), - rt:start(SinkLeader), - rt:wait_for_service(SinkLeader, riak_repl), - rt:wait_until_ring_converged(SinkNodes), - - %% Assert the nodes have different leaders (each is its own leader). - lager:info("Ensure that each node is its own leader."), - SinkNode1Leader = rpc:call(SinkNode1, - riak_repl2_leader, leader_node, []), - SinkNode2Leader = rpc:call(SinkNode2, - riak_repl2_leader, leader_node, []), - ?assertEqual(SinkNode1, SinkNode1Leader), - ?assertEqual(SinkNode2, SinkNode2Leader), - ?assertNotEqual(SinkNode1Leader, SinkNode2Leader), - - %% Before we join the nodes, install an intercept on all nodes for - %% the leader election callback. - lager:info("Installing set_leader_node intercept."), - Result = riak_repl2_leader_intercepts:set_leader_node(SinkLeader), - - lager:info("riak_repl2_leader_intercepts:set_leader_node(~p) = ~p", [SinkLeader, Result]), - [ begin - rt_intercept:load_code(N), - ok = rt_intercept:add(N, {riak_repl2_leader, [{{set_leader,3}, set_leader_node}]}) - end || N <- SinkNodes ], - - %% Restart former leader and rejoin it to the cluster. - lager:info("Rejoining former leader."), - case SinkLeader of - SinkNode1 -> - rt:join(SinkNode1, SinkNode2); - SinkNode2 -> - rt:join(SinkNode2, SinkNode1) - end, - rt:wait_until_ring_converged(SinkNodes), - - %% Assert that all nodes have the same leader. - lager:info("Assert that all nodes have the same leader."), - SinkNode1LeaderRejoin = rpc:call(SinkNode1, - riak_repl2_leader, leader_node, []), - SinkNode2LeaderRejoin = rpc:call(SinkNode2, - riak_repl2_leader, leader_node, []), - ?assertEqual(SinkNode1LeaderRejoin, SinkNode2LeaderRejoin), - - %% Assert that the leader is the former leader. - lager:info("Assert that new leader is the former leader."), - ?assertEqual(SinkLeader, SinkNode1LeaderRejoin), - - %% Write new object to source.
- lager:info("Writing key 'after' to the source."), - {ok, SourceCid} = riak_repl_pb_api:get_clusterid(SourceNode1Pid), - {Bucket, KeyPost, ValuePost} = make_test_object("after"), - rt:pbc_write(SourceNode1Pid, Bucket, KeyPost, ValuePost), - - %% Verify we can retrieve from source. - lager:info("Verifying key 'after' can be read through the source."), - {ok, PGResult2} = riak_repl_pb_api:get(SourceNode1Pid, - Bucket, KeyPost, SourceCid), - ?assertEqual(ValuePost, riakc_obj:get_value(PGResult2)), - - %% Verify proxy_get through the sink works. - lager:info("Verifying key 'after' can be read through the sink."), - wait_until_pg(SinkNode1, SinkNode1Pid, Bucket, KeyPost, SourceCid), - - %% We're good! - pass. diff --git a/tests/replication2_rt_sink_connection.erl b/tests/replication2_rt_sink_connection.erl deleted file mode 100644 index 4c38a7d82..000000000 --- a/tests/replication2_rt_sink_connection.erl +++ /dev/null @@ -1,123 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% - --module(replication2_rt_sink_connection). --behaviour(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(HB_TIMEOUT, 2000). - -confirm() -> - NumNodes = rt_config:get(num_nodes, 6), - - lager:info("Deploy ~p nodes", [NumNodes]), - Conf = [ - {riak_repl, - [ - %% turn off fullsync - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - %% override defaults for RT heartbeat so that we - %% can see faults sooner and have a quicker test. 
- {rt_heartbeat_interval, ?HB_TIMEOUT}, - {rt_heartbeat_timeout, ?HB_TIMEOUT} - ]} - ], - - Nodes = rt:deploy_nodes(NumNodes, Conf, [riak_kv, riak_repl]), - {ANodes, Rest} = lists:split(2, Nodes), - {BNodes, CNodes} = lists:split(2, Rest), - - lager:info("Loading intercepts."), - CNode = hd(CNodes), - rt_intercept:load_code(CNode), - rt_intercept:add(CNode, {riak_repl_ring_handler, - [{{handle_event, 2}, slow_handle_event}]}), - - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - lager:info("CNodes: ~p", [CNodes]), - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - - % lager:info("Waiting for cluster A to converge"), - % rt:wait_until_ring_converged(ANodes), - - % lager:info("Waiting for cluster B to converge"), - % rt:wait_until_ring_converged(BNodes), - - lager:info("waiting for leader to converge on cluster A"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - AFirst = hd(ANodes), - - lager:info("waiting for leader to converge on cluster B"), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - BFirst = hd(BNodes), - - lager:info("Naming A"), - repl_util:name_cluster(AFirst, "A"), - - lager:info("Naming B"), - repl_util:name_cluster(BFirst, "B"), - - connect_clusters(AFirst, BFirst), - - enable_rt(AFirst, ANodes), - - lager:info("Adding 4th node to the A cluster"), - rt:join(CNode, AFirst), - - [verify_connectivity(Node) || Node <- ANodes], - - verify_connectivity(CNode), - - pass. - -%% @doc Verify connectivity between sources and sink. -verify_connectivity(Node) -> - rt:wait_until(Node, fun(N) -> - {ok, Connections} = rpc:call(N, - riak_core_cluster_mgr, - get_connections, - []), - lager:info("Waiting for sink connections on ~p: ~p.", - [Node, Connections]), - Connections =/= [] - end). - -%% @doc Connect two clusters for replication using their respective leader nodes. -connect_clusters(LeaderA, LeaderB) -> - {ok, {_IP, Port}} = rpc:call(LeaderB, application, get_env, - [riak_core, cluster_mgr]), - lager:info("Connect cluster A:~p to B on port ~p", [LeaderA, Port]), - repl_util:connect_cluster(LeaderA, "127.0.0.1", Port). - -%% @doc Turn on Realtime replication on the cluster lead by LeaderA. -%% The clusters must already have been named and connected. -enable_rt(LeaderA, ANodes) -> - lager:info("Enabling RT replication: ~p ~p.", [LeaderA, ANodes]), - repl_util:enable_realtime(LeaderA, "B"), - repl_util:start_realtime(LeaderA, "B"). diff --git a/tests/replication2_ssl.erl b/tests/replication2_ssl.erl deleted file mode 100644 index c080b424e..000000000 --- a/tests/replication2_ssl.erl +++ /dev/null @@ -1,390 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012-2015 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(replication2_ssl). 
--behavior(riak_test). - --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - -%% Certificate Names --define(DEF_DOM, ".basho.com"). --define(DOM_WC, "*" ++ ?DEF_DOM). --define(BAD_WC, "*.bahso.com"). --define(CERTN(S), S ++ ?DEF_DOM). --define(SITEN(N), ?CERTN("site" ++ ??N)). --define(CERTP(S), filename:join(CertDir, S)). - -%% Certificate Information --record(ci, { - cn, %% common name of the certificate - rd = 0, %% required ssl_depth - wc = ?DOM_WC, %% acceptable *.domain wildcard - ssl %% options returned from ssl_paths -}). - -%% -%% @doc Tests various TLS (SSL) connection scenarios for MDC. -%% The following configuration options are recognized: -%% -%% num_nodes [default 6] -%% How many nodes to use to build two clusters. -%% -%% cluster_a_size [default (num_nodes div 2)] -%% How many nodes to use in cluster "A". The remainder is used in cluster "B". -%% -%% conn_fail_time [default rt_max_wait_time] -%% A (presumably shortened) timeout to use in tests where the connection is -%% expected to be rejected due to invalid TLS configurations. Something around -%% one minute is appropriate. Using the default ten-minute timeout, this -%% test will take more than an hour and a half to run successfully. -%% -confirm() -> - - %% test requires allow_mult=false - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), - - NumNodes = rt_config:get(num_nodes, 6), - ClusterASize = rt_config:get(cluster_a_size, (NumNodes div 2)), - - CertDir = rt_config:get(rt_scratch_dir) ++ "/certs", - - %% make some CAs - make_certs:rootCA(CertDir, "CA_0"), - make_certs:intermediateCA(CertDir, "CA_1", "CA_0"), - make_certs:intermediateCA(CertDir, "CA_2", "CA_1"), - - %% make a bunch of certificates and matching ci records - S1Name = ?SITEN(1), - S2Name = ?SITEN(2), - S3Name = ?SITEN(3), - S4Name = ?SITEN(4), - S5Name = ?SITEN(5), - S6Name = ?SITEN(6), - W1Name = ?CERTN("wildcard1"), - W2Name = ?CERTN("wildcard2"), - - make_certs:endusers(CertDir, "CA_0", [S1Name, S2Name]), - CIdep0s1 = #ci{cn = S1Name, rd = 0, ssl = ssl_paths(?CERTP(S1Name))}, - CIdep0s2 = #ci{cn = S2Name, rd = 0, ssl = ssl_paths(?CERTP(S2Name))}, - - make_certs:endusers(CertDir, "CA_1", [S3Name, S4Name]), - CIdep1s1 = #ci{cn = S3Name, rd = 1, ssl = ssl_paths(?CERTP(S3Name))}, - CIdep1s2 = #ci{cn = S4Name, rd = 1, ssl = ssl_paths(?CERTP(S4Name))}, - - make_certs:endusers(CertDir, "CA_2", [S5Name, S6Name]), - CIdep2s1 = #ci{cn = S5Name, rd = 2, ssl = ssl_paths(?CERTP(S5Name))}, - CIdep2s2 = #ci{cn = S6Name, rd = 2, ssl = ssl_paths(?CERTP(S6Name))}, - - make_certs:enduser(CertDir, "CA_1", ?DOM_WC, W1Name), - CIdep1wc = #ci{cn = ?DOM_WC, rd = 1, ssl = ssl_paths(?CERTP(W1Name))}, - - make_certs:enduser(CertDir, "CA_2", ?DOM_WC, W2Name), - CIdep2wc = #ci{cn = ?DOM_WC, rd = 2, ssl = ssl_paths(?CERTP(W2Name))}, - - % crufty old certs really need to be replaced - CIexpired = #ci{cn = "ny.cataclysm-software.net", rd = 0, - wc = "*.cataclysm-software.net", ssl = ssl_paths( - filename:join([rt:priv_dir(), "certs", "cacert.org"]), - "ny-cert-old.pem", "ny-key.pem", "ca")}, - - lager:info("Deploy ~p nodes", [NumNodes]), - - ConfRepl = {riak_repl, - [{fullsync_on_connect, false}, {fullsync_interval, disabled}]}, - - ConfTcpBasic = [ConfRepl, {riak_core, [{ssl_enabled, false}]}], - - %% - %% !!! IMPORTANT !!! - %% Properties added to node configurations CANNOT currently be removed, - %% only overwritten. As such, configurations that include ACLs MUST come - %% after ALL non-ACL configurations!
This has been learned the hard way :( - %% The same applies to the ssl_depth option, though it's much easier to - %% contend with - make sure it works, then always use a valid depth. - %% - - %% - %% Connection Test descriptors - %% Each is a tuple: {Description, Node1Config, Node2Config, Should Pass} - %% - SslConnTests = [ - %% - %% basic tests - %% - {"non-SSL peer fails", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep0s1#ci.ssl }], - ConfTcpBasic, - false}, - {"non-SSL local fails", - ConfTcpBasic, - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep0s2#ci.ssl }], - false}, - {"basic SSL connectivity", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep0s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep0s2#ci.ssl }], - true}, - {"expired peer certificate fails", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep0s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIexpired#ci.ssl }], - false}, - {"expired local certificate fails", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIexpired#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep0s2#ci.ssl }], - false}, - {"identical certificate CN is disallowed", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep0s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep0s1#ci.ssl }], - false}, - {"identical wildcard certificate CN is allowed", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep1wc#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep1wc#ci.ssl }], - true}, - {"SSL connectivity with one intermediate CA is allowed by default", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep1s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep1s2#ci.ssl }], - true}, - {"SSL connectivity with two intermediate CAs is disallowed by default", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep2s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep2s2#ci.ssl }], - false}, - {"wildcard certificates on both ends", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep1wc#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep1wc#ci.ssl }], - true}, - {"wildcard certificate on one end", - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep1wc#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - ] ++ CIdep0s1#ci.ssl }], - true}, - %% - %% first use of ssl_depth, all subsequent tests must specify - %% - {"disallowing intermediate CA setting allows direct-signed certs", - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, 0} - ] ++ CIdep0s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, 0} - ] ++ CIdep0s2#ci.ssl }], - true}, - {"disallowing intermediate CA disallows intermediate-signed peer", - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, 0} - ] ++ CIdep0s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep0s1#ci.rd} - ] ++ CIdep1s2#ci.ssl }], - false}, - {"disallowing intermediate CA disallows intermediate-signed local", - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep0s2#ci.rd} - ] ++ CIdep1s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, 0} - ] ++ CIdep0s2#ci.ssl }], - false}, - {"allow arbitrary-depth intermediate CAs", - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep2s2#ci.rd} - ] ++ CIdep2s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep2s1#ci.rd} - ] ++ 
CIdep2s2#ci.ssl }], - true}, - %% - %% first use of peer_common_name_acl, all subsequent tests must specify - %% - {"wildcard certificate on one end with matching ACL", - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep1s1#ci.rd} - ] ++ CIdep1wc#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep1wc#ci.rd} - , {peer_common_name_acl, [?DOM_WC]} - ] ++ CIdep1s1#ci.ssl }], - true}, - {"wildcard certificate on one end with mismatched ACL", - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep1s1#ci.rd} - ] ++ CIdep1wc#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep1wc#ci.rd} - , {peer_common_name_acl, [?BAD_WC]} - ] ++ CIdep1s1#ci.ssl }], - false}, - {"one wildcard ACL and one strict ACL", - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep1s2#ci.rd} - , {peer_common_name_acl, [?DOM_WC]} - ] ++ CIdep1s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep1s1#ci.rd} - , {peer_common_name_acl, [CIdep1s1#ci.cn]} - ] ++ CIdep1s2#ci.ssl }], - true}, - {"wildcard certificates on both ends with ACLs", - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep2wc#ci.rd} - , {peer_common_name_acl, [CIdep2wc#ci.wc]} - ] ++ CIdep1wc#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep1wc#ci.rd} - , {peer_common_name_acl, [CIdep1wc#ci.wc]} - ] ++ CIdep2wc#ci.ssl }], - true}, - {"explicit certificates with strict ACLs", - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep2s2#ci.rd} - , {peer_common_name_acl, [CIdep2s2#ci.cn]} - ] ++ CIdep1s1#ci.ssl }], - [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep1s1#ci.rd} - , {peer_common_name_acl, [CIdep1s1#ci.cn]} - ] ++ CIdep2s2#ci.ssl }], - true} - ], - - lager:info("Deploying 2 nodes for connectivity tests"), - - [Node1, Node2] = rt:deploy_nodes(2, ConfTcpBasic, [riak_kv, riak_repl]), - - repl_util:name_cluster(Node1, "A"), - repl_util:name_cluster(Node2, "B"), - - %% we'll need to wait for cluster names before continuing - rt:wait_until_ring_converged([Node1]), - rt:wait_until_ring_converged([Node2]), - - rt:wait_for_service(Node1, [riak_kv, riak_repl]), - rt:wait_for_service(Node2, [riak_kv, riak_repl]), - - lager:info("=== Testing basic connectivity"), - rt:log_to_nodes([Node1, Node2], "Testing basic connectivity"), - - {ok, {_IP, Port}} = rpc:call(Node2, application, get_env, - [riak_core, cluster_mgr]), - lager:info("connect cluster A:~p to B on port ~p", [Node1, Port]), - rt:log_to_nodes([Node1, Node2], "connect A to B"), - repl_util:connect_cluster(Node1, "127.0.0.1", Port), - lager:info("Waiting for connection to B"), - - ?assertEqual(ok, repl_util:wait_for_connection(Node1, "B")), - - %% run each of the SSL connectivity tests - lists:foreach(fun({Desc, Conf1, Conf2, ShouldPass}) -> - test_connection(Desc, {Node1, Conf1}, {Node2, Conf2}, ShouldPass) - end, SslConnTests), - - lager:info("Connectivity tests passed"), - - repl_util:disconnect_cluster(Node1, "B"), - - lager:info("Re-deploying 6 nodes"), - - Nodes = rt:deploy_nodes(6, ConfTcpBasic, [riak_kv, riak_repl]), - - [rt:wait_until_pingable(N) || N <- Nodes], - - {ANodes, BNodes} = lists:split(ClusterASize, Nodes), - - lager:info("Reconfiguring nodes with SSL options"), - ConfANodes = [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, CIdep1s2#ci.rd} - , {peer_common_name_acl, [CIdep1s2#ci.cn]} - ] ++ CIdep1s1#ci.ssl }], - ConfBNodes = [ConfRepl, {riak_core, [{ssl_enabled, true} - , {ssl_depth, 
CIdep1s1#ci.rd} - , {peer_common_name_acl, [CIdep1s1#ci.cn]} - ] ++ CIdep1s2#ci.ssl }], - [rt:update_app_config(N, ConfANodes) || N <- ANodes], - [rt:update_app_config(N, ConfBNodes) || N <- BNodes], - - [rt:wait_until_pingable(N) || N <- Nodes], - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - - repl_util:disconnect_cluster(Node1, "B"), - - replication2:replication(ANodes, BNodes, false), - - pass. - -test_connection(Desc, {N1, C1}, {N2, C2}, ShouldPass) -> - lager:info("=== Testing " ++ Desc), - rt:log_to_nodes([N1, N2], "Testing " ++ Desc), - test_connection({N1, C1}, {N2, C2}, ShouldPass). - -test_connection(Left, Right, true) -> - ?assertEqual(ok, test_connection(Left, Right)), - lager:info("Connection succeeded"); -test_connection(Left, Right, false) -> - DefaultTimeout = rt_config:get(rt_max_wait_time), - ConnFailTimeout = rt_config:get(conn_fail_time, DefaultTimeout), - rt_config:set(rt_max_wait_time, ConnFailTimeout), - ?assertMatch({fail, _}, test_connection(Left, Right)), - rt_config:set(rt_max_wait_time, DefaultTimeout), - lager:info("Connection rejected"). - -test_connection({Node1, Config1}, {Node2, Config2}) -> - repl_util:disconnect_cluster(Node1, "B"), - repl_util:wait_for_disconnect(Node1, "B"), - rt:update_app_config(Node2, Config2), - rt:wait_until_pingable(Node2), - rt:update_app_config(Node1, Config1), - rt:wait_until_pingable(Node1), - rt:wait_for_service(Node1, [riak_kv, riak_repl]), - rt:wait_for_service(Node2, [riak_kv, riak_repl]), - {ok, {_IP, Port}} = rpc:call(Node2, application, get_env, - [riak_core, cluster_mgr]), - lager:info("connect cluster A:~p to B on port ~p", [Node1, Port]), - rt:log_to_nodes([Node1, Node2], "connect A to B"), - repl_util:connect_cluster(Node1, "127.0.0.1", Port), - repl_util:wait_for_connection(Node1, "B"). - -ssl_paths(Dir) -> - ssl_paths(Dir, "cert.pem", "key.pem", "cacerts.pem"). -ssl_paths(Dir, Cert, Key, CaCerts) -> - [{certfile, filename:join(Dir, Cert)} - ,{keyfile, filename:join(Dir, Key)} - ,{cacertdir, filename:join(Dir, CaCerts)}]. diff --git a/tests/replication2_upgrade.erl b/tests/replication2_upgrade.erl deleted file mode 100644 index fea58b752..000000000 --- a/tests/replication2_upgrade.erl +++ /dev/null @@ -1,105 +0,0 @@ -%% Test cluster version migration with BNW replication as "new" version --module(replication2_upgrade). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). 
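%% The "alternate" upgrade ordering used in confirm/0 below deals the node
%% list round-robin into three bins and then flattens the result. A hedged
%% standalone sketch of the same trick (helper name hypothetical):
alternate_order(Nodes) ->
    lists:flatten(lists:foldl(fun(E, [A, B, C]) -> [B, C, A ++ [E]] end,
                              [[], [], []], Nodes)).
%% e.g. alternate_order([1,2,3,4,5,6]) evaluates to [1,4,2,5,3,6].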
- -confirm() -> - TestMetaData = riak_test_runner:metadata(), - FromVersion = proplists:get_value(upgrade_version, TestMetaData, previous), - - lager:info("Doing rolling replication upgrade test from ~p to ~p", - [FromVersion, "current"]), - - NumNodes = rt_config:get(num_nodes, 6), - - UpgradeOrder = rt_config:get(repl_upgrade_order, "forwards"), - - lager:info("Deploy ~p nodes", [NumNodes]), - Conf = [ - {riak_kv, - [ - {anti_entropy, {off, []}} - ] - }, - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {diff_batch_size, 10} - ]} - ], - - NodeConfig = [{FromVersion, Conf} || _ <- lists:seq(1, NumNodes)], - - Nodes = rt:deploy_nodes(NodeConfig, [riak_kv, riak_repl]), - - NodeUpgrades = case UpgradeOrder of - "forwards" -> - Nodes; - "backwards" -> - lists:reverse(Nodes); - "alternate" -> - %% eg 1, 4, 2, 5, 3, 6 - lists:flatten(lists:foldl(fun(E, [A,B,C]) -> [B, C, A ++ [E]] end, - [[],[],[]], Nodes)); - "random" -> - %% halfass randomization - lists:sort(fun(_, _) -> random:uniform(100) < 50 end, Nodes); - Other -> - lager:error("Invalid upgrade ordering ~p", [Other]), - erlang:exit({invalid_upgrade_order, Other}) - end, - - ClusterASize = rt_config:get(cluster_a_size, 3), - {ANodes, BNodes} = lists:split(ClusterASize, Nodes), - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - - lager:info("Replication First pass...homogeneous cluster"), - rt:log_to_nodes(Nodes, "Replication First pass...homogeneous cluster"), - - %% initial "previous" replication run, homogeneous cluster - replication2:replication(ANodes, BNodes, false), - - lager:info("Upgrading nodes in order: ~p", [NodeUpgrades]), - rt:log_to_nodes(Nodes, "Upgrading nodes in order: ~p", [NodeUpgrades]), - %% upgrade the nodes, one at a time - ok = lists:foreach(fun(Node) -> - lager:info("Upgrade node: ~p", [Node]), - rt:log_to_nodes(Nodes, "Upgrade node: ~p", [Node]), - rtdev:upgrade(Node, current), - %% The upgrade did a wait for pingable - rt:wait_for_service(Node, [riak_kv, riak_pipe, riak_repl]), - [rt:wait_until_ring_converged(N) || N <- [ANodes, BNodes]], - - %% Prior to 1.4.8 riak_repl registered - %% as a service before completing all - %% initialization including establishing - %% realtime connections. - %% - %% @TODO Ideally the test would only wait - %% for the connection in the case of the - %% node version being < 1.4.8, but currently - %% the rt API does not provide a - %% harness-agnostic method to get the node - %% version. For now the test waits for all - %% source cluster nodes to establish a - %% connection before proceeding. - case lists:member(Node, ANodes) of - true -> - repl_util:wait_for_connection(Node, "B"); - false -> - ok - end, - lager:info("Replication with upgraded node: ~p", [Node]), - rt:log_to_nodes(Nodes, "Replication with upgraded node: ~p", [Node]), - replication2:replication(ANodes, BNodes, true) - end, NodeUpgrades), - pass. diff --git a/tests/replication_object_reformat.erl b/tests/replication_object_reformat.erl deleted file mode 100644 index cb1cf6851..000000000 --- a/tests/replication_object_reformat.erl +++ /dev/null @@ -1,239 +0,0 @@ --module(replication_object_reformat). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(TEST_BUCKET, <<"object-reformat">>). --define(NUM_KEYS, 1000). --define(N, 3).
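%% confirm/0 below runs the same verification over every combination of
%% object-format direction and realtime setting. A hedged equivalent of that
%% matrix as a comprehension (helper name hypothetical; it calls the module's
%% own verify_replication/5 and ?NUM_KEYS):
confirm_matrix() ->
    [verify_replication(From, To, 1, ?NUM_KEYS, Realtime)
     || {From, To} <- [{v0, v1}, {v1, v0}],
        Realtime   <- [true, false]],
    pass.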
- -define(CONF(Retries), [ - {riak_core, - [ - {ring_creation_size, 8}, - {default_bucket_props, [{n_val, ?N}]} - ] - }, - {riak_kv, - [ - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_strategy, aae}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_retries, Retries} - ]} - ]). - -confirm() -> - lager:info("Verifying v0 source to v1 sink, realtime enabled."), - verify_replication(v0, v1, 1, ?NUM_KEYS, true), - lager:info("Verifying v0 source to v1 sink, realtime disabled."), - verify_replication(v0, v1, 1, ?NUM_KEYS, false), - lager:info("Verifying v1 source to v0 sink, realtime enabled."), - verify_replication(v1, v0, 1, ?NUM_KEYS, true), - lager:info("Verifying v1 source to v0 sink, realtime disabled."), - verify_replication(v1, v0, 1, ?NUM_KEYS, false), - pass. - -%% @doc Verify replication works between two different versions of the -%% Riak object format. -verify_replication(AVersion, BVersion, Start, End, Realtime) -> - [ANodes, BNodes] = configure_clusters(AVersion, BVersion, Realtime), - - Nodes = [ANodes, BNodes], - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - - lager:info("Get leader of A cluster."), - LeaderA = repl_util:get_leader(AFirst), - - %% Before starting writes, initiate a rolling downgrade. - Me = self(), - - case Realtime of - true -> - spawn(fun() -> - lager:info("Running kv_reformat to downgrade to v0 on ~p", - [BFirst]), - {_, _, Error1} = rpc:call(BFirst, - riak_kv_reformat, - run, - [v0, [{kill_handoffs, false}]]), - ?assertEqual(0, Error1), - - lager:info("Waiting for all nodes to see the v0 capability."), - [rt:wait_until_capability(N, {riak_kv, object_format}, v0, v0) - || N <- BNodes], - - lager:info("Allowing downgrade and writes to occur concurrently."), - Me ! continue, - - lager:info("Downgrading node ~p to previous.", - [BFirst]), - rt:upgrade(BFirst, previous), - - lager:info("Waiting for riak_kv to start on node ~p.", - [BFirst]), - rt:wait_for_service(BFirst, [riak_kv]) - end), - ok; - _ -> - ok - end, - - %% Pause and wait for the rolling downgrade to begin; if it takes too - %% long, proceed anyway and the test will fail when it attempts - %% to read the keys. - receive - continue -> - ok - after 60000 -> - ok - end, - - lager:info("Write keys, assert they are not available yet."), - repl_util:write_to_cluster(AFirst, Start, End, ?TEST_BUCKET, ?N), - - case Realtime of - false -> - lager:info("Verify we can not read the keys on the sink."), - repl_util:read_from_cluster( - BFirst, Start, End, ?TEST_BUCKET, ?NUM_KEYS, ?N); - _ -> - ok - end, - - lager:info("Verify we can read the keys on the source."), - repl_util:read_from_cluster(AFirst, Start, End, ?TEST_BUCKET, 0, ?N), - - %% Wait until the sink cluster is in a steady state before - %% starting fullsync - rt:wait_until_nodes_ready(BNodes), - rt:wait_until_no_pending_changes(BNodes), - rt:wait_until_registered(BFirst, riak_repl2_fs_node_reserver), - - repl_util:validate_completed_fullsync( - LeaderA, BFirst, "B", Start, End, ?TEST_BUCKET), - - lager:info("Verify we can read the keys on the sink."), - repl_util:read_from_cluster(BFirst, Start, End, ?TEST_BUCKET, 0, ?N), - - %% Verify if we downgrade sink, after replication has completed, we - %% can still read the objects.
- %% - case {Realtime, BVersion} of - {false, v1} -> - lager:info("Running kv_reformat to downgrade to v0 on ~p", - [BFirst]), - {_, _, Error} = rpc:call(BFirst, - riak_kv_reformat, - run, - [v0, [{kill_handoffs, false}]]), - ?assertEqual(0, Error), - - lager:info("Waiting for all nodes to see the v0 capability."), - [rt:wait_until_capability(N, {riak_kv, object_format}, v0, v0) - || N <- BNodes], - - lager:info("Downgrading node ~p to previous.", - [BFirst]), - rt:upgrade(BFirst, previous), - - lager:info("Waiting for riak_kv to start on node ~p.", - [BFirst]), - rt:wait_for_service(BFirst, riak_kv), - - lager:info("Verify we can read from node ~p after downgrade.", - [BFirst]), - repl_util:read_from_cluster( - BFirst, Start, End, ?TEST_BUCKET, 0, ?N), - ok; - _ -> - ok - end, - - rt:clean_cluster(lists:flatten(Nodes)). - -%% @doc Configure two clusters and set up replication between them, -%% return the node list of each cluster. -configure_clusters(AVersion, BVersion, Realtime) -> - rt:set_advanced_conf(all, ?CONF(infinity)), - - Nodes = [ANodes, BNodes] = rt:build_clusters([3, 3]), - - rt:wait_for_cluster_service(ANodes, riak_repl), - rt:wait_for_cluster_service(BNodes, riak_repl), - - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Updating app config to force ~p on source cluster.", - [AVersion]), - [rt:update_app_config(N, [{riak_kv, - [{object_format, AVersion}]}]) - || N <- ANodes], - - lager:info("Updating app config to force ~p on sink cluster.", - [BVersion]), - [rt:update_app_config(N, [{riak_kv, - [{object_format, BVersion}]}]) - || N <- BNodes], - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - - lager:info("Naming clusters."), - repl_util:name_cluster(AFirst, "A"), - repl_util:name_cluster(BFirst, "B"), - - lager:info("Waiting for convergence."), - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - lager:info("Waiting for transfers to complete."), - rt:wait_until_transfers_complete(ANodes), - rt:wait_until_transfers_complete(BNodes), - - lager:info("Get leaders."), - LeaderA = repl_util:get_leader(AFirst), - - lager:info("Connecting cluster A to B"), - {ok, {BIP, BPort}} = rpc:call(BFirst, application, get_env, [riak_core, cluster_mgr]), - - repl_util:connect_cluster(LeaderA, BIP, BPort), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - - lager:info("Enabling fullsync from A to B"), - repl_util:enable_fullsync(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - case Realtime of - true -> - lager:info("Enabling realtime from A to B"), - repl_util:enable_realtime(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes); - _ -> - ok - end, - - lager:info("Wait for capability on source cluster."), - [rt:wait_until_capability(N, {riak_kv, object_format}, AVersion, v0) - || N <- ANodes], - - lager:info("Wait for capability on sink cluster."), - [rt:wait_until_capability(N, {riak_kv, object_format}, BVersion, v0) - || N <- BNodes], - - lager:info("Ensuring connection from cluster A to B"), - repl_util:connect_cluster_by_name(LeaderA, BPort, "B"), - - Nodes. diff --git a/tests/replication_ssl.erl b/tests/replication_ssl.erl deleted file mode 100644 index b04fa3eaa..000000000 --- a/tests/replication_ssl.erl +++ /dev/null @@ -1,263 +0,0 @@ --module(replication_ssl). --behavior(riak_test). --export([confirm/0]). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). 
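%% The SSLConfigN blocks built in confirm/0 below all share one shape; a
%% hedged builder sketch (helper name hypothetical) that derives the cert
%% paths from a site directory, with extra options such as ssl_depth or
%% peer_common_name_acl passed through by the caller:
ssl_site_config(CertDir, Site, Extra) ->
    [{riak_repl,
      [{fullsync_on_connect, false},
       {fullsync_interval, disabled},
       {ssl_enabled, true},
       %% certificate material generated by make_certs, as in confirm/0
       {certfile, filename:join([CertDir, Site, "cert.pem"])},
       {keyfile, filename:join([CertDir, Site, "key.pem"])},
       {cacertdir, filename:join([CertDir, Site])}
       | Extra]}].
%% e.g. ssl_site_config(CertDir, "site1.basho.com", []) would reproduce the
%% shape of SSLConfig1 below.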
- -confirm() -> - %% test requires allow_mult=false - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), - - NumNodes = rt_config:get(num_nodes, 6), - ClusterASize = rt_config:get(cluster_a_size, 3), - - CertDir = rt_config:get(rt_scratch_dir) ++ "/certs", - - %% make a bunch of crypto keys - make_certs:rootCA(CertDir, "rootCA"), - make_certs:intermediateCA(CertDir, "intCA", "rootCA"), - make_certs:endusers(CertDir, "rootCA", ["site3.basho.com", "site4.basho.com"]), - make_certs:endusers(CertDir, "intCA", ["site1.basho.com", "site2.basho.com"]), - - lager:info("Deploy ~p nodes", [NumNodes]), - BaseConf = [ - {riak_repl, - [ - {ssl_enabled, false}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled} - ]} - ], - - PrivDir = rt:priv_dir(), - - lager:info("priv dir: ~p -> ~p", [code:priv_dir(riak_test), PrivDir]), - - SSLConfig1 = [ - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {ssl_enabled, true}, - {certfile, filename:join([CertDir, - "site1.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site1.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site1.basho.com"])} - ]} - ], - - SSLConfig2 = [ - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {ssl_enabled, true}, - {certfile, filename:join([CertDir, - "site2.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site2.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site2.basho.com"])} - ]} - ], - - SSLConfig3 = [ - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {ssl_enabled, true}, - {certfile, filename:join([CertDir, - "site3.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site3.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site3.basho.com"])} - ]} - ], - - %% same as above,with a depth of 0 - SSLConfig3A = [ - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {ssl_enabled, true}, - {ssl_depth, 0}, - {certfile, filename:join([CertDir, - "site3.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site3.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site3.basho.com"])} - ]} - ], - - SSLConfig4 = [ - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {ssl_enabled, true}, - {ssl_depth, 0}, - {certfile, filename:join([CertDir, - "site4.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site4.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site4.basho.com"])} - ]} - ], - - SSLConfig5 = [ - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {ssl_enabled, true}, - {ssl_depth, 1}, - {peer_common_name_acl, ["*.basho.com"]}, - {certfile, filename:join([CertDir, - "site1.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site1.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site1.basho.com/cacerts.pem"])} - ]} - ], - - SSLConfig6 = [ - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {ssl_enabled, true}, - {ssl_depth, 1}, - {peer_common_name_acl, ["site1.basho.com"]}, - {certfile, filename:join([CertDir, - "site2.basho.com/cert.pem"])}, - {keyfile, filename:join([CertDir, - "site2.basho.com/key.pem"])}, - {cacertdir, filename:join([CertDir, - "site2.basho.com/cacerts.pem"])} - ]} - ], - - SSLConfig7 = [ - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {ssl_enabled, true}, - 
{peer_common_name_acl, ["ca.cataclysm-software.net"]}, - {certfile, filename:join([PrivDir, - "certs/cacert.org/ny-cert-old.pem"])}, - {keyfile, filename:join([PrivDir, - "certs/cacert.org/ny-key.pem"])}, - {cacertdir, filename:join([PrivDir, - "certs/cacert.org/ca"])} - ]} - ], - - lager:info("===testing basic connectivity"), - - [Node1, Node2] = rt:deploy_nodes(2, BaseConf, [riak_kv, riak_repl]), - - Listeners = replication:add_listeners([Node1]), - replication:verify_listeners(Listeners), - - {Ip, Port, _} = hd(Listeners), - replication:add_site(Node2, {Ip, Port, "site1"}), - - replication:wait_for_site_ips(Node2, "site1", Listeners), - - rt:log_to_nodes([Node1, Node2], "Basic connectivity test"), - ?assertEqual(ok, replication:wait_until_connection(Node1)), - - lager:info("===testing you can't connect to a server with a cert with the same common name"), - rt:log_to_nodes([Node1, Node2], "Testing identical cert is disallowed"), - ?assertMatch({fail, _}, test_connection({Node1, merge_config(SSLConfig1, BaseConf)}, - {Node2, merge_config(SSLConfig1, BaseConf)})), - - lager:info("===testing you can't connect when peer doesn't support SSL"), - rt:log_to_nodes([Node1, Node2], "Testing missing ssl on peer fails"), - ?assertMatch({fail, _}, test_connection({Node1, merge_config(SSLConfig1, BaseConf)}, - {Node2, BaseConf})), - - lager:info("===testing you can't connect when local doesn't support SSL"), - rt:log_to_nodes([Node1, Node2], "Testing missing ssl locally fails"), - ?assertMatch({fail, _}, test_connection({Node1, BaseConf}, - {Node2, merge_config(SSLConfig2, BaseConf)})), - - lager:info("===testing simple SSL connectivity"), - rt:log_to_nodes([Node1, Node2], "Basic SSL test"), - ?assertEqual(ok, test_connection({Node1, merge_config(SSLConfig1, BaseConf)}, - {Node2, merge_config(SSLConfig2, BaseConf)})), - - lager:info("===testing SSL connectivity with an intermediate CA"), - rt:log_to_nodes([Node1, Node2], "Intermediate CA test"), - ?assertEqual(ok, test_connection({Node1, merge_config(SSLConfig1, BaseConf)}, - {Node2, merge_config(SSLConfig3, BaseConf)})), - - lager:info("===testing disallowing intermediate CAs works"), - rt:log_to_nodes([Node1, Node2], "Disallowing intermediate CA test"), - ?assertEqual(ok, test_connection({Node1, merge_config(SSLConfig3A, BaseConf)}, - {Node2, merge_config(SSLConfig4, BaseConf)})), - - lager:info("===testing disallowing intermediate CAs disallows connections"), - rt:log_to_nodes([Node1, Node2], "Disallowing intermediate CA test 2"), - ?assertMatch({fail, _}, test_connection({Node1, merge_config(SSLConfig3A, BaseConf)}, - {Node2, merge_config(SSLConfig1, BaseConf)})), - - lager:info("===testing wildcard and strict ACLs"), - rt:log_to_nodes([Node1, Node2], "wildcard and strict ACL test"), - ?assertEqual(ok, test_connection({Node1, merge_config(SSLConfig5, BaseConf)}, - {Node2, merge_config(SSLConfig6, BaseConf)})), - - lager:info("===testing expired certificates fail"), - rt:log_to_nodes([Node1, Node2], "expired certificates test"), - ?assertMatch({fail, _}, test_connection({Node1, merge_config(SSLConfig5, BaseConf)}, - {Node2, merge_config(SSLConfig7, BaseConf)})), - - lager:info("Connectivity tests passed"), - - lager:info("Re-deploying 6 nodes"), - - Nodes = rt:deploy_nodes(6, BaseConf, [riak_kv, riak_repl]), - - [rt:wait_until_pingable(N) || N <- Nodes], - - {ANodes, BNodes} = lists:split(ClusterASize, Nodes), - - lager:info("Reconfiguring nodes with SSL options"), - [rt:update_app_config(N, merge_config(SSLConfig5, BaseConf)) || N <- - ANodes], - 
- [rt:update_app_config(N, merge_config(SSLConfig6, BaseConf)) || N <- - BNodes], - - [rt:wait_until_pingable(N) || N <- Nodes], - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - - replication:replication(ANodes, BNodes, false), - - pass. - -merge_config(Mixin, Base) -> - lists:ukeymerge(1, lists:keysort(1, Mixin), lists:keysort(1, Base)). - -test_connection({Node1, Config1}, {Node2, Config2}) -> - rt:update_app_config(Node1, Config1), - rt:wait_until_pingable(Node1), - rt:update_app_config(Node2, Config2), - rt:wait_until_pingable(Node2), - rt:wait_for_service(Node1, [riak_kv, riak_repl]), - rt:wait_for_service(Node2, [riak_kv, riak_repl]), - replication:wait_until_connection(Node1). diff --git a/tests/replication_stats.erl b/tests/replication_stats.erl deleted file mode 100644 index cce909651..000000000 --- a/tests/replication_stats.erl +++ /dev/null @@ -1,108 +0,0 @@ --module(replication_stats). - --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(FULL_NUM_KEYS, 5000). --define(TEST_BUCKET, <<"repl_bench">>). - --define(HARNESS, (rt_config:get(rt_harness))). - --define(CONF, [ - {riak_core, - [ - {ring_creation_size, 8}, - {default_bucket_props, [{n_val, 1}, {allow_mult, false}]} - ] - }, - {riak_kv, - [ - {anti_entropy, {on, []}}, - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 100} - ] - }, - {riak_repl, - [ - {fullsync_strategy, keylist}, - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {max_fssource_retries, infinity}, - {max_fssource_cluster, 1}, - {max_fssource_node, 1}, - {max_fssink_node, 1} - ]} - ]). - -confirm() -> - fullsync_enabled_and_started(). - -fullsync_enabled_and_started() -> - rt:set_advanced_conf(all, ?CONF), - - [ANodes, BNodes] = rt:build_clusters([3, 3]), - - rt:wait_for_cluster_service(ANodes, riak_repl), - rt:wait_for_cluster_service(BNodes, riak_repl), - - AFirst = hd(ANodes), - BFirst = hd(BNodes), - - repl_util:name_cluster(AFirst, "A"), - repl_util:name_cluster(BFirst, "B"), - - rt:wait_until_ring_converged(ANodes), - rt:wait_until_ring_converged(BNodes), - - ?assertEqual(ok, repl_util:wait_until_leader_converge(ANodes)), - ?assertEqual(ok, repl_util:wait_until_leader_converge(BNodes)), - - LeaderA = rpc:call(AFirst, - riak_core_cluster_mgr, get_leader, []), - - {ok, {IP, Port}} = rpc:call(BFirst, - application, get_env, [riak_core, cluster_mgr]), - - repl_util:connect_cluster(LeaderA, IP, Port), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - - repl_util:enable_fullsync(LeaderA, "B"), - rt:wait_until_ring_converged(ANodes), - - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - - %% Write keys and perform fullsync. - repl_util:write_to_cluster(AFirst, 0, ?FULL_NUM_KEYS, ?TEST_BUCKET), - - Me = self(), - - spawn(fun() -> - {FullTime, _} = timer:tc(repl_util, - start_and_wait_until_fullsync_complete, - [LeaderA, undefined, Me]), - lager:info("Fullsync completed in ~p", [FullTime]) - end), - - Result = receive - fullsync_started -> - lager:info("Fullsync started!"), - - case rpc:call(LeaderA, riak_repl_console, fs_remotes_status, - []) of - {badrpc, _} -> - fail; - Stats -> - ?assertEqual(Stats, - [{fullsync_enabled, "B"}, - {fullsync_running, "B"}]), - pass - end - after 60000 -> - fail - end, - - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), - - Result. 
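%% replication_stats above blocks until the spawned helper reports that
%% fullsync has started, then samples fs_remotes_status. A minimal sketch of
%% that rendezvous (assuming, as the receive above implies, that repl_util
%% sends `fullsync_started` to the pid it was handed):
await_fullsync_started(Timeout) ->
    receive
        fullsync_started -> ok
    after Timeout ->
        {error, timeout}
    end.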
diff --git a/tests/replication_upgrade.erl b/tests/replication_upgrade.erl deleted file mode 100644 index 86d82adb9..000000000 --- a/tests/replication_upgrade.erl +++ /dev/null @@ -1,96 +0,0 @@ --module(replication_upgrade). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - TestMetaData = riak_test_runner:metadata(), - FromVersion = proplists:get_value(upgrade_version, TestMetaData, previous), - - lager:info("Doing rolling replication upgrade test from ~p to ~p", - [FromVersion, "current"]), - - NumNodes = rt_config:get(num_nodes, 6), - - UpgradeOrder = rt_config:get(repl_upgrade_order, "forwards"), - - lager:info("Deploy ~p nodes", [NumNodes]), - Conf = [ - {riak_repl, - [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled} - ]} - ], - - NodeConfig = [{FromVersion, Conf} || _ <- lists:seq(1, NumNodes)], - - Nodes = rt:deploy_nodes(NodeConfig, [riak_kv, riak_repl]), - - NodeUpgrades = case UpgradeOrder of - "forwards" -> - Nodes; - "backwards" -> - lists:reverse(Nodes); - "alternate" -> - %% eg 1, 4, 2, 5, 3, 6 - lists:flatten(lists:foldl(fun(E, [A,B,C]) -> [B, C, A ++ [E]] end, - [[],[],[]], Nodes)); - "random" -> - %% halfass randomization - lists:sort(fun(_, _) -> random:uniform(100) < 50 end, Nodes); - Other -> - lager:error("Invalid upgrade ordering ~p", [Other]), - erlang:exit({invalid_upgrade_order, Other}) - end, - - ClusterASize = rt_config:get(cluster_a_size, 3), - {ANodes, BNodes} = lists:split(ClusterASize, Nodes), - lager:info("ANodes: ~p", [ANodes]), - lager:info("BNodes: ~p", [BNodes]), - - lager:info("Build cluster A"), - repl_util:make_cluster(ANodes), - - lager:info("Build cluster B"), - repl_util:make_cluster(BNodes), - - lager:info("Replication First pass...homogeneous cluster"), - - %% initial replication run, homogeneous cluster - replication:replication(ANodes, BNodes, false), - - lager:info("Upgrading nodes in order: ~p", [NodeUpgrades]), - rt:log_to_nodes(Nodes, "Upgrading nodes in order: ~p", [NodeUpgrades]), - %% upgrade the nodes, one at a time - ok = lists:foreach(fun(Node) -> - lager:info("Upgrade node: ~p", [Node]), - rt:log_to_nodes(Nodes, "Upgrade node: ~p", [Node]), - rtdev:upgrade(Node, current), - rt:wait_until_pingable(Node), - rt:wait_for_service(Node, [riak_kv, riak_pipe, riak_repl]), - [rt:wait_until_ring_converged(N) || N <- [ANodes, BNodes]], - %% Prior to 1.4.8 riak_repl registered - %% as a service before completing all - %% initialization including establishing - %% realtime connections. - %% - %% @TODO Ideally the test would only wait - %% for the connection in the case of the - %% node version being < 1.4.8, but currently - %% the rt API does not provide a - %% harness-agnostic method to get the node - %% version. For now the test waits for all - %% source cluster nodes to establish a - %% connection before proceeding. - case lists:member(Node, ANodes) of - true -> - replication:wait_until_connection(Node); - false -> - ok - end, - lager:info("Replication with upgraded node: ~p", [Node]), - rt:log_to_nodes(Nodes, "Replication with upgraded node: ~p", [Node]), - replication:replication(ANodes, BNodes, true) - end, NodeUpgrades), - pass. diff --git a/tests/riak667_mixed.erl b/tests/riak667_mixed.erl deleted file mode 100644 index ca6e57987..000000000 --- a/tests/riak667_mixed.erl +++ /dev/null @@ -1,334 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc.
-%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak667_mixed). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(HARNESS, (rt_config:get(rt_harness))). --define(TYPE, <<"maps">>). --define(KEY, <<"cmeiklejohn">>). --define(KEY2, <<"cmeik">>). --define(BUCKET, {?TYPE, <<"testbucket">>}). - --define(CONF, [ - {riak_core, - [{ring_creation_size, 8}] - }, - {riak_kv, - [{mdc_crdt_epoch, 1}]} - ]). - -confirm() -> - rt:set_advanced_conf(all, ?CONF), - - %% Configure cluster. - TestMetaData = riak_test_runner:metadata(), - OldVsn = proplists:get_value(upgrade_version, TestMetaData, "2.0.2"), - Nodes = [Node1, Node2] = rt:build_cluster([OldVsn, OldVsn]), - - CurrentVer = rt:get_version(), - - lager:info("mdc_crdt_epoch? ~p", [rpc:multicall(Nodes, application, get_env, [riak_kv, mdc_crdt_epoch])]), - - %% Create PB connection. - Pid = rt:pbc(Node1), - riakc_pb_socket:set_options(Pid, [queue_if_disconnected]), - - %% Create bucket type for maps. - rt:create_and_activate_bucket_type(Node1, ?TYPE, [{datatype, map}]), - - %% Write some sample data. - Map = riakc_map:update( - {<<"names">>, set}, - fun(R) -> - riakc_set:add_element(<<"Original">>, R) - end, riakc_map:new()), - Map2 = riakc_map:update({<<"profile">>, map}, - fun(M) -> - riakc_map:update( - {<<"name">>, register}, - fun(R) -> - riakc_register:set(<<"Bob">>, R) - end, M) - end, Map), - - ok = riakc_pb_socket:update_type( - Pid, - ?BUCKET, - ?KEY, - riakc_map:to_op(Map2)), - - %% Upgrade one node. - upgrade(Node2, "2.0.4"), - - lager:notice("running mixed 2.0.2 and 2.0.4"), - - %% Create PB connection. - Pid2 = rt:pbc(Node2), - riakc_pb_socket:set_options(Pid2, [queue_if_disconnected, auto_reconnect]), - - %% Read value. - ?assertMatch({error, <<"Error processing incoming message: error:{badrecord,dict}", _/binary>>}, - riakc_pb_socket:fetch_type(Pid2, ?BUCKET, ?KEY)), - - lager:notice("Can't read a 2.0.2 map from 2.0.4 node"), - - %% Write some 2.0.4 data. - Oh4Map = riakc_map:update( - {<<"names">>, set}, - fun(R) -> - riakc_set:add_element(<<"Original">>, R) - end, riakc_map:new()), - Oh4Map2 = riakc_map:update({<<"profile">>, map}, - fun(M) -> - riakc_map:update( - {"name", register}, - fun(R) -> - riakc_register:set(<<"Bob">>, R) - end, M) - end, Oh4Map), - - ok = riakc_pb_socket:update_type( - Pid2, - ?BUCKET, - ?KEY2, - riakc_map:to_op(Oh4Map2)), - - lager:notice("Created a 2.0.4 map"), - - %% and read 2.0.4 data?? Nope, dict is not an orddict - ?assertMatch({error,<<"Error processing incoming message: error:function_clause:[{orddict,fold", _/binary>>}, - riakc_pb_socket:fetch_type(Pid, ?BUCKET, ?KEY2)), - - lager:notice("Can't read 2.0.4 map from 2.0.2 node"), - - %% upgrade 2.0.4 to 2.0.5 - riakc_pb_socket:stop(Pid2), - upgrade(Node2, current), - - lager:notice("running mixed 2.0.2 and ~s", [CurrentVer]), - - %% Create PB connection. 
- Pid3 = rt:pbc(Node2), - riakc_pb_socket:set_options(Pid3, [queue_if_disconnected]), - - %% read both maps - {ok, K1O} = riakc_pb_socket:fetch_type(Pid3, ?BUCKET, ?KEY), - {ok, K2O} = riakc_pb_socket:fetch_type(Pid3, ?BUCKET, ?KEY2), - - lager:notice("2.0.2 map ~p", [K1O]), - lager:notice("2.0.4 map ~p", [K2O]), - - %% update 2.0.2 map on new node ?KEY Pid3 - K1OU = riakc_map:update({<<"profile">>, map}, - fun(M) -> - riakc_map:update( - {<<"name">>, register}, - fun(R) -> - riakc_register:set(<<"Rita">>, R) - end, M) - end, K1O), - - ok = riakc_pb_socket:update_type(Pid3, ?BUCKET, ?KEY, riakc_map:to_op(K1OU)), - lager:notice("Updated 2.0.2 map on ~s", [CurrentVer]), - - %% read 2.0.2 map from 2.0.2 node ?KEY Pid - {ok, K1OR} = riakc_pb_socket:fetch_type(Pid, ?BUCKET, ?KEY), - lager:notice("Read 2.0.2 map from 2.0.2 node: ~p", [K1OR]), - - ?assertEqual(<<"Rita">>, orddict:fetch({<<"name">>, register}, - riakc_map:fetch({<<"profile">>, map}, K1OR))), - - %% update 2.0.2 map on old node ?KEY Pid - K1O2 = riakc_map:update({<<"profile">>, map}, - fun(M) -> - riakc_map:update( - {<<"name">>, register}, - fun(R) -> - riakc_register:set(<<"Sue">>, R) - end, M) - end, K1OR), - - ok = riakc_pb_socket:update_type(Pid, ?BUCKET, ?KEY, riakc_map:to_op(K1O2)), - lager:notice("Updated 2.0.2 map on 2.0.2 node"), - - %% read it from 2.0.5 node ?KEY Pid3 - {ok, K1OC} = riakc_pb_socket:fetch_type(Pid3, ?BUCKET, ?KEY), - lager:notice("Read 2.0.2 map from ~s node: ~p", [CurrentVer, K1OC]), - - ?assertEqual(<<"Sue">>, orddict:fetch({<<"name">>, register}, - riakc_map:fetch({<<"profile">>, map}, K1OC))), - - %% update 2.0.4 map node ?KEY2 Pid3 - K2OU = riakc_map:update({<<"people">>, set}, - fun(S) -> - riakc_set:add_element(<<"Joan">>, S) - end, riakc_map:new()), - - ok = riakc_pb_socket:update_type(Pid3, ?BUCKET, ?KEY2, riakc_map:to_op(K2OU)), - lager:notice("Updated 2.0.4 map on ~s node", [CurrentVer]), - %% upgrade 2.0.2 node - - riakc_pb_socket:stop(Pid), - upgrade(Node1, current), - lager:notice("Upgraded 2.0.2 node to ~s", [CurrentVer]), - - %% read and write maps - Pid4 = rt:pbc(Node1), - - {ok, K1N1} = riakc_pb_socket:fetch_type(Pid4, ?BUCKET, ?KEY), - {ok, K1N2} = riakc_pb_socket:fetch_type(Pid3, ?BUCKET, ?KEY), - ?assertEqual(K1N1, K1N2), - - {ok, K2N1} = riakc_pb_socket:fetch_type(Pid4, ?BUCKET, ?KEY2), - {ok, K2N2} = riakc_pb_socket:fetch_type(Pid3, ?BUCKET, ?KEY2), - ?assertEqual(K2N1, K2N2), - lager:notice("Maps fetched from both nodes are same K1:~p K2:~p", [K1N1, K2N1]), - - K1M = riakc_map:update({<<"people">>, set}, - fun(S) -> riakc_set:add_element(<<"Roger">>, S) end, - K1N1), - ok = riakc_pb_socket:update_type(Pid3, ?BUCKET, ?KEY, riakc_map:to_op(K1M)), - - K2M = riakc_map:update({<<"people">>, set}, - fun(S) -> riakc_set:add_element(<<"Don">>, S) end, - K2N1), - ok = riakc_pb_socket:update_type(Pid4, ?BUCKET, ?KEY2, riakc_map:to_op(K2M)), - %% (how???) check format is still v1 (maybe get raw kv object and inspect contents using riak_kv_crdt?? - - {ok, Robj1} = riakc_pb_socket:get(Pid3, ?BUCKET, ?KEY), - ?assert(map_contents_are_lists(Robj1)), - - {ok, Robj2} = riakc_pb_socket:get(Pid4, ?BUCKET, ?KEY2), - - lager:info("mdc_crdt_epoch? ~p", [rpc:multicall(Nodes, application, get_env, [riak_kv, mdc_crdt_epoch])]), - - ?assert(map_contents_are_lists(Robj2)), - %% unset env var - rpc:multicall(Nodes, application, set_env, [riak_kv, mdc_crdt_epoch, false]), - - lager:info("mdc_crdt_epoch? 
~p", [rpc:multicall(Nodes, application, get_env, [riak_kv, mdc_crdt_epoch])]), - - %% read and write maps - {ok, Up1N1} = riakc_pb_socket:fetch_type(Pid4, ?BUCKET, ?KEY), - {ok, Up1N2} = riakc_pb_socket:fetch_type(Pid3, ?BUCKET, ?KEY), - ?assertEqual(Up1N1, Up1N2), - - {ok, Up2N1} = riakc_pb_socket:fetch_type(Pid4, ?BUCKET, ?KEY2), - {ok, Up2N2} = riakc_pb_socket:fetch_type(Pid3, ?BUCKET, ?KEY2), - ?assertEqual(Up2N1, Up2N2), - lager:notice("Maps fetched from both nodes are same K1:~p K2:~p", [Up1N1, Up2N1]), - - Up1M = riakc_map:update({<<"people">>, set}, - fun(S) -> riakc_set:add_element(<<"Betty">>, S) end, - Up1N1), - ok = riakc_pb_socket:update_type(Pid3, ?BUCKET, ?KEY, riakc_map:to_op(Up1M)), - - Up2M = riakc_map:update({<<"people">>, set}, - fun(S) -> riakc_set:add_element(<<"Burt">>, S) end, - Up2N1), - ok = riakc_pb_socket:update_type(Pid4, ?BUCKET, ?KEY2, riakc_map:to_op(Up2M)), - - %% (how??? see above?) check ondisk format is now v2 - {ok, UpObj1} = riakc_pb_socket:get(Pid3, ?BUCKET, ?KEY), - ?assert(map_contents_are_dicts(UpObj1)), - - {ok, UpObj2} = riakc_pb_socket:get(Pid4, ?BUCKET, ?KEY2), - ?assert(map_contents_are_dicts(UpObj2)), - - %% Stop PB connection. - riakc_pb_socket:stop(Pid3), - riakc_pb_socket:stop(Pid4), - - pass. - -upgrade(Node, NewVsn) -> - lager:notice("Upgrading ~p to ~p", [Node, NewVsn]), - rt:upgrade(Node, NewVsn), - rt:wait_for_service(Node, riak_kv), - ok. - -map_contents_are_lists(RObj) -> - [{_MD, V}] = riakc_obj:get_contents(RObj), - {riak_dt_map, {_Clock, Entries, Deferred}} = map_from_binary(V), - lager:info("Top-level map: ~p || ~p", [Entries, Deferred]), - is_list(Entries) andalso is_list(Deferred) andalso nested_are_lists(Entries). - -nested_are_lists(Entries) -> - %% This is ugly because it reaches into the guts of the data - %% structure's internal format. - lists:all(fun({{_, riak_dt_orswot}, {CRDTs, Tombstone}}) -> - lists:all(fun({_Dot, Set}) -> set_is_list(Set) end, CRDTs) - andalso set_is_list(Tombstone); - ({{_, riak_dt_map}, {CRDTs, Tombstone}}) -> - lists:all(fun({_Dot, Map}) -> map_is_list(Map) end, CRDTs) - andalso map_is_list(Tombstone); - (_) -> - true - end, Entries). - -set_is_list({_Clock, Entries, Deferred}) -> - is_list(Entries) andalso is_list(Deferred). - -map_is_list({_Clock, Entries, Deferred}) -> - is_list(Deferred) andalso nested_are_lists(Entries). - -map_contents_are_dicts(RObj) -> - [{_MD, V}] = riakc_obj:get_contents(RObj), - {riak_dt_map, {_Clock, Entries, Deferred}} = map_from_binary(V), - lager:info("Top-level map: ~p || ~p", [Entries, Deferred]), - is_dict(Entries) andalso is_dict(Deferred) andalso nested_are_dicts(Entries). - -is_dict(V) -> - is_tuple(V) andalso dict == element(1, V). - -nested_are_dicts(Entries) -> - %% This is ugly because it reaches into the guts of the data - %% structure's internal format. - lists:all(fun({{_, riak_dt_orswot}, {CRDTs, Tombstone}}) -> - is_dict(CRDTs) andalso - set_is_dict(Tombstone) andalso - dict:fold(fun(_K, Set, Acc) -> - set_is_dict(Set) andalso Acc - end, true, CRDTs); - ({{_, riak_dt_map}, {CRDTs, Tombstone}}) -> - is_dict(CRDTs) andalso map_is_dict(Tombstone) andalso - dict:fold(fun(_K, Map, Acc) -> - map_is_dict(Map) andalso Acc - end, true, CRDTs); - (_) -> - true - end, dict:to_list(Entries)). - -set_is_dict({_Clock, Entries, Deferred}) -> - is_dict(Entries) andalso is_dict(Deferred). - -map_is_dict({_Clock, Entries, Deferred}) -> - is_dict(Deferred) andalso nested_are_dicts(Entries). 
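Background on the representation these helpers probe (not part of the patch): an Erlang dict() is a record, i.e. a tuple whose first element is the atom 'dict', which is exactly what is_dict/1 keys off, while the v1 map format stored orddicts, which are plain sorted lists. A quick shell illustration:

    1> element(1, dict:new()).
    dict
    2> is_list(orddict:new()).
    true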
- -%% Somewhat copy-pasta from riak_kv_crdt -%% NB:?TAG is 69 in riak_kv_crdt, version is 2 -%% ?TAG is 77 in riak_dt_types.hrl, version is 1 or 2 -map_from_binary(<<69:8, 2:8, TypeLen:32/integer, Type:TypeLen/binary, 77:8, MapVer:8, - CRDTBin/binary>>) -> - lager:notice("Deserialized Map: ~s v~p", [Type, MapVer]), - Mod = binary_to_atom(Type, latin1), - {Mod, riak_dt:from_binary(CRDTBin)}. diff --git a/tests/riak667_safe.erl b/tests/riak667_safe.erl deleted file mode 100644 index 5c2bef926..000000000 --- a/tests/riak667_safe.erl +++ /dev/null @@ -1,102 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(riak667_safe). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(HARNESS, (rt_config:get(rt_harness))). --define(INDEX, <<"maps">>). --define(TYPE, <<"maps">>). --define(KEY, <<"cmeiklejohn">>). --define(BUCKET, {?TYPE, <<"testbucket">>}). --define(NAME_REGISTER_VALUE, <<"Christopher Meiklejohn">>). - --define(CONF, [ - {riak_core, - [{ring_creation_size, 8}] - }]). - -confirm() -> - rt:set_advanced_conf(all, ?CONF), - - %% Configure cluster. - TestMetaData = riak_test_runner:metadata(), - OldVsn = proplists:get_value(upgrade_version, TestMetaData, "2.0.2"), - Nodes = [Node|_] = rt:build_cluster([OldVsn]), - - %% Create PB connection. - Pid = rt:pbc(Node), - riakc_pb_socket:set_options(Pid, [queue_if_disconnected]), - - %% Create bucket type for maps. - rt:create_and_activate_bucket_type(Node, ?TYPE, [{datatype, map}]), - - lager:info("Write map on 2.0.2"), - %% Write some sample data. - Map = riakc_map:update( - {<<"name">>, register}, - fun(R) -> - riakc_register:set(<<"Original">>, R) - end, - riakc_map:new()), - ok = riakc_pb_socket:update_type( - Pid, - ?BUCKET, - ?KEY, - riakc_map:to_op(Map)), - - %% Stop PB connection. - riakc_pb_socket:stop(Pid), - - lager:info("Upgrade to current"), - %% Upgrade all nodes. - [upgrade(N, current) || N <- Nodes], - - lager:info("Update map on upgraded cluster"), - %% Create PB connection. - Pid2 = rt:pbc(Node), - riakc_pb_socket:set_options(Pid2, [queue_if_disconnected]), - - %% Read value. - {ok, O} = riakc_pb_socket:fetch_type(Pid2, ?BUCKET, ?KEY), - - %% Write some sample data. - Map2 = riakc_map:update( - {<<"name">>, register}, - fun(R) -> - riakc_register:set(<<"Updated">>, R) - end, O), - ok = riakc_pb_socket:update_type( - Pid2, - ?BUCKET, - ?KEY, - riakc_map:to_op(Map2)), - - %% Stop PB connection. - riakc_pb_socket:stop(Pid2), - - pass. - -upgrade(Node, NewVsn) -> - lager:info("Upgrading ~p to ~p", [Node, NewVsn]), - rt:upgrade(Node, NewVsn), - rt:wait_for_service(Node, riak_kv), - ok. 
diff --git a/tests/riak_admin_console_tests.erl b/tests/riak_admin_console_tests.erl
deleted file mode 100644
index 8084b8424..000000000
--- a/tests/riak_admin_console_tests.erl
+++ /dev/null
@@ -1,246 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2014 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(riak_admin_console_tests).
--include_lib("eunit/include/eunit.hrl").
-
--export([confirm/0]).
-
-%% This test passes params through the riak-admin shell script on to
-%% intercepts that either return ?PASS or ?FAIL (which print out "pass"
-%% or "fail" to the console). If an unexpected input is received in
-%% Erlang, ?FAIL is returned. This test should (will?) make sure we
-%% don't implement any unportable shell code. For example,
-%% `riak-repl cascades foo` didn't work on Ubuntu due to an invalid
-%% call to shift. Since this test will be run on giddyup and hence many
-%% platforms, we should be able to catch these types of bugs earlier.
-%% See also: replication2_console_tests.erl for a more detailed
-%% description.
-
-%% UNTESTED, as they don't use rpc, or have a non-trivial impl
-%% test
-%% diag
-%% top
-%% wait-for-services
-%% js-reload
-%% reip
-
-%% riak-admin cluster
-cluster_tests(Node) ->
-    check_admin_cmd(Node, "cluster join dev99@127.0.0.1"),
-    check_admin_cmd(Node, "cluster leave"),
-    check_admin_cmd(Node, "cluster leave dev99@127.0.0.1"),
-    check_admin_cmd(Node, "cluster force-remove dev99@127.0.0.1"),
-    check_admin_cmd(Node, "cluster replace dev98@127.0.0.1 dev99@127.0.0.1"),
-    check_admin_cmd(Node, "cluster force-replace dev98@127.0.0.1 dev99@127.0.0.1"),
-    check_admin_cmd(Node, "cluster resize-ring 42"),
-    check_admin_cmd(Node, "cluster resize-ring abort"),
-    check_admin_cmd(Node, "cluster plan"),
-    check_admin_cmd(Node, "cluster commit"),
-    check_admin_cmd(Node, "cluster clear").
-
-%% riak-admin bucket_type
-bucket_tests(Node) ->
-    check_admin_cmd(Node, "bucket-type status foo"),
-    check_admin_cmd(Node, "bucket-type activate foo"),
-    check_admin_cmd(Node, "bucket-type create foo {\"props\":{[]}}"),
-    check_admin_cmd(Node, "bucket-type update foo {\"props\":{[]}}"),
-    check_admin_cmd(Node, "bucket-type list").
- - -%% riak-admin security -security_tests(Node) -> - check_admin_cmd_2x(Node, "security add-user foo"), - check_admin_cmd_2x(Node, "security add-user foo x1=y1 x2=y2"), - check_admin_cmd_2x(Node, "security add-group group"), - check_admin_cmd_2x(Node, "security add-group group x1=y1 x2=y2"), - check_admin_cmd_2x(Node, "security alter-user foo x1=y1"), - check_admin_cmd_2x(Node, "security alter-user foo x1=y1 x2=y2"), - check_admin_cmd_2x(Node, "security alter-group group x1=y1 x2=y2"), - check_admin_cmd(Node, "security del-user foo"), - check_admin_cmd(Node, "security del-group group"), - check_admin_cmd(Node, "security add-source all 192.168.100.0/22 y"), - check_admin_cmd(Node, "security add-source all 192.168.100.0/22 x x1=y1"), - check_admin_cmd(Node, "security add-source foo,bar 192.168.100.0/22 x x1=y1"), - check_admin_cmd(Node, "security add-source foo,bar,baz 192.168.100.0/22 x x1=y1 x2=y2"), - check_admin_cmd(Node, "security del-source all 192.168.100.0/22"), - check_admin_cmd(Node, "security del-source x 192.168.100.0/22"), - check_admin_cmd(Node, "security del-source x,y,z 192.168.100.0/22"), - check_admin_cmd(Node, "security grant foo on any my_bucket to x"), - check_admin_cmd(Node, "security grant foo,bar on any my_bucket to x"), - check_admin_cmd(Node, "security grant foo on any my_bucket to x,y,z"), - check_admin_cmd(Node, "security grant foo,bar,baz on any my_bucket to y"), - check_admin_cmd(Node, "security grant foo,bar,baz on foo my_bucket to y"), - check_admin_cmd(Node, "security revoke foo on any my_bucket from x"), - check_admin_cmd(Node, "security revoke foo,bar on any my_bucket from x"), - check_admin_cmd(Node, "security revoke foo on any my_bucket from x,y,z"), - check_admin_cmd(Node, "security revoke foo,bar,baz on any my_bucket from y"), - check_admin_cmd(Node, "security revoke foo,bar,baz on foo my_bucket from y"), - check_admin_cmd(Node, "security print-users"), - check_admin_cmd(Node, "security print-sources"), - check_admin_cmd_2x(Node, "security enable"), - check_admin_cmd_2x(Node, "security disable"), - check_admin_cmd(Node, "security status"), - check_admin_cmd(Node, "security print-user foo"), - check_admin_cmd(Node, "security print-group group"), - check_admin_cmd(Node, "security print-grants foo"), - check_admin_cmd(Node, "security ciphers foo"). 
- -%% "top level" riak-admin COMMANDS -riak_admin_tests(Node) -> - check_admin_cmd(Node, "join -f dev99@127.0.0.1"), - check_admin_cmd(Node, "leave -f"), - check_admin_cmd(Node, "force-remove -f dev99@127.0.0.1"), - check_admin_cmd(Node, "force_remove -f dev99@127.0.0.1"), - check_admin_cmd(Node, "down dev98@127.0.0.1"), - check_admin_cmd(Node, "status"), - check_admin_cmd(Node, "vnode-status"), - check_admin_cmd(Node, "vnode_status"), - check_admin_cmd(Node, "ringready"), - check_admin_cmd(Node, "transfers"), - check_admin_cmd(Node, "member-status"), - check_admin_cmd(Node, "member_status"), - check_admin_cmd(Node, "ring-status"), - check_admin_cmd(Node, "ring_status"), - check_admin_cmd(Node, "aae-status"), - check_admin_cmd(Node, "aae_status"), - check_admin_cmd(Node, "repair_2i status"), - check_admin_cmd(Node, "repair_2i kill"), - check_admin_cmd(Node, "repair_2i --speed 5 foo bar baz"), - check_admin_cmd(Node, "repair-2i status"), - check_admin_cmd(Node, "repair-2i kill"), - check_admin_cmd(Node, "repair-2i --speed 5 foo bar baz"), - check_admin_cmd(Node, "cluster_info foo local"), - check_admin_cmd(Node, "cluster_info foo local dev99@127.0.0.1"), - check_admin_cmd(Node, "erl-reload"), - check_admin_cmd(Node, "erl_reload"), - check_admin_cmd(Node, "transfer-limit 1"), - check_admin_cmd(Node, "transfer-limit dev55@127.0.0.1 1"), - check_admin_cmd(Node, "transfer_limit 1"), - check_admin_cmd(Node, "transfer_limit dev55@127.0.0.1 1"), - check_admin_cmd(Node, "reformat-indexes --downgrade"), - check_admin_cmd(Node, "reformat-indexes 5"), - check_admin_cmd(Node, "reformat-indexes 6 7"), - check_admin_cmd(Node, "reformat-indexes 5 --downgrade"), - check_admin_cmd(Node, "reformat-indexes 6 7 --downgrade"), - check_admin_cmd(Node, "reformat_indexes --downgrade"), - check_admin_cmd(Node, "reformat_indexes 5"), - check_admin_cmd(Node, "reformat_indexes 6 7"), - check_admin_cmd(Node, "reformat_indexes 5 --downgrade"), - check_admin_cmd(Node, "reformat_indexes 6 7 --downgrade"), - check_admin_cmd(Node, "downgrade_objects true"), - check_admin_cmd(Node, "downgrade_objects true 1"), - check_admin_cmd(Node, "downgrade_objects true"), - check_admin_cmd(Node, "downgrade_objects true 1"), - check_admin_cmd(Node, "js-reload foo bar baz"), - ok. 
- -confirm() -> - %% Deploy a node to test against - lager:info("Deploy node to test riak command line"), - [Node] = rt:deploy_nodes(1), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), - rt_intercept:add(Node, - {riak_core_console, - [ - {{transfers,1}, verify_console_transfers}, - {{member_status,1}, verify_console_member_status}, - {{ring_status,1}, verify_console_ring_status}, - {{stage_remove,1}, verify_console_stage_remove}, - {{stage_leave,1}, verify_console_stage_leave}, - {{stage_replace, 1}, verify_console_stage_replace}, - {{stage_force_replace, 1}, verify_console_stage_force_replace}, - {{stage_resize_ring, 1}, verify_console_stage_resize_ring}, - {{print_staged, 1}, verify_console_print_staged}, - {{commit_staged, 1}, verify_console_commit_staged}, - {{clear_staged, 1}, verify_console_clear_staged}, - {{transfer_limit, 1}, verify_console_transfer_limit}, - {{add_user, 1}, verify_console_add_user}, - {{alter_user, 1}, verify_console_alter_user}, - {{del_user, 1}, verify_console_del_user}, - {{add_group, 1}, verify_console_add_group}, - {{alter_group, 1}, verify_console_alter_group}, - {{del_group, 1}, verify_console_del_group}, - {{add_source, 1}, verify_console_add_source}, - {{del_source, 1}, verify_console_del_source}, - {{grant, 1}, verify_console_grant}, - {{revoke, 1}, verify_console_revoke}, - {{print_user,1}, verify_console_print_user}, - {{print_users,1}, verify_console_print_users}, - {{print_group,1}, verify_console_print_group}, - {{print_groups,1}, verify_console_print_groups}, - {{print_grants,1}, verify_console_print_grants}, - {{print_sources, 1}, verify_console_print_sources}, - {{security_enable,1}, verify_console_security_enable}, - {{security_disable,1}, verify_console_security_disable}, - {{security_status,1}, verify_console_security_stats}, - {{ciphers,1}, verify_console_ciphers} ]}), - - rt_intercept:add(Node, - {riak_kv_console, - [ - {{join,1}, verify_console_join}, - {{leave,1}, verify_console_leave}, - {{remove,1}, verify_console_remove}, - {{staged_join,1}, verify_console_staged_join}, - {{down,1}, verify_console_down}, - {{status,1}, verify_console_status}, - {{vnode_status,1}, verify_console_vnode_status}, - {{ringready,1}, verify_console_ringready}, - {{aae_status,1}, verify_console_aae_status}, - {{cluster_info, 1}, verify_console_cluster_info}, - {{reload_code, 1}, verify_console_reload_code}, - {{repair_2i, 1}, verify_console_repair_2i}, - {{reformat_indexes, 1}, verify_console_reformat_indexes}, - {{reformat_objects, 1}, verify_console_reformat_objects}, - {{bucket_type_status,1}, verify_console_bucket_type_status}, - {{bucket_type_activate,1}, verify_console_bucket_type_activate}, - {{bucket_type_create,1}, verify_console_bucket_type_create}, - {{bucket_type_update,1}, verify_console_bucket_type_update}, - {{bucket_type_list,1}, verify_console_bucket_type_list} - ]}), - - rt_intercept:add(Node, - {riak_kv_js_manager, - [ - {{reload,1}, verify_console_reload} - ]}), - - rt_intercept:wait_until_loaded(Node), - - riak_admin_tests(Node), - cluster_tests(Node), - bucket_tests(Node), - security_tests(Node), - pass. - -check_admin_cmd(Node, Cmd) -> - S = string:tokens(Cmd, " "), - lager:info("Testing riak-admin ~s on ~s", [Cmd, Node]), - {ok, Out} = rt:admin(Node, S), - ?assertEqual("pass", Out). 
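For context on the "pass" assertion above: an intercept is an ordinary Erlang function that shadows the real console function on the node and prints to stdout, which rt:admin/2 then captures as the command output. A minimal sketch of one (illustrative only; the real definitions live in the suite's intercepts modules and the exact macro names may differ):

    -define(PASS, io:format("pass", [])).
    -define(FAIL, io:format("fail", [])).

    %% Shadows riak_kv_console:join/1; prints "pass" only when the
    %% shell script forwarded exactly the expected argument list.
    verify_console_join(["dev99@127.0.0.1"]) -> ?PASS;
    verify_console_join(_) -> ?FAIL.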
- -%% Recently we've started calling riak_core_console twice from the -%% same riak-admin invocation; this will result in "passpass" as a -%% return instead of a simple "pass" -check_admin_cmd_2x(Node, Cmd) -> - S = string:tokens(Cmd, " "), - lager:info("Testing riak-admin ~s on ~s", [Cmd, Node]), - {ok, Out} = rt:admin(Node, S), - ?assertEqual("passpass", Out). diff --git a/tests/riak_control.erl b/tests/riak_control.erl deleted file mode 100644 index 8ad4878b7..000000000 --- a/tests/riak_control.erl +++ /dev/null @@ -1,261 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%% -%% @doc Run Riak Control on all nodes, and verify that we can upgrade -%% from legacy and previous to current, while ensuring Riak Control -%% continues to operate and doesn't crash on any node. - --module(riak_control). - --behaviour(riak_test). - --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(RC_ENABLE_CFG, [{riak_control, [{enabled, true}, {auth, none}]}]). - -%% @doc Verify that Riak Control operates predictably during an upgrade. -confirm() -> - verify_upgrade(legacy), - rt:setup_harness(ignored, ignored), - verify_upgrade(previous), - rt:setup_harness(ignored, ignored), - pass. - -%% @doc Verify an upgrade succeeds with all nodes running control from -%% the specified `Vsn' to current. -verify_upgrade(Vsn) -> - lager:info("Verify upgrade from ~p to current.", [Vsn]), - - lager:info("Building cluster."), - [Nodes] = rt:build_clusters([{3, Vsn, ?RC_ENABLE_CFG}]), - - lager:info("Verifying all nodes are alive."), - verify_alive(Nodes), - - lager:info("Upgrading each node and verifying Control."), - VersionedNodes = [{Vsn, Node} || Node <- Nodes], - lists:foldl(fun verify_upgrade_fold/2, VersionedNodes, VersionedNodes), - - lager:info("Validate capability convergence."), - validate_capability(VersionedNodes), - - ok. - -%% @doc Verify upgrade fold function. -verify_upgrade_fold({FromVsn, Node}, VersionedNodes0) -> - lager:info("Upgrading ~p from ~p to current.", [Node, FromVsn]), - - lager:info("Performing upgrade."), - rt:upgrade(Node, current), - rt:wait_for_service(Node, riak_kv), - - %% Wait for Riak Control to start. - rt:wait_for_control(VersionedNodes0), - - %% Wait for Riak Control polling cycle. - wait_for_control_cycle(Node), - - lager:info("Versioned nodes is: ~p.", [VersionedNodes0]), - VersionedNodes = lists:keyreplace(Node, 2, VersionedNodes0, {current, Node}), - lager:info("Versioned nodes is now: ~p.", [VersionedNodes]), - - lager:info("Verify that all nodes are still alive."), - verify_alive([VersionedNode || {_, VersionedNode} <- VersionedNodes]), - - lager:info("Verify that control still works on all nodes."), - verify_control(VersionedNodes), - - VersionedNodes. 
-
-verify_control({current, Node}, VersionedNodes) ->
-    lager:info("Verifying control on node ~p vsn current.", [Node]),
-
-    %% Verify node resource.
-    {struct,
-     [{<<"nodes">>, Nodes}]} = verify_resource(Node, "/admin/nodes"),
-    validate_nodes(Node, Nodes, VersionedNodes, any),
-
-    %% Verify partitions resource.
-    {struct,
-     [{<<"partitions">>, Partitions},
-      {<<"default_n_val">>, _}]} = verify_resource(Node, "/admin/partitions"),
-    validate_partitions({current, Node}, Partitions, VersionedNodes),
-
-    ok;
-verify_control({Vsn, Node}, VersionedNodes) ->
-    lager:info("Verifying control on node ~p vsn ~p.", [Node, Vsn]),
-
-    %% Verify node resource.
-    {struct,
-     [{<<"nodes">>, Nodes}]} = verify_resource(Node, "/admin/nodes"),
-    validate_nodes(Node, Nodes, VersionedNodes, any),
-
-    %% Verify partitions resource.
-    {struct,
-     [{<<"partitions">>, Partitions}]} = verify_resource(Node, "/admin/partitions"),
-    validate_partitions({previous, Node}, Partitions, VersionedNodes),
-
-    ok.
-verify_control(VersionedNodes) ->
-    [verify_control(NodeVsn, VersionedNodes) || NodeVsn <- VersionedNodes].
-
-%% @doc Verify a particular JSON resource responds.
-verify_resource(Node0, Resource) ->
-    Node = rt:http_url(Node0),
-    Output = os:cmd(io_lib:format("curl -s -S ~s~p", [Node, Resource])),
-    lager:info("Verifying node ~p resource ~p.", [Node, Resource]),
-    mochijson2:decode(Output).
-
-%% @doc Verify that riak_kv is still running on all nodes.
-verify_alive(Nodes) ->
-    [rt:wait_for_service(Node, riak_kv) || Node <- Nodes].
-
-%% @doc This section iterates over the JSON response of nodes, and
-%%      verifies that each node is reporting its status correctly based
-%%      on its current Vsn.
-validate_nodes(ControlNode, ResponseNodes, VersionedNodes, Status0) ->
-    MixedCluster = mixed_cluster(VersionedNodes),
-    lager:info("Mixed cluster: ~p.", [MixedCluster]),
-
-    lists:map(fun({struct, Node}) ->
-
-                      %% Parse JSON further.
-                      BinaryName = proplists:get_value(<<"name">>, Node),
-                      Status = proplists:get_value(<<"status">>, Node),
-                      Name = list_to_existing_atom(binary_to_list(BinaryName)),
-
-                      %% Find current Vsn of node we are validating, and the
-                      %% vsn of the node running Riak Control that we've
-                      %% queried.
-                      {NodeVsn, _} = lists:keyfind(Name, 2, VersionedNodes),
-                      {ControlVsn, _} = lists:keyfind(ControlNode, 2, VersionedNodes),
-
-                      %% Determine what the correct status should be, or if
-                      %% we've been told to test a specific status, use that.
-                      case Status0 of
-                          any ->
-                              ?assertEqual(true,
-                                           valid_status(MixedCluster, ControlVsn,
-                                                        NodeVsn, Status));
-                          _ ->
-                              ?assertEqual(Status0, Status)
-                      end
-              end, ResponseNodes).
-
-%% @doc Determine if we're currently running mixed mode.
-mixed_cluster(VersionedNodes) ->
-    length(lists:usort(
-             lists:map(fun({Vsn, _}) -> Vsn end, VersionedNodes))) =/= 1.
-
-wait_for_control_cycle(Node) when is_atom(Node) ->
-    lager:info("Waiting for riak_control poll on node ~p.", [Node]),
-
-    {ok, CurrentVsn} = rpc:call(Node,
-                                riak_control_session,
-                                get_version,
-                                []),
-    ExpectedVsn = CurrentVsn + 1,
-
-    rt:wait_until(Node, fun(N) ->
-                                {ok, Vsn} = rpc:call(N,
-                                                     riak_control_session,
-                                                     get_version,
-                                                     []),
-                                Vsn =:= ExpectedVsn
-                        end).
-
-%% @doc Validate partitions response.
-validate_partitions({current, _}, _ResponsePartitions, _VersionedNodes) ->
-    %% The newest version of the partitions display can derive the
-    %% partition state without relying on data from rpc calls -- it can
-    %% use just the ring to do this. Don't test anything specific here
-    %% yet.
-    ok;
-validate_partitions({ControlVsn, _}, ResponsePartitions, VersionedNodes) ->
-    MixedCluster = mixed_cluster(VersionedNodes),
-    lager:info("Mixed cluster: ~p.", [MixedCluster]),
-
-    lists:map(fun({struct, Partition}) ->
-
-                      %% Parse JSON further.
-                      BinaryName = proplists:get_value(<<"node">>, Partition),
-                      Status = proplists:get_value(<<"status">>, Partition),
-                      Name = list_to_existing_atom(binary_to_list(BinaryName)),
-
-                      %% Find current Vsn of node we are validating, and the
-                      %% vsn of the node running Riak Control that we've
-                      %% queried.
-                      {NodeVsn, _} = lists:keyfind(Name, 2, VersionedNodes),
-
-                      %% Validate response.
-                      ?assertEqual(true,
-                                   valid_status(MixedCluster, ControlVsn,
-                                                NodeVsn, Status))
-              end, ResponsePartitions).
-
-%% @doc Validate status based on Vsn.
-valid_status(false, current, current, <<"incompatible">>) ->
-    %% Fully upgraded cluster, but might have not negotiated yet.
-    true;
-valid_status(false, current, current, <<"valid">>) ->
-    %% Fully upgraded cluster, but already negotiated.
-    true;
-valid_status(true, _, _, <<"valid">>) ->
-    %% Cross-version communication in mixed cluster.
-    true;
-valid_status(MixedCluster, ControlVsn, NodeVsn, Status) ->
-    %% Default failure case.
-    lager:info("Invalid status: ~p ~p ~p ~p", [MixedCluster,
-                                               ControlVsn,
-                                               NodeVsn,
-                                               Status]),
-    false.
-
-%% @doc Validate capability has converged.
-validate_capability(VersionedNodes) ->
-    %% Wait for capability negotiation.
-    [rt:wait_until_capability(Node,
-                              {riak_control, member_info_version},
-                              v1) || {_, Node} <- VersionedNodes],
-
-    %% We can test any node here, so just choose the first.
-    [{_Vsn, Node}|_] = VersionedNodes,
-    lager:info("Verifying capability through ~p.", [Node]),
-
-    %% Wait until Riak Control converges.
-    lager:info("Waiting for riak_control to converge."),
-
-    rt:wait_until(Node, fun(N) ->
-                                {ok, _, Status} = rpc:call(N,
-                                                           riak_control_session,
-                                                           get_status,
-                                                           []),
-                                Status =:= valid
-                        end),
-
-    %% Get the current response.
-    {struct,
-     [{<<"nodes">>, Nodes}]} = verify_resource(Node, "/admin/nodes"),
-
-    %% Validate we are in the correct state, not the incompatible state,
-    %% which ensures the capability has negotiated correctly.
-    validate_nodes(Node, Nodes, VersionedNodes, <<"valid">>).
diff --git a/tests/riak_control_authentication.erl b/tests/riak_control_authentication.erl
deleted file mode 100644
index a8bbb7f8f..000000000
--- a/tests/riak_control_authentication.erl
+++ /dev/null
@@ -1,229 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%
-%% @doc Verify authentication works for riak_control.
-
--module(riak_control_authentication).
-
--behaviour(riak_test).
-
--export([confirm/0]).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(RC_AUTH_NONE_CONFIG,
-        [{riak_control, [{enabled, true},
-                         {auth, none}]}]).
- --define(RC_AUTH_NONE_CONFIG_FORCE_SSL, - [{riak_api, [{https, [{"127.0.0.1", 8069}]}]}, - {riak_core, - [{https, [{"127.0.0.1", 8069}]}, - {ssl, - [{certfile, "./etc/cert.pem"}, - {keyfile, "./etc/key.pem"} - ]}]}, - {riak_control, [{enabled, true}, - {auth, none}, - {force_ssl, true}]}]). - --define(RC_AUTH_USERLIST_CONFIG, - [{riak_api, [{https, [{"127.0.0.1", 8069}]}]}, - {riak_core, - [{https, [{"127.0.0.1", 8069}]}, - {ssl, - [{certfile, "./etc/cert.pem"}, - {keyfile, "./etc/key.pem"} - ]}]}, - {riak_control, [{enabled, true}, - {auth, userlist}, - {userlist, [{"user", "pass"}]}]}]). - --define(RC_AUTH_USERLIST_CONFIG_FORCE_SSL, - [{riak_api, [{https, [{"127.0.0.1", 8069}]}]}, - {riak_core, - [{https, [{"127.0.0.1", 8069}]}, - {ssl, - [{certfile, "./etc/cert.pem"}, - {keyfile, "./etc/key.pem"} - ]}]}, - {riak_control, [{enabled, true}, - {force_ssl, true}, - {auth, userlist}, - {userlist, [{"user", "pass"}]}]}]). - --define(RC_AUTH_USERLIST_CONFIG_NO_FORCE_SSL, - [{riak_api, [{https, [{"127.0.0.1", 8069}]}]}, - {riak_core, - [{https, [{"127.0.0.1", 8069}]}, - {ssl, - [{certfile, "./etc/cert.pem"}, - {keyfile, "./etc/key.pem"} - ]}]}, - {riak_control, [{enabled, true}, - {force_ssl, false}, - {auth, userlist}, - {userlist, [{"user", "pass"}]}]}]). - -%% @doc Confirm all authentication methods work for the three supported -%% releases. -confirm() -> - %% Verify authentication method 'none'. - verify_authentication(legacy, ?RC_AUTH_NONE_CONFIG), - verify_authentication(previous, ?RC_AUTH_NONE_CONFIG), - - %% Verify authentication method 'userlist'. - verify_authentication(legacy, ?RC_AUTH_USERLIST_CONFIG), - verify_authentication(previous, ?RC_AUTH_USERLIST_CONFIG), - - %% Verify authentication none, and then with forced SSL. - verify_authentication(current, ?RC_AUTH_NONE_CONFIG), - verify_authentication(current, ?RC_AUTH_NONE_CONFIG_FORCE_SSL), - - %% Verify authentication userlist, without SSL and then with SSL. - verify_authentication(current, ?RC_AUTH_USERLIST_CONFIG_FORCE_SSL), - verify_authentication(current, ?RC_AUTH_USERLIST_CONFIG_NO_FORCE_SSL). - -%% @doc Verify the disabled authentication method works. -verify_authentication(Vsn, ?RC_AUTH_NONE_CONFIG) -> - lager:info("Verifying auth 'none', ~p.", [Vsn]), - Nodes = build_singleton_cluster(Vsn, ?RC_AUTH_NONE_CONFIG), - Node = lists:nth(1, Nodes), - - %% Assert that we can load the main page. - lager:info("Verifying Control loads."), - Command = io_lib:format("curl -sL -w %{http_code} ~s~p -o /dev/null", - [rt:http_url(Node), "/admin"]), - ?assertEqual("200", os:cmd(Command)), - - pass; -%% @doc Verify the disabled authentication method works with force SSL. -verify_authentication(current, ?RC_AUTH_NONE_CONFIG_FORCE_SSL) -> - lager:info("Verifying auth 'none', 'force_ssl' 'true', current."), - Nodes = build_singleton_cluster(current, - ?RC_AUTH_NONE_CONFIG_FORCE_SSL), - Node = lists:nth(1, Nodes), - - %% Assert that we get redirected if we hit the HTTP port. - lager:info("Verifying redirect to SSL."), - RedirectCommand = io_lib:format("curl -sL -w %{http_code} ~s~p -o /dev/null", - [rt:http_url(Node), "/admin"]), - ?assertEqual("303", os:cmd(RedirectCommand)), - - %% TODO: Temporarily disabled because of OTP R16B02 SSL bug. - %% Assert that we can access resource over the SSL port. 
- % lager:info("Verifying Control loads over SSL."), - % AccessCommand = io_lib:format("curl --insecure -sL -w %{http_code} ~s~p", - % [rt:https_url(Node), "/admin"]), - % ?assertEqual("200", os:cmd(AccessCommand)), - - pass; -%% @doc Verify the userlist authentication method works. -verify_authentication(Vsn, ?RC_AUTH_USERLIST_CONFIG) -> - lager:info("Verifying auth 'userlist', ~p.", [Vsn]), - Nodes = build_singleton_cluster(Vsn, ?RC_AUTH_USERLIST_CONFIG), - Node = lists:nth(1, Nodes), - - %% Assert that we get redirected if we hit the HTTP port. - lager:info("Verifying redirect to SSL."), - RedirectCommand = io_lib:format("curl -sL -w %{http_code} ~s~p -o /dev/null", - [rt:http_url(Node), "/admin"]), - ?assertEqual("303", os:cmd(RedirectCommand)), - - %% Assert that we can access resource over the SSL port. - lager:info("Verifying Control loads over SSL."), - AccessCommand = io_lib:format("curl --insecure -sL -w %{http_code} ~s~p -o /dev/null", - [rt:https_url(Node), "/admin"]), - ?assertEqual("401", os:cmd(AccessCommand)), - - %% Assert that we can access resource over the SSL port. - lager:info("Verifying Control loads with credentials."), - AuthCommand = io_lib:format("curl -u user:pass --insecure -sL -w %{http_code} ~s~p -o /dev/null", - [rt:https_url(Node), "/admin"]), - ?assertEqual("200", os:cmd(AuthCommand)), - - pass; -%% @doc Verify the userlist authentication method works. -verify_authentication(current, ?RC_AUTH_USERLIST_CONFIG_FORCE_SSL) -> - lager:info("Verifying auth 'userlist', 'force_ssl' 'true', current."), - Nodes = build_singleton_cluster(current, ?RC_AUTH_USERLIST_CONFIG_FORCE_SSL), - Node = lists:nth(1, Nodes), - - %% Assert that we get redirected if we hit the HTTP port. - lager:info("Verifying redirect to SSL."), - RedirectCommand = io_lib:format("curl -sL -w %{http_code} ~s~p -o /dev/null", - [rt:http_url(Node), "/admin"]), - ?assertEqual("303", os:cmd(RedirectCommand)), - - %% TODO: Temporarily disabled because of OTP R16B02 SSL bug. - %% Assert that we can access resource over the SSL port. - % lager:info("Verifying Control loads over SSL."), - % AccessCommand = io_lib:format("curl --insecure -sL -w %{http_code} ~s~p -o /dev/null", - % [rt:https_url(Node), "/admin"]), - % ?assertEqual("401", os:cmd(AccessCommand)), - - %% TODO: Temporarily disabled because of OTP R16B02 SSL bug. - %% Assert that we can access resource over the SSL port. - % lager:info("Verifying Control loads with credentials."), - % AuthCommand = io_lib:format("curl -u user:pass --insecure -sL -w %{http_code} ~s~p -o /dev/null", - % [rt:https_url(Node), "/admin"]), - % ?assertEqual("200", os:cmd(AuthCommand)), - - pass; -%% @doc Verify the userlist authentication method works. -verify_authentication(current, ?RC_AUTH_USERLIST_CONFIG_NO_FORCE_SSL) -> - lager:info("Verifying auth 'userlist', 'force_ssl' 'false', current."), - Nodes = build_singleton_cluster(current, ?RC_AUTH_USERLIST_CONFIG_NO_FORCE_SSL), - Node = lists:nth(1, Nodes), - - %% Assert that we can access resource over the SSL port. - lager:info("Verifying Control loads over SSL."), - AccessCommand = io_lib:format("curl --insecure -sL -w %{http_code} ~s~p -o /dev/null", - [rt:http_url(Node), "/admin"]), - ?assertEqual("401", os:cmd(AccessCommand)), - - %% Assert that we can access resource over the SSL port. 
- lager:info("Verifying Control loads with credentials."), - AuthCommand = io_lib:format("curl -u user:pass --insecure -sL -w %{http_code} ~s~p -o /dev/null", - [rt:http_url(Node), "/admin"]), - ?assertEqual("200", os:cmd(AuthCommand)), - - pass. - -%% @doc Build a one node cluster. -build_singleton_cluster(Vsn, Config) -> - [Nodes] = rt:build_clusters([{1, Vsn, Config}]), - - %% Start and stop, wait for riak_kv. - %% - %% Since many of the Riak Control configuration options change how - %% the supervisor starts, we need to restart to ensure settings - %% take effect. - Node = lists:nth(1, Nodes), - rt:stop_and_wait(Node), - rt:start_and_wait(Node), - rt:wait_for_service(Node, riak_kv), - - %% Wait for control to start. - VersionedNodes = [{Vsn, N} || N <- Nodes], - rt:wait_for_control(VersionedNodes), - - lager:info("Build ~p, nodes: ~p.", [Vsn, Nodes]), - Nodes. diff --git a/tests/riak_rex.erl b/tests/riak_rex.erl deleted file mode 100644 index 30ace1353..000000000 --- a/tests/riak_rex.erl +++ /dev/null @@ -1,59 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% ------------------------------------------------------------------- --module(riak_rex). --behaviour(riak_test). --export([confirm/0]). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). - -%% @doc riak_test entry point -confirm() -> - SetupData = setup(current), - rex_test(SetupData), - pass. - -setup(Type) -> - deploy_node(Type). - -rex_test(Node) -> - % validated we can get the rex pid on the node - RexPid1 = riak_core_util:safe_rpc(Node, erlang, whereis, [rex]), - ?assertEqual(node(RexPid1), Node), - % kill rex on the node and check that safe_rpc works - kill_rex(Node), - ErrorTuple = riak_core_util:safe_rpc(Node, erlang, whereis, [rex]), - ?assertEqual(ErrorTuple, {badrpc,rpc_process_down}), - % restart rex - supervisor:restart_child({kernel_sup, Node}, rex), - RexPid2 = riak_core_util:safe_rpc(Node, erlang, whereis, [rex]), - ?assertEqual(node(RexPid2), Node). - - -deploy_node(NumNodes, current) -> - rt:deploy_nodes(NumNodes, conf()); -deploy_node(_, mixed) -> - Conf = conf(), - rt:deploy_nodes([{current, Conf}, {previous, Conf}]). - -deploy_node(Type) -> - NumNodes = rt_config:get(num_nodes, 1), - - lager:info("Deploy ~p node", [NumNodes]), - Node = deploy_node(NumNodes, Type), - lager:info("Node: ~p", [Node]), - hd(Node). - -kill_rex(Node) -> - ok = supervisor:terminate_child({kernel_sup, Node}, rex). - -conf() -> - [ - {riak_kv, - [ - {anti_entropy, {off, []}} - ] - } - ]. diff --git a/tests/riaknostic_rt.erl b/tests/riaknostic_rt.erl deleted file mode 100644 index 85ed822a5..000000000 --- a/tests/riaknostic_rt.erl +++ /dev/null @@ -1,108 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. 
-%% -%% ------------------------------------------------------------------- --module(riaknostic_rt). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -%% Change when a new release comes out. --define(RIAKNOSTIC_URL, "https://github.com/basho/riaknostic/downloads/riaknostic-1.0.2.tar.gz"). - -%% REQUIRES (sh, curl, tar) - -confirm() -> - %% Build a small cluster - [Node1, _Node2] = rt:build_cluster(2, []), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), - - %% Install riaknostic for Riak versions below 1.3.0 - riaknostic_bootstrap(Node1), - - %% Run through all tests on Node1 - check_riaknostic_execute(Node1), - check_riaknostic_usage(Node1), - check_riaknostic_command_list(Node1), - check_riaknostic_log_levels(Node1), - - %% Done! - lager:info("Test riaknostic: PASS"), - pass. - -riaknostic_bootstrap(Node) -> - lager:info("Check if riaknostic is installed"), - {ok, RiaknosticOut1} = rt:admin(Node, ["diag"]), - riaknostic_install((rt:str(RiaknosticOut1, "is not present!")), Node). - -%% riaknostic is already installed, move along -riaknostic_install(false, _Node) -> - ok; - -%% install riaknostic -riaknostic_install(true, Node) -> - %% Install - lager:info("Installing Riaknostic"), - {ok, LibDir} = rpc:call(Node, application, get_env, [riak_core, platform_lib_dir]), - Cmd = io_lib:format("sh -c \"cd ~s && curl -O -L ~s && tar xzf ~s\"", - [LibDir, ?RIAKNOSTIC_URL, filename:basename(?RIAKNOSTIC_URL)]), - lager:info("Running command: ~s", [Cmd]), - lager:debug("~p~n", [rpc:call(Node, os, cmd, [Cmd])]), - ok. - -%% Check that riaknostic executes -check_riaknostic_execute(Node) -> - %% Execute - lager:info("** Check Riaknostic executes"), - {ok, RiaknosticOut} = rt:admin(Node, ["diag"]), - ?assertNot(rt:str(RiaknosticOut, "is not present!")), - ?assertNot(rt:str(RiaknosticOut, "[debug]")), - ok. - -%% Check that riaknostic gives a usage message -check_riaknostic_usage(Node) -> - %% Check usage message - lager:info("** Run Riaknostic usage message"), - {ok, RiaknosticOut} = rt:admin(Node, ["diag", "--help"]), - ?assert(rt:str(RiaknosticOut, "Usage: riak-admin")), - ok. - -%% Check that riaknostic gives a command listing -check_riaknostic_command_list(Node) -> - %% Check commands list - lager:info("** Run Riaknostic commands list message"), - {ok, RiaknosticOut} = rt:admin(Node, ["diag", "--list"]), - ?assert(rt:str(RiaknosticOut, "Available diagnostic checks")), - ?assert(rt:str(RiaknosticOut, " disk ")), - ?assert(rt:str(RiaknosticOut, " dumps ")), - ?assert(rt:str(RiaknosticOut, " memory_use ")), - ?assert(rt:str(RiaknosticOut, " nodes_connected")), - ?assert(rt:str(RiaknosticOut, " ring_membership")), - ?assert(rt:str(RiaknosticOut, " ring_preflists ")), - ?assert(rt:str(RiaknosticOut, " ring_size ")), - ?assert(rt:str(RiaknosticOut, " search ")), - ok. - -%% Check that log levels can be set -check_riaknostic_log_levels(Node) -> - %% Check log levels - lager:info("** Run Riaknostic with a different log level"), - {ok, RiaknosticOut} = rt:admin(Node, ["diag", "--level", "debug"]), - ?assert(rt:str(RiaknosticOut, "[debug]")), - ok. - diff --git a/tests/rolling_capabilities.erl b/tests/rolling_capabilities.erl deleted file mode 100644 index 5b05788b4..000000000 --- a/tests/rolling_capabilities.erl +++ /dev/null @@ -1,86 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. 
-%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(rolling_capabilities). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - TestMetaData = riak_test_runner:metadata(), - Count = 4, - OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - - ExpectedCurrent = [{riak_core, vnode_routing, proxy}, - {riak_core, staged_joins, true}, - {riak_kv, legacy_keylisting, false}, - {riak_kv, listkeys_backpressure, true}, - {riak_kv, mapred_2i_pipe, true}, - {riak_kv, mapred_system, pipe}, - {riak_kv, vnode_vclocks, true}, - {riak_kv, anti_entropy, enabled_v1}], - - ExpectedOld = case OldVsn of - legacy -> [{riak_core, vnode_routing, proxy}, - {riak_core, staged_joins, true}, - {riak_kv, legacy_keylisting, false}, - {riak_kv, listkeys_backpressure, true}, - {riak_kv, mapred_2i_pipe, true}, - {riak_kv, mapred_system, pipe}, - {riak_kv, vnode_vclocks, true}]; - previous -> [{riak_core, vnode_routing, proxy}, - {riak_core, staged_joins, true}, - {riak_kv, legacy_keylisting, false}, - {riak_kv, listkeys_backpressure, true}, - {riak_kv, mapred_2i_pipe, true}, - {riak_kv, mapred_system, pipe}, - {riak_kv, vnode_vclocks, true}]; - _ -> [] - end, - - lager:info("Deploying Riak ~p cluster", [OldVsn]), - Nodes = rt:build_cluster([OldVsn || _ <- lists:seq(1,Count)]), - lists:foldl(fun(Node, Upgraded) -> - rt:upgrade(Node, current), - Upgraded2 = Upgraded ++ [Node], - lager:info("Verifying rolling/old capabilities"), - (Upgraded2 == Nodes) - orelse check_capabilities(Upgraded2, ExpectedOld), - Upgraded2 - end, [], Nodes), - lager:info("Verifying final/upgraded capabilities"), - check_capabilities(Nodes, ExpectedCurrent), - lager:info("Test ~p passed", [?MODULE]), - pass. - -check_capabilities(Nodes, Expected) -> - - CapCheck = fun(Node) -> - Caps = rt:capability(Node, all), - Results = [ verify_capability({ExpProj, ExpCap}, ExpVal, Caps) || {ExpProj, ExpCap, ExpVal} <- Expected ], - lists:all(fun(X) -> X =:= true end, Results) - end, - - [?assertEqual(ok, rt:wait_until(N, CapCheck)) || N <- Nodes], - ok. - -verify_capability({ExpProj, ExpCap}, ExpVal, Caps) -> - CurVal = proplists:get_value({ExpProj, ExpCap}, Caps), - lager:info("Verifying: ~p ~p ~p ~p", [ExpProj, ExpCap, ExpVal, CurVal]), - CurVal =:= ExpVal. diff --git a/tests/rpc_output.erl b/tests/rpc_output.erl deleted file mode 100644 index a6689d18a..000000000 --- a/tests/rpc_output.erl +++ /dev/null @@ -1,84 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. 
You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(rpc_output). - --behavior(riak_test). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). --compile([{parse_transform, lager_transform}]). - --behavior(gen_event). - -%% gen_event callbacks --export([init/1, - handle_call/2, - handle_event/2, - handle_info/2, - terminate/2, - code_change/3]). - -confirm() -> - gen_event:add_handler(lager_event, ?MODULE, []), - io:put_chars("This is an io:put_chars/1 call"), - io:format("This is an io:format/1 call"), - io:format("This is an io:format/~w call", [2]), - lager:info("This is a lager message"), - {ok, {LogId, Failures}} = gen_event:delete_handler(lager_event, ?MODULE, []), - ?assertEqual(5, LogId), - ?assertEqual([], Failures), - pass. - --record(state, {level = debug, verbose = true, log_id = 1, failures = []}). - -init(_) -> {ok, #state{}}. -handle_event({log, _Dest, _Level, {_Date, _Time}, [_LevelStr, _Location, Message]}, - State) -> - check_log_message(lists:flatten(Message), State); -handle_event({log, _Level, {_Date, _Time}, [_LevelStr, _Location, Message]}, State) -> - check_log_message(lists:flatten(Message), State); -handle_event(_, State) -> - {ok, State}. - -handle_call(_, State) -> - {ok, State}. - -handle_info(_, State) -> - {ok, State}. - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -terminate(_Reason, #state{log_id = LogId, failures = Failures}) -> - {ok, {LogId, Failures}}. - -check_log_message(Message, #state{log_id = LogId, failures = Failures} = State) -> - try - case LogId of - 1 -> ?assertEqual(Message, "This is an io:put_chars/1 call"); - 2 -> ?assertEqual(Message, "This is an io:format/1 call"); - 3 -> ?assertEqual(Message, "This is an io:format/2 call"); - 4 -> ?assertEqual(Message, "This is a lager message"); - _ -> ?assert(false) - end, - {ok, State#state{log_id = LogId + 1}} - catch - _:Reason -> {ok, State#state{log_id = LogId + 1, failures = [Reason|Failures]}} - end. diff --git a/tests/rt_basic_test.erl b/tests/rt_basic_test.erl deleted file mode 100644 index 91881c57f..000000000 --- a/tests/rt_basic_test.erl +++ /dev/null @@ -1,29 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(rt_basic_test). --behavior(riak_test). --export([confirm/0]). 
-
-confirm() ->
-    lager:info("Deploy some nodes"),
-    Nodes = rt:deploy_nodes(2),
-    lager:info("Stop the nodes"),
-    [rt:stop(Node) || Node <- Nodes],
-    pass.
diff --git a/tests/rt_cascading.erl b/tests/rt_cascading.erl
deleted file mode 100644
index 8c6702ed8..000000000
--- a/tests/rt_cascading.erl
+++ /dev/null
@@ -1,1246 +0,0 @@
-%% These tests were written using the following riak versions:
-%% current: 1.4
-%% previous: 1.3.1
-%% legacy: 1.2.1
-%%
-%% uses the following configs with given defaults:
-%%
-%% ## default_timeout = 1000 :: timeout()
-%%
-%% Base timeout value; some tests will use a larger value (multiple of).
-%%
-%% ## run_rt_cascading_1_3_tests = false :: any()
-%%
-%% Some tests (new_to_old and mixed_version_clusters) only make sense to
-%% run if one is testing the version before cascading was introduced and
-%% the version it was added; eg current being riak 1.4 and previous being
-%% riak 1.3. If this is set to anything (other than 'false') those tests
-%% are run. They will not function properly unless the correct versions
-%% for riak are available. The tests check if the versions under test are
-%% too old to be valid however.
-%%
-%% With this set to default, the tests that depend on this option will
-%% emit a log message saying they are not configured to run.
-%%
-
--module(rt_cascading).
--compile(export_all).
--behavior(riak_test).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(bucket, <<"objects">>).
-
--export([confirm/0]).
--export([new_to_old/0, mixed_version_clusters/0]).
-
-% cluster_mgr port = 10006 + 10n where n is devN
-
-confirm() ->
-    %% test requires allow_mult=false b/c of rt:systest_read
-    rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]),
-
-    case eunit:test(?MODULE, [verbose]) of
-        ok ->
-            pass;
-        error ->
-            % at the time this is written, the return value isn't actually
-            % checked, the only way to fail is to crash the process.
-            % I leave the fail here in hopes a future version will actually
-            % do what the documentation says.
-            exit(error),
-            fail
-    end.
-
--record(simple_state, {
-    beginning = [] :: [node()],
-    middle = [] :: [node()],
-    ending = [] :: [node()]
-}).
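The two settings described in the file header above are read through rt_config, so they can be supplied from the harness config. An illustrative stanza (assuming the usual profile layout of a riak_test config file):

    {default, [
        {default_timeout, 2000},
        {run_rt_cascading_1_3_tests, true}
    ]}.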
-
-simple_test_() ->
-    % +-----------+    +--------+    +-----+
-    % | beginning | -> | middle | -> | end |
-    % +-----------+    +--------+    +-----+
-    {timeout, timeout(90), {setup, fun() ->
-        Conf = conf(),
-        [BeginNode, MiddleNode, EndNode] = Nodes = rt:deploy_nodes(3, Conf),
-        repl_util:make_cluster([BeginNode]),
-        repl_util:make_cluster([MiddleNode]),
-        repl_util:make_cluster([EndNode]),
-        repl_util:name_cluster(BeginNode, "beginning"),
-        [repl_util:wait_until_is_leader(N) || N <- Nodes],
-        repl_util:name_cluster(MiddleNode, "middle"),
-        repl_util:name_cluster(EndNode, "end"),
-        #simple_state{beginning = BeginNode, middle = MiddleNode,
-            ending = EndNode}
-    end,
-    fun(State) ->
-        Nodes = [State#simple_state.beginning, State#simple_state.middle,
-            State#simple_state.ending],
-        rt:clean_cluster(Nodes)
-    end,
-    fun(State) -> [
-
-        {"connecting Beginning to Middle", fun() ->
-            Port = get_cluster_mgr_port(State#simple_state.middle),
-            repl_util:connect_cluster(State#simple_state.beginning, "127.0.0.1", Port),
-            repl_util:enable_realtime(State#simple_state.beginning, "middle"),
-            repl_util:start_realtime(State#simple_state.beginning, "middle")
-        end},
-
-        {"connecting Middle to End", fun() ->
-            Port = get_cluster_mgr_port(State#simple_state.ending),
-            repl_util:connect_cluster(State#simple_state.middle, "127.0.0.1", Port),
-            repl_util:enable_realtime(State#simple_state.middle, "end"),
-            repl_util:start_realtime(State#simple_state.middle, "end")
-        end},
-
-        {"cascade a put from beginning down to ending", timeout, timeout(25), fun() ->
-            BeginningClient = rt:pbc(State#simple_state.beginning),
-            Bin = <<"cascading realtime">>,
-            Obj = riakc_obj:new(<<"objects">>, Bin, Bin),
-            riakc_pb_socket:put(BeginningClient, Obj, [{w,1}]),
-            riakc_pb_socket:stop(BeginningClient),
-            ?assertEqual(Bin, maybe_eventually_exists(State#simple_state.middle, <<"objects">>, Bin)),
-            ?assertEqual(Bin, maybe_eventually_exists(State#simple_state.ending, <<"objects">>, Bin))
-        end},
-
-        {"disable cascading on middle", timeout, timeout(25), fun() ->
-            rpc:call(State#simple_state.middle, riak_repl_console, realtime_cascades, [["never"]]),
-            Bin = <<"disabled cascading">>,
-            Obj = riakc_obj:new(?bucket, Bin, Bin),
-            Client = rt:pbc(State#simple_state.beginning),
-            riakc_pb_socket:put(Client, Obj, [{w,1}]),
-            riakc_pb_socket:stop(Client),
-            ?assertEqual(Bin, maybe_eventually_exists(State#simple_state.middle, ?bucket, Bin)),
-            ?assertEqual({error, notfound}, maybe_eventually_exists(State#simple_state.ending, ?bucket, Bin))
-
-        end},
-
-        {"re-enable cascading", timeout, timeout(25), fun() ->
-            rpc:call(State#simple_state.middle, riak_repl_console, realtime_cascades, [["always"]]),
-            Bin = <<"cascading re-enabled">>,
-            Obj = riakc_obj:new(?bucket, Bin, Bin),
-            Client = rt:pbc(State#simple_state.beginning),
-            riakc_pb_socket:put(Client, Obj, [{w,1}]),
-            riakc_pb_socket:stop(Client),
-            ?assertEqual(Bin, maybe_eventually_exists(State#simple_state.middle, ?bucket, Bin)),
-            ?assertEqual(Bin, maybe_eventually_exists(State#simple_state.ending, ?bucket, Bin))
-        end},
-        {"check pendings", fun() ->
-            wait_until_pending_count_zero([State#simple_state.middle,
-                State#simple_state.beginning,
-                State#simple_state.ending])
-        end}
-    ] end}}.
-
-big_circle_test_() ->
-    % Initially just 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 1, but then 2 way is
-    % added later.
- % +---+ - % | 1 | - % +---+ - % ^ ^ - % / \ - % V V - % +---+ +---+ - % | 6 | | 2 | - % +---+ +---+ - % ^ ^ - % | | - % V V - % +---+ +---+ - % | 5 | | 3 | - % +---+ +---+ - % ^ ^ - % \ / - % V - % +---+ - % | 4 | - % +---+ - {timeout, timeout(130), {setup, fun() -> - Conf = conf(), - Nodes = rt:deploy_nodes(6, Conf), - [repl_util:make_cluster([N]) || N <- Nodes], - [repl_util:wait_until_is_leader(N) || N <- Nodes], - Names = ["1", "2", "3", "4", "5", "6"], - [repl_util:name_cluster(Node, Name) || {Node, Name} <- lists:zip(Nodes, Names)], - [NameHd | NameTail] = Names, - ConnectTo = NameTail ++ [NameHd], - NamePortMap = lists:map(fun({Node, Name}) -> - Port = get_cluster_mgr_port(Node), - {Name, Port} - end, lists:zip(Nodes, Names)), - Connect = fun({Node, ConnectToName}) -> - Port = proplists:get_value(ConnectToName, NamePortMap), - connect_rt(Node, Port, ConnectToName) - end, - Res = lists:map(Connect, lists:zip(Nodes, ConnectTo)), - ?debugFmt("der res: ~p", [Res]), - Nodes - end, - fun(Nodes) -> - rt:clean_cluster(Nodes) - end, - fun(Nodes) -> [ - - {"circle it", timeout, timeout(65), fun() -> - [One | _] = Nodes, - C = rt:pbc(One), - Bin = <<"goober">>, - Bucket = <<"objects">>, - Obj = riakc_obj:new(Bucket, Bin, Bin), - riakc_pb_socket:put(C, Obj, [{w,1}]), - riakc_pb_socket:stop(C), - [begin - ?debugFmt("Checking ~p", [Node]), - ?assertEqual(Bin, maybe_eventually_exists(Node, Bucket, Bin)) - end || Node <- Nodes] - end}, - - {"2 way repl, and circle it", timeout, timeout(65), fun() -> - ConnectTo = ["6", "1", "2", "3", "4", "5"], - Connect = fun({Node, ConnectToName}) -> - Nth = list_to_integer(ConnectToName), - ConnectNode = lists:nth(Nth, Nodes), - Port = get_cluster_mgr_port(ConnectNode), - connect_rt(Node, Port, ConnectToName) - end, - lists:map(Connect, lists:zip(Nodes, ConnectTo)), - C = rt:pbc(hd(Nodes)), - Bin = <<"2 way repl">>, - Bucket = <<"objects">>, - Obj = riakc_obj:new(Bucket, Bin, Bin), - riakc_pb_socket:put(C, Obj, [{w,1}]), - lists:map(fun(N) -> - ?debugFmt("Testing ~p", [N]), - ?assertEqual(Bin, maybe_eventually_exists(N, Bucket, Bin)) - end, Nodes) - % there will be duplicate writes, but due to size of the circle, - % there's not going to be a lot. Also, it's very difficult to - % determine when/where a duplicate may start/occur. - % a full breakdown: - % "1" forwards to "2" and "6", noting its local forwards. - % so we have two flows going. Assuming both sides flow at the same - % rate: - % 1 - % / \ - % 6 2: 6 has [1, 2, 6]; 2 has [1, 2, 6] - % 5 3: 5 has [1,2,5,6]; 3 has [1,2,3,6] - % 4 4: 4 has [1,2,4,5,6]; 4 has [1,2,3,4,6] ! double write - % 3 5: 3 has [1,2,3,4,5,6]; 5 has [1,2,3,4,5,6] ! double write - % - % let's explore the flow with 10 clusters: - % 1 - % / \ - % 10 2 10: [1,2,10]; 2: [1,2,10] - % 9 3 9: [1,2,9,10]; 3: [1,2,3,10] - % 8 4 8: [1,2,8,9,10]; 4: [1,2,3,4,10] - % 7 5 7: [1,2,7,8,9,10]; 5: [1,2,3,4,5,10] - % 6 6 6: [1,2,6,7,8,9,10]; 6: [1,2,3,4,5,6,10] !! - % 5 7 5: [1,2,5..10]; 7: [1..7,10] !! - % 4 8 4: [1,2,4..10]; 8: [1..8,10] !! - % 3 9 3: [1..10]; 9: [1..10] !! - % so, by adding 4 clusters, we've added 2 overlaps. - % best guess based on what's above is: - % NumDuplicateWrites = ceil(NumClusters/2 - 1.5) - end}, - {"check pendings", fun() -> - wait_until_pending_count_zero(Nodes) - end} - ] end}}. 
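The duplicate-write analysis in the comment above reduces to the closed form NumDuplicateWrites = ceil(NumClusters/2 - 1.5). A small sketch of that estimate (an illustration of the comment's best guess, not code from the test; ceil is spelled out because the ceil/1 BIF postdates the Erlang releases targeted here):

    %% Estimated double writes in a two-way ring of NumClusters clusters:
    %% 6 clusters -> 2 doubles, 10 clusters -> 4, matching the walkthrough above.
    estimated_duplicate_writes(NumClusters) when NumClusters >= 3 ->
        X = NumClusters / 2 - 1.5,
        T = trunc(X),
        case X > T of
            true  -> T + 1;
            false -> T
        end.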
- -circle_test_() -> - % +-----+ - % | one | - % +-----+ - % ^ \ - % / V - % +-------+ +-----+ - % | three | <- | two | - % +-------+ +-----+ - {timeout, timeout(30), {setup, fun() -> - Conf = conf(), - [One, Two, Three] = Nodes = rt:deploy_nodes(3, Conf), - [repl_util:make_cluster([N]) || N <- Nodes], - [repl_util:wait_until_is_leader(N) || N <- Nodes], - Names = ["one", "two", "three"], - [repl_util:name_cluster(Node, Name) || {Node, Name} <- lists:zip(Nodes, Names)], - - Connections = [ - {One, Two, "two"}, - {Two, Three, "three"}, - {Three, One, "one"} - ], - lists:map(fun({Node, ConnectNode, Name}) -> - Port = get_cluster_mgr_port(ConnectNode), - connect_rt(Node, Port, Name) - end, Connections), - Nodes - end, - fun(Nodes) -> - rt:clean_cluster(Nodes) - end, - fun(Nodes) -> [ - - {"cascade all the way to the other end, but no further", timeout, timeout(12), fun() -> - Client = rt:pbc(hd(Nodes)), - Bin = <<"cascading">>, - Obj = riakc_obj:new(<<"objects">>, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w,1}]), - ?assertEqual(Bin, maybe_eventually_exists(lists:last(Nodes), <<"objects">>, Bin)), - % we want to ensure there's not a cascade back to the beginning, so - % there's no event we can properly wait for. All we can do is wait - % and make sure we didn't update/write the object. - timer:sleep(1000), - Status = rpc:call(hd(Nodes), riak_repl2_rt, status, []), - [SinkData] = proplists:get_value(sinks, Status, [[]]), - ?assertEqual(undefined, proplists:get_value(expect_seq, SinkData)) - end}, - - {"cascade starting at a different point", timeout, timeout(12), fun() -> - [One, Two | _] = Nodes, - Client = rt:pbc(Two), - Bin = <<"start_at_two">>, - Obj = riakc_obj:new(<<"objects">>, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w,1}]), - ?assertEqual(Bin, maybe_eventually_exists(One, <<"objects">>, Bin)), - timer:sleep(1000), - Status = rpc:call(Two, riak_repl2_rt, status, []), - [SinkData] = proplists:get_value(sinks, Status, [[]]), - ?assertEqual(2, proplists:get_value(expect_seq, SinkData)) - end}, - {"check pendings", fun() -> - wait_until_pending_count_zero(Nodes) - end} - ] end}}. 
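Both assertions above dig expect_seq out of the first entry of the sinks proplist; a hypothetical helper that names the pattern (the status shape is taken from the rpc calls above, the helper itself is not in the patch):

    %% Returns the expect_seq of the node's first realtime sink, or
    %% 'undefined' when the node reports no sinks.
    sink_expect_seq(Node) ->
        Status = rpc:call(Node, riak_repl2_rt, status, []),
        case proplists:get_value(sinks, Status, []) of
            [SinkData | _] -> proplists:get_value(expect_seq, SinkData);
            [] -> undefined
        end.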
- -pyramid_test_() -> - % +-----+ - % | top | - % +-----+ - % / \ - % V V - % +------+ +-------+ - % | left | | right | - % +------+ +-------+ - % | | - % V V - % +-------+ +--------+ - % | left2 | | right2 | - % +-------+ +--------+ - - {timeout, timeout(70), {setup, fun() -> - Conf = conf(), - [Top, Left, Left2, Right, Right2] = Nodes = rt:deploy_nodes(5, Conf), - [repl_util:make_cluster([N]) || N <- Nodes], - [repl_util:wait_until_is_leader(N) || N <- Nodes], - Names = ["top", "left", "left2", "right", "right2"], - [repl_util:name_cluster(Node, Name) || {Node, Name} <- lists:zip(Nodes, Names)], - Ports = lists:map(fun(Node) -> - Port = get_cluster_mgr_port(Node), - {Node, Port} - end, Nodes), - connect_rt(Top, proplists:get_value(Left, Ports), "left"), - connect_rt(Left, proplists:get_value(Left2, Ports), "left2"), - connect_rt(Top, proplists:get_value(Right, Ports), "right"), - connect_rt(Right, proplists:get_value(Right2, Ports), "right2"), - Nodes - end, - fun(Nodes) -> - rt:clean_cluster(Nodes) - end, - fun(Nodes) -> [ - - {"Cascade to both kids", timeout, timeout(65), fun() -> - [Top | _] = Nodes, - Client = rt:pbc(Top), - Bucket = <<"objects">>, - Bin = <<"pyramid_top">>, - Obj = riakc_obj:new(Bucket, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w,1}]), - lists:map(fun(N) -> - ?debugFmt("Checking ~p", [N]), - ?assertEqual(Bin, maybe_eventually_exists(N, Bucket, Bin)) - end, Nodes) - end}, - {"check pendings", fun() -> - wait_until_pending_count_zero(Nodes) - end} - ] end}}. - -diamond_test_() -> - % A pretty cluster of clusters: - % +-----+ - % +--------------->| top | - % | loop added +-----+ - % | / \ - % | V V - % | +---------+ +----------+ - % ^ | midleft | | midright | - % | +---------+ +----------+ - % | \ / - % | V V - % | +--------+ - % +-------<-------| bottom | - % +--------+ - {timeout, timeout(180), {setup, fun() -> - Conf = conf(), - [Top, MidLeft, MidRight, Bottom] = Nodes = rt:deploy_nodes(4, Conf), - [repl_util:make_cluster([N]) || N <- Nodes], - Names = ["top", "midleft", "midright", "bottom"], - [repl_util:name_cluster(Node, Name) || {Node, Name} <- lists:zip(Nodes, Names)], - [repl_util:wait_until_is_leader(N) || N <- Nodes], - PortMap = lists:map(fun(Node) -> - Port = get_cluster_mgr_port(Node), - {Node, Port} - end, Nodes), - connect_rt(Top, proplists:get_value(MidLeft, PortMap), "midleft"), - connect_rt(MidLeft, proplists:get_value(Bottom, PortMap), "bottom"), - connect_rt(MidRight, proplists:get_value(Bottom, PortMap), "bottom"), - connect_rt(Top, proplists:get_value(MidRight, PortMap), "midright"), - Nodes - end, - fun(Nodes) -> - rt:clean_cluster(Nodes) - end, - fun(Nodes) -> [ - - {"unfortunate double write", timeout, timeout(135), fun() -> - [Top, MidLeft, MidRight, Bottom] = Nodes, - Client = rt:pbc(Top), - Bin = <<"start_at_top">>, - Obj = riakc_obj:new(<<"objects">>, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w,1}]), - timer:sleep(100000), - ?assertEqual(Bin, maybe_eventually_exists(MidLeft, <<"objects">>, Bin)), - ?assertEqual(Bin, maybe_eventually_exists(MidRight, <<"objects">>, Bin)), - ?assertEqual(Bin, maybe_eventually_exists(Bottom, <<"objects">>, Bin)), - %timer:sleep(1000), - Status = rpc:call(Bottom, riak_repl2_rt, status, []), - [SinkOne, SinkTwo] = proplists:get_value(sinks, Status, [[], []]), - ?assertEqual(proplists:get_value(expect_seq, SinkOne), proplists:get_value(expect_seq, SinkTwo)) - end}, - - {"connect bottom to top", fun() -> - [Top, _MidLeft, _MidRight, Bottom] = Nodes, - Port = get_cluster_mgr_port(Top), - 
connect_rt(Bottom, Port, "top"), - WaitFun = fun(N) -> - Status = rpc:call(N, riak_repl2_rt, status, []), - Sinks = proplists:get_value(sinks, Status, []), - length(Sinks) == 1 - end, - ?assertEqual(ok, rt:wait_until(Top, WaitFun)) - end}, - - {"start at midright", timeout, timeout(35), fun() -> - [Top, MidLeft, MidRight, Bottom] = Nodes, - % To ensure a write doesn't happen to MidRight when it originated - % on midright, we're going to compare the expect_seq before and - % after. - Status = rpc:call(MidRight, riak_repl2_rt, status, []), - [Sink] = proplists:get_value(sinks, Status, [[]]), - ExpectSeq = proplists:get_value(expect_seq, Sink), - - Client = rt:pbc(MidRight), - Bin = <<"start at midright">>, - Bucket = <<"objects">>, - Obj = riakc_obj:new(Bucket, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w,1}]), - [begin - ?debugFmt("Checking ~p", [N]), - ?assertEqual(Bin, maybe_eventually_exists(N, Bucket, Bin)) - end || N <- [Bottom, Top, MidLeft]], - - Status2 = rpc:call(MidRight, riak_repl2_rt, status, []), - [Sink2] = proplists:get_value(sinks, Status2, [[]]), - GotSeq = proplists:get_value(expect_seq, Sink2), - ?assertEqual(ExpectSeq, GotSeq) - end}, - {"check pendings", fun() -> - wait_until_pending_count_zero(Nodes) - end} - ] end}}. - -circle_and_spurs_test_() -> - % +------------+ - % | north_spur | - % +------------+ - % ^ - % | - % +-------+ - % +---> | north | ---+ - % | +-------+ | - % | V - % +-----------+ +------+ +------+ +-----------+ - % | west_spur | <- | west | <-------- | east | -> | east_spur | - % +-----------+ +------+ +------+ +-----------+ - {timeout, timeout(170), {setup, fun() -> - Conf = conf(), - [North, East, West, NorthSpur, EastSpur, WestSpur] = Nodes = rt:deploy_nodes(6, Conf), - [repl_util:make_cluster([N]) || N <- Nodes], - Names = ["north", "east", "west", "north_spur", "east_spur", "west_spur"], - [repl_util:name_cluster(Node, Name) || {Node, Name} <- lists:zip(Nodes, Names)], - [repl_util:wait_until_is_leader(N) || N <- Nodes], - connect_rt(North, get_cluster_mgr_port(East), "east"), - connect_rt(East, get_cluster_mgr_port(West), "west"), - connect_rt(West, get_cluster_mgr_port(North), "north"), - connect_rt(North, get_cluster_mgr_port(NorthSpur), "north_spur"), - connect_rt(East, get_cluster_mgr_port(EastSpur), "east_spur"), - connect_rt(West, get_cluster_mgr_port(WestSpur), "west_spur"), - Nodes - end, - fun(Nodes) -> - rt:clean_cluster(Nodes) - end, - fun(Nodes) -> [ - - {"start at north", timeout, timeout(55), fun() -> - [North | _Rest] = Nodes, - Client = rt:pbc(North), - Bin = <<"start at north">>, - Bucket = <<"objects">>, - Obj = riakc_obj:new(Bucket, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w,1}]), - [begin - ?debugFmt("Checking ~p", [N]), - ?assertEqual(Bin, maybe_eventually_exists(N, Bucket, Bin)) - end || N <- Nodes, N =/= North] - end}, - - {"Start at west", timeout, timeout(55), fun() -> - [_North, _East, West | _Rest] = Nodes, - Client = rt:pbc(West), - Bin = <<"start at west">>, - Bucket = <<"objects">>, - Obj = riakc_obj:new(Bucket, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w,1}]), - [begin - ?debugFmt("Checking ~p", [N]), - ?assertEqual(Bin, maybe_eventually_exists(N, Bucket, Bin)) - end || N <- Nodes, N =/= West] - end}, - - {"spurs don't replicate back", timeout, timeout(55), fun() -> - [_North, _East, _West, NorthSpur | _Rest] = Nodes, - Client = rt:pbc(NorthSpur), - Bin = <<"start at north_spur">>, - Bucket = <<"objects">>, - Obj = riakc_obj:new(Bucket, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w,1}]), - 
[begin
-                ?debugFmt("Checking ~p", [N]),
-                ?assertEqual({error, notfound}, maybe_eventually_exists(N, Bucket, Bin))
-            end || N <- Nodes, N =/= NorthSpur]
-        end},
-        {"check pendings", fun() ->
-            wait_until_pending_count_zero(Nodes)
-        end}
-    ] end}}.
-
-mixed_version_clusters() ->
-    case eunit:test(?MODULE:mixed_version_clusters_test_(), [verbose]) of
-        ok ->
-            pass;
-        error ->
-            % at the time this is written, the return value isn't actually
-            % checked, the only way to fail is to crash the process.
-            % i leave the fail here in hopes a future version will actually
-            % do what the documentation says.
-            exit(error),
-            fail
-    end.
-
-mixed_version_clusters_test_() ->
-    %      +-----+
-    %      | n12 |
-    %      +-----+
-    %      ^     \
-    %     /       V
-    % +-----+    +-----+
-    % | n56 | <- | n34 |
-    % +-----+    +-----+
-    %
-    % This test is configurable for 1.3 versions of Riak, but off by default.
-    % place the following config in ~/.riak_test_config to run:
-    %
-    % {run_rt_cascading_1_3_tests, true}
-    case rt_config:config_or_os_env(run_rt_cascading_1_3_tests, false) of
-        false ->
-            lager:info("mixed_version_clusters_test_ not configured to run!"),
-            [];
-        _ ->
-            lager:info("mixed_version_clusters_test_ configured to run for 1.3"),
-            mixed_version_clusters_test_dep()
-    end.
-
-mixed_version_clusters_test_dep() ->
-    {timeout, 60000, {setup, fun() ->
-        Conf = conf(),
-        DeployConfs = [{previous, Conf} || _ <- lists:seq(1,6)],
-        Nodes = rt:deploy_nodes(DeployConfs),
-        [N1, N2, N3, N4, N5, N6] = Nodes,
-        case rpc:call(N1, application, get_key, [riak_core, vsn]) of
-            % this is meant to test upgrading from early BNW aka
-            % Brave New World aka Advanced Repl aka version 3 repl to
-            % a cascading realtime repl. Other tests handle going from pre
-            % repl 3 to repl 3.
-            {ok, Vsn} when Vsn < "1.3.0" ->
-                {too_old, Nodes};
-            _ ->
-                N12 = [N1, N2],
-                N34 = [N3, N4],
-                N56 = [N5, N6],
-                repl_util:make_cluster(N12),
-                repl_util:make_cluster(N34),
-                repl_util:make_cluster(N56),
-                repl_util:name_cluster(N1, "n12"),
-                repl_util:name_cluster(N3, "n34"),
-                repl_util:name_cluster(N5, "n56"),
-                [repl_util:wait_until_leader_converge(Cluster) || Cluster <- [N12, N34, N56]],
-                connect_rt(N1, get_cluster_mgr_port(N3), "n34"),
-                connect_rt(N3, get_cluster_mgr_port(N5), "n56"),
-                connect_rt(N5, get_cluster_mgr_port(N1), "n12"),
-                Nodes
-        end
-    end,
-    fun(MaybeNodes) ->
-        Nodes = case MaybeNodes of
-            {too_old, Ns} -> Ns;
-            _ -> MaybeNodes
-        end,
-        rt:clean_cluster(Nodes)
-    end,
-    fun({too_old, _Nodes}) -> [];
-       ([N1, N2, N3, N4, N5, N6] = Nodes) -> [
-
-        {"no cascading at first", timeout, timeout(35), [
-            {timeout, timeout(15), fun() ->
-                Client = rt:pbc(N1),
-                Bin = <<"no cascade yet">>,
-                Obj = riakc_obj:new(?bucket, Bin, Bin),
-                riakc_pb_socket:put(Client, Obj, [{w, 2}]),
-                riakc_pb_socket:stop(Client),
-                ?assertEqual({error, notfound}, maybe_eventually_exists([N5, N6], ?bucket, Bin)),
-                ?assertEqual(Bin, maybe_eventually_exists([N3, N4], ?bucket, Bin))
-            end},
-
-            {timeout, timeout(15), fun() ->
-                Client = rt:pbc(N2),
-                Bin = <<"no cascade yet 2">>,
-                Obj = riakc_obj:new(?bucket, Bin, Bin),
-                riakc_pb_socket:put(Client, Obj, [{w, 2}]),
-                riakc_pb_socket:stop(Client),
-                ?assertEqual({error, notfound}, maybe_eventually_exists([N5, N6], ?bucket, Bin)),
-                ?assertEqual(Bin, maybe_eventually_exists([N3, N4], ?bucket, Bin))
-            end}
-        ]},
-
-        {"mixed source can send", timeout, timeout(235), {setup,
-            fun() ->
-                rt:upgrade(N1, current),
-                repl_util:wait_until_leader_converge([N1, N2]),
-                Running = fun(Node) ->
-                    RTStatus = rpc:call(Node, riak_repl2_rt, status, []),
-                    if
-                        is_list(RTStatus) ->
-                            SourcesList = proplists:get_value(sources, RTStatus, []),
-                            Sources = [S || S <- SourcesList,
-                                is_list(S),
-                                proplists:get_value(connected, S, false),
-                                proplists:get_value(source, S) =:= "n34"
-                            ],
-                            length(Sources) >= 1;
-                        true ->
-                            false
-                    end
-                end,
-                ?assertEqual(ok, rt:wait_until(N1, Running)),
-                % give the node further time to settle
-                StatsNotEmpty = fun(Node) ->
-                    case rpc:call(Node, riak_repl_stats, get_stats, []) of
-                        [] ->
-                            false;
-                        Stats ->
-                            is_list(Stats)
-                    end
-                end,
-                ?assertEqual(ok, rt:wait_until(N1, StatsNotEmpty))
-            end,
-            fun(_) -> [
-
-                {"node1 put", timeout, timeout(205), fun() ->
-                    Client = rt:pbc(N1),
-                    Bin = <<"rt after upgrade">>,
-                    Obj = riakc_obj:new(?bucket, Bin, Bin),
-                    riakc_pb_socket:put(Client, Obj, [{w, 2}]),
-                    riakc_pb_socket:stop(Client),
-                    ?assertEqual(Bin, maybe_eventually_exists(N3, ?bucket, Bin, timeout(100))),
-                    ?assertEqual({error, notfound}, maybe_eventually_exists(N5, ?bucket, Bin, 100000))
-                end},
-
-                {"node2 put", timeout, timeout(25), fun() ->
-                    Client = rt:pbc(N2),
-                    Bin = <<"rt after upgrade 2">>,
-                    Obj = riakc_obj:new(?bucket, Bin, Bin),
-                    riakc_pb_socket:put(Client, Obj, [{w, 2}]),
-                    riakc_pb_socket:stop(Client),
-                    ?assertEqual({error, notfound}, maybe_eventually_exists(N5, ?bucket, Bin)),
-                    ?assertEqual(Bin, maybe_eventually_exists([N3,N4], ?bucket, Bin))
-                end}
-            ] end
-        }},
-
-        {"upgrade the world, cascade starts working", timeout, timeout(200), {setup,
-            fun() ->
-                [N1 | NotUpgraded] = Nodes,
-                [rt:upgrade(Node, current) || Node <- NotUpgraded],
-                repl_util:wait_until_leader_converge([N1, N2]),
-                repl_util:wait_until_leader_converge([N3, N4]),
-                repl_util:wait_until_leader_converge([N5, N6]),
-                ClusterMgrUp = fun(Node) ->
-                    case rpc:call(Node, erlang, whereis, [riak_core_cluster_manager]) of
-                        P when is_pid(P) ->
-                            true;
-                        _ ->
-                            fail
-                    end
-                end,
-                [rt:wait_until(N, ClusterMgrUp) || N <- Nodes],
-                maybe_reconnect_rt(N1, get_cluster_mgr_port(N3), "n34"),
-                maybe_reconnect_rt(N3, get_cluster_mgr_port(N5), "n56"),
-                maybe_reconnect_rt(N5, get_cluster_mgr_port(N1), "n12"),
-                ok
-            end,
-            fun(_) ->
-                ToB = fun
-                    (Atom) when is_atom(Atom) ->
-                        list_to_binary(atom_to_list(Atom));
-                    (N) when is_integer(N) ->
-                        list_to_binary(integer_to_list(N))
-                end,
-                ExistsEverywhere = fun(Key, LookupOrder) ->
-                    Reses = [maybe_eventually_exists(Node, ?bucket, Key) || Node <- LookupOrder],
-                    ?debugFmt("Node and its res:~n~p", [lists:zip(LookupOrder, Reses)]),
-                    lists:all(fun(E) -> E =:= Key end, Reses)
-                end,
-                MakeTest = fun(Node, N) ->
-                    Name = "writing " ++ atom_to_list(Node) ++ "-write-" ++ integer_to_list(N),
-                    {NewTail, NewHead} = lists:splitwith(fun(E) ->
-                        E =/= Node
-                    end, Nodes),
-                    ExistsLookup = NewHead ++ NewTail,
-                    Test = fun() ->
-                        ?debugFmt("Running test ~p", [Name]),
-                        Client = rt:pbc(Node),
-                        Key = <<(ToB(Node))/binary, "-write-", (ToB(N))/binary>>,
-                        Obj = riakc_obj:new(?bucket, Key, Key),
-                        riakc_pb_socket:put(Client, Obj, [{w, 2}]),
-                        riakc_pb_socket:stop(Client),
-                        ?assert(ExistsEverywhere(Key, ExistsLookup))
-                    end,
-                    {Name, timeout, timeout(65), Test}
-                end,
-                [MakeTest(Node, N) || Node <- Nodes, N <- lists:seq(1, 3)]
-            end
-        }},
-        {"check pendings", fun() ->
-            wait_until_pending_count_zero(Nodes)
-        end}
-
-    ] end}}.
-
-new_to_old() ->
-    case eunit:test(?MODULE:new_to_old_test_(), [verbose]) of
-        ok ->
-            pass;
-        error ->
-            % at the time this is written, the return value isn't actually
-            % checked, the only way to fail is to crash the process.
- % i leave the fail here in hopes a future version will actually - % do what the documentation says. - exit(error), - fail - end. - -new_to_old_test_() -> - % +------+ - % | New1 | - % +------+ - % ^ \ - % / V - % +------+ +------+ - % | New3 | <- | Old2 | - % +------+ +------+ - % - % This test is configurable for 1.3 versions of Riak, but off by default. - % place the following config in ~/.riak_test_config to run: - % - % {run_rt_cascading_1_3_tests, true} - case rt_config:config_or_os_env(run_rt_cascading_1_3_tests, false) of - false -> - lager:info("new_to_old_test_ not configured to run!"), - []; - _ -> - lager:info("new_to_old_test_ configured to run for 1.3"), - new_to_old_test_dep() - end. - -new_to_old_test_dep() -> - {timeout, timeout(105), {setup, fun() -> - Conf = conf(), - DeployConfs = [{current, Conf}, {previous, Conf}, {current, Conf}], - [New1, Old2, New3] = Nodes = rt:deploy_nodes(DeployConfs), - case rpc:call(Old2, application, get_key, [riak_core, vsn]) of - % this is meant to test upgrading from early BNW aka - % Brave New World aka Advanced Repl aka version 3 repl to - % a cascading realtime repl. Other tests handle going from pre - % repl 3 to repl 3. - {ok, Vsn} when Vsn < "1.3.0" -> - {too_old, Nodes}; - _ -> - [repl_util:make_cluster([N]) || N <- Nodes], - Names = ["new1", "old2", "new3"], - [repl_util:name_cluster(Node, Name) || {Node, Name} <- lists:zip(Nodes, Names)], - [repl_util:wait_until_is_leader(N) || N <- Nodes], - connect_rt(New1, 10026, "old2"), - connect_rt(Old2, 10036, "new3"), - connect_rt(New3, 10016, "new1"), - Nodes - end - end, - fun(MaybeNodes) -> - Nodes = case MaybeNodes of - {too_old, Ns} -> Ns; - _ -> MaybeNodes - end, - rt:clean_cluster(Nodes) - end, - fun({too_old, _}) -> []; - ([New1, Old2, New3]) -> [ - - {"From new1 to old2", timeout, timeout(25), fun() -> - Client = rt:pbc(New1), - Bin = <<"new1 to old2">>, - Obj = riakc_obj:new(?bucket, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w, 1}]), - riakc_pb_socket:stop(Client), - ?assertEqual(Bin, maybe_eventually_exists(Old2, ?bucket, Bin)), - ?assertEqual({error, notfound}, maybe_eventually_exists(New3, ?bucket, Bin)) - end}, - - {"old2 does not cascade at all", timeout, timeout(25), fun() -> - Client = rt:pbc(New1), - Bin = <<"old2 no cascade">>, - Obj = riakc_obj:new(?bucket, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w, 1}]), - riakc_pb_socket:stop(Client), - ?assertEqual(Bin, maybe_eventually_exists(Old2, ?bucket, Bin)), - ?assertEqual({error, notfound}, maybe_eventually_exists(New3, ?bucket, Bin)) - end}, - - {"from new3 to old2", timeout, timeout(25), fun() -> - Client = rt:pbc(New3), - Bin = <<"new3 to old2">>, - Obj = riakc_obj:new(?bucket, Bin, Bin), - riakc_pb_socket:put(Client, Obj, [{w, 1}]), - riakc_pb_socket:stop(Client), - ?assertEqual(Bin, maybe_eventually_exists(New1, ?bucket, Bin)), - ?assertEqual(Bin, maybe_eventually_exists(Old2, ?bucket, Bin)) - end}, - - {"from old2 to new3 no cascade", timeout, timeout(25), fun() -> - % in the future, cascading may be able to occur even if it starts - % from an older source cluster/node. It is prevented for now by - % having no easy/good way to get the name of the source cluster, - % thus preventing complete information on the routed clusters. 
-            Client = rt:pbc(Old2),
-            Bin = <<"old2 to new3">>,
-            Obj = riakc_obj:new(?bucket, Bin, Bin),
-            riakc_pb_socket:put(Client, Obj, [{w,1}]),
-            riakc_pb_socket:stop(Client),
-            ?assertEqual(Bin, maybe_eventually_exists(New3, ?bucket, Bin)),
-            ?assertEqual({error, notfound}, maybe_eventually_exists(New1, ?bucket, Bin))
-        end},
-        {"check pendings", fun() ->
-            wait_until_pending_count_zero(["new1", "old2", "new3"])
-        end}
-    ] end}}.
-
-ensure_ack_test_() ->
-    {timeout, timeout(130), {setup, fun() ->
-        Conf = conf(),
-        [LeaderA, LeaderB] = Nodes = rt:deploy_nodes(2, Conf),
-        [repl_util:make_cluster([N]) || N <- Nodes],
-        [repl_util:wait_until_is_leader(N) || N <- Nodes],
-        Names = ["A", "B"],
-        [repl_util:name_cluster(Node, Name) || {Node, Name} <- lists:zip(Nodes, Names)],
-        %repl_util:name_cluster(LeaderA, "A"),
-        %repl_util:name_cluster(LeaderB, "B"),
-        lager:info("made it past naming"),
-        Port = get_cluster_mgr_port(LeaderB),
-        lager:info("made it past port"),
-        connect_rt(LeaderA, Port, "B"),
-        lager:info("made it past connect"),
-        [LeaderA, LeaderB]
-    end,
-    fun(Nodes) ->
-        rt:clean_cluster(Nodes)
-    end,
-
-    fun([LeaderA, LeaderB] = _Nodes) -> [
-        {"ensure acks", timeout, timeout(65), fun() ->
-            lager:info("Nodes:~p, ~p", [LeaderA, LeaderB]),
-            TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) ||
-                <<X>> <= erlang:md5(term_to_binary(os:timestamp()))]),
-            TestBucket = <<TestHash/binary, "-systest_a">>,
-
-            %% Write some objects to the source cluster (A),
-            lager:info("Writing 1 key to ~p, which should RT repl to ~p",
-                [LeaderA, LeaderB]),
-            ?assertEqual([], repl_util:do_write(LeaderA, 1, 1, TestBucket, 2)),
-
-            %% verify data is replicated to B
-            lager:info("Reading 1 key written from ~p", [LeaderB]),
-            ?assertEqual(0, repl_util:wait_for_reads(LeaderB, 1, 1, TestBucket, 2)),
-
-            RTQStatus = rpc:call(LeaderA, riak_repl2_rtq, status, []),
-
-            Consumers = proplists:get_value(consumers, RTQStatus),
-            case proplists:get_value("B", Consumers) of
-                undefined ->
-                    [];
-                Consumer ->
-                    Unacked = proplists:get_value(unacked, Consumer, 0),
-                    lager:info("unacked: ~p", [Unacked]),
-                    ?assertEqual(0, Unacked)
-            end
-
-        end}
-    ]
-    end}}.
-
-ensure_unacked_and_queue() ->
-    eunit(ensure_unacked_and_queue_test_()).
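The "ensure acks" test above pulls the unacked counter out of the realtime queue's consumers proplist; a hypothetical helper for that lookup (shapes assumed from the rpc:call above, not part of the patch):

    %% Unacked count for the named sink on a source node's rtq, or
    %% 'undefined' when the rtq reports no such consumer.
    unacked_count(SourceNode, SinkName) ->
        RTQStatus = rpc:call(SourceNode, riak_repl2_rtq, status, []),
        Consumers = proplists:get_value(consumers, RTQStatus, []),
        case proplists:get_value(SinkName, Consumers) of
            undefined -> undefined;
            Consumer -> proplists:get_value(unacked, Consumer, 0)
        end.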
- -ensure_unacked_and_queue_test_() -> - {timeout, timeout(2300), {setup, fun() -> - Nodes = rt:deploy_nodes(6, conf()), - {N123, N456} = lists:split(3, Nodes), - repl_util:make_cluster(N123), - repl_util:make_cluster(N456), - repl_util:wait_until_leader_converge(N123), - repl_util:wait_until_leader_converge(N456), - repl_util:name_cluster(hd(N123), "n123"), - repl_util:name_cluster(hd(N456), "n456"), - N456Port = get_cluster_mgr_port(hd(N456)), - connect_rt(hd(N123), N456Port, "n456"), - N123Port = get_cluster_mgr_port(hd(N123)), - connect_rt(hd(N456), N123Port, "n123"), - {N123, N456} - end, - maybe_skip_teardown(fun({N123, N456}) -> - rt:clean_cluster(N123), - rt:clean_cluster(N456) - end), - fun({N123, N456}) -> [ - - {"unacked does not increase when there are skips", timeout, timeout(100), fun() -> - N123Leader = hd(N123), - N456Leader = hd(N456), - - write_n_keys(N123Leader, N456Leader, 1, 10000), - - write_n_keys(N456Leader, N123Leader, 10001, 20000), - - Res = rt:wait_until(fun() -> - RTQStatus = rpc:call(N123Leader, riak_repl2_rtq, status, []), - - Consumers = proplists:get_value(consumers, RTQStatus), - Data = proplists:get_value("n456", Consumers), - Unacked = proplists:get_value(unacked, Data), - ?debugFmt("unacked: ~p", [Unacked]), - 0 == Unacked - end), - ?assertEqual(ok, Res) - end}, - - {"after acks, queues are empty", fun() -> - Nodes = N123 ++ N456, - Got = lists:map(fun(Node) -> - rpc:call(Node, riak_repl2_rtq, all_queues_empty, []) - end, Nodes), - Expected = [true || _ <- lists:seq(1, length(Nodes))], - ?assertEqual(Expected, Got) - end}, - - {"after acks, queues truly are empty. Truly", fun() -> - Nodes = N123 ++ N456, - Gots = lists:map(fun(Node) -> - {Node, rpc:call(Node, riak_repl2_rtq, dumpq, [])} - end, Nodes), - lists:map(fun({Node, Got}) -> - ?debugFmt("Checking data from ~p", [Node]), - ?assertEqual([], Got) - end, Gots) - end}, - - {"dual loads keeps unacked satisfied", timeout, timeout(100), fun() -> - N123Leader = hd(N123), - N456Leader = hd(N456), - LoadN123Pid = spawn(fun() -> - {Time, Val} = timer:tc(fun write_n_keys/4, [N123Leader, N456Leader, 20001, 30000]), - ?debugFmt("loading 123 to 456 took ~p to get ~p", [Time, Val]), - Val - end), - LoadN456Pid = spawn(fun() -> - {Time, Val} = timer:tc(fun write_n_keys/4, [N456Leader, N123Leader, 30001, 40000]), - ?debugFmt("loading 456 to 123 took ~p to get ~p", [Time, Val]), - Val - end), - Exits = wait_exit([LoadN123Pid, LoadN456Pid], infinity), - ?assert(lists:all(fun(E) -> E == normal end, Exits)), - - StatusDig = fun(SinkName, Node) -> - Status = rpc:call(Node, riak_repl2_rtq, status, []), - Consumers = proplists:get_value(consumers, Status, []), - ConsumerStats = proplists:get_value(SinkName, Consumers, []), - proplists:get_value(unacked, ConsumerStats) - end, - - N123UnackedRes = rt:wait_until(fun() -> - Unacked = StatusDig("n456", N123Leader), - ?debugFmt("Unacked: ~p", [Unacked]), - 0 == Unacked - end), - ?assertEqual(ok, N123UnackedRes), - - N456Unacked = StatusDig("n123", N456Leader), - case N456Unacked of - 0 -> - ?assert(true); - _ -> - N456Unacked2 = StatusDig("n123", N456Leader), - ?debugFmt("Not 0, are they at least decreasing?~n" - " ~p, ~p", [N456Unacked2, N456Unacked]), - ?assert(N456Unacked2 < N456Unacked) - end - end}, - - {"after dual load acks, queues are empty", fun() -> - Nodes = N123 ++ N456, - Got = lists:map(fun(Node) -> - rpc:call(Node, riak_repl2_rtq, all_queues_empty, []) - end, Nodes), - Expected = [true || _ <- lists:seq(1, length(Nodes))], - ?assertEqual(Expected, Got) - end}, - 
- {"after dual load acks, queues truly are empty. Truly", fun() -> - Nodes = N123 ++ N456, - Gots = lists:map(fun(Node) -> - {Node, rpc:call(Node, riak_repl2_rtq, dumpq, [])} - end, Nodes), - lists:map(fun({Node, Got}) -> - ?debugFmt("Checking data from ~p", [Node]), - ?assertEqual([], Got) - end, Gots) - end}, - - {"no negative pendings", fun() -> - Nodes = N123 ++ N456, - GetPending = fun({sink_stats, SinkStats}) -> - ConnTo = proplists:get_value(rt_sink_connected_to, SinkStats), - proplists:get_value(pending, ConnTo) - end, - lists:map(fun(Node) -> - ?debugFmt("Checking node ~p", [Node]), - Status = rpc:call(Node, riak_repl_console, status, [quiet]), - Sinks = proplists:get_value(sinks, Status), - lists:map(fun(SStats) -> - Pending = GetPending(SStats), - ?assertEqual(0, Pending) - end, Sinks) - end, Nodes) - end} - - ] end}}. - -%% ===== -%% utility functions for teh happy -%% ==== - -wait_exit(Pids, Timeout) -> - Mons = [{erlang:monitor(process, Pid), Pid} || Pid <- Pids], - lists:map(fun({Mon, Pid}) -> - receive - {'DOWN', Mon, process, Pid, Cause} -> - Cause - after Timeout -> - timeout - end - end, Mons). - -conf() -> - [{lager, [ - {handlers, [ - {lager_console_backend,info}, - {lager_file_backend, [ - {"./log/error.log",error,10485760,"$D0",5}, - {"./log/console.log",info,10485760,"$D0",5}, - {"./log/debug.log",debug,10485760,"$D0",5} - ]} - ]}, - {crash_log,"./log/crash.log"}, - {crash_log_msg_size,65536}, - {crash_log_size,10485760}, - {crash_log_date,"$D0"}, - {crash_log_count,5}, - {error_logger_redirect,true} - ]}, - {riak_repl, [ - {fullsync_on_connect, false}, - {fullsync_interval, disabled}, - {diff_batch_size, 10}, - {rt_heartbeat_interval, undefined} - ]}]. - -get_cluster_mgr_port(Node) -> - {ok, {_Ip, Port}} = rpc:call(Node, application, get_env, [riak_core, cluster_mgr]), - Port. - -maybe_reconnect_rt(SourceNode, SinkPort, SinkName) -> - case repl_util:wait_for_connection(SourceNode, SinkName) of - fail -> - connect_rt(SourceNode, SinkPort, SinkName); - Oot -> - Oot - end. - -connect_rt(SourceNode, SinkPort, SinkName) -> - repl_util:connect_cluster(SourceNode, "127.0.0.1", SinkPort), - repl_util:wait_for_connection(SourceNode, SinkName), - repl_util:enable_realtime(SourceNode, SinkName), - repl_util:start_realtime(SourceNode, SinkName). - -exists(Nodes, Bucket, Key) -> - exists({error, notfound}, Nodes, Bucket, Key). - -exists(Got, [], _Bucket, _Key) -> - Got; -exists({error, notfound}, [Node | Tail], Bucket, Key) -> - Pid = rt:pbc(Node), - Got = riakc_pb_socket:get(Pid, Bucket, Key), - riakc_pb_socket:stop(Pid), - exists(Got, Tail, Bucket, Key); -exists(Got, _Nodes, _Bucket, _Key) -> - Got. - -maybe_eventually_exists(Node, Bucket, Key) -> - Timeout = timeout(10), - WaitTime = rt_config:get(default_wait_time, 1000), - maybe_eventually_exists(Node, Bucket, Key, Timeout, WaitTime). - -maybe_eventually_exists(Node, Bucket, Key, Timeout) -> - WaitTime = rt_config:get(default_wait_time, 1000), - maybe_eventually_exists(Node, Bucket, Key, Timeout, WaitTime). - -maybe_eventually_exists(Node, Bucket, Key, Timeout, WaitMs) when is_atom(Node) -> - maybe_eventually_exists([Node], Bucket, Key, Timeout, WaitMs); - -maybe_eventually_exists(Nodes, Bucket, Key, Timeout, WaitMs) -> - Got = exists(Nodes, Bucket, Key), - maybe_eventually_exists(Got, Nodes, Bucket, Key, Timeout, WaitMs). 
- -maybe_eventually_exists({error, notfound}, Nodes, Bucket, Key, Timeout, WaitMs) when Timeout > 0 -> - timer:sleep(WaitMs), - Got = exists(Nodes, Bucket, Key), - Timeout2 = case Timeout of - infinity -> - infinity; - _ -> - Timeout - WaitMs - end, - maybe_eventually_exists(Got, Nodes, Bucket, Key, Timeout2, WaitMs); - -maybe_eventually_exists({ok, RiakObj}, _Nodes, _Bucket, _Key, _Timeout, _WaitMs) -> - riakc_obj:get_value(RiakObj); - -maybe_eventually_exists(Got, _Nodes, _Bucket, _Key, _Timeout, _WaitMs) -> - Got. - -wait_for_rt_started(Node, ToName) -> - Fun = fun(_) -> - Status = rpc:call(Node, riak_repl2_rt, status, []), - Started = proplists:get_value(started, Status, []), - lists:member(ToName, Started) - end, - rt:wait_until(Node, Fun). - -write_n_keys(Source, Destination, M, N) -> - TestHash = list_to_binary([io_lib:format("~2.16.0b", [X]) || - <> <= erlang:md5(term_to_binary(os:timestamp()))]), - TestBucket = <>, - First = M, - Last = N, - - %% Write some objects to the source cluster (A), - lager:info("Writing ~p keys to ~p, which should RT repl to ~p", - [Last-First+1, Source, Destination]), - ?assertEqual([], repl_util:do_write(Source, First, Last, TestBucket, 2)), - - %% verify data is replicated to B - lager:info("Reading ~p keys written from ~p", [Last-First+1, Destination]), - ?assertEqual(0, repl_util:wait_for_reads(Destination, First, Last, TestBucket, 2)). - -timeout(MultiplyBy) -> - case rt_config:get(default_timeout, 1000) of - infinity -> - infinity; - N -> - N * MultiplyBy - end. - -eunit(TestDef) -> - case eunit:test(TestDef, [verbose]) of - ok -> - pass; - error -> - exit(error), - fail - end. - -maybe_skip_teardown(TearDownFun) -> - fun(Arg) -> - case rt_config:config_or_os_env(skip_teardowns, undefined) of - undefined -> - TearDownFun(Arg); - _ -> - ok - end - end. - -wait_until_pending_count_zero(Nodes) -> - WaitFun = fun() -> - {Statuses, _} = rpc:multicall(Nodes, riak_repl2_rtq, status, []), - Out = [check_status(S) || S <- Statuses], - not lists:member(false, Out) - end, - ?assertEqual(ok, rt:wait_until(WaitFun)), - ok. - -check_status(Status) -> - case proplists:get_all_values(consumers, Status) of - undefined -> - true; - [] -> - true; - Cs -> - PendingList = [proplists:lookup_all(pending, C) || {_, C} <- lists:flatten(Cs)], - PendingCount = lists:sum(proplists:get_all_values(pending, lists:flatten(PendingList))), - ?debugFmt("RTQ status pending on test node:~p", [PendingCount]), - PendingCount == 0 - end. diff --git a/tests/secondary_index_tests.erl b/tests/secondary_index_tests.erl deleted file mode 100644 index aa4618623..000000000 --- a/tests/secondary_index_tests.erl +++ /dev/null @@ -1,332 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(secondary_index_tests). --behavior(riak_test). 
--export([confirm/0]). --export([put_an_object/2, put_an_object/4, int_to_key/1, - stream_pb/2, stream_pb/3, pb_query/3, http_query/2, - http_query/3, http_stream/3, int_to_field1_bin/1, url/2, - assertExactQuery/5, assertRangeQuery/7]). --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --define(BUCKET, <<"2ibucket">>). --define(KEYS(A), [int_to_key(A)]). --define(KEYS(A,B), [int_to_key(N) || N <- lists:seq(A,B)]). --define(KEYS(A,B,C), [int_to_key(N) || N <- lists:seq(A,B), C]). --define(KEYS(A,B,G1,G2), [int_to_key(N) || N <- lists:seq(A,B), G1, G2]). - -confirm() -> - Nodes = rt:build_cluster(3), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - - %% First test with sorting non-paginated results off by default - SetResult = rpc:multicall(Nodes, application, set_env, - [riak_kv, secondary_index_sort_default, false]), - AOK = [ok || _ <- lists:seq(1, length(Nodes))], - ?assertMatch({AOK, []}, SetResult), - - PBC = rt:pbc(hd(Nodes)), - HTTPC = rt:httpc(hd(Nodes)), - Clients = [{pb, PBC}, {http, HTTPC}], - - [put_an_object(PBC, N) || N <- lists:seq(0, 20)], - - K = fun int_to_key/1, - - assertExactQuery(Clients, ?KEYS(5), <<"field1_bin">>, <<"val5">>), - assertExactQuery(Clients, ?KEYS(5), <<"field2_int">>, 5), - assertExactQuery(Clients, ?KEYS(5, 9), <<"field3_int">>, 5), - assertRangeQuery(Clients, ?KEYS(10, 18), <<"field1_bin">>, <<"val10">>, <<"val18">>), - assertRangeQuery(Clients, ?KEYS(12), <<"field1_bin">>, <<"val10">>, <<"val18">>, <<"v...2">>), - assertRangeQuery(Clients, ?KEYS(10, 19), <<"field2_int">>, 10, 19), - assertRangeQuery(Clients, ?KEYS(10, 17), <<"$key">>, <<"obj10">>, <<"obj17">>), - assertRangeQuery(Clients, ?KEYS(12), <<"$key">>, <<"obj10">>, <<"obj17">>, <<"ob..2">>), - - lager:info("Delete an object, verify deletion..."), - ToDel = [<<"obj05">>, <<"obj11">>], - [?assertMatch(ok, riakc_pb_socket:delete(PBC, ?BUCKET, KD)) || KD <- ToDel], - lager:info("Make sure the tombstone is reaped..."), - ?assertMatch(ok, rt:wait_until(fun() -> rt:pbc_really_deleted(PBC, ?BUCKET, ToDel) end)), - - assertExactQuery(Clients, [], <<"field1_bin">>, <<"val5">>), - assertExactQuery(Clients, [], <<"field2_int">>, 5), - assertExactQuery(Clients, ?KEYS(6, 9), <<"field3_int">>, 5), - assertRangeQuery(Clients, ?KEYS(10, 18, N /= 11), <<"field1_bin">>, <<"val10">>, <<"val18">>), - assertRangeQuery(Clients, ?KEYS(10), <<"field1_bin">>, <<"val10">>, <<"val18">>, <<"10$">>), - assertRangeQuery(Clients, ?KEYS(10, 19, N /= 11), <<"field2_int">>, 10, 19), - assertRangeQuery(Clients, ?KEYS(10, 17, N /= 11), <<"$key">>, <<"obj10">>, <<"obj17">>), - assertRangeQuery(Clients, ?KEYS(12), <<"$key">>, <<"obj10">>, <<"obj17">>, <<"2">>), - - %% Verify the $key index, and riak_kv#367 regression - assertRangeQuery(Clients, ?KEYS(6), <<"$key">>, <<"obj06">>, <<"obj06">>), - assertRangeQuery(Clients, ?KEYS(6,7), <<"$key">>, <<"obj06">>, <<"obj07">>), - - %% Exercise sort set to true by default - SetResult2 = rpc:multicall(Nodes, application, set_env, - [riak_kv, secondary_index_sort_default, true]), - ?assertMatch({AOK, []}, SetResult2), - - assertExactQuery(Clients, ?KEYS(15, 19), - <<"field3_int">>, 15, {undefined, true}), - %% Keys ordered by val index term, since 2i order is {term, key} - KsVal = [A || {_, A} <- - lists:sort([{int_to_field1_bin(N), K(N)} || - N <- lists:seq(0, 20), N /= 11, N /= 5])], - assertRangeQuery(Clients, KsVal, - <<"field1_bin">>, <<"val0">>, <<"val9">>, undefined, {undefined, true}), - assertRangeQuery(Clients, ?KEYS(0, 20, N /= 11, N /= 
5), - <<"field2_int">>, 0, 20, undefined, {undefined, true}), - assertRangeQuery(Clients, ?KEYS(0, 20, N /= 11, N /= 5), - <<"$key">>, <<"obj00">>, <<"obj20">>, undefined, {undefined, true}), - - %% Verify bignum sort order in sext -- eleveldb only (riak_kv#499) - TestIdxVal = 1362400142028, - put_an_object(PBC, TestIdxVal), - assertRangeQuery(Clients, - [<<"obj1362400142028">>], - <<"field2_int">>, - 1000000000000, - TestIdxVal), - - pass. - -assertExactQuery(Clients, Expected, Index, Value) -> - assertExactQuery(Clients, Expected, Index, Value, {false, false}), - assertExactQuery(Clients, Expected, Index, Value, {true, true}). - -assertExactQuery(Clients, Expected, Index, Value, Sorted) when is_list(Clients) -> - [assertExactQuery(C, Expected, Index, Value, Sorted) || C <- Clients]; -assertExactQuery({ClientType, Client}, Expected, Index, Value, - {Sort, ExpectSorted}) -> - lager:info("Searching Index ~p for ~p, sort: ~p ~p with client ~p", - [Index, Value, Sort, ExpectSorted, ClientType]), - {ok, ?INDEX_RESULTS{keys=Results}} = case ClientType of - pb -> - riakc_pb_socket:get_index_eq(Client, ?BUCKET, Index, Value, - [{pagination_sort, Sort} || Sort /= undefined]); - http -> - rhc:get_index(Client, ?BUCKET, Index, Value, [{pagination_sort, Sort}]) - end, - - ActualKeys = case ExpectSorted of - true -> Results; - _ -> lists:sort(Results) - end, - lager:info("Expected: ~p", [Expected]), - lager:info("Actual : ~p", [Results]), - lager:info("Sorted : ~p", [ActualKeys]), - ?assertEqual(Expected, ActualKeys). - -assertRangeQuery(Clients, Expected, Index, StartValue, EndValue) -> - assertRangeQuery(Clients, Expected, Index, StartValue, EndValue, undefined). - -assertRangeQuery(Clients, Expected, Index, StartValue, EndValue, Re) -> - assertRangeQuery(Clients, Expected, Index, StartValue, EndValue, Re, {false, false}), - assertRangeQuery(Clients, Expected, Index, StartValue, EndValue, Re, {true, true}). - -assertRangeQuery(Clients, Expected, Index, StartValue, EndValue, Re, Sort) when is_list(Clients) -> - [assertRangeQuery(C, Expected, Index, StartValue, EndValue, Re, Sort) || C <- Clients]; -assertRangeQuery({ClientType, Client}, Expected, Index, StartValue, EndValue, Re, - {Sort, ExpectSorted}) -> - lager:info("Searching Index ~p for ~p-~p re:~p, sort: ~p, ~p with ~p client", - [Index, StartValue, EndValue, Re, Sort, ExpectSorted, ClientType]), - {ok, ?INDEX_RESULTS{keys=Results}} = case ClientType of - pb -> - riakc_pb_socket:get_index_range(Client, ?BUCKET, Index, StartValue, EndValue, - [{term_regex, Re} || Re /= undefined] ++ - [{pagination_sort, Sort} || Sort /= undefined]); - http -> - rhc:get_index(Client, ?BUCKET, Index, {StartValue, EndValue}, - [{term_regex, Re} || Re /= undefined] ++ - [{pagination_sort, Sort}]) - end, - ActualKeys = case ExpectSorted of - true -> Results; - _ -> lists:sort(Results) - end, - lager:info("Expected: ~p", [Expected]), - lager:info("Actual : ~p", [Results]), - lager:info("Sorted : ~p", [ActualKeys]), - ?assertEqual(Expected, ActualKeys). - -%% general 2i utility -put_an_object(Pid, N) -> - Key = int_to_key(N), - Data = io_lib:format("data~p", [N]), - BinIndex = int_to_field1_bin(N), - Indexes = [{"field1_bin", BinIndex}, - {"field2_int", N}, - % every 5 items indexed together - {"field3_int", N - (N rem 5)} - ], - put_an_object(Pid, Key, Data, Indexes). 
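put_an_object/2 above derives all three indexes from N; a hypothetical eunit-style illustration (not in the original module, but using only its own helpers) of what the scheme produces:

    %% For N = 7: key <<"obj07">>, field1_bin <<"val7">>, field2_int 7, and
    %% field3_int 5 (7 - 7 rem 5), so keys 5..9 share field3_int = 5 --
    %% exactly what ?KEYS(5, 9) relies on in confirm/0.
    index_scheme_example_test() ->
        ?assertEqual(<<"obj07">>, int_to_key(7)),
        ?assertEqual(<<"val7">>, int_to_field1_bin(7)),
        ?assertEqual(5, 7 - (7 rem 5)).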
- -put_an_object(Pid, Key, Data, Indexes) when is_list(Indexes) -> - lager:info("Putting object ~p", [Key]), - MetaData = dict:from_list([{<<"index">>, Indexes}]), - Robj0 = riakc_obj:new(?BUCKET, Key), - Robj1 = riakc_obj:update_value(Robj0, Data), - Robj2 = riakc_obj:update_metadata(Robj1, MetaData), - riakc_pb_socket:put(Pid, Robj2); -put_an_object(Pid, Key, IntIndex, BinIndex) when is_integer(IntIndex), is_binary(BinIndex) -> - put_an_object(Pid, Key, Key, [{"field1_bin", BinIndex},{"field2_int", IntIndex}]). - -int_to_key(N) -> - case N < 100 of - true -> - list_to_binary(io_lib:format("obj~2..0B", [N])); - _ -> - list_to_binary(io_lib:format("obj~p", [N])) - end. - -int_to_field1_bin(N) -> - list_to_binary(io_lib:format("val~p", [N])). - -stream_pb(Pid, Q) -> - pb_query(Pid, Q, [stream]), - stream_loop(). - -stream_pb(Pid, Q, Opts) -> - pb_query(Pid, Q, [stream|Opts]), - stream_loop(). - -stream_loop() -> - stream_loop(orddict:new()). - -stream_loop(Acc) -> - receive - {_Ref, {done, undefined}} -> - {ok, orddict:to_list(Acc)}; - {_Ref, {done, Continuation}} -> - {ok, orddict:store(continuation, Continuation, Acc)}; - {_Ref, ?INDEX_STREAM_RESULT{terms=undefined, keys=Keys}} -> - Acc2 = orddict:update(keys, fun(Existing) -> Existing++Keys end, Keys, Acc), - stream_loop(Acc2); - {_Ref, ?INDEX_STREAM_RESULT{terms=Results}} -> - Acc2 = orddict:update(results, fun(Existing) -> Existing++Results end, Results, Acc), - stream_loop(Acc2); - {_Ref, {error, <<"{error,timeout}">>}} -> - {error, timeout}; - {_Ref, Wat} -> - lager:info("got a wat ~p", [Wat]), - stream_loop(Acc) - end. - -pb_query(Pid, {Field, Val}, Opts) -> - riakc_pb_socket:get_index_eq(Pid, ?BUCKET, Field, Val, Opts); -pb_query(Pid, {Field, Start, End}, Opts) -> - riakc_pb_socket:get_index_range(Pid, ?BUCKET, Field, Start, End, Opts). - -http_stream(NodePath, Query, Opts) -> - http_query(NodePath, Query, [{stream, true} | Opts], stream). - -http_query(NodePath, Q) -> - http_query(NodePath, Q, []). - -http_query(NodePath, Query, Opts) -> - http_query(NodePath, Query, Opts, undefined). - -http_query(NodePath, {Field, Value}, Opts, Pid) -> - QString = opts_to_qstring(Opts, []), - Flag = case is_integer(Value) of true -> "w"; false -> "s" end, - Url = url("~s/buckets/~s/index/~s/~"++Flag++"~s", [NodePath, ?BUCKET, Field, Value, QString]), - http_get(Url, Pid); -http_query(NodePath, {Field, Start, End}, Opts, Pid) -> - QString = opts_to_qstring(Opts, []), - Flag = case is_integer(Start) of true -> "w"; false -> "s" end, - Url = url("~s/buckets/~s/index/~s/~"++Flag++"/~"++Flag++"~s", [NodePath, ?BUCKET, Field, Start, End, QString]), - http_get(Url, Pid). - -url(Format, Elements) -> - Path = io_lib:format(Format, Elements), - lists:flatten(Path). - -http_get(Url, undefined) -> - lager:info("getting ~p", [Url]), - {ok,{{"HTTP/1.1",200,"OK"}, _, Body}} = httpc:request(Url), - {struct, Result} = mochijson2:decode(Body), - Result; -http_get(Url, stream) -> - lager:info("streaming ~p", [Url]), - {ok, Ref} = httpc:request(get, {Url, []}, [], [{stream, self}, {sync, false}]), - start_http_stream(Ref). - -opts_to_qstring([], QString) -> - QString; -opts_to_qstring([Opt|Rest], []) -> - QOpt = opt_to_string("?", Opt), - opts_to_qstring(Rest, QOpt); -opts_to_qstring([Opt|Rest], QString) -> - QOpt = opt_to_string("&", Opt), - opts_to_qstring(Rest, QString++QOpt). 
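url/2 and opts_to_qstring/2 above assemble the 2i HTTP URLs; a sketch of typical output (the node path and the max_results value are assumptions for illustration; integer terms render with ~w exactly as in http_query/4):

    %% example_2i_url("http://127.0.0.1:8098") yields
    %% "http://127.0.0.1:8098/buckets/2ibucket/index/field2_int/10/19?max_results=5"
    example_2i_url(NodePath) ->
        QString = opts_to_qstring([{max_results, 5}], []),
        url("~s/buckets/~s/index/~s/~w/~w~s",
            [NodePath, ?BUCKET, <<"field2_int">>, 10, 19, QString]).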
-
-opt_to_string(Sep, {Name, Value}) when is_integer(Value) ->
-    io_lib:format(Sep++"~s=~p", [Name, Value]);
-opt_to_string(Sep, {Name, Value})->
-    io_lib:format(Sep++"~s=~s", [Name, url_encode(Value)]);
-opt_to_string(Sep, Name) ->
-    io_lib:format(Sep++"~s=~s", [Name, true]).
-
-url_encode(Val) when is_binary(Val) ->
-    url_encode(binary_to_list(Val));
-url_encode(Val) when is_atom(Val) ->
-    url_encode(atom_to_list(Val));
-url_encode(Val) ->
-    ibrowse_lib:url_encode(Val).
-
-start_http_stream(Ref) ->
-    receive
-        {http, {Ref, stream_start, Headers}} ->
-            Boundary = get_boundary(proplists:get_value("content-type", Headers)),
-            http_stream_loop(Ref, <<>>, Boundary);
-        Other -> lager:error("Unexpected message ~p", [Other]),
-            {error, unknown_message}
-    after 60000 ->
-        {error, timeout_local}
-    end.
-
-http_stream_loop(Ref, Acc, {Boundary, BLen}=B) ->
-    receive
-        {http, {Ref, stream, Chunk}} ->
-            http_stream_loop(Ref, <<Acc/binary, Chunk/binary>>, B);
-        {http, {Ref, stream_end, _Headers}} ->
-            Parts = binary:split(Acc,[
-                <<"\r\n--", Boundary:BLen/bytes, "\r\nContent-Type: application/json\r\n\r\n">>,
-                <<"\r\n--", Boundary:BLen/bytes,"--\r\n">>
-            ], [global, trim]),
-            lists:foldl(fun(<<>>, Results) -> Results;
-                (Part, Results) ->
-                    {struct, Result} = mochijson2:decode(Part),
-                    orddict:merge(fun(_K, V1, V2) -> V1 ++ V2 end,
-                        Results, Result)
-            end, [], Parts);
-        Other -> lager:error("Unexpected message ~p", [Other]),
-            {error, unknown_message}
-    after 60000 ->
-        {error, timeout_local}
-    end.
-
-get_boundary("multipart/mixed;boundary=" ++ Boundary) ->
-    B = list_to_binary(Boundary),
-    {B, byte_size(B)};
-get_boundary(_) ->
-    undefined.
-
diff --git a/tests/sibling_explosion.erl b/tests/sibling_explosion.erl
deleted file mode 100644
index aee706c68..000000000
--- a/tests/sibling_explosion.erl
+++ /dev/null
@@ -1,90 +0,0 @@
--module(sibling_explosion).
--include_lib("eunit/include/eunit.hrl").
--export([confirm/0]).
--compile(export_all).
-
--define(B, <<"b">>).
--define(K, <<"k">>).
-
-%% This test provokes a sibling explosion. It does so with a single
-%% node and a single client. All it does to achieve the explosion is
-%% interleave fetch / resolve / writes. It works like this:
-%% - The aim of the clients is to write a single set of integers from 0-99
-%% - one client gets odds, the other evens
-%% - The test interleaves writes from the clients
-%% - Each client, in turn:
-%% -- fetches the key from riak
-%% -- resolves the sibling values (by performing a set union on the values in all siblings)
-%% -- adds a new value to the set
-%% -- Puts the new value back to riak
-%% This results in 99 siblings, each a subset of the following sibling [0] | [0, 1] | [0, 1, 2], [0, 1, 2, 3] etc
-confirm() ->
-    Conf = [{riak_core, [{default_bucket_props, [{allow_mult, true},
-        {dvv_enabled, true}]}]}],
-    [Node1] = rt:deploy_nodes(1, Conf),
-    N = 100,
-
-    lager:info("Put new object in ~p via PBC.", [Node1]),
-    PB = rt:pbc(Node1),
-
-    A0 = riakc_obj:new(<<"b">>, <<"k">>, sets:from_list([0])),
-    B0 = riakc_obj:new(<<"b">>, <<"k">>, sets:from_list([1])),
-
-    _ = explode(PB, {A0, B0}, N),
-
-    {ok, SibCheck1} = riakc_pb_socket:get(PB, <<"b">>, <<"k">>),
-    %% there should now be only two siblings
-    ?assertEqual(2, riakc_obj:value_count(SibCheck1)),
-    %% siblings should merge to include all writes
-    assert_sibling_values(riakc_obj:get_values(SibCheck1), N),
-    pass.
-
-%% Check that every write was written.
-assert_sibling_values(Values, N) ->
-    V = resolve(Values, sets:new()),
-    L = lists:sort(sets:to_list(V)),
-    Expected = lists:seq(0, N-2),
-    ?assertEqual(Expected, L).
-
-%% Pick one of the two objects, and perform a fetch, resolve, update
-%% cycle with it. The point is that the two objects' writes are
-%% interleaved. First A is updated, then B. Each object already has a
-%% "latest" vclock returned from its last put. This simulates the
-%% case where a client fetches a vclock before PUT, but another write
-%% lands at the vnode after the vclock is returned and before the next
-%% PUT. Each PUT sees all but one write that Riak has seen, meaning
-%% there is a perpetual race / sibling. Without DVV that means ALL
-%% writes are added to the sibling set. With DVV, we correctly capture
-%% the resolution of seen writes.
-explode(_PB, {A, B}, 1) ->
-    {A, B};
-explode(PB, {A0, B}, Cnt) when Cnt rem 2 == 0 ->
-    A = resolve_mutate_store(PB, Cnt, A0),
-    explode(PB, {A, B}, Cnt-1);
-explode(PB, {A, B0}, Cnt) when Cnt rem 2 /= 0 ->
-    B = resolve_mutate_store(PB, Cnt, B0),
-    explode(PB, {A, B}, Cnt-1).
-
-%% resolve the fetch, and put a new value
-resolve_mutate_store(PB, N, Obj0) ->
-    Obj = resolve_update(Obj0, N),
-    {ok, Obj2} = riakc_pb_socket:put(PB, Obj, [return_body]),
-    Obj2.
-
-%% simply union the values, and add a new one for this operation
-resolve_update(Obj, N) ->
-    case riakc_obj:get_values(Obj) of
-        [] -> Obj;
-        Values ->
-            Value0 = resolve(Values, sets:new()),
-            Value = sets:add_element(N, Value0),
-            lager:info("Storing ~p", [N]),
-            riakc_obj:update_metadata(riakc_obj:update_value(Obj, Value), dict:new())
-    end.
-
-%% Set union on each value
-resolve([], Acc) ->
-    Acc;
-resolve([V0 | Rest], Acc) ->
-    V = binary_to_term(V0),
-    resolve(Rest, sets:union(V, Acc)).
diff --git a/tests/test_cluster.erl b/tests/test_cluster.erl
deleted file mode 100644
index abe34d661..000000000
--- a/tests/test_cluster.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(test_cluster).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
-confirm() ->
-    Config = [{riak_search, [{enabled, true}]}],
-    rt:build_cluster(4, Config),
-    ?assert(false),
-    fail.
\ No newline at end of file
diff --git a/tests/verify_2i_aae.erl b/tests/verify_2i_aae.erl
deleted file mode 100644
index 1ea4c0e2d..000000000
--- a/tests/verify_2i_aae.erl
+++ /dev/null
@@ -1,228 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(verify_2i_aae).
--behaviour(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
--include_lib("riakc/include/riakc.hrl").
-
-%% Make it multi-backend compatible.
--define(BUCKETS, [<<"eleveldb1">>, <<"memory1">>]).
--define(NUM_ITEMS, 1000).
--define(NUM_DELETES, 100).
--define(SCAN_BATCH_SIZE, 100).
--define(N_VAL, 3).
-
-confirm() ->
-    [Node1] = rt:build_cluster(1,
-        [{riak_kv,
-            [{anti_entropy, {off, []}},
-             {anti_entropy_build_limit, {100, 500}},
-             {anti_entropy_concurrency, 100},
-             {anti_entropy_tick, 200}]}]),
-    rt_intercept:load_code(Node1),
-    rt_intercept:add(Node1,
-        {riak_object,
-            [{{index_specs, 1}, skippable_index_specs},
-             {{diff_index_specs, 2}, skippable_diff_index_specs}]}),
-    lager:info("Installed intercepts to corrupt index specs on node ~p", [Node1]),
-    %%rpc:call(Node1, lager, set_loglevel, [lager_console_backend, debug]),
-    PBC = rt:pbc(Node1),
-    NumItems = ?NUM_ITEMS,
-    NumDel = ?NUM_DELETES,
-    pass = check_lost_objects(Node1, PBC, NumItems, NumDel),
-    pass = check_lost_indexes(Node1, PBC, NumItems),
-    pass = check_kill_repair(Node1),
-    lager:info("Et voila"),
-    riakc_pb_socket:stop(PBC),
-    pass.
-
-%% Write objects with a 2i index. Modify/delete the objects without updating
-%% the 2i index. Test that running 2i repair corrects the 2i indexes.
-check_lost_objects(Node1, PBC, NumItems, NumDel) ->
-    Index = {integer_index, "i"},
-    set_skip_index_specs(Node1, false),
-    lager:info("Putting ~p objects with indexes", [NumItems]),
-    HalfNumItems = NumItems div 2,
-    [put_obj(PBC, Bucket, N, N+1, Index) || N <- lists:seq(1, HalfNumItems),
-        Bucket <- ?BUCKETS],
-    lager:info("Put half the objects, now enable AAE and build trees"),
-    %% Enable AAE and build trees.
-    ok = rpc:call(Node1, application, set_env,
-        [riak_kv, anti_entropy, {on, [debug]}]),
-    ok = rpc:call(Node1, riak_kv_entropy_manager, enable, []),
-    rt:wait_until_aae_trees_built([Node1]),
-
-    lager:info("AAE trees built, now put the rest of the data"),
-    [put_obj(PBC, Bucket, N, N+1, Index)
-     || N <- lists:seq(HalfNumItems+1, NumItems), Bucket <- ?BUCKETS],
-    %% Verify they are there.
- ExpectedInitial = [{to_key(N+1), to_key(N)} || N <- lists:seq(1, NumItems)], - lager:info("Check objects are there as expected"), - [assert_range_query(PBC, Bucket, ExpectedInitial, Index, 1, NumItems+1) - || Bucket <- ?BUCKETS], - - lager:info("Now mess index spec code and change values"), - set_skip_index_specs(Node1, true), - [put_obj(PBC, Bucket, N, N, Index) || N <- lists:seq(1, NumItems-NumDel), - Bucket <- ?BUCKETS], - DelRange = lists:seq(NumItems-NumDel+1, NumItems), - lager:info("Deleting ~b objects without updating indexes", [NumDel]), - [del_obj(PBC, Bucket, N) || N <- DelRange, Bucket <- ?BUCKETS], - DelKeys = [to_key(N) || N <- DelRange], - [rt:wait_until(fun() -> rt:pbc_really_deleted(PBC, Bucket, DelKeys) end) - || Bucket <- ?BUCKETS], - %% Verify they are damaged - lager:info("Verify change did not take, needs repair"), - [assert_range_query(PBC, Bucket, ExpectedInitial, Index, 1, NumItems+1) - || Bucket <- ?BUCKETS], - set_skip_index_specs(Node1, false), - run_2i_repair(Node1), - lager:info("Now verify that previous changes are visible after repair"), - ExpectedFinal = [{to_key(N), to_key(N)} || N <- lists:seq(1, NumItems-NumDel)], - [assert_range_query(PBC, Bucket, ExpectedFinal, Index, 1, NumItems+1) - || Bucket <- ?BUCKETS], - pass. - -do_tree_rebuild(Node) -> - lager:info("Let's go through a tree rebuild right here"), - %% Cheat by clearing build times from ETS directly, as the code doesn't - %% ever clear them currently. - ?assertEqual(true, rpc:call(Node, ets, delete_all_objects, [ets_riak_kv_entropy])), - %% Make it so it doesn't go wild rebuilding things when the expiration is - %% tiny. - ?assertEqual(ok, rpc:call(Node, application, set_env, [riak_kv, - anti_entropy_build_limit, - {0, 5000}])), - %% Make any tree expire on tick. - ?assertEqual(ok, rpc:call(Node, application, set_env, [riak_kv, - anti_entropy_expire, - 1])), - %% Wait for a good number of ticks. - timer:sleep(5000), - %% Make sure things stop expiring on tick - ?assertEqual(ok, rpc:call(Node, application, set_env, [riak_kv, - anti_entropy_expire, - 7 * 24 * 60 * 60 * 1000])), - %% And let the manager start allowing builds again. - ?assertEqual(ok, rpc:call(Node, application, set_env, [riak_kv, - anti_entropy_build_limit, - {100, 1000}])), - rt:wait_until_aae_trees_built([Node]), - ok. - -%% Write objects without a 2i index. Test that running 2i repair will generate -%% the missing indexes. -check_lost_indexes(Node1, PBC, NumItems) -> - set_skip_index_specs(Node1, true), - Index = {integer_index, "ii"}, - lager:info("Writing ~b objects without index", [NumItems]), - [put_obj(PBC, Bucket, N, N+1, Index) || Bucket <- ?BUCKETS, - N <- lists:seq(1, NumItems)], - lager:info("Verify that objects cannot be found via index"), - [assert_range_query(PBC, Bucket, [], Index, 1, NumItems+1) - || Bucket <- ?BUCKETS], - do_tree_rebuild(Node1), - run_2i_repair(Node1), - lager:info("Check that objects can now be found via index"), - Expected = [{to_key(N+1), to_key(N)} || N <- lists:seq(1, NumItems)], - [assert_range_query(PBC, Bucket, Expected, Index, 1, NumItems+1) - || Bucket <- ?BUCKETS], - pass. - -check_kill_repair(Node1) -> - lager:info("Test that killing 2i repair works as desired"), - spawn(fun() -> - timer:sleep(1500), - rt:admin(Node1, ["repair-2i", "kill"]) - end), - ExitStatus = run_2i_repair(Node1), - case ExitStatus of - normal -> - lager:info("Shucks. 
Repair finished before we could kill it"); - killed -> - lager:info("Repair was forcibly killed"); - user_request -> - lager:info("Repair exited gracefully, we should be able to " - "trigger another repair immediately"), - normal = run_2i_repair(Node1) - end, - pass. - -run_2i_repair(Node1) -> - lager:info("Run 2i AAE repair"), - ?assertMatch({ok, _}, rt:admin(Node1, ["repair-2i"])), - RepairPid = rpc:call(Node1, erlang, whereis, [riak_kv_2i_aae]), - lager:info("Wait for repair process to finish"), - Mon = monitor(process, RepairPid), - MaxWaitTime = rt_config:get(rt_max_wait_time), - receive - {'DOWN', Mon, _, _, Status} -> - lager:info("Status: ~p", [Status]), - Status - after - MaxWaitTime -> - lager:error("Timed out (~pms) waiting for 2i AAE repair process", [MaxWaitTime]), - ?assertEqual(aae_2i_repair_complete, aae_2i_repair_timeout) - end. - -set_skip_index_specs(Node, Val) -> - ok = rpc:call(Node, application, set_env, - [riak_kv, skip_index_specs, Val]). - -to_key(N) -> - list_to_binary(integer_to_list(N)). - -put_obj(PBC, Bucket, N, IN, Index) -> - K = to_key(N), - Obj = - case riakc_pb_socket:get(PBC, Bucket, K) of - {ok, ExistingObj} -> - ExistingObj; - _ -> - riakc_obj:new(Bucket, K, K) - end, - MD = riakc_obj:get_metadata(Obj), - MD2 = riakc_obj:set_secondary_index(MD, {Index, [IN]}), - Obj2 = riakc_obj:update_metadata(Obj, MD2), - riakc_pb_socket:put(PBC, Obj2, [{dw, ?N_VAL}]). - -del_obj(PBC, Bucket, N) -> - K = to_key(N), - case riakc_pb_socket:get(PBC, Bucket, K) of - {ok, ExistingObj} -> - ?assertMatch(ok, riakc_pb_socket:delete_obj(PBC, ExistingObj)); - _ -> - ?assertMatch(ok, riakc_pb_socket:delete(PBC, Bucket, K)) - end. - - -assert_range_query(Pid, Bucket, Expected0, Index, StartValue, EndValue) -> - lager:info("Searching Index ~p/~p for ~p-~p", [Bucket, Index, StartValue, EndValue]), - {ok, ?INDEX_RESULTS{terms=Keys}} = riakc_pb_socket:get_index_range(Pid, Bucket, Index, StartValue, EndValue, [{return_terms, true}]), - Actual = case Keys of - undefined -> - []; - _ -> - lists:sort(Keys) - end, - Expected = lists:sort(Expected0), - ?assertEqual({Bucket, Expected}, {Bucket, Actual}), - lager:info("Yay! ~b (actual) == ~b (expected)", [length(Actual), length(Expected)]). diff --git a/tests/verify_2i_limit.erl b/tests/verify_2i_limit.erl deleted file mode 100644 index 944b371d4..000000000 --- a/tests/verify_2i_limit.erl +++ /dev/null @@ -1,126 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_2i_limit). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). --import(secondary_index_tests, [put_an_object/2, put_an_object/4, int_to_key/1, - stream_pb/3, http_query/3, pb_query/3]). --define(BUCKET, <<"2ibucket">>). --define(FOO, <<"foo">>). 
--define(MAX_RESULTS, 50). - -confirm() -> - inets:start(), - - Nodes = rt:build_cluster(3), - ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), - - RiakHttp = rt:httpc(hd(Nodes)), - HttpUrl = rt:http_url(hd(Nodes)), - PBPid = rt:pbc(hd(Nodes)), - - [put_an_object(PBPid, N) || N <- lists:seq(0, 100)], - - ExpectedKeys = lists:sort([int_to_key(N) || N <- lists:seq(0, 100)]), - {FirstHalf, Rest} = lists:split(?MAX_RESULTS, ExpectedKeys), - Q = {<<"$key">>, int_to_key(0), int_to_key(999)}, - - %% PB - {ok, PBRes} = stream_pb(PBPid, Q, [{max_results, ?MAX_RESULTS}]), - ?assertEqual(FirstHalf, proplists:get_value(keys, PBRes, [])), - PBContinuation = proplists:get_value(continuation, PBRes), - - {ok, PBKeys2} = stream_pb(PBPid, Q, [{continuation, PBContinuation}]), - ?assertEqual(Rest, proplists:get_value(keys, PBKeys2, [])), - - %% HTTP - HttpRes = rhc:get_index(RiakHttp, ?BUCKET, <<"$key">>, - {int_to_key(0), int_to_key(999)}, - [{max_results, ?MAX_RESULTS}]), - ?assertMatch({ok, ?INDEX_RESULTS{}}, HttpRes), - {ok, ?INDEX_RESULTS{keys=HttpResKeys, - continuation=HttpContinuation}} = HttpRes, - ?assertEqual(FirstHalf, HttpResKeys), - ?assertEqual(PBContinuation, HttpContinuation), - - HttpRes2 = rhc:get_index(RiakHttp, ?BUCKET, <<"$key">>, - {int_to_key(0), int_to_key(999)}, - [{continuation, HttpContinuation}]), - ?assertMatch({ok, ?INDEX_RESULTS{}}, HttpRes2), - {ok, ?INDEX_RESULTS{keys=HttpRes2Keys}} = HttpRes2, - ?assertEqual(Rest, HttpRes2Keys), - - %% Multiple indexes for single key - O1 = riakc_obj:new(?BUCKET, <<"bob">>, <<"1">>), - Md = riakc_obj:get_metadata(O1), - Md2 = riakc_obj:set_secondary_index(Md, {{integer_index, "i1"}, [300, 301, 302]}), - O2 = riakc_obj:update_metadata(O1, Md2), - riakc_pb_socket:put(PBPid, O2), - - MQ = {"i1_int", 300, 302}, - {ok, ?INDEX_RESULTS{terms=Terms, continuation=RTContinuation}} = pb_query(PBPid, MQ, [{max_results, 2}, return_terms]), - - ?assertEqual([{<<"300">>, <<"bob">>}, - {<<"301">>, <<"bob">>}], Terms), - - {ok, ?INDEX_RESULTS{terms=Terms2}} = pb_query(PBPid, MQ, [{max_results, 2}, return_terms, - {continuation, RTContinuation}]), - - ?assertEqual([{<<"302">>,<<"bob">>}], Terms2), - - %% gh611 - equals query pagination - riakc_pb_socket:delete(PBPid, ?BUCKET, <<"bob">>), - rt:wait_until(fun() -> rt:pbc_really_deleted(PBPid, ?BUCKET, [<<"bob">>]) end), - - [put_an_object(PBPid, int_to_key(N), 1000, <<"myval">>) || N <- lists:seq(0, 100)], - - [ verify_eq_pag(PBPid, HttpUrl, EqualsQuery, FirstHalf, Rest) || - EqualsQuery <- [{"field1_bin", <<"myval">>}, - {"field2_int", 1000}, - {"$bucket", ?BUCKET}]], - - riakc_pb_socket:stop(PBPid), - pass. 
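%% The max_results/continuation contract exercised above generalizes to a
%% simple paging loop. A minimal sketch, not part of the test suite: the
%% helper name page_keys and its option handling are illustrative, and
%% ?INDEX_RESULTS is the riakc.hrl record this module already includes.
page_keys(Pid, Bucket, Index, Range, PageSize) ->
    page_keys(Pid, Bucket, Index, Range, PageSize, undefined, []).

page_keys(Pid, Bucket, Index, {Start, End} = Range, PageSize, Cont, Acc) ->
    %% Feed the previous page's continuation back in, when there is one.
    Opts = [{max_results, PageSize}] ++
        [{continuation, Cont} || Cont =/= undefined],
    {ok, ?INDEX_RESULTS{keys = Keys, continuation = Cont2}} =
        riakc_pb_socket:get_index_range(Pid, Bucket, Index, Start, End, Opts),
    case Cont2 of
        undefined -> Acc ++ Keys;  %% last page
        _ -> page_keys(Pid, Bucket, Index, Range, PageSize, Cont2, Acc ++ Keys)
    end.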
- -verify_eq_pag(PBPid, RiakHttp, EqualsQuery, FirstHalf, Rest) -> - HTTPEqs = http_query(RiakHttp, EqualsQuery, [{max_results, ?MAX_RESULTS}]), - ?assertEqual({EqualsQuery, FirstHalf}, - {EqualsQuery, proplists:get_value(<<"keys">>, HTTPEqs, [])}), - EqualsHttpContinuation = proplists:get_value(<<"continuation">>, HTTPEqs), - - HTTPEqs2 = http_query(RiakHttp, EqualsQuery, - [{continuation, EqualsHttpContinuation}]), - ?assertEqual({EqualsQuery, Rest}, - {EqualsQuery, proplists:get_value(<<"keys">>, HTTPEqs2, [])}), - - %% And PB - - {ok, EqPBRes} = stream_pb(PBPid, EqualsQuery, - [{max_results, ?MAX_RESULTS}]), - ?assertEqual({EqualsQuery, FirstHalf}, - {EqualsQuery, proplists:get_value(keys, EqPBRes, [])}), - EqPBContinuation = proplists:get_value(continuation, EqPBRes), - - {ok, EqPBKeys2} = stream_pb(PBPid, EqualsQuery, - [{continuation, EqPBContinuation}]), - ?assertEqual({EqualsQuery, Rest}, - {EqualsQuery, proplists:get_value(keys, EqPBKeys2, [])}). diff --git a/tests/verify_2i_mixed_cluster.erl b/tests/verify_2i_mixed_cluster.erl deleted file mode 100644 index 02369948b..000000000 --- a/tests/verify_2i_mixed_cluster.erl +++ /dev/null @@ -1,75 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_2i_mixed_cluster). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --import(secondary_index_tests, [put_an_object/2]). --define(BUCKET, <<"2ibucket">>). 
- -confirm() -> - TestMetaData = riak_test_runner:metadata(), - OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - Nodes = - [CurrentNode, OldNode1, _] = - rt:build_cluster([{current, - [{riak_kv, [{anti_entropy, {off, []}}]}]}, - OldVsn, OldVsn]), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - - PBC1 = rt:pbc(CurrentNode), - PBC2 = rt:pbc(OldNode1), - HTTPC1 = rt:httpc(CurrentNode), - - Clients = [{pb, PBC1}, {pb, PBC2}, {http, HTTPC1}], - - [put_an_object(PBC1, N) || N <- lists:seq(0, 20)], - - K = fun secondary_index_tests:int_to_key/1, - - assertExactQuery(Clients, [K(5)], <<"field1_bin">>, <<"val5">>), - assertExactQuery(Clients, [K(5)], <<"field2_int">>, 5), - assertExactQuery(Clients, [K(N) || N <- lists:seq(5, 9)], <<"field3_int">>, 5), - assertRangeQuery(Clients, [K(N) || N <- lists:seq(10, 18)], <<"field1_bin">>, <<"val10">>, <<"val18">>), - assertRangeQuery(Clients, [K(N) || N <- lists:seq(10, 19)], <<"field2_int">>, 10, 19), - assertRangeQuery(Clients, [K(N) || N <- lists:seq(10, 17)], <<"$key">>, <<"obj10">>, <<"obj17">>), - - lager:info("Delete an object, verify deletion..."), - ToDel = [<<"obj05">>, <<"obj11">>], - [?assertMatch(ok, riakc_pb_socket:delete(PBC1, ?BUCKET, KD)) || KD <- ToDel], - lager:info("Make sure the tombstone is reaped..."), - ?assertMatch(ok, rt:wait_until(fun() -> rt:pbc_really_deleted(PBC1, ?BUCKET, ToDel) end)), - - assertExactQuery(Clients, [], <<"field1_bin">>, <<"val5">>), - assertExactQuery(Clients, [], <<"field2_int">>, 5), - assertExactQuery(Clients, [K(N) || N <- lists:seq(6, 9)], <<"field3_int">>, 5), - assertRangeQuery(Clients, [K(N) || N <- lists:seq(10, 18), N /= 11], <<"field1_bin">>, <<"val10">>, <<"val18">>), - assertRangeQuery(Clients, [K(N) || N <- lists:seq(10, 19), N /= 11], <<"field2_int">>, 10, 19), - assertRangeQuery(Clients, [K(N) || N <- lists:seq(10, 17), N /= 11], <<"$key">>, <<"obj10">>, <<"obj17">>), - - pass. - -assertExactQuery(C, K, F, V) -> - secondary_index_tests:assertExactQuery(C, K, F, V, {false, false}). - -assertRangeQuery(C, K, F, V1, V2) -> - secondary_index_tests:assertRangeQuery(C, K, F, V1, V2, undefined, {false, false}). diff --git a/tests/verify_2i_returnterms.erl b/tests/verify_2i_returnterms.erl deleted file mode 100644 index 7a9f50ee4..000000000 --- a/tests/verify_2i_returnterms.erl +++ /dev/null @@ -1,76 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_2i_returnterms). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --import(secondary_index_tests, [put_an_object/2, put_an_object/4, int_to_key/1, - stream_pb/3, http_query/3]). --define(BUCKET, <<"2ibucket">>). --define(FOO, <<"foo">>). --define(Q_OPTS, [{return_terms, true}]). 
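%% A note on ?Q_OPTS above: with {return_terms, true} a range query yields
%% {IndexTerm, Key} pairs, while $bucket, $key, and equality queries ignore
%% the flag and still yield bare keys; confirm/0 below checks both
%% behaviours. The two result shapes, with hypothetical values:
%%   [{<<"42">>, <<"obj42">>}, ...]   %% range query with return_terms
%%   [<<"obj42">>, ...]               %% $bucket/$key/equality query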
- -confirm() -> - inets:start(), - - Nodes = rt:build_cluster(3), - ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), - - RiakHttp = rt:http_url(hd(Nodes)), - PBPid = rt:pbc(hd(Nodes)), - - [put_an_object(PBPid, N) || N <- lists:seq(0, 100)], - [put_an_object(PBPid, int_to_key(N), N, ?FOO) || N <- lists:seq(101, 200)], - - %% Bucket, key, and index_eq queries should ignore `return_terms' - ExpectedKeys = lists:sort([int_to_key(N) || N <- lists:seq(0, 200)]), - assertEqual(RiakHttp, PBPid, ExpectedKeys, {<<"$key">>, int_to_key(0), int_to_key(999)}, ?Q_OPTS, keys), - assertEqual(RiakHttp, PBPid, ExpectedKeys, { <<"$bucket">>, ?BUCKET}, ?Q_OPTS, keys), - - ExpectedFooKeys = lists:sort([int_to_key(N) || N <- lists:seq(101, 200)]), - assertEqual(RiakHttp, PBPid, ExpectedFooKeys, {<<"field1_bin">>, ?FOO}, ?Q_OPTS, keys), - - ExpectedRangeResults = lists:sort([{list_to_binary(integer_to_list(N)), int_to_key(N)} || N <- lists:seq(1, 100)]), - assertEqual(RiakHttp, PBPid, ExpectedRangeResults, {<<"field2_int">>, "1", "100"}, ?Q_OPTS, results), - - riakc_pb_socket:stop(PBPid), - pass. - -%% Check the PB result against our expectations -%% and the non-streamed HTTP -assertEqual(Http, PB, Expected, Query, Opts, ResultKey) -> - {ok, PBRes} = stream_pb(PB, Query, Opts), - PBKeys = proplists:get_value(ResultKey, PBRes, []), - HTTPRes = http_query(Http, Query, Opts), - HTTPResults0 = proplists:get_value(atom_to_binary(ResultKey, latin1), HTTPRes, []), - HTTPResults = decode_http_results(ResultKey, HTTPResults0), - ?assertEqual(Expected, lists:sort(PBKeys)), - ?assertEqual(Expected, lists:sort(HTTPResults)). - -decode_http_results(keys, Keys) -> - Keys; -decode_http_results(results, Results) -> - decode_http_results(Results, []); - -decode_http_results([], Acc) -> - lists:reverse(Acc); -decode_http_results([{struct, [Res]} | Rest ], Acc) -> - decode_http_results(Rest, [Res | Acc]). - diff --git a/tests/verify_2i_stream.erl b/tests/verify_2i_stream.erl deleted file mode 100644 index 32aff6939..000000000 --- a/tests/verify_2i_stream.erl +++ /dev/null @@ -1,68 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_2i_stream). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --import(secondary_index_tests, [put_an_object/2, put_an_object/4, int_to_key/1, - stream_pb/2, http_query/2, http_stream/3]). --define(BUCKET, <<"2ibucket">>). --define(FOO, <<"foo">>). 
- -confirm() -> - inets:start(), - - Nodes = rt:build_cluster(3), - ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), - - RiakHttp = rt:http_url(hd(Nodes)), - PBPid = rt:pbc(hd(Nodes)), - - [put_an_object(PBPid, N) || N <- lists:seq(0, 100)], - [put_an_object(PBPid, int_to_key(N), N, ?FOO) || N <- lists:seq(101, 200)], - - ExpectedKeys = lists:sort([int_to_key(N) || N <- lists:seq(0, 200)]), - assertEqual(RiakHttp, PBPid, ExpectedKeys, {<<"$key">>, int_to_key(0), int_to_key(999)}), - assertEqual(RiakHttp, PBPid, ExpectedKeys, { <<"$bucket">>, ?BUCKET}), - - ExpectedFooKeys = lists:sort([int_to_key(N) || N <- lists:seq(101, 200)]), - assertEqual(RiakHttp, PBPid, ExpectedFooKeys, {<<"field1_bin">>, ?FOO}), - - %% Note: not sorted by key, but by value (the int index) - ExpectedRangeKeys = [int_to_key(N) || N <- lists:seq(1, 100)], - assertEqual(RiakHttp, PBPid, ExpectedRangeKeys, {<<"field2_int">>, "1", "100"}), - - riakc_pb_socket:stop(PBPid), - pass. - -%% Check the PB result against our expectations -%% and the non-streamed HTTP -assertEqual(Http, PB, Expected0, Query) -> - {ok, PBRes} = stream_pb(PB, Query), - PBKeys = proplists:get_value(keys, PBRes, []), - HTTPRes = http_query(Http, Query), - StreamHTTPRes = http_stream(Http, Query, []), - HTTPKeys = proplists:get_value(<<"keys">>, HTTPRes, []), - StreamHttpKeys = proplists:get_value(<<"keys">>, StreamHTTPRes, []), - Expected = lists:sort(Expected0), - ?assertEqual(Expected, lists:sort(PBKeys)), - ?assertEqual(Expected, lists:sort(HTTPKeys)), - ?assertEqual(Expected, lists:sort(StreamHttpKeys)). - diff --git a/tests/verify_2i_timeout.erl b/tests/verify_2i_timeout.erl deleted file mode 100644 index d913b5631..000000000 --- a/tests/verify_2i_timeout.erl +++ /dev/null @@ -1,69 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_2i_timeout). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --import(secondary_index_tests, [put_an_object/2, put_an_object/4, int_to_key/1, - stream_pb/3, url/2, http_query/3, http_stream/3]). --define(BUCKET, <<"2ibucket">>). --define(FOO, <<"foo">>). 
-
-confirm() ->
-    inets:start(),
-    Config = [{riak_kv, [{secondary_index_timeout, 1}]}], %% ludicrously short, should fail always
-    Nodes = rt:build_cluster([{current, Config}, {current, Config}, {current, Config}]),
-    ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))),
-
-    PBPid = rt:pbc(hd(Nodes)),
-    Http = rt:http_url(hd(Nodes)),
-
-    [put_an_object(PBPid, N) || N <- lists:seq(0, 100)],
-    [put_an_object(PBPid, int_to_key(N), N, ?FOO) || N <- lists:seq(101, 200)],
-
-    ExpectedKeys = lists:sort([int_to_key(N) || N <- lists:seq(0, 200)]),
-    Query = {<<"$bucket">>, ?BUCKET},
-    %% Verifies that the app.config param was used
-    ?assertEqual({error, timeout}, stream_pb(PBPid, Query, [])),
-
-    %% Override app.config
-    {ok, Res} = stream_pb(PBPid, Query, [{timeout, 5000}]),
-    ?assertEqual(ExpectedKeys, lists:sort(proplists:get_value(keys, Res, []))),
-
-    {ok, {{_, ErrCode, _}, _, Body}} = httpc:request(url("~s/buckets/~s/index/~s/~s~s",
-                                                         [Http, ?BUCKET, <<"$bucket">>, ?BUCKET, []])),
-
-    ?assertEqual(true, ErrCode >= 500),
-    ?assertMatch({match, _}, re:run(Body, "request timed out|{error,timeout}")), %% shows the app.config timeout
-
-    HttpRes = http_query(Http, Query, [{timeout, 5000}]),
-    ?assertEqual(ExpectedKeys, lists:sort(proplists:get_value(<<"keys">>, HttpRes, []))),
-
-    stream_http(Http, Query, ExpectedKeys),
-
-    riakc_pb_socket:stop(PBPid),
-    pass.
-
-stream_http(Http, Query, ExpectedKeys) ->
-    Res = http_stream(Http, Query, []),
-    ?assert(lists:member({<<"error">>,<<"timeout">>}, Res)),
-    Res2 = http_stream(Http, Query, [{timeout, 5000}]),
-    ?assertEqual(ExpectedKeys, lists:sort(proplists:get_value(<<"keys">>, Res2, []))).
-
diff --git a/tests/verify_aae.erl b/tests/verify_aae.erl
deleted file mode 100644
index 1d5fa2dda..000000000
--- a/tests/verify_aae.erl
+++ /dev/null
@@ -1,304 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%% @doc Verification of Active Anti Entropy.
-%% The basic guarantee of AAE is this: Even without the read repairs that will
-%% happen when data is accessed, inconsistencies between the replicas of a
-%% KV object will be repaired eventually. The test tries hard not to
-%% explicitly check for when the AAE trees are built or when exchanges are run
-%% in an effort to remain decoupled from the implementation. Instead, it
-%% simply configures AAE to build/rebuild and run exchanges between the data
-%% partitions. It then performs direct vnode reads on all replicas and
-%% verifies that they eventually match.
-%%
-%% Data recovery after the following scenarios is tested:
-%%
-%% - Data for a partition completely disappears.
-%% - Less than N replicas are written
-%% - Less than N replicas are updated
-%%
-%% Also, a sanity check is done to make sure AAE repairs go away eventually
-%% if there is no activity. That was an actual early AAE bug.
-
--module(verify_aae).
--export([confirm/0, verify_aae/1, test_single_partition_loss/3]).
--include_lib("eunit/include/eunit.hrl").
-
-% I would hope this would come from the testing framework some day
-% to use the test in small and large scenarios.
--define(DEFAULT_RING_SIZE, 8).
--define(CFG,
-        [{riak_kv,
-          [
-           % Speedy AAE configuration
-           {anti_entropy, {on, []}},
-           {anti_entropy_build_limit, {100, 1000}},
-           {anti_entropy_concurrency, 100},
-           {anti_entropy_expire, 24 * 60 * 60 * 1000}, % Not for now!
-           {anti_entropy_tick, 500}
-          ]},
-         {riak_core,
-          [
-           {ring_creation_size, ?DEFAULT_RING_SIZE}
-          ]}]
-       ).
--define(NUM_NODES, 1).
--define(NUM_KEYS, 1000).
--define(BUCKET, <<"test_bucket">>).
--define(N_VAL, 3).
-
-confirm() ->
-    Nodes = rt:build_cluster(?NUM_NODES, ?CFG),
-    verify_aae(Nodes),
-    pass.
-
-verify_aae(Nodes) ->
-    Node1 = hd(Nodes),
-    % First, recovery without tree rebuilds
-
-    % Test recovery from too few replicas written
-    KV1 = test_data(1, 1000),
-    test_less_than_n_writes(Node1, KV1),
-
-    % Test recovery when replicas are different
-    KV2 = [{K, <<V/binary, "a">>} || {K, V} <- KV1],
-    test_less_than_n_mods(Node1, KV2),
-
-    lager:info("Run similar tests now with tree rebuilds enabled"),
-    start_tree_rebuilds(Nodes),
-
-    % Test recovery from too few replicas written
-    KV3 = test_data(1001, 2000),
-    test_less_than_n_writes(Node1, KV3),
-
-    % Test recovery when replicas are different
-    KV4 = [{K, <<V/binary, "b">>} || {K, V} <- KV3],
-    test_less_than_n_mods(Node1, KV4),
-
-    lager:info("Writing 1000 objects"),
-    KV5 = test_data(2001, 3000),
-    write_data(Node1, KV5),
-
-    % Test recovery from single partition loss.
-    {PNuke, NNuke} = choose_partition_to_nuke(Node1, ?BUCKET, KV5),
-    test_single_partition_loss(NNuke, PNuke, KV5),
-
-    % Test recovery from losing AAE data
-    test_aae_partition_loss(NNuke, PNuke, KV5),
-
-    % Test recovery from losing both AAE and KV data
-    test_total_partition_loss(NNuke, PNuke, KV5),
-
-    % Make sure AAE repairs die down.
-    wait_until_no_aae_repairs(Nodes),
-
-    lager:info("Finished verifying AAE magic"),
-    ok.
-
-start_tree_rebuilds(Nodes) ->
-    rpc:multicall(Nodes, application, set_env, [riak_kv, anti_entropy_expire,
-                                                15 * 1000]).
-
-acc_preflists(Pl, PlCounts) ->
-    lists:foldl(fun(Idx, D) ->
-                        dict:update(Idx, fun(V) -> V+1 end, 0, D)
-                end, PlCounts, Pl).
-
-choose_partition_to_nuke(Node, Bucket, KVs) ->
-    Preflists = [get_preflist(Node, Bucket, K) || {K, _} <- KVs],
-    PCounts = lists:foldl(fun acc_preflists/2, dict:new(), Preflists),
-    CPs = [{C, P} || {P, C} <- dict:to_list(PCounts)],
-    {_, MaxP} = lists:max(CPs),
-    MaxP.
-
-get_preflist(Node, B, K) ->
-    DocIdx = rpc:call(Node, riak_core_util, chash_key, [{B, K}]),
-    PlTagged = rpc:call(Node, riak_core_apl, get_primary_apl, [DocIdx, ?N_VAL, riak_kv]),
-    Pl = [E || {E, primary} <- PlTagged],
-    Pl.
-
-to_key(N) ->
-    list_to_binary(io_lib:format("K~4..0B", [N])).
-
-test_data(Start, End) ->
-    Keys = [to_key(N) || N <- lists:seq(Start, End)],
-    [{K, K} || K <- Keys].
-
-write_data(Node, KVs) ->
-    write_data(Node, KVs, []).
-
-write_data(Node, KVs, Opts) ->
-    PB = rt:pbc(Node),
-    [begin
-         O =
-         case riakc_pb_socket:get(PB, ?BUCKET, K) of
-             {ok, Prev} ->
-                 riakc_obj:update_value(Prev, V);
-             _ ->
-                 riakc_obj:new(?BUCKET, K, V)
-         end,
-         ?assertMatch(ok, riakc_pb_socket:put(PB, O, Opts))
-     end || {K, V} <- KVs],
-    riakc_pb_socket:stop(PB),
-    ok.
-
-% @doc Verifies that the data is eventually restored to the expected set.
-verify_data(Node, KeyValues) -> - lager:info("Verify all replicas are eventually correct"), - PB = rt:pbc(Node), - CheckFun = - fun() -> - Matches = [verify_replicas(Node, ?BUCKET, K, V, ?N_VAL) - || {K, V} <- KeyValues], - CountTrues = fun(true, G) -> G+1; (false, G) -> G end, - NumGood = lists:foldl(CountTrues, 0, Matches), - Num = length(KeyValues), - case Num == NumGood of - true -> true; - false -> - lager:info("Data not yet correct: ~p mismatches", - [Num-NumGood]), - false - end - end, - MaxTime = rt_config:get(rt_max_wait_time), - Delay = 2000, % every two seconds until max time. - Retry = MaxTime div Delay, - case rt:wait_until(CheckFun, Retry, Delay) of - ok -> - lager:info("Data is now correct. Yay!"); - fail -> - lager:error("AAE failed to fix data"), - ?assertEqual(aae_fixed_data, aae_failed_to_fix_data) - end, - riakc_pb_socket:stop(PB), - ok. - -merge_values(O) -> - Vals = riak_object:get_values(O), - lists:foldl(fun(NV, V) -> - case size(NV) > size(V) of - true -> NV; - _ -> V - end - end, <<>>, Vals). - -verify_replicas(Node, B, K, V, N) -> - Replies = [rt:get_replica(Node, B, K, I, N) - || I <- lists:seq(1,N)], - Vals = [merge_values(O) || {ok, O} <- Replies], - Expected = [V || _ <- lists:seq(1, N)], - Vals == Expected. - -test_single_partition_loss(Node, Partition, KeyValues) - when is_atom(Node), is_integer(Partition) -> - lager:info("Verify recovery from the loss of partition ~p", [Partition]), - wipe_out_partition(Node, Partition), - restart_vnode(Node, riak_kv, Partition), - verify_data(Node, KeyValues). - -test_aae_partition_loss(Node, Partition, KeyValues) - when is_atom(Node), is_integer(Partition) -> - lager:info("Verify recovery from the loss of AAE data for partition ~p", [Partition]), - wipe_out_aae_data(Node, Partition), - restart_vnode(Node, riak_kv, Partition), - verify_data(Node, KeyValues). - -test_total_partition_loss(Node, Partition, KeyValues) - when is_atom(Node), is_integer(Partition) -> - lager:info("Verify recovery from the loss of AAE and KV data for partition ~p", [Partition]), - wipe_out_partition(Node, Partition), - wipe_out_aae_data(Node, Partition), - restart_vnode(Node, riak_kv, Partition), - verify_data(Node, KeyValues). - -test_less_than_n_writes(Node, KeyValues) -> - lager:info("Writing ~p objects with N=1, AAE should ensure they end up" - " with ~p replicas", [length(KeyValues), ?N_VAL]), - write_data(Node, KeyValues, [{n_val, 1}]), - verify_data(Node, KeyValues). - -test_less_than_n_mods(Node, KeyValues) -> - lager:info("Modifying only one replica for ~p objects. AAE should ensure" - " all replicas end up modified", [length(KeyValues)]), - write_data(Node, KeyValues, [{n_val, 1}]), - verify_data(Node, KeyValues). - -wipe_out_partition(Node, Partition) -> - lager:info("Wiping out partition ~p in node ~p", [Partition, Node]), - rt:clean_data_dir(Node, dir_for_partition(Partition)), - ok. - -wipe_out_aae_data(Node, Partition) -> - lager:info("Wiping out AAE data for partition ~p in node ~p", [Partition, Node]), - rt:clean_data_dir(Node, "anti_entropy/"++integer_to_list(Partition)), - ok. - -base_dir_for_backend(undefined) -> - base_dir_for_backend(bitcask); -base_dir_for_backend(bitcask) -> - "bitcask"; -base_dir_for_backend(eleveldb) -> - "leveldb". 
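%% The size-based sibling resolution in merge_values/1 above is easy to
%% check in isolation. A minimal sketch; longest_value/1 and the sample
%% values are hypothetical, not part of this test:
longest_value(Vals) ->
    %% Same fold as merge_values/1: keep whichever binary is larger.
    lists:foldl(fun(NV, V) when size(NV) > size(V) -> NV;
                   (_NV, V) -> V
                end, <<>>, Vals).
%% longest_value([<<"K0001">>, <<"K0001a">>]) =:= <<"K0001a">>, which is
%% why the suffixed values written by the "mods" tests win once AAE has
%% repaired every replica.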
- -restart_vnode(Node, Service, Partition) -> - VNodeName = list_to_atom(atom_to_list(Service) ++ "_vnode"), - {ok, Pid} = rpc:call(Node, riak_core_vnode_manager, get_vnode_pid, - [Partition, VNodeName]), - ?assert(rpc:call(Node, erlang, exit, [Pid, kill_for_test])), - Mon = monitor(process, Pid), - receive - {'DOWN', Mon, _, _, _} -> - ok - after - rt_config:get(rt_max_wait_time) -> - lager:error("VNode for partition ~p did not die, the bastard", - [Partition]), - ?assertEqual(vnode_killed, {failed_to_kill_vnode, Partition}) - end, - {ok, NewPid} = rpc:call(Node, riak_core_vnode_manager, get_vnode_pid, - [Partition, VNodeName]), - lager:info("Vnode for partition ~p restarted as ~p", - [Partition, NewPid]). - -dir_for_partition(Partition) -> - TestMetaData = riak_test_runner:metadata(), - KVBackend = proplists:get_value(backend, TestMetaData), - BaseDir = base_dir_for_backend(KVBackend), - filename:join([BaseDir, integer_to_list(Partition)]). - -% @doc True if the AAE stats report zero data repairs for last exchange -% across the board. -wait_until_no_aae_repairs(Nodes) -> - lager:info("Verifying AAE repairs go away without activity"), - rt:wait_until(fun() -> no_aae_repairs(Nodes) end). - -no_aae_repairs(Nodes) when is_list(Nodes) -> - MaxCount = max_aae_repairs(Nodes), - lager:info("Max AAE repair count across the board is ~p", [MaxCount]), - MaxCount == 0. - -max_aae_repairs(Nodes) when is_list(Nodes) -> - MaxCount = lists:max([max_aae_repairs(Node) || Node <- Nodes]), - MaxCount; -max_aae_repairs(Node) when is_atom(Node) -> - Info = rpc:call(Node, riak_kv_entropy_info, compute_exchange_info, []), - LastCounts = [Last || {_, _, _, {Last, _, _, _}} <- Info], - MaxCount = lists:max(LastCounts), - MaxCount. diff --git a/tests/verify_api_timeouts.erl b/tests/verify_api_timeouts.erl deleted file mode 100644 index 0a6851cfd..000000000 --- a/tests/verify_api_timeouts.erl +++ /dev/null @@ -1,245 +0,0 @@ - --module(verify_api_timeouts). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"listkeys_bucket">>). --define(NUM_BUCKETS, 1200). --define(NUM_KEYS, 1000). 
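%% Context for the confirm/0 below: rt_intercept:add/2 replaces the given
%% {Function, Arity} pairs in a target module with intercept implementations
%% loaded onto the node, so here every get/put FSM prepare and every
%% coverage fold is artificially slowed, making the short client-side
%% timeouts fire deterministically. Shape of one spec, as used below:
%%   rt_intercept:add(Node, {riak_kv_get_fsm, [{{prepare, 2}, slow_prepare}]})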
- -confirm() -> - %% test requires allow_mult=false b/c of rt:systest_read - [Node] = rt:build_cluster(1), - rt:wait_until_pingable(Node), - - HC = rt:httpc(Node), - lager:info("setting up initial data and loading remote code"), - rt:httpc_write(HC, <<"foo">>, <<"bar">>, <<"foobarbaz\n">>), - rt:httpc_write(HC, <<"foo">>, <<"bar2">>, <<"foobarbaz2\n">>), - - put_keys(Node, ?BUCKET, ?NUM_KEYS), - put_buckets(Node, ?NUM_BUCKETS), - timer:sleep(2000), - - rt_intercept:add(Node, {riak_kv_get_fsm, - [{{prepare,2}, slow_prepare}]}), - rt_intercept:add(Node, {riak_kv_put_fsm, - [{{prepare,2}, slow_prepare}]}), - rt_intercept:add(Node, {riak_kv_vnode, - [{{handle_coverage,4}, slow_handle_coverage}]}), - - - lager:info("testing HTTP API"), - - lager:info("testing GET timeout"), - {error, Tup1} = rhc:get(HC, <<"foo">>, <<"bar">>, [{timeout, 100}]), - ?assertMatch({ok, "503", _, <<"request timed out\n">>}, Tup1), - - lager:info("testing PUT timeout"), - {error, Tup2} = rhc:put(HC, riakc_obj:new(<<"foo">>, <<"bar">>, - <<"getgetgetgetget\n">>), - [{timeout, 100}]), - ?assertMatch({ok, "503", _, <<"request timed out\n">>}, Tup2), - - lager:info("testing DELETE timeout"), - {error, Tup3} = rhc:delete(HC, <<"foo">>, <<"bar">>, [{timeout, 100}]), - ?assertMatch({ok, "503", _, <<"request timed out\n">>}, Tup3), - - lager:info("testing invalid timeout value"), - {error, Tup4} = rhc:get(HC, <<"foo">>, <<"bar">>, [{timeout, asdasdasd}]), - ?assertMatch({ok, "400", _, - <<"Bad timeout value \"asdasdasd\"\n">>}, - Tup4), - - lager:info("testing GET still works before long timeout"), - {ok, O} = rhc:get(HC, <<"foo">>, <<"bar">>, [{timeout, 4000}]), - - %% either of these are potentially valid. - case riakc_obj:get_values(O) of - [<<"foobarbaz\n">>] -> - lager:info("Original Value"), - ok; - [<<"getgetgetgetget\n">>] -> - lager:info("New Value"), - ok; - [_A, _B] = L -> - ?assertEqual([<<"foobarbaz\n">>,<<"getgetgetgetget\n">>], - lists:sort(L)), - lager:info("Both Values"), - ok; - V -> ?assertEqual({object_value, <<"getgetgetgetget\n">>}, - {object_value, V}) - end, - - - PC = rt:pbc(Node), - - lager:info("testing PBC API"), - - BOOM = {error, <<"timeout">>}, - - lager:info("testing GET timeout"), - PGET = riakc_pb_socket:get(PC, <<"foo">>, <<"bar2">>, [{timeout, 100}]), - ?assertEqual(BOOM, PGET), - - lager:info("testing PUT timeout"), - PPUT = riakc_pb_socket:put(PC, - riakc_obj:new(<<"foo">>, <<"bar2">>, - <<"get2get2get2get2get\n">>), - [{timeout, 100}]), - ?assertEqual(BOOM, PPUT), - - lager:info("testing DELETE timeout"), - PDEL = riakc_pb_socket:delete(PC, <<"foo">>, <<"bar2">>, - [{timeout, 100}]), - ?assertEqual(BOOM, PDEL), - - lager:info("testing invalid timeout value"), - ?assertError(badarg, riakc_pb_socket:get(PC, <<"foo">>, <<"bar2">>, - [{timeout, asdasdasd}])), - - lager:info("testing GET still works before long timeout"), - {ok, O2} = riakc_pb_socket:get(PC, <<"foo">>, <<"bar2">>, - [{timeout, 4000}]), - - %% either of these are potentially valid. 
- case riakc_obj:get_values(O2) of - [<<"get2get2get2get2get\n">>] -> - lager:info("New Value"), - ok; - [<<"foobarbaz2\n">>] -> - lager:info("Original Value"), - ok; - [_A2, _B2] = L2 -> - ?assertEqual([<<"foobarbaz2\n">>, <<"get2get2get2get2get\n">>], - lists:sort(L2)), - lager:info("Both Values"), - ok; - V2 -> ?assertEqual({object_value, <<"get2get2get2get2get\n">>}, - {object_value, V2}) - end, - - - Long = 1000000, - Short = 1000, - - lager:info("Checking List timeouts"), - - lager:info("Checking PBC"), - Pid = rt:pbc(Node), - lager:info("Checking keys timeout"), - ?assertMatch({error, <<"timeout">>}, - riakc_pb_socket:list_keys(Pid, ?BUCKET, Short)), - lager:info("Checking keys w/ long timeout"), - ?assertMatch({ok, _}, - riakc_pb_socket:list_keys(Pid, ?BUCKET, Long)), - lager:info("Checking stream keys timeout"), - {ok, ReqId0} = riakc_pb_socket:stream_list_keys(Pid, ?BUCKET, Short), - wait_for_error(ReqId0), - lager:info("Checking stream keys works w/ long timeout"), - {ok, ReqId8} = riakc_pb_socket:stream_list_keys(Pid, ?BUCKET, Long), - wait_for_end(ReqId8), - - lager:info("Checking buckets timeout"), - ?assertMatch({error, <<"timeout">>}, - riakc_pb_socket:list_buckets(Pid, Short)), - lager:info("Checking buckets w/ long timeout"), - ?assertMatch({ok, _}, - riakc_pb_socket:list_buckets(Pid, Long)), - lager:info("Checking stream buckets timeout"), - {ok, ReqId1} = riakc_pb_socket:stream_list_buckets(Pid, Short), - wait_for_error(ReqId1), - lager:info("Checking stream buckets works w/ long timeout"), - {ok, ReqId7} = riakc_pb_socket:stream_list_buckets(Pid, Long), - wait_for_end(ReqId7), - - - lager:info("Checking HTTP"), - LHC = rt:httpc(Node), - lager:info("Checking keys timeout"), - ?assertMatch({error, <<"timeout">>}, - rhc:list_keys(LHC, ?BUCKET, Short)), - lager:info("Checking keys w/ long timeout"), - ?assertMatch({ok, _}, - rhc:list_keys(LHC, ?BUCKET, Long)), - lager:info("Checking stream keys timeout"), - {ok, ReqId2} = rhc:stream_list_keys(LHC, ?BUCKET, Short), - wait_for_error(ReqId2), - lager:info("Checking stream keys works w/ long timeout"), - {ok, ReqId4} = rhc:stream_list_keys(LHC, ?BUCKET, Long), - wait_for_end(ReqId4), - - lager:info("Checking buckets timeout"), - ?assertMatch({error, <<"timeout">>}, - rhc:list_buckets(LHC, Short)), - lager:info("Checking buckets w/ long timeout"), - ?assertMatch({ok, _}, - rhc:list_buckets(LHC, Long)), - lager:info("Checking stream buckets timeout"), - {ok, ReqId3} = rhc:stream_list_buckets(LHC, Short), - wait_for_error(ReqId3), - lager:info("Checking stream buckets works w/ long timeout"), - {ok, ReqId5} = rhc:stream_list_buckets(LHC, Long), - wait_for_end(ReqId5), - - - - - pass. - - -wait_for_error(ReqId) -> - receive - {ReqId, done} -> - lager:error("stream incorrectly finished"), - error(stream_finished); - {ReqId, {error, <<"timeout">>}} -> - lager:info("stream correctly timed out"), - ok; - {ReqId, {_Key, _Vals}} -> - %% the line below is spammy but nice for debugging - %%{ReqId, {Key, Vals}} -> - %%lager:info("Got some values ~p, ~p", [Key, Vals]), - wait_for_error(ReqId); - {ReqId, Other} -> - error({unexpected_message, Other}) - after 10000 -> - error(error_stream_recv_timed_out) - end. 
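%% The same receive protocol can collect a stream instead of asserting on
%% it. A minimal sketch; collect_stream/2 is hypothetical and assumes each
%% chunk payload is a list, as with the keys/buckets streams above.
collect_stream(ReqId, Acc) ->
    receive
        {ReqId, done} ->
            {ok, lists:append(lists:reverse(Acc))};
        {ReqId, {error, Reason}} ->
            {error, Reason};
        {ReqId, {_Type, Chunk}} ->
            collect_stream(ReqId, [Chunk | Acc]);
        {ReqId, Other} ->
            {error, {unexpected_message, Other}}
    after 10000 ->
            {error, stream_recv_timeout}
    end.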
-
-wait_for_end(ReqId) ->
-    receive
-        {ReqId, done} ->
-            lager:info("stream correctly finished"),
-            ok;
-        {ReqId, {error, <<"timeout">>}} ->
-            lager:error("stream incorrectly timed out"),
-            error(stream_timed_out);
-        {ReqId, {_Key, _Vals}} ->
-            %% the line below is spammy but nice for debugging
-            %%{ReqId, {Key, Vals}} ->
-            %%lager:info("Got some values ~p, ~p", [Key, Vals]),
-            wait_for_end(ReqId);
-        {ReqId, Other} ->
-            error({unexpected_message, Other})
-    after 10000 ->
-            error(error_stream_recv_timed_out)
-    end.
-
-
-put_buckets(Node, Num) ->
-    Pid = rt:pbc(Node),
-    Buckets = [list_to_binary(["", integer_to_list(Ki)])
-               || Ki <- lists:seq(0, Num - 1)],
-    {Key, Val} = {<<"test_key">>, <<"test_value">>},
-    [riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Val))
-     || Bucket <- Buckets],
-    riakc_pb_socket:stop(Pid).
-
-
-put_keys(Node, Bucket, Num) ->
-    Pid = rt:pbc(Node),
-    Keys = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)],
-    Vals = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)],
-    [riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Val)) || {Key, Val} <- lists:zip(Keys, Vals)],
-    riakc_pb_socket:stop(Pid).
diff --git a/tests/verify_asis_put.erl b/tests/verify_asis_put.erl
deleted file mode 100644
index d34439c0f..000000000
--- a/tests/verify_asis_put.erl
+++ /dev/null
@@ -1,40 +0,0 @@
--module(verify_asis_put).
--include_lib("eunit/include/eunit.hrl").
--export([confirm/0]).
-
-confirm() ->
-    %% 1. Deploy two nodes
-    [Node1, Node2] = rt:deploy_nodes(2),
-    %% 2. With PBC
-    lager:info("Put new object in ~p via PBC.", [Node1]),
-    PB1 = rt:pbc(Node1),
-    PB2 = rt:pbc(Node2),
-    Obj1 = riakc_obj:new(<<"verify_asis_put">>, <<"1">>, <<"test">>, "text/plain"),
-    %% a. put in node 1
-    %% b. fetch from node 1 for vclock
-    {ok, Obj1a} = riakc_pb_socket:put(PB1, Obj1, [return_body]),
-    %% c. put asis in node 2
-    %% d. fetch from node 2, check vclock is same
-    lager:info("Put object asis in ~p via PBC.", [Node2]),
-    {ok, Obj1b} = riakc_pb_socket:put(PB2, Obj1a, [asis, return_body]),
-    lager:info("Check vclock equality after asis put (PBC)."),
-    ?assertEqual({vclock_equal, riakc_obj:vclock(Obj1a)},
-                 {vclock_equal, riakc_obj:vclock(Obj1b)}),
-
-    %% 3. Repeat with HTTP, nodes reversed
-    lager:info("Put new object in ~p via HTTP.", [Node2]),
-    HTTP1 = rt:httpc(Node1),
-    HTTP2 = rt:httpc(Node2),
-    Obj2 = riakc_obj:new(<<"verify_asis_put">>, <<"2">>, <<"test">>, "text/plain"),
-    %% a. put in node 2
-    %% b. fetch from node 2 for vclock
-    {ok, Obj2a} = rhc:put(HTTP2, Obj2, [return_body]),
-    %% c. put asis in node 1
-    %% d. fetch from node 1, check vclock is same
-    lager:info("Put object asis in ~p via HTTP.", [Node1]),
-    {ok, Obj2b} = rhc:put(HTTP1, Obj2a, [asis, return_body]),
-    lager:info("Check vclock equality after asis put (HTTP)."),
-    ?assertEqual({vclock_equal, riakc_obj:vclock(Obj2a)},
-                 {vclock_equal, riakc_obj:vclock(Obj2b)}),
-
-    pass.
diff --git a/tests/verify_backup_restore.erl b/tests/verify_backup_restore.erl
deleted file mode 100644
index b587160ad..000000000
--- a/tests/verify_backup_restore.erl
+++ /dev/null
@@ -1,292 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Verifies the functionality of the riak-admin backup and restore
-%% commands. Restore re-puts the data stored by backup. Notice that this does
-%% not mean the data is restored to what it was. Newer data may prevail
-%% depending on the configuration (last write wins, vector clocks used, etc).
-
--module(verify_backup_restore).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("riakc/include/riakc.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--define(NUM_NODES, 4).
--define(NUM_KEYS, 1000).
--define(NUM_DEL, 100).
--define(NUM_MOD, 100).
--define(SEARCH_BUCKET, <<"search_bucket">>).
-
-confirm() ->
-    lager:info("Building cluster of ~p nodes", [?NUM_NODES]),
-    SpamDir = rt_config:config_or_os_env(spam_dir),
-    Config = [{riak_search, [{enabled, true}]}],
-    [Node0 | _RestNodes] = Nodes = rt:build_cluster(?NUM_NODES, Config),
-    rt:enable_search_hook(Node0, ?SEARCH_BUCKET),
-    rt:wait_until_ring_converged(Nodes),
-    PbcPid = rt:pbc(Node0),
-    Searches =
-        [
-         {<<"ZiaSun">>, 1},
-         {<<"headaches">>, 4},
-         {<<"YALSP">>, 3},
-         {<<"mister">>, 0},
-         {<<"prohibiting">>, 5},
-         {<<"mx.example.net">>, 187}
-        ],
-    EmptySearches = [ {Term, 0} || {Term, _Count} <- Searches],
-    ConcatBin = fun({T, _}, Acc) -> <<T/binary, " ", Acc/binary>> end,
-    AllTerms = lists:foldl(ConcatBin, <<"">>, Searches),
-
-    lager:info("Indexing data for search from ~p", [SpamDir]),
-    rt:pbc_put_dir(PbcPid, ?SEARCH_BUCKET, SpamDir),
-    ExtraKey = <<"Extra1">>,
-    riakc_pb_socket:put(PbcPid,
-                        riakc_obj:new(?SEARCH_BUCKET,
-                                      ExtraKey,
-                                      AllTerms)),
-
-    lager:info("Writing some data to the cluster"),
-    write_some(PbcPid, [{last, ?NUM_KEYS}]),
-
-    lager:info("Verifying data made it in"),
-    rt:wait_until_no_pending_changes(Nodes),
-    verify_searches(PbcPid, Searches, 1),
-    [?assertEqual([], read_some(Node, [{last, ?NUM_KEYS}])) || Node <- Nodes],
-
-    BackupFile = filename:join([rt_config:get(rt_scratch_dir), "TestBackup.bak"]),
-    case filelib:is_regular(BackupFile) of
-        true ->
-            lager:info("Deleting current backup file at ~p", [BackupFile]),
-            ?assertMatch(ok, file:delete(BackupFile));
-        _ -> ok
-    end,
-
-    lager:info("Backing up the data to ~p", [BackupFile]),
-    Cookie = "riak",
-    rt:admin(Node0, ["backup", atom_to_list(Node0), Cookie, BackupFile, "all"]),
-
-    lager:info("Modifying data on cluster"),
-    ModF = fun(N) ->
-                   <<"MOD_V_", (i2b(N))/binary>>
-           end,
-    lager:info("Modifying another ~p keys (mods will persist after backup)",
-               [?NUM_MOD]),
-    write_some(PbcPid, [{delete, true},
-                        {last, ?NUM_MOD},
-                        {vfun, ModF}]),
-    lager:info("Deleting ~p keys", [?NUM_DEL]),
-    delete_some(PbcPid, [{first, ?NUM_MOD+1},
-                         {last, ?NUM_MOD+?NUM_DEL}]),
-    lager:info("Deleting extra search doc"),
-    riakc_pb_socket:delete(PbcPid, ?SEARCH_BUCKET, ExtraKey),
-    rt:wait_until(fun() -> rt:pbc_really_deleted(PbcPid,
-                                                 ?SEARCH_BUCKET,
-                                                 [ExtraKey])
-                  end),
-
-    lager:info("Verifying data has changed"),
-    [?assertEqual([], read_some(Node, [{last, ?NUM_MOD},
-                                       {vfun, ModF}]))
-     || Node <- Nodes],
-    [?assertEqual([], read_some(Node, [{first, ?NUM_MOD+1},
-                                       {last, ?NUM_MOD+?NUM_DEL},
-                                       {expect, deleted}]))
-     || Node <- Nodes],
-    verify_searches(PbcPid, Searches, 0),
-
-    lager:info("Restoring from backup ~p", [BackupFile]),
-    rt:admin(Node0, ["restore", atom_to_list(Node0), Cookie, BackupFile]),
-    rt:wait_until_no_pending_changes(Nodes),
-
-    %% When allow_mult=false, the mods overwrite the restored data. When
-    %% allow_mult=true, a sibling is generated with the original
-    %% data, and a divergent vclock. Verify that both objects exist.
-    lager:info("Verifying that deleted data is back, mods are still in"),
-    [?assertEqual([], read_some(Node, [{siblings, true},
-                                       {last, ?NUM_MOD},
-                                       {vfun, ModF}]))
-     || Node <- Nodes],
-    [?assertEqual([], read_some(Node, [{siblings, true},
-                                       {first, ?NUM_MOD+1},
-                                       {last, ?NUM_KEYS}]))
-     || Node <- Nodes],
-
-    lager:info("Verifying deleted search results are back"),
-    verify_searches(PbcPid, Searches, 1),
-
-    lager:info("Wipe out entire cluster and start fresh"),
-    riakc_pb_socket:stop(PbcPid),
-    rt:clean_cluster(Nodes),
-    lager:info("Rebuilding the cluster"),
-    rt:build_cluster(?NUM_NODES, Config),
-    rt:enable_search_hook(Node0, ?SEARCH_BUCKET),
-    rt:wait_until_ring_converged(Nodes),
-    rt:wait_until_no_pending_changes(Nodes),
-    PbcPid2 = rt:pbc(Node0),
-
-    lager:info("Verify no data in cluster"),
-    [?assertEqual([], read_some(Node, [{last, ?NUM_KEYS},
-                                       {expect, deleted}]))
-     || Node <- Nodes],
-    verify_searches(PbcPid2, EmptySearches, 0),
-
-    lager:info("Restoring from backup ~p again", [BackupFile]),
-    rt:admin(Node0, ["restore", atom_to_list(Node0), Cookie, BackupFile]),
-    rt:enable_search_hook(Node0, ?SEARCH_BUCKET),
-
-    lager:info("Verifying data is back to original backup"),
-    rt:wait_until_no_pending_changes(Nodes),
-    verify_searches(PbcPid2, Searches, 1),
-    [?assertEqual([], read_some(Node, [{last, ?NUM_KEYS}])) || Node <- Nodes],
-
-    lager:info("C'est tout mon ami"),
-    riakc_pb_socket:stop(PbcPid2),
-    pass.
-
-
-verify_searches(PbcPid, Searches, Offset) ->
-    [verify_search_count(PbcPid, T, C + Offset)
-     || {T, C} <- Searches ].
-
-i2b(N) when is_integer(N) ->
-    list_to_binary(integer_to_list(N)).
-
-default_kfun(N) when is_integer(N) ->
-    <<"K_",(i2b(N))/binary>>.
-
-default_vfun(N) when is_integer(N) ->
-    <<"V_",(i2b(N))/binary>>.
-
-% @todo Maybe replace systest_write
-write_some(PBC, Props) ->
-    Bucket = proplists:get_value(bucket, Props, <<"test_bucket">>),
-    Start = proplists:get_value(first, Props, 0),
-    End = proplists:get_value(last, Props, 1000),
-    Del = proplists:get_value(delete, Props, false),
-    KFun = proplists:get_value(kfun, Props, fun default_kfun/1),
-    VFun = proplists:get_value(vfun, Props, fun default_vfun/1),
-
-    Keys = [{KFun(N), VFun(N), N} || N <- lists:seq(Start, End)],
-    Keys1 = [Key || {Key, _, _} <- Keys],
-
-    case Del of
-        true ->
-            DelFun =
-                fun({K, _, _}, Acc) ->
-                        case riakc_pb_socket:delete(PBC, Bucket, K) of
-                            ok -> Acc;
-                            _ -> [{error, {could_not_delete, K}} | Acc]
-                        end
-                end,
-            ?assertEqual([], lists:foldl(DelFun, [], Keys)),
-            rt:wait_until(fun() -> rt:pbc_really_deleted(PBC, Bucket, Keys1) end);
-        _ ->
-            ok
-    end,
-
-    PutFun = fun({K, V, N}, Acc) ->
-                     Obj = riakc_obj:new(Bucket, K, V),
-                     case riakc_pb_socket:put(PBC, Obj) of
-                         ok ->
-                             Acc;
-                         Other ->
-                             [{N, Other} | Acc]
-                     end
-             end,
-    ?assertEqual([], lists:foldl(PutFun, [], Keys)).
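%% write_some/2 is driven entirely by its proplist. A usage sketch with
%% hypothetical values, re-keying the generator while keeping the default
%% value function:
%%   write_some(PbcPid, [{bucket, <<"other_bucket">>},
%%                       {first, 1}, {last, 50},
%%                       {kfun, fun(N) -> <<"ALT_K_", (i2b(N))/binary>> end}])
%% Adding {delete, true} deletes each key (and waits until it is really
%% gone) before the fresh put, so the new objects carry no prior vclocks.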
- -% @todo Maybe replace systest_read -read_some(Node, Props) -> - Bucket = proplists:get_value(bucket, Props, <<"test_bucket">>), - R = proplists:get_value(r, Props, 2), - Start = proplists:get_value(first, Props, 0), - End = proplists:get_value(last, Props, 1000), - Expect = proplists:get_value(expect, Props, exists), - KFun = proplists:get_value(kfun, Props, fun default_kfun/1), - VFun = proplists:get_value(vfun, Props, fun default_vfun/1), - Siblings = proplists:get_value(siblings, Props, false), - - {ok, C} = riak:client_connect(Node), - F = - fun(N, Acc) -> - K = KFun(N), - case Expect of - exists -> - case C:get(Bucket, K, R) of - {ok, Obj} -> - Val = VFun(N), - case Siblings of - true -> - Values = riak_object:get_values(Obj), - case lists:member(Val, Values) of - true -> - Acc; - false -> - [{N, {val_not_member, Values, expected, Val}} - | Acc] - end; - false -> - case riak_object:get_value(Obj) of - Val -> - Acc; - WrongVal -> - [{N, {wrong_val, WrongVal, expected, Val}} - | Acc] - end - end; - Other -> - [{N, Other} | Acc] - end; - deleted -> - case C:get(Bucket, K, R) of - {error, notfound} -> - Acc; - Other -> - [{N, {not_deleted, Other}} | Acc] - end - end - end, - lists:foldl(F, [], lists:seq(Start, End)). - -% @todo Maybe replace systest_read -delete_some(PBC, Props) -> - Bucket = proplists:get_value(bucket, Props, <<"test_bucket">>), - Start = proplists:get_value(first, Props, 0), - End = proplists:get_value(last, Props, 1000), - KFun = proplists:get_value(kfun, Props, fun default_kfun/1), - - Keys = [KFun(N) || N <- lists:seq(Start, End)], - F = - fun(K, Acc) -> - case riakc_pb_socket:delete(PBC, Bucket, K) of - ok -> Acc; - _ -> [{error, {could_not_delete, K}} | Acc] - end - end, - lists:foldl(F, [], Keys), - rt:wait_until(fun() -> rt:pbc_really_deleted(PBC, Bucket, Keys) end), - ok. - -verify_search_count(Pid, SearchQuery, Count) -> - {ok, #search_results{num_found=NumFound}} = riakc_pb_socket:search(Pid, ?SEARCH_BUCKET, SearchQuery), - lager:info("Found ~p search results for query ~p", [NumFound, SearchQuery]), - ?assertEqual(Count, NumFound). diff --git a/tests/verify_basic_upgrade.erl b/tests/verify_basic_upgrade.erl deleted file mode 100644 index f02a7cc11..000000000 --- a/tests/verify_basic_upgrade.erl +++ /dev/null @@ -1,48 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_basic_upgrade). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). 
- -confirm() -> - TestMetaData = riak_test_runner:metadata(), - OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - - Nodes = [Node1|_] = rt:build_cluster([OldVsn, OldVsn, OldVsn, OldVsn]), - - lager:info("Writing 100 keys to ~p", [Node1]), - rt:systest_write(Node1, 100, 3), - ?assertEqual([], rt:systest_read(Node1, 100, 1)), - - [upgrade(Node, current) || Node <- Nodes], - - %% Umm.. technically, it'd downgrade - [upgrade(Node, OldVsn) || Node <- Nodes], - pass. - -upgrade(Node, NewVsn) -> - lager:info("Upgrading ~p to ~p", [Node, NewVsn]), - rt:upgrade(Node, NewVsn), - rt:wait_for_service(Node, riak_kv), - lager:info("Ensuring keys still exist"), - rt:systest_read(Node, 100, 1), - ?assertEqual([], rt:systest_read(Node, 100, 1)), - ok. diff --git a/tests/verify_bdp_event_handler.erl b/tests/verify_bdp_event_handler.erl deleted file mode 100644 index ac800d329..000000000 --- a/tests/verify_bdp_event_handler.erl +++ /dev/null @@ -1,133 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(verify_bdp_event_handler). - --behaviour(gen_event). - -%% API --export([add_handler/1]). - -%% gen_event callbacks --export([init/1, handle_event/2, handle_call/2, - handle_info/2, terminate/2, code_change/3]). - --record(state, { - test_pid :: pid() - }). - -%%%=================================================================== -%%% gen_event callbacks -%%%=================================================================== - -add_handler(Pid) -> - gen_event:add_handler(riak_sysmon_handler, ?MODULE, Pid). - -%%%=================================================================== -%%% gen_event callbacks -%%%=================================================================== - -%%-------------------------------------------------------------------- -%% @private -%% @doc -%% Whenever a new event handler is added to an event manager, -%% this function is called to initialize the event handler. -%% -%% @spec init(Args) -> {ok, State} -%% @end -%%-------------------------------------------------------------------- -init(Pid) -> - {ok, #state{test_pid=Pid}}. - -%%-------------------------------------------------------------------- -%% @private -%% @doc -%% Whenever an event manager receives an event sent using -%% gen_event:notify/2 or gen_event:sync_notify/2, this function is -%% called for each installed event handler to handle the event. -%% -%% @spec handle_event(Event, State) -> -%% {ok, State} | -%% {swap_handler, Args1, State1, Mod2, Args2} | -%% remove_handler -%% @end -%%-------------------------------------------------------------------- -handle_event({monitor, _, busy_dist_port, {_Port, _}}, State) -> - %% notify test busy_dist_port event fired - State#state.test_pid ! go, - {ok, State}; -handle_event(_Event, State) -> - {ok, State}. 
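%% Usage sketch (the driver code is hypothetical): a test installs this
%% handler with its own pid, provokes distribution back-pressure, and
%% blocks until the busy_dist_port clause above forwards `go':
%%   ok = verify_bdp_event_handler:add_handler(self()),
%%   %% ... generate heavy inter-node traffic here ...
%%   receive go -> ok after 60000 -> error(no_busy_dist_port_event) end.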
- -%%-------------------------------------------------------------------- -%% @private -%% @doc -%% Whenever an event manager receives a request sent using -%% gen_event:call/3,4, this function is called for the specified -%% event handler to handle the request. -%% -%% @spec handle_call(Request, State) -> -%% {ok, Reply, State} | -%% {swap_handler, Reply, Args1, State1, Mod2, Args2} | -%% {remove_handler, Reply} -%% @end -%%-------------------------------------------------------------------- -handle_call(_, State) -> - {ok, ok, State}. - -%%-------------------------------------------------------------------- -%% @private -%% @doc -%% This function is called for each installed event handler when -%% an event manager receives any other message than an event or a -%% synchronous request (or a system message). -%% -%% @spec handle_info(Info, State) -> -%% {ok, State} | -%% {swap_handler, Args1, State1, Mod2, Args2} | -%% remove_handler -%% @end -%%-------------------------------------------------------------------- -handle_info(_Info, State) -> - {ok, State}. - -%%-------------------------------------------------------------------- -%% @private -%% @doc -%% Whenever an event handler is deleted from an event manager, this -%% function is called. It should be the opposite of Module:init/1 and -%% do any necessary cleaning up. -%% -%% @spec terminate(Reason, State) -> void() -%% @end -%%-------------------------------------------------------------------- -terminate(_Reason, _State) -> - ok. - -%%-------------------------------------------------------------------- -%% @private -%% @doc -%% Convert process state when code is changed -%% -%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState} -%% @end -%%-------------------------------------------------------------------- -code_change(_OldVsn, State, _Extra) -> - {ok, State}. diff --git a/tests/verify_bitcask_tombstone2_upgrade.erl b/tests/verify_bitcask_tombstone2_upgrade.erl deleted file mode 100644 index 72543e2c8..000000000 --- a/tests/verify_bitcask_tombstone2_upgrade.erl +++ /dev/null @@ -1,91 +0,0 @@ -% @doc Verify that upgrading Riak with Bitcask to 2.0 or later will trigger -% an upgrade mechanism that will end up merging all existing bitcask files. -% This is necessary so that old style tombstones are reaped, which might -% otherwise stay around for a very long time. This version writes tombstones -% that can be safely dropped during a merge. Bitcask could resurrect old -% values easily when reaping tombstones during a partial merge if a -% restart happened later. --module(verify_bitcask_tombstone2_upgrade). --behaviour(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - TestMetaData = riak_test_runner:metadata(), - Backend = proplists:get_value(backend, TestMetaData), - lager:info("Running with backend (this better be Bitcask!) ~p", [Backend]), - ?assertEqual({backend, bitcask}, {backend, Backend}), - OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - % Configure for fast merge checks - Config = [{riak_kv, [{bitcask_merge_check_interval, 2000}]}, - {bitcask, [{max_file_size, 100}]}], - Nodes = rt:build_cluster([{OldVsn, Config}]), - verify_bitcask_tombstone2_upgrade(Nodes), - pass. 
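confirm/0 above pins the whole cluster to an old version while tightening the merge settings so the post-upgrade merge becomes observable quickly. The same versioned-config pattern in isolation, as a sketch (the version atom and config values are illustrative):

build_old_bitcask_node() ->
    %% Small max_file_size forces many bitcask files; the short merge
    %% check interval makes the upgrade-triggered merge happen fast.
    Config = [{riak_kv,  [{bitcask_merge_check_interval, 2000}]},
              {bitcask,  [{max_file_size, 100}]}],
    [Node] = rt:build_cluster([{previous, Config}]),
    Node.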
- -% Expects nodes running a version of Riak < 2.0 using Bitcask -verify_bitcask_tombstone2_upgrade(Nodes) -> - lager:info("Write some data, write it good"), - write_some_data(Nodes), - lager:info("Collect the list of bitcask files created"), - BitcaskFiles = list_bitcask_files(Nodes), - lager:info("Now update the node to the current version"), - [rt:upgrade(Node, current) || Node <- Nodes], - lager:info("And wait until all the old files have been merged, the version upgrade finished"), - ?assertEqual(ok, rt:wait_until(upgrade_complete_fun(BitcaskFiles))), - lager:info("And that is that"). - -write_some_data([Node1 | _]) -> - rt:pbc_systest_write(Node1, 10000). - -list_bitcask_files(Nodes) -> - [{Node, list_node_bitcask_files(Node)} || Node <- Nodes]. - -list_node_bitcask_files(Node) -> - % Gather partitions owned, list *.bitcask.data on each. - Partitions = rt:partitions_for_node(Node), - {ok, DataDir} = rt:rpc_get_env(Node, [{bitcask, data_root}]), - [begin - IdxStr = integer_to_list(Idx), - IdxDir = filename:join(DataDir, IdxStr), - BitcaskPattern = filename:join([IdxDir, "*.bitcask.data"]), - Paths = rpc:call(Node, filelib, wildcard, [BitcaskPattern]), - ?assert(is_list(Paths)), - Files = [filename:basename(Path) || Path <- Paths], - {IdxDir, Files} - end || Idx <- Partitions]. - -upgrade_complete_fun(BitcaskFiles) -> - fun() -> - upgrade_complete(BitcaskFiles) - end. - -upgrade_complete(BitcaskFiles) -> - all(true, [upgrade_complete(Node, PFiles) - || {Node, PFiles} <- BitcaskFiles]). - -upgrade_complete(Node, PartitionFiles) -> - all(true,[upgrade_complete(Node, IdxDir, Files) - || {IdxDir, Files} <- PartitionFiles]). - -upgrade_complete(Node, IdxDir, Files) -> - % Check we have version.txt, no upgrade.txt, no merge.txt - MergeFile = filename:join(IdxDir, "merge.txt"), - UpgradeFile = filename:join(IdxDir, "upgrade.txt"), - VsnFile = filename:join(IdxDir, "version.txt"), - file_exists(Node, VsnFile) andalso - not file_exists(Node, UpgradeFile) andalso - not file_exists(Node, MergeFile) andalso - all(false, - [file_exists(Node, filename:join(IdxDir, F)) || F <- Files]). - -file_exists(Node, Path) -> - case rpc:call(Node, filelib, is_regular, [Path]) of - {badrpc, Reason} -> - throw({can_not_check_file, Node, Path, Reason}); - Result -> - Result - end. - -all(Val, L) -> - lists:all(fun(E) -> E == Val end, L). diff --git a/tests/verify_build_cluster.erl b/tests/verify_build_cluster.erl deleted file mode 100644 index e934eb434..000000000 --- a/tests/verify_build_cluster.erl +++ /dev/null @@ -1,107 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_build_cluster). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --import(rt, [wait_until_nodes_ready/1, - wait_until_no_pending_changes/1]). 
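upgrade_complete_fun/1 above packages the completion check as a closure for rt:wait_until/1, which polls until the predicate returns true. The same retry idiom reduced to its core, as a sketch (wait_until_no_files, Node and Glob are illustrative):

wait_until_no_files(Node, Glob) ->
    %% Poll the remote node until no files match the glob any more
    Pred = fun() ->
                   [] =:= rpc:call(Node, filelib, wildcard, [Glob])
           end,
    ok = rt:wait_until(Pred).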
-
-confirm() ->
-    %% test requires allow_mult=false b/c of rt:systest_read
-    rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]),
-    %% Deploy a set of new nodes
-    lager:info("Deploying 4 nodes"),
-    %% handoff_concurrency needs to be raised to make the leave operation faster.
-    %% most clusters go up to 10, but this one is one louder, isn't it?
-    [Node1, Node2, Node3, Node4] = Nodes = rt:deploy_nodes(4, [{riak_core, [{handoff_concurrency, 11}]}]),
-
-    %% Ensure each node owns 100% of its own ring
-    lager:info("Ensure each node owns 100% of its own ring"),
-
-    [rt:wait_until_owners_according_to(Node, [Node]) || Node <- Nodes],
-
-    lager:info("Loading some data up in this cluster."),
-    ?assertEqual([], rt:systest_write(Node1, 0, 1000, <<"verify_build_cluster">>, 2)),
-
-    lager:info("joining Node 2 to the cluster... It takes two to make a thing go right"),
-    rt:join(Node2, Node1),
-    wait_and_validate([Node1, Node2]),
-
-    lager:info("joining Node 3 to the cluster"),
-    rt:join(Node3, Node1),
-    wait_and_validate([Node1, Node2, Node3]),
-
-    lager:info("joining Node 4 to the cluster"),
-    rt:join(Node4, Node1),
-    wait_and_validate(Nodes),
-
-    lager:info("taking Node 1 down"),
-    rt:stop(Node1),
-    ?assertEqual(ok, rt:wait_until_unpingable(Node1)),
-    wait_and_validate(Nodes, [Node2, Node3, Node4]),
-
-    lager:info("taking Node 2 down"),
-    rt:stop(Node2),
-    ?assertEqual(ok, rt:wait_until_unpingable(Node2)),
-    wait_and_validate(Nodes, [Node3, Node4]),
-
-    lager:info("bringing Node 1 up"),
-    rt:start(Node1),
-    ok = rt:wait_until_pingable(Node1),
-    wait_and_validate(Nodes, [Node1, Node3, Node4]),
-    lager:info("bringing Node 2 up"),
-    rt:start(Node2),
-    ok = rt:wait_until_pingable(Node2),
-    wait_and_validate(Nodes),
-
-    % leave 1, 2, and 3
-    lager:info("leaving Node 1"),
-    rt:leave(Node1),
-    ?assertEqual(ok, rt:wait_until_unpingable(Node1)),
-    wait_and_validate([Node2, Node3, Node4]),
-
-    lager:info("leaving Node 2"),
-    rt:leave(Node2),
-    ?assertEqual(ok, rt:wait_until_unpingable(Node2)),
-    wait_and_validate([Node3, Node4]),
-
-    lager:info("leaving Node 3"),
-    rt:leave(Node3),
-    ?assertEqual(ok, rt:wait_until_unpingable(Node3)),
-
-    % verify 4
-    wait_and_validate([Node4]),
-
-    pass.
-
-wait_and_validate(Nodes) -> wait_and_validate(Nodes, Nodes).
-wait_and_validate(RingNodes, UpNodes) ->
-    lager:info("Wait until all nodes are ready and there are no pending changes"),
-    ?assertEqual(ok, rt:wait_until_nodes_ready(UpNodes)),
-    ?assertEqual(ok, rt:wait_until_all_members(UpNodes)),
-    ?assertEqual(ok, rt:wait_until_no_pending_changes(UpNodes)),
-    lager:info("Ensure each node owns a portion of the ring"),
-    [rt:wait_until_owners_according_to(Node, RingNodes) || Node <- UpNodes],
-    [rt:wait_for_service(Node, riak_kv) || Node <- UpNodes],
-    lager:info("Verify that you got much data... (this is how we do it)"),
-    ?assertEqual([], rt:systest_read(hd(UpNodes), 0, 1000, <<"verify_build_cluster">>, 2)),
-    done.
diff --git a/tests/verify_busy_dist_port.erl b/tests/verify_busy_dist_port.erl
deleted file mode 100644
index c8828d902..000000000
--- a/tests/verify_busy_dist_port.erl
+++ /dev/null
@@ -1,109 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% Test for regression in riak_sysmon where busy_port/busy_dist_port were not set to
-%% true in app.config by default. Originally reported in az1018 (AgileZen 1018).
-%%
-%% This test starts two riak nodes and pauses the process of one of the nodes' VMs
-%% using "kill -STOP". 
The other node (not paused) is then directed to send thousands -%% of messages to the paused node, which should cause busy_dist_port. We then check -%% for busy_dist_port messages in the logs. -%% -%% see: https://issues.basho.com/show_bug.cgi?id=1305 -%% see: https://github.com/basho/basho_expect/blob/master/basho_expect/regression_az1018.py -%% -%% -- ORIGINAL TICKET TEXT FROM AGILE ZEN (AZ1018) -- -%% As we discovered in a customer's production network, riak_sysmon has been -%% mis-configured and buggy and therefore was not logging 'busy_dist_port' events -%% when they were happening. While triaging the customer's cluster, we made -%% several mistakes while assuming that those events weren't happening. -%% -%% Two fixes are required: -%% -%% Fix the riak_sysmon_filter:init() code. -%% Tune the app.config settings to correct values. -%% -%% -- END ORIGINAL TICKET -- -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_busy_dist_port). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -confirm() -> - [Node1, Node2] = rt:build_cluster(2), - lager:info("deployed 2 nodes"), - - rt:load_modules_on_nodes([cause_bdp, verify_bdp_event_handler, - riak_test_lager_backend], [Node1]), - Res = rpc:call(Node1, verify_bdp_event_handler, add_handler, [self()]), - ok = rpc:call(Node1, gen_event, add_handler, [lager_event, riak_test_lager_backend, [info, false]]), - ok = rpc:call(Node1, lager, set_loglevel, [riak_test_lager_backend, info]), - lager:info("RES: ~p", [Res]), - - OsPid = rpc:call(Node2, os, getpid, []), - lager:info("pausing node 2 (~p) pid ~s", [Node2, OsPid]), - %% must use cast here, call will never return - rpc:cast(Node2, os, cmd, [lists:flatten(io_lib:format("kill -STOP ~s", [OsPid]))]), - - lager:info("flooding node 2 (paused) with messages from node 1"), - rpc:call(Node1, cause_bdp, spam_nodes, [[Node2]]), - - - receive - go -> - lager:info("busy_dist_port event fired on node 1 (~p), checking logs", [Node1]) - after - rt_config:get(rt_max_wait_time) -> - lager:error("no busy_dist_port event fired on node 1. test is borked", - []) - end, - - lager:info("Verifying busy_dist_port message ended up in the log"), - CheckLogFun = fun(Node) -> - Logs = rpc:call(Node, riak_test_lager_backend, get_logs, []), - try case re:run(Logs, "monitor busy_dist_port .*#Port", []) of - {match, _} -> true; - nomatch -> false - end - catch - Err:Reason -> - lager:error("busy_dist_port re:run failed w/ ~p: ~p", [Err, Reason]), - false - end - end, - - Success = case rt:wait_until(Node1, CheckLogFun) of - ok -> - lager:info("found busy_dist_port message in log", []), - true; - _ -> - lager:error("busy_dist_port message not found in log", []), - false - end, - - lager:info("continuing node 2 (~p) pid ~s", [Node2, OsPid]), - %% NOTE: this call must be executed on the OS running Node2 in order to unpause it - %% and not break future test runs. 
The command cannot be executed via - %% rpc:cast(Node2, os, cmd, ...) because Node2 is paused and will never process the - %% message! - rt:cmd(lists:flatten(io_lib:format("kill -CONT ~p", [OsPid]))), - - ?assert(Success), - pass. diff --git a/tests/verify_capabilities.erl b/tests/verify_capabilities.erl deleted file mode 100644 index a1ff966df..000000000 --- a/tests/verify_capabilities.erl +++ /dev/null @@ -1,267 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012-2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_capabilities). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -%% 1.4 {riak_kv, handoff_data_encoding} -> [encode_raw, encode_zlib] -%% 1.3 {riak_kv, anti_entropy} -> [disabled, enabled_v1] -confirm() -> - lager:info("Deploying mixed set of nodes"), - Legacy = case lists:member(legacy, rt:versions()) of - true -> legacy; - _ -> current - end, - - Nodes = rt:deploy_nodes([current, previous, Legacy]), - [CNode, PNode, LNode] = Nodes, - - lager:info("Verifying known capabilities on a Current 1-node cluster"), - lager:info("Verify staged_joins == true"), - ?assertEqual(ok, rt:wait_until_capability(CNode, {riak_core, staged_joins}, true)), - - CCapabilities = rt:capability(CNode, all), - assert_capability(CNode, {riak_kv, legacy_keylisting}, false), - assert_capability(CNode, {riak_kv, listkeys_backpressure}, true), - assert_capability(CNode, {riak_core, staged_joins}, true), - assert_capability(CNode, {riak_kv, index_backpressure}, true), - assert_capability(CNode, {riak_pipe, trace_format}, ordsets), - assert_capability(CNode, {riak_kv, mapred_2i_pipe}, true), - assert_capability(CNode, {riak_kv, mapred_system}, pipe), - assert_capability(CNode, {riak_kv, vnode_vclocks}, true), - assert_capability(CNode, {riak_core, vnode_routing}, proxy), - assert_supported(CCapabilities, {riak_core, staged_joins}, [true,false]), - assert_supported(CCapabilities, {riak_core, vnode_routing}, [proxy,legacy]), - assert_supported(CCapabilities, {riak_kv, index_backpressure}, [true,false]), - assert_supported(CCapabilities, {riak_kv, legacy_keylisting}, [false]), - assert_supported(CCapabilities, {riak_kv, listkeys_backpressure}, [true,false]), - assert_supported(CCapabilities, {riak_kv, mapred_2i_pipe}, [true,false]), - assert_supported(CCapabilities, {riak_kv, mapred_system}, [pipe]), - assert_supported(CCapabilities, {riak_kv, vnode_vclocks}, [true,false]), - assert_supported(CCapabilities, {riak_pipe, trace_format}, [ordsets,sets]), - assert_supported(CCapabilities, {riak_kv, handoff_data_encoding}, [encode_raw, encode_zlib]), - - %% We've got a current-version node only, we should see raw selected as default: - assert_using(CNode, {riak_kv, handoff_data_encoding}, encode_raw), - - lager:info("Crash riak_core_capability server"), - 
restart_capability_server(CNode), - - lager:info("Verify staged_joins == true after crash"), - ?assertEqual(ok, rt:wait_until_capability(CNode, {riak_core, staged_joins}, true)), - - lager:info("Building current + ~s cluster", [Legacy]), - rt:join(LNode, CNode), - ?assertEqual(ok, rt:wait_until_all_members([CNode], [CNode, LNode])), - ?assertEqual(ok, rt:wait_until_legacy_ringready(CNode)), - - case Legacy of - legacy -> - LCapabilities = rt:capability(CNode, all), - assert_capability(CNode, {riak_kv, legacy_keylisting}, false), - assert_capability(CNode, {riak_kv, listkeys_backpressure}, true), - assert_capability(CNode, {riak_core, staged_joins}, true), - assert_capability(CNode, {riak_kv, index_backpressure}, true), - assert_capability(CNode, {riak_pipe, trace_format}, ordsets), - assert_capability(CNode, {riak_kv, mapred_2i_pipe}, true), - assert_capability(CNode, {riak_kv, mapred_system}, pipe), - assert_capability(CNode, {riak_kv, vnode_vclocks}, true), - assert_capability(CNode, {riak_core, vnode_routing}, proxy), - assert_supported(LCapabilities, {riak_core, staged_joins}, [true,false]), - assert_supported(LCapabilities, {riak_core, vnode_routing}, [proxy,legacy]), - assert_supported(LCapabilities, {riak_kv, index_backpressure}, [true,false]), - assert_supported(LCapabilities, {riak_kv, legacy_keylisting}, [false]), - assert_supported(LCapabilities, {riak_kv, listkeys_backpressure}, [true,false]), - assert_supported(LCapabilities, {riak_kv, mapred_2i_pipe}, [true,false]), - assert_supported(LCapabilities, {riak_kv, mapred_system}, [pipe]), - assert_supported(LCapabilities, {riak_kv, vnode_vclocks}, [true,false]), - assert_supported(LCapabilities, {riak_pipe, trace_format}, [ordsets,sets]), - - %% We've added a legacy server: we should see zlib selected by the current-version node: - assert_using(CNode, {riak_kv, handoff_data_encoding}, encode_zlib), - - lager:info("Crash riak_core_capability server"), - restart_capability_server(CNode), - - lager:info("Adding previous node to cluster"), - rt:join(PNode, LNode), - ?assertEqual(ok, rt:wait_until_all_members([CNode], [CNode, LNode, PNode])), - ?assertEqual(ok, rt:wait_until_legacy_ringready(CNode)), - - PCapabilities = rt:capability(CNode, all), - assert_capability(CNode, {riak_kv, legacy_keylisting}, false), - assert_capability(CNode, {riak_kv, listkeys_backpressure}, true), - assert_capability(CNode, {riak_core, staged_joins}, true), - assert_capability(CNode, {riak_kv, index_backpressure}, true), - assert_capability(CNode, {riak_pipe, trace_format}, ordsets), - assert_capability(CNode, {riak_kv, mapred_2i_pipe}, true), - assert_capability(CNode, {riak_kv, mapred_system}, pipe), - assert_capability(CNode, {riak_kv, vnode_vclocks}, true), - assert_capability(CNode, {riak_core, vnode_routing}, proxy), - assert_supported(PCapabilities, {riak_core, staged_joins}, [true,false]), - assert_supported(PCapabilities, {riak_core, vnode_routing}, [proxy,legacy]), - assert_supported(PCapabilities, {riak_kv, index_backpressure}, [true,false]), - assert_supported(PCapabilities, {riak_kv, legacy_keylisting}, [false]), - assert_supported(PCapabilities, {riak_kv, listkeys_backpressure}, [true,false]), - assert_supported(PCapabilities, {riak_kv, mapred_2i_pipe}, [true,false]), - assert_supported(PCapabilities, {riak_kv, mapred_system}, [pipe]), - assert_supported(PCapabilities, {riak_kv, vnode_vclocks}, [true,false]), - assert_supported(PCapabilities, {riak_pipe, trace_format}, [ordsets,sets]), - - %% We've added a previous version (1.2) we should 
(still) see zlib selected:
-            assert_using(CNode, {riak_kv, handoff_data_encoding}, encode_zlib),
-
-            lager:info("Upgrade Legacy node"),
-            rt:upgrade(LNode, current),
-            ?assertEqual(ok, rt:wait_until_all_members([CNode], [CNode, LNode, PNode])),
-            ?assertEqual(ok, rt:wait_until_legacy_ringready(CNode)),
-
-            %% We have upgraded the legacy node, but we should see zlib selected (previous node still not upgraded):
-            assert_using(CNode, {riak_kv, handoff_data_encoding}, encode_zlib);
-        _ ->
-            lager:info("Legacy Riak not available, skipping legacy tests"),
-            lager:info("Adding previous node to cluster"),
-            rt:join(PNode, LNode),
-            ?assertEqual(ok, rt:wait_until_all_members([CNode], [CNode, LNode, PNode])),
-            ?assertEqual(ok, rt:wait_until_legacy_ringready(CNode))
-    end,
-
-    PCap2 = rt:capability(CNode, all),
-    assert_capability(CNode, {riak_kv, legacy_keylisting}, false),
-    assert_capability(CNode, {riak_kv, listkeys_backpressure}, true),
-    assert_capability(CNode, {riak_core, staged_joins}, true),
-    assert_capability(CNode, {riak_kv, index_backpressure}, true),
-    assert_capability(CNode, {riak_pipe, trace_format}, ordsets),
-    assert_capability(CNode, {riak_kv, mapred_2i_pipe}, true),
-    assert_capability(CNode, {riak_kv, mapred_system}, pipe),
-    assert_capability(CNode, {riak_kv, vnode_vclocks}, true),
-    assert_capability(CNode, {riak_core, vnode_routing}, proxy),
-    assert_supported(PCap2, {riak_core, staged_joins}, [true,false]),
-    assert_supported(PCap2, {riak_core, vnode_routing}, [proxy,legacy]),
-    assert_supported(PCap2, {riak_kv, index_backpressure}, [true,false]),
-    assert_supported(PCap2, {riak_kv, legacy_keylisting}, [false]),
-    assert_supported(PCap2, {riak_kv, listkeys_backpressure}, [true,false]),
-    assert_supported(PCap2, {riak_kv, mapred_2i_pipe}, [true,false]),
-    assert_supported(PCap2, {riak_kv, mapred_system}, [pipe]),
-    assert_supported(PCap2, {riak_kv, vnode_vclocks}, [true,false]),
-    assert_supported(PCap2, {riak_pipe, trace_format}, [ordsets,sets]),
-
-    lager:info("Upgrading Previous node"),
-    rt:upgrade(PNode, current),
-
-    lager:info("Verifying index_backpressure changes to true"),
-    ?assertEqual(ok, rt:wait_until_capability(CNode, {riak_kv, index_backpressure}, true)),
-
-    lager:info("Verifying riak_pipe,trace_format changes to ordsets"),
-    ?assertEqual(ok, rt:wait_until_capability(CNode, {riak_pipe, trace_format}, ordsets)),
-
-    CCap2 = rt:capability(CNode, all),
-    assert_capability(CNode, {riak_kv, legacy_keylisting}, false),
-    assert_capability(CNode, {riak_kv, listkeys_backpressure}, true),
-    assert_capability(CNode, {riak_core, staged_joins}, true),
-    assert_capability(CNode, {riak_kv, index_backpressure}, true),
-    assert_capability(CNode, {riak_pipe, trace_format}, ordsets),
-    assert_capability(CNode, {riak_kv, mapred_2i_pipe}, true),
-    assert_capability(CNode, {riak_kv, mapred_system}, pipe),
-    assert_capability(CNode, {riak_kv, vnode_vclocks}, true),
-    assert_capability(CNode, {riak_core, vnode_routing}, proxy),
-    assert_supported(CCap2, {riak_core, staged_joins}, [true,false]),
-    assert_supported(CCap2, {riak_core, vnode_routing}, [proxy,legacy]),
-    assert_supported(CCap2, {riak_kv, index_backpressure}, [true,false]),
-    assert_supported(CCap2, {riak_kv, legacy_keylisting}, [false]),
-    assert_supported(CCap2, {riak_kv, listkeys_backpressure}, [true,false]),
-    assert_supported(CCap2, {riak_kv, mapred_2i_pipe}, [true,false]),
-    assert_supported(CCap2, {riak_kv, mapred_system}, [pipe]),
-    assert_supported(CCap2, {riak_kv, vnode_vclocks}, [true,false]),
-    
assert_supported(CCap2, {riak_pipe, trace_format}, [ordsets,sets]), - - %% We've upgraded both legacy and previous versions; we should see raw selected by everyone: - [assert_using(Node, {riak_kv, handoff_data_encoding}, encode_raw) || Node <- [CNode, PNode, LNode]], - - %% All nodes are now current version. Test override behavior. - Override = fun(undefined, Prefer) -> - [{riak_core, [{override_capability, - [{vnode_routing, - [{prefer, Prefer}] - }]}] - }]; - (Use, Prefer) -> - [{riak_core, [{override_capability, - [{vnode_routing, - [{use, Use}, - {prefer, Prefer}] - }]}] - }] - end, - - lager:info("Override: (use: legacy), (prefer: proxy)"), - [rt:update_app_config(Node, Override(legacy, proxy)) || Node <- Nodes], - - lager:info("Verify vnode_routing == legacy"), - assert_capability(CNode, {riak_core, vnode_routing}, legacy), - - lager:info("Override: (use: proxy), (prefer: legacy)"), - [rt:update_app_config(Node, Override(proxy, legacy)) || Node <- Nodes], - - lager:info("Verify vnode_routing == proxy"), - assert_capability(CNode, {riak_core, vnode_routing}, proxy), - - lager:info("Override: (prefer: legacy)"), - [rt:update_app_config(Node, Override(undefined, legacy)) || Node <- Nodes], - - lager:info("Verify vnode_routing == legacy"), - assert_capability(CNode, {riak_core, vnode_routing}, legacy), - - [rt:stop(Node) || Node <- Nodes], - pass. - -assert_capability(CNode, Capability, Value) -> - lager:info("Checking Capability Setting ~p =:= ~p on ~p", - [Capability, Value, CNode]), - ?assertEqual(ok, rt:wait_until_capability(CNode, Capability, Value)). - -assert_supported(Capabilities, Capability, Value) -> - lager:info("Checking Capability Supported Values ~p =:= ~p", [Capability, Value]), - ?assertEqual(Value, proplists:get_value(Capability, proplists:get_value('$supported', Capabilities))). - -assert_using(Node, {CapabilityCategory, CapabilityName}, ExpectedCapabilityName) -> - lager:info("assert_using ~p =:= ~p", [ExpectedCapabilityName, CapabilityName]), - try ExpectedCapabilityName =:= rt:capability(Node, {CapabilityCategory, CapabilityName}) of - X -> X - catch - %% This is for catching a case in which a legacy node doesn't support capabilities at all: - exit:Exception -> lager:info("assert_using() caught exception: ~p", [Exception]), - false - end. - -restart_capability_server(Node) -> - Pid = rpc:call(Node, erlang, whereis, [riak_core_capability]), - rpc:call(Node, erlang, exit, [Pid, kill]), - HasNewPid = - fun(N) -> - case rpc:call(N, erlang, whereis, [riak_core_capability]) of - Pid -> false; - NewPid when is_pid(NewPid) -> true; - _ -> false - end - end, - rt:wait_until(Node, HasNewPid). - diff --git a/tests/verify_claimant.erl b/tests/verify_claimant.erl deleted file mode 100644 index bb61e33bb..000000000 --- a/tests/verify_claimant.erl +++ /dev/null @@ -1,79 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. 
See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_claimant). - --behavior(riak_test). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --import(rt, [build_cluster/1, - start/1, - stop/1, - down/2, - claimant_according_to/1, - wait_until_unpingable/1, - wait_until_ring_converged/1, - status_of_according_to/2, - wait_until_nodes_ready/1]). - -confirm() -> - Nodes = build_cluster(3), - [Node1, Node2, _Node3] = Nodes, - - %% Ensure all nodes believe node1 is the claimant - lager:info("Ensure all nodes believe ~p is the claimant", [Node1]), - [?assertEqual(Node1, claimant_according_to(Node)) || Node <- Nodes], - - %% Stop node1 - lager:info("Stop ~p", [Node1]), - stop(Node1), - ?assertEqual(ok, wait_until_unpingable(Node1)), - - %% Ensure all nodes still believe node1 is the claimant - lager:info("Ensure all nodes still believe ~p is the claimant", [Node1]), - Remaining = Nodes -- [Node1], - [?assertEqual(Node1, claimant_according_to(Node)) || Node <- Remaining], - - %% Mark node1 as down and wait for ring convergence - lager:info("Mark ~p as down", [Node1]), - down(Node2, Node1), - ?assertEqual(ok, wait_until_ring_converged(Remaining)), - [?assertEqual(down, status_of_according_to(Node1, Node)) || Node <- Remaining], - - %% Ensure all nodes now believe node2 to be the claimant - lager:info("Ensure all nodes now believe ~p is the claimant", [Node2]), - [?assertEqual(Node2, claimant_according_to(Node)) || Node <- Remaining], - - %% Restart node1 and wait for ring convergence - lager:info("Restart ~p and wait for ring convergence", [Node1]), - start(Node1), - ?assertEqual(ok, wait_until_nodes_ready([Node1])), - ?assertEqual(ok, rt:wait_until_all_members(Nodes)), - ?assertEqual(ok, wait_until_ring_converged(Nodes)), - - %% Ensure node has rejoined and is no longer down - lager:info("Ensure ~p has rejoined and is no longer down", [Node1]), - [?assertEqual(valid, status_of_according_to(Node1, Node)) || Node <- Nodes], - - %% Ensure all nodes still believe node2 is the claimant - lager:info("Ensure all nodes still believe ~p is the claimant", [Node2]), - [?assertEqual(Node2, claimant_according_to(Node)) || Node <- Nodes], - pass. diff --git a/tests/verify_cluster_converge.erl b/tests/verify_cluster_converge.erl deleted file mode 100644 index 6b5931f79..000000000 --- a/tests/verify_cluster_converge.erl +++ /dev/null @@ -1,15 +0,0 @@ --module(verify_cluster_converge). - --behavior(riak_test). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --define(assertDenied(Op), ?assertMatch({error, <<"Permission",_/binary>>}, Op)). - -confirm() -> - lager:info("Deploy & cluster some nodes"), - - _Nodes = rt:build_cluster(4), - pass. \ No newline at end of file diff --git a/tests/verify_commit_hooks.erl b/tests/verify_commit_hooks.erl deleted file mode 100644 index fdc8ca40a..000000000 --- a/tests/verify_commit_hooks.erl +++ /dev/null @@ -1,76 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. 
You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_commit_hooks). --include_lib("eunit/include/eunit.hrl"). --behavior(riak_test). --export([confirm/0]). - -confirm() -> - [Node] = rt:deploy_nodes(1), - lager:info("Loading the hooks module into ~p", [Node]), - rt:load_modules_on_nodes([hooks], [Node]), - - lager:info("Setting pid of test (~p) in application environment of ~p for postcommit hook", [self(), Node]), - ?assertEqual(ok, rpc:call(Node, application, set_env, [riak_test, test_pid, self()])), - - lager:info("Installing commit hooks on ~p", [Node]), - ?assertEqual(ok, rpc:call(Node, hooks, set_hooks, [])), - - lager:info("Checking precommit atom failure reason."), - HTTP = rt:httpc(Node), - ?assertMatch({error, {ok, "500", _, _}}, - rt:httpc_write(HTTP, <<"failatom">>, <<"key">>, <<"value">>)), - - lager:info("Checking Bug 1145 - string failure reason"), - ?assertMatch({error, {ok, "403", _, _}}, - rt:httpc_write(HTTP, <<"failstr">>, <<"key">>, <<"value">>)), - - lager:info("Checking Bug 1145 - binary failure reason"), - ?assertMatch({error, {ok, "403", _, _}}, - rt:httpc_write(HTTP, <<"failbin">>, <<"key">>, <<"value">>)), - - lager:info("Checking that bucket without commit hooks passes."), - ?assertEqual(ok, rt:httpc_write(HTTP, <<"fail">>, <<"key">>, <<"value">>)), - - lager:info("Checking that bucket with passing precommit passes."), - ?assertEqual(ok, rt:httpc_write(HTTP, <<"failkey">>, <<"key">>, <<"value">>)), - - lager:info("Checking that bucket with failing precommit fails."), - ?assertMatch({error, {ok, "403", _, _}}, - rt:httpc_write(HTTP, <<"failkey">>, <<"fail">>, <<"value">>)), - - lager:info("Checking fix for BZ1244 - riak_kv_wm_object makes call to riak_client:get/3 with invalid type for key"), - %% riak_kv_wm_object:ensure_doc will return {error, not_found}, leading to 404. - %% see https://github.com/basho/riak_kv/pull/237 for details of the fix. - ?assertMatch({error, {ok, "404", _, _}}, - rt:httpc_write(HTTP, <<"bz1244bucket">>, undefined, <<"value">>)), - - lager:info("Checking that postcommit fires."), - ?assertMatch(ok, rt:httpc_write(HTTP, <<"postcommit">>, <<"key">>, <<"value">>)), - - receive - {wrote, _Bucket, _Key}=Msg -> - ?assertEqual({wrote, <<"postcommit">>, <<"key">>}, Msg), - pass - after 2000 - -> - lager:error("Postcommit did not send a message within 2 seconds!"), - ?assert(false) - end. diff --git a/tests/verify_conditional_postcommit.erl b/tests/verify_conditional_postcommit.erl deleted file mode 100644 index fcb2c5f0c..000000000 --- a/tests/verify_conditional_postcommit.erl +++ /dev/null @@ -1,80 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. 
You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(verify_conditional_postcommit).
--export([confirm/0, conditional_hook/3, postcommit/1]).
--include_lib("eunit/include/eunit.hrl").
-
-confirm() ->
-    Config = [{riak_core, [{vnode_management_timer, 1000},
-                           {ring_creation_size, 4}]}],
-    Nodes = rt:deploy_nodes(1, Config),
-    Node = hd(Nodes),
-    ok = rt:load_modules_on_nodes([?MODULE], Nodes),
-
-    lager:info("Creating bucket types 'type1' and 'type2'"),
-    rt:create_and_activate_bucket_type(Node, <<"type1">>, [{magic, false}]),
-    rt:create_and_activate_bucket_type(Node, <<"type2">>, [{magic, true}]),
-
-    lager:info("Installing conditional hook"),
-    CondHook = {?MODULE, conditional_hook},
-    ok = rpc:call(Node, riak_kv_hooks, add_conditional_postcommit, [CondHook]),
-
-    Bucket1 = {<<"type1">>, <<"test">>},
-    Bucket2 = {<<"type2">>, <<"test">>},
-    Keys = [<<N:32/integer>> || N <- lists:seq(1,1000)],
-    PBC = rt:pbc(Node),
-
-    lager:info("Writing keys as 'type1' and verifying hook is not triggered"),
-    write_keys(Node, PBC, Bucket1, Keys, false),
-
-    lager:info("Writing keys as 'type2' and verifying hook is triggered"),
-    write_keys(Node, PBC, Bucket2, Keys, true),
-
-    lager:info("Removing conditional hook"),
-    ok = rpc:call(Node, riak_kv_hooks, del_conditional_postcommit, [CondHook]),
-    lager:info("Re-writing keys as 'type2' and verifying hook is not triggered"),
-    write_keys(Node, PBC, Bucket2, Keys, false),
-    pass.
-
-write_keys(Node, PBC, Bucket, Keys, ShouldHook) ->
-    rpc:call(Node, application, set_env, [riak_kv, hook_count, 0]),
-    [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys],
-    {ok, Count} = rpc:call(Node, application, get_env, [riak_kv, hook_count]),
-    case ShouldHook of
-        true ->
-            ?assertEqual(length(Keys), Count);
-        false ->
-            ?assertEqual(0, Count)
-    end,
-    ok.
-
-conditional_hook(_BucketType, _Bucket, BucketProps) ->
-    case lists:member({magic, true}, BucketProps) of
-        true ->
-            {struct, [{<<"mod">>, atom_to_binary(?MODULE, utf8)},
-                      {<<"fun">>, <<"postcommit">>}]};
-        false ->
-            false
-    end.
-
-postcommit(_Obj) ->
-    {ok, Count} = application:get_env(riak_kv, hook_count),
-    application:set_env(riak_kv, hook_count, Count + 1),
-    ok.
diff --git a/tests/verify_corruption_filtering.erl b/tests/verify_corruption_filtering.erl
deleted file mode 100644
index 7154786d4..000000000
--- a/tests/verify_corruption_filtering.erl
+++ /dev/null
@@ -1,121 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. 
See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(verify_corruption_filtering).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--import(rt, [build_cluster/1,
-             leave/1,
-             wait_until_unpingable/1,
-             status_of_according_to/2,
-             remove/2]).
-%% Test plan:
-%% - build a 2-node cluster
-%% - load values
-%% - intercept the backend so that it sometimes returns
-%%   bad data
-%% - test puts, gets, folds, and handoff.
-
-
-
-
-confirm() ->
-    Nodes = build_cluster(2),
-    [Node1, Node2] = Nodes,
-    rt:wait_until_pingable(Node2),
-
-    load_cluster(Node1),
-
-    lager:info("Cluster loaded"),
-
-    case rt_config:get(rt_backend, undefined) of
-        riak_kv_eleveldb_backend ->
-            load_level_intercepts(Nodes);
-        _ ->
-            load_bitcask_intercepts(Nodes)
-    end,
-
-    get_put_mix(Node1),
-
-    %% Have node2 leave
-    lager:info("Have ~p leave", [Node2]),
-    leave(Node2),
-    ?assertEqual(ok, wait_until_unpingable(Node2)),
-
-    %% we'll never get here or timeout if this issue
-    %% isn't fixed.
-    pass.
-
-get_put_mix(Node) ->
-    PB = rt:pbc(Node),
-    [begin
-         Key = random:uniform(1000),
-         case random:uniform(2) of
-             1 ->
-                 X = crypto:rand_bytes(512),
-                 riakc_pb_socket:put(PB,
-                                     riakc_obj:new(<<"foo">>, <<Key:32/integer>>,
-                                                   X));
-             2 ->
-                 case riakc_pb_socket:get(PB, <<"foo">>, <<Key:32/integer>>) of
-                     {error, notfound} ->
-                         ok;
-                     {error, Reason} ->
-                         lager:error("got unexpected return: ~p",
-                                     [Reason]),
-                         throw(Reason);
-                     {ok, _O} -> ok;
-                     Else -> throw(Else)
-                 end
-         end
-     end
-     || _ <- lists:seq(1, 2000)].
-
-load_cluster(Node) ->
-    PB = rt:pbc(Node),
-    [riakc_pb_socket:put(PB,
-                         riakc_obj:new(<<"foo">>, <<X:32/integer>>,
-                                       <<X:32/integer>>))
-     || X <- lists:seq(1,1000)].
-
-load_level_intercepts(Nodes) ->
-    [begin
-         rt_intercept:add(Node, {riak_kv_eleveldb_backend,
-                                 [{{get, 3}, corrupting_get}]}),
-         rt_intercept:add(Node, {riak_kv_eleveldb_backend,
-                                 [{{put, 5}, corrupting_put}]}),
-         rt_intercept:add(Node, {riak_kv_vnode,
-                                 [{{handle_handoff_data, 2},
-                                   corrupting_handle_handoff_data}]})
-     end
-     || Node <- Nodes].
-
-load_bitcask_intercepts(Nodes) ->
-    [begin
-         rt_intercept:add(Node, {riak_kv_bitcask_backend,
-                                 [{{get, 3}, corrupting_get}]}),
-         rt_intercept:add(Node, {riak_kv_bitcask_backend,
-                                 [{{put, 5}, corrupting_put}]}),
-         rt_intercept:add(Node, {riak_kv_vnode,
-                                 [{{handle_handoff_data, 2},
-                                   corrupting_handle_handoff_data}]})
     end
-     || Node <- Nodes].
diff --git a/tests/verify_counter_capability.erl b/tests/verify_counter_capability.erl
deleted file mode 100644
index 5f259c2dc..000000000
--- a/tests/verify_counter_capability.erl
+++ /dev/null
@@ -1,84 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%% @author Russell Brown
-%%% @copyright (C) 2013, Basho Technologies
-%%% @doc
-%%% riak_test for counter capability
-%%% @end
-
--module(verify_counter_capability).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(BUCKET, <<"test-counters">>).
--define(KEY, <<"foo">>).
-
-
-confirm() ->
-    %% Create a mixed cluster of legacy and previous
-    %% Create a PB client
-    %% GET / PUT on older and newer cluster
-    %% Upgrade nodes to previous
-    %% Get put on all nodes
-    Config = [],
-    [Legacy, Previous] = Nodes = rt:build_cluster([{legacy, Config}, {previous, Config}]),
-    ?assertEqual(ok, rt:wait_until_capability_contains(Previous, {riak_kv, crdt}, [pncounter])),
-    verify_counter_converge:set_allow_mult_true(Nodes),
-
-    {LegacyPB, LegacyHttp} = get_clients(Legacy),
-    {PrevPB, PrevHttp} = get_clients(Previous),
-
-    ?assertMatch(ok, rhc:counter_incr(LegacyHttp, ?BUCKET, ?KEY, 1)),
-    ?assertMatch({ok, 1}, rhc:counter_val(LegacyHttp, ?BUCKET, ?KEY)),
-
-    ?assertMatch(ok, rhc:counter_incr(PrevHttp, ?BUCKET, ?KEY, 1)),
-    ?assertMatch({ok, 2}, rhc:counter_val(PrevHttp, ?BUCKET, ?KEY)),
-
-    ?assertEqual(ok, riakc_pb_socket:counter_incr(LegacyPB, ?BUCKET, ?KEY, 1)),
-    ?assertEqual({ok, 3}, riakc_pb_socket:counter_val(LegacyPB, ?BUCKET, ?KEY)),
-    ?assertEqual(ok, riakc_pb_socket:counter_incr(PrevPB, ?BUCKET, ?KEY, 1)),
-    ?assertEqual({ok, 4}, riakc_pb_socket:counter_val(PrevPB, ?BUCKET, ?KEY)),
-
-    riakc_pb_socket:stop(LegacyPB),
-
-    rt:upgrade(Legacy, previous),
-
-    PrevPB2 = rt:pbc(Legacy),
-
-    ?assertEqual(ok, rt:wait_until_capability_contains(Previous, {riak_kv, crdt}, [pncounter,riak_dt_pncounter,riak_dt_orswot,riak_dt_map])),
-
-    ?assertMatch(ok, rhc:counter_incr(LegacyHttp, ?BUCKET, ?KEY, 1)),
-    ?assertMatch({ok, 5}, rhc:counter_val(LegacyHttp, ?BUCKET, ?KEY)),
-
-    ?assertMatch(ok, rhc:counter_incr(PrevHttp, ?BUCKET, ?KEY, 1)),
-    ?assertMatch({ok, 6}, rhc:counter_val(PrevHttp, ?BUCKET, ?KEY)),
-
-    ?assertEqual(ok, riakc_pb_socket:counter_incr(PrevPB2, ?BUCKET, ?KEY, 1)),
-    ?assertEqual({ok, 7}, riakc_pb_socket:counter_val(PrevPB2, ?BUCKET, ?KEY)),
-    ?assertEqual(ok, riakc_pb_socket:counter_incr(PrevPB, ?BUCKET, ?KEY, 1)),
-    ?assertEqual({ok, 8}, riakc_pb_socket:counter_val(PrevPB, ?BUCKET, ?KEY)),
-
-    [riakc_pb_socket:stop(C) || C <- [PrevPB, PrevPB2]],
-
-    pass.
-
-get_clients(Node) ->
-    {rt:pbc(Node), rt:httpc(Node)}.
diff --git a/tests/verify_counter_converge.erl b/tests/verify_counter_converge.erl
deleted file mode 100644
index 2217d582b..000000000
--- a/tests/verify_counter_converge.erl
+++ /dev/null
@@ -1,111 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%% -%% ------------------------------------------------------------------- -%%% @author Russell Brown -%%% @copyright (C) 2012, Basho Technologies -%%% @doc -%%% riak_test for riak_dt counter convergence, -%%% @end - --module(verify_counter_converge). --behavior(riak_test). --export([confirm/0, set_allow_mult_true/1, set_allow_mult_true/2]). --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"test-counters">>). - -confirm() -> - Key = <<"a">>, - - [N1, N2, N3, N4]=Nodes = rt:build_cluster(4), - [C1, C2, C3, C4]=Clients = [ rt:httpc(N) || N <- Nodes ], - - set_allow_mult_true(Nodes), - - increment_counter(C1, Key), - increment_counter(C2, Key, 10), - - [?assertEqual(11, get_counter(C, Key)) || C <- Clients], - - decrement_counter(C3, Key), - decrement_counter(C4, Key, 2), - - [?assertEqual(8, get_counter(C, Key)) || C <- Clients], - - lager:info("Partition cluster in two."), - - PartInfo = rt:partition([N1, N2], [N3, N4]), - - %% increment one side - increment_counter(C1, Key, 5), - - %% check value on one side is different from other - [?assertEqual(13, get_counter(C, Key)) || C <- [C1, C2]], - [?assertEqual(8, get_counter(C, Key)) || C <- [C3, C4]], - - %% decrement other side - decrement_counter(C3, Key, 2), - - %% verify values differ - [?assertEqual(13, get_counter(C, Key)) || C <- [C1, C2]], - [?assertEqual(6, get_counter(C, Key)) || C <- [C3, C4]], - - %% heal - lager:info("Heal and check merged values"), - ok = rt:heal(PartInfo), - ok = rt:wait_for_cluster_service(Nodes, riak_kv), - - %% verify all nodes agree - [?assertEqual(ok, rt:wait_until(fun() -> - 11 == get_counter(HP, Key) - end)) || HP <- Clients ], - - pass. - -set_allow_mult_true(Nodes) -> - set_allow_mult_true(Nodes, ?BUCKET). - -set_allow_mult_true(Nodes, Bucket) -> - %% Counters REQUIRE allow_mult=true - N1 = hd(Nodes), - AllowMult = [{allow_mult, true}], - lager:info("Setting bucket properties ~p for bucket ~p on node ~p", - [AllowMult, Bucket, N1]), - rpc:call(N1, riak_core_bucket, set_bucket, [Bucket, AllowMult]), - rt:wait_until_ring_converged(Nodes). - -%% Counter API -get_counter(Client, Key) -> - {ok, Val} = rhc:counter_val(Client, ?BUCKET, Key), - Val. - -increment_counter(Client, Key) -> - increment_counter(Client, Key, 1). - -increment_counter(Client, Key, Amt) -> - update_counter(Client, Key, Amt). - -decrement_counter(Client, Key) -> - decrement_counter(Client, Key, 1). - -decrement_counter(Client, Key, Amt) -> - update_counter(Client, Key, -Amt). - -update_counter(Client, Key, Amt) -> - rhc:counter_incr(Client, ?BUCKET, Key, Amt). diff --git a/tests/verify_counter_repl.erl b/tests/verify_counter_repl.erl deleted file mode 100644 index 79a107b37..000000000 --- a/tests/verify_counter_repl.erl +++ /dev/null @@ -1,132 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. 
-%%
-%% -------------------------------------------------------------------
-
-%%% @copyright (C) 2013, Basho Technologies
-%%% @doc
-%%% riak_test for riak_dt counter convergence over repl
-%%% @end
-
--module(verify_counter_repl).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(BUCKET, <<"counter-bucket">>).
--define(KEY, <<"counter-key">>).
-
-confirm() ->
-    inets:start(),
-
-    {ClusterA, ClusterB} = make_clusters(),
-
-    %% Write the data to both sides of the cluster
-    AIncrements = increment_cluster_counter(ClusterA),
-    BIncrements = increment_cluster_counter(ClusterB),
-    AExpected = lists:sum(AIncrements),
-    BExpected = lists:sum(BIncrements),
-
-    AValue0 = get_counter(hd(ClusterA)),
-    BValue0 = get_counter(hd(ClusterB)),
-
-    ?assertEqual(AExpected, AValue0),
-    ?assertEqual(BExpected, BValue0),
-
-    %% let the repl flow
-    repl_power_activate(ClusterA, ClusterB),
-
-    AValue = get_counter(hd(ClusterA)),
-    BValue = get_counter(hd(ClusterB)),
-    ExpectedValue = AExpected + BExpected,
-
-    ?assertEqual(ExpectedValue, AValue),
-    ?assertEqual(ExpectedValue, BValue),
-    pass.
-
-make_clusters() ->
-    Conf = [{riak_repl, [{fullsync_on_connect, false},
-                         {fullsync_interval, disabled}]},
-            {riak_core, [{default_bucket_props, [{allow_mult, true}]}]}],
-    Nodes = rt:deploy_nodes(6, Conf, [riak_kv, riak_repl]),
-    {ClusterA, ClusterB} = lists:split(3, Nodes),
-    A = make_cluster(ClusterA, "A"),
-    B = make_cluster(ClusterB, "B"),
-    {A, B}.
-
-make_cluster(Nodes, Name) ->
-    repl_util:make_cluster(Nodes),
-    repl_util:name_cluster(hd(Nodes), Name),
-    repl_util:wait_until_leader_converge(Nodes),
-    Clients = [ rt:httpc(Node) || Node <- Nodes ],
-    lists:zip(Clients, Nodes).
-
-increment_cluster_counter(Cluster) ->
-    [increment_counter(Client, rand_amt()) || {Client, _Node} <- Cluster].
-
-increment_counter(Client, Amt) ->
-    rhc:counter_incr(Client, ?BUCKET, ?KEY, Amt),
-    Amt.
-
-get_counter({Client, _Node}) ->
-    {ok, Val} = rhc:counter_val(Client, ?BUCKET, ?KEY),
-    Val.
-
-rand_amt() ->
-    crypto:rand_uniform(-100, 100).
-
-%% Set up bi-directional full sync replication.
-repl_power_activate(ClusterA, ClusterB) ->
-    lager:info("repl power...ACTIVATE!"),
-    LeaderA = get_leader(hd(ClusterA)),
-    info("got leader A"),
-    LeaderB = get_leader(hd(ClusterB)),
-    info("Got leader B"),
-    MgrPortA = get_mgr_port(hd(ClusterA)),
-    info("Got manager port A"),
-    MgrPortB = get_mgr_port(hd(ClusterB)),
-    info("Got manager port B"),
-    info("connecting A to B"),
-    repl_util:connect_cluster(LeaderA, "127.0.0.1", MgrPortB),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")),
-    info("A connected to B"),
-    info("connecting B to A"),
-    repl_util:connect_cluster(LeaderB, "127.0.0.1", MgrPortA),
-    ?assertEqual(ok, repl_util:wait_for_connection(LeaderB, "A")),
-    info("B connected to A"),
-    info("Enabling Fullsync bi-directional"),
-    repl_util:enable_fullsync(LeaderA, "B"),
-    info("Enabled A->B"),
-    repl_util:enable_fullsync(LeaderB, "A"),
-    info("Enabled B->A"),
-    info("Awaiting fullsync completion"),
-    repl_util:start_and_wait_until_fullsync_complete(LeaderA),
-    info("A->B complete"),
-    repl_util:start_and_wait_until_fullsync_complete(LeaderB),
-    info("B->A complete").
-
-get_leader({_, Node}) ->
-    rpc:call(Node, riak_core_cluster_mgr, get_leader, []).
-
-get_mgr_port({_, Node}) ->
-    {ok, {_IP, Port}} = rpc:call(Node, application, get_env,
-                                 [riak_core, cluster_mgr]),
-    Port.
-
-info(Message) ->
-    lager:info(Message).
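repl_power_activate/2 above performs the same connect/fullsync sequence once in each direction; factored out, one direction looks like this (a sketch that reuses only the repl_util calls already present in the file):

connect_and_fullsync(FromLeader, ToName, ToPort) ->
    %% Connect to the remote cluster manager, then enable and run fullsync
    repl_util:connect_cluster(FromLeader, "127.0.0.1", ToPort),
    ?assertEqual(ok, repl_util:wait_for_connection(FromLeader, ToName)),
    repl_util:enable_fullsync(FromLeader, ToName),
    repl_util:start_and_wait_until_fullsync_complete(FromLeader).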
diff --git a/tests/verify_crdt_capability.erl b/tests/verify_crdt_capability.erl
deleted file mode 100644
index 9b49b5a57..000000000
--- a/tests/verify_crdt_capability.erl
+++ /dev/null
@@ -1,101 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%%% @author Russell Brown
-%%% @copyright (C) 2013, Basho Technologies
-%%% @doc
-%%% riak_test for crdt capability
-%%% @end
-
--module(verify_crdt_capability).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(BUCKET, <<"test-counters">>).
--define(KEY, <<"foo">>).
-
-confirm() ->
-    %% Create a mixed cluster of current and previous versions
-    %% Create a PB client
-    %% GET / PUT on current and previous cluster
-    %% Upgrade nodes
-    %% Get put on all nodes
-    Config = [],
-    [Previous, Current]=Nodes = rt:build_cluster([{previous, Config}, {current, Config}]),
-    ?assertEqual(ok, rt:wait_until_capability_contains(Current, {riak_kv, crdt}, [pncounter])),
-
-    verify_counter_converge:set_allow_mult_true(Nodes),
-
-    {PrevPB, PrevHttp} = get_clients(Previous),
-    {PB, Http} = get_clients(Current),
-
-    ?assertMatch(ok, rhc:counter_incr(PrevHttp, ?BUCKET, ?KEY, 1)),
-    ?assertMatch({ok, 1}, rhc:counter_val(PrevHttp, ?BUCKET, ?KEY)),
-
-    ?assertMatch(ok, rhc:counter_incr(Http, ?BUCKET, ?KEY, 1)),
-    ?assertMatch({ok, 2}, rhc:counter_val(Http, ?BUCKET, ?KEY)),
-
-    ?assertEqual(ok, riakc_pb_socket:counter_incr(PrevPB, ?BUCKET, ?KEY, 1)),
-    ?assertEqual({ok, 3}, riakc_pb_socket:counter_val(PrevPB, ?BUCKET, ?KEY)),
-    ?assertEqual(ok, riakc_pb_socket:counter_incr(PB, ?BUCKET, ?KEY, 1)),
-    ?assertEqual({ok, 4}, riakc_pb_socket:counter_val(PB, ?BUCKET, ?KEY)),
-
-    lager:info("Passed mixed test, upgrade time!"),
-
-    rt:upgrade(Previous, current),
-    lager:info("Upgrayded!!"),
-    ?assertEqual(ok, rt:wait_until_ready(Current)),
-    ?assertEqual(ok, rt:wait_until_ready(Previous)),
-    rt:wait_for_service(Previous, riak_kv),
-    ?assertEqual(ok, rt:wait_until_capability_contains(Current, {riak_kv, crdt}, [riak_dt_pncounter, riak_dt_orswot, riak_dt_map, pncounter])),
-    ?assertMatch(ok, rhc:counter_incr(PrevHttp, ?BUCKET, ?KEY, 1)),
-    ?assertMatch({ok, 5}, rhc:counter_val(PrevHttp, ?BUCKET, ?KEY)),
-
-    ?assertMatch(ok, rhc:counter_incr(Http, ?BUCKET, ?KEY, 1)),
-    ?assertMatch({ok, 6}, rhc:counter_val(Http, ?BUCKET, ?KEY)),
-
-    %% Reconnect to the upgraded node. 
- riakc_pb_socket:stop(PrevPB), - {PrevPB1, _} = get_clients(Previous), - - ?assertEqual(ok, riakc_pb_socket:counter_incr(PrevPB1, ?BUCKET, ?KEY, 1)), - ?assertEqual({ok, 7}, riakc_pb_socket:counter_val(PrevPB1, ?BUCKET, ?KEY)), - ?assertEqual(ok, riakc_pb_socket:counter_incr(PB, ?BUCKET, ?KEY, 1)), - ?assertEqual({ok, 8}, riakc_pb_socket:counter_val(PB, ?BUCKET, ?KEY)), - - %% And check that those 1.4 written values can be accessed / - %% incremented over the 2.0 API - - ?assertEqual(8, begin - {ok, Counter} = riakc_pb_socket:fetch_type(PrevPB1, {<<"default">>, ?BUCKET}, ?KEY), - riakc_counter:value(Counter) - end), - ?assertEqual(ok, riakc_pb_socket:update_type(PrevPB1, {<<"default">>, ?BUCKET}, ?KEY, gen_counter_op())), - ?assertEqual({ok, 9}, riakc_pb_socket:counter_val(PB, ?BUCKET, ?KEY)), - - [riakc_pb_socket:stop(C) || C <- [PB, PrevPB1]], - - pass. - -gen_counter_op() -> - riakc_counter:to_op(riakc_counter:increment(riakc_counter:new())). - -get_clients(Node) -> - {rt:pbc(Node), rt:httpc(Node)}. diff --git a/tests/verify_cs_bucket.erl b/tests/verify_cs_bucket.erl deleted file mode 100644 index 5db40bbca..000000000 --- a/tests/verify_cs_bucket.erl +++ /dev/null @@ -1,84 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @ doc tests the new CS Bucket fold message - --module(verify_cs_bucket). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --import(secondary_index_tests, [put_an_object/2, int_to_key/1]). --define(BUCKET, <<"2ibucket">>). --define(FOO, <<"foo">>). - -confirm() -> - Nodes = rt:build_cluster(3), - ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), - - PBPid = rt:pbc(hd(Nodes)), - - [put_an_object(PBPid, N) || N <- lists:seq(0, 200)], - - ExpectedKeys = lists:sort([int_to_key(N) || N <- lists:seq(0, 200)]), - - undefined = assertEqual(PBPid, ExpectedKeys, ?BUCKET, [{start_key, int_to_key(0)}]), - undefined = assertEqual(PBPid, tl(ExpectedKeys), ?BUCKET, [{start_key, int_to_key(0)}, {start_incl, false}]), - undefined = assertEqual(PBPid, [int_to_key(104)], ?BUCKET, [{start_key, int_to_key(103)}, - {end_key, int_to_key(105)}, - {start_incl, false}, - {end_incl, false}]), - - %% Limit / continuations - - Continuation1 = assertEqual(PBPid, lists:sublist(ExpectedKeys, 20), ?BUCKET, [{start_key, int_to_key(0)}, {max_results, 20}]), - Continuation2 = assertEqual(PBPid, lists:sublist(ExpectedKeys, 21, 20), ?BUCKET, - [{start_key, int_to_key(0)}, {max_results, 20}, {continuation, Continuation1}]), - undefined = assertEqual(PBPid, lists:sublist(ExpectedKeys, 41, 200), ?BUCKET, [{continuation, Continuation2}, {max_results, 200}]), - - riakc_pb_socket:stop(PBPid), - pass. 
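The continuation checks in confirm/0 above generalize to paging through an entire bucket: keep folding until the server stops returning a continuation. A sketch built on the stream_pb/3 helper below (fold_all and the page size are illustrative):

fold_all(PBPid, Bucket) ->
    fold_all(PBPid, Bucket, [{max_results, 50}], []).

fold_all(PBPid, Bucket, Opts, Acc) ->
    {ok, Res} = stream_pb(PBPid, Bucket, Opts),
    Objects = proplists:get_value(objects, Res, []),
    case proplists:get_value(continuation, Res) of
        undefined ->
            %% No continuation: the fold is complete
            Acc ++ Objects;
        Cont ->
            fold_all(PBPid, Bucket,
                     [{continuation, Cont}, {max_results, 50}],
                     Acc ++ Objects)
    end.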
- -%% Check the PB result against our expectations -%% and the non-streamed HTTP -assertEqual(PB, Expected, Bucket, Opts) -> - {ok, PBRes} = stream_pb(PB, Bucket, Opts), - PBObjects = proplists:get_value(objects, PBRes, []), - Keys = [riakc_obj:key(Obj) || Obj <- PBObjects], - ?assertEqual(Expected, Keys), - proplists:get_value(continuation, PBRes). - - -stream_pb(Pid, Bucket, Opts) -> - riakc_pb_socket:cs_bucket_fold(Pid, Bucket, Opts), - stream_loop(). - -stream_loop() -> - stream_loop(orddict:new()). - -stream_loop(Acc) -> - receive - {_Ref, {done, undefined}} -> - {ok, orddict:to_list(Acc)}; - {_Ref, {done, Continuation}} -> - {ok, orddict:store(continuation, Continuation, Acc)}; - {_Ref, {ok, Objects}} -> - Acc2 = orddict:update(objects, fun(Existing) -> Existing++Objects end, Objects, Acc), - stream_loop(Acc2) - end. diff --git a/tests/verify_down.erl b/tests/verify_down.erl deleted file mode 100644 index 973fb9f10..000000000 --- a/tests/verify_down.erl +++ /dev/null @@ -1,68 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_down). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). 
- -confirm() -> - Nodes = rt:deploy_nodes(3), - [Node1, Node2, Node3] = Nodes, - - %% Join node2 to node1 and wait for cluster convergence - lager:info("Join ~p to ~p", [Node2, Node1]), - rt:join(Node2, Node1), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node1, Node2])), - ?assertEqual(ok, rt:wait_until_no_pending_changes([Node1, Node2])), - - %% Shutdown node2 - lager:info("Stopping ~p", [Node2]), - rt:stop(Node2), - ?assertEqual(ok, rt:wait_until_unpingable(Node2)), - Remaining = Nodes -- [Node2], - - %% Join node3 to node1 - lager:info("Join ~p to ~p", [Node3, Node1]), - rt:join(Node3, Node1), - ?assertEqual(ok, rt:wait_until_all_members(Remaining, [Node3])), - - %% Ensure node3 remains in the joining state - lager:info("Ensure ~p remains in the joining state", [Node3]), - [?assertEqual(joining, rt:status_of_according_to(Node3, Node)) || Node <- Remaining], - - %% Mark node2 as down and wait for ring convergence - lager:info("Mark ~p as down", [Node2]), - rt:down(Node1, Node2), - ?assertEqual(ok, rt:wait_until_ring_converged(Remaining)), - [?assertEqual(down, rt:status_of_according_to(Node2, Node)) || Node <- Remaining], - - %% Ensure node3 is now valid - [?assertEqual(valid, rt:status_of_according_to(Node3, Node)) || Node <- Remaining], - - %% Restart node2 and wait for ring convergence - lager:info("Restart ~p and wait for ring convergence", [Node2]), - rt:start(Node2), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node2])), - ?assertEqual(ok, rt:wait_until_ring_converged(Nodes)), - - %% Verify that all three nodes are ready - lager:info("Ensure all nodes are ready"), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - pass. diff --git a/tests/verify_dt_context.erl b/tests/verify_dt_context.erl deleted file mode 100644 index acdd74e27..000000000 --- a/tests/verify_dt_context.erl +++ /dev/null @@ -1,222 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%%% @copyright (C) 2013, Basho Technologies -%%% @doc -%%% riak_test for riak_dt CRDT context operations -%%% @end - --module(verify_dt_context). --behavior(riak_test). --compile([export_all]). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(STYPE, <<"sets">>). --define(MTYPE, <<"maps">>). --define(TYPES, [{?STYPE, set}, - {?MTYPE, map}]). - --define(BUCKET, <<"pbtest">>). --define(KEY, <<"ctx">>). - --define(MODIFY_OPTS, [create]). 
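%% Editor's sketch (not part of the original patch): the fetch/modify/store
%% round-trip that carries a CRDT causal context, which is what the test
%% below relies on when it expects removes to be deferred on the
%% partitioned side. remove_with_context/4 is a hypothetical helper name;
%% the riakc calls are the same ones the test itself uses.
remove_with_context(Client, TypedBucket, Key, Elem) ->
    %% fetching first captures the context inside the client-side set value
    {ok, Set0} = riakc_pb_socket:fetch_type(Client, TypedBucket, Key),
    Set1 = riakc_set:del_element(Elem, Set0),
    %% to_op/1 ships the remove together with the captured context
    riakc_pb_socket:update_type(Client, TypedBucket, Key, riakc_set:to_op(Set1)).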
- -confirm() -> - Config = [ {riak_kv, [{handoff_concurrency, 100}]}, - {riak_core, [ {ring_creation_size, 16}, - {vnode_management_timer, 1000} ]}], - - [N1, N2]=Nodes = rt:build_cluster(2, Config), - - create_bucket_types(Nodes, ?TYPES), - - [P1, P2] = PBClients = create_pb_clients(Nodes), - - S = make_set([a, b]), - - ok = store_set(P1, S), - - S2 = make_set([x, y, z]), - - M = make_map([{<<"set1">>, S}, {<<"set2">>, S2}]), - - ok = store_map(P2, M), - - - verify_dt_converge:check_value(P1, riakc_pb_socket, - {?STYPE, ?BUCKET}, ?KEY, riakc_set, - [<<"a">>, <<"b">>]), - - verify_dt_converge:check_value(P2, riakc_pb_socket, - {?MTYPE, ?BUCKET}, ?KEY, riakc_map, - [{{<<"set1">>, set}, [<<"a">>, <<"b">>]}, - {{<<"set2">>, set}, [ <<"x">>, <<"y">>, <<"z">>]}]), - - lager:info("Partition cluster in two."), - - PartInfo = rt:partition([N1], [N2]), - - lager:info("Modify data on side 1"), - %% Modify one side - S1_1 = make_set([c, d, e]), - ok= store_set(P1, S1_1), - - S3 = make_set([r, s]), - - M_1 = make_map([{<<"set1">>, S1_1}, {<<"set3">>, S3}]), - ok = store_map(P1, M_1), - - verify_dt_converge:check_value(P1, riakc_pb_socket, - {?STYPE, ?BUCKET}, ?KEY, riakc_set, - [<<"a">>, <<"b">>, <<"c">>, <<"d">>, <<"e">>]), - - verify_dt_converge:check_value(P1, riakc_pb_socket, - {?MTYPE, ?BUCKET}, ?KEY, riakc_map, - [{{<<"set1">>, set}, [<<"a">>, <<"b">>, <<"c">>, <<"d">>, <<"e">>]}, - {{<<"set2">>, set}, [ <<"x">>, <<"y">>, <<"z">>]}, - {{<<"set3">>, set}, [<<"r">>, <<"s">>]}]), - - verify_dt_converge:check_value(P2, riakc_pb_socket, - {?STYPE, ?BUCKET}, ?KEY, riakc_set, - [<<"a">>, <<"b">>]), - - verify_dt_converge:check_value(P2, riakc_pb_socket, - {?MTYPE, ?BUCKET}, ?KEY, riakc_map, - [{{<<"set1">>, set}, [<<"a">>, <<"b">>]}, - {{<<"set2">>, set}, [ <<"x">>, <<"y">>, <<"z">>]}]), - - %% get the modified side's values - - S1_2 = fetch(P1, ?STYPE), - M_2 = fetch(P1, ?MTYPE), - - %% operate on them and send to the partitioned side - S1_3 = riakc_set:del_element(<<"d">>, S1_2), - M_3 = riakc_map:update({<<"set1">>, set}, fun(Set1) -> - riakc_set:del_element(<<"e">>, Set1) end, - riakc_map:erase({<<"set3">>, set}, M_2)), - - %% we've removed elements that aren't to be found on P2, and a - %% field that's never been seen on P2 - - %% update the unmodified side - ok = store_map(P2, M_3), - ok = store_set(P2, S1_3), - - %% the value should not have changed, as these removes should be deferred - - verify_dt_converge:check_value(P2, riakc_pb_socket, - {?STYPE, ?BUCKET}, ?KEY, riakc_set, - [<<"a">>, <<"b">>]), - - verify_dt_converge:check_value(P2, riakc_pb_socket, - {?MTYPE, ?BUCKET}, ?KEY, riakc_map, - [{{<<"set1">>, set}, [<<"a">>, <<"b">>]}, - {{<<"set2">>, set}, [ <<"x">>, <<"y">>, <<"z">>]}]), - - %% Check both sides - %% heal - lager:info("Heal and check merged values"), - ok = rt:heal(PartInfo), - ok = rt:wait_for_cluster_service(Nodes, riak_kv), - - %% verify all nodes agree - - verify_dt_converge:check_value(P1, riakc_pb_socket, - {?STYPE, ?BUCKET}, ?KEY, riakc_set, - [<<"a">>, <<"b">>, <<"c">>, <<"e">>]), - - verify_dt_converge:check_value(P1, riakc_pb_socket, - {?MTYPE, ?BUCKET}, ?KEY, riakc_map, - [{{<<"set1">>, set}, [<<"a">>, <<"b">>, <<"c">>, <<"d">>]}, - {{<<"set2">>, set}, [ <<"x">>, <<"y">>, <<"z">>]}]), - - verify_dt_converge:check_value(P2, riakc_pb_socket, - {?STYPE, ?BUCKET}, ?KEY, riakc_set, - [<<"a">>, <<"b">>, <<"c">>, <<"e">>]), - - verify_dt_converge:check_value(P2, riakc_pb_socket, - {?MTYPE, ?BUCKET}, ?KEY, riakc_map, - [{{<<"set1">>, set}, [<<"a">>, <<"b">>, <<"c">>, 
<<"d">>]}, - {{<<"set2">>, set}, [ <<"x">>, <<"y">>, <<"z">>]}]), - - - [riakc_pb_socket:stop(C) || C <- PBClients], - - pass. - -fetch(Client, BType) -> - {ok, DT} = riakc_pb_socket:fetch_type(Client, {BType, ?BUCKET}, ?KEY), - DT. - - -make_set(Elems) -> - lists:foldl(fun(E, Set) -> - riakc_set:add_element(atom_to_binary(E, latin1), Set) - end, - riakc_set:new(), - Elems). - -make_map(Fields) -> - lists:foldl(fun({F, V}, Map) -> - riakc_map:update({F, set}, fun(_) -> - V end, - Map) - end, - riakc_map:new(), - Fields). - -store_set(Client, Set) -> - riakc_pb_socket:update_type(Client, {?STYPE, ?BUCKET}, ?KEY, riakc_set:to_op(Set)). - -store_map(Client, Map) -> - riakc_pb_socket:update_type(Client, {?MTYPE, ?BUCKET}, ?KEY, riakc_map:to_op(Map)). - -create_pb_clients(Nodes) -> - [begin - C = rt:pbc(N), - riakc_pb_socket:set_options(C, [queue_if_disconnected]), - C - end || N <- Nodes]. - -create_bucket_types([N1|_], Types) -> - lager:info("Creating bucket types with datatypes: ~p", [Types]), - [rt:create_and_activate_bucket_type(N1, Name, [{datatype, Type}, {allow_mult, true}]) - || {Name, Type} <- Types ]. - -bucket_type_ready_fun(Name) -> - fun(Node) -> - Res = rpc:call(Node, riak_core_bucket_type, activate, [Name]), - lager:info("is ~p ready ~p?", [Name, Res]), - Res == ok - end. - -bucket_type_matches_fun(Types) -> - fun(Node) -> - lists:all(fun({Name, Type}) -> - Props = rpc:call(Node, riak_core_bucket_type, get, - [Name]), - Props /= undefined andalso - proplists:get_value(allow_mult, Props, false) - andalso - proplists:get_value(datatype, Props) == Type - end, Types) - end. diff --git a/tests/verify_dt_converge.erl b/tests/verify_dt_converge.erl deleted file mode 100644 index 60c78ab36..000000000 --- a/tests/verify_dt_converge.erl +++ /dev/null @@ -1,340 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%%% @copyright (C) 2013, Basho Technologies -%%% @doc -%%% riak_test for riak_dt CRDT convergence -%%% @end - --module(verify_dt_converge). --behavior(riak_test). --compile([export_all]). --export([confirm/0]). - --include_lib("eunit/include/eunit.hrl"). - --define(CTYPE, <<"counters">>). --define(STYPE, <<"sets">>). --define(MTYPE, <<"maps">>). --define(TYPES, [{?CTYPE, counter}, - {?STYPE, set}, - {?MTYPE, map}]). - --define(PB_BUCKET, <<"pbtest">>). --define(HTTP_BUCKET, <<"httptest">>). --define(KEY, <<"test">>). - -%% Type, Bucket, Client, Mod - --define(MODIFY_OPTS, [create]). 
- -confirm() -> - Config = [ {riak_kv, [{handoff_concurrency, 100}]}, - {riak_core, [ {ring_creation_size, 16}, - {vnode_management_timer, 1000} ]}], - - [N1, N2, N3, N4]=Nodes = rt:build_cluster(4, Config), - - create_bucket_types(Nodes, ?TYPES), - - [P1, P2, P3, P4] = PBClients = create_pb_clients(Nodes), - [H1, H2, H3, H4] = HTTPClients = create_http_clients(Nodes), - - %% Do some updates to each type - [update_1(Type, ?PB_BUCKET, Client, riakc_pb_socket) || - {Type, Client} <- lists:zip(?TYPES, [P1, P2, P3])], - - [update_1(Type, ?HTTP_BUCKET, Client, rhc) || - {Type, Client} <- lists:zip(?TYPES, [H1, H2, H3])], - - %% Check that the updates are stored - [check_1(Type, ?PB_BUCKET, Client, riakc_pb_socket) || - {Type, Client} <- lists:zip(?TYPES, [P4, P3, P2])], - - [check_1(Type, ?HTTP_BUCKET, Client, rhc) || - {Type, Client} <- lists:zip(?TYPES, [H4, H3, H2])], - - lager:info("Partition cluster in two."), - - PartInfo = rt:partition([N1, N2], [N3, N4]), - - lager:info("Modify data on side 1"), - %% Modify one side - [update_2a(Type, ?PB_BUCKET, Client, riakc_pb_socket) || - {Type, Client} <- lists:zip(?TYPES, [P1, P2, P1])], - - [update_2a(Type, ?HTTP_BUCKET, Client, rhc) || - {Type, Client} <- lists:zip(?TYPES, [H1, H2, H1])], - - lager:info("Check data is unmodified on side 2"), - %% check value on one side is different from other - [check_2b(Type, ?PB_BUCKET, Client, riakc_pb_socket) || - {Type, Client} <- lists:zip(?TYPES, [P4, P3, P4])], - - [check_2b(Type, ?HTTP_BUCKET, Client, rhc) || - {Type, Client} <- lists:zip(?TYPES, [H4, H3, H4])], - - lager:info("Modify data on side 2"), - %% Modify other side - [update_3b(Type, ?PB_BUCKET, Client, riakc_pb_socket) || - {Type, Client} <- lists:zip(?TYPES, [P3, P4, P3])], - - [update_3b(Type, ?HTTP_BUCKET, Client, rhc) || - {Type, Client} <- lists:zip(?TYPES, [H3, H4, H3])], - - lager:info("Check data is unmodified on side 1"), - %% verify values differ - [check_3a(Type, ?PB_BUCKET, Client, riakc_pb_socket) || - {Type, Client} <- lists:zip(?TYPES, [P2, P2, P1])], - - [check_3a(Type, ?HTTP_BUCKET, Client, rhc) || - {Type, Client} <- lists:zip(?TYPES, [H2, H2, H1])], - - %% heal - lager:info("Heal and check merged values"), - ok = rt:heal(PartInfo), - ok = rt:wait_for_cluster_service(Nodes, riak_kv), - - %% verify all nodes agree - [?assertEqual(ok, check_4(Type, ?PB_BUCKET, Client, riakc_pb_socket)) - || Type <- ?TYPES, Client <- PBClients], - - [?assertEqual(ok, check_4(Type, ?HTTP_BUCKET, Client, rhc)) - || Type <- ?TYPES, Client <- HTTPClients], - - [riakc_pb_socket:stop(C) || C <- PBClients], - - pass. - -create_pb_clients(Nodes) -> - [begin - C = rt:pbc(N), - riakc_pb_socket:set_options(C, [queue_if_disconnected]), - C - end || N <- Nodes]. - -create_http_clients(Nodes) -> - [ rt:httpc(N) || N <- Nodes ]. - -create_bucket_types([N1|_]=Nodes, Types) -> - lager:info("Creating bucket types with datatypes: ~p", [Types]), - [ rpc:call(N1, riak_core_bucket_type, create, - [Name, [{datatype, Type}, {allow_mult, true}]]) || - {Name, Type} <- Types ], - [rt:wait_until(N1, bucket_type_ready_fun(Name)) || {Name, _Type} <- Types], - [ rt:wait_until(N, bucket_type_matches_fun(Types)) || N <- Nodes]. - -bucket_type_ready_fun(Name) -> - fun(Node) -> - Res = rpc:call(Node, riak_core_bucket_type, activate, [Name]), - lager:info("is ~p ready ~p?", [Name, Res]), - Res == ok - end. 
- -bucket_type_matches_fun(Types) -> - fun(Node) -> - lists:all(fun({Name, Type}) -> - Props = rpc:call(Node, riak_core_bucket_type, get, - [Name]), - Props /= undefined andalso - proplists:get_value(allow_mult, Props, false) - andalso - proplists:get_value(datatype, Props) == Type - end, Types) - end. - - -update_1({BType, counter}, Bucket, Client, CMod) -> - lager:info("update_1: Updating counter"), - CMod:modify_type(Client, - fun(C) -> - riakc_counter:increment(5, C) - end, - {BType, Bucket}, ?KEY, ?MODIFY_OPTS); -update_1({BType, set}, Bucket, Client, CMod) -> - lager:info("update_1: Updating set"), - CMod:modify_type(Client, - fun(S) -> - riakc_set:add_element(<<"Riak">>, S) - end, - {BType, Bucket}, ?KEY, ?MODIFY_OPTS); -update_1({BType, map}, Bucket, Client, CMod) -> - lager:info("update_1: Updating map"), - CMod:modify_type(Client, - fun(M) -> - M1 = riakc_map:update( - {<<"friends">>, set}, - fun(S) -> - riakc_set:add_element(<<"Russell">>, - S) - end, M), - riakc_map:update( - {<<"followers">>, counter}, - fun(C) -> - riakc_counter:increment(10, C) - end, M1) - end, - {BType, Bucket}, ?KEY, ?MODIFY_OPTS). - -check_1({BType, counter}, Bucket, Client, CMod) -> - lager:info("check_1: Checking counter value is correct"), - check_value(Client,CMod,{BType, Bucket},?KEY,riakc_counter,5); -check_1({BType, set}, Bucket, Client, CMod) -> - lager:info("check_1: Checking set value is correct"), - check_value(Client,CMod,{BType, Bucket},?KEY,riakc_set,[<<"Riak">>]); -check_1({BType, map}, Bucket, Client, CMod) -> - lager:info("check_1: Checking map value is correct"), - check_value(Client, CMod, {BType, Bucket}, ?KEY, riakc_map, - [{{<<"followers">>, counter}, 10}, - {{<<"friends">>, set}, [<<"Russell">>]}]). - -update_2a({BType, counter}, Bucket, Client, CMod) -> - CMod:modify_type(Client, - fun(C) -> - riakc_counter:decrement(10, C) - end, - {BType, Bucket}, ?KEY, ?MODIFY_OPTS); -update_2a({BType, set}, Bucket, Client, CMod) -> - CMod:modify_type(Client, - fun(S) -> - riakc_set:add_element( - <<"Voldemort">>, - riakc_set:add_element(<<"Cassandra">>, S)) - end, - {BType, Bucket}, ?KEY, ?MODIFY_OPTS); -update_2a({BType, map}, Bucket, Client, CMod) -> - CMod:modify_type(Client, - fun(M) -> - M1 = riakc_map:update( - {<<"friends">>, set}, - fun(S) -> - riakc_set:add_element(<<"Sam">>, S) - end, M), - riakc_map:update({<<"verified">>, flag}, fun(F) -> - riakc_flag:enable(F) - end, - M1) - end, - {BType, Bucket}, ?KEY, ?MODIFY_OPTS). - -check_2b({BType, counter}, Bucket, Client, CMod) -> - lager:info("check_2b: Checking counter value is unchanged"), - check_value(Client, CMod, {BType, Bucket}, ?KEY, riakc_counter, 5); -check_2b({BType, set},Bucket,Client,CMod) -> - lager:info("check_2b: Checking set value is unchanged"), - check_value(Client, CMod, {BType, Bucket}, ?KEY, riakc_set, [<<"Riak">>]); -check_2b({BType, map},Bucket,Client,CMod) -> - lager:info("check_2b: Checking map value is unchanged"), - check_value(Client, CMod, {BType, Bucket}, ?KEY, riakc_map, - [{{<<"followers">>, counter}, 10}, - {{<<"friends">>, set}, [<<"Russell">>]}]). 
- -update_3b({BType, counter}, Bucket, Client, CMod) -> - CMod:modify_type(Client, - fun(C) -> - riakc_counter:increment(2, C) - end, - {BType, Bucket}, ?KEY, ?MODIFY_OPTS); -update_3b({BType, set}, Bucket, Client, CMod) -> - CMod:modify_type(Client, - fun(S) -> - riakc_set:add_element(<<"Couchbase">>, S) - end, - {BType, Bucket}, ?KEY, ?MODIFY_OPTS); -update_3b({BType, map},Bucket,Client,CMod) -> - CMod:modify_type(Client, - fun(M) -> - M1 = riakc_map:erase({<<"friends">>, set}, M), - riakc_map:update( - {<<"emails">>, map}, - fun(MI) -> - riakc_map:update( - {<<"home">>, register}, - fun(R) -> - riakc_register:set( - <<"foo@bar.com">>, R) - end, MI) - end, - M1) - end, - {BType, Bucket}, ?KEY, ?MODIFY_OPTS). - -check_3a({BType, counter}, Bucket, Client, CMod) -> - lager:info("check_3a: Checking counter value is unchanged"), - check_value(Client,CMod,{BType, Bucket},?KEY,riakc_counter,-5); -check_3a({BType, set}, Bucket, Client, CMod) -> - lager:info("check_3a: Checking set value is unchanged"), - check_value(Client,CMod,{BType, Bucket},?KEY,riakc_set, - [<<"Cassandra">>, <<"Riak">>, <<"Voldemort">>]); -check_3a({BType, map}, Bucket, Client, CMod) -> - lager:info("check_3a: Checking map value is unchanged"), - check_value(Client, CMod, {BType, Bucket}, ?KEY, riakc_map, - [{{<<"followers">>, counter}, 10}, - {{<<"friends">>, set}, [<<"Russell">>, <<"Sam">>]}, - {{<<"verified">>, flag}, true}]). - -check_4({BType, counter}, Bucket, Client, CMod) -> - lager:info("check_4: Checking final merged value of counter"), - check_value(Client,CMod,{BType, Bucket},?KEY,riakc_counter,-3, - [{pr, 3}, {notfound_ok, false}]); -check_4({BType, set}, Bucket, Client, CMod) -> - lager:info("check_4: Checking final merged value of set"), - check_value(Client, - CMod, {BType, Bucket}, - ?KEY, - riakc_set, - [<<"Cassandra">>, <<"Couchbase">>, <<"Riak">>, <<"Voldemort">>], - [{pr, 3}, {notfound_ok, false}]); -check_4({BType, map}, Bucket, Client, CMod) -> - lager:info("check_4: Checking final merged value of map"), - check_value(Client, CMod, {BType, Bucket}, ?KEY, riakc_map, - [{{<<"emails">>, map}, - [ - {{<<"home">>, register}, <<"foo@bar.com">>} - ]}, - {{<<"followers">>, counter}, 10}, - {{<<"friends">>, set}, [<<"Sam">>]}, - {{<<"verified">>, flag}, true}], - [{pr, 3}, {notfound_ok, false}]). - -check_value(Client, CMod, Bucket, Key, DTMod, Expected) -> - check_value(Client,CMod,Bucket,Key,DTMod,Expected, - [{r,2}, {notfound_ok, true}, {timeout, 5000}]). - -check_value(Client, CMod, Bucket, Key, DTMod, Expected, Options) -> - rt:wait_until(fun() -> - try - Result = CMod:fetch_type(Client, Bucket, Key, - Options), - lager:info("Expected ~p~n got ~p~n", [Expected, Result]), - ?assertMatch({ok, _}, Result), - {ok, C} = Result, - ?assertEqual(true, DTMod:is_type(C)), - ?assertEqual(Expected, DTMod:value(C)), - true - catch - Type:Error -> - lager:debug("check_value(~p,~p,~p,~p,~p) " - "failed: ~p:~p", [Client, Bucket, - Key, DTMod, - Expected, Type, - Error]), - false - end - end). diff --git a/tests/verify_dt_upgrade.erl b/tests/verify_dt_upgrade.erl deleted file mode 100644 index c43f3fb09..000000000 --- a/tests/verify_dt_upgrade.erl +++ /dev/null @@ -1,91 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. 
You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- -%%% @doc riak_test to verify CRDTs and counter values can properly upgrade -%%% from 1.4 to 2.0; verifies that 1.4 counters can work with types. -%%% Currently, this is mainly a placeholder for future 2.0+ tests --module(verify_dt_upgrade). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(COUNTER_BUCKET, <<"cbucket">>). - -confirm() -> - TestMetaData = riak_test_runner:metadata(), - OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - - Nodes = [Node1|_] = rt:build_cluster([OldVsn, OldVsn, OldVsn, OldVsn]), - - verify_counter_converge:set_allow_mult_true(Nodes, ?COUNTER_BUCKET), - populate_counters(Node1), - - [begin - verify_counters(Node), - upgrade(Node, current) - end || Node <- Nodes], - - verify_counters(Node1), - pass. - -%% @private - -%% @doc populate a counter via http and pbc -populate_counters(Node) -> - lager:info("Writing counters to ~p", [Node]), - rt:wait_for_service(Node, riak_kv), - ?assertEqual(ok, rt:wait_until(Node, fun has_counter_capability/1)), - - RHC = rt:httpc(Node), - ?assertMatch(ok, rhc:counter_incr(RHC, ?COUNTER_BUCKET, <<"httpkey">>, 2)), - ?assertMatch({ok, 2}, rhc:counter_val(RHC, ?COUNTER_BUCKET, <<"httpkey">>)), - - PBC = rt:pbc(Node), - ?assertEqual(ok, riakc_pb_socket:counter_incr(PBC, ?COUNTER_BUCKET, <<"pbkey">>, 4)), - ?assertEqual({ok, 4}, riakc_pb_socket:counter_val(PBC, ?COUNTER_BUCKET, <<"pbkey">>)), - ok. - -%% @doc check that the counter values exist after upgrade, and -%% check that you can get via default bucket -verify_counters(Node) -> - lager:info("Verifying counters on ~p", [Node]), - RHC = rt:httpc(Node), - ?assertMatch({ok, 4}, rhc:counter_val(RHC, ?COUNTER_BUCKET, <<"pbkey">>)), - - PBC = rt:pbc(Node), - ?assertEqual({ok, 2}, riakc_pb_socket:counter_val(PBC, ?COUNTER_BUCKET, <<"httpkey">>)), - - %% Check that 1.4 counters work with bucket types - case catch rt:capability(Node, {riak_core, bucket_types}) of - true -> - ?assertEqual({ok, {counter, 4, undefined}}, riakc_pb_socket:fetch_type(PBC, {<<"default">>, ?COUNTER_BUCKET}, <<"pbkey">>)); - _ -> - ok - end, - ok. - -upgrade(Node, NewVsn) -> - lager:info("Upgrading ~p to ~p", [Node, NewVsn]), - rt:upgrade(Node, NewVsn), - rt:wait_for_service(Node, riak_kv), - ok. - -has_counter_capability(Node) -> - Types = rt:capability(Node, {riak_kv, crdt}), - is_list(Types) andalso lists:member(pncounter, Types). diff --git a/tests/verify_dvv_repl.erl b/tests/verify_dvv_repl.erl deleted file mode 100644 index c7354d3a5..000000000 --- a/tests/verify_dvv_repl.erl +++ /dev/null @@ -1,170 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License.
You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%%% @copyright (C) 2014, Basho Technologies -%%% @doc -%%% riak_test for repl-caused sibling explosion. Encodes the scenario as -%%% described to me in hipchat. Write something to cluster B, enable -%%% realtime repl from A to B, then read, resolve, and write an object -%%% to A 100 times. Without DVV you have 100 siblings on B; with it, you -%%% have 2 (the original B write, and the converged A writes) -%%% @end - --module(verify_dvv_repl). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"dvv-repl-bucket">>). --define(KEY, <<"dvv-repl-key">>). --define(KEY2, <<"dvv-repl-key2">>). - -confirm() -> - inets:start(), - - {{ClientA, ClusterA}, {ClientB, ClusterB}} = make_clusters(), - - %% Write data to B - write_object(ClientB), - - %% Connect for real time repl A->B - connect_realtime(ClusterA, ClusterB), - - IsReplicating = make_replicate_test_fun(ClientA, ClientB), - - rt:wait_until(IsReplicating), - - %% Update ClusterA 100 times - [write_object(ClientA) || _ <- lists:seq(1, 100)], - - %% Get the object, and see if it has 100 siblings (not the two it - %% should have.) Turn off DVV in `make_cluster` and see the - %% siblings explode! - AObj = get_object(ClientA), - - Expected = lists:seq(1, 100), - - %% Having up to 3 siblings could happen in rare cases when the writes hit - %% different nodes concurrently in the n_val=3 preflist. - ?assertMatch(Count when Count =< 3, riakc_obj:value_count(AObj)), - WaitFun = fun() -> - lager:info("Checking sink object"), - BObj = get_object(ClientB), - Resolved0 = resolve(riakc_obj:get_values(BObj)), - Resolved = lists:sort(sets:to_list(Resolved0)), - case Resolved of - Expected -> - BCount = riakc_obj:value_count(BObj), - ?assertMatch(C when C =< 6, BCount), - true; - _ -> - false - end - end, - ?assertEqual(ok, rt:wait_until(WaitFun)), - pass. - - -make_replicate_test_fun(From, To) -> - fun() -> - Obj = riakc_obj:new(?BUCKET, ?KEY2, <<"am I replicated yet?">>), - ok = riakc_pb_socket:put(From, Obj), - case riakc_pb_socket:get(To, ?BUCKET, ?KEY2) of - {ok, _} -> - true; - {error, notfound} -> - false - end - end. - -make_clusters() -> - Conf = [{riak_repl, [{fullsync_on_connect, false}, - {fullsync_interval, disabled}]}, - {riak_core, [{default_bucket_props, - [{dvv_enabled, true}, - {allow_mult, true}]}]}], - Nodes = rt:deploy_nodes(6, Conf, [riak_kv, riak_repl]), - {ClusterA, ClusterB} = lists:split(3, Nodes), - A = make_cluster(ClusterA, "A"), - B = make_cluster(ClusterB, "B"), - {A, B}. - -make_cluster(Nodes, Name) -> - repl_util:make_cluster(Nodes), - repl_util:name_cluster(hd(Nodes), Name), - repl_util:wait_until_leader_converge(Nodes), - C = rt:pbc(hd(Nodes)), - riakc_pb_socket:set_options(C, [queue_if_disconnected]), - {C, Nodes}. - -write_object([]) -> - ok; -write_object([Client | Rest]) -> - ok = write_object(Client), - write_object(Rest); -write_object(Client) -> - fetch_resolve_write(Client).
- -get_object(Client) -> - case riakc_pb_socket:get(Client, ?BUCKET, ?KEY) of - {ok, Obj} -> - Obj; - _ -> - riakc_obj:new(?BUCKET, ?KEY) - end. - -fetch_resolve_write(Client) -> - Obj = get_object(Client), - Value = resolve_update(riakc_obj:get_values(Obj)), - Obj3 = riakc_obj:update_metadata(riakc_obj:update_value(Obj, Value), dict:new()), - ok = riakc_pb_socket:put(Client, Obj3). - -resolve(Values) -> - lists:foldl(fun(V0, Acc) -> - V = binary_to_term(V0), - sets:union(V, Acc) - end, - sets:new(), - Values). - -resolve_update([]) -> - sets:add_element(1, sets:new()); -resolve_update(Values) -> - Resolved = resolve(Values), - NewValue = lists:max(sets:to_list(Resolved)) + 1, - sets:add_element(NewValue, Resolved). - -%% Set up one way RT repl -connect_realtime(ClusterA, ClusterB) -> - lager:info("repl power...ACTIVATE!"), - LeaderA = get_leader(hd(ClusterA)), - MgrPortB = get_mgr_port(hd(ClusterB)), - repl_util:connect_cluster(LeaderA, "127.0.0.1", MgrPortB), - ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), - repl_util:enable_realtime(LeaderA, "B"), - repl_util:start_realtime(LeaderA, "B"). - -get_leader(Node) -> - rpc:call(Node, riak_core_cluster_mgr, get_leader, []). - -get_mgr_port(Node) -> - {ok, {_IP, Port}} = rpc:call(Node, application, get_env, - [riak_core, cluster_mgr]), - Port. diff --git a/tests/verify_dynamic_ring.erl b/tests/verify_dynamic_ring.erl deleted file mode 100644 index afd87f267..000000000 --- a/tests/verify_dynamic_ring.erl +++ /dev/null @@ -1,213 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_dynamic_ring). --behaviour(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"dynring">>). --define(W, 2). --define(R, 2). --define(START_SIZE, 16). --define(EXPANDED_SIZE, 64). --define(SHRUNK_SIZE, 8). 
- -confirm() -> - %% test requires allow_mult=false b/c of rt:systest_read - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), - rt:update_app_config(all, [{riak_core, - [{ring_creation_size, ?START_SIZE}]}]), - [ANode, AnotherNode, YetAnother, _ReplacingNode] = _AllNodes = rt:deploy_nodes(4), - NewNodes = Nodes = [ANode, AnotherNode, YetAnother], - %% This assignment for `NewNodes' is commented until riak_core - %% issue #570 is resolved - %% NewNodes = [ANode, YetAnother, ReplacingNode], - rt:join(AnotherNode, ANode), - rt:join(YetAnother, ANode), - rt:wait_until_nodes_agree_about_ownership(Nodes), - rt:wait_until_ring_converged(Nodes), - rt:wait_until_no_pending_changes(Nodes), - - test_resize(?START_SIZE, ?EXPANDED_SIZE, ANode, Nodes), - - test_resize(?EXPANDED_SIZE, ?SHRUNK_SIZE, ANode, Nodes), - wait_until_extra_vnodes_shutdown(Nodes), - wait_until_extra_proxies_shutdown(Nodes), - - lager:info("writing 500 keys"), - ?assertEqual([], rt:systest_write(ANode, 1, 500, ?BUCKET, ?W)), - test_resize(?SHRUNK_SIZE, ?START_SIZE, ANode, Nodes, {501, 750}), - lager:info("verifying previously written data"), - ?assertEqual([], rt:systest_read(ANode, 1, 500, ?BUCKET, ?R)), - - test_resize(?START_SIZE, ?EXPANDED_SIZE, ANode, Nodes), - lager:info("verifying previously written data"), - ?assertEqual([], rt:systest_read(ANode, 1, 750, ?BUCKET, ?R)), - - %% This following test code for force-replace is commented until - %% riak_core issue #570 is resolved. At that time the preceding 3 - %% lines should also be removed - - %% lager:info("testing force-replace during resize"), - %% submit_resize(?EXPANDED_SIZE, ANode), - %% %% sleep for a second, yes i know this is nasty but we just care that the resize has - %% %% been submitted and started, we aren't really waiting on a condition - %% timer:sleep(3000), - %% rpc:multicall(Nodes, riak_core_handoff_manager, kill_handoffs, []), - %% Statuses = rpc:multicall(Nodes, riak_core_handoff_manager, status, []), - %% lager:info("Handoff statuses: ~p", [Statuses]), - %% ok = rpc:call(ReplacingNode, riak_core, staged_join, [ANode]), - %% rt:wait_until_ring_converged(AllNodes), - %% ok = rpc:call(ANode, riak_core_claimant, force_replace, [AnotherNode, ReplacingNode]), - %% {ok, _, _} = rpc:call(ANode, riak_core_claimant, plan, []), - %% ok = rpc:call(ANode, riak_core_claimant, commit, []), - %% rpc:multicall(AllNodes, riak_core_handoff_manager, set_concurrency, [4]), - %% rt:wait_until_no_pending_changes(NewNodes), - %% assert_ring_size(?EXPANDED_SIZE, NewNodes), - %% lager:info("verifying written data"), - %% ?assertEqual([], rt:systest_read(ANode, 1, 750, ?BUCKET, ?R)), - - test_resize(?EXPANDED_SIZE, ?SHRUNK_SIZE, ANode, NewNodes), - lager:info("verifying written data"), - ?assertEqual([], rt:systest_read(ANode, 1, 750, ?BUCKET, ?R)), - wait_until_extra_vnodes_shutdown(NewNodes), - wait_until_extra_proxies_shutdown(NewNodes), - - lager:info("submitting resize to subsequently abort. ~p -> ~p", [?SHRUNK_SIZE, ?START_SIZE]), - submit_resize(?START_SIZE, ANode), - %% sleep for a second, yes i know this is nasty but we just care that the resize has - %% made some progress. 
not really waiting on a condition - timer:sleep(1000), - lager:info("aborting resize operation, verifying cluster still has ~p partitions", - [?SHRUNK_SIZE]), - rpc:multicall(NewNodes, riak_core_handoff_manager, kill_handoffs, []), - rt:wait_until_ring_converged(NewNodes), - abort_resize(ANode), - rt:wait_until_no_pending_changes(NewNodes), - lager:info("verifying running vnodes abandoned during aborted resize are shutdown"), - %% force handoffs so we don't wait on vnode manager tick - rpc:multicall(Nodes, riak_core_vnode_manager, force_handoffs, []), - wait_until_extra_vnodes_shutdown(NewNodes), - lager:info("verifying vnodes abandoned during aborted resize have proxies shutdown"), - wait_until_extra_proxies_shutdown(NewNodes), - rt:wait_until_ring_converged(NewNodes), - assert_ring_size(?SHRUNK_SIZE, NewNodes), - lager:info("verifying written data"), - ?assertEqual([], rt:systest_read(ANode, 1, 750, ?BUCKET, ?R)), - - pass. - -test_resize(CurrentSize, NewSize, ANode, Nodes) -> - test_resize(CurrentSize, NewSize, ANode, Nodes, {undefined, undefined}). - -test_resize(CurrentSize, NewSize, ANode, Nodes, {WriteStart,WriteEnd}) -> - assert_ring_size(CurrentSize, Nodes), - Str = case CurrentSize > NewSize of - true -> "shrinking"; - false -> "expansion" - end, - lager:info("testing ring ~s. ~p -> ~p", [Str, CurrentSize, NewSize]), - rt:wait_until_ring_converged(Nodes), - submit_resize(NewSize, ANode), - write_during_resize(ANode, WriteStart, WriteEnd), - rt:wait_until_no_pending_changes(Nodes), - rt:wait_until_ring_converged(Nodes), - assert_ring_size(NewSize, ANode), - verify_write_during_resize(ANode, WriteStart, WriteEnd). - -write_during_resize(_, Start, End) when Start =:= undefined orelse End =:= undefined -> - ok; -write_during_resize(Node, Start, End) -> - Pid = self(), - spawn(fun() -> - case rt:systest_write(Node, Start, End, ?BUCKET, ?W) of - [] -> - Pid ! done_writing; - Ers -> - Pid ! {errors_writing, Ers} - end - end). - -verify_write_during_resize(_, Start, End) when Start =:= undefined orelse End =:= undefined -> - ok; -verify_write_during_resize(Node, Start, End) -> - receive - done_writing -> - lager:info("verifying data written during operation"), - ?assertEqual([], rt:systest_read(Node, Start, End, ?BUCKET, ?R)), - ok; - {errors_writing, Ers} -> - lager:error("errors were encountered while writing during operation: ~p", [Ers]), - throw(writes_failed) - after - 10000 -> - lager:error("failed to complete writes during operation before timeout"), - throw(writes_timedout) - end. - -submit_resize(NewSize, Node) -> - ?assertEqual(ok, rpc:call(Node, riak_core_claimant, resize_ring, [NewSize])), - {ok, _, _} = rpc:call(Node, riak_core_claimant, plan, []), - ?assertEqual(ok, rpc:call(Node, riak_core_claimant, commit, [])). - -abort_resize(Node) -> - ?assertEqual(ok, rpc:call(Node, riak_core_claimant, abort_resize, [])), - {ok, _, _} = rpc:call(Node, riak_core_claimant, plan, []), - ?assertEqual(ok, rpc:call(Node, riak_core_claimant, commit, [])). - -assert_ring_size(Size, Nodes) when is_list(Nodes) -> - [assert_ring_size(Size, Node) || Node <- Nodes]; -assert_ring_size(Size, Node) -> - {ok, R} = rpc:call(Node, riak_core_ring_manager, get_my_ring, []), - ?assertEqual(Size, riak_core_ring:num_partitions(R)). 
- -wait_until_extra_vnodes_shutdown([]) -> - ok; -wait_until_extra_vnodes_shutdown([Node | Nodes]) -> - wait_until_extra_vnodes_shutdown(Node), - wait_until_extra_vnodes_shutdown(Nodes); -wait_until_extra_vnodes_shutdown(Node) -> - {ok, R} = rpc:call(Node, riak_core_ring_manager, get_my_ring, []), - AllIndexes = [Idx || {Idx, _} <- riak_core_ring:all_owners(R)], - F = fun(_N) -> - Running = rpc:call(Node, riak_core_vnode_manager, all_index_pid, [riak_kv_vnode]), - StillRunning = [Idx || {Idx, _} <- Running, not lists:member(Idx, AllIndexes)], - length(StillRunning) =:= 0 - end, - ?assertEqual(ok, rt:wait_until(Node, F)). - -wait_until_extra_proxies_shutdown([]) -> - ok; -wait_until_extra_proxies_shutdown([Node | Nodes]) -> - wait_until_extra_proxies_shutdown(Node), - wait_until_extra_proxies_shutdown(Nodes); -wait_until_extra_proxies_shutdown(Node) -> - {ok, R} = rpc:call(Node, riak_core_ring_manager, get_my_ring, []), - AllIndexes = [Idx || {Idx, _} <- riak_core_ring:all_owners(R)], - F = fun(_N) -> - Running = running_vnode_proxies(Node), - StillRunning = [Idx || Idx <- Running, not lists:member(Idx, AllIndexes)], - length(StillRunning) =:= 0 - end, - ?assertEqual(ok, rt:wait_until(Node, F)). - -running_vnode_proxies(Node) -> - Children = rpc:call(Node, supervisor, which_children, [riak_core_vnode_proxy_sup]), - [Idx || {{_,Idx},Pid,_,_} <- Children, is_pid(Pid)]. diff --git a/tests/verify_handoff.erl b/tests/verify_handoff.erl deleted file mode 100644 index 211a14167..000000000 --- a/tests/verify_handoff.erl +++ /dev/null @@ -1,146 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_handoff). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -%% We've got a separate test for capability negotiation and other mechanisms, so the test here is fairly -%% straightforward: get a list of different versions of nodes and join them into a cluster, making sure that -%% each time our data has been replicated: -confirm() -> - NTestItems = 1000, %% How many test items to write/verify? - NTestNodes = 3, %% How many nodes to spin up for tests? - TestMode = false, %% Set to false for "production tests", true if too slow. - EncodingTypes = [default, encode_raw, encode_zlib], %% Usually, you won't want to fiddle with these. - - [run_test(TestMode, NTestItems, NTestNodes, EncodingType) || - EncodingType <- EncodingTypes], - - lager:info("Test verify_handoff passed."), - pass. 
- -run_test(TestMode, NTestItems, NTestNodes, Encoding) -> - lager:info("Testing handoff (items ~p, encoding: ~p)", [NTestItems, Encoding]), - - %% This resets nodes, cleans up stale directories, etc.: - lager:info("Cleaning up..."), - rt:setup_harness(dummy, dummy), - - lager:info("Spinning up test nodes"), - [RootNode | TestNodes] = Nodes = deploy_test_nodes(TestMode, NTestNodes), - - rt:wait_for_service(RootNode, riak_kv), - - set_handoff_encoding(Encoding, Nodes), - - %% Insert delay into handoff folding to test the efficacy of the - %% handoff heartbeat addition - [rt_intercept:add(N, {riak_core_handoff_sender, - [{{visit_item, 3}, delayed_visit_item_3}]}) - || N <- Nodes], - - lager:info("Populating root node."), - rt:systest_write(RootNode, NTestItems), - %% write one object with a bucket type - rt:create_and_activate_bucket_type(RootNode, <<"type">>, []), - %% allow cluster metadata some time to propagate - rt:systest_write(RootNode, 1, 2, {<<"type">>, <<"bucket">>}, 2), - - %% Test handoff on each node: - lager:info("Testing handoff for cluster."), - lists:foreach(fun(TestNode) -> test_handoff(RootNode, TestNode, NTestItems) end, TestNodes), - - %% Prepare for the next call to our test (we aren't polite about it, it's faster that way): - lager:info("Bringing down test nodes."), - lists:foreach(fun(N) -> rt:brutal_kill(N) end, TestNodes), - - %% The "root" node can't leave() since it's the only node left: - lager:info("Stopping root node."), - rt:brutal_kill(RootNode). - -set_handoff_encoding(default, _) -> - lager:info("Using default encoding type."), - true; -set_handoff_encoding(Encoding, Nodes) -> - lager:info("Forcing encoding type to ~p.", [Encoding]), - - %% Update all nodes (capabilities are not re-negotiated): - [begin - rt:update_app_config(Node, override_data(Encoding)), - assert_using(Node, {riak_kv, handoff_data_encoding}, Encoding) - end || Node <- Nodes]. - -override_data(Encoding) -> - [ - { riak_core, - [ - { override_capability, - [ - { handoff_data_encoding, - [ - { use, Encoding}, - { prefer, Encoding} - ] - } - ] - } - ]}]. - -%% See if we get the same data back from our new nodes as we put into the root node: -test_handoff(RootNode, NewNode, NTestItems) -> - - lager:info("Waiting for service on new node."), - rt:wait_for_service(NewNode, riak_kv), - - lager:info("Joining new node with cluster."), - rt:join(NewNode, RootNode), - ?assertEqual(ok, rt:wait_until_nodes_ready([RootNode, NewNode])), - rt:wait_until_no_pending_changes([RootNode, NewNode]), - - %% See if we get the same data back from the joined node that we added to the root node. - %% Note: systest_read() returns /non-matching/ items, so getting nothing back is good: - lager:info("Validating data after handoff:"), - Results = rt:systest_read(NewNode, NTestItems), - ?assertEqual(0, length(Results)), - Results2 = rt:systest_read(RootNode, 1, 2, {<<"type">>, <<"bucket">>}, 2), - ?assertEqual(0, length(Results2)), - lager:info("Data looks ok."). - -assert_using(Node, {CapabilityCategory, CapabilityName}, ExpectedCapabilityName) -> - lager:info("assert_using ~p =:= ~p", [ExpectedCapabilityName, CapabilityName]), - ExpectedCapabilityName =:= rt:capability(Node, {CapabilityCategory, CapabilityName}).
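%% Editor's sketch (assumption, not part of the original patch): the
%% delayed_visit_item_3 intercept installed above lives in riak_test's
%% intercepts/ directory, so its body is not shown in this diff. An
%% intercept of that name would typically take the following shape:
%% riak_test renames the intercepted module with an _orig suffix, so the
%% original function stays callable from the intercept.
delayed_visit_item_3(K, V, Acc) ->
    %% slow each item down so the transfer runs long enough to need heartbeats
    timer:sleep(1),
    riak_core_handoff_sender_orig:visit_item_orig(K, V, Acc).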
- -%% For some testing purposes, making these limits smaller is helpful: -deploy_test_nodes(false, N) -> - Config = [{riak_core, [{ring_creation_size, 8}, - {handoff_acksync_threshold, 20}, - {handoff_receive_timeout, 2000}]}], - rt:deploy_nodes(N, Config); -deploy_test_nodes(true, N) -> - lager:info("WARNING: Using turbo settings for testing."), - Config = [{riak_core, [{forced_ownership_handoff, 8}, - {ring_creation_size, 8}, - {handoff_concurrency, 8}, - {vnode_inactivity_timeout, 1000}, - {handoff_acksync_threshold, 20}, - {handoff_receive_timeout, 2000}, - {gossip_limit, {10000000, 60000}}]}], - rt:deploy_nodes(N, Config). diff --git a/tests/verify_handoff_mixed.erl b/tests/verify_handoff_mixed.erl deleted file mode 100644 index 9439109c4..000000000 --- a/tests/verify_handoff_mixed.erl +++ /dev/null @@ -1,186 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc Test basic handoff in mixed-version clusters. This was born -%% out of a bug found in the upgrade of vnode fold requests: -%% https://github.com/basho/riak/issues/407 -%% -%% Basic test: -%% - load data into a new node -%% - join an old node to it -%% - wait for handoff to finish -%% -%% Node versions used are `current' and whatever the test runner has -%% set the `upgrade_version' metadata to (`previous' by default). -%% -%% Handoff uses riak_core_fold_req_v* commands, and riak issue #407 -%% tracked a problem with upgrading that command from 1.4.2 format to -%% 2.0.0pre3 format. --module(verify_handoff_mixed). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --include("rt_pipe.hrl"). - --define(KV_BUCKET, <<"vhm_kv">>). --define(KV_COUNT, 1000). - --define(SEARCH_BUCKET, <<"vhm_search">>). --define(SEARCH_COUNT, 1000). - --define(PIPE_COUNT, 100). - --define(FOLD_CAPABILITY, {riak_core,fold_req_version}). 
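%% Editor's sketch (not part of the original patch): the capability
%% negotiation step the test below hinges on, in isolation. A mixed
%% cluster renegotiates each shared capability down to a value every
%% member supports, so after the join the current node should report the
%% old node's fold-req version. wait_for_negotiated_fold/2 is a
%% hypothetical helper name; both rt calls appear in the test itself.
wait_for_negotiated_fold(CurrentNode, OldNode) ->
    %% v1 is the default if the old node predates the capability
    OldFold = rt:capability(OldNode, {riak_core, fold_req_version}, v1),
    ok = rt:wait_until_capability(CurrentNode, {riak_core, fold_req_version}, OldFold),
    OldFold.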
- -confirm() -> - %% this `upgrade_version' lookup was copied from loaded_upgrade - UpgradeVsn = proplists:get_value(upgrade_version, - riak_test_runner:metadata(), - previous), - SearchEnabled = [{riak_search, [{enabled, true}]}], - Versions = [{current, SearchEnabled}, - {UpgradeVsn, SearchEnabled}], - Services = [riak_kv, riak_search, riak_pipe], - [Current, Old] = Nodes = rt:deploy_nodes(Versions, Services), - - prepare_vnodes(Current), - - %% before joining, learn what fold req the old version used, - %% so we can know when the cluster has negotiated to it - OldFold = rt:capability(Old, ?FOLD_CAPABILITY, v1), - - %% now link the nodes together and wait for handoff to complete - ok = rt:join(Old, Current), - ok = rt:wait_until_all_members(Nodes), - ok = rt:wait_until_ring_converged(Nodes), - - %% the calls to ..._no_pending_changes and ..._transfers_complete - %% speed up the timing of handoff such that it will happen before - %% capability renegotiation if we don't wait here - this is still - %% technically race-prone, but negotiation usually happens *much* - %% sooner than handoff at normal timing - lager:info("Wait for fold_req_version == ~p", [OldFold]), - ok = rt:wait_until_capability(Current, ?FOLD_CAPABILITY, OldFold), - - %% this will timeout if wrong fix is in place - %% (riak_kv_vnode would infinite-loop v1 fold requests) - %% or if no fix is in place - %% (riak_pipe_vnode would drop v1 fold requests on the floor) - ok = rt:wait_until_no_pending_changes(Nodes), - ok = rt:wait_until_transfers_complete(Nodes), - - %% this will error if wrong fix is in place - %% (riak_search forwards v1 fold requests) - ok = check_logs(), - pass. - -%% @doc get vnodes running on Node, such that they'll be ready to -%% handoff when we join the other node -prepare_vnodes(Node) -> - prepare_kv_vnodes(Node), - prepare_search_vnodes(Node), - prepare_pipe_vnodes(Node). - -prepare_kv_vnodes(Node) -> - lager:info("Preparing KV vnodes with keys 1-~b in bucket ~s", - [?KV_COUNT, ?KV_BUCKET]), - C = rt:pbc(Node), - lists:foreach( - fun(KV) -> - ok = riakc_pb_socket:put(C, riakc_obj:new(?KV_BUCKET, KV, KV)) - end, - [ list_to_binary(integer_to_list(N)) || N <- lists:seq(1, ?KV_COUNT) ]), - riakc_pb_socket:stop(C). - -prepare_search_vnodes(Node) -> - lager:info("Preparing Search vnodes with keys 1000-~b in bucket ~s", - [1000+?SEARCH_COUNT, ?SEARCH_BUCKET]), - rt:enable_search_hook(Node, ?SEARCH_BUCKET), - C = rt:pbc(Node), - lists:foreach( - fun(KV) -> - O = riakc_obj:new(?SEARCH_BUCKET, KV, KV, "text/plain"), - ok = riakc_pb_socket:put(C, O) - end, - [ list_to_binary(integer_to_list(N)) - || N <- lists:seq(1000, 1000+?SEARCH_COUNT) ]), - riakc_pb_socket:stop(C). - -prepare_pipe_vnodes(Node) -> - %% the riak_pipe_w_pass worker produces no archive, but the vnode - %% still sends its queue (even if empty) through handoff - Spec = [#fitting_spec{name=vhm, module=riak_pipe_w_pass}], - %% keep outputs out of our mailbox - DummySink = spawn_link(fun() -> receive never -> ok end end), - Options = [{sink, #fitting{pid=DummySink}}], - - lager:info("Filling a pipe with ~b inputs", [?PIPE_COUNT]), - {ok, Pipe} = rpc:call(Node, riak_pipe, exec, [Spec, Options]), - lists:foreach( - fun(I) -> ok = rpc:call(Node, riak_pipe, queue_work, [Pipe, I]) end, - lists:seq(1, ?PIPE_COUNT)).
- -check_logs() -> - AppCounts = sum_app_handoff(), - lager:info("Found handoff counts in logs: ~p", [AppCounts]), - - %% make sure all of our apps completed some handoff - ExpectedApps = lists:sort([riak_kv_vnode, - riak_search_vnode, - riak_pipe_vnode]), - FoundApps = lists:sort([ A || {A, _} <- AppCounts ]), - ?assertEqual(ExpectedApps, FoundApps), - - ZeroHandoff = [ A || {_, Count}=A <- AppCounts, - Count == 0 ], - %% none of these apps should be reporting zero objects handed off - ?assertEqual([], ZeroHandoff), - ok. - -sum_app_handoff() -> - lager:info("Combing logs for handoff notes"), - lists:foldl( - fun({App, Count}, Acc) -> - orddict:update_counter(App, Count, Acc) - end, - [], - lists:append([ find_app_handoff(Log) || Log <- rt:get_node_logs() ])). - -find_app_handoff({Path, Port}) -> - case re:run(Path, "console\.log$") of - {match, _} -> - find_line(Port, file:read_line(Port)); - nomatch -> - %% save time not looking through other logs - [] - end. - -find_line(Port, {ok, Data}) -> - Re = "ownership transfer of ([a-z_]+).*" - "completed.*([0-9]+) objects", - case re:run(Data, Re, [{capture, all_but_first, list}]) of - {match, [App, Count]} -> - [{list_to_atom(App), list_to_integer(Count)} - |find_line(Port, file:read_line(Port))]; - nomatch -> - find_line(Port, file:read_line(Port)) - end; -find_line(_, _) -> - []. diff --git a/tests/verify_handoff_write_once.erl b/tests/verify_handoff_write_once.erl deleted file mode 100644 index 9753946ed..000000000 --- a/tests/verify_handoff_write_once.erl +++ /dev/null @@ -1,207 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2015 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_handoff_write_once). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --define(BUCKET_TYPE, <<"write_once">>). --define(BUCKET, {?BUCKET_TYPE, <<"write_once">>}). - - -%% @doc This test will run a handoff in the case of write_once buckets, verifying -%% that write-once entries are properly handed off as part of ownership handoff, -%% but more importantly, that riak_kv_vnode properly handles data being written into -%% riak while ownership handoff is taking place. -%% -%% This test will create two nodes each with a ring size of 8, and populate one node -%% with 1k entries. It will then join the two nodes to make a cluster of size 2, which -%% will result in ownership handoff of four of the vnodes (in each direction). -%% -%% We have intercepted the riak_kv_worker, which handles handoff for an individual vnode, -%% to ensure that we can send data through Riak while the cluster is in the handoff state, -%% thus ensuring that the riak_kv_vnode:handle_handoff_command callback is exercised in -%% the case of write_once buckets.
-%% -%% We install intercepts at key points in the vnode to measure how many times various key -%% parts of the code are called. -%% -%% We run the above test twice, once in the case where we are doing asynchronous writes on the -%% back end, and once when we are using synchronous writes. Currently, this is toggled via -%% the use of a back end that can support async writes (currently, only leveldb) -%% -confirm() -> - - AsyncConfig = create_config(riak_kv_eleveldb_backend), - AsyncCluster = run_test(AsyncConfig, true), - - rt:clean_cluster(AsyncCluster), - - SyncConfig = create_config(riak_kv_memory_backend), - _SyncCluster = run_test(SyncConfig, false), - - pass. - -create_config(Backend) -> - [{riak_core, [ - {default_bucket_props, [{n_val, 1}]}, - {ring_creation_size, 8}, - {handoff_acksync_threshold, 20}, - {handoff_concurrency, 4}, - {handoff_receive_timeout, 2000}, - {vnode_management_timer, 100}]}, - {riak_kv, [ - {storage_backend, Backend}]} - ]. - -run_test(Config, AsyncWrites) -> - %% - %% Deploy 2 nodes based on config. Wait for K/V to start on each node. - %% - lager:info("Deploying 2 nodes..."), - Cluster = [RootNode, NewNode] = rt:deploy_nodes(2, Config), - [rt:wait_for_service(Node, riak_kv) || Node <- [RootNode, NewNode]], - %% - %% Set up the intercepts - %% - lager:info("Setting up intercepts..."), - make_intercepts_tab(RootNode), - % This intercept will tell the background process (below) to send an event for each - % vnode that is being handed off (there will be 4 such vnodes, in this test case) - rt_intercept:add( - RootNode, {riak_kv_worker, [{{handle_work, 3}, handle_work_intercept}]} - ), - rt_intercept:add( - RootNode, {riak_kv_vnode, [ - %% Count every time riak_kv_vnode:handle_handoff_command/3 is called with a write_once message - {{handle_handoff_command, 3}, count_handoff_w1c_puts}, - %% Count every time riak_kv_vnode:handle_command/3 is called with a write_once message - {{handle_command, 3}, count_w1c_handle_command} - ]} - ), - true = rpc:call(RootNode, ets, insert, [intercepts_tab, {w1c_async_replies, 0}]), - true = rpc:call(RootNode, ets, insert, [intercepts_tab, {w1c_sync_replies, 0}]), - true = rpc:call(RootNode, ets, insert, [intercepts_tab, {w1c_put_counter, 0}]), - %% - %% Seed the root node with some data - %% - lager:info("Populating root node..."), - rt:create_and_activate_bucket_type(RootNode, ?BUCKET_TYPE, [{write_once, true}, {n_val, 1}]), - NTestItems = 100, - RingSize = proplists:get_value(ring_creation_size, proplists:get_value(riak_core, Config)), - [] = rt:systest_write(RootNode, 1, NTestItems, ?BUCKET, 1), - %% - %% Start an asynchronous proc which will send puts into riak during handoff.
- %% - lager:info("Joining new node with cluster..."), - start_proc(RootNode, NTestItems, RingSize div 2), - rt:join(NewNode, RootNode), - TotalSent = wait_until_async_writes_complete(), - ?assertMatch(ok, rt:wait_until_nodes_ready(Cluster)), - rt:wait_until_bucket_type_visible(Cluster, ?BUCKET_TYPE), - rt:wait_until_no_pending_changes(Cluster), - rt:wait_until_transfers_complete(Cluster), - %% - %% Verify the results - %% - lager:info("Validating data after handoff..."), - Results2 = rt:systest_read(NewNode, 1, TotalSent, ?BUCKET, 1), - ?assertMatch([], Results2), - lager:info("Read ~p entries.", [TotalSent]), - [{_, Count}] = rpc:call(RootNode, ets, lookup, [intercepts_tab, w1c_put_counter]), - ?assertEqual(RingSize div 2, Count), - lager:info("We handled ~p write_once puts during handoff.", [Count]), - [{_, W1CAsyncReplies}] = rpc:call(RootNode, ets, lookup, [intercepts_tab, w1c_async_replies]), - [{_, W1CSyncReplies}] = rpc:call(RootNode, ets, lookup, [intercepts_tab, w1c_sync_replies]), - case AsyncWrites of - true -> - ?assertEqual(NTestItems + RingSize div 2, W1CAsyncReplies), - ?assertEqual(0, W1CSyncReplies); - false -> - ?assertEqual(0, W1CAsyncReplies), - ?assertEqual(NTestItems + RingSize div 2, W1CSyncReplies) - end, - Cluster. - -make_intercepts_tab(Node) -> - SupPid = rpc:call(Node, erlang, whereis, [sasl_safe_sup]), - intercepts_tab = rpc:call(Node, ets, new, [intercepts_tab, [named_table, - public, set, {heir, SupPid, {}}]]). - - -%% -%% Notes on the background process and corresponding intercepts. -%% -%% The code below is used to spawn a background process that is globally -%% registered with the name rt_ho_w1c_proc. This process will -%% wait for a message from the riak_kv_worker handle_work intercept, -%% telling this proc to write a message into Riak. The timing of the -%% intercept is such that the write is guaranteed to take place while -%% handoff is in progress, but before the vnode has been told to finish. -%% Sending this message will trigger this background process to do a -%% write into Riak, which in turn will force the vnode's -%% handle_handoff_command to be called. -%% - --record(state, { - node, sender, k, pids=[], expected, init=true -}). - -start_proc(Node, NTestItems, Expected) -> - Self = self(), - Pid = spawn_link(fun() -> loop(#state{node=Node, sender=Self, k=NTestItems, expected=Expected}) end), - global:register_name(rt_ho_w1c_proc, Pid), - receive ok -> ok end. - -loop(#state{node=Node, sender=Sender, k=K, pids=Pids, expected=Expected, init=Init} = State) -> - case Init of - true -> - Sender ! ok; - _ -> ok - end, - receive - {write, Pid} -> - ThePids = [Pid | Pids], - NumPids = length(ThePids), - case NumPids of - Expected -> - %% - %% The number of expected vnodes are now in the handoff state. Do some writes, and send ok's - %% back to the waiting vnodes. Once they get the ok back, they will complete handoff. At this - %% point, we are done, so we can tell the test to proceed and wait for handoff to complete. - %% - [] = rt:systest_write(Node, K + 1, K + Expected, ?BUCKET, 1), - lager:info( - "Asynchronously wrote entries [~p..~p] during handoff. Sending ok's back to ~p waiting vnode(s)...", - [K + 1, K + Expected, NumPids] - ), - [ThePid ! ok || ThePid <- ThePids], - Sender ! (K + Expected); - _ -> - loop(State#state{pids=ThePids, init=false}) - end - end. - - -wait_until_async_writes_complete() -> - receive - K -> K - after 60000 -> - throw("Timed out after 60s waiting for async writes to complete.") - end. 
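%% Editor's sketch (assumption, not part of the original patch): the
%% count_w1c_handle_command intercept referenced in run_test/2 is defined
%% in riak_test's intercepts/ directory, not in this diff. Its usual shape
%% is to bump the shared ETS counter created by make_intercepts_tab/1 and
%% then delegate to the renamed original module; the real intercept also
%% pattern-matches so that only write-once put requests are counted.
count_w1c_handle_command(Req, Sender, State) ->
    ets:update_counter(intercepts_tab, w1c_put_counter, 1),
    riak_kv_vnode_orig:handle_command_orig(Req, Sender, State).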
\ No newline at end of file
diff --git a/tests/verify_kv_health_check.erl b/tests/verify_kv_health_check.erl
deleted file mode 100644
index ebca92d1e..000000000
--- a/tests/verify_kv_health_check.erl
+++ /dev/null
@@ -1,63 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(verify_kv_health_check).
--behaviour(riak_test).
--export([confirm/0]).
-
-confirm() ->
-    [Node1, Node2, _Node3] = rt:build_cluster(3),
-
-    %% add intercept that delays handling of vnode commands
-    %% on a single node (the "slow" node)
-    rt_intercept:load_code(Node1),
-    rt_intercept:add(Node1, {riak_kv_vnode,
-                             [{{handle_command, 3}, slow_handle_command}]}),
-    lager:info("Installed intercept to delay handling of requests by kv_vnode on ~p",
-               [Node1]),
-
-    %% let's use some reasonable threshold values so we aren't here forever
-    DisableThreshold = 10,
-    EnableThreshold = 9,
-    ok = rpc:call(Node1,
-                  application,
-                  set_env,
-                  [riak_kv, vnode_mailbox_limit, {EnableThreshold, DisableThreshold}]),
-
-    %% make DisableThreshold+5 requests and trigger the health check explicitly
-    %% we only need to back up one vnode's message queue on the node to fail the health check
-    %% so we read the same key again and again
-    C = rt:pbc(Node2),
-    [riakc_pb_socket:get(C, <<"b">>, <<"k">>) || _ <- lists:seq(1,DisableThreshold+5)],
-    ok = rpc:call(Node1, riak_core_node_watcher, check_health, [riak_kv]),
-
-    lager:info("health check should disable riak_kv on ~p shortly", [Node1]),
-    rt:wait_until(Node1,
-                  fun(N) ->
-                          Up = rpc:call(N, riak_core_node_watcher, services, [N]),
-                          not lists:member(riak_kv, Up)
-                  end),
-    lager:info("health check successfully disabled riak_kv on ~p", [Node1]),
-    lager:info("health check should re-enable riak_kv on ~p after some messages have been processed",
-               [Node1]),
-    %% wait for health check timer to do its thing, don't explicitly execute it
-    rt:wait_for_service(Node1, riak_kv),
-    lager:info("health check successfully re-enabled riak_kv on ~p", [Node1]),
-    riakc_pb_socket:stop(C),
-    pass.
diff --git a/tests/verify_leave.erl b/tests/verify_leave.erl
deleted file mode 100644
index 9bf079f38..000000000
--- a/tests/verify_leave.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(verify_leave).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--import(rt, [build_cluster/1,
-             leave/1,
-             wait_until_unpingable/1,
-             status_of_according_to/2,
-             remove/2]).
-
-confirm() ->
-    %% Bring up a 3-node cluster for the test
-    Nodes = build_cluster(3),
-    [Node1, Node2, Node3] = Nodes,
-
-    %% Have node2 leave
-    lager:info("Have ~p leave", [Node2]),
-    leave(Node2),
-    ?assertEqual(ok, wait_until_unpingable(Node2)),
-
-    %% Verify node2 no longer owns partitions, and all nodes believe it is invalid
-    lager:info("Verify ~p no longer owns partitions and all nodes believe "
-               "it is invalid", [Node2]),
-    Remaining1 = Nodes -- [Node2],
-    rt:wait_until_nodes_agree_about_ownership(Remaining1),
-    [?assertEqual(invalid, status_of_according_to(Node2, Node)) || Node <- Remaining1],
-
-    %% Have node1 remove node3
-    lager:info("Have ~p remove ~p", [Node1, Node3]),
-    remove(Node1, Node3),
-    ?assertEqual(ok, wait_until_unpingable(Node3)),
-
-    %% Verify node3 no longer owns partitions, and all nodes believe it is invalid
-    lager:info("Verify ~p no longer owns partitions, and all nodes believe "
-               "it is invalid", [Node3]),
-    Remaining2 = Remaining1 -- [Node3],
-    rt:wait_until_nodes_agree_about_ownership(Remaining2),
-    [?assertEqual(invalid, status_of_according_to(Node3, Node)) || Node <- Remaining2],
-    pass.
diff --git a/tests/verify_link_walk_urls.erl b/tests/verify_link_walk_urls.erl
deleted file mode 100644
index ff5fcb044..000000000
--- a/tests/verify_link_walk_urls.erl
+++ /dev/null
@@ -1,137 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-
-%% @doc Exercise link walking queries through the special URLs
-
--module(verify_link_walk_urls).
-
--include_lib("eunit/include/eunit.hrl").
-
--export([confirm/0]).
-
--define(NUM_NODES, 4).
-
--record(config, { ip, port, prefix }).
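[Editor's note: for readers unfamiliar with the URL scheme this test exercises, each path segment after the key is a link phase of the form Bucket,Tag,Keep, where Bucket and Tag filter which links to follow ("_" matches any) and Keep of 1 includes that phase's objects in the multipart response. As a hypothetical example against the graph built below (host, port, and "riak" prefix illustrative):

    $ curl http://127.0.0.1:8098/riak/a/1/_,next,1/_,next,1

would return the objects for both phases, i.e. v2 and v3 -- the same expectation the "_,next,1/_,next,1" case asserts.]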
-
-
-confirm() ->
-    [Node0 | _] = rt:build_cluster(?NUM_NODES),
-    Pbc = rt:pbc(Node0),
-
-    lager:info("Inserting linked graph"),
-    %%              (deleted)    (b/4,v4b) <-> (b/5,v5b)
-    %%                  |         /
-    %%   (a/1,v1) <-> (a/2,v2) <-> (a/3,v3) <-> (a/4,v4) <-> (a/5,v5) >
-    %%      ^_________________________________________________________|
-    put_obj(Pbc, "a", "1", "v1", [{"a", "2", "next"}]),
-    put_obj(Pbc, "a", "2", "v2", [{"a", "3", "next"}, {"a", "1", "prev"}, {"b", "2", "next"}]),
-    put_obj(Pbc, "a", "3", "v3", [{"a", "4", "next"}, {"b", "4", "next"}, {"a", "2", "prev"}]),
-    put_obj(Pbc, "a", "4", "v4", [{"a", "5", "next"}, {"a", "3", "prev"}]),
-    put_obj(Pbc, "a", "5", "v5", [{"a", "1", "next"}, {"a", "4", "prev"}]),
-
-    put_obj(Pbc, "b", "4", "v4b", [{"b", "5", "next"}, {"a", "3", "prev"}]),
-    put_obj(Pbc, "b", "5", "v5b", [{"b", "4", "prev"}]),
-
-    Config = get_config(Node0),
-
-    lager:info("Verifying link walk queries"),
-
-    verify_query(Config, "a", "1", "_,next,1",
-                 ["v2"]),
-    verify_query(Config, "a", "1", "_,_,1",
-                 ["v2"]),
-    verify_query(Config, "a", "1", "b,next,1",
-                 []),
-    verify_query(Config, "a", "1", "a,next,1",
-                 ["v2"]),
-    verify_query(Config, "a", "1", "_,next,1/_,next,1",
-                 ["v2", "v3"]),
-    verify_query(Config, "a", "1", "_,next,1/b,next,1/_,next,1/_,next,1/_,next,1",
-                 ["v2"]),
-    verify_query(Config, "a", "1", "_,next,1/_,next,1/_,next,1/_,next,1/_,next,1",
-                 ["v1", "v2", "v3", "v4", "v4b", "v5", "v5b"]),
-    verify_query(Config, "a", "1", "_,next,0/_,next,1/a,next,0/a,next,1",
-                 ["v3", "v5"]),
-    verify_query(Config, "a", "1", "_,next,0/_,next,0/_,prev,1/_,next,0/_,next,1",
-                 ["v2", "v4", "v4b"]),
-
-    verify_query(Config, "a", "3", "_,_,1",
-                 ["v2", "v4", "v4b"]),
-    verify_query(Config, "a", "3", "a,_,1",
-                 ["v2", "v4"]),
-    verify_query(Config, "a", "3", "b,_,1",
-                 ["v4b"]),
-    verify_query(Config, "a", "3", "_,_,0/_,next,1",
-                 ["v3", "v5", "v5b"]),
-
-    verify_query(Config, "a", "5", "_,prev,1",
-                 ["v4"]),
-    verify_query(Config, "a", "5", "_,prev,1/_,prev,1/_,prev,1/_,prev,1",
-                 ["v1", "v2", "v3", "v4"]),
-    verify_query(Config, "a", "5", "_,prev,1/_,prev,1/_,next,1/_,next,1",
-                 ["v3", "v4", "v4", "v4b", "v5", "v5b"]),
-    verify_query(Config, "a", "5", "b,next,1",
-                 []),
-    verify_query(Config, "a", "5", "_,_,1",
-                 ["v1", "v4"]),
-
-    lager:info("Goodbye, my friends"),
-    riakc_pb_socket:stop(Pbc),
-    pass.
-
-verify_query(Cfg, Bucket, Key, Query, Expected) ->
-    lager:info("Verifying (~p,~p) '~s' -> ~p", [Bucket, Key, Query, Expected]),
-    ?assertEqual(Expected, link_query(Cfg, Bucket, Key, Query)).
-
-
-get_config(Node0) ->
-    [{http, {IP, Port}}|_] = rt:connection_info(Node0),
-    Prefix =
-        rpc:call(Node0, app_helper, get_env, [riak_kv, raw_name, "riak"]),
-    #config{ip = IP, port = Port, prefix = Prefix}.
-
-
-link_query(#config{ip=IP, port=Port, prefix=Prefix}, B, K, LinkStr) ->
-    Url = lists:flatten(io_lib:format("http://~s:~p/~s/~s/~s/~s",
-                                      [IP, Port, Prefix, B, K, LinkStr])),
-    {ok, "200", _Headers, Body} = ibrowse:send_req(Url, [], get),
-    get_return_values(Body).
-
-%% @doc Extracts values from the multipart body in a hacky way copied from
-%% scripts in the fast track tutorial: simply filter out headers and
-%% multipart markers and the rest is our value lines.
-get_return_values(Body) ->
-    Lines = re:split(Body, "\r\n", [multiline, {return, list}]),
-    Vs = [Line || Line <- Lines,
-                  length(Line) > 0,
-                  string:str(Line, ":") =:= 0,
-                  string:str(Line, "--") =:= 0],
-    lists:sort(Vs).
- -put_obj(Pbc, Bucket, Key, Value, Links) when is_list(Bucket), is_list(Key), - is_list(Value), is_list(Links) -> - Obj = riakc_obj:new(list_to_binary(Bucket), - list_to_binary(Key), - list_to_binary(Value)), - Lns = [{{B, K}, T} || {B, K, T} <- Links], - Md = dict:store(<<"Links">>, Lns, dict:new()), - ObjWLinks = riakc_obj:update_metadata(Obj, Md), - riakc_pb_socket:put(Pbc, ObjWLinks). diff --git a/tests/verify_listkeys.erl b/tests/verify_listkeys.erl deleted file mode 100644 index 33af44937..000000000 --- a/tests/verify_listkeys.erl +++ /dev/null @@ -1,250 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_listkeys). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"listkeys_bucket">>). --define(NUM_BUCKETS, 1200). --define(NUM_KEYS, 1000). --define(UNDEFINED_BUCKET, <<"880bf69d-5dab-44ee-8762-d24c6f759ce1">>). --define(UNDEFINED_BUCKET_TYPE, <<"880bf69d-5dab-44ee-8762-d24c6f759ce1">>). - -confirm() -> - [Node1, Node2, Node3, Node4] = Nodes = rt:deploy_nodes(4), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - - lager:info("Nodes deployed, but not joined."), - - lager:info("Writing some known data to Node 1"), - put_keys(Node1, ?BUCKET, ?NUM_KEYS), - put_buckets(Node1, ?NUM_BUCKETS), - timer:sleep(2000), - check_it_all([Node1]), - - lists:foldl(fun(Node, [N1|_] = Cluster) -> - lager:info("An invitation to this party is cordially extended to ~p.", [Node]), - rt:join(Node, N1), - lager:info("Wait until there are no pending changes"), - Ns = lists:usort([Node|Cluster]), - rt:wait_until_no_pending_changes(Ns), - rt:wait_for_cluster_service(Ns, riak_kv), - ok = rt:wait_until_transfers_complete(Ns), - lager:info("Check keys and buckets after transfer"), - check_it_all(Ns), - Ns - end, [Node1], [Node2, Node3, Node4]), - - lager:info("Checking basic HTTP"), - check_it_all(Nodes, http), - - lager:info("Stopping Node1"), - rt:stop(Node1), - rt:wait_until_unpingable(Node1), - - %% Stop current node, restart previous node, verify - lists:foldl(fun(Node, Prev) -> - lager:info("Stopping Node ~p", [Node]), - rt:stop(Node), - rt:wait_until_unpingable(Node), - - lager:info("Starting Node ~p", [Prev]), - rt:start(Prev), - UpNodes = Nodes -- [Node], - lager:info("Waiting for riak_kv service to be ready in ~p", [Prev]), - rt:wait_for_cluster_service(UpNodes, riak_kv), - - lager:info("Check keys and buckets"), - check_it_all(UpNodes), - Node - end, Node1, [Node2, Node3, Node4]), - - lager:info("Stopping Node2"), - rt:stop(Node2), - rt:wait_until_unpingable(Node2), - - lager:info("Stopping Node3"), - rt:stop(Node3), - rt:wait_until_unpingable(Node3), - - lager:info("Only Node1 is up, so test should fail!"), - - check_it_all([Node1], pbc, false), - pass. 
- -put_keys(Node, Bucket, Num) -> - Pid = rt:pbc(Node), - Keys = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], - Vals = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], - [riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Val)) || {Key, Val} <- lists:zip(Keys, Vals)], - riakc_pb_socket:stop(Pid). - -list_keys(Node, Interface, Bucket, Attempt, Num, ShouldPass) -> - case Interface of - pbc -> - Pid = rt:pbc(Node), - Mod = riakc_pb_socket; - http -> - Pid = rt:httpc(Node), - Mod = rhc - end, - lager:info("Listing keys on ~p using ~p. Attempt #~p", - [Node, Interface, Attempt]), - case ShouldPass of - true -> - {ok, Keys} = Mod:list_keys(Pid, Bucket), - ActualKeys = lists:usort(Keys), - ExpectedKeys = lists:usort([list_to_binary(["", integer_to_list(Ki)]) - || Ki <- lists:seq(0, Num - 1)]), - assert_equal(ExpectedKeys, ActualKeys); - _ -> - {Status, Message} = Mod:list_keys(Pid, Bucket), - ?assertEqual(error, Status), - ?assertEqual(<<"insufficient_vnodes_available">>, Message) - end, - case Interface of - pbc -> riakc_pb_socket:stop(Pid); - _ -> ok - end. - -list_keys_for_undefined_bucket_type(Node, Interface, Bucket, Attempt, ShouldPass) -> - case Interface of - pbc -> - Pid = rt:pbc(Node), - Mod = riakc_pb_socket; - http -> - Pid = rt:httpc(Node), - Mod = rhc - end, - - lager:info("Listing keys using undefined bucket type ~p on ~p using ~p. Attempt #~p", - [?UNDEFINED_BUCKET_TYPE, Node, Interface, Attempt]), - case ShouldPass of - true -> ok; - _ -> - {Status, Message} = Mod:list_keys(Pid, { ?UNDEFINED_BUCKET_TYPE, Bucket }), - ?assertEqual(error, Status), - ?assertEqual(<<"No bucket-type named '880bf69d-5dab-44ee-8762-d24c6f759ce1'">>, Message) - end, - - case Interface of - pbc -> riakc_pb_socket:stop(Pid); - _ -> ok - end. - -put_buckets(Node, Num) -> - Pid = rt:pbc(Node), - Buckets = [list_to_binary(["", integer_to_list(Ki)]) - || Ki <- lists:seq(0, Num - 1)], - {Key, Val} = {<<"test_key">>, <<"test_value">>}, - [riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Val)) - || Bucket <- Buckets], - riakc_pb_socket:stop(Pid). - -list_buckets(Node, Interface, Attempt, Num, ShouldPass) -> - case Interface of - pbc -> - Pid = rt:pbc(Node), - Mod = riakc_pb_socket; - http -> - Pid = rt:httpc(Node), - Mod = rhc - end, - lager:info("Listing buckets on ~p using ~p. Attempt #~p", - [Node, Interface, Attempt]), - - {Status, Buckets} = Mod:list_buckets(Pid), - case Status of - error -> lager:info("list buckets error ~p", [Buckets]); - _ -> ok - end, - ?assertEqual(ok, Status), - ExpectedBuckets= lists:usort([?BUCKET | - [list_to_binary(["", integer_to_list(Ki)]) - || Ki <- lists:seq(0, Num - 1)]]), - ActualBuckets = lists:usort(Buckets), - case ShouldPass of - true -> - assert_equal(ExpectedBuckets, ActualBuckets); - _ -> - ?assert(length(ActualBuckets) < length(ExpectedBuckets)), - lager:info("This case expects inconsistent bucket lists") - end, - case Interface of - pbc -> riakc_pb_socket:stop(Pid); - _ -> ok - end. - -list_buckets_for_undefined_bucket_type(Node, Interface, Attempt, ShouldPass) -> - case Interface of - pbc -> - Pid = rt:pbc(Node), - Mod = riakc_pb_socket; - http -> - Pid = rt:httpc(Node), - Mod = rhc - end, - - lager:info("Listing buckets on ~p for undefined bucket type ~p using ~p. 
Attempt ~p.", - [Node, ?UNDEFINED_BUCKET_TYPE, Interface, Attempt]), - - case ShouldPass of - true -> ok; - _ -> - {Status, Message} = Mod:list_buckets(Pid, ?UNDEFINED_BUCKET_TYPE, []), - lager:info("Received status ~p and message ~p", [Status, Message]), - ?assertEqual(error, Status), - ?assertEqual(<<"No bucket-type named '880bf69d-5dab-44ee-8762-d24c6f759ce1'">>, Message) - end, - - case Interface of - pbc -> - riakc_pb_socket:stop(Pid); - _ -> ok - end. - -assert_equal(Expected, Actual) -> - case Expected -- Actual of - [] -> ok; - Diff -> lager:info("Expected -- Actual: ~p", [Diff]) - end, - ?assertEqual(length(Actual), length(Expected)), - ?assertEqual(Actual, Expected). - -check_it_all(Nodes) -> - check_it_all(Nodes, pbc). - -check_it_all(Nodes, Interface) -> - check_it_all(Nodes, Interface, true). - -check_it_all(Nodes, Interface, ShouldPass) -> - [check_a_node(N, Interface, ShouldPass) || N <- Nodes]. - -check_a_node(Node, Interface, ShouldPass) -> - [list_keys(Node, Interface, ?BUCKET, Attempt, ?NUM_KEYS, ShouldPass) - || Attempt <- [1,2,3] ], - [list_keys_for_undefined_bucket_type(Node, Interface, ?BUCKET, Attempt, ShouldPass) - || Attempt <- [1,2,3] ], - [list_buckets(Node, Interface, Attempt, ?NUM_BUCKETS, ShouldPass) - || Attempt <- [1,2,3] ], - [list_buckets_for_undefined_bucket_type(Node, Interface, Attempt, ShouldPass) - || Attempt <- [1,2,3] ]. - diff --git a/tests/verify_listkeys_eqcfsm.erl b/tests/verify_listkeys_eqcfsm.erl deleted file mode 100644 index d8f5f01ee..000000000 --- a/tests/verify_listkeys_eqcfsm.erl +++ /dev/null @@ -1,246 +0,0 @@ --module(verify_listkeys_eqcfsm). --compile(export_all). - --ifdef(EQC). --include_lib("eqc/include/eqc.hrl"). --include_lib("eqc/include/eqc_fsm.hrl"). --include_lib("eunit/include/eunit.hrl"). - --behaviour(riak_test). --export([confirm/0]). - --define(NUM_TESTS, 5). --define(PREFIX, {x, x}). --define(DEVS(N), lists:concat(["dev", N, "@127.0.0.1"])). --define(DEV(N), list_to_atom(?DEVS(N))). --define(MAX_CLUSTER_SIZE, 5). --record(state, { - buckets = orddict:new(), - nodes_up = [], - nodes_down = [], - cluster_nodes = [], - key_filter = undefined - }). - -%% ==================================================================== -%% riak_test callback -%% ==================================================================== -confirm() -> - ?assert(eqc:quickcheck(eqc:numtests(?NUM_TESTS, ?MODULE:prop_test()))), - pass. -%% ==================================================================== -%% EQC generators -%% ==================================================================== -g_num_keys() -> - choose(10, 1000). - -g_uuid() -> - noshrink(eqc_gen:bind(eqc_gen:bool(), fun(_) -> druuid:v4_str() end)). - -g_bucket_type() -> - oneof(bucket_types()). - -g_key_filter() -> - %% Create a key filter function. - %% There will always be at least 10 keys - %% due to the lower bound of object count - %% generator. - MatchKeys = [list_to_binary(integer_to_list(X)) || X <- lists:seq(1,10)], - KeyFilter = - fun(X) -> - lists:member(X, MatchKeys) - end, - frequency([{4, none}, {2, KeyFilter}]). - -%% ==================================================================== -%% EQC Properties -%% ==================================================================== -prop_test() -> - ?FORALL(Cmds, noshrink(commands(?MODULE)), - ?WHENFAIL( - begin - _ = lager:error("*********************** FAILED!!!!" 
- "*******************") - end, - ?TRAPEXIT( - begin - Nodes = setup_cluster(random:uniform(?MAX_CLUSTER_SIZE)), - lager:info("======================== Will run commands with Nodes:~p:", [Nodes]), - [lager:info(" Command : ~p~n", [Cmd]) || Cmd <- Cmds], - {H, _S, Res} = run_commands(?MODULE, Cmds, [{nodelist, Nodes}]), - lager:info("======================== Ran commands"), - rt:clean_cluster(Nodes), - aggregate(zip(state_names(H),command_names(Cmds)), - equals(Res, ok)) - end))). - -%% ==================================================================== -%% EQC FSM state transitions -%% ==================================================================== -initial_state() -> - preloading_data. - -preloading_data(S) -> - [ - {history, {call, ?MODULE, preload_data, [g_bucket_type(), g_uuid(), {var, nodelist}, - g_num_keys(), g_key_filter()]}}, - {verifying_data, {call, ?MODULE, log_transition, [S]}} - ]. - -verifying_data(S) -> - [ - {stopped, {call, ?MODULE, verify, [S#state.buckets, - S#state.nodes_up, - S#state.key_filter]}} - ]. - -stopped(_S) -> - []. - -%% ==================================================================== -%% EQC FSM State Data -%% ==================================================================== -initial_state_data() -> - #state{}. - -next_state_data(preloading_data, preloading_data, S, _, {call, _, preload_data, - [{BucketType, _}, Bucket, Nodes, NumKeys, KeyFilter]}) -> - S#state{ buckets = orddict:update_counter({Bucket, BucketType}, NumKeys, S#state.buckets), - key_filter = KeyFilter, - nodes_up = Nodes - }; -next_state_data(_From, _To, S, _R, _C) -> - S. - -%% ==================================================================== -%% EQC FSM state transition weights -%% ==================================================================== -weight(preloading_data,preloading_data,{call,verify_listkeys_eqcfsm,preload_data,[_,_,_,_,_]}) -> 80; -weight(preloading_data,verifying_data,{call,verify_listkeys_eqcfsm,log_transition,[_]}) -> 10; -weight(verifying_data,stopped,{call,verify_listkeys_eqcfsm,verify,[_,_,_]}) -> 10. - -%% ==================================================================== -%% EQC FSM preconditions -%% ==================================================================== -precondition(_From,_To,_S,{call,_,_,_}) -> - true. - -%% ==================================================================== -%% EQC FSM postconditions -%% ==================================================================== -postcondition(_From,_To,_S,{call,_,verify,_},{error, Reason}) -> - lager:info("Error: ~p", [Reason]), - false; -postcondition(_From,_To,S,{call,_,verify,_},KeyDict) -> - Res = audit_keys_per_node(S, KeyDict), - not lists:member(false, Res); -postcondition(_From,_To,_S,{call,_,_,_},_Res) -> - true. - -audit_keys_per_node(S, KeyDict) -> - [ [ assert_equal( - expected_keys(orddict:fetch({Bucket, BucketType}, S#state.buckets), - S#state.key_filter), - NodeKeyList) - || NodeKeyList <- orddict:fetch({Bucket, BucketType}, KeyDict) ] - || {Bucket, BucketType} <- orddict:fetch_keys(S#state.buckets) ]. -%% ==================================================================== -%% callback functions -%% ==================================================================== -preload_data({BucketType, _}, Bucket, Nodes, NumKeys, _KeyFilter) -> - lager:info("Nodes: ~p", [Nodes]), - Node = hd(Nodes), - lager:info("*******************[CMD] First node ~p", [Node]), - lager:info("Writing to bucket ~p", [Bucket]), - put_keys(Node, {BucketType, Bucket}, NumKeys). 
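[Editor's note: the state bookkeeping above accumulates the expected key count per {Bucket, BucketType} with orddict:update_counter/3, which adds the increment to an existing entry and inserts the increment as the initial value when the key is absent. A quick shell check (values illustrative):

    1> D0 = orddict:update_counter({<<"b">>, <<"t">>}, 10, orddict:new()).
    [{{<<"b">>,<<"t">>},10}]
    2> orddict:update_counter({<<"b">>, <<"t">>}, 5, D0).
    [{{<<"b">>,<<"t">>},15}]

So repeated preload_data steps against the same {Bucket, BucketType} simply grow the expected count rather than resetting it.]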
- -verify(undefined, _Nodes, _KeyFilter) -> - lager:info("Nothing to compare."); -verify(Buckets, Nodes, KeyFilter) -> - Keys = orddict:fold(fun({Bucket, BucketType}, _, Acc) -> - ListVal = [ list_filter_sort(Node, {BucketType, Bucket}, KeyFilter) - || Node <- Nodes ], - orddict:append({Bucket, BucketType}, hd(ListVal), Acc) - end, - orddict:new(), - Buckets), - Keys. - -log_transition(S) -> - lager:debug("Buckets and key counts at transition:"), - orddict:fold(fun({Bucket, BucketType} = _Key, NumKeys, _Acc) -> - lager:debug("Bucket:~p, BucketType:~p, NumKeys:~p", [Bucket,BucketType,NumKeys]) - end, - [], - S#state.buckets). - -%% ==================================================================== -%% Helpers -%% ==================================================================== -setup_cluster(NumNodes) -> - Nodes = rt:build_cluster(NumNodes), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - ?assertEqual(ok, rt:wait_until_transfers_complete(Nodes)), - Node = hd(Nodes), - [begin - rt:create_and_activate_bucket_type(Node, BucketType, [{n_val, NVal}]), - rt:wait_until_bucket_type_status(BucketType, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, BucketType) - end || {BucketType, NVal} <- bucket_types()], - Nodes. - -assert_equal(Expected, Actual) -> - case Expected -- Actual of - [] -> - ok; - Diff -> lager:info("Expected:~p~nActual:~p~nExpected -- Actual: ~p", - [length(Expected), length(Actual), length(Diff)]) - end, - length(Actual) == length(Expected) - andalso Actual == Expected. - -bucket_types() -> - [{<<"n_val_one">>, 1}, - {<<"n_val_two">>, 2}, - {<<"n_val_three">>, 3}, - {<<"n_val_four">>, 4}, - {<<"n_val_five">>, 5}]. - -expected_keys(NumKeys, FilterFun) -> - KeysPair = {ok, [list_to_binary(["", integer_to_list(Ki)]) || - Ki <- lists:seq(0, NumKeys - 1)]}, - sort_keys(filter_keys(KeysPair, FilterFun)). - -filter_keys({ok, Keys}, none) -> - Keys; -filter_keys({ok, Keys}, FilterFun) -> - lists:filter(FilterFun, Keys); -filter_keys({error, _}=Error, _) -> - Error. - -list_filter_sort(Node, Bucket, KeyFilter) -> - %% Move client to state - {ok, C} = riak:client_connect(Node), - sort_keys(filter_keys(riak_client:list_keys(Bucket, C), KeyFilter)). - -node_list(NumNodes) -> - NodesN = lists:seq(1, NumNodes), - [?DEV(N) || N <- NodesN]. - -put_keys(Node, Bucket, Num) -> - lager:info("*******************[CMD] Putting ~p keys into bucket ~p on node ~p", [Num, Bucket, Node]), - Pid = rt:pbc(Node), - try - Keys = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], - Vals = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], - [riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Val)) || {Key, Val} <- lists:zip(Keys, Vals)] - after - catch(riakc_pb_socket:stop(Pid)) - end. - -sort_keys({error, _}=Error) -> - Error; -sort_keys(Keys) -> - lists:usort(Keys). - --endif. % EQC diff --git a/tests/verify_membackend.erl b/tests/verify_membackend.erl deleted file mode 100644 index 7a41186d2..000000000 --- a/tests/verify_membackend.erl +++ /dev/null @@ -1,313 +0,0 @@ --module(verify_membackend). -%% -export([confirm/0]). - --compile(export_all). - --include_lib("eunit/include/eunit.hrl"). - --define(BUCKET, <<"ttl_test">>). - -%% from 2.0, but should be valid for 1.1+ --record(state, {data_ref :: ets:tid(), - index_ref :: ets:tid(), - time_ref :: ets:tid(), - max_memory :: undefined | integer(), - used_memory=0 :: integer(), - ttl :: integer()}). 
-
-confirm() ->
-    Tests = [ttl, max_memory, combo],
-    [Res1, Res2] =
-        [begin
-             lager:info("testing mode ~p", [Mode]),
-             put(mode, Mode),
-             [begin
-                  lager:info("testing setting ~p", [Test]),
-                  ?MODULE:Test(Mode)
-              end
-              || Test <- Tests]
-         end
-         || Mode <- [regular, multi]],
-    Res = Res1 ++ Res2,
-    [ok] = lists:usort(Res),
-    pass.
-
-
-ttl(Mode) ->
-    Conf = mkconf(ttl, Mode),
-    [NodeA, NodeB] = rt:deploy_nodes(2, Conf),
-
-    ?assertEqual(ok, check_leave_and_expiry(NodeA, NodeB)),
-
-    rt:clean_cluster([NodeA]),
-    ok.
-
-max_memory(Mode) ->
-    Conf = mkconf(max_memory, Mode),
-    [NodeA, NodeB] = rt:deploy_nodes(2, Conf),
-
-    rt:join(NodeB, NodeA),
-
-    ?assertEqual(ok, check_put_delete(NodeA)),
-
-    ?assertEqual(ok, check_put_consistent(NodeA)),
-
-    ?assertEqual(ok, check_eviction(NodeA)),
-
-    rt:clean_cluster([NodeA, NodeB]),
-
-    ok.
-
-combo(Mode) ->
-    Conf = mkconf(combo, Mode),
-
-    [NodeA, NodeB] = rt:deploy_nodes(2, Conf),
-
-    ?assertEqual(ok, check_leave_and_expiry(NodeA, NodeB)),
-
-    %% Make sure that expiry is updating used_memory correctly
-    Pid = get_remote_vnode_pid(NodeA),
-    0 = get_used_space(Pid, NodeA),
-
-    ?assertEqual(ok, check_put_delete(NodeA)),
-
-    ?assertEqual(ok, check_put_consistent(NodeA)),
-
-    ?assertEqual(ok, check_eviction(NodeA)),
-
-    rt:clean_cluster([NodeA]),
-
-    ok.
-
-
-check_leave_and_expiry(NodeA, NodeB) ->
-    ?assertEqual([], rt:systest_write(NodeB, 1, 100, ?BUCKET, 2)),
-    ?assertEqual([], rt:systest_read(NodeB, 1, 100, ?BUCKET, 2)),
-
-    rt:join(NodeB, NodeA),
-
-    ?assertEqual(ok, rt:wait_until_nodes_ready([NodeA, NodeB])),
-    rt:wait_until_no_pending_changes([NodeA, NodeB]),
-
-    rt:leave(NodeB),
-    rt:wait_until_unpingable(NodeB),
-
-    ?assertEqual([], rt:systest_read(NodeA, 1, 100, ?BUCKET, 2)),
-
-    lager:info("waiting for keys to expire"),
-    timer:sleep(timer:seconds(210)),
-
-    _ = rt:systest_read(NodeA, 1, 100, ?BUCKET, 2),
-    timer:sleep(timer:seconds(5)),
-    Res = rt:systest_read(NodeA, 1, 100, ?BUCKET, 2),
-
-    ?assertEqual(100, length(Res)),
-    ok.
-
-check_eviction(Node) ->
-    lager:info("checking that values are evicted when memory limit "
-               "is exceeded"),
-    Size = 20000 * 8,
-    Val = <<0:Size>>,
-
-    ?assertEqual([], rt:systest_write(Node, 1, 500, ?BUCKET, 2, Val)),
-
-    Res = length(rt:systest_read(Node, 1, 100, ?BUCKET, 2, Val)),
-
-    %% this is a wider range than I'd like but the final outcome is
-    %% somewhat hard to predict. Just trying to verify that some
-    %% memory limit somewhere is being honored and that values are
-    %% being evicted.
-    case Res == 100 of
-        true ->
-            ok;
-        false ->
-            ?assertEqual(Res, memory_reclamation_issue)
-    end,
-
-    {ok, C} = riak:client_connect(Node),
-
-    [begin
-         C:delete(?BUCKET, <<N:32/integer>>),
-         timer:sleep(100)
-     end
-     || N <- lists:seq(1, 500)],
-
-    %% make sure all deletes propagate?
-    timer:sleep(timer:seconds(10)),
-    ok.
-
-check_put_delete(Node) ->
-    lager:info("checking that used mem is reclaimed on delete"),
-    Pid = get_remote_vnode_pid(Node),
-
-    {MemBaseline, Key} = put_until_changed(Pid, Node, 1000),
-
-    {ok, C} = riak:client_connect(Node),
-
-    ok = C:delete(?BUCKET, <<Key:32/integer>>),
-
-    timer:sleep(timer:seconds(5)),
-
-    Mem = get_used_space(Pid, Node),
-
-    %% this is meh, but the value isn't always the same length.
-    %% It seems to be the size of a Riak object plus some overhead
-    case (MemBaseline - Mem >= 1142) andalso
-         (MemBaseline - Mem =< 1150) of
-        true ->
-            ok;
-        false ->
-            ?assertEqual(MemBaseline, fail)
-    end,
-    ok.
-
-check_put_consistent(Node) ->
-    lager:info("checking that used mem doesn't change on re-put"),
-    Pid = get_remote_vnode_pid(Node),
-
-    {MemBaseline, Key} = put_until_changed(Pid, Node, 1000),
-
-    {ok, C} = riak:client_connect(Node),
-
-    ok = C:put(riak_object:new(?BUCKET, <<Key:32/integer>>, <<0:8192>>)),
-
-    {ok, _} = C:get(?BUCKET, <<Key:32/integer>>),
-
-    timer:sleep(timer:seconds(2)),
-
-    Mem = get_used_space(Pid, Node),
-
-    case abs(Mem - MemBaseline) < 3 of
-        true -> ok;
-        false -> ?assertEqual(consistency_failure,
-                              {Mem, MemBaseline})
-    end,
-    ok.
-
-put_until_changed(Pid, Node, Key) ->
-    {ok, C} = riak:client_connect(Node),
-    UsedSpace = get_used_space(Pid, Node),
-
-    C:put(riak_object:new(?BUCKET, <<Key:32/integer>>, <<0:8192>>)),
-
-    timer:sleep(100),
-
-    UsedSpace1 = get_used_space(Pid, Node),
-    case UsedSpace < UsedSpace1 of
-        true ->
-            {UsedSpace1, Key};
-        false ->
-            put_until_changed(Pid, Node, Key+1)
-    end.
-
-mkconf(Test, Mode) ->
-    MembConfig =
-        case Test of
-            ttl ->
-                [{ttl, 200}];
-            max_memory ->
-                [{max_memory, 1}];
-            combo ->
-                [{max_memory, 1},
-                 {ttl, 200}]
-        end,
-    case Mode of
-        regular ->
-            %% only the memory backend supports TTL
-            rt:set_backend(memory),
-
-            [
-             {riak_core, [
-                          {ring_creation_size, 4}
-                         ]},
-             {riak_kv, [
-                        {anti_entropy, {off, []}},
-                        {delete_mode, immediate},
-                        {memory_backend, MembConfig}
-                       ]}
-            ];
-        multi ->
-            rt:set_backend(multi),
-            [
-             {riak_core, [
-                          {ring_creation_size, 4}
-                         ]},
-             {riak_kv, [
-                        {anti_entropy, {off, []}},
-                        {delete_mode, immediate},
-                        {multi_backend_default, <<"memb">>},
-                        {multi_backend,
-                         [
-                          {<<"memb">>, riak_kv_memory_backend,
-                           MembConfig}
-                         ]
-                        }
-                       ]
-             }
-            ]
-    end.
-
-get_remote_vnode_pid(Node) ->
-    [{_,_,VNode}|_] = rpc:call(Node, riak_core_vnode_manager,
-                               all_vnodes, [riak_kv_vnode]),
-    VNode.
-
-%% @doc Crack open the VNode state record to find
-%% the hidden number of used bytes within the
-%% riak_kv_memory_backend:state record.
--spec parse_regular_state_fun(integer()) -> fun().
-parse_regular_state_fun(Offset) ->
-    fun(X) ->
-            element(Offset, element(4, element(2, X)))
-    end.
-
-%% @doc Crack open the VNode state record to find
-%% the hidden number of used bytes within the
-%% riak_kv_memory_backend:state record for a multi-
-%% backend
--spec parse_multi_state_fun(integer()) -> fun().
-parse_multi_state_fun(Offset) ->
-    fun(X) ->
-            element(
-              3, lists:nth(
-                   1, element(
-                        2, element(
-                             Offset, element(
-                                       4, element(2, X))))))
-    end.
-
-%% this is terribly fragile and only works for Riak 2.0+
-get_used_space(VNode, Node) ->
-    S = rpc:call(Node, sys, get_state, [VNode]),
-    Mode = get(mode),
-    Version = rt:get_version(),
-    %% lager:info("version mode ~p", [{Version, Mode}]),
-
-    Extract =
-        case {Version, Mode} of
-            {<<"riak-2.0",_/binary>>, regular} ->
-                parse_regular_state_fun(4);
-            {<<"riak_ee-2.0",_/binary>>, regular} ->
-                parse_regular_state_fun(4);
-            {<<"riak-2.0",_/binary>>, multi} ->
-                parse_multi_state_fun(4);
-            {<<"riak_ee-2.0",_/binary>>, multi} ->
-                parse_multi_state_fun(4);
-            {<<"riak-2.1",_/binary>>, regular} ->
-                parse_regular_state_fun(5);
-            {<<"riak_ee-2.1",_/binary>>, regular} ->
-                parse_regular_state_fun(5);
-            {<<"riak-2.1",_/binary>>, multi} ->
-                parse_multi_state_fun(5);
-            {<<"riak_ee-2.1",_/binary>>, multi} ->
-                parse_multi_state_fun(5);
-            _Else ->
-                lager:error("didn't understand version/mode tuple ~p",
-                            [{Version, Mode}]),
-                throw(boom)
-        end,
-    State = Extract(S),
-    Mem = State#state.used_memory,
-    lager:info("got ~p used memory", [Mem]),
-    Mem.
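[Editor's note: the final State#state.used_memory read above works because Erlang records are tagged tuples, which is why the copied #state{} definition at the top of this module must keep its field order in sync with riak_kv_memory_backend's; otherwise the accessor silently reads the wrong element. A small illustrative helper (not part of the test) that makes the mapping explicit:

    %% Tuple position of a field: element 1 is the record tag,
    %% so the first field sits at element 2.
    used_memory_index() ->
        Fields = record_info(fields, state),
        length(lists:takewhile(fun(F) -> F =/= used_memory end, Fields)) + 2.

For the record above this returns 6 (indeed, the expression #state.used_memory evaluates to the same index). The hard-coded offsets passed to parse_regular_state_fun/1 and parse_multi_state_fun/1, by contrast, navigate the enclosing vnode state and have to be adjusted per Riak version, as the case table in get_used_space/2 shows.]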
diff --git a/tests/verify_mr_prereduce_node_down.erl b/tests/verify_mr_prereduce_node_down.erl
deleted file mode 100644
index 265dd7a29..000000000
--- a/tests/verify_mr_prereduce_node_down.erl
+++ /dev/null
@@ -1,93 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-%% @doc This test was designed to provoke a specific failure in
-%% MapReduce when one node is down, and a prereduce phase is used. The
-%% test simply counts items in a bucket, but it will occasionally get
-%% a result of `[]' (the empty list) or `[0]' instead of `[Count]'.
-%%
-%% The bug was determined to be in the choice of static hash for the
-%% final reduce phase. It did not take into account node liveness, and
-%% therefore might assign the reduce worker to a vnode on a node that
-%% was down.
-%%
-%% This test is based on one submitted by Alexander Gunin to the
-%% riak-users mailing list as an issue reproducer.
-%%
-%% [http://lists.basho.com/pipermail/riak-users_lists.basho.com/2013-January/010896.html]
--module(verify_mr_prereduce_node_down).
-
--export([
-         %% riak_test's entry
-         confirm/0
-        ]).
-
--include_lib("eunit/include/eunit.hrl").
-
-%% @doc riak_test callback
-confirm() ->
-    NodeCount = 4,
-    lager:info("Build ~b-node cluster", [NodeCount]),
-    [Primary,ToKill|_] = rt:build_cluster(NodeCount),
-
-    %% We need one node down for this test
-    rt:stop(ToKill),
-
-    %% store our test data
-    Bucket = <<"verify_mr_prereduce_node_down">>,
-    ObjCount = 100,
-    lager:info("Loading ~b objects of test data", [ObjCount]),
-    [] = rt:systest_write(Primary, 1, ObjCount, Bucket, 3),
-
-    %% run the query a bunch
-    C = rt:pbc(Primary),
-    TestCount = 100,
-    lager:info("Running the MR query ~b times", [TestCount]),
-    Runs = [ run_query(C, Bucket) || _ <- lists:seq(1, TestCount) ],
-
-    lager:info("Evaluating results"),
-
-    %% Errors == failures that even Riak thinks were failures
-    %% Correct == correct answers
-    %% Incorrect == failures that Riak thought were correct
-    SupposedCorrectFun = fun({ok, _}) -> true; (_) -> false end,
-    ActualCorrectFun = fun({ok, V}) -> V == [{1, [ObjCount]}] end,
-    {Supposed, Errors} = lists:partition(SupposedCorrectFun, Runs),
-    {Correct, Incorrect} = lists:partition(ActualCorrectFun, Supposed),
-
-    %% asserting that all queries gave the correct answer; asserting
-    %% more than just Correct == TestCount, such that failures print
-    %% out details about how they failed
-    ?assertEqual({TestCount, [], []},
-                 {length(Correct), Incorrect, Errors}),
-
-    lager:info("~s: PASS", [atom_to_list(?MODULE)]),
-    pass.
-
-%% result should be a count of the objects in the bucket
-run_query(C, Bucket) ->
-    riakc_pb_socket:mapred(
-      C, Bucket,
-      %% this prereduce is key - with it, we'll get
-      %% {ok, []} results in the broken case; without
-      %% it, we'll get error tuples
-      [{map, {modfun, riak_kv_mapreduce, map_identity}, [do_prereduce], false},
-       %% counting inputs works because the inputs are riak_objects
-       %% (not integers, which might confuse the counting)
-       {reduce, {modfun, riak_kv_mapreduce, reduce_count_inputs}, none, true}]).
diff --git a/tests/verify_no_writes_on_read.erl b/tests/verify_no_writes_on_read.erl
deleted file mode 100644
index ca4e95d31..000000000
--- a/tests/verify_no_writes_on_read.erl
+++ /dev/null
@@ -1,49 +0,0 @@
--module(verify_no_writes_on_read).
--behaviour(riak_test).
--export([confirm/0]).
--compile(export_all).
--include_lib("eunit/include/eunit.hrl").
-
--define(NUM_NODES, 3).
--define(BUCKET, <<"bucket">>).
-
-confirm() ->
-    Backend = proplists:get_value(backend, riak_test_runner:metadata()),
-    lager:info("Running with backend ~p", [Backend]),
-    ?assertEqual(bitcask, Backend),
-    [Node1 | _Rest] = _Nodes = rt:build_cluster(?NUM_NODES),
-    PBC = rt:pbc(Node1),
-    lager:info("Setting last write wins on bucket"),
-    B = ?BUCKET,
-    ?assertMatch(ok, rpc:call(Node1, riak_core_bucket, set_bucket, [B, [{last_write_wins, true}]])),
-    BProps = rpc:call(Node1, riak_core_bucket, get_bucket, [B]),
-    lager:info("Bucket properties ~p", [BProps]),
-    K = <<"Key">>,
-    V = <<"Value">>,
-    Obj = riakc_obj:new(B, K, V),
-    lager:info("Writing a simple object"),
-    riakc_pb_socket:put(PBC,Obj),
-    lager:info("Waiting some time to let the stats update"),
-    timer:sleep(10000),
-    OrigStats = get_write_stats(Node1),
-    lager:info("Stats are now ~p", [OrigStats]),
-    Read1 = fun(_N) ->
-                    ?assertMatch({ok,_O}, riakc_pb_socket:get(PBC, B, K))
-            end,
-    lager:info("Repeatedly read that object. There should be no writes"),
-    lists:foreach(Read1, lists:seq(1,100)),
-    lager:info("Waiting some time to let the stats update"),
-    timer:sleep(10000),
-    Stats = get_write_stats(Node1),
-    lager:info("Stats are now ~p", [Stats]),
-    ?assertEqual(OrigStats, Stats),
-    riakc_pb_socket:stop(PBC),
-    pass.
-
-
-get_write_stats(Node) ->
-    Stats = rpc:call(Node, riak_kv_stat, get_stats, []),
-    Puts = proplists:get_value(vnode_puts, Stats),
-    ReadRepairs = proplists:get_value(read_repairs, Stats),
-    [{puts, Puts}, {read_repairs, ReadRepairs}].
-
diff --git a/tests/verify_object_limits.erl b/tests/verify_object_limits.erl
deleted file mode 100644
index fd8af35dc..000000000
--- a/tests/verify_object_limits.erl
+++ /dev/null
@@ -1,130 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
-%% @doc Verifies Riak's warnings and caps for number of siblings
-%% and object size. Warnings end up in the logs, and hard caps can
-%% make requests fail.
--module(verify_object_limits).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(BUCKET, <<"b">>).
--define(WARN_SIZE, 1000).
--define(MAX_SIZE, 10000).
--define(WARN_SIBLINGS,2).
--define(MAX_SIBLINGS,5).
-
-
-confirm() ->
-    [Node1] = rt:build_cluster(1, [{riak_kv, [
-                                              {ring_creation_size, 8},
-                                              {max_object_size, ?MAX_SIZE},
-                                              {warn_object_size, ?WARN_SIZE},
-                                              {max_siblings, ?MAX_SIBLINGS},
-                                              {warn_siblings, ?WARN_SIBLINGS}]}]),
-    C = rt:pbc(Node1),
-
-    %% Set up to grep logs to verify messages
-    rt:setup_log_capture(Node1),
-
-    % For the sibling test, we need the bucket to allow siblings
-    lager:info("Configuring bucket to allow siblings"),
-    ?assertMatch(ok, riakc_pb_socket:set_bucket(C, ?BUCKET,
-                                                [{allow_mult, true}])),
-    verify_size_limits(C, Node1),
-    verify_sibling_limits(C, Node1),
-    pass.
-
-verify_size_limits(C, Node1) ->
-    lager:info("Verifying size limits"),
-    Puts = [{1, ok},
-            {10, ok},
-            {50, ok},
-            {?WARN_SIZE, warning},
-            {?MAX_SIZE, error},
-            {?MAX_SIZE*2, error}],
-    [begin
-         lager:info("Checking put of size ~p, expected ~p", [N, X]),
-         K = <<N:32/integer>>,
-         V = <<0:(N)/integer-unit:8>>, % a binary of N zero bytes
-         O = riakc_obj:new(?BUCKET, K, V),
-         % Verify behavior on write
-         Res = riakc_pb_socket:put(C, O),
-         lager:info("Result : ~p", [Res]),
-         case X of
-             ok ->
-                 ?assertMatch({N, ok}, {N, Res});
-             error ->
-                 ?assertMatch({N, {error, _}}, {N, Res}),
-                 verify_size_write_error(Node1, K, N);
-             warning ->
-                 verify_size_write_warning(Node1, K, N)
-         end,
-         % Now verify on read
-         lager:info("Now checking read of size ~p, expected ~p", [N, X]),
-         ReadRes = riakc_pb_socket:get(C, ?BUCKET, K),
-         case X of
-             ok ->
-                 ?assertMatch({{ok, _}, N}, {ReadRes, N});
-             warning ->
-                 ?assertMatch({{ok, _}, N}, {ReadRes, N}),
-                 verify_size_read_warning(Node1, K, N);
-             error ->
-                 ?assertMatch({{error, _}, N}, {ReadRes, N})
-         end
-     end || {N, X} <- Puts],
-    ok.
-
-verify_size_write_warning(Node, K, N) ->
-    lager:info("Looking for write warning for size ~p", [N]),
-    Pattern = io_lib:format("warning.*Writ.*~p.*~p",[?BUCKET, K]),
-    Res = rt:expect_in_log(Node, Pattern),
-    ?assertEqual({warning, N, true}, {warning, N, Res}).
-
-verify_size_read_warning(Node, K, N) ->
-    lager:info("Looking for read warning for size ~p", [N]),
-    Pattern = io_lib:format("warning.*Read.*~p.*~p",[?BUCKET, K]),
-    Res = rt:expect_in_log(Node, Pattern),
-    ?assertEqual({warning, N, true}, {warning, N, Res}).
-
-verify_size_write_error(Node, K, N) ->
-    lager:info("Looking for write error for size ~p", [N]),
-    Pattern = io_lib:format("error.*~p.*~p",[?BUCKET, K]),
-    Res = rt:expect_in_log(Node, Pattern),
-    ?assertEqual({error, N, true}, {error, N, Res}).
-
-verify_sibling_limits(C, Node1) ->
-    K = <<"sibtest">>,
-    O = riakc_obj:new(?BUCKET, K, <<"val">>),
-    [?assertMatch(ok, riakc_pb_socket:put(C, O))
-     || _ <- lists:seq(1, ?WARN_SIBLINGS+1)],
-    P = io_lib:format("warning.*siblings.*~p.*~p.*(~p)",
-                      [?BUCKET, K, ?WARN_SIBLINGS+1]),
-    Found = rt:expect_in_log(Node1, P),
-    lager:info("Looking for sibling warning: ~p", [Found]),
-    ?assertEqual(true, Found),
-    % Generate error now
-    [?assertMatch(ok, riakc_pb_socket:put(C, O))
-     || _ <- lists:seq(?WARN_SIBLINGS+2, ?MAX_SIBLINGS)],
-    Res = riakc_pb_socket:put(C, O),
-    lager:info("Result when too many siblings : ~p", [Res]),
-    ?assertMatch({error,_}, Res),
-    ok.
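[Editor's note: as a sanity check of the value generator used in verify_size_limits/2, the expression <<0:(N)/integer-unit:8>> really is a binary of exactly N zero bytes, so the put payloads line up with the configured byte limits:

    1> byte_size(<<0:(1000)/integer-unit:8>>).
    1000
]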
diff --git a/tests/verify_reset_bucket_props.erl b/tests/verify_reset_bucket_props.erl
deleted file mode 100644
index 1c099a81c..000000000
--- a/tests/verify_reset_bucket_props.erl
+++ /dev/null
@@ -1,79 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(verify_reset_bucket_props).
--behavior(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(BUCKET, <<"test_bucket">>).
-
-confirm() ->
-    %% Bring up a 3-node cluster for the test.
-    %% We will be using two of the nodes to perform an
-    %% update and then a reset (one on each node) of a bucket's properties.
-    %% All nodes are checked to make sure the reset took effect on them.
-    [Node1, Node2, Node3] = Nodes = rt:build_cluster(3),
-
-    DefaultProps = get_current_bucket_props(Nodes, ?BUCKET),
-
-    update_props(DefaultProps, Node1, Nodes),
-    lager:info("Resetting bucket properties for bucket ~p on node ~p via rpc",
-               [?BUCKET, Node2]),
-    rpc:call(Node2, riak_core_bucket, reset_bucket, [?BUCKET]),
-    rt:wait_until_ring_converged(Nodes),
-
-    [check_props_reset(Node, ?BUCKET, DefaultProps) || Node <- Nodes],
-
-
-    update_props(DefaultProps, Node1, Nodes),
-    C = rt:pbc(Node3),
-    lager:info("Resetting bucket properties for bucket ~p on node ~p via pbc",
-               [?BUCKET, Node3]),
-    ok = riakc_pb_socket:reset_bucket(C, ?BUCKET),
-    rt:wait_until_ring_converged(Nodes),
-
-    [check_props_reset(Node, ?BUCKET, DefaultProps) || Node <- Nodes],
-    pass.
-
-update_props(DefaultProps, Node, Nodes) ->
-    Updates = [{n_val, 1}],
-    lager:info("Setting bucket properties ~p for bucket ~p on node ~p",
-               [Updates, ?BUCKET, Node]),
-    rpc:call(Node, riak_core_bucket, set_bucket, [?BUCKET, Updates]),
-    rt:wait_until_ring_converged(Nodes),
-
-    UpdatedProps = get_current_bucket_props(Nodes, ?BUCKET),
-    ?assertNotEqual(DefaultProps, UpdatedProps).
-
-
-%% fetch bucket properties via rpc
-%% from a node or a list of nodes (the last node in the list is used)
-get_current_bucket_props(Nodes, Bucket) when is_list(Nodes) ->
-    Node = lists:nth(length(Nodes), Nodes),
-    get_current_bucket_props(Node, Bucket);
-get_current_bucket_props(Node, Bucket) when is_atom(Node) ->
-    rpc:call(Node,
-             riak_core_bucket,
-             get_bucket,
-             [Bucket]).
-
-check_props_reset(Node, Bucket, DefaultProps) ->
-    Current = get_current_bucket_props(Node, Bucket),
-    ?assertEqual(lists:usort(DefaultProps), lists:usort(Current)).
diff --git a/tests/verify_riak_lager.erl b/tests/verify_riak_lager.erl
deleted file mode 100644
index 98505538b..000000000
--- a/tests/verify_riak_lager.erl
+++ /dev/null
@@ -1,53 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(verify_riak_lager).
-
--behavior(riak_test).
--export([confirm/0]).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("kernel/include/file.hrl").
-
--define(UNIX_RW_R__R__, 8#100644).
-
-confirm() ->
-    lager:info("Starting a node"),
-    Nodes = [Node] = rt:deploy_nodes(1),
-    ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)),
-
-    lager:info("Stopping that node"),
-    rt:stop(Node),
-
-    rt:start(Node),
-    lager:info("Checking for log files"),
-
-    {ok, LagerHandlers} = rt:rpc_get_env(Node, [{lager, handlers}]),
-
-    Files = [proplists:get_value(file, Config) || {Backend, Config} <- LagerHandlers,
-                                                  Backend == lager_file_backend ],
-
-    lager:info("Checking for files: ~p", [Files]),
-    [?assert(rpc:call(Node, filelib, is_file, [File])) || File <- Files],
-
-    FileInfos = [ FileInfo || {ok, FileInfo} <- [rpc:call(Node, file, read_file_info, [File]) || File <- Files]],
-
-    [?assertEqual(?UNIX_RW_R__R__, ?UNIX_RW_R__R__ band FileInfo#file_info.mode) || FileInfo <- FileInfos],
-    pass.
-
diff --git a/tests/verify_riak_object_reformat.erl b/tests/verify_riak_object_reformat.erl
deleted file mode 100644
index ca44f9668..000000000
--- a/tests/verify_riak_object_reformat.erl
+++ /dev/null
@@ -1,64 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2012 Basho Technologies, Inc.
-%%
-%% Newer versions of Riak may use a different on-disk format for riak
-%% objects. When performing a rolling downgrade, the downgraded nodes may not
-%% be able to read the newer on-disk format. This tests that riak_kv_reformat
-%% correctly downgrades the on-disk format on a node while performing a
-%% rolling downgrade.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(verify_riak_object_reformat).
--behaviour(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
-
--define(N, 3).
- -confirm() -> - rt:update_app_config(all, [{riak_kv, [{object_format, v1}]}]), - TestMetaData = riak_test_runner:metadata(), - DowngradeVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - Nodes = [Node1|_] = rt:build_cluster(?N), - - [rt:wait_until_capability(N, {riak_kv, object_format}, v1, v0) || N <- Nodes], - - lager:info("Writing 100 keys in format v1 to ~p", [Node1]), - rt:systest_write(Node1, 100, ?N), - ?assertEqual([], rt:systest_read(Node1, 100, ?N)), - lager:info("100 keys successfully written to ~p", [Node1]), - - %% TODO: introduce some handoff - [begin - lager:info("Reformatting objects and downgrading ~p", [Node]), - run_reformat(Node, Node =:= Node1), %% wait for handoffs on one node, kill on rest - [rt:wait_until_capability(N, {riak_kv, object_format}, v0, v0) || N <- Nodes], - rt:upgrade(Node, DowngradeVsn), %% use upgrade to downgrade - rt:wait_for_service(Node, riak_kv), - lager:info("Ensuring keys still readable on ~p", [Node]), - ?assertEqual([], rt:systest_read(Node, 100, ?N)) - end || Node <- Nodes], - pass. - -run_reformat(Node, KillHandoffs) -> - {_Success, _Ignore, Error} = rpc:call(Node, - riak_kv_reformat, - run, - [v0, [{kill_handoffs, KillHandoffs}]]), - ?assertEqual(0, Error), - ok. diff --git a/tests/verify_riak_stats.erl b/tests/verify_riak_stats.erl deleted file mode 100644 index 53ae72d1e..000000000 --- a/tests/verify_riak_stats.erl +++ /dev/null @@ -1,826 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_riak_stats). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --define(CTYPE, <<"counters">>). --define(STYPE, <<"sets">>). --define(MTYPE, <<"maps">>). --define(TYPES, [{?CTYPE, counter}, - {?STYPE, set}, - {?MTYPE, map}]). --define(CONF, [ - {yokozuna, - [{enabled, true}] - }]). - -%% You should have curl installed locally to do this. 
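[Editor's note: the HTTP stats used throughout the test below come from Riak's /stats resource; outside the test harness the same data can be fetched by hand, e.g. (address and port illustrative):

    $ curl -s http://127.0.0.1:8098/stats

which returns one JSON object whose keys are the stat names checked below.]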
-confirm() ->
-    Nodes = rt:deploy_nodes(1, ?CONF),
-    [Node1] = Nodes,
-    verify_dt_converge:create_bucket_types(Nodes, ?TYPES),
-    ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])),
-    Stats1 = get_stats(Node1),
-
-    lager:info("Verifying that all expected stats keys are present from the HTTP endpoint"),
-    ok = verify_stats_keys_complete(Node1, Stats1),
-
-    AdminStats1 = get_console_stats(Node1),
-    lager:info("Verifying that the stats keys in riak-admin status and HTTP match"),
-    ok = compare_http_and_console_stats(Stats1, AdminStats1),
-
-    %% make sure a set of stats have valid values
-    lager:info("Verifying that the system and ring stats have valid values"),
-    verify_nz(Stats1,[<<"cpu_nprocs">>,
-                      <<"mem_total">>,
-                      <<"mem_allocated">>,
-                      <<"sys_logical_processors">>,
-                      <<"sys_process_count">>,
-                      <<"sys_thread_pool_size">>,
-                      <<"sys_wordsize">>,
-                      <<"ring_num_partitions">>,
-                      <<"ring_creation_size">>,
-                      <<"memory_total">>,
-                      <<"memory_processes">>,
-                      <<"memory_processes_used">>,
-                      <<"memory_system">>,
-                      <<"memory_atom">>,
-                      <<"memory_atom_used">>,
-                      <<"memory_binary">>,
-                      <<"memory_code">>,
-                      <<"memory_ets">>]),
-
-
-    lager:info("perform 5 x PUT and 5 x GET to increment the stats"),
-    lager:info("as the stat system only does calcs for > 5 readings"),
-
-    C = rt:httpc(Node1),
-    [rt:httpc_write(C, <<"systest">>, <<X:32/integer>>, <<"12345">>) || X <- lists:seq(1, 5)],
-    [rt:httpc_read(C, <<"systest">>, <<X:32/integer>>) || X <- lists:seq(1, 5)],
-
-    Stats2 = get_stats(Node1),
-
-    %% make sure the stats that were supposed to increment did
-    verify_inc(Stats1, Stats2, [{<<"node_gets">>, 10},
-                                {<<"node_puts">>, 5},
-                                {<<"node_gets_total">>, 10},
-                                {<<"node_puts_total">>, 5},
-                                {<<"vnode_gets">>, 30},
-                                {<<"vnode_puts">>, 15},
-                                {<<"vnode_gets_total">>, 30},
-                                {<<"vnode_puts_total">>, 15}]),
-
-    %% verify that fsm times were tallied
-    verify_nz(Stats2, [<<"node_get_fsm_time_mean">>,
-                       <<"node_get_fsm_time_median">>,
-                       <<"node_get_fsm_time_95">>,
-                       <<"node_get_fsm_time_99">>,
-                       <<"node_get_fsm_time_100">>,
-                       <<"node_put_fsm_time_mean">>,
-                       <<"node_put_fsm_time_median">>,
-                       <<"node_put_fsm_time_95">>,
-                       <<"node_put_fsm_time_99">>,
-                       <<"node_put_fsm_time_100">>]),
-
-
-    lager:info("Make PBC Connection"),
-    Pid = rt:pbc(Node1),
-
-    Stats3 = get_stats(Node1),
-
-    rt:systest_write(Node1, 1),
-    %% make sure the stats that were supposed to increment did
-    verify_inc(Stats2, Stats3, [{<<"pbc_connects_total">>, 1},
-                                {<<"pbc_connects">>, 1},
-                                {<<"pbc_active">>, 1}]),
-
-
-
-    lager:info("Force Read Repair"),
-    rt:pbc_write(Pid, <<"testbucket">>, <<"1">>, <<"blah!">>),
-    rt:pbc_set_bucket_prop(Pid, <<"testbucket">>, [{n_val, 4}]),
-
-    Stats4 = get_stats(Node1),
-    verify_inc(Stats3, Stats4, [{<<"read_repairs_total">>, 0},
-                                {<<"read_repairs">>, 0}]),
-
-    _Value = rt:pbc_read(Pid, <<"testbucket">>, <<"1">>),
-
-    Stats5 = get_stats(Node1),
-
-    verify_inc(Stats3, Stats5, [{<<"read_repairs_total">>, 1},
-                                {<<"read_repairs">>, 1}]),
-
-    _ = do_datatypes(Pid),
-
-    lager:info("Verifying datatype stats are non-zero."),
-
-    Stats6 = get_stats(Node1),
-    [
-     begin
-         lager:info("~s: ~p (expected non-zero)", [S, proplists:get_value(S, Stats6)]),
-         verify_nz(Stats6, [S])
-     end || S <- datatype_stats() ],
-    pass.
-
-verify_inc(Prev, Props, Keys) ->
-    [begin
-         Old = proplists:get_value(Key, Prev, 0),
-         New = proplists:get_value(Key, Props, 0),
-         lager:info("~s: ~p -> ~p (expected ~p)", [Key, Old, New, Old + Inc]),
-         ?assertEqual(Old + Inc, New)
-     end || {Key, Inc} <- Keys].
- -verify_nz(Props, Keys) -> - [?assertNotEqual(proplists:get_value(Key,Props,0), 0) || Key <- Keys]. - -get_stats(Node) -> - timer:sleep(10000), - lager:info("Retrieving stats from node ~s", [Node]), - StatsCommand = io_lib:format("curl -s -S ~s/stats", [rt:http_url(Node)]), - lager:debug("Retrieving stats using command ~s", [StatsCommand]), - StatString = os:cmd(StatsCommand), - {struct, Stats} = mochijson2:decode(StatString), - %%lager:debug(StatString), - Stats. - -get_console_stats(Node) -> - %% Problem: rt:admin(Node, Cmd) seems to drop parts of the output when - %% used for "riak-admin status" in 'rtdev'. - %% Temporary workaround: use os:cmd/1 when in 'rtdev' (needs some cheats - %% in order to find the right path etc.) - try - Stats = - case rt_config:get(rt_harness) of - rtdev -> - N = rtdev:node_id(Node), - Path = rtdev:relpath(rtdev:node_version(N)), - Cmd = rtdev:riak_admin_cmd(Path, N, ["status"]), - lager:info("Cmd = ~p~n", [Cmd]), - os:cmd(Cmd); - _ -> - rt:admin(Node, "status") - end, - [S || {_,_} = S <- - [list_to_tuple(re:split(L, " : ", [])) - || L <- tl(tl(string:tokens(Stats, "\n")))]] - catch - error:Reason -> - lager:info("riak-admin status ERROR: ~p~n~p~n", - [Reason, erlang:get_stacktrace()]), - [] - end. - -compare_http_and_console_stats(Stats1, Stats2) -> - OnlyInHttp = [S || {K,_} = S <- Stats1, - not lists:keymember(K, 1, Stats2)], - OnlyInAdmin = [S || {K,_} = S <- Stats2, - not lists:keymember(K, 1, Stats1)], - maybe_log_stats_keys(OnlyInHttp, "Keys missing from riak-admin"), - maybe_log_stats_keys(OnlyInAdmin, "Keys missing from HTTP"), - ?assertEqual([], OnlyInHttp), - ?assertEqual([], OnlyInAdmin), - ok. - -verify_stats_keys_complete(Node, Stats) -> - ActualKeys = proplists:get_keys(Stats), - ExpectedKeys = all_stats(Node), - MissingStatsKeys = diff_lists(ActualKeys, ExpectedKeys), - AdditionalStatsKeys = diff_lists(ExpectedKeys, ActualKeys), - maybe_log_stats_keys(MissingStatsKeys, "missing stats keys"), - maybe_log_stats_keys(AdditionalStatsKeys, "additional stats"), - ?assertEqual({[],[]}, {MissingStatsKeys, AdditionalStatsKeys}), - ok. - -diff_lists(List, ThatList) -> - lists:filter(fun(Element) -> not lists:member(Element, List) end, ThatList). - --spec maybe_log_stats_keys([binary()], string()) -> ok. -maybe_log_stats_keys(StatsKeys, _Description) when length(StatsKeys) == 0 -> - ok; -maybe_log_stats_keys(StatsKeys, Description) -> - lager:info("~s: ~s", [Description, pretty_print_stats_keys(StatsKeys)]). - --spec pretty_print_stats_keys([binary()]) -> string(). -pretty_print_stats_keys(StatsKeys) -> - ConvertedStatsKeys = lists:map(fun(StatsKey) -> binary_to_list(StatsKey) end, StatsKeys), - string:join(ConvertedStatsKeys, ", "). - -datatype_stats() -> - %% Merge stats are excluded because we likely never merge disjoint - %% copies on a single node after a single write each. 
- [ list_to_binary(Stat) || - Stat <- [ - %% "object_counter_merge" - %% ,"object_counter_merge_total" - %% ,"object_counter_merge_time_mean" - %% ,"object_counter_merge_time_median" - %% ,"object_counter_merge_time_95" - %% ,"object_counter_merge_time_99" - %% ,"object_counter_merge_time_100" - %% , - "vnode_counter_update" - ,"vnode_counter_update_total" - ,"vnode_counter_update_time_mean" - ,"vnode_counter_update_time_median" - ,"vnode_counter_update_time_95" - ,"vnode_counter_update_time_99" - ,"vnode_counter_update_time_100" - %% ,"object_set_merge" - %% ,"object_set_merge_total" - %% ,"object_set_merge_time_mean" - %% ,"object_set_merge_time_median" - %% ,"object_set_merge_time_95" - %% ,"object_set_merge_time_99" - %% ,"object_set_merge_time_100" - ,"vnode_set_update" - ,"vnode_set_update_total" - ,"vnode_set_update_time_mean" - ,"vnode_set_update_time_median" - ,"vnode_set_update_time_95" - ,"vnode_set_update_time_99" - ,"vnode_set_update_time_100" - %% ,"object_map_merge" - %% ,"object_map_merge_total" - %% ,"object_map_merge_time_mean" - %% ,"object_map_merge_time_median" - %% ,"object_map_merge_time_95" - %% ,"object_map_merge_time_99" - %% ,"object_map_merge_time_100" - ,"vnode_map_update" - ,"vnode_map_update_total" - ,"vnode_map_update_time_mean" - ,"vnode_map_update_time_median" - ,"vnode_map_update_time_95" - ,"vnode_map_update_time_99" - ,"vnode_map_update_time_100" - ,"node_gets_counter" - ,"node_gets_counter_total" - ,"node_get_fsm_counter_siblings_mean" - ,"node_get_fsm_counter_siblings_median" - ,"node_get_fsm_counter_siblings_95" - ,"node_get_fsm_counter_siblings_99" - ,"node_get_fsm_counter_siblings_100" - ,"node_get_fsm_counter_objsize_mean" - ,"node_get_fsm_counter_objsize_median" - ,"node_get_fsm_counter_objsize_95" - ,"node_get_fsm_counter_objsize_99" - ,"node_get_fsm_counter_objsize_100" - ,"node_get_fsm_counter_time_mean" - ,"node_get_fsm_counter_time_median" - ,"node_get_fsm_counter_time_95" - ,"node_get_fsm_counter_time_99" - ,"node_get_fsm_counter_time_100" - ,"node_gets_set" - ,"node_gets_set_total" - ,"node_get_fsm_set_siblings_mean" - ,"node_get_fsm_set_siblings_median" - ,"node_get_fsm_set_siblings_95" - ,"node_get_fsm_set_siblings_99" - ,"node_get_fsm_set_siblings_100" - ,"node_get_fsm_set_objsize_mean" - ,"node_get_fsm_set_objsize_median" - ,"node_get_fsm_set_objsize_95" - ,"node_get_fsm_set_objsize_99" - ,"node_get_fsm_set_objsize_100" - ,"node_get_fsm_set_time_mean" - ,"node_get_fsm_set_time_median" - ,"node_get_fsm_set_time_95" - ,"node_get_fsm_set_time_99" - ,"node_get_fsm_set_time_100" - ,"node_gets_map" - ,"node_gets_map_total" - ,"node_get_fsm_map_siblings_mean" - ,"node_get_fsm_map_siblings_median" - ,"node_get_fsm_map_siblings_95" - ,"node_get_fsm_map_siblings_99" - ,"node_get_fsm_map_siblings_100" - ,"node_get_fsm_map_objsize_mean" - ,"node_get_fsm_map_objsize_median" - ,"node_get_fsm_map_objsize_95" - ,"node_get_fsm_map_objsize_99" - ,"node_get_fsm_map_objsize_100" - ,"node_get_fsm_map_time_mean" - ,"node_get_fsm_map_time_median" - ,"node_get_fsm_map_time_95" - ,"node_get_fsm_map_time_99" - ,"node_get_fsm_map_time_100" - ,"node_puts_counter" - ,"node_puts_counter_total" - ,"node_put_fsm_counter_time_mean" - ,"node_put_fsm_counter_time_median" - ,"node_put_fsm_counter_time_95" - ,"node_put_fsm_counter_time_99" - ,"node_put_fsm_counter_time_100" - ,"node_puts_set" - ,"node_puts_set_total" - ,"node_put_fsm_set_time_mean" - ,"node_put_fsm_set_time_median" - ,"node_put_fsm_set_time_95" - ,"node_put_fsm_set_time_99" - ,"node_put_fsm_set_time_100" 
- ,"node_puts_map" - ,"node_puts_map_total" - ,"node_put_fsm_map_time_mean" - ,"node_put_fsm_map_time_median" - ,"node_put_fsm_map_time_95" - ,"node_put_fsm_map_time_99" - ,"node_put_fsm_map_time_100" - ,"counter_actor_counts_mean" - ,"counter_actor_counts_median" - ,"counter_actor_counts_95" - ,"counter_actor_counts_99" - ,"counter_actor_counts_100" - ,"set_actor_counts_mean" - ,"set_actor_counts_median" - ,"set_actor_counts_95" - ,"set_actor_counts_99" - ,"set_actor_counts_100" - ,"map_actor_counts_mean" - ,"map_actor_counts_median" - ,"map_actor_counts_95" - ,"map_actor_counts_99" - ,"map_actor_counts_100" - ] - ]. - -do_datatypes(Pid) -> - _ = [ get_and_update(Pid, Type) || Type <- [counter, set, map]]. - -get_and_update(Pid, counter) -> - - _ = [ riakc_pb_socket:update_type(Pid, {?CTYPE, <<"pb">>}, <>, - {counter, {increment, 5}, - undefined}) - || I <- lists:seq(1, 10) ], - - _ = [ riakc_pb_socket:fetch_type(Pid, {?CTYPE, <<"pb">>}, <>) - || I <- lists:seq(1, 10) ]; - -get_and_update(Pid, set) -> - - _ = [ riakc_pb_socket:update_type(Pid, {?STYPE, <<"pb">>}, <>, - {set, {add_all, [<<"a">>, <<"b">>]}, undefined}) - || I <- lists:seq(1, 10) ], - - _ = [ riakc_pb_socket:fetch_type(Pid, {?STYPE, <<"pb">>}, <>) - || I <- lists:seq(1, 10) ]; - -get_and_update(Pid, map) -> - - _ = [ riakc_pb_socket:update_type(Pid, {?MTYPE, <<"pb">>}, <>, - {map, - {update,[ - {update, {<<"a">>, counter}, {increment, 5}} - ]}, - undefined}) - || I <- lists:seq(1, 10) ], - - _ = [ riakc_pb_socket:fetch_type(Pid, {?MTYPE, <<"pb">>}, <>) - || I <- lists:seq(1, 10) ]. - -all_stats(Node) -> - common_stats() ++ product_stats(rt:product(Node)). - -common_stats() -> - [ - <<"asn1_version">>, - <<"basho_stats_version">>, - <<"bitcask_version">>, - <<"clique_version">>, - <<"cluster_info_version">>, - <<"compiler_version">>, - <<"connected_nodes">>, - <<"consistent_get_objsize_100">>, - <<"consistent_get_objsize_95">>, - <<"consistent_get_objsize_99">>, - <<"consistent_get_objsize_mean">>, - <<"consistent_get_objsize_median">>, - <<"consistent_get_time_100">>, - <<"consistent_get_time_95">>, - <<"consistent_get_time_99">>, - <<"consistent_get_time_mean">>, - <<"consistent_get_time_median">>, - <<"consistent_gets">>, - <<"consistent_gets_total">>, - <<"consistent_put_objsize_100">>, - <<"consistent_put_objsize_95">>, - <<"consistent_put_objsize_99">>, - <<"consistent_put_objsize_mean">>, - <<"consistent_put_objsize_median">>, - <<"consistent_put_time_100">>, - <<"consistent_put_time_95">>, - <<"consistent_put_time_99">>, - <<"consistent_put_time_mean">>, - <<"consistent_put_time_median">>, - <<"consistent_puts">>, - <<"consistent_puts_total">>, - <<"converge_delay_last">>, - <<"converge_delay_max">>, - <<"converge_delay_mean">>, - <<"converge_delay_min">>, - <<"coord_redirs_total">>, - <<"counter_actor_counts_100">>, - <<"counter_actor_counts_95">>, - <<"counter_actor_counts_99">>, - <<"counter_actor_counts_mean">>, - <<"counter_actor_counts_median">>, - <<"cpu_avg1">>, - <<"cpu_avg15">>, - <<"cpu_avg5">>, - <<"cpu_nprocs">>, - <<"crypto_version">>, - <<"disk">>, - <<"dropped_vnode_requests_total">>, - <<"eleveldb_version">>, - <<"erlang_js_version">>, - <<"erlydtl_version">>, - <<"executing_mappers">>, - <<"exometer_core_version">>, - <<"goldrush_version">>, - <<"gossip_received">>, - <<"handoff_timeouts">>, - <<"ibrowse_version">>, - <<"ignored_gossip_total">>, - <<"index_fsm_active">>, - <<"index_fsm_create">>, - <<"index_fsm_create_error">>, - <<"inets_version">>, - <<"kernel_version">>, - <<"lager_version">>, - 
<<"late_put_fsm_coordinator_ack">>, - <<"leveldb_read_block_error">>, - <<"list_fsm_active">>, - <<"list_fsm_create">>, - <<"list_fsm_create_error">>, - <<"list_fsm_create_error_total">>, - <<"list_fsm_create_total">>, - <<"map_actor_counts_100">>, - <<"map_actor_counts_95">>, - <<"map_actor_counts_99">>, - <<"map_actor_counts_mean">>, - <<"map_actor_counts_median">>, - <<"mem_allocated">>, - <<"mem_total">>, - <<"memory_atom">>, - <<"memory_atom_used">>, - <<"memory_binary">>, - <<"memory_code">>, - <<"memory_ets">>, - <<"memory_processes">>, - <<"memory_processes_used">>, - <<"memory_system">>, - <<"memory_total">>, - <<"merge_index_version">>, - <<"mochiweb_version">>, - <<"node_get_fsm_active">>, - <<"node_get_fsm_active_60s">>, - <<"node_get_fsm_counter_objsize_100">>, - <<"node_get_fsm_counter_objsize_95">>, - <<"node_get_fsm_counter_objsize_99">>, - <<"node_get_fsm_counter_objsize_mean">>, - <<"node_get_fsm_counter_objsize_median">>, - <<"node_get_fsm_counter_siblings_100">>, - <<"node_get_fsm_counter_siblings_95">>, - <<"node_get_fsm_counter_siblings_99">>, - <<"node_get_fsm_counter_siblings_mean">>, - <<"node_get_fsm_counter_siblings_median">>, - <<"node_get_fsm_counter_time_100">>, - <<"node_get_fsm_counter_time_95">>, - <<"node_get_fsm_counter_time_99">>, - <<"node_get_fsm_counter_time_mean">>, - <<"node_get_fsm_counter_time_median">>, - <<"node_get_fsm_errors">>, - <<"node_get_fsm_errors_total">>, - <<"node_get_fsm_in_rate">>, - <<"node_get_fsm_map_objsize_100">>, - <<"node_get_fsm_map_objsize_95">>, - <<"node_get_fsm_map_objsize_99">>, - <<"node_get_fsm_map_objsize_mean">>, - <<"node_get_fsm_map_objsize_median">>, - <<"node_get_fsm_map_siblings_100">>, - <<"node_get_fsm_map_siblings_95">>, - <<"node_get_fsm_map_siblings_99">>, - <<"node_get_fsm_map_siblings_mean">>, - <<"node_get_fsm_map_siblings_median">>, - <<"node_get_fsm_map_time_100">>, - <<"node_get_fsm_map_time_95">>, - <<"node_get_fsm_map_time_99">>, - <<"node_get_fsm_map_time_mean">>, - <<"node_get_fsm_map_time_median">>, - <<"node_get_fsm_objsize_100">>, - <<"node_get_fsm_objsize_95">>, - <<"node_get_fsm_objsize_99">>, - <<"node_get_fsm_objsize_mean">>, - <<"node_get_fsm_objsize_median">>, - <<"node_get_fsm_out_rate">>, - <<"node_get_fsm_rejected">>, - <<"node_get_fsm_rejected_60s">>, - <<"node_get_fsm_rejected_total">>, - <<"node_get_fsm_set_objsize_100">>, - <<"node_get_fsm_set_objsize_95">>, - <<"node_get_fsm_set_objsize_99">>, - <<"node_get_fsm_set_objsize_mean">>, - <<"node_get_fsm_set_objsize_median">>, - <<"node_get_fsm_set_siblings_100">>, - <<"node_get_fsm_set_siblings_95">>, - <<"node_get_fsm_set_siblings_99">>, - <<"node_get_fsm_set_siblings_mean">>, - <<"node_get_fsm_set_siblings_median">>, - <<"node_get_fsm_set_time_100">>, - <<"node_get_fsm_set_time_95">>, - <<"node_get_fsm_set_time_99">>, - <<"node_get_fsm_set_time_mean">>, - <<"node_get_fsm_set_time_median">>, - <<"node_get_fsm_siblings_100">>, - <<"node_get_fsm_siblings_95">>, - <<"node_get_fsm_siblings_99">>, - <<"node_get_fsm_siblings_mean">>, - <<"node_get_fsm_siblings_median">>, - <<"node_get_fsm_time_100">>, - <<"node_get_fsm_time_95">>, - <<"node_get_fsm_time_99">>, - <<"node_get_fsm_time_mean">>, - <<"node_get_fsm_time_median">>, - <<"node_gets">>, - <<"node_gets_counter">>, - <<"node_gets_counter_total">>, - <<"node_gets_map">>, - <<"node_gets_map_total">>, - <<"node_gets_set">>, - <<"node_gets_set_total">>, - <<"node_gets_total">>, - <<"node_put_fsm_active">>, - <<"node_put_fsm_active_60s">>, - <<"node_put_fsm_counter_time_100">>, - 
<<"node_put_fsm_counter_time_95">>, - <<"node_put_fsm_counter_time_99">>, - <<"node_put_fsm_counter_time_mean">>, - <<"node_put_fsm_counter_time_median">>, - <<"node_put_fsm_in_rate">>, - <<"node_put_fsm_map_time_100">>, - <<"node_put_fsm_map_time_95">>, - <<"node_put_fsm_map_time_99">>, - <<"node_put_fsm_map_time_mean">>, - <<"node_put_fsm_map_time_median">>, - <<"node_put_fsm_out_rate">>, - <<"node_put_fsm_rejected">>, - <<"node_put_fsm_rejected_60s">>, - <<"node_put_fsm_rejected_total">>, - <<"node_put_fsm_set_time_100">>, - <<"node_put_fsm_set_time_95">>, - <<"node_put_fsm_set_time_99">>, - <<"node_put_fsm_set_time_mean">>, - <<"node_put_fsm_set_time_median">>, - <<"node_put_fsm_time_100">>, - <<"node_put_fsm_time_95">>, - <<"node_put_fsm_time_99">>, - <<"node_put_fsm_time_mean">>, - <<"node_put_fsm_time_median">>, - <<"node_puts">>, - <<"node_puts_counter">>, - <<"node_puts_counter_total">>, - <<"node_puts_map">>, - <<"node_puts_map_total">>, - <<"node_puts_set">>, - <<"node_puts_set_total">>, - <<"node_puts_total">>, - <<"nodename">>, - <<"object_counter_merge">>, - <<"object_counter_merge_time_100">>, - <<"object_counter_merge_time_95">>, - <<"object_counter_merge_time_99">>, - <<"object_counter_merge_time_mean">>, - <<"object_counter_merge_time_median">>, - <<"object_counter_merge_total">>, - <<"object_map_merge">>, - <<"object_map_merge_time_100">>, - <<"object_map_merge_time_95">>, - <<"object_map_merge_time_99">>, - <<"object_map_merge_time_mean">>, - <<"object_map_merge_time_median">>, - <<"object_map_merge_total">>, - <<"object_merge">>, - <<"object_merge_time_100">>, - <<"object_merge_time_95">>, - <<"object_merge_time_99">>, - <<"object_merge_time_mean">>, - <<"object_merge_time_median">>, - <<"object_merge_total">>, - <<"object_set_merge">>, - <<"object_set_merge_time_100">>, - <<"object_set_merge_time_95">>, - <<"object_set_merge_time_99">>, - <<"object_set_merge_time_mean">>, - <<"object_set_merge_time_median">>, - <<"object_set_merge_total">>, - <<"os_mon_version">>, - <<"pbc_active">>, - <<"pbc_connects">>, - <<"pbc_connects_total">>, - <<"pbkdf2_version">>, - <<"pipeline_active">>, - <<"pipeline_create_count">>, - <<"pipeline_create_error_count">>, - <<"pipeline_create_error_one">>, - <<"pipeline_create_one">>, - <<"poolboy_version">>, - <<"postcommit_fail">>, - <<"precommit_fail">>, - <<"protobuffs_version">>, - <<"public_key_version">>, - <<"read_repairs">>, - <<"read_repairs_counter">>, - <<"read_repairs_counter_total">>, - <<"read_repairs_fallback_notfound_count">>, - <<"read_repairs_fallback_notfound_one">>, - <<"read_repairs_fallback_outofdate_count">>, - <<"read_repairs_fallback_outofdate_one">>, - <<"read_repairs_map">>, - <<"read_repairs_map_total">>, - <<"read_repairs_primary_notfound_count">>, - <<"read_repairs_primary_notfound_one">>, - <<"read_repairs_primary_outofdate_count">>, - <<"read_repairs_primary_outofdate_one">>, - <<"read_repairs_set">>, - <<"read_repairs_set_total">>, - <<"read_repairs_total">>, - <<"rebalance_delay_last">>, - <<"rebalance_delay_max">>, - <<"rebalance_delay_mean">>, - <<"rebalance_delay_min">>, - <<"rejected_handoffs">>, - <<"riak_api_version">>, - <<"riak_auth_mods_version">>, - <<"riak_control_version">>, - <<"riak_core_version">>, - <<"riak_dt_version">>, - <<"riak_kv_version">>, - <<"riak_kv_vnodeq_max">>, - <<"riak_kv_vnodeq_mean">>, - <<"riak_kv_vnodeq_median">>, - <<"riak_kv_vnodeq_min">>, - <<"riak_kv_vnodeq_total">>, - <<"riak_kv_vnodes_running">>, - <<"riak_pb_version">>, - <<"riak_pipe_version">>, - 
<<"riak_pipe_vnodeq_max">>, - <<"riak_pipe_vnodeq_mean">>, - <<"riak_pipe_vnodeq_median">>, - <<"riak_pipe_vnodeq_min">>, - <<"riak_pipe_vnodeq_total">>, - <<"riak_pipe_vnodes_running">>, - <<"riak_search_version">>, - <<"riak_sysmon_version">>, - <<"ring_creation_size">>, - <<"ring_members">>, - <<"ring_num_partitions">>, - <<"ring_ownership">>, - <<"rings_reconciled">>, - <<"rings_reconciled_total">>, - <<"runtime_tools_version">>, - <<"sasl_version">>, - <<"search_index_fail_count">>, - <<"search_index_fail_one">>, - <<"search_index_latency_95">>, - <<"search_index_latency_99">>, - <<"search_index_latency_999">>, - <<"search_index_latency_max">>, - <<"search_index_latency_mean">>, - <<"search_index_latency_median">>, - <<"search_index_latency_min">>, - <<"search_index_throughput_count">>, - <<"search_index_throughput_one">>, - <<"search_query_fail_count">>, - <<"search_query_fail_one">>, - <<"search_query_latency_95">>, - <<"search_query_latency_99">>, - <<"search_query_latency_999">>, - <<"search_query_latency_max">>, - <<"search_query_latency_mean">>, - <<"search_query_latency_median">>, - <<"search_query_latency_min">>, - <<"search_query_throughput_count">>, - <<"search_query_throughput_one">>, - <<"set_actor_counts_100">>, - <<"set_actor_counts_95">>, - <<"set_actor_counts_99">>, - <<"set_actor_counts_mean">>, - <<"set_actor_counts_median">>, - <<"sidejob_version">>, - <<"skipped_read_repairs">>, - <<"skipped_read_repairs_total">>, - <<"ssl_version">>, - <<"stdlib_version">>, - <<"storage_backend">>, - <<"syntax_tools_version">>, - <<"sys_driver_version">>, - <<"sys_global_heaps_size">>, - <<"sys_heap_type">>, - <<"sys_logical_processors">>, - <<"sys_monitor_count">>, - <<"sys_otp_release">>, - <<"sys_port_count">>, - <<"sys_process_count">>, - <<"sys_smp_support">>, - <<"sys_system_architecture">>, - <<"sys_system_version">>, - <<"sys_thread_pool_size">>, - <<"sys_threads_enabled">>, - <<"sys_wordsize">>, - <<"vnode_counter_update">>, - <<"vnode_counter_update_time_100">>, - <<"vnode_counter_update_time_95">>, - <<"vnode_counter_update_time_99">>, - <<"vnode_counter_update_time_mean">>, - <<"vnode_counter_update_time_median">>, - <<"vnode_counter_update_total">>, - <<"vnode_get_fsm_time_100">>, - <<"vnode_get_fsm_time_95">>, - <<"vnode_get_fsm_time_99">>, - <<"vnode_get_fsm_time_mean">>, - <<"vnode_get_fsm_time_median">>, - <<"vnode_gets">>, - <<"vnode_gets_total">>, - <<"vnode_index_deletes">>, - <<"vnode_index_deletes_postings">>, - <<"vnode_index_deletes_postings_total">>, - <<"vnode_index_deletes_total">>, - <<"vnode_index_reads">>, - <<"vnode_index_reads_total">>, - <<"vnode_index_refreshes">>, - <<"vnode_index_refreshes_total">>, - <<"vnode_index_writes">>, - <<"vnode_index_writes_postings">>, - <<"vnode_index_writes_postings_total">>, - <<"vnode_index_writes_total">>, - <<"vnode_map_update">>, - <<"vnode_map_update_time_100">>, - <<"vnode_map_update_time_95">>, - <<"vnode_map_update_time_99">>, - <<"vnode_map_update_time_mean">>, - <<"vnode_map_update_time_median">>, - <<"vnode_map_update_total">>, - <<"vnode_put_fsm_time_100">>, - <<"vnode_put_fsm_time_95">>, - <<"vnode_put_fsm_time_99">>, - <<"vnode_put_fsm_time_mean">>, - <<"vnode_put_fsm_time_median">>, - <<"vnode_puts">>, - <<"vnode_puts_total">>, - <<"vnode_set_update">>, - <<"vnode_set_update_time_100">>, - <<"vnode_set_update_time_95">>, - <<"vnode_set_update_time_99">>, - <<"vnode_set_update_time_mean">>, - <<"vnode_set_update_time_median">>, - <<"vnode_set_update_total">>, - <<"webmachine_version">>, - 
<<"write_once_merge">>, - <<"write_once_put_objsize_100">>, - <<"write_once_put_objsize_95">>, - <<"write_once_put_objsize_99">>, - <<"write_once_put_objsize_mean">>, - <<"write_once_put_objsize_median">>, - <<"write_once_put_time_100">>, - <<"write_once_put_time_95">>, - <<"write_once_put_time_99">>, - <<"write_once_put_time_mean">>, - <<"write_once_put_time_median">>, - <<"write_once_puts">>, - <<"write_once_puts_total">>, - <<"xmerl_version">>, - <<"yokozuna_version">> - ]. - -product_stats(riak_ee) -> - [ - <<"ebloom_version">>, - <<"mnesia_version">>, - <<"ranch_version">>, - <<"riak_jmx_version">>, - <<"riak_repl_version">>, - <<"riak_snmp_version">>, - <<"snmp_version">> - ]; -product_stats(riak) -> - []. diff --git a/tests/verify_search.erl b/tests/verify_search.erl deleted file mode 100644 index 2d98ebb32..000000000 --- a/tests/verify_search.erl +++ /dev/null @@ -1,68 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - -%% @doc Wrapper for the tests in riak_search/tests/riak_search - --module(verify_search). --include_lib("eunit/include/eunit.hrl"). - --export([confirm/0]). -%% To run in the possibly remote node --export([test_dirs/1]). - --define(SEARCH_REPO, "git://github.com/basho/riak_search"). - -confirm() -> - Config = [{riak_search, [{enabled, true}]}], - [Node0 | _RestNodes] = Nodes = rt:build_cluster(3, Config), - rt:wait_until_ring_converged(Nodes), - - Path = rt_config:get(rt_scratch_dir), - lager:info("Creating scratch dir if necessary at ~s", [Path]), - ?assertMatch({0, _}, rt:cmd("mkdir -p " ++ Path)), - SearchRepoDir = filename:join(Path, "riak_search"), - lager:info("Deleting any previous riak_search repo ~s", [SearchRepoDir]), - ?assertMatch({0, _}, rt:cmd("rm -rf " ++ SearchRepoDir)), - lager:info("Cloning riak_search repo within scratch dir"), - ?assertMatch({0, _}, rt:cmd("git clone --depth 1 "++?SEARCH_REPO, - [{cd, Path}])), - BaseDir = filename:join([Path, "riak_search", "tests", "riak_search"]), - - rt:load_modules_on_nodes([?MODULE], [Node0]), - TestDirs = rpc:call(Node0, ?MODULE, test_dirs, [BaseDir]), - ?assert(is_list(TestDirs)), - Run = - fun(Dir) -> - lager:info("Running test in directory ~s", [Dir]), - ?assertMatch(ok, - rpc:call(Node0, riak_search_test, test, [Dir])) - end, - lists:foreach(Run, TestDirs), - pass. - - -test_dirs(BaseDir) -> - {ok, SubDirs} = file:list_dir(BaseDir), - [filename:join([BaseDir, SubDir]) || - SubDir <- SubDirs, - %% @todo Figure out why this one is not run by run_all.sh - %% It does fail in a weird way if included - SubDir /= "replication_test", - filelib:is_file(filename:join([BaseDir, SubDir, "script.def"]))]. 
diff --git a/tests/verify_secondary_index_reformat.erl b/tests/verify_secondary_index_reformat.erl
deleted file mode 100644
index 1defc4ec0..000000000
--- a/tests/verify_secondary_index_reformat.erl
+++ /dev/null
@@ -1,121 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License.  You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied.  See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
--module(verify_secondary_index_reformat).
--behaviour(riak_test).
--export([confirm/0]).
--include_lib("eunit/include/eunit.hrl").
--include_lib("riakc/include/riakc.hrl").
-
-confirm() ->
-    [Node] = rt:build_cluster([legacy]),
-    rt:wait_until_nodes_ready([Node]),
-
-    check_fixed_index_statuses(Node, undefined),
-
-    TestBucket = <<"test">>,
-    TestKey = <<"badindex">>,
-    TestIndex = {integer_index, "foo"},
-    TestIdxValue = 1362400142028,
-
-    %% write key with index that old version of sext would encode improperly (not preserving
-    %% sort order)
-    lager:info("writing test key"),
-    Client0 = rt:pbc(Node),
-    Obj0 = riakc_obj:new(TestBucket, TestKey, <<"somevalue">>),
-    ObjMD0 = riakc_obj:get_update_metadata(Obj0),
-    ObjMD1 = riakc_obj:set_secondary_index(ObjMD0,
-                                           [{TestIndex, [TestIdxValue]}]),
-    Obj1 = riakc_obj:update_metadata(Obj0, ObjMD1),
-    ok = riakc_pb_socket:put(Client0, Obj1),
-
-    %% upgrade node to version that supports reformatting
-    rt:upgrade(Node, current),
-    rt:wait_for_service(Node, riak_kv),
-
-    %% some indexes have no data written and will be marked as fixed,
-    %% others will not since there are invalid indexes
-    check_fixed_index_statuses(Node, [true, false]),
-
-    lager:info("reformatting indexes and verifying range query"),
-    %% should rewrite 1 index (* n = 3), ignore 0 and have zero errors
-    {3, 0, 0} = rpc:call(Node, riak_kv_util, fix_incorrect_index_entries, []),
-
-    Client1 = rt:pbc(Node),
-    Results = riakc_pb_socket:get_index(Client1, TestBucket,
-                                        TestIndex, 1000000000000,
-                                        TestIdxValue),
-    ?assertMatch({ok, #index_results_v1{}}, Results),
-    {ok, ?INDEX_RESULTS{keys=ResultKeys}} = Results,
-    lager:info("found keys: ~p", [ResultKeys]),
-    ?assertEqual([TestKey], ResultKeys),
-
-    check_fixed_index_statuses(Node, true),
-
-    %% write some more data (make sure flag doesn't "roll back" on restart)
-    lager:info("writing some more data"),
-    rt:systest_write(Node, 10, 1),
-
-    lager:info("restarting node"),
-    rt:stop_and_wait(Node),
-    rt:start(Node),
-    rt:wait_for_service(Node, riak_kv),
-
-    check_fixed_index_statuses(Node, true),
-
-    lager:info("rewriting indexes in old format to prepare for downgrade"),
-    {3, 0, 0} = rpc:call(Node, riak_kv_util, fix_incorrect_index_entries, [[{downgrade, true}]]),
-
-    check_fixed_index_statuses(Node, false),
-
-    rt:stop_and_wait(Node),
-    rt:start(Node),
-    rt:wait_for_service(Node, riak_kv),
-    check_fixed_index_statuses(Node, false),
-
-    pass.
- -check_fixed_index_statuses(Node, E) when not is_list(E) -> - check_fixed_index_statuses(Node, [E]); -check_fixed_index_statuses(Node, ExpectedStatuses) -> - lager:info("Verifying fixed index status of ~p is one of ~p for all partitions", - [Node, ExpectedStatuses]), - Statuses = rpc:call(Node, riak_kv_status, vnode_status, []), - BadIndexes = [{Idx, proplists:get_value(fixed_indexes, Status)} || - {Idx, [{backend_status,_,Status}]} <- Statuses, - not fixed_index_status_ok(Status, ExpectedStatuses)], - ?assertEqual([], BadIndexes), - %% if we are checking for undefined then we are on old version that doesn't - %% have riak_kv_status:fixed_index_status/0 - case lists:member(undefined, ExpectedStatuses) of - false -> - IncompleteIndexes = [Idx || {Idx, [{backend_status,_,Status}]} <- Statuses, - fixed_index_status_ok(Status, [false])], - RPCStatus = rpc:call(Node, riak_kv_status, fixed_index_status, []), - case IncompleteIndexes of - [] -> ?assert(RPCStatus); - _ -> ?assertNot(RPCStatus) - end; - true -> - ok - end. - -fixed_index_status_ok(Status, Expected) -> - Found = proplists:get_value(fixed_indexes, Status), - lists:member(Found, Expected). diff --git a/tests/verify_snmp.erl b/tests/verify_snmp.erl deleted file mode 100644 index 639972fcf..000000000 --- a/tests/verify_snmp.erl +++ /dev/null @@ -1,97 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2010-2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(verify_snmp). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - --prereq("curl"). 
- -confirm() -> - %% Bring up a small cluster - Config = [{riak_snmp, [{polling_interval, 1000}]}], - [Node1] = rt:deploy_nodes(1, Config), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), - - Keys = [{vnodeGets,<<"vnode_gets">>}, - {vnodePuts,<<"vnode_puts">>}, - {nodeGets,<<"node_gets">>}, - {nodePuts,<<"node_puts">>}, - {nodeGetTimeMean,<<"node_get_fsm_time_mean">>}, - {nodeGetTimeMedian,<<"node_get_fsm_time_median">>}, - {nodeGetTime95,<<"node_get_fsm_time_95">>}, - {nodeGetTime99,<<"node_get_fsm_time_99">>}, - {nodeGetTime100,<<"node_get_fsm_time_100">>}, - {nodePutTimeMean,<<"node_put_fsm_time_mean">>}, - {nodePutTimeMedian,<<"node_put_fsm_time_median">>}, - {nodePutTime95,<<"node_put_fsm_time_95">>}, - {nodePutTime99,<<"node_put_fsm_time_99">>}, - {nodePutTime100,<<"node_put_fsm_time_100">>}], - - lager:info("Waiting for SNMP to start."), - - rpc:call(Node1, riak_core, wait_for_application, [snmp]), - rpc:call(Node1, riak_core, wait_for_application, [riak_snmp]), - - lager:info("Mapping SNMP names to OIDs"), - - OIDPairs = [ begin - {value, OID} = rpc:call(Node1, snmpa, name_to_oid, [SKey]), - {OID ++ [0], HKey} - end || {SKey, HKey} <- Keys ], - - lager:info("Doing some reads and writes to record some stats."), - - rt:systest_write(Node1, 10), - rt:systest_read(Node1, 10), - - lager:info("Waiting for HTTP Stats to be non-zero"), - ?assertEqual(ok, - rt:wait_until(Node1, fun(N) -> - Stats = get_stats(N), - proplists:get_value(<<"vnode_gets">>, Stats) =/= 0 - end)), - - - verify_eq(OIDPairs, Node1), - pass. - -verify_eq(Keys, Node) -> - {OIDs, HKeys} = lists:unzip(Keys), - ?assertEqual(ok, - rt:wait_until(Node, - fun(N) -> - Stats = get_stats(Node), - SStats = rpc:call(N, snmpa, get, [snmp_master_agent, OIDs]), - SPairs = lists:zip(SStats, HKeys), - lists:all( - fun({A,B}) -> - Stat = proplists:get_value(B, Stats), - lager:info("Comparing ~p | Stats ~p ~~ SNMP ~p", [B, Stat, A]), - A == Stat - end, - SPairs) - end)). - -get_stats(Node) -> - StatString = os:cmd(io_lib:format("curl -s -S ~s/stats", [rt:http_url(Node)])), - {struct, Stats} = mochijson2:decode(StatString), - Stats. diff --git a/tests/verify_snmp_repl.erl b/tests/verify_snmp_repl.erl deleted file mode 100644 index 71307fd36..000000000 --- a/tests/verify_snmp_repl.erl +++ /dev/null @@ -1,99 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2010-2015 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(verify_snmp_repl). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). --compile({parse_transform, rt_intercept_pt}). - -confirm() -> - Clusters = make_clusters(["cluster-1", "cluster-2", "cluster-3"], 1), - [{_, Leader, _}|_] = Clusters, - intercept_riak_snmp_stat_poller(Leader), - wait_for_snmp_stat_poller(). 
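The deleted verify_snmp test above resolves symbolic MIB names to OIDs and appends a 0 to address each scalar instance. A hypothetical shell sketch of that mapping (the OID digits shown are made up; snmpa:name_to_oid/1 and snmpa:get/2 are the real OTP calls the test invokes over rpc):

    %% On the Riak node (the test reaches these via rpc:call/4):
    {value, OID} = snmpa:name_to_oid(vnodeGets),           %% e.g. {value,[1,3,6,1,4|_]}
    [Count] = snmpa:get(snmp_master_agent, [OID ++ [0]]).  %% [0] suffix = scalar instance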
- -make_clusters(Names, NodeCount) -> - ClusterCount = length(Names), - Config = [{riak_snmp, [{polling_interval, 100}]}], - AllNodes = make_nodes(NodeCount, ClusterCount, Config), - Clusters = lists:zip(Names, AllNodes), - lists:foreach(fun make_named_cluster/1, Clusters), - lists:foreach(fun wait_until_ring_converged/1, Clusters), - lists:foreach(fun wait_until_leader_converge/1, Clusters), - - ClustersWithLeaders = [{Name, repl_util:get_leader(hd(Nodes)), Nodes} || {Name, Nodes} <- Clusters], - enable_realtime(ClustersWithLeaders), - ClustersWithLeaders. - -intercept_riak_snmp_stat_poller(Node) -> - RiakTestProcess = self(), - rt_intercept:add( - Node, - {riak_snmp_stat_poller, - [{{set_rows, 4}, - {[RiakTestProcess], - fun(Table, Indexes, Cols, IndexCol) - when Table =:= replRealtimeStatusTable; Table =:= replFullsyncStatusTable -> - try - riak_snmp_stat_poller_orig:set_rows_orig(Table, Indexes, Cols, IndexCol), - RiakTestProcess ! pass - catch - Exception:Reason -> - RiakTestProcess ! {fail, {Exception, Reason}}, - error({Exception, Reason}) - end - end}}]}). - -wait_for_snmp_stat_poller() -> - receive - pass -> pass; - {fail, Reason} -> {fail, Reason}; - X -> {fail, {unknown, X}} - after - 1000 -> {fail, timeout} - end. - -make_nodes(NodeCount, ClusterCount, Config) -> - Nodes = rt:deploy_nodes(NodeCount * ClusterCount, Config), - sublists(Nodes, NodeCount). - -sublists(List, Len) -> - lists:map( - fun(I) -> lists:sublist(List, I, Len) end, - lists:seq(1, length(List), Len)). - -make_named_cluster({Name, Nodes}) -> - repl_util:make_cluster(Nodes), - repl_util:name_cluster(hd(Nodes), Name). - -wait_until_ring_converged({_Name, Nodes}) -> - rt:wait_until_ring_converged(Nodes). - -wait_until_leader_converge({_Name, Nodes}) -> - repl_util:wait_until_leader_converge(Nodes). - -enable_realtime([{_, Node, _}|OtherClusters]) -> - lists:foreach( - fun({Cluster, _, _}) -> - repl_util:enable_realtime(Node, Cluster) - end, - OtherClusters). - diff --git a/tests/verify_staged_clustering.erl b/tests/verify_staged_clustering.erl deleted file mode 100644 index f792c4800..000000000 --- a/tests/verify_staged_clustering.erl +++ /dev/null @@ -1,183 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_staged_clustering). --behavior(riak_test). --export([confirm/0]). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). 
- -confirm() -> - Nodes = rt:deploy_nodes(4), - [Node1, Node2, Node3, Node4] = Nodes, - Nodes123 = [Node1, Node2, Node3], - Nodes23 = [Node2, Node3], - - lager:info("Join ~p and ~p to ~p", [Node2, Node3, Node1]), - [stage_join(Node, Node1) || Node <- Nodes23], - ?assertEqual(ok, rt:wait_until_all_members(Nodes123)), - ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes123)), - - lager:info("Ensure that ~p has not yet claimed partitions", [Node2]), - [?assertEqual([Node1], rt:owners_according_to(Node)) || Node <- Nodes123], - - lager:info("Commit without first printing the plan. This should fail"), - commit_staged(Node1), - - lager:info("Print staged plan and then commit"), - print_staged(Node1), - commit_staged(Node1), - - lager:info("Ensure that ~p now own all partitions", [Nodes123]), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes123)), - ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes123)), - rt:assert_nodes_agree_about_ownership(Nodes123), - - lager:info("Join ~p to the cluster", [Node4]), - stage_join(Node4, Node1), - ?assertEqual(ok, rt:wait_until_all_members(Nodes)), - - lager:info("Stage replacement of ~p with ~p", [Node2, Node4]), - stage_replace(Node1, Node2, Node4), - - lager:info("Print staged plan and commit"), - print_staged(Node1), - commit_staged(Node1), - - Nodes134 = [Node1, Node3, Node4], - lager:info("Ensure that ~p now own all partitions", [Nodes134]), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes134)), - ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes134)), - rt:assert_nodes_agree_about_ownership(Nodes134), - - lager:info("Verify that ~p shutdown after being replaced", [Node2]), - ?assertEqual(ok, rt:wait_until_unpingable(Node2)), - - lager:info("Restart ~p and re-join to cluster", [Node2]), - rt:start(Node2), - %% Wait for Node 2 to be fully up before doing the force replace. This seems - %% to be what's causing the issue in BTA-175 - rt:wait_for_service(Node2, riak_kv), - - stage_join(Node2, Node1), - ?assertEqual(ok, rt:wait_until_all_members(Nodes)), - - lager:info("Schedule force-replace of ~p with ~p", [Node3, Node2]), - stage_force_replace(Node4, Node3, Node2), - - lager:info("Print staged plan and commit"), - print_staged(Node4), - commit_staged(Node4), - - Nodes124 = [Node1, Node2, Node4], - lager:info("Ensure that ~p now own all partitions", [Nodes124]), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes124)), - ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes124)), - rt:assert_nodes_agree_about_ownership(Nodes124), - - lager:info("Stage leave of ~p", [Node2]), - stage_leave(Node1, Node2), - lager:info("Stage force-remove of ~p", [Node4]), - stage_remove(Node1, Node4), - - lager:info("Print staged plan and verify clear_staged works"), - print_staged(Node1), - clear_staged(Node1), - commit_staged(Node1), - - lager:info("Re-stage leave of ~p and force-remove of ~p", [Node2, Node4]), - stage_leave(Node1, Node2), - stage_remove(Node1, Node4), - lager:info("Print staged plan and commit"), - print_staged(Node1), - commit_staged(Node1), - - lager:info("Verify that ~p is the only remaining cluster member", [Node1]), - ?assertEqual(ok, rt:wait_until_no_pending_changes([Node1])), - ?assertEqual([Node1], rt:owners_according_to(Node1)), - ?assertEqual(ok, rt:wait_until_all_members([Node1])), - - lager:info("Test verify_staged_clustering: Passed"), - pass. - -n(Atom) -> - atom_to_list(Atom). - -stage_join(Node, OtherNode) -> - %% rpc:call(Node, riak_kv_console, staged_join, [[n(OtherNode)]]). 
- rt:admin(Node, ["cluster", "join", n(OtherNode)]). - -stage_leave(Node, OtherNode) -> - %% rpc:call(Node, riak_core_console, stage_leave, [[n(OtherNode)]]). - rt:admin(Node, ["cluster", "leave", n(OtherNode)]). - -stage_remove(Node, OtherNode) -> - %% rpc:call(Node, riak_core_console, stage_remove, [[n(OtherNode)]]). - rt:admin(Node, ["cluster", "force-remove", n(OtherNode)]). - -stage_replace(Node, Node1, Node2) -> - %% rpc:call(Node, riak_core_console, stage_replace, [[n(Node1), n(Node2)]]). - rt:admin(Node, ["cluster", "replace", n(Node1), n(Node2)]). - -stage_force_replace(Node, Node1, Node2) -> - %% rpc:call(Node, riak_core_console, stage_force_replace, [[n(Node1), n(Node2)]]). - rt:admin(Node, ["cluster", "force-replace", n(Node1), n(Node2)]). - -print_staged(Node) -> - %% rpc:call(Node, riak_core_console, print_staged, [[]]). - F = fun(_) -> - {ok, StdOut} = rt:admin(Node, ["cluster", "plan"]), - case StdOut of - "Cannot" ++ _X -> false; - _ -> true - end - end, - rt:wait_until(Node, F). - -commit_staged(Node) -> - %% rpc:call(Node, riak_core_console, commit_staged, [[]]). - rt:admin(Node, ["cluster", "commit"]). - -clear_staged(Node) -> - %% rpc:call(Node, riak_core_console, clear_staged, [[]]). - rt:admin(Node, ["cluster", "clear"]). - -stage_join_rpc(Node, OtherNode) -> - rpc:call(Node, riak_core, staged_join, [OtherNode]). - -stage_leave_rpc(Node, OtherNode) -> - rpc:call(Node, riak_core_claimant, leave_member, [OtherNode]). - -stage_remove_rpc(Node, OtherNode) -> - rpc:call(Node, riak_core_claimant, remove_member, [OtherNode]). - -stage_replace_rpc(Node, Node1, Node2) -> - rpc:call(Node, riak_core_claimant, replace, [Node1, Node2]). - -stage_force_replace_rpc(Node, Node1, Node2) -> - rpc:call(Node, riak_core_claimant, force_replace, [Node1, Node2]). - -plan_staged_rpc(Node) -> - rpc:call(Node, riak_core_claimant, plan, []). - -commit_staged_rpc(Node) -> - rpc:call(Node, riak_core_claimant, commit, []). - -clear_staged_rpc(Node) -> - rpc:call(Node, riak_core_claimant, clear, []). diff --git a/tests/verify_tick_change.erl b/tests/verify_tick_change.erl deleted file mode 100644 index 3390fbac8..000000000 --- a/tests/verify_tick_change.erl +++ /dev/null @@ -1,85 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- - --module(verify_tick_change). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). 
- -confirm() -> - ClusterSize = 4, - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), - NewConfig = [], - Nodes = rt:build_cluster(ClusterSize, NewConfig), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - [Node1|_] = Nodes, - Bucket = <<"systest">>, - Start = 0, End = 100, - W = quorum, - NewTime = 11, - - write_stuff(Nodes, Start, End, Bucket, W, <<>>), - read_stuff(Nodes, Start, End, Bucket, W, <<>>), - - io:format("Start ticktime daemon on ~p, then wait a few seconds\n",[Node1]), - rpc:call(Node1, riak_core_net_ticktime, start_set_net_ticktime_daemon, - [Node1, NewTime]), - timer:sleep(2*1000), - - io:format("Changing net_ticktime to ~p\n", [NewTime]), - ok = rt:wait_until( - fun() -> - write_read_poll_check(Nodes, NewTime, Start, End, Bucket, W) - end), - lager:info("If we got this far, then we found no inconsistencies\n"), - [begin - RemoteTime = rpc:call(Node, net_kernel, get_net_ticktime, []), - io:format("Node ~p tick is ~p\n", [Node, RemoteTime]), - ?assertEqual(NewTime, RemoteTime) - end || Node <- lists:usort([node()|nodes(connected)])], - io:format("If we got this far, all nodes are using the same tick time\n"), - - pass. - -make_common() -> - list_to_binary(io_lib:format("~p", [now()])). - -write_stuff(Nodes, Start, End, Bucket, W, Common) -> - Nd = lists:nth(length(Nodes), Nodes), - [] = rt:systest_write(Nd, Start, End, Bucket, W, Common). - -read_stuff(Nodes, Start, End, Bucket, W, Common) -> - Nd = lists:nth(length(Nodes), Nodes), - [] = rt:systest_read(Nd, Start, End, Bucket, W, Common). - -is_set_net_ticktime_done(Nodes, Time) -> - case lists:usort([(catch rpc:call(Node, net_kernel, get_net_ticktime,[])) - || Node <- Nodes]) of - [Time] -> - true; - _ -> - false - end. - -write_read_poll_check(Nodes, NewTime, Start, End, Bucket, W) -> - Common = make_common(), - write_stuff(Nodes, Start, End, Bucket, W, Common), - read_stuff(Nodes, Start, End, Bucket, W, Common), - is_set_net_ticktime_done(Nodes, NewTime). diff --git a/tests/verify_vclock.erl b/tests/verify_vclock.erl deleted file mode 100644 index 64e03ea1e..000000000 --- a/tests/verify_vclock.erl +++ /dev/null @@ -1,204 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2013 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_vclock). --behavior(riak_test). --export([confirm/0]). --include_lib("eunit/include/eunit.hrl"). - -%% We've got a separate test for capability negotiation and other mechanisms, so the test here is fairly -%% straightforward: get a list of different versions of nodes and join them into a cluster, making sure that -%% each time our data has been replicated: -confirm() -> - NTestItems = 10, %% How many test items to write/verify? - TestMode = false, %% Set to false for "production tests", true if too slow. 
-    EncodingTypes = [default, encode_raw, encode_zlib],  %% Usually, you won't want to fiddle with these.
-
-    lists:foreach(fun(EncodingType) -> run_test(TestMode, NTestItems, EncodingType) end, EncodingTypes),
-
-    lager:info("Test verify_vclock passed."),
-    pass.
-
-run_test(TestMode, NTestItems, VClockEncoding) ->
-
-    lager:info("Testing vclock (encoding: ~p)", [VClockEncoding]),
-
-    %% This resets nodes, cleans up stale directories, etc.:
-    lager:info("Cleaning up..."),
-    rt:setup_harness(dummy, dummy),
-
-    %% In reality, we could probably do this with a single node, but now the plumbing's already here:
-    lager:info("Spinning up test nodes"),
-    [RootNode, TestNode0, TestNode1] = Nodes = deploy_test_nodes(TestMode, 3),
-
-    %% First, exercise the default setting, then force known encodings and see if we get our data back.
-    try_encoding(RootNode, default, NTestItems),
-    try_encoding(TestNode0, encode_raw, NTestItems),
-    try_encoding(TestNode1, encode_zlib, NTestItems),
-
-    stopall(Nodes),
-    lager:info("Test run for encoding ~p passed.", [VClockEncoding]),
-    pass.
-
-try_encoding(TestNode, Encoding, NTestItems) ->
-
-    rt:wait_for_service(TestNode, riak_kv),
-    force_encoding(TestNode, Encoding),
-
-    %% Check to see if we can round-trip with the selected encoding:
-    lager:info("Testing round-trip for encoding ~p...", [Encoding]),
-    Input = <<"delicious ham">>,
-    Encoded = riak_object:encode_vclock(Input),
-    Decoded = riak_object:decode_vclock(Encoded),
-    Input = Decoded,
-
-    %% Try to find some data that does not exist:
-    lager:info("Testing find-missing..."),
-    Results0 = our_pbc_read(TestNode, NTestItems, <<"saba">>),
-    ?assertEqual(NTestItems, length(Results0)),
-    lager:info("Ok, data not found (as expected)."),
-
-    %% Do an initial write and see if we can get our data back (indirectly test vclock creation and
-    %% encoding):
-    lager:info("Testing write-and-read..."),
-    our_pbc_write(TestNode, NTestItems),
-    Results1 = our_pbc_read(TestNode, NTestItems),
-    ?assertEqual(0, length(Results1)),
-    lager:info("Ok, data looks all right."),
-
-    %% Update the data and see if everything worked; the idea is to indirectly test vclock increment:
-    lager:info("Testing update..."),
-    our_pbc_write(TestNode, NTestItems, <<"hamachi">>),
-    Results2 = our_pbc_read(TestNode, NTestItems, <<"hamachi">>),
-    ?assertEqual(0, length(Results2)),
-    lager:info("Ok, data looks all right.")
-.
-
-force_encoding(Node, EncodingMethod) ->
-    case EncodingMethod of
-        default -> lager:info("Using default encoding type."), true;
-
-        _ -> lager:info("Forcing encoding type to ~p.", [EncodingMethod]),
-             OverrideData =
-                 [
-                  { riak_kv,
-                    [
-                     { override_capability,
-                       [
-                        { vclock_data_encoding,
-                          [
-                           { use, EncodingMethod},
-                           { prefer, EncodingMethod}
-                          ]
-                        }
-                       ]
-                     }
-                    ]
-                  }
-                 ],
-
-             rt:update_app_config(Node, OverrideData)
-
-    end.
-
-stopall(Nodes) ->
-    lists:foreach(fun(N) -> rt:brutal_kill(N) end, Nodes).
-
-make_kv(N, VSuffix) ->
-    K = <<N:32/integer>>,
-    V = <<K/binary, VSuffix/binary>>,
-    { K, V }.
-
-%% Unfortunately, the rt module's systest write/read doesn't wind up triggering a vclock, so
-%% we need our own version:
-our_pbc_write(Node, Size) ->
-    our_pbc_write(Node, 1, Size, <<"systest">>, <<>>).
-
-our_pbc_write(Node, Size, Suffix) ->
-    our_pbc_write(Node, 1, Size, <<"systest">>, Suffix).
- -our_pbc_write(Node, Start, End, Bucket, VSuffix) -> - PBC = rt:pbc(Node), - F = fun(N, Acc) -> - {K, V} = make_kv(N, VSuffix), - try rt:pbc_write(PBC, Bucket, K, V) of - ok -> - Acc; - Other -> - [{N, Other} | Acc] - catch - What:Why -> - [{N, {What, Why}} | Acc] - end - end, - lists:foldl(F, [], lists:seq(Start, End)). - -our_pbc_read(Node, Size) -> - our_pbc_read(Node, 1, Size, <<"systest">>, <<>>). - -our_pbc_read(Node, Size, Suffix) -> - our_pbc_read(Node, 1, Size, <<"systest">>, Suffix). - -our_pbc_read(Node, Start, End, Bucket, VSuffix) -> - PBC = rt:pbc(Node), - - %% Trundle along through the list, collecting mismatches: - F = fun(N, Acc) -> - {K, V} = make_kv(N, VSuffix), - - AddFailure = fun(Reason, EntryN, Accumulator) -> - [{EntryN, Reason} | Accumulator] - end, - - ResultValue = riakc_pb_socket:get(PBC, Bucket, K), - case ResultValue of - {ok, Obj} -> - ObjectValue = riakc_obj:get_value(Obj), - case ObjectValue of - V -> - Acc; - WrongVal -> - [{N, {wrong_val, WrongVal}} | Acc] - end; - - {error, timeout} -> - lager:error("timeout"), - AddFailure({error, timeout}, N, Acc); - {error, disconnected} -> - lager:error("disconnected"), - AddFailure({error, disconnected}, N, Acc); - - Other -> - AddFailure(Other, N, Acc) - end - end, - lists:foldl(F, [], lists:seq(Start, End)) -. - -%% For some testing purposes, making these limits smaller is helpful: -deploy_test_nodes(false, N) -> - rt:deploy_nodes(N); -deploy_test_nodes(true, N) -> - lager:info("NOTICE: Using turbo settings for testing."), - Config = [{riak_core, [{forced_ownership_handoff, 8}, - {handoff_concurrency, 8}, - {vnode_inactivity_timeout, 1000}, - {gossip_limit, {10000000, 60000}}]}], - rt:deploy_nodes(N, Config). - diff --git a/tests/verify_vclock_encoding_upgrade.erl b/tests/verify_vclock_encoding_upgrade.erl deleted file mode 100644 index e8614e83b..000000000 --- a/tests/verify_vclock_encoding_upgrade.erl +++ /dev/null @@ -1,45 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2012 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%% ------------------------------------------------------------------- --module(verify_vclock_encoding_upgrade). --behavior(riak_test). --export([confirm/0]). 
-
-confirm() ->
-    lager:info("Deploying previous cluster"),
-    [Prev, Current] = rt:build_cluster([previous, current]),
-    PrevClient = rt:pbc(Prev),
-    CurrentClient = rt:pbc(Current),
-    K = <<"key">>,
-    B = <<"bucket">>,
-    V = <<"value">>,
-    lager:info("Putting object in previous version node"),
-    riakc_pb_socket:put(PrevClient, riakc_obj:new(B, K, V)),
-    lager:info("Fetching object from previous version node"),
-    {ok, O} = riakc_pb_socket:get(PrevClient, B, K),
-    O2 = riakc_obj:update_value(O, <<"value2">>),
-    lager:info("Putting updated object in current version node"),
-    ok = riakc_pb_socket:put(CurrentClient, O2),
-    lager:info("Fetching again from current version node"),
-    {ok, O3} = riakc_pb_socket:get(CurrentClient, B, K),
-    O4 = riakc_obj:update_value(O3, <<"value2">>),
-    lager:info("Putting updated object back in previous version node"),
-    ok = riakc_pb_socket:put(PrevClient, O4),
-    lager:info("That's all, folks!"),
-    pass.
diff --git a/tests/verify_write_once.erl b/tests/verify_write_once.erl
deleted file mode 100644
index 02fcf41d4..000000000
--- a/tests/verify_write_once.erl
+++ /dev/null
@@ -1,333 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2015 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License.  You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied.  See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
--module(verify_write_once).
--export([confirm/0]).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(DEFAULT_RING_SIZE, 16).
--define(NVAL, 2).
--define(BUCKET_TYPE, <<"write_once">>).
--define(BUCKET, {?BUCKET_TYPE, <<"bucket">>}).
--define(ASYNC_PUT_BUCKET_TYPE, <<"async_put">>).
--define(ASYNC_PUT_BUCKET, {?ASYNC_PUT_BUCKET_TYPE, <<"bucket">>}).
--define(ANY_VALUE, <<"any">>).
-
-
-%% @doc This test exercises the write_once bucket property, which results in puts that avoid coordination
-%% and reads before writes, and which therefore have lower latency and higher throughput.
-%%
-confirm() ->
-    %%
-    %% Set up two clusters. We need one for most of the testing of this code path.
-    %% The first cluster will use the memory back end.
-    %% The second cluster will be a singleton cluster with the leveldb back end,
-    %% in order to test asynchronous puts
-    %%
-    [Cluster1, Cluster2] = rt:deploy_clusters([
-        {4, config(?DEFAULT_RING_SIZE, ?NVAL)},
-        {1, config(?DEFAULT_RING_SIZE, ?NVAL, riak_kv_eleveldb_backend)}
-    ]),
-    rt:join_cluster(Cluster1),
-    % rt:join_cluster(Cluster2),
-    lager:info("Set up clusters: ~p, ~p", [Cluster1, Cluster2]),
-    %%
-    %% Select a random node, and use it to create an immutable bucket
-    %%
-    Node = lists:nth(random:uniform(length(Cluster1)), Cluster1),
-    rt:create_and_activate_bucket_type(Node, ?BUCKET_TYPE, [{write_once, true}]),
-    rt:wait_until_bucket_type_status(?BUCKET_TYPE, active, Cluster1),
-    lager:info("Created ~p bucket type on ~p", [?BUCKET_TYPE, Node]),
-    %%
-    %%
-    %%
-    pass = confirm_put(Node),
-    pass = confirm_w(Cluster1),
-    pass = confirm_pw(Cluster1),
-    pass = confirm_rww(Cluster1),
-    pass = confirm_async_put(hd(Cluster2)),
-    pass.
-
-%%
-%% private
-%%
-
-
-confirm_put(Node) ->
-    ok = verify_put(Node, ?BUCKET, <<"confirm_put_key">>, <<"confirm_put_value">>),
-    verify_failed_put(
-        Node, ?BUCKET, <<"confirm_put-bad_w">>, ?ANY_VALUE, [{w, 9999}],
-        fun(Error) ->
-            ?assertMatch({n_val_violation, 3}, Error)
-        end
-    ),
-    verify_failed_put(
-        Node, ?BUCKET, <<"confirm_put-bad_pw">>, ?ANY_VALUE, [{pw, 9999}],
-        fun(Error) ->
-            ?assertMatch({n_val_violation, 3}, Error)
-        end
-    ),
-    lager:info("confirm_put...ok"),
-    pass.
-
-
-confirm_w(Nodes) ->
-    %%
-    %% split the cluster into 2 partitions [dev1, dev2, dev3], [dev4]
-    %%
-    P1 = lists:sublist(Nodes, 3),
-    P2 = lists:sublist(Nodes, 4, 1),
-    PartitionInfo = rt:partition(P1, P2),
-    [Node1 | _Rest1] = P1,
-    verify_put(Node1, ?BUCKET, <<"confirm_w_key">>, <<"confirm_w_value">>),
-    [Node2 | _Rest2] = P2,
-    %%
-    %% By setting sloppy_quorum to false, we require a strict quorum of primaries. But because
-    %% we only have one node in the partition, the put should fail. It should bail immediately
-    %% without even attempting a write on the back end, because a quorum will not be possible.
-    %%
-    verify_failed_put(
-        Node2, ?BUCKET, <<"confirm_w_key">>, <<"confirm_w_value">>, [{sloppy_quorum, false}],
-        fun(Error) ->
-            ?assertMatch({insufficient_vnodes, _, need, 2}, Error)
-        end
-    ),
-    rt:heal(PartitionInfo),
-    lager:info("confirm_w...ok"),
-    pass.
-
-
-confirm_pw(Nodes) ->
-    %%
-    %% split the cluster into 2 partitions [dev1, dev2, dev3], [dev4]
-    %%
-    P1 = lists:sublist(Nodes, 3),
-    P2 = lists:sublist(Nodes, 4, 1),
-    PartitionInfo = rt:partition(P1, P2),
-    [Node1 | _Rest1] = P1,
-    verify_put(Node1, ?BUCKET, <<"confirm_pw_key">>, <<"confirm_pw_value">>),
-    [Node2 | _Rest2] = P2,
-    %%
-    %% Similar to the above test -- if pw is all, then we require n_val puts on primaries, but
-    %% the node is a singleton in the partition, so this, too, should fail. This will time
-    %% out, so set the timeout to something small.
-    %%
-    verify_put_timeout(
-        Node2, ?BUCKET, <<"confirm_pw_key">>, ?ANY_VALUE, [{pw, all}], 1000,
-        fun(Error) ->
-            ?assertMatch({pw_val_unsatisfied, 3, _}, Error)
-        end
-    ),
-    rt:heal(PartitionInfo),
-    lager:info("confirm_pw...ok"),
-    pass.
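For context on the two partition tests above, a hedged sketch (values illustrative, not from this patch) of how such a quorum failure surfaces to the client; parse/1, defined further down in this file, turns the binary reason back into a term for matching:

    Client = rt:pbc(Node2),
    Obj = riakc_obj:new(?BUCKET, <<"k">>, <<"v">>),
    {error, Reason} = riakc_pb_socket:put(Client, Obj, [{sloppy_quorum, false}]),
    %% Reason is a binary such as <<"{insufficient_vnodes,1,need,2}">>;
    %% parse(Reason) yields {insufficient_vnodes,1,need,2} for ?assertMatch.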
-
-confirm_rww(Nodes) ->
-    %%
-    %% split the cluster into 2 partitions
-    %%
-    P1 = lists:sublist(Nodes, 2),
-    P2 = lists:sublist(Nodes, 3, 2),
-    PartitionInfo = rt:partition(P1, P2),
-    NumFastMerges = num_fast_merges(Nodes),
-    %%
-    %% put different values into each partition
-    %%
-    [Node1 | _Rest1] = P1,
-    verify_put(Node1, ?BUCKET, <<"confirm_rww_key">>, <<"confirm_rww_value1">>),
-    [Node2 | _Rest2] = P2,
-    verify_put(Node2, ?BUCKET, <<"confirm_rww_key">>, <<"confirm_rww_value2">>),
-    %%
-    %% After healing, both should agree on an arbitrary value
-    %%
-    rt:heal(PartitionInfo),
-    rt:wait_until(fun() ->
-        V1 = get(Node1, ?BUCKET, <<"confirm_rww_key">>),
-        V2 = get(Node2, ?BUCKET, <<"confirm_rww_key">>),
-        V1 =:= V2
-    end),
-    ?assert(NumFastMerges < num_fast_merges(Nodes)),
-    lager:info("confirm_rww...ok"),
-    pass.
-
-%%
-%% In order to test asynchronous puts, at this point we need a node with leveldb, as
-%% that is currently the only back end that supports it. In the future, we may add
-%% async puts as a capability which can be arbitrated through the multi backend.
-%%
-confirm_async_put(Node) ->
-    %%
-    %% Set up the intercepts on the singleton node in cluster2
-    %%
-    make_intercepts_tab(Node),
-    rt_intercept:add(
-        Node, {riak_kv_vnode, [
-            %% Count every time riak_kv_vnode:handle_command/3 is called with a write_once message
-            {{handle_command, 3}, count_w1c_handle_command}
-        ]}
-    ),
-    %%
-    %% Create the bucket type
-    %%
-    rt:create_and_activate_bucket_type(Node, ?ASYNC_PUT_BUCKET_TYPE, [{write_once, true}, {backend, myeleveldb}]),
-    rt:wait_until_bucket_type_status(?ASYNC_PUT_BUCKET_TYPE, active, [Node]),
-    lager:info("Created ~p bucket type on ~p", [?ASYNC_PUT_BUCKET_TYPE, Node]),
-    %%
-    %% Clear the intercept counters
-    %%
-    true = rpc:call(Node, ets, insert, [intercepts_tab, {w1c_async_replies, 0}]),
-    true = rpc:call(Node, ets, insert, [intercepts_tab, {w1c_sync_replies, 0}]),
-
-    ok = verify_put(Node, ?ASYNC_PUT_BUCKET, <<"confirm_async_put_key">>, <<"confirm_async_put_value">>),
-    %%
-    %% verify that we have handled 3 asynchronous writes and 0 synchronous writes
-    %%
-    [{_, W1CAsyncReplies}] = rpc:call(Node, ets, lookup, [intercepts_tab, w1c_async_replies]),
-    [{_, W1CSyncReplies}] = rpc:call(Node, ets, lookup, [intercepts_tab, w1c_sync_replies]),
-    ?assertEqual(0, W1CSyncReplies),
-    ?assertEqual(3, W1CAsyncReplies),
-    %%
-    %% reconfigure the node to force use of synchronous writes with leveldb
-    %%
-    rt:update_app_config(Node, [{riak_kv, [{allow_async_put, false}]}]),
-    rt:start(Node),
-    %%
-    %% Set up the intercepts on the singleton node in cluster2
-    %%
-    make_intercepts_tab(Node),
-    rt_intercept:add(
-        Node, {riak_kv_vnode, [
-            %% Count every time riak_kv_vnode:handle_command/3 is called with a write_once message
-            {{handle_command, 3}, count_w1c_handle_command}
-        ]}
-    ),
-    %%
-    %% Clear the intercept counters
-    %%
-    true = rpc:call(Node, ets, insert, [intercepts_tab, {w1c_async_replies, 0}]),
-    true = rpc:call(Node, ets, insert, [intercepts_tab, {w1c_sync_replies, 0}]),
-
-    ok = verify_put(Node, ?ASYNC_PUT_BUCKET, <<"confirm_async_put_key">>, <<"confirm_async_put_value">>),
-    %%
-    %% verify that we have handled 0 asynchronous writes and 3 synchronous writes, instead
-    %%
-    [{_, W1CAsyncReplies2}] = rpc:call(Node, ets, lookup, [intercepts_tab, w1c_async_replies]),
-    [{_, W1CSyncReplies2}] = rpc:call(Node, ets, lookup, [intercepts_tab, w1c_sync_replies]),
-    ?assertEqual(3, W1CSyncReplies2),
-    ?assertEqual(0, W1CAsyncReplies2),
-    %%
-    %% done!
-    %%
-    lager:info("confirm_async_put...ok"),
-    pass.
-
-verify_put(Node, Bucket, Key, Value) ->
-    verify_put(Node, Bucket, Key, Value, [], Value).
-
-verify_put(Node, Bucket, Key, Value, Options, ExpectedValue) ->
-    Client = rt:pbc(Node),
-    _Ret = riakc_pb_socket:put(
-        Client, riakc_obj:new(
-            Bucket, Key, Value
-        ),
-        Options
-    ),
-    {ok, Val} = riakc_pb_socket:get(Client, Bucket, Key),
-    ?assertEqual(ExpectedValue, riakc_obj:get_value(Val)),
-    ok.
-
-verify_failed_put(Node, Bucket, Key, Value, Options, ExpectedPutReturnFunc) ->
-    Client = rt:pbc(Node),
-    {error, PutReturnValue} = riakc_pb_socket:put(
-        Client, riakc_obj:new(
-            Bucket, Key, Value
-        ),
-        Options
-    ),
-    ExpectedPutReturnFunc(parse(PutReturnValue)),
-    ok.
-
-
-verify_put_timeout(Node, Bucket, Key, Value, Options, Timeout, ExpectedPutReturnFunc) ->
-    Client = rt:pbc(Node),
-    {Time, {error, Val}} = timer:tc(
-        fun() ->
-            riakc_pb_socket:put(
-                Client, riakc_obj:new(
-                    Bucket, Key, Value
-                ), [{timeout, Timeout} | Options]
-            )
-        end
-    ),
-    ExpectedPutReturnFunc(parse(Val)),
-    %% timer:tc returns microseconds; compare in milliseconds
-    ?assert(Time div 1000 =< 2*Timeout),
-    ok.
-
-num_fast_merges(Nodes) ->
-    lists:foldl(
-        fun(Node, Acc) ->
-            {write_once_merge, N} = proplists:lookup(
-                write_once_merge,
-                rpc:call(Node, riak_kv_stat, get_stats, [])
-            ),
-            Acc + N
-        end,
-        0, Nodes
-    ).
-
-get(Node, Bucket, Key) ->
-    Client = rt:pbc(Node),
-    {ok, Val} = riakc_pb_socket:get(Client, Bucket, Key),
-    riakc_obj:get_value(Val).
-
-config(RingSize, NVal) ->
-    config(RingSize, NVal, riak_kv_multi_backend).
-
-config(RingSize, NVal, Backend) ->
-    [
-        {riak_core, [
-            {default_bucket_props, [{n_val, NVal}]},
-            {vnode_management_timer, 1000},
-            {ring_creation_size, RingSize}]
-        },
-        {riak_kv, [
-            {anti_entropy_build_limit, {100, 1000}},
-            {anti_entropy_concurrency, 100},
-            {anti_entropy_tick, 100},
-            {anti_entropy, {on, []}},
-            {anti_entropy_timeout, 5000},
-            {storage_backend, Backend},
-            {multi_backend, [
-                {mymemory, riak_kv_memory_backend, []},
-                {myeleveldb, riak_kv_eleveldb_backend, []}
-            ]}
-        ]}
-    ].
-
-parse(Binary) ->
-    {ok, Tokens, _} = erl_scan:string(binary_to_list(Binary) ++ "."),
-    {ok, Term} = erl_parse:parse_term(Tokens),
-    Term.
-
-make_intercepts_tab(Node) ->
-    SupPid = rpc:call(Node, erlang, whereis, [sasl_safe_sup]),
-    intercepts_tab = rpc:call(Node, ets, new, [intercepts_tab, [named_table,
-        public, set, {heir, SupPid, {}}]]).
diff --git a/tests/vnode_util.erl b/tests/vnode_util.erl
deleted file mode 100644
index e64c02d94..000000000
--- a/tests/vnode_util.erl
+++ /dev/null
@@ -1,80 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2013-2014 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%% -------------------------------------------------------------------
-
--module(vnode_util).
--compile(export_all).
-
-load(Nodes) ->
-    rt:load_modules_on_nodes([?MODULE], Nodes),
-    ok.
- -suspend_vnode(Node, Idx) -> - lager:info("Suspending vnode ~p/~p", [Node, Idx]), - Pid = rpc:call(Node, ?MODULE, remote_suspend_vnode, [Idx], infinity), - Pid. - -remote_suspend_vnode(Idx) -> - Parent = self(), - Pid = spawn(fun() -> - {ok, Pid} = riak_core_vnode_manager:get_vnode_pid(Idx, riak_kv_vnode), - erlang:suspend_process(Pid, []), - Parent ! suspended, - receive resume -> - io:format("Resuming vnode :: ~p/~p~n", [node(), Idx]), - erlang:resume_process(Pid) - end - end), - receive suspended -> ok end, - Pid. - -resume_vnode(Pid) -> - Pid ! resume. - -kill_vnode({VIdx, VNode}) -> - lager:info("Killing vnode: ~p", [VIdx]), - Pid = vnode_pid(VNode, VIdx), - rpc:call(VNode, erlang, exit, [Pid, kill]), - ok = rt:wait_until(fun() -> - vnode_pid(VNode, VIdx) /= Pid - end). - -vnode_pid(Node, Partition) -> - {ok, Pid} = rpc:call(Node, riak_core_vnode_manager, get_vnode_pid, - [Partition, riak_kv_vnode]), - Pid. - -rebuild_vnode({VIdx, VNode}) -> - lager:info("Rebuild AAE tree: ~p", [VIdx]), - rebuild_aae_tree(VNode, VIdx). - -rebuild_aae_tree(Node, Partition) -> - {ok, Pid} = rpc:call(Node, riak_kv_vnode, hashtree_pid, [Partition]), - Info = rpc:call(Node, riak_kv_entropy_info, compute_tree_info, []), - {_, Built} = lists:keyfind(Partition, 1, Info), - lager:info("Forcing rebuild of AAE tree for: ~b", [Partition]), - lager:info("Tree originally built at: ~p", [Built]), - rpc:call(Node, riak_kv_index_hashtree, clear, [Pid]), - ok = rt:wait_until(fun() -> - NewInfo = rpc:call(Node, riak_kv_entropy_info, compute_tree_info, []), - {_, NewBuilt} = lists:keyfind(Partition, 1, NewInfo), - NewBuilt > Built - end), - lager:info("Tree successfully rebuilt"), - ok. diff --git a/tests/yz_core_properties_create_unload.erl b/tests/yz_core_properties_create_unload.erl deleted file mode 100644 index 29a6180d3..000000000 --- a/tests/yz_core_properties_create_unload.erl +++ /dev/null @@ -1,160 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%%------------------------------------------------------------------- --module(yz_core_properties_create_unload). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). - --define(CFG, [{riak_kv, - [ - %% allow AAE to build trees and exchange rapidly - {anti_entropy_build_limit, {100, 1000}}, - {anti_entropy_concurrency, 8} - ]}, - {yokozuna, - [ - {enabled, true}, - {anti_entropy_tick, 1000} - ]}]). --define(INDEX, <<"test_idx_core">>). --define(TYPE, <<"data">>). --define(BUCKET, {?TYPE, <<"test_bkt_core">>}). --define(SEQMAX, 100). 
-
-confirm() ->
-    Cluster = rt:build_cluster(4, ?CFG),
-    rt:wait_for_cluster_service(Cluster, yokozuna),
-
-    %% Generate keys, YZ only supports UTF-8 compatible keys
-    Keys = [<<N:64/integer>> || N <- lists:seq(1, ?SEQMAX),
-                                not lists:any(fun(E) -> E > 127 end,
-                                              binary_to_list(<<N:64/integer>>))],
-    KeyCount = length(Keys),
-
-    %% Randomly select a subset of the test nodes to remove
-    %% core.properties from
-    RandNodes = rt:random_sublist(Cluster, 3),
-
-    %% Select one of the modified nodes as a client endpoint
-    Node = rt:select_random(RandNodes),
-    Pid = rt:pbc(Node),
-    riakc_pb_socket:set_options(Pid, [queue_if_disconnected]),
-
-    %% Create a search index and associate with a bucket
-    lager:info("Create and set Index ~p for Bucket ~p~n", [?INDEX, ?BUCKET]),
-    ok = riakc_pb_socket:create_search_index(Pid, ?INDEX),
-    ok = rt:create_and_activate_bucket_type(Node,
-                                            ?TYPE,
-                                            [{search_index, ?INDEX}]),
-    timer:sleep(1000),
-
-    %% Write keys and wait for soft commit
-    lager:info("Writing ~p keys", [KeyCount]),
-    [ok = rt:pbc_write(Pid, ?BUCKET, Key, Key, "text/plain") || Key <- Keys],
-    timer:sleep(1100),
-
-    verify_count(Pid, KeyCount),
-
-    test_core_props_removal(Cluster, RandNodes, KeyCount, Pid),
-    test_remove_index_dirs(Cluster, RandNodes, KeyCount, Pid),
-    test_remove_segment_infos_and_rebuild(Cluster, RandNodes, KeyCount, Pid),
-
-    riakc_pb_socket:stop(Pid),
-
-    pass.
-
-test_core_props_removal(Cluster, RandNodes, KeyCount, Pid) ->
-    lager:info("Remove core.properties file in each index data dir"),
-    remove_core_props(RandNodes, ?INDEX),
-
-    yokozuna_rt:check_exists(Cluster, ?INDEX),
-
-    lager:info("Write one more piece of data"),
-    ok = rt:pbc_write(Pid, ?BUCKET, <<"foo">>, <<"foo">>, "text/plain"),
-    timer:sleep(1100),
-
-    verify_count(Pid, KeyCount + 1).
-
-test_remove_index_dirs(Cluster, RandNodes, KeyCount, Pid) ->
-    lager:info("Remove index directories on each node and let them recreate/reindex"),
-    yokozuna_rt:remove_index_dirs(RandNodes, ?INDEX),
-
-    yokozuna_rt:check_exists(Cluster, ?INDEX),
-
-    yokozuna_rt:expire_trees(Cluster),
-    yokozuna_rt:wait_for_aae(Cluster),
-
-    lager:info("Write second piece of data"),
-    ok = rt:pbc_write(Pid, ?BUCKET, <<"food">>, <<"foody">>, "text/plain"),
-    timer:sleep(1100),
-
-    verify_count(Pid, KeyCount + 2).
-
-test_remove_segment_infos_and_rebuild(Cluster, RandNodes, KeyCount, Pid) ->
-    lager:info("Remove segment info files in each index data dir"),
-    remove_segment_infos(RandNodes, ?INDEX),
-
-    lager:info("To fix, we remove index directories on each node and let them recreate/reindex"),
-
-    yokozuna_rt:remove_index_dirs(RandNodes, ?INDEX),
-
-    yokozuna_rt:check_exists(Cluster, ?INDEX),
-
-    yokozuna_rt:expire_trees(Cluster),
-    yokozuna_rt:wait_for_aae(Cluster),
-
-    lager:info("Write third piece of data"),
-    ok = rt:pbc_write(Pid, ?BUCKET, <<"baz">>, <<"bar">>, "text/plain"),
-    timer:sleep(1100),
-
-    verify_count(Pid, KeyCount + 3).
-
-%% @doc Verify search count.
-verify_count(Pid, ExpectedKeyCount) ->
-    case riakc_pb_socket:search(Pid, ?INDEX, <<"*:*">>) of
-        {ok, {search_results, _, _, NumFound}} ->
-            lager:info("Check Count, Expected: ~p | Actual: ~p~n",
-                       [ExpectedKeyCount, NumFound]),
-            ?assertEqual(ExpectedKeyCount, NumFound);
-        E ->
-            lager:info("No results because ~p~n", [E])
-    end.
-
-%% @doc Remove core properties file on nodes.
-remove_core_props(Nodes, IndexName) ->
-    IndexDirs = [rpc:call(Node, yz_index, index_dir, [IndexName]) ||
-                    Node <- Nodes],
-    PropsFiles = [filename:join([IndexDir, "core.properties"]) ||
-                     IndexDir <- IndexDirs],
-    lager:info("Remove core.properties files: ~p, on nodes: ~p~n",
-               [PropsFiles, Nodes]),
-    [file:delete(PropsFile) || PropsFile <- PropsFiles],
-    ok.
-
-%% @doc Remove Lucene segment info files to check if reindexing will occur
-%% on re-creation/re-indexing.
-remove_segment_infos(Nodes, IndexName) ->
-    IndexDirs = [rpc:call(Node, yz_index, index_dir, [IndexName]) ||
-                    Node <- Nodes],
-    SiPaths = [binary_to_list(filename:join([IndexDir, "data/index/*.si"])) ||
-                  IndexDir <- IndexDirs],
-    SiFiles = lists:append([filelib:wildcard(Path) || Path <- SiPaths]),
-    lager:info("Remove segment info files: ~p, in dirs: ~p~n",
-               [SiFiles, IndexDirs]),
-    [file:delete(SiFile) || SiFile <- SiFiles].
diff --git a/tests/yz_crdt.erl b/tests/yz_crdt.erl
deleted file mode 100644
index 815f9bd98..000000000
--- a/tests/yz_crdt.erl
+++ /dev/null
@@ -1,119 +0,0 @@
--module(yz_crdt).
-
--export([confirm/0]).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(HARNESS, (rt_config:get(rt_harness))).
--define(INDEX, <<"maps">>).
--define(TYPE, <<"maps">>).
--define(KEY, "Chris Meiklejohn").
--define(BUCKET, {?TYPE, <<"testbucket">>}).
--define(GET(K,L), proplists:get_value(K, L)).
-
--define(CONF,
-        [
-         {riak_core,
-          [{ring_creation_size, 8}]
-         },
-         {yokozuna,
-          [{enabled, true}]
-         }]).
-
-confirm() ->
-    rt:set_advanced_conf(all, ?CONF),
-
-    %% Configure cluster.
-    Nodes = rt:build_cluster(5, ?CONF),
-
-    Node = rt:select_random(Nodes),
-
-    %% Create PB connection.
-    Pid = rt:pbc(Node),
-    riakc_pb_socket:set_options(Pid, [queue_if_disconnected]),
-
-    %% Create index.
-    riakc_pb_socket:create_search_index(Pid, ?INDEX, <<"_yz_default">>, []),
-
-    %% Create bucket type for maps.
-    rt:create_and_activate_bucket_type(Node,
-                                       ?TYPE,
-                                       [{datatype, map},
-                                        {search_index, ?INDEX}]),
-
-    %% Write some sample data.
-
-    Map1 = riakc_map:update(
-             {<<"name">>, register},
-             fun(R) ->
-                     riakc_register:set(list_to_binary(?KEY), R)
-             end, riakc_map:new()),
-    Map2 = riakc_map:update(
-             {<<"interests">>, set},
-             fun(S) ->
-                     riakc_set:add_element(<<"thing">>, S) end,
-             Map1),
-    ok = riakc_pb_socket:update_type(
-           Pid,
-           ?BUCKET,
-           ?KEY,
-           riakc_map:to_op(Map2)),
-
-    %% Wait for yokozuna index to trigger.
-    timer:sleep(1000),
-
-    %% Perform simple queries, check for register, set fields.
- {ok, {search_results, Results1a, _, _}} = riakc_pb_socket:search( - Pid, ?INDEX, <<"name_register:Chris*">>), - lager:info("Search name_register:Chris*: ~p~n", [Results1a]), - ?assertEqual(length(Results1a), 1), - ?assertEqual(?GET(<<"name_register">>, ?GET(?INDEX, Results1a)), - list_to_binary(?KEY)), - ?assertEqual(?GET(<<"interests_set">>, ?GET(?INDEX, Results1a)), - <<"thing">>), - - {ok, {search_results, Results2a, _, _}} = riakc_pb_socket:search( - Pid, ?INDEX, <<"interests_set:thing*">>), - lager:info("Search interests_set:thing*: ~p~n", [Results2a]), - ?assertEqual(length(Results2a), 1), - ?assertEqual(?GET(<<"name_register">>, ?GET(?INDEX, Results2a)), - list_to_binary(?KEY)), - ?assertEqual(?GET(<<"interests_set">>, ?GET(?INDEX, Results2a)), - <<"thing">>), - - {ok, {search_results, Results3a, _, _}} = riakc_pb_socket:search( - Pid, ?INDEX, <<"_yz_rb:testbucket">>), - lager:info("Search testbucket: ~p~n", [Results3a]), - ?assertEqual(length(Results3a), 1), - ?assertEqual(?GET(<<"name_register">>, ?GET(?INDEX, Results3a)), - list_to_binary(?KEY)), - ?assertEqual(?GET(<<"interests_set">>, ?GET(?INDEX, Results3a)), - <<"thing">>), - - %% Redo queries and check if results are equal - {ok, {search_results, Results1b, _, _}} = riakc_pb_socket:search( - Pid, ?INDEX, <<"name_register:Chris*">>), - ?assertEqual(number_of_fields(Results1a), - number_of_fields(Results1b)), - - {ok, {search_results, Results2b, _, _}} = riakc_pb_socket:search( - Pid, ?INDEX, <<"interests_set:thing*">>), - ?assertEqual(number_of_fields(Results2a), - number_of_fields(Results2b)), - - {ok, {search_results, Results3b, _, _}} = riakc_pb_socket:search( - Pid, ?INDEX, <<"_yz_rb:testbucket">>), - ?assertEqual(number_of_fields(Results3a), - number_of_fields(Results3b)), - - %% Stop PB connection. - riakc_pb_socket:stop(Pid), - - %% Clean cluster. - rt:clean_cluster(Nodes), - - pass. - -%% @private -number_of_fields(Resp) -> - length(?GET(?INDEX, Resp)). diff --git a/tests/yz_default_bucket_type_upgrade.erl b/tests/yz_default_bucket_type_upgrade.erl deleted file mode 100644 index 20d197779..000000000 --- a/tests/yz_default_bucket_type_upgrade.erl +++ /dev/null @@ -1,94 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2015 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%%-------------------------------------------------------------------- - -%% @doc Test that checks to make sure that default bucket_types -%% do not lose data when expiring/clearing AAE trees when -%% trees are rebuilt for comparison. -%% @end - - --module(yz_default_bucket_type_upgrade). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --define(N, 3). --define(YZ_CAP, {yokozuna, handle_legacy_default_bucket_type_aae}). --define(INDEX, <<"test_upgrade_idx">>). --define(BUCKET, <<"test_upgrade_bucket">>). --define(SEQMAX, 2000). 
--define(CFG,
-        [{riak_core,
-          [
-           {ring_creation_size, 16},
-           {default_bucket_props, [{n_val, ?N}]},
-           {anti_entropy_build_limit, {100, 1000}},
-           {anti_entropy_concurrency, 8}
-          ]},
-         {yokozuna,
-          [
-           {anti_entropy_tick, 1000},
-           {enabled, true}
-          ]}
-        ]).
-
-confirm() ->
-    %% This test explicitly requires an upgrade from 2.0.5 to test a
-    %% new capability
-    OldVsn = "2.0.5",
-
-    [_, Node|_] = Cluster = rt:build_cluster(lists:duplicate(4, {OldVsn, ?CFG})),
-    rt:wait_for_cluster_service(Cluster, yokozuna),
-
-    [rt:assert_capability(ANode, ?YZ_CAP, {unknown_capability, ?YZ_CAP}) || ANode <- Cluster],
-
-    %% Generate keys, YZ only supports UTF-8 compatible keys
-    GenKeys = [<<N:64/integer>> || N <- lists:seq(1, ?SEQMAX),
-                                   not lists:any(
-                                         fun(E) -> E > 127 end,
-                                         binary_to_list(<<N:64/integer>>))],
-    KeyCount = length(GenKeys),
-    lager:info("KeyCount ~p", [KeyCount]),
-
-    OldPid = rt:pbc(Node),
-
-    yokozuna_rt:write_data(Cluster, OldPid, ?INDEX, ?BUCKET, GenKeys),
-    %% wait for solr soft commit
-    timer:sleep(1100),
-
-    yokozuna_rt:verify_num_found_query(Cluster, ?INDEX, KeyCount),
-
-    %% Upgrade
-    yokozuna_rt:rolling_upgrade(Cluster, current),
-
-    [rt:assert_capability(ANode, ?YZ_CAP, v1) || ANode <- Cluster],
-    [rt:assert_supported(rt:capability(ANode, all), ?YZ_CAP, [v1, v0]) || ANode <- Cluster],
-
-    yokozuna_rt:verify_num_found_query(Cluster, ?INDEX, KeyCount),
-
-    lager:info("Write one more piece of data"),
-    Pid = rt:pbc(Node),
-    ok = rt:pbc_write(Pid, ?BUCKET, <<"foo">>, <<"foo">>, "text/plain"),
-    timer:sleep(1100),
-
-    yokozuna_rt:expire_trees(Cluster),
-    yokozuna_rt:verify_num_found_query(Cluster, ?INDEX, KeyCount + 1),
-
-    pass.
diff --git a/tests/yz_ensemble.erl b/tests/yz_ensemble.erl
deleted file mode 100644
index 5b0361e58..000000000
--- a/tests/yz_ensemble.erl
+++ /dev/null
@@ -1,117 +0,0 @@
--module(yz_ensemble).
--compile(export_all).
--include_lib("eunit/include/eunit.hrl").
-
--define(CFG,
-        [
-         {riak_core,
-          [
-           {ring_creation_size, 8}
-          ]},
-         {yokozuna,
-          [
-           {enabled, true}
-          ]}
-        ]).
-
-confirm() ->
-    NumNodes = 3,
-    NVal = 3,
-    ConfigB = ensemble_util:fast_config(NVal),
-    Config = ConfigB ++ [{yokozuna, [{enabled, true}]}],
-    lager:info("Building cluster and waiting for ensemble to stabilize"),
-    Nodes = build_cluster_with_yz_support(NumNodes, Config, NVal),
-    rt:wait_for_cluster_service(Nodes, yokozuna),
-    vnode_util:load(Nodes),
-    Node = hd(Nodes),
-
-    lager:info("Creating/activating 'strong' bucket type"),
-    rt:create_and_activate_bucket_type(Node, <<"strong">>,
-                                       [{consistent, true}, {n_val, NVal}]),
-
-    Bucket = {<<"strong">>, <<"test">>},
-    Index = <<"testi">>,
-    create_index(Node, Index),
-    set_bucket_props(Node, Bucket, Index),
-
-    verify_ensemble_delete_support(Node, Bucket, Index),
-
-    pass.
-
-
-%% @private
-%% @doc Populates then deletes from SC bucket
-verify_ensemble_delete_support(Node, Bucket, Index) ->
-    %% Yz only supports UTF-8 compatible keys
-    Keys = [<<N:64/integer>> || N <- lists:seq(1,2000),
-                                not lists:any(fun(E) -> E > 127 end, binary_to_list(<<N:64/integer>>))],
-
-    PBC = rt:pbc(Node),
-
-    lager:info("Writing ~p keys", [length(Keys)]),
-    [ok = rt:pbc_write(PBC, Bucket, Key, Key, "text/plain") || Key <- Keys],
-
-    %% soft commit wait, then check that last key is indexed
-    lager:info("Search for keys to verify they exist"),
-    timer:sleep(1000),
-    LKey = lists:last(Keys),
-    rt:wait_until(fun() ->
-                      {M, _} = riakc_pb_socket:search(PBC, Index, query_value(LKey)),
-                      ok == M
-                  end),
-    [{ok, _} =
-         riakc_pb_socket:search(PBC, Index, query_value(Key)) || Key <- Keys],
-
-    lager:info("Deleting keys"),
-    [riakc_pb_socket:delete(PBC, Bucket, Key) || Key <- Keys],
-    timer:sleep(1000),
-    rt:wait_until(fun() ->
-                      case riakc_pb_socket:search(PBC, Index, query_value(LKey)) of
-                          {ok, {search_results, Res, _, _}} ->
-                              lager:info("RES: ~p ~p~n", [Res, LKey]),
-                              Res == [];
-                          S ->
-                              lager:info("OTHER: ~p ~p~n", [S, LKey]),
-                              false
-                      end
-                  end),
-    [{ok, {search_results, [], _, _}} =
-         riakc_pb_socket:search(PBC, Index, query_value(Key)) || Key <- Keys],
-
-    ok.
-
-
-%% @private
-%% @doc build a cluster from ensemble_util + yz support
-%%
-%% NOTE: There's a timing issue that causes join_cluster to hang the r_t
-%% node when adding yokozuna and ensemble support. Waiting for yokozuna
-%% to load on each node allows join_cluster to complete consistently
-build_cluster_with_yz_support(Num, Config, NVal) ->
-    Nodes = rt:deploy_nodes(Num, Config),
-    [rt:wait_for_cluster_service([N], yokozuna) || N <- Nodes],
-    Node = hd(Nodes),
-    rt:join_cluster(Nodes),
-    ensemble_util:wait_until_cluster(Nodes),
-    ensemble_util:wait_for_membership(Node),
-    ensemble_util:wait_until_stable(Node, NVal),
-    Nodes.
-
-%% @private
-%% @doc Builds a simple riak key query
-query_value(Value) ->
-    V2 = iolist_to_binary(re:replace(Value, "\"", "%22")),
-    V3 = iolist_to_binary(re:replace(V2, "\\\\", "%5C")),
-    <<"_yz_rk:\"",V3/binary,"\"">>.
-
-%% pulled from yz_rt
-
-%% @private
-create_index(Node, Index) ->
-    lager:info("Creating index ~s [~p]", [Index, Node]),
-    ok = rpc:call(Node, yz_index, create, [Index]).
-
-%% @private
-set_bucket_props(Node, Bucket, Index) ->
-    Props = [{search_index, Index}],
-    rpc:call(Node, riak_core_bucket, set_bucket, [Bucket, Props]).
diff --git a/tests/yz_extractors.erl b/tests/yz_extractors.erl
deleted file mode 100644
index 0e1582e86..000000000
--- a/tests/yz_extractors.erl
+++ /dev/null
@@ -1,191 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2015 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%%-------------------------------------------------------------------
-
-%% @doc Test that checks if we're caching the extractor map and that
-%% creating custom extractors is doable via protobufs.
-%% @end
-
--module(yz_extractors).
--compile(export_all).
--include_lib("eunit/include/eunit.hrl").
--include_lib("riakc/include/riakc.hrl").
-
--define(INDEX1, <<"test_idx1">>).
--define(BUCKET1, <<"test_bkt1">>).
--define(INDEX2, <<"test_idx2">>).
--define(BUCKET2, <<"test_bkt2">>).
--define(YZ_CAP, {yokozuna, extractor_map_in_cmd}).
--define(GET_MAP_RING_MFA, {yz_extractor, get_map, 1}).
--define(GET_MAP_MFA, {yz_extractor, get_map, 0}).
--define(GET_MAP_READTHROUGH_MFA, {yz_extractor, get_map_read_through, 0}).
--define(YZ_META_EXTRACTORS, {yokozuna, extractors}).
--define(YZ_EXTRACTOR_MAP, yokozuna_extractor_map).
--define(NEW_EXTRACTOR, {"application/httpheader", yz_noop_extractor}).
--define(DEFAULT_MAP, [{default, yz_noop_extractor},
-                      {"application/json", yz_json_extractor},
-                      {"application/riak_counter", yz_dt_extractor},
-                      {"application/riak_map", yz_dt_extractor},
-                      {"application/riak_set", yz_dt_extractor},
-                      {"application/xml", yz_xml_extractor},
-                      {"text/plain", yz_text_extractor},
-                      {"text/xml", yz_xml_extractor}
-                     ]).
--define(EXTRACTMAPEXPECT, lists:sort(?DEFAULT_MAP ++ [?NEW_EXTRACTOR])).
--define(SEQMAX, 20).
--define(CFG,
-        [
-         {yokozuna,
-          [
-           {enabled, true}
-          ]}
-        ]).
-
-confirm() ->
-    %% This test explicitly requires an upgrade from 2.0.5 to test a
-    %% new capability
-    OldVsn = "2.0.5",
-
-    [_, Node|_] = Cluster = rt:build_cluster(lists:duplicate(4, {OldVsn, ?CFG})),
-    rt:wait_for_cluster_service(Cluster, yokozuna),
-
-    [rt:assert_capability(ANode, ?YZ_CAP, {unknown_capability, ?YZ_CAP}) || ANode <- Cluster],
-
-    OldPid = rt:pbc(Node),
-
-    %% Generate keys, YZ only supports UTF-8 compatible keys
-    GenKeys = [<<N:64/integer>> || N <- lists:seq(1, ?SEQMAX),
-                                   not lists:any(
-                                         fun(E) -> E > 127 end,
-                                         binary_to_list(<<N:64/integer>>))],
-    KeyCount = length(GenKeys),
-
-    rt:count_calls(Cluster, [?GET_MAP_RING_MFA, ?GET_MAP_MFA]),
-
-    yokozuna_rt:write_data(Cluster, OldPid, ?INDEX1, ?BUCKET1, GenKeys),
-    %% wait for solr soft commit
-    timer:sleep(1100),
-
-    {ok, BProps} = riakc_pb_socket:get_bucket(OldPid, ?BUCKET1),
-    N = proplists:get_value(n_val, BProps),
-
-    ok = rt:stop_tracing(),
-    PrevGetMapRingCC = rt:get_call_count(Cluster, ?GET_MAP_RING_MFA),
-    PrevGetMapCC = rt:get_call_count(Cluster, ?GET_MAP_MFA),
-    ?assertEqual(KeyCount * N, PrevGetMapRingCC),
-    ?assertEqual(KeyCount * N, PrevGetMapCC),
-
-    %% test query count
-    yokozuna_rt:verify_num_found_query(Cluster, ?INDEX1, KeyCount),
-    riakc_pb_socket:stop(OldPid),
-
-    {RingVal1, MDVal1} = get_ring_and_cmd_vals(Node, ?YZ_META_EXTRACTORS,
-                                               ?YZ_EXTRACTOR_MAP),
-
-    ?assertEqual(undefined, MDVal1),
-    %% In previous version, Ring only gets map metadata if a non-default
-    %% extractor is registered
-    ?assertEqual(undefined, RingVal1),
-
-    ?assertEqual(?DEFAULT_MAP, get_map(Node)),
-
-    %% Custom Register
-    ExtractMap = register_extractor(Node, element(1, ?NEW_EXTRACTOR),
-                                    element(2, ?NEW_EXTRACTOR)),
-
-    ?assertEqual(?EXTRACTMAPEXPECT, ExtractMap),
-
-    %% Upgrade
-    yokozuna_rt:rolling_upgrade(Cluster, current),
-
-    [rt:assert_capability(ANode, ?YZ_CAP, true) || ANode <- Cluster],
-    [rt:assert_supported(rt:capability(ANode, all), ?YZ_CAP, [true, false]) || ANode <- Cluster],
-
-    %% test query count again
-    yokozuna_rt:verify_num_found_query(Cluster, ?INDEX1, KeyCount),
-
-    rt:count_calls(Cluster, [?GET_MAP_RING_MFA, ?GET_MAP_MFA,
-                             ?GET_MAP_READTHROUGH_MFA]),
-
-    Pid = rt:pbc(Node),
-    yokozuna_rt:write_data(Cluster, Pid, ?INDEX2, ?BUCKET2, GenKeys),
-    %% wait for solr soft commit
-    timer:sleep(1100),
-
-    ok = rt:stop_tracing(),
-    CurrGetMapRingCC =
rt:get_call_count(Cluster, ?GET_MAP_RING_MFA), - CurrGetMapCC = rt:get_call_count(Cluster, ?GET_MAP_MFA), - CurrGetMapRTCC = rt:get_call_count(Cluster, ?GET_MAP_READTHROUGH_MFA), - - lager:info("Number of calls to get the map from the ring - current: ~p~n, previous: ~p~n", - [CurrGetMapRingCC, PrevGetMapRingCC]), - ?assert(CurrGetMapRingCC < PrevGetMapRingCC), - lager:info("Number of calls to get the map - current: ~p~n, previous: ~p~n", - [CurrGetMapCC, PrevGetMapCC]), - ?assert(CurrGetMapCC =< PrevGetMapCC), - lager:info("Number of calls to get_map_read_through/0: ~p~n, Number of calls to get_map/0: ~p~n", - [CurrGetMapRTCC, CurrGetMapCC]), - ?assert(CurrGetMapRTCC < CurrGetMapCC), - - riakc_pb_socket:stop(Pid), - - {_RingVal2, MDVal2} = get_ring_and_cmd_vals(Node, ?YZ_META_EXTRACTORS, - ?YZ_EXTRACTOR_MAP), - - ?assertEqual(?EXTRACTMAPEXPECT, MDVal2), - ?assertEqual(?EXTRACTMAPEXPECT, get_map(Node)), - - rt_intercept:add(Node, {yz_noop_extractor, - [{{extract, 1}, extract_httpheader}]}), - rt_intercept:wait_until_loaded(Node), - - ExpectedExtraction = [{method,'GET'}, - {host,<<"www.google.com">>}, - {uri,<<"/">>}], - ?assertEqual(ExpectedExtraction, - verify_extractor(Node, - <<"GET http://www.google.com HTTP/1.1\n">>, - element(2, ?NEW_EXTRACTOR))), - - pass. - -%%%=================================================================== -%%% Private -%%%=================================================================== - -get_ring_and_cmd_vals(Node, Prefix, Key) -> - Ring = rpc:call(Node, yz_misc, get_ring, [transformed]), - MDVal = metadata_get(Node, Prefix, Key), - RingVal = ring_meta_get(Node, Key, Ring), - {RingVal, MDVal}. - -metadata_get(Node, Prefix, Key) -> - rpc:call(Node, riak_core_metadata, get, [Prefix, Key, []]). - -ring_meta_get(Node, Key, Ring) -> - rpc:call(Node, riak_core_ring, get_meta, [Key, Ring]). - -register_extractor(Node, MimeType, Mod) -> - rpc:call(Node, yz_extractor, register, [MimeType, Mod]). - -get_map(Node) -> - rpc:call(Node, yz_extractor, get_map, []). - -verify_extractor(Node, PacketData, Mod) -> - rpc:call(Node, yz_extractor, run, [PacketData, Mod]). diff --git a/tests/yz_handoff.erl b/tests/yz_handoff.erl deleted file mode 100644 index 0c4ac95cd..000000000 --- a/tests/yz_handoff.erl +++ /dev/null @@ -1,208 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2014 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%%------------------------------------------------------------------- --module(yz_handoff). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --define(GET(K,L), proplists:get_value(K, L)). --define(FMT(S, Args), lists:flatten(io_lib:format(S, Args))). --define(INDEX, <<"test_idx">>). --define(BUCKET, <<"test_bkt">>). --define(NUMRUNSTATES, 1). --define(SEQMAX, 1000). --define(TESTCYCLE, 20). --define(N, 3). 
--define(CFG,
-        [
-         {riak_core,
-          [
-           {ring_creation_size, 16},
-           {n_val, ?N},
-           {handoff_concurrency, 10},
-           {vnode_management_timer, 1000}
-          ]},
-         {riak_kv,
-          [
-           %% allow AAE to build trees and exchange rapidly
-           {anti_entropy_build_limit, {100, 1000}},
-           {anti_entropy_concurrency, 8},
-           {handoff_rejected_max, infinity}
-          ]},
-         {yokozuna,
-          [
-           {anti_entropy_tick, 1000},
-           {enabled, true}
-          ]}
-        ]).
-
--record(trial_state, {
-          solr_url_before,
-          solr_url_after,
-          leave_node,
-          join_node,
-          admin_node}).
-
-confirm() ->
-    %% Setup cluster initially
-    [Node1, Node2, _Node3, _Node4, _Node5] = Nodes = rt:build_cluster(5, ?CFG),
-
-    rt:wait_for_cluster_service(Nodes, yokozuna),
-
-    ConnInfo = ?GET(Node2, rt:connection_info([Node2])),
-    {Host, Port} = ?GET(http, ConnInfo),
-    Shards = [{N, node_solr_port(N)} || N <- Nodes],
-
-    %% Generate keys, YZ only supports UTF-8 compatible keys
-    Keys = [<<N:64/integer>> || N <- lists:seq(1, ?SEQMAX),
-                                not lists:any(fun(E) -> E > 127 end,
-                                              binary_to_list(<<N:64/integer>>))],
-    KeyCount = length(Keys),
-
-    Pid = rt:pbc(Node2),
-    yokozuna_rt:write_data(Nodes, Pid, ?INDEX, ?BUCKET, Keys),
-    timer:sleep(1100),
-
-    %% Separate out shards for multiple runs
-    [Shard1|Shards2Rest] = Shards,
-    {_, SolrPort1} = Shard1,
-    [{_, SolrPort2}|_] = Shards2Rest,
-    SolrURL = internal_solr_url(Host, SolrPort1, ?INDEX, Shards),
-    BucketURL = bucket_keys_url(Host, Port, ?BUCKET),
-    SearchURL = search_url(Host, Port, ?INDEX),
-
-    lager:info("Verify Replicas Count = (3 * docs/keys) count"),
-    verify_count(SolrURL, (KeyCount * ?N)),
-
-    States = [#trial_state{solr_url_before = SolrURL,
-                           solr_url_after = internal_solr_url(Host, SolrPort2, ?INDEX, Shards2Rest),
-                           leave_node = Node1},
-              #trial_state{solr_url_before = internal_solr_url(Host, SolrPort2, ?INDEX, Shards2Rest),
-                           solr_url_after = SolrURL,
-                           join_node = Node1,
-                           admin_node = Node2}],
-
-    %% Run Shell Script to count/test # of replicas and leave/join
-    %% nodes from the cluster
-    [[begin
-          check_data(Nodes, KeyCount, BucketURL, SearchURL, State),
-          check_counts(Pid, KeyCount, BucketURL)
-      end || State <- States]
-     || _ <- lists:seq(1,?NUMRUNSTATES)],
-
-    pass.
-
-%%%===================================================================
-%%% Private
-%%%===================================================================
-
-node_solr_port(Node) ->
-    {ok, P} = riak_core_util:safe_rpc(Node, application, get_env,
-                                      [yokozuna, solr_port]),
-    P.
-
-internal_solr_url(Host, Port, Index) ->
-    ?FMT("http://~s:~B/internal_solr/~s", [Host, Port, Index]).
-internal_solr_url(Host, Port, Index, Shards) ->
-    Ss = [internal_solr_url(Host, ShardPort, Index)
-          || {_, ShardPort} <- Shards],
-    ?FMT("http://~s:~B/internal_solr/~s/select?wt=json&q=*:*&shards=~s",
-         [Host, Port, Index, string:join(Ss, ",")]).
-
-%% @private
-bucket_keys_url(Host, Port, BName) ->
-    ?FMT("http://~s:~B/buckets/~s/keys?keys=true", [Host, Port, BName]).
-
-%% @private
-search_url(Host, Port, Index) ->
-    ?FMT("http://~s:~B/solr/~s/select?wt=json&q=*:*", [Host, Port, Index]).
-
-verify_count(Url, ExpectedCount) ->
-    AreUp =
-        fun() ->
-                {ok, "200", _, DBody} = ibrowse:send_req(Url, [], get, []),
-                FoundCount = get_count(DBody),
-                lager:info("FoundCount: ~b, ExpectedCount: ~b",
-                           [FoundCount, ExpectedCount]),
-                ExpectedCount =:= FoundCount
-        end,
-    ?assertEqual(ok, rt:wait_until(AreUp)),
-    ok.
-
-get_count(Resp) ->
-    Struct = mochijson2:decode(Resp),
-    kvc:path([<<"response">>, <<"numFound">>], Struct).
- -get_keys_count(BucketURL) -> - {ok, "200", _, RBody} = ibrowse:send_req(BucketURL, [], get, []), - Struct = mochijson2:decode(RBody), - length(kvc:path([<<"keys">>], Struct)). - -check_counts(Pid, InitKeyCount, BucketURL) -> - PBCounts = [begin {ok, Resp} = riakc_pb_socket:search( - Pid, ?INDEX, <<"*:*">>), - Resp#search_results.num_found - end || _ <- lists:seq(1,?TESTCYCLE)], - HTTPCounts = [begin {ok, "200", _, RBody} = ibrowse:send_req( - BucketURL, [], get, []), - Struct = mochijson2:decode(RBody), - length(kvc:path([<<"keys">>], Struct)) - end || _ <- lists:seq(1,?TESTCYCLE)], - MinPBCount = lists:min(PBCounts), - MinHTTPCount = lists:min(HTTPCounts), - lager:info("Before-Node-Leave PB: ~b, After-Node-Leave PB: ~b", - [InitKeyCount, MinPBCount]), - ?assertEqual(InitKeyCount, MinPBCount), - lager:info("Before-Node-Leave PB: ~b, After-Node-Leave HTTP: ~b", - [InitKeyCount, MinHTTPCount]), - ?assertEqual(InitKeyCount, MinHTTPCount). - -check_data(Cluster, KeyCount, BucketURL, SearchURL, S) -> - CheckCount = KeyCount * ?N, - KeysBefore = get_keys_count(BucketURL), - - UpdatedCluster = leave_or_join(Cluster, S), - - yokozuna_rt:wait_for_aae(UpdatedCluster), - - KeysAfter = get_keys_count(BucketURL), - lager:info("KeysBefore: ~b, KeysAfter: ~b", [KeysBefore, KeysAfter]), - ?assertEqual(KeysBefore, KeysAfter), - - lager:info("Verify Search Docs Count =:= key count"), - lager:info("Run Search URL: ~s", [SearchURL]), - verify_count(SearchURL, KeysAfter), - lager:info("Verify Replicas Count = (3 * docs/keys) count"), - lager:info("Run Search URL: ~s", [S#trial_state.solr_url_after]), - verify_count(S#trial_state.solr_url_after, CheckCount). - -leave_or_join(Cluster, S=#trial_state{join_node=undefined}) -> - Node = S#trial_state.leave_node, - rt:leave(Node), - ?assertEqual(ok, rt:wait_until_unpingable(Node)), - Cluster -- [Node]; -leave_or_join(Cluster, S=#trial_state{leave_node=undefined}) -> - Node = S#trial_state.join_node, - NodeAdmin = S#trial_state.admin_node, - ok = rt:start_and_wait(Node), - ok = rt:join(Node, NodeAdmin), - ?assertEqual(ok, rt:wait_until_nodes_ready(Cluster)), - ?assertEqual(ok, rt:wait_until_no_pending_changes(Cluster)), - Cluster ++ [Node]. diff --git a/tests/yz_schema_change_reset.erl b/tests/yz_schema_change_reset.erl deleted file mode 100644 index 9980ff69b..000000000 --- a/tests/yz_schema_change_reset.erl +++ /dev/null @@ -1,315 +0,0 @@ -%% ------------------------------------------------------------------- -%% -%% Copyright (c) 2015 Basho Technologies, Inc. -%% -%% This file is provided to you under the Apache License, -%% Version 2.0 (the "License"); you may not use this file -%% except in compliance with the License. You may obtain -%% a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, -%% software distributed under the License is distributed on an -%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -%% KIND, either express or implied. See the License for the -%% specific language governing permissions and limitations -%% under the License. -%% -%%-------------------------------------------------------------------- --module(yz_schema_change_reset). --compile(export_all). --include_lib("eunit/include/eunit.hrl"). --include_lib("riakc/include/riakc.hrl"). - --define(GET(K,L), proplists:get_value(K, L)). --define(INDEX, <<"test_schema_change_reset">>). --define(TYPE, <<"test_schema_change">>). --define(BUCKET1, <<"test_schema_change_reset">>). 
--define(BUCKET2, {?TYPE, <<"test_schema_change_reset_2">>}).
--define(SCHEMANAME, <<"test">>).
-
--define(TEST_SCHEMA,
-<<"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-_yz_id
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-">>).
--define(TEST_SCHEMA_UPDATE,
-<<"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-_yz_id
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-">>).
-
--define(SEQMAX, 20).
--define(CFG,
-        [{riak_core,
-          [
-           {ring_creation_size, 16},
-           {anti_entropy_build_limit, {100, 1000}},
-           {anti_entropy_concurrency, 8}
-          ]},
-         {yokozuna,
-          [
-           {anti_entropy_tick, 1000},
-           {enabled, true}
-          ]}
-        ]).
-
-confirm() ->
-    [Node1|_RestNodes] = Cluster = rt:build_cluster(4, ?CFG),
-    rt:wait_for_cluster_service(Cluster, yokozuna),
-
-    %% Generate keys, YZ only supports UTF-8 compatible keys
-    GenKeys = [<<N:64/integer>> || N <- lists:seq(1, ?SEQMAX),
-                                   not lists:any(
-                                         fun(E) -> E > 127 end,
-                                         binary_to_list(<<N:64/integer>>))],
-    KeyCount = length(GenKeys),
-    lager:info("KeyCount ~p", [KeyCount]),
-
-    Pid = rt:pbc(rt:select_random(Cluster)),
-
-    lager:info("Write initial data to index ~p with schema ~p",
-               [?INDEX, ?SCHEMANAME]),
-
-    yokozuna_rt:write_data(Cluster, Pid, ?INDEX,
-                           {?SCHEMANAME, ?TEST_SCHEMA},
-                           ?BUCKET1, GenKeys),
-    timer:sleep(1100),
-
-    lager:info("Create and activate map-based bucket type ~s and tie it to search_index ~s",
-               [?TYPE, ?INDEX]),
-    rt:create_and_activate_bucket_type(Node1, ?TYPE, [{datatype, map},
-                                                      {search_index, ?INDEX}]),
-
-    lager:info("Write and check age at integer per original schema"),
-
-    NewObj1A = riakc_obj:new(?BUCKET1, <<"keyA">>,
-                             <<"{\"age\":26}">>,
-                             "application/json"),
-
-    NewObj1B = riakc_obj:new(?BUCKET1, <<"keyB">>,
-                             <<"{\"age\":99}">>,
-                             "application/json"),
-
-    {ok, _ObjA} = riakc_pb_socket:put(Pid, NewObj1A, [return_head]),
-    timer:sleep(1100),
-    {ok, _ObjB} = riakc_pb_socket:put(Pid, NewObj1B, [return_head]),
-    timer:sleep(1100),
-
-    yokozuna_rt:verify_num_found_query(Cluster, ?INDEX, KeyCount + 2),
-
-    assert_search(Pid, Cluster, <<"age:26">>, {<<"age">>, <<"26">>}, []),
-    assert_search(Pid, Cluster, <<"age:99">>, {<<"age">>, <<"99">>}, []),
-
-    Map1 = riakc_map:update(
-             {<<"0_foo">>, register},
-             fun(R) ->
-                     riakc_register:set(<<"44ab">>, R)
-             end, riakc_map:new()),
-    ok = riakc_pb_socket:update_type(
-           Pid,
-           ?BUCKET2,
-           <<"keyMap1">>,
-           riakc_map:to_op(Map1)),
-
-    {ok, Map2} = riakc_pb_socket:fetch_type(Pid, ?BUCKET2, <<"keyMap1">>),
-    Map3 = riakc_map:update(
-             {<<"1_baz">>, counter},
-             fun(R) ->
-                     riakc_counter:increment(10, R)
-             end, Map2),
-    ok = riakc_pb_socket:update_type(
-           Pid,
-           ?BUCKET2,
-           <<"keyMap1">>,
-           riakc_map:to_op(Map3)),
-
-    timer:sleep(1100),
-    assert_search(Pid, Cluster, <<"0_foo_register:44ab">>, {<<"0_foo_register">>,
-                                                            <<"44ab">>}, []),
-
-    lager:info("Expire and re-check count before updating schema"),
-
-    yokozuna_rt:expire_trees(Cluster),
-    yokozuna_rt:wait_for_aae(Cluster),
-
-    yokozuna_rt:verify_num_found_query(Cluster, ?INDEX, KeyCount + 3),
-
-    lager:info("Overwrite schema with updated schema"),
-    override_schema(Pid, Cluster, ?INDEX, ?SCHEMANAME, ?TEST_SCHEMA_UPDATE),
-
-    lager:info("Write and check hello_i at integer per schema update"),
-
-    NewObj2 = riakc_obj:new(?BUCKET1, <<"key2">>,
-                            <<"{\"hello_i\":36}">>,
-                            "application/json"),
-
-    {ok, _Obj2} = riakc_pb_socket:put(Pid, NewObj2, [return_head]),
-    timer:sleep(1100),
-
-    yokozuna_rt:verify_num_found_query(Cluster, ?INDEX, KeyCount + 4),
-    assert_search(Pid, Cluster, <<"hello_i:36">>, {<<"hello_i">>, <<"36">>}, []),
-
-    lager:info("Write and check age at string per schema update"),
-
-    NewObj3 =
riakc_obj:new(?BUCKET1, <<"key3">>, - <<"{\"age\":\"3jlkjkl\"}">>, - "application/json"), - - {ok, _Obj3} = riakc_pb_socket:put(Pid, NewObj3, [return_head]), - - yokozuna_rt:verify_num_found_query(Cluster, ?INDEX, KeyCount + 5), - assert_search(Pid, Cluster, <<"age:3jlkjkl">>, - {<<"age">>, <<"3jlkjkl">>}, []), - - lager:info("Expire and re-check count to make sure we're correctly indexed - by the new schema"), - - yokozuna_rt:expire_trees(Cluster), - yokozuna_rt:wait_for_aae(Cluster), - - yokozuna_rt:verify_num_found_query(Cluster, ?INDEX, KeyCount + 5), - - HP = rt:select_random(yokozuna_rt:host_entries(rt:connection_info(Cluster))), - yokozuna_rt:search_expect(HP, ?INDEX, <<"age">>, <<"*">>, 2), - - lager:info("Re-Put because AAE won't find a diff even though the types - have changed, as it only compares based on bkey currently. - Also, this re-put will work as we have a default bucket (type) - with allow_mult=false... no siblings"), - - {ok, _Obj4} = riakc_pb_socket:put(Pid, NewObj1A, [return_head]), - timer:sleep(1100), - - assert_search(Pid, Cluster, <<"age:26">>, {<<"age">>, <<"26">>}, []), - - lager:info("Re-Put Map data by dec/inc counter to account for *change* and - allow previously unindexed counter to be searchable"), - - {ok, Map4} = riakc_pb_socket:fetch_type(Pid, ?BUCKET2, <<"keyMap1">>), - Map5 = riakc_map:update( - {<<"1_baz">>, counter}, - fun(R) -> - riakc_counter:decrement(0, R), - riakc_counter:increment(0, R) - end, Map4), - ok = riakc_pb_socket:update_type( - Pid, - ?BUCKET2, - <<"keyMap1">>, - riakc_map:to_op(Map5)), - - timer:sleep(1100), - assert_search(Pid, Cluster, <<"0_foo_register:44ab">>, {<<"0_foo_register">>, - <<"44ab">>}, []), - assert_search(Pid, Cluster, <<"1_baz_counter:10">>, {<<"1_baz_counter">>, - <<"10">>}, []), - - lager:info("Test nested json searches w/ unsearched fields ignored"), - - NewObj5 = riakc_obj:new(?BUCKET1, <<"key4">>, - <<"{\"quip\":\"blashj3\", - \"paths\":{\"quip\":\"88\"}}">>, - "application/json"), - {ok, _Obj5} = riakc_pb_socket:put(Pid, NewObj5, [return_head]), - - timer:sleep(1100), - assert_search(Pid, Cluster, <<"paths.quip:88">>, - {<<"paths.quip">>, <<"88">>}, []), - - riakc_pb_socket:stop(Pid), - - pass. - -override_schema(Pid, Cluster, Index, Schema, RawUpdate) -> - ok = riakc_pb_socket:create_search_schema(Pid, Schema, RawUpdate), - yokozuna_rt:wait_for_schema(Cluster, Schema, RawUpdate), - [Node|_] = Cluster, - {ok, _} = rpc:call(Node, yz_index, reload, [Index]). - -assert_search(Pid, Cluster, Search, SearchExpect, Params) -> - F = fun(_) -> - lager:info("Searching ~p and asserting it exists", - [SearchExpect]), - {ok,{search_results,[{_Index,Fields}], _Score, Found}} = - riakc_pb_socket:search(Pid, ?INDEX, Search, Params), - ?assert(lists:member(SearchExpect, Fields)), - case Found of - 1 -> true; - 0 -> false - end - end, - rt:wait_until(Cluster, F). From 232e1580a3a27fd5fb2c7e92afde31715c8fde7d Mon Sep 17 00:00:00 2001 From: Christopher Meiklejohn Date: Mon, 7 Sep 2015 16:39:17 -0700 Subject: [PATCH 10/13] Remove pb dependencies. 
---
 rebar.config | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/rebar.config b/rebar.config
index e1dca17d4..f8c9aa355 100644
--- a/rebar.config
+++ b/rebar.config
@@ -16,14 +16,11 @@
     {lager, "(2.0|2.1|2.2).*", {git, "git://github.com/basho/lager.git", {tag, "2.2.0"}}},
     {getopt, ".*", {git, "git://github.com/jcomellas/getopt", {tag, "v0.4"}}},
     {meck, ".*", {git, "git://github.com/basho/meck.git", {tag, "0.8.2"}}},
-    {mapred_verify, ".*", {git, "git://github.com/basho/mapred_verify", {branch, "master"}}},
-    {riakc, ".*", {git, "git://github.com/basho/riak-erlang-client", {branch, "master"}}},
-    {riakhttpc, ".*", {git, "git://github.com/basho/riak-erlang-http-client", {branch, "master"}}},
     {kvc, "1.3.0", {git, "https://github.com/etrepum/kvc", {tag, "v1.3.0"}}},
    {druuid, ".*", {git, "git://github.com/kellymclaughlin/druuid.git", {tag, "0.2"}}}
 ]}.

-{escript_incl_apps, [goldrush, lager, getopt, riakhttpc, riakc, ibrowse, mochiweb, kvc]}.
+{escript_incl_apps, [goldrush, lager, getopt, kvc]}.
 {escript_emu_args, "%%! -escript main riak_test_escript +K true +P 10000 -env ERL_MAX_PORTS 10000\n"}.
 {plugin_dir, "src"}.
 {plugins, [rebar_riak_test_plugin]}.

From 5e0451d51ddacf57f3b5ad15cd38dd1ed72e6b46 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Sun, 4 Oct 2015 15:25:09 -0700
Subject: [PATCH 11/13] Remove YZ file.

---
 src/yokozuna_rt.erl | 362 --------------------------------------------
 1 file changed, 362 deletions(-)
 delete mode 100644 src/yokozuna_rt.erl

diff --git a/src/yokozuna_rt.erl b/src/yokozuna_rt.erl
deleted file mode 100644
index e7a17c123..000000000
--- a/src/yokozuna_rt.erl
+++ /dev/null
@@ -1,362 +0,0 @@
-%% -------------------------------------------------------------------
-%%
-%% Copyright (c) 2015 Basho Technologies, Inc.
-%%
-%% This file is provided to you under the Apache License,
-%% Version 2.0 (the "License"); you may not use this file
-%% except in compliance with the License. You may obtain
-%% a copy of the License at
-%%
-%%   http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing,
-%% software distributed under the License is distributed on an
-%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-%% KIND, either express or implied. See the License for the
-%% specific language governing permissions and limitations
-%% under the License.
-%%
-%%-------------------------------------------------------------------
--module(yokozuna_rt).
-
--include_lib("eunit/include/eunit.hrl").
--include("yokozuna_rt.hrl").
-
--export([check_exists/2,
-         expire_trees/1,
-         host_entries/1,
-         remove_index_dirs/2,
-         rolling_upgrade/2,
-         rolling_upgrade/3,
-         search/4,
-         search/5,
-         search_expect/5,
-         search_expect/6,
-         search_expect/7,
-         verify_num_found_query/3,
-         wait_for_aae/1,
-         wait_for_full_exchange_round/2,
-         wait_for_index/2,
-         wait_for_schema/2,
-         wait_for_schema/3,
-         write_data/5,
-         write_data/6]).
-
--type host() :: string().
--type portnum() :: integer().
--type count() :: non_neg_integer().
--type json_string() :: atom | string() | binary().
-
--define(FMT(S, Args), lists:flatten(io_lib:format(S, Args))).
-
--spec host_entries(rt:conn_info()) -> [{host(), portnum()}].
-host_entries(ClusterConnInfo) ->
-    [riak_http(I) || {_,I} <- ClusterConnInfo].
-
-%% @doc Write `Keys' via the PB interface to a `Bucket' and have them
-%% searchable in an `Index'.
--spec write_data([node()], pid(), index_name(), bucket(), [binary()]) -> ok.
-write_data(Cluster, Pid, Index, Bucket, Keys) ->
-    riakc_pb_socket:set_options(Pid, [queue_if_disconnected]),
-
-    create_and_set_index(Cluster, Pid, Index, Bucket),
-    timer:sleep(1000),
-
-    %% Write keys
-    lager:info("Writing ~p keys", [length(Keys)]),
-    [ok = rt:pbc_write(Pid, Bucket, Key, Key, "text/plain") || Key <- Keys],
-    ok.
-
--spec write_data([node()], pid(), index_name(), {schema_name(), raw_schema()},
-                 bucket(), [binary()]) -> ok.
-write_data(Cluster, Pid, Index, {SchemaName, SchemaData},
-           Bucket, Keys) ->
-    riakc_pb_socket:set_options(Pid, [queue_if_disconnected]),
-
-    riakc_pb_socket:create_search_schema(Pid, SchemaName, SchemaData),
-
-    create_and_set_index(Cluster, Pid, Bucket, Index, SchemaName),
-    timer:sleep(1000),
-
-    %% Write keys
-    lager:info("Writing ~p keys", [length(Keys)]),
-    [ok = rt:pbc_write(Pid, Bucket, Key, Key, "text/plain") || Key <- Keys],
-    ok.
-
-%% @doc Perform a rolling upgrade of the `Cluster' to a different `Version' based
-%% on current | previous | legacy.
--spec rolling_upgrade([node()], current | previous | legacy) -> ok.
-rolling_upgrade(Cluster, Vsn) ->
-    rolling_upgrade(Cluster, Vsn, []).
-
--spec rolling_upgrade([node()], current | previous | legacy, proplists:proplist()) -> ok.
-rolling_upgrade(Cluster, Vsn, YZCfgChanges) ->
-    lager:info("Perform rolling upgrade on cluster ~p", [Cluster]),
-    SolrPorts = lists:seq(11000, 11000 + length(Cluster) - 1),
-    Cluster2 = lists:zip(SolrPorts, Cluster),
-    [begin
-         Cfg = [{riak_kv, [{anti_entropy, {on, [debug]}},
-                           {anti_entropy_concurrency, 8},
-                           {anti_entropy_build_limit, {100, 1000}}
-                          ]},
-                {yokozuna, [{anti_entropy, {on, [debug]}},
-                            {anti_entropy_concurrency, 8},
-                            {anti_entropy_build_limit, {100, 1000}},
-                            {anti_entropy_tick, 1000},
-                            {enabled, true},
-                            {solr_port, SolrPort}]}],
-         MergeC = config_merge(Cfg, YZCfgChanges),
-         rt:upgrade(Node, Vsn, MergeC),
-         rt:wait_for_service(Node, riak_kv),
-         rt:wait_for_service(Node, yokozuna)
-     end || {SolrPort, Node} <- Cluster2],
-    ok.
-
-%% @doc Use AAE status to verify that exchange has occurred for all
-%% partitions since the time this function was invoked.
--spec wait_for_aae([node()]) -> ok.
-wait_for_aae(Cluster) ->
-    lager:info("Wait for AAE to migrate/repair indexes"),
-    wait_for_all_trees(Cluster),
-    wait_for_full_exchange_round(Cluster, erlang:now()),
-    ok.
-
-%% @doc Wait for all AAE trees to be built.
--spec wait_for_all_trees([node()]) -> ok.
-wait_for_all_trees(Cluster) ->
-    F = fun(Node) ->
-                lager:info("Check if all trees built for node ~p", [Node]),
-                Info = rpc:call(Node, yz_kv, compute_tree_info, []),
-                NotBuilt = [X || {_,undefined}=X <- Info],
-                NotBuilt == []
-        end,
-    rt:wait_until(Cluster, F),
-    ok.
-
-%% @doc Wait for a full exchange round since `Timestamp'. This means
-%% that all `{Idx,N}' for all partitions must have exchanged after
-%% `Timestamp'.
--spec wait_for_full_exchange_round([node()], os:now()) -> ok.
-wait_for_full_exchange_round(Cluster, Timestamp) ->
-    lager:info("wait for full AAE exchange round on cluster ~p", [Cluster]),
-    MoreRecent =
-        fun({_Idx, _, undefined, _RepairStats}) ->
-                false;
-           ({_Idx, _, AllExchangedTime, _RepairStats}) ->
-                AllExchangedTime > Timestamp
-        end,
-    AllExchanged =
-        fun(Node) ->
-                Exchanges = rpc:call(Node, yz_kv, compute_exchange_info, []),
-                {_Recent, WaitingFor1} = lists:partition(MoreRecent, Exchanges),
-                WaitingFor2 = [element(1,X) || X <- WaitingFor1],
-                lager:info("Still waiting for AAE of ~p ~p", [Node, WaitingFor2]),
-                [] == WaitingFor2
-        end,
-    rt:wait_until(Cluster, AllExchanged),
-    ok.
-
-%% @doc Wait for index creation. This is to handle *legacy* versions of yokozuna
-%% in upgrade tests
--spec wait_for_index(list(), index_name()) -> ok.
-wait_for_index(Cluster, Index) ->
-    IsIndexUp =
-        fun(Node) ->
-                lager:info("Waiting for index ~s to be available on node ~p",
-                           [Index, Node]),
-                rpc:call(Node, yz_solr, ping, [Index])
-        end,
-    [?assertEqual(ok, rt:wait_until(Node, IsIndexUp)) || Node <- Cluster],
-    ok.
-
-%% @see wait_for_schema/3
-wait_for_schema(Cluster, Name) ->
-    wait_for_schema(Cluster, Name, ignore).
-
-%% @doc Wait for the schema `Name' to be read by all nodes in
-%% `Cluster' before returning. If `Content' is binary data then
-%% verify that the schema bytes exactly match `Content'.
--spec wait_for_schema([node()], schema_name(), ignore | raw_schema()) -> ok.
-wait_for_schema(Cluster, Name, Content) ->
-    F = fun(Node) ->
-                lager:info("Attempt to read schema ~s from node ~p",
-                           [Name, Node]),
-                {Host, Port} = riak_pb(hd(rt:connection_info([Node]))),
-                {ok, PBConn} = riakc_pb_socket:start_link(Host, Port),
-                R = riakc_pb_socket:get_search_schema(PBConn, Name),
-                riakc_pb_socket:stop(PBConn),
-                case R of
-                    {ok, PL} ->
-                        case Content of
-                            ignore ->
-                                Name == proplists:get_value(name, PL);
-                            _ ->
-                                (Name == proplists:get_value(name, PL)) and
-                                    (Content == proplists:get_value(content, PL))
-                        end;
-                    _ ->
-                        false
-                end
-        end,
-    rt:wait_until(Cluster, F),
-    ok.
-
-%% @doc Expire YZ trees
--spec expire_trees([node()]) -> ok.
-expire_trees(Cluster) ->
-    lager:info("Expire all trees"),
-    _ = [ok = rpc:call(Node, yz_entropy_mgr, expire_trees, [])
-         || Node <- Cluster],
-
-    %% The expire is async so just give it a moment
-    timer:sleep(100),
-    ok.
-
-%% @doc Remove index directories, removing the index.
--spec remove_index_dirs([node()], index_name()) -> ok.
-remove_index_dirs(Nodes, IndexName) ->
-    IndexDirs = [rpc:call(Node, yz_index, index_dir, [IndexName]) ||
-                    Node <- Nodes],
-    lager:info("Remove index dirs: ~p, on nodes: ~p~n",
-               [IndexDirs, Nodes]),
-    [rt:stop(ANode) || ANode <- Nodes],
-    [rt:del_dir(binary_to_list(IndexDir)) || IndexDir <- IndexDirs],
-    [rt:start(ANode) || ANode <- Nodes],
-    ok.
-
-%% @doc Check if index/core exists in metadata, disk via yz_index:exists.
--spec check_exists([node()], index_name()) -> ok.
-check_exists(Nodes, IndexName) ->
-    rt:wait_until(Nodes,
-                  fun(N) ->
-                          rpc:call(N, yz_index, exists, [IndexName])
-                  end).
-
--spec verify_num_found_query([node()], index_name(), count()) -> ok.
-verify_num_found_query(Cluster, Index, ExpectedCount) ->
-    F = fun(Node) ->
-                Pid = rt:pbc(Node),
-                {ok, {_, _, _, NumFound}} = riakc_pb_socket:search(Pid, Index, <<"*:*">>),
-                lager:info("Check Count, Expected: ~p | Actual: ~p~n",
-                           [ExpectedCount, NumFound]),
-                ExpectedCount =:= NumFound
-        end,
-    rt:wait_until(Cluster, F),
-    ok.
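The helpers above are designed to compose: a test forces a tree rebuild, waits for a complete exchange round, and then polls the query count. A minimal sketch of that sequence, assuming Cluster is the node list and the index name is illustrative:

    %% expire_trees/1 invalidates the YZ AAE trees, wait_for_aae/1 blocks
    %% until every partition has exchanged since the call, and
    %% verify_num_found_query/3 retries until Solr reports the expected count.
    ok = yokozuna_rt:expire_trees(Cluster),
    ok = yokozuna_rt:wait_for_aae(Cluster),
    ok = yokozuna_rt:verify_num_found_query(Cluster, <<"test_idx">>, ExpectedCount).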
- -search_expect(HP, Index, Name, Term, Expect) -> - search_expect(yokozuna, HP, Index, Name, Term, Expect). - -search_expect(Type, HP, Index, Name, Term, Expect) -> - {ok, "200", _, R} = search(Type, HP, Index, Name, Term), - verify_count_http(Expect, R). - -search_expect(solr, {Host, Port}, Index, Name0, Term0, Shards, Expect) - when is_list(Shards), length(Shards) > 0 -> - Name = quote_unicode(Name0), - Term = quote_unicode(Term0), - URL = internal_solr_url(Host, Port, Index, Name, Term, Shards), - lager:info("Run search ~s", [URL]), - Opts = [{response_format, binary}], - {ok, "200", _, R} = ibrowse:send_req(URL, [], get, [], Opts), - verify_count_http(Expect, R). - -search(HP, Index, Name, Term) -> - search(yokozuna, HP, Index, Name, Term). - -search(Type, {Host, Port}, Index, Name, Term) when is_integer(Port) -> - search(Type, {Host, integer_to_list(Port)}, Index, Name, Term); - -search(Type, {Host, Port}, Index, Name0, Term0) -> - Name = quote_unicode(Name0), - Term = quote_unicode(Term0), - FmtStr = case Type of - solr -> - "http://~s:~s/internal_solr/~s/select?q=~s:~s&wt=json"; - yokozuna -> - "http://~s:~s/search/query/~s?q=~s:~s&wt=json" - end, - URL = ?FMT(FmtStr, [Host, Port, Index, Name, Term]), - lager:info("Run search ~s", [URL]), - Opts = [{response_format, binary}], - ibrowse:send_req(URL, [], get, [], Opts). - -%%%=================================================================== -%%% Private -%%%=================================================================== - --spec verify_count_http(count(), json_string()) -> boolean(). -verify_count_http(Expected, Resp) -> - Count = get_count_http(Resp), - lager:info("Expected: ~p, Actual: ~p", [Expected, Count]), - Expected == Count. - --spec get_count_http(json_string()) -> count(). -get_count_http(Resp) -> - Struct = mochijson2:decode(Resp), - kvc:path([<<"response">>, <<"numFound">>], Struct). - --spec riak_http({node(), rt:interfaces()} | rt:interfaces()) -> - {host(), portnum()}. -riak_http({_Node, ConnInfo}) -> - riak_http(ConnInfo); -riak_http(ConnInfo) -> - proplists:get_value(http, ConnInfo). - --spec riak_pb({node(), rt:interfaces()} | rt:interfaces()) -> - {host(), portnum()}. -riak_pb({_Node, ConnInfo}) -> - riak_pb(ConnInfo); -riak_pb(ConnInfo) -> - proplists:get_value(pb, ConnInfo). - --spec config_merge(proplists:proplist(), proplists:proplist()) -> - orddict:orddict() | proplists:proplist(). -config_merge(DefaultCfg, NewCfg) when NewCfg /= [] -> - orddict:update(yokozuna, - fun(V) -> - orddict:merge(fun(_, _X, Y) -> Y end, - orddict:from_list(V), - orddict:from_list( - orddict:fetch( - yokozuna, NewCfg))) - end, - DefaultCfg); -config_merge(DefaultCfg, _NewCfg) -> - DefaultCfg. - --spec create_and_set_index([node()], pid(), bucket(), index_name()) -> ok. -create_and_set_index(Cluster, Pid, Bucket, Index) -> - %% Create a search index and associate with a bucket - lager:info("Create a search index ~s and associate it with bucket ~s", - [Index, Bucket]), - ok = riakc_pb_socket:create_search_index(Pid, Index), - %% For possible legacy upgrade reasons, wrap create index in a wait - wait_for_index(Cluster, Index), - set_index(Pid, Bucket, Index). --spec create_and_set_index([node()], pid(), bucket(), index_name(), - schema_name()) -> ok. 
-create_and_set_index(Cluster, Pid, Bucket, Index, Schema) -> - %% Create a search index and associate with a bucket - lager:info("Create a search index ~s with a custom schema named ~s and - associate it with bucket ~s", [Index, Schema, Bucket]), - ok = riakc_pb_socket:create_search_index(Pid, Index, Schema, []), - %% For possible legacy upgrade reasons, wrap create index in a wait - wait_for_index(Cluster, Index), - set_index(Pid, Bucket, Index). - --spec set_index(pid(), bucket(), index_name()) -> ok. -set_index(Pid, Bucket, Index) -> - ok = riakc_pb_socket:set_search_index(Pid, Bucket, Index). - -internal_solr_url(Host, Port, Index) -> - ?FMT("http://~s:~B/internal_solr/~s", [Host, Port, Index]). -internal_solr_url(Host, Port, Index, Name, Term, Shards) -> - Ss = [internal_solr_url(Host, ShardPort, Index) - || {_, ShardPort} <- Shards], - ?FMT("http://~s:~B/internal_solr/~s/select?wt=json&q=~s:~s&shards=~s", - [Host, Port, Index, Name, Term, string:join(Ss, ",")]). - -quote_unicode(Value) -> - mochiweb_util:quote_plus(binary_to_list( - unicode:characters_to_binary(Value))). From b1c28c02918e12a4fe032feb7d728746ddcc6a5b Mon Sep 17 00:00:00 2001 From: Christopher Meiklejohn Date: Sun, 4 Oct 2015 15:30:28 -0700 Subject: [PATCH 12/13] Bump KVC. --- rebar.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.config b/rebar.config index f8c9aa355..15f693fbd 100644 --- a/rebar.config +++ b/rebar.config @@ -16,7 +16,7 @@ {lager, "(2.0|2.1|2.2).*", {git, "git://github.com/basho/lager.git", {tag, "2.2.0"}}}, {getopt, ".*", {git, "git://github.com/jcomellas/getopt", {tag, "v0.4"}}}, {meck, ".*", {git, "git://github.com/basho/meck.git", {tag, "0.8.2"}}}, - {kvc, "1.3.0", {git, "https://github.com/etrepum/kvc", {tag, "v1.3.0"}}}, + {kvc, ".*", {git, "https://github.com/etrepum/kvc", {branch, "master"}}}, {druuid, ".*", {git, "git://github.com/kellymclaughlin/druuid.git", {tag, "0.2"}}} ]}. From 259325c52a8af062f57b5076c335f689137a0753 Mon Sep 17 00:00:00 2001 From: Christopher Meiklejohn Date: Sun, 4 Oct 2015 15:38:50 -0700 Subject: [PATCH 13/13] Use time_compat. --- intercepts/riak_core_vnode_master_intercepts.erl | 4 ++-- intercepts/riak_kv_vnode_intercepts.erl | 2 +- rebar.config | 3 ++- src/rt.erl | 8 ++++---- src/rt_local.erl | 10 +++++----- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/intercepts/riak_core_vnode_master_intercepts.erl b/intercepts/riak_core_vnode_master_intercepts.erl index b581ca769..f39d3e405 100644 --- a/intercepts/riak_core_vnode_master_intercepts.erl +++ b/intercepts/riak_core_vnode_master_intercepts.erl @@ -17,7 +17,7 @@ stop_vnode_after_bloom_fold_request_succeeds(IndexNode, Req, Sender, VMaster) -> case (ReqFun == fun riak_repl_aae_source:bloom_fold/3 orelse ReqFun == fun riak_repl_keylist_server:bloom_fold/3) of true -> - random:seed(erlang:now()), + random:seed(time_compat:timestamp()), case random:uniform(10) of 5 -> %% Simulate what happens when a VNode completes handoff between command_returning_vnode @@ -32,4 +32,4 @@ stop_vnode_after_bloom_fold_request_succeeds(IndexNode, Req, Sender, VMaster) -> ?M:command_return_vnode_orig(IndexNode, Req, Sender, VMaster) end; false -> ?M:command_return_vnode_orig(IndexNode, Req, Sender, VMaster) - end. \ No newline at end of file + end. 
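The mechanical change in this patch is the same everywhere: each erlang:now()/now() call used as a timestamp source becomes time_compat:timestamp(), which returns the familiar {MegaSecs, Secs, MicroSecs} triple on both pre- and post-OTP-18 releases (the time_compat dependency is added to rebar.config below). A minimal sketch of the replacement pattern, with an illustrative delay:

    %% time_compat:timestamp/0 mirrors the shape of the deprecated
    %% erlang:now/0, so it can seed the legacy random module unchanged.
    random:seed(time_compat:timestamp()),
    Delay = random:uniform(5000),
    timer:sleep(Delay).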
From 259325c52a8af062f57b5076c335f689137a0753 Mon Sep 17 00:00:00 2001
From: Christopher Meiklejohn
Date: Sun, 4 Oct 2015 15:38:50 -0700
Subject: [PATCH 13/13] Use time_compat.

---
 intercepts/riak_core_vnode_master_intercepts.erl |  4 ++--
 intercepts/riak_kv_vnode_intercepts.erl          |  2 +-
 rebar.config                                     |  3 ++-
 src/rt.erl                                       |  8 ++++----
 src/rt_local.erl                                 | 10 +++++-----
 5 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/intercepts/riak_core_vnode_master_intercepts.erl b/intercepts/riak_core_vnode_master_intercepts.erl
index b581ca769..f39d3e405 100644
--- a/intercepts/riak_core_vnode_master_intercepts.erl
+++ b/intercepts/riak_core_vnode_master_intercepts.erl
@@ -17,7 +17,7 @@ stop_vnode_after_bloom_fold_request_succeeds(IndexNode, Req, Sender, VMaster) ->
     case (ReqFun == fun riak_repl_aae_source:bloom_fold/3 orelse
           ReqFun == fun riak_repl_keylist_server:bloom_fold/3) of
         true ->
-            random:seed(erlang:now()),
+            random:seed(time_compat:timestamp()),
             case random:uniform(10) of
                 5 ->
                     %% Simulate what happens when a VNode completes handoff between command_returning_vnode
@@ -32,4 +32,4 @@ stop_vnode_after_bloom_fold_request_succeeds(IndexNode, Req, Sender, VMaster) ->
                     ?M:command_return_vnode_orig(IndexNode, Req, Sender, VMaster)
             end;
         false -> ?M:command_return_vnode_orig(IndexNode, Req, Sender, VMaster)
-    end.
\ No newline at end of file
+    end.
diff --git a/intercepts/riak_kv_vnode_intercepts.erl b/intercepts/riak_kv_vnode_intercepts.erl
index ad3d6c893..e9d748f3b 100644
--- a/intercepts/riak_kv_vnode_intercepts.erl
+++ b/intercepts/riak_kv_vnode_intercepts.erl
@@ -36,7 +36,7 @@ wrong_node(_Partition) ->
 
 %% @doc Make all KV vnode coverage commands take abnormally long.
 slow_handle_coverage(Req, Filter, Sender, State) ->
-    random:seed(erlang:now()),
+    random:seed(time_compat:timestamp()),
     Rand = random:uniform(5000),
     error_logger:info_msg("coverage sleeping ~p", [Rand]),
     timer:sleep(Rand),
diff --git a/rebar.config b/rebar.config
index 15f693fbd..65f86a442 100644
--- a/rebar.config
+++ b/rebar.config
@@ -17,7 +17,8 @@
         {getopt, ".*", {git, "git://github.com/jcomellas/getopt", {tag, "v0.4"}}},
         {meck, ".*", {git, "git://github.com/basho/meck.git", {tag, "0.8.2"}}},
         {kvc, ".*", {git, "https://github.com/etrepum/kvc", {branch, "master"}}},
-        {druuid, ".*", {git, "git://github.com/kellymclaughlin/druuid.git", {tag, "0.2"}}}
+        {druuid, ".*", {git, "git://github.com/kellymclaughlin/druuid.git", {tag, "0.2"}}},
+        {time_compat, ".*", {git, "git://github.com/lasp-lang/time_compat.git", {branch, "master"}}}
 ]}.
 
 {escript_incl_apps, [goldrush, lager, getopt, kvc]}.
diff --git a/src/rt.erl b/src/rt.erl
index 09d66d796..3c8b96d4f 100644
--- a/src/rt.erl
+++ b/src/rt.erl
@@ -534,18 +534,18 @@ cmd(Cmd, Opts) ->
 -spec stream_cmd(string()) -> {integer(), string()}.
 stream_cmd(Cmd) ->
     Port = open_port({spawn, binary_to_list(iolist_to_binary(Cmd))}, [stream, stderr_to_stdout, exit_status]),
-    stream_cmd_loop(Port, "", "", now()).
+    stream_cmd_loop(Port, "", "", time_compat:timestamp()).
 
 %% @doc same as rt:stream_cmd/1, but with options, like open_port/2
 -spec stream_cmd(string(), string()) -> {integer(), string()}.
 stream_cmd(Cmd, Opts) ->
     Port = open_port({spawn, binary_to_list(iolist_to_binary(Cmd))}, [stream, stderr_to_stdout, exit_status] ++ Opts),
-    stream_cmd_loop(Port, "", "", now()).
+    stream_cmd_loop(Port, "", "", time_compat:timestamp()).
 
 stream_cmd_loop(Port, Buffer, NewLineBuffer, Time={_MegaSecs, Secs, _MicroSecs}) ->
     receive
         {Port, {data, Data}} ->
-            {_, Now, _} = now(),
+            {_, Now, _} = time_compat:timestamp(),
             NewNewLineBuffer = case Now > Secs of
                                    true ->
                                        lager:info(NewLineBuffer),
@@ -560,7 +560,7 @@ stream_cmd_loop(Port, Buffer, NewLineBuffer, Time={_MegaSecs, Secs, _MicroSecs})
                     [ lager:info(Token) || Token <- Tokens ],
                     stream_cmd_loop(Port, Buffer ++ NewNewLineBuffer ++ Data, "", Time);
                 _ ->
-                    stream_cmd_loop(Port, Buffer, NewNewLineBuffer ++ Data, now())
+                    stream_cmd_loop(Port, Buffer, NewNewLineBuffer ++ Data, time_compat:timestamp())
             end;
         {Port, {exit_status, Status}} ->
             catch port_close(Port),
diff --git a/src/rt_local.erl b/src/rt_local.erl
index b8bfaed8e..c4c4def6c 100644
--- a/src/rt_local.erl
+++ b/src/rt_local.erl
@@ -84,18 +84,18 @@ install_on_absence(Command, InstallCommand) ->
 -spec stream_cmd(string()) -> {integer(), string()}.
 stream_cmd(Cmd) ->
     Port = open_port({spawn, binary_to_list(iolist_to_binary(Cmd))}, [stream, stderr_to_stdout, exit_status]),
-    stream_cmd_loop(Port, "", "", now()).
+    stream_cmd_loop(Port, "", "", time_compat:timestamp()).
 
 %% @doc same as rt:stream_cmd/1, but with options, like open_port/2
 -spec stream_cmd(string(), string()) -> {integer(), string()}.
 stream_cmd(Cmd, Opts) ->
     Port = open_port({spawn, binary_to_list(iolist_to_binary(Cmd))}, [stream, stderr_to_stdout, exit_status] ++ Opts),
-    stream_cmd_loop(Port, "", "", now()).
+    stream_cmd_loop(Port, "", "", time_compat:timestamp()).
 
 stream_cmd_loop(Port, Buffer, NewLineBuffer, Time={_MegaSecs, Secs, _MicroSecs}) ->
     receive
         {Port, {data, Data}} ->
-            {_, Now, _} = now(),
+            {_, Now, _} = time_compat:timestamp(),
             NewNewLineBuffer = case Now > Secs of
                                    true ->
                                        lager:info(NewLineBuffer),
@@ -110,11 +110,11 @@ stream_cmd_loop(Port, Buffer, NewLineBuffer, Time={_MegaSecs, Secs, _MicroSecs})
                     [ lager:info(Token) || Token <- Tokens ],
                     stream_cmd_loop(Port, Buffer ++ NewNewLineBuffer ++ Data, "", Time);
                 _ ->
-                    stream_cmd_loop(Port, Buffer, NewNewLineBuffer ++ Data, now())
+                    stream_cmd_loop(Port, Buffer, NewNewLineBuffer ++ Data, time_compat:timestamp())
             end;
         {Port, {exit_status, Status}} ->
             catch port_close(Port),
             {Status, Buffer}
     after rt_config:get(rt_max_wait_time) ->
             {-1, Buffer}
-    end.
\ No newline at end of file
+    end.
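erlang:now/0 is deprecated as of OTP 18, so this final patch routes every call through time_compat, whose timestamp/0 returns the same {MegaSecs, Secs, MicroSecs} triple on old and new releases, resolving to the newer time API where it exists and falling back to erlang:now/0 elsewhere. A minimal sketch of the substitution as used in the intercepts above (the module name is illustrative):

    -module(time_compat_example).
    -export([seeded_delay/0]).

    %% time_compat:timestamp() is a drop-in replacement for erlang:now():
    %% it can seed the random module and feed the same tuple-shaped time
    %% arithmetic without deprecation warnings on OTP 18+.
    seeded_delay() ->
        random:seed(time_compat:timestamp()),
        Delay = random:uniform(5000),   %% milliseconds, as in slow_handle_coverage
        timer:sleep(Delay),
        Delay.

Because the return shape is unchanged, call sites such as stream_cmd_loop/4, which pattern-match {_MegaSecs, Secs, _MicroSecs}, need only the module prefix swapped.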