From ddccca4c439c3ac8835a13ea3bd70a64aaf82ce2 Mon Sep 17 00:00:00 2001
From: Sandon Van Ness
Date: Fri, 16 Aug 2013 11:43:53 -0700
Subject: [PATCH 01/77] Add yaml for rhel 6.4 in distros.

Signed-off-by: Sandon Van Ness
---
 distros/rhel_6.4.yaml | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 distros/rhel_6.4.yaml

diff --git a/distros/rhel_6.4.yaml b/distros/rhel_6.4.yaml
new file mode 100644
index 000000000..522549583
--- /dev/null
+++ b/distros/rhel_6.4.yaml
@@ -0,0 +1,2 @@
+os_type: rhel
+os_version: "6.4"

From 3b08c17bdac9f10e74d6f87d3d3e1e853dfaaeb2 Mon Sep 17 00:00:00 2001
From: Zack Cerza
Date: Fri, 23 Aug 2013 14:51:00 -0500
Subject: [PATCH 02/77] Don't rebuild the virtualenv if NO_CLOBBER is set

---
 bootstrap | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/bootstrap b/bootstrap
index 695180621..d03ad6661 100755
--- a/bootstrap
+++ b/bootstrap
@@ -13,12 +13,14 @@ if [ -n "$missing" ]; then
     exit 1
 fi

-# site packages needed because libvirt python bindings are not nicely
-# packaged
-virtualenv --system-site-packages --distribute virtualenv
+if [ -z "$NO_CLOBBER" ] || [ ! -e ./virtualenv ]; then
+    # site packages needed because libvirt python bindings are not nicely
+    # packaged
+    virtualenv --system-site-packages --distribute virtualenv

-# avoid pip bugs
-./virtualenv/bin/pip install --upgrade pip
+    # avoid pip bugs
+    ./virtualenv/bin/pip install --upgrade pip
+fi

 ./virtualenv/bin/pip install -r requirements.txt

From c91009ceb1c51fef119960b8ef965cd22e11be2d Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Mon, 26 Aug 2013 18:14:37 -0700
Subject: [PATCH 03/77] admin_socket: fix retry-on-error behavior

Signed-off-by: Sage Weil
(cherry picked from commit 502714ba2ed7bb47ec81a25cd2f1c724583020f9)
---
 teuthology/task/admin_socket.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/teuthology/task/admin_socket.py b/teuthology/task/admin_socket.py
index 81d75c723..499525234 100644
--- a/teuthology/task/admin_socket.py
+++ b/teuthology/task/admin_socket.py
@@ -80,6 +80,7 @@ def _socket_command(ctx, remote, socket_path, command, args):
             '--admin-daemon', socket_path,
             ] + command.split(' ') + args,
         stdout=json_fp,
+        check_status=False,
         )
     if proc.exitstatus == 0:
         break

From 821b179a1eb76dca0eb1ddf0ef0353a2a391a65c Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Thu, 29 Aug 2013 20:13:25 -0700
Subject: [PATCH 04/77] mark all existing collections with %

---
 suites/big/rados-thrash/% | 0
 suites/ceph-deploy/fs/% | 0
 suites/ceph-deploy/rados/% | 0
 suites/ceph-deploy/rbd/% | 0
 suites/ceph-deploy/singleton/% | 0
 suites/experimental/multimds/% | 0
 suites/fs/basic/% | 0
 suites/fs/multiclient/% | 0
 suites/fs/samba/% | 0
 suites/fs/thrash/% | 0
 suites/fs/traceless/% | 0
 suites/fs/verify/% | 0
 suites/hadoop/basic/% | 0
 suites/kcephfs/cephfs/% | 0
 suites/kcephfs/thrash/% | 0
 suites/krbd/rbd-nomount/% | 0
 suites/krbd/rbd/% | 0
 suites/krbd/singleton/% | 0
 suites/krbd/thrash/% | 0
 suites/marginal/basic/% | 0
 suites/marginal/fs-misc/% | 0
 suites/marginal/mds_restart/% | 0
 suites/marginal/multimds/% | 0
 suites/nfs/basic/% | 0
 suites/powercycle/osd/% | 0
 suites/rados/basic/% | 0
 suites/rados/monthrash/% | 0
 suites/rados/multimon/% | 0
 suites/rados/singleton-nomsgr/% | 0
 suites/rados/singleton/% | 0
 suites/rados/thrash/% | 0
 suites/rados/verify/% | 0
 suites/rbd/basic/% | 0
 suites/rbd/librbd/% | 0
 suites/rbd/singleton/% | 0
 suites/rbd/thrash/% | 0
 suites/rgw/multifs/% | 0
 suites/rgw/singleton/% | 0
 suites/rgw/verify/% | 0
 suites/smoke/basic/% | 0
 suites/smoke/multiclient/% | 0
suites/smoke/multifs/% | 0 suites/smoke/multimon/% | 0 suites/smoke/singleton/% | 0 suites/smoke/thrash/% | 0 suites/smoke/verify/% | 0 suites/stress/bench/% | 0 suites/stress/thrash/% | 0 suites/upgrade-cuttlefish/fs/% | 0 suites/upgrade-cuttlefish/rados-older/% | 0 suites/upgrade-cuttlefish/rados/% | 0 suites/upgrade-cuttlefish/rbd/% | 0 suites/upgrade-cuttlefish/rgw/% | 0 suites/upgrade-fs/fs/% | 0 suites/upgrade-parallel/fs/% | 0 suites/upgrade-parallel/rados/% | 0 suites/upgrade-parallel/rgw/% | 0 suites/upgrade-parallel/stress-split/% | 0 suites/upgrade/mixed-cluster/% | 0 suites/upgrade/mixed-mons/% | 0 suites/upgrade/rados-double/% | 0 suites/upgrade/rados/% | 0 suites/upgrade/rbd-double/% | 0 suites/upgrade/rbd/% | 0 suites/upgrade/rgw-double/% | 0 suites/upgrade/rgw/% | 0 suites/upgrade/singleton/% | 0 67 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 suites/big/rados-thrash/% create mode 100644 suites/ceph-deploy/fs/% create mode 100644 suites/ceph-deploy/rados/% create mode 100644 suites/ceph-deploy/rbd/% create mode 100644 suites/ceph-deploy/singleton/% create mode 100644 suites/experimental/multimds/% create mode 100644 suites/fs/basic/% create mode 100644 suites/fs/multiclient/% create mode 100644 suites/fs/samba/% create mode 100644 suites/fs/thrash/% create mode 100644 suites/fs/traceless/% create mode 100644 suites/fs/verify/% create mode 100644 suites/hadoop/basic/% create mode 100644 suites/kcephfs/cephfs/% create mode 100644 suites/kcephfs/thrash/% create mode 100644 suites/krbd/rbd-nomount/% create mode 100644 suites/krbd/rbd/% create mode 100644 suites/krbd/singleton/% create mode 100644 suites/krbd/thrash/% create mode 100644 suites/marginal/basic/% create mode 100644 suites/marginal/fs-misc/% create mode 100644 suites/marginal/mds_restart/% create mode 100644 suites/marginal/multimds/% create mode 100644 suites/nfs/basic/% create mode 100644 suites/powercycle/osd/% create mode 100644 suites/rados/basic/% create mode 100644 suites/rados/monthrash/% create mode 100644 suites/rados/multimon/% create mode 100644 suites/rados/singleton-nomsgr/% create mode 100644 suites/rados/singleton/% create mode 100644 suites/rados/thrash/% create mode 100644 suites/rados/verify/% create mode 100644 suites/rbd/basic/% create mode 100644 suites/rbd/librbd/% create mode 100644 suites/rbd/singleton/% create mode 100644 suites/rbd/thrash/% create mode 100644 suites/rgw/multifs/% create mode 100644 suites/rgw/singleton/% create mode 100644 suites/rgw/verify/% create mode 100644 suites/smoke/basic/% create mode 100644 suites/smoke/multiclient/% create mode 100644 suites/smoke/multifs/% create mode 100644 suites/smoke/multimon/% create mode 100644 suites/smoke/singleton/% create mode 100644 suites/smoke/thrash/% create mode 100644 suites/smoke/verify/% create mode 100644 suites/stress/bench/% create mode 100644 suites/stress/thrash/% create mode 100644 suites/upgrade-cuttlefish/fs/% create mode 100644 suites/upgrade-cuttlefish/rados-older/% create mode 100644 suites/upgrade-cuttlefish/rados/% create mode 100644 suites/upgrade-cuttlefish/rbd/% create mode 100644 suites/upgrade-cuttlefish/rgw/% create mode 100644 suites/upgrade-fs/fs/% create mode 100644 suites/upgrade-parallel/fs/% create mode 100644 suites/upgrade-parallel/rados/% create mode 100644 suites/upgrade-parallel/rgw/% create mode 100644 suites/upgrade-parallel/stress-split/% create mode 100644 suites/upgrade/mixed-cluster/% create mode 100644 suites/upgrade/mixed-mons/% create mode 100644 
suites/upgrade/rados-double/% create mode 100644 suites/upgrade/rados/% create mode 100644 suites/upgrade/rbd-double/% create mode 100644 suites/upgrade/rbd/% create mode 100644 suites/upgrade/rgw-double/% create mode 100644 suites/upgrade/rgw/% create mode 100644 suites/upgrade/singleton/% diff --git a/suites/big/rados-thrash/% b/suites/big/rados-thrash/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/ceph-deploy/fs/% b/suites/ceph-deploy/fs/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/ceph-deploy/rados/% b/suites/ceph-deploy/rados/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/ceph-deploy/rbd/% b/suites/ceph-deploy/rbd/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/ceph-deploy/singleton/% b/suites/ceph-deploy/singleton/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/experimental/multimds/% b/suites/experimental/multimds/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/fs/basic/% b/suites/fs/basic/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/fs/multiclient/% b/suites/fs/multiclient/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/fs/samba/% b/suites/fs/samba/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/fs/thrash/% b/suites/fs/thrash/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/fs/traceless/% b/suites/fs/traceless/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/fs/verify/% b/suites/fs/verify/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/hadoop/basic/% b/suites/hadoop/basic/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/kcephfs/cephfs/% b/suites/kcephfs/cephfs/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/kcephfs/thrash/% b/suites/kcephfs/thrash/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/krbd/rbd-nomount/% b/suites/krbd/rbd-nomount/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/krbd/rbd/% b/suites/krbd/rbd/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/krbd/singleton/% b/suites/krbd/singleton/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/krbd/thrash/% b/suites/krbd/thrash/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/marginal/basic/% b/suites/marginal/basic/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/marginal/fs-misc/% b/suites/marginal/fs-misc/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/marginal/mds_restart/% b/suites/marginal/mds_restart/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/marginal/multimds/% b/suites/marginal/multimds/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/nfs/basic/% b/suites/nfs/basic/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/powercycle/osd/% b/suites/powercycle/osd/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rados/basic/% b/suites/rados/basic/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rados/monthrash/% b/suites/rados/monthrash/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rados/multimon/% b/suites/rados/multimon/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rados/singleton-nomsgr/% b/suites/rados/singleton-nomsgr/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rados/singleton/% 
b/suites/rados/singleton/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rados/thrash/% b/suites/rados/thrash/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rados/verify/% b/suites/rados/verify/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rbd/basic/% b/suites/rbd/basic/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rbd/librbd/% b/suites/rbd/librbd/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rbd/singleton/% b/suites/rbd/singleton/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rbd/thrash/% b/suites/rbd/thrash/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rgw/multifs/% b/suites/rgw/multifs/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rgw/singleton/% b/suites/rgw/singleton/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/rgw/verify/% b/suites/rgw/verify/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/smoke/basic/% b/suites/smoke/basic/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/smoke/multiclient/% b/suites/smoke/multiclient/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/smoke/multifs/% b/suites/smoke/multifs/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/smoke/multimon/% b/suites/smoke/multimon/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/smoke/singleton/% b/suites/smoke/singleton/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/smoke/thrash/% b/suites/smoke/thrash/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/smoke/verify/% b/suites/smoke/verify/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/stress/bench/% b/suites/stress/bench/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/stress/thrash/% b/suites/stress/thrash/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-cuttlefish/fs/% b/suites/upgrade-cuttlefish/fs/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-cuttlefish/rados-older/% b/suites/upgrade-cuttlefish/rados-older/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-cuttlefish/rados/% b/suites/upgrade-cuttlefish/rados/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-cuttlefish/rbd/% b/suites/upgrade-cuttlefish/rbd/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-cuttlefish/rgw/% b/suites/upgrade-cuttlefish/rgw/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-fs/fs/% b/suites/upgrade-fs/fs/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-parallel/fs/% b/suites/upgrade-parallel/fs/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-parallel/rados/% b/suites/upgrade-parallel/rados/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-parallel/rgw/% b/suites/upgrade-parallel/rgw/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade-parallel/stress-split/% b/suites/upgrade-parallel/stress-split/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/mixed-cluster/% b/suites/upgrade/mixed-cluster/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/mixed-mons/% b/suites/upgrade/mixed-mons/% new file mode 100644 index 000000000..e69de29bb diff --git 
a/suites/upgrade/rados-double/% b/suites/upgrade/rados-double/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/rados/% b/suites/upgrade/rados/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/rbd-double/% b/suites/upgrade/rbd-double/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/rbd/% b/suites/upgrade/rbd/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/rgw-double/% b/suites/upgrade/rgw-double/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/rgw/% b/suites/upgrade/rgw/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/singleton/% b/suites/upgrade/singleton/% new file mode 100644 index 000000000..e69de29bb From 1be63e89ae75aac2c9ea7a620b0ea1b20a638ad2 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Sat, 24 Aug 2013 14:09:53 -0700 Subject: [PATCH 05/77] ceph_manager: wait for dump_ops_in_flight on osd revival Wait for a command that implies a complete startup instead of 'version' (which does not). Fixes: #5924 Signed-off-by: Sage Weil (cherry picked from commit 5ec5e2c0e2e0cb4e25071999a05d9ac6294c3797) --- teuthology/task/ceph_manager.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/teuthology/task/ceph_manager.py b/teuthology/task/ceph_manager.py index 09a61e731..cff36ede3 100644 --- a/teuthology/task/ceph_manager.py +++ b/teuthology/task/ceph_manager.py @@ -916,7 +916,13 @@ def revive_osd(self, osd, timeout=75): ceph_task.make_admin_daemon_dir(self.ctx, remote) self.ctx.daemons.get_daemon('osd', osd).reset() self.ctx.daemons.get_daemon('osd', osd).restart() - self.wait_run_admin_socket(osd, timeout=timeout) + # wait for dump_ops_in_flight; this command doesn't appear + # until after the signal handler is installed and it is safe + # to stop the osd again without making valgrind leak checks + # unhappy. see #5924. + self.wait_run_admin_socket(osd, + args=['dump_ops_in_flight'], + timeout=timeout) def mark_down_osd(self, osd): self.raw_cluster_cmd('osd', 'down', str(osd)) From 83906ac4dfdb7007687511655e2852a2f5c190a4 Mon Sep 17 00:00:00 2001 From: Sandon Van Ness Date: Thu, 5 Sep 2013 11:58:27 -0700 Subject: [PATCH 06/77] Run yum clean all after installing new ceph-release. In order to get around the issue of repomd.xml being older than the previously installed one depending on when the gitbuilder built the repo. Signed-off-by: Sandon Van Ness --- teuthology/task/install.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/teuthology/task/install.py b/teuthology/task/install.py index 3964951ad..05c58ae2d 100644 --- a/teuthology/task/install.py +++ b/teuthology/task/install.py @@ -745,6 +745,11 @@ def _upgrade_rpm_packages(ctx, config, remote, pkgs, branch): _run_and_log_error_if_fails(remote, args) _yum_fix_repo_priority(remote, project) + remote.run( + args=[ + 'sudo', 'yum', 'clean', 'all', + ]) + # Build a space-separated string consisting of $PKG-$VER for yum pkgs_with_vers = ["%s-%s" % (pkg, version) for pkg in pkgs] From 38db951e51af0f6f80a481dd597ebb3a4d2bc209 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 10 Sep 2013 10:53:41 -0700 Subject: [PATCH 07/77] remove basedir/testdir distinction We should never run with a conflicting testdir in the basedir, and the code to do this is confusing and buggy. Go back to a single testdir and simple checks. 
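For context, this reduces sandbox path resolution to a single lookup; a
minimal sketch of the resulting behavior (FakeCtx is a hypothetical
stand-in for teuthology's run context, not part of the patch):

    # Sketch of the simplified lookup introduced below: an explicit
    # test_path setting wins, otherwise /home/<test_user>/cephtest.
    class FakeCtx:
        teuthology_config = {'test_user': 'ubuntu'}

    def get_testdir(ctx):
        if 'test_path' in ctx.teuthology_config:
            return ctx.teuthology_config['test_path']
        test_user = ctx.teuthology_config.get('test_user', 'ubuntu')
        return '/home/%s/cephtest' % test_user

    print(get_testdir(FakeCtx()))  # -> /home/ubuntu/cephtest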
Signed-off-by: Sage Weil (cherry picked from commit 5acc57f5ad6ed14b3e1c89166d2571cf302a467c) Conflicts: README.rst teuthology/misc.py --- README.rst | 21 ++++------- teuthology/misc.py | 72 +++++++------------------------------ teuthology/nuke.py | 4 +-- teuthology/task/internal.py | 50 +++----------------------- 4 files changed, 26 insertions(+), 121 deletions(-) diff --git a/README.rst b/README.rst index 91bbb55c8..f2d6e3cbc 100644 --- a/README.rst +++ b/README.rst @@ -303,24 +303,15 @@ this issue. Test Sandbox Directory ====================== -Teuthology currently places most test files and mount points in a sandbox -directory, defaulting to ``/tmp/cephtest/{rundir}``. The ``{rundir}`` is the -name of the run (as given by ``--name``) or if no name is specified, -``user@host-timestamp`` is used. To change the location of the sandbox -directory, the following options can be specified in -``$HOME/.teuthology.yaml``:: - - base_test_dir: - -The ``base_test_dir`` option will set the base directory to use for the individual -run directories. If not specified, this defaults to: ``/tmp/cephtest``. +Teuthology currently places most test files and mount points in a +sandbox directory, defaulting to ``/home/$USER/cephtest``. To change +the location of the sandbox directory, the following option can be +specified in ``$HOME/.teuthology.yaml``:: test_path: -The ``test_path`` option will set the complete path to use for the test directory. -This allows for the old behavior, where ``/tmp/cephtest`` was used as the sandbox -directory. - +======= +>>>>>>> 5acc57f... remove basedir/testdir distinction VIRTUAL MACHINE SUPPORT ======================= diff --git a/teuthology/misc.py b/teuthology/misc.py index 1a0eee7e4..cf531e6a5 100644 --- a/teuthology/misc.py +++ b/teuthology/misc.py @@ -21,8 +21,6 @@ import datetime stamp = datetime.datetime.now().strftime("%y%m%d%H%M") -global_jobid = None -checked_jobid = False is_vm = lambda x: x.startswith('vpm') or x.startswith('ubuntu@vpm') is_arm = lambda x: x.startswith('tala') or x.startswith('ubuntu@tala') or x.startswith('saya') or x.startswith('ubuntu@saya') @@ -30,66 +28,22 @@ def get_testdir(ctx): if 'test_path' in ctx.teuthology_config: return ctx.teuthology_config['test_path'] + test_user = get_test_user(ctx) + # FIXME this ideally should use os.path.expanduser() in the future, in case + # $HOME isn't /home/$USER - e.g. on a Mac. However, since we're executing + # this on the server side, it won't work properly. 
+ return ctx.teuthology_config.get('test_path', '/home/%s/cephtest' % test_user) - basedir = ctx.teuthology_config.get('base_test_dir', '/home/ubuntu/cephtest') - - global global_jobid - global checked_jobid +def get_test_user(ctx): + """ + :returns: str -- the user to run tests as on remote hosts + """ + return ctx.teuthology_config.get('test_user', 'ubuntu') - # check if a jobid exists in the machine status for all our targets - # and if its the same jobid, use that as the subdir for the test - if not checked_jobid and ctx.config.get('check-locks') != False: - jobids = {} - for machine in ctx.config['targets'].iterkeys(): - status = lockstatus.get_status(ctx, machine) - if status is None or 'description' not in status or status['description'] is None: - continue - jid = status['description'].split('/')[-1] - if jid is None or jid == 'None': - continue - jobids[jid] = 1 - if len(jobids) > 1: - break - if len(jobids) == 1: - # same job id on all machines, use that as the test subdir - (jobid,) = jobids.iterkeys() - if jobid is not None: - global_jobid = jobid - log.debug('setting my jobid to {jid}'.format(jid=global_jobid)) - checked_jobid = True - - # the subdir is chosen using the priority: - # 1. jobid chosen by the teuthology beanstalk queue - # 2. run name specified by teuthology schedule - # 3. user@timestamp - if global_jobid is not None: - log.debug('with jobid basedir: {b}'.format(b=global_jobid)) - return '{basedir}/{jobid}'.format( - basedir=basedir, - jobid=global_jobid, - ) - elif hasattr(ctx, 'name') and ctx.name: - log.debug('with name basedir: {b}'.format(b=basedir)) - # we need a short string to keep the path short - import re - m = re.match(r"(.*)-(.*)-(.*)-(.*)_(.*)-(.*)-(.*)-(.*)-(.*)", ctx.name) - (u, y, m, d, hms, s, c, k, f) = m.groups() - short = u[0:2] + y[2:4] + m[0:2] + d[0:2] + hms[0:2] + hms[3:5] + s[0] + c[0] + k[0] + f[0] - return '{basedir}/{rundir}'.format( - basedir=basedir, - rundir=short, - ) - else: - log.debug('basedir: {b}'.format(b=basedir)) - return '{basedir}/{user}{stamp}'.format( - basedir=basedir, - user=get_user()[0:2], - stamp=stamp) -def get_testdir_base(ctx): - if 'test_path' in ctx.teuthology_config: - return ctx.teuthology_config['test_path'] - return ctx.teuthology_config.get('base_test_dir', '/home/ubuntu/cephtest') +def get_archive_dir(ctx): + test_dir = get_testdir(ctx) + return os.path.normpath(os.path.join(test_dir, 'archive')) def get_ceph_binary_url(package=None, branch=None, tag=None, sha1=None, dist=None, diff --git a/teuthology/nuke.py b/teuthology/nuke.py index 82389f99d..0dfffb84a 100644 --- a/teuthology/nuke.py +++ b/teuthology/nuke.py @@ -263,13 +263,13 @@ def remove_installed_packages(ctx, log): install_task.purge_data(ctx) def remove_testing_tree(ctx, log): - from teuthology.misc import get_testdir_base + from teuthology.misc import get_testdir from .orchestra import run nodes = {} for remote in ctx.cluster.remotes.iterkeys(): proc = remote.run( args=[ - 'sudo', 'rm', '-rf', get_testdir_base(ctx), + 'sudo', 'rm', '-rf', get_testdir(ctx), # just for old time's sake run.Raw('&&'), 'sudo', 'rm', '-rf', '/tmp/cephtest', diff --git a/teuthology/task/internal.py b/teuthology/task/internal.py index 755af8f52..c7dff23ee 100644 --- a/teuthology/task/internal.py +++ b/teuthology/task/internal.py @@ -18,31 +18,17 @@ @contextlib.contextmanager def base(ctx, config): - log.info('Creating base directory...') - test_basedir = teuthology.get_testdir_base(ctx) + log.info('Creating test directory...') testdir = teuthology.get_testdir(ctx) - 
# make base dir if it doesn't exist run.wait( ctx.cluster.run( args=[ - 'mkdir', '-m0755', '-p', '--', - test_basedir, - ], - wait=False, - ) - ) - # only create testdir if its not set to basedir - if test_basedir != testdir: - run.wait( - ctx.cluster.run( - args=[ - 'mkdir', '-m0755', '--', - testdir, + 'mkdir', '-m0755', '--', + testdir, ], - wait=False, - ) + wait=False, ) - + ) try: yield finally: @@ -235,32 +221,6 @@ def check_ceph_data(ctx, config): def check_conflict(ctx, config): log.info('Checking for old test directory...') - test_basedir = teuthology.get_testdir_base(ctx) - processes = ctx.cluster.run( - args=[ - 'test', '!', '-e', test_basedir, - ], - wait=False, - ) - for proc in processes: - assert isinstance(proc.exitstatus, gevent.event.AsyncResult) - try: - proc.exitstatus.get() - except run.CommandFailedError: - # base dir exists - r = proc.remote.run( - args=[ - 'ls', test_basedir, run.Raw('|'), 'wc', '-l' - ], - stdout=StringIO(), - ) - - if int(r.stdout.getvalue()) > 0: - log.error('WARNING: Host %s has stale test directories, these need to be investigated and cleaned up!', - proc.remote.shortname) - - # testdir might be the same as base dir (if test_path is set) - # need to bail out in that case if the testdir exists testdir = teuthology.get_testdir(ctx) processes = ctx.cluster.run( args=[ From 7990f90839500e13b35e0ff406ce5663696e79f5 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 6 Sep 2013 14:05:29 -0700 Subject: [PATCH 08/77] misc: valgrind --num-callers=50 Default is 12, which isn't quite enough. Signed-off-by: Sage Weil (cherry picked from commit 2214fe1845208f931260ed3014350fc0c932c054) Conflicts: teuthology/misc.py --- teuthology/misc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/teuthology/misc.py b/teuthology/misc.py index cf531e6a5..51ddaa88f 100644 --- a/teuthology/misc.py +++ b/teuthology/misc.py @@ -808,6 +808,7 @@ def get_valgrind_args(testdir, name, v): '{tdir}/chdir-coredump'.format(tdir=testdir), 'valgrind', '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir), + '--num-callers=50', '--xml=yes', '--xml-file={vdir}/{n}.log'.format(vdir=val_path, n=name) ] From 65d1062d1e2ae2d202bd9cb6deb2e7532bf706e9 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 26 Sep 2013 13:37:20 -0700 Subject: [PATCH 09/77] valgrind: suppress inet_ntop noise See #6240 Signed-off-by: Sage Weil (cherry picked from commit 2acceef69905173a008a6da075e675b3cb579391) --- teuthology/task/valgrind.supp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/teuthology/task/valgrind.supp b/teuthology/task/valgrind.supp index c10c2d471..22836d88a 100644 --- a/teuthology/task/valgrind.supp +++ b/teuthology/task/valgrind.supp @@ -52,6 +52,20 @@ fun:__strptime_internal ... } +{ + inet_ntop does something lame on local stack + Memcheck:Value8 + ... + fun:inet_ntop + ... +} +{ + inet_ntop does something lame on local stack + Memcheck:Addr8 + ... + fun:inet_ntop + ... +} { dl-lookup.c thing .. 
Invalid write of size 8
   Memcheck:Value8

From 820d7bdfd47ad9599fdac54217edd6ec316bb912 Mon Sep 17 00:00:00 2001
From: Josh Durgin
Date: Sun, 6 Oct 2013 12:45:39 -0700
Subject: [PATCH 10/77] rbd: pin cram tests to cuttlefish version

Signed-off-by: Josh Durgin
---
 suites/rbd/singleton/all/formatted-output.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/suites/rbd/singleton/all/formatted-output.yaml b/suites/rbd/singleton/all/formatted-output.yaml
index accd97272..1733afde3 100644
--- a/suites/rbd/singleton/all/formatted-output.yaml
+++ b/suites/rbd/singleton/all/formatted-output.yaml
@@ -6,4 +6,4 @@ tasks:
 - cram:
     clients:
       client.0:
-      - https://ceph.com/git/?p=ceph.git;a=blob_plain;f=src/test/cli-integration/rbd/formatted-output.t
+      - https://ceph.com/git/?p=ceph.git;a=blob_plain;f=src/test/cli-integration/rbd/formatted-output.t;hb=c7a0477bad6bfbec4ef325295ca0489ec1977926

From 8ebc3e06a711b3915c5787e74e67ea1a40647c9f Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Fri, 18 Oct 2013 16:26:35 -0700
Subject: [PATCH 11/77] valgrind: fix libleveldb suppression on dynamically
 linked leveldb

(The function names don't show up here)

Signed-off-by: Sage Weil
(cherry picked from commit c8ec9feca1db0f95a1f5d933bfe362e400593a65)
---
 teuthology/task/valgrind.supp | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/teuthology/task/valgrind.supp b/teuthology/task/valgrind.supp
index 22836d88a..04d40940d 100644
--- a/teuthology/task/valgrind.supp
+++ b/teuthology/task/valgrind.supp
@@ -6,12 +6,19 @@
    ...
 }
 {
-   ignore all leveldb leaks
+   ignore all static leveldb leaks
    Memcheck:Leak
    ...
    fun:*leveldb*
    ...
 }
+{
+   ignore all dynamic libleveldb leaks
+   Memcheck:Leak
+   ...
+   obj:*libleveldb.so*
+   ...
+}
 {
    ignore libcurl leaks
    Memcheck:Leak
    ...

From b3fdd29a88fab7e9849d1a5a78578eee01f644c5 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Tue, 22 Oct 2013 20:42:21 -0700
Subject: [PATCH 12/77] filestore_idempotent: pull dumpling version of scripts

Signed-off-by: Sage Weil
---
 teuthology/task/filestore_idempotent.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/teuthology/task/filestore_idempotent.py b/teuthology/task/filestore_idempotent.py
index fea790a76..2a97e2368 100644
--- a/teuthology/task/filestore_idempotent.py
+++ b/teuthology/task/filestore_idempotent.py
@@ -42,10 +42,10 @@ def task(ctx, config):
                 'cd', dir,
                 run.Raw('&&'),
                 'wget','-q', '-Orun_seed_to.sh',
-                'http://ceph.com/git/?p=ceph.git;a=blob_plain;f=src/test/filestore/run_seed_to.sh;hb=HEAD',
+                'http://ceph.com/git/?p=ceph.git;a=blob_plain;f=src/test/filestore/run_seed_to.sh;hb=dumpling',
                 run.Raw('&&'),
                 'wget','-q', '-Orun_seed_to_range.sh',
-                'http://ceph.com/git/?p=ceph.git;a=blob_plain;f=src/test/filestore/run_seed_to_range.sh;hb=HEAD',
+                'http://ceph.com/git/?p=ceph.git;a=blob_plain;f=src/test/filestore/run_seed_to_range.sh;hb=dumpling',
                 run.Raw('&&'),
                 'chmod', '+x', 'run_seed_to.sh', 'run_seed_to_range.sh',
                 ]);

From 75c546b870816c3794502eed49ff473c8fb4b06a Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Thu, 24 Oct 2013 15:21:27 -0700
Subject: [PATCH 13/77] rbd/singleton/all/formatted-output: make url end in .t

otherwise cram.py complains

Signed-off-by: Sage Weil
---
 suites/rbd/singleton/all/formatted-output.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/suites/rbd/singleton/all/formatted-output.yaml b/suites/rbd/singleton/all/formatted-output.yaml
index 1733afde3..a120f185a 100644
--- a/suites/rbd/singleton/all/formatted-output.yaml
+++ b/suites/rbd/singleton/all/formatted-output.yaml
@@ -6,4 +6,4 @@ tasks:
 - cram:
     clients:
       client.0:
-      - https://ceph.com/git/?p=ceph.git;a=blob_plain;f=src/test/cli-integration/rbd/formatted-output.t;hb=c7a0477bad6bfbec4ef325295ca0489ec1977926
+      - https://ceph.com/git/?p=ceph.git;a=blob_plain;hb=c7a0477bad6bfbec4ef325295ca0489ec1977926;f=src/test/cli-integration/rbd/formatted-output.t

From 9c2cf0f0855a8a802568bc635a959d60a6816a8f Mon Sep 17 00:00:00 2001
From: Sandon Van Ness
Date: Fri, 25 Oct 2013 13:14:21 -0700
Subject: [PATCH 14/77] Use worker httpd instead of prefork (like ubuntu) on
 rpm distros.

Ubuntu's default apache uses the worker MPM, whereas rpm-based distros
default to prefork. On rpm, use httpd.worker instead of httpd so that
the -X behavior will not be blocked by a single request.

Signed-off-by: Sandon Van Ness
---
 teuthology/task/rgw.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/teuthology/task/rgw.py b/teuthology/task/rgw.py
index 47d589d61..42f4689ce 100644
--- a/teuthology/task/rgw.py
+++ b/teuthology/task/rgw.py
@@ -220,7 +220,7 @@ def start_apache(ctx, config):
         if system_type == 'deb':
             apache_name = 'apache2'
         else:
-            apache_name = '/usr/sbin/httpd'
+            apache_name = '/usr/sbin/httpd.worker'
         proc = remote.run(
             args=[
                 '{tdir}/adjust-ulimits'.format(tdir=testdir),

From 9de76953f9fb3dbb139115cb18972ae21db1c342 Mon Sep 17 00:00:00 2001
From: Sandon Van Ness
Date: Tue, 19 Nov 2013 16:26:11 -0800
Subject: [PATCH 15/77] Don't run QEMU tests on ARM arch.

The kernel doesn't support KVM, and it doesn't make much sense to do
these tests on ARM in the first place...

Signed-off-by: Sandon Van Ness
---
 suites/rbd/librbd/workloads/qemu_bonnie.yaml | 1 +
 suites/rbd/librbd/workloads/qemu_fsstress.yaml | 1 +
 suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled | 1 +
 suites/rbd/librbd/workloads/qemu_tiobench.yaml | 1 +
 suites/rbd/librbd/workloads/qemu_xfstests.yaml | 1 +
 5 files changed, 5 insertions(+)

diff --git a/suites/rbd/librbd/workloads/qemu_bonnie.yaml b/suites/rbd/librbd/workloads/qemu_bonnie.yaml
index 2d42828e7..45368ee94 100644
--- a/suites/rbd/librbd/workloads/qemu_bonnie.yaml
+++ b/suites/rbd/librbd/workloads/qemu_bonnie.yaml
@@ -2,3 +2,4 @@ tasks:
 - qemu:
     all:
       test: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/bonnie.sh
+exclude_arch: armv7l

diff --git a/suites/rbd/librbd/workloads/qemu_fsstress.yaml b/suites/rbd/librbd/workloads/qemu_fsstress.yaml
index 4603a79c8..9ca3e1760 100644
--- a/suites/rbd/librbd/workloads/qemu_fsstress.yaml
+++ b/suites/rbd/librbd/workloads/qemu_fsstress.yaml
@@ -2,3 +2,4 @@ tasks:
 - qemu:
     all:
       test: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/fsstress.sh
+exclude_arch: armv7l

diff --git a/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled b/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled
index c4aefde86..dfd41818a 100644
--- a/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled
+++ b/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled
@@ -3,3 +3,4 @@ tasks:
     all:
       test: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/iozone.sh
       image_size: 20480
+exclude_arch: armv7l

diff --git a/suites/rbd/librbd/workloads/qemu_tiobench.yaml b/suites/rbd/librbd/workloads/qemu_tiobench.yaml
index 84c2b3652..3f17df3e6 100644
--- a/suites/rbd/librbd/workloads/qemu_tiobench.yaml
+++ b/suites/rbd/librbd/workloads/qemu_tiobench.yaml
@@ -2,3 +2,4 @@ tasks:
 - qemu:
     all:
       test: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/tiobench.sh
+exclude_arch: armv7l

diff --git
a/suites/rbd/librbd/workloads/qemu_xfstests.yaml b/suites/rbd/librbd/workloads/qemu_xfstests.yaml index 4abe7c624..fa0fe22b5 100644 --- a/suites/rbd/librbd/workloads/qemu_xfstests.yaml +++ b/suites/rbd/librbd/workloads/qemu_xfstests.yaml @@ -4,3 +4,4 @@ tasks: type: block num_rbd: 2 test: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=qa/run_xfstests_qemu.sh +exclude_arch: armv7l From dfbf8ee695f9bad851f152e2d1ecaf20dc64578d Mon Sep 17 00:00:00 2001 From: Sandon Van Ness Date: Wed, 4 Dec 2013 11:26:20 -0800 Subject: [PATCH 16/77] Set pg warn min per osd to 5. Not set in all branches of teuthology. Signed-off-by: Sandon Van Ness --- teuthology/ceph.conf.template | 2 ++ 1 file changed, 2 insertions(+) diff --git a/teuthology/ceph.conf.template b/teuthology/ceph.conf.template index d57292d14..c69f8a198 100644 --- a/teuthology/ceph.conf.template +++ b/teuthology/ceph.conf.template @@ -12,6 +12,8 @@ ms die on old message = true + mon pg warn min per osd = 5 + [osd] osd journal size = 100 From 4cca76d0f37fcfe3eb93ec20368ec00b69dca097 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 18 Dec 2013 21:08:20 -0800 Subject: [PATCH 17/77] admin_socket: add missing import time Signed-off-by: Sage Weil --- teuthology/task/admin_socket.py | 1 + 1 file changed, 1 insertion(+) diff --git a/teuthology/task/admin_socket.py b/teuthology/task/admin_socket.py index 499525234..01c873c84 100644 --- a/teuthology/task/admin_socket.py +++ b/teuthology/task/admin_socket.py @@ -3,6 +3,7 @@ import json import logging import os +import time from ..orchestra import run from teuthology import misc as teuthology From 3ad67d3dcbda5ebe404330706dfceb35be64ab91 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 23 Dec 2013 19:54:11 +0200 Subject: [PATCH 18/77] rbd: bump the default scratch size for xfstests to 10G autobuild-ceph.git commit 53db7a34aba5 had silently changed the default elevator from cfq to deadline, which made xfstests 167 very unhappy. It looks like with deadline and noop elevators it requires a ~6G scratch partition. Bump the default scratch image size to 10G. Signed-off-by: Ilya Dryomov (cherry picked from commit d781348fd5d319897787d4d43c3af6105b6aa988) Conflicts: teuthology/task/rbd.py --- teuthology/task/rbd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/teuthology/task/rbd.py b/teuthology/task/rbd.py index d107c6f47..2a21e294a 100644 --- a/teuthology/task/rbd.py +++ b/teuthology/task/rbd.py @@ -496,10 +496,10 @@ def xfstests(ctx, config): properties = {} test_image = properties.get('test_image', 'test_image.{role}'.format(role=role)) - test_size = properties.get('test_size', 1200) + test_size = properties.get('test_size', 2000) # 2G test_fmt = properties.get('test_format', 1) scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role)) - scratch_size = properties.get('scratch_size', 1200) + scratch_size = properties.get('scratch_size', 10000) # 10G scratch_fmt = properties.get('scratch_format', 1) images_config[role] = dict( From 43e11bbfe88ff3aaca0bedb08be533463b38aa9f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Sat, 21 Dec 2013 22:21:49 -0800 Subject: [PATCH 19/77] valgrind.supp: ignore libnss3 leaks These just started popping up when I updated the notcmalloc gitbuilder, probably because of an updated libnss version. Whitelist it! 
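An aside on the rbd.py hunk in PATCH 18 above: the image sizes are plain
per-role properties with defaults, in megabytes. A minimal sketch of
that lookup, assuming an empty per-role config:

    # Sketch of the per-role property lookup PATCH 18 tunes; `properties`
    # would come from the task's YAML config, e.g. {'scratch_size': 20000}.
    properties = {}

    test_size = properties.get('test_size', 2000)          # 2G test image
    scratch_size = properties.get('scratch_size', 10000)   # 10G scratch image

    print(test_size, scratch_size)  # -> 2000 10000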
Signed-off-by: Sage Weil (cherry picked from commit 9a17bb5134ae7072089f780cd975761e61c708db) --- teuthology/task/valgrind.supp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/teuthology/task/valgrind.supp b/teuthology/task/valgrind.supp index 04d40940d..1da125920 100644 --- a/teuthology/task/valgrind.supp +++ b/teuthology/task/valgrind.supp @@ -38,6 +38,13 @@ fun:OS_LibInit fun:FCGX_Init } +{ + ignore libnss3 leaks + Memcheck:Leak + ... + obj:*libnss3* + ... +} { strptime suckage Memcheck:Cond From 9b7e793df23b23971cac8549851814f2979cea3b Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 10 Jan 2014 11:00:55 -0800 Subject: [PATCH 20/77] thrashosds: change min_in from 2 -> 3 See #7171. In rare cases CRUSH can't handle it when only 2/6 of the OSDs are marked in. Avoid those situations for now. Signed-off-by: Sage Weil (cherry picked from commit 495f2163a8debe6323b1b67737a47a0b31172f07) --- teuthology/task/ceph_manager.py | 2 +- teuthology/task/thrashosds.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/teuthology/task/ceph_manager.py b/teuthology/task/ceph_manager.py index cff36ede3..4c904eeef 100644 --- a/teuthology/task/ceph_manager.py +++ b/teuthology/task/ceph_manager.py @@ -24,7 +24,7 @@ def __init__(self, manager, config, logger=None): if self.config.get('powercycle'): self.revive_timeout += 120 self.clean_wait = self.config.get('clean_wait', 0) - self.minin = self.config.get("min_in", 2) + self.minin = self.config.get("min_in", 3) num_osds = self.in_osds + self.out_osds self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * num_osds diff --git a/teuthology/task/thrashosds.py b/teuthology/task/thrashosds.py index 1702dbdfd..98c7ab251 100644 --- a/teuthology/task/thrashosds.py +++ b/teuthology/task/thrashosds.py @@ -19,7 +19,7 @@ def task(ctx, config): The config is optional, and is a dict containing some or all of: - min_in: (default 2) the minimum number of OSDs to keep in the + min_in: (default 3) the minimum number of OSDs to keep in the cluster min_out: (default 0) the minimum number of OSDs to keep out of the From 886adcc27965ad996c91c3d7395665446adc4143 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 5 Feb 2014 15:18:49 -0800 Subject: [PATCH 21/77] valgrind: sync up suppressions with latest master Signed-off-by: Sage Weil --- teuthology/task/valgrind.supp | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/teuthology/task/valgrind.supp b/teuthology/task/valgrind.supp index 1da125920..7ede033e2 100644 --- a/teuthology/task/valgrind.supp +++ b/teuthology/task/valgrind.supp @@ -1,3 +1,23 @@ +{ + tcmalloc: msync heap allocation points to uninit bytes + Memcheck:Param + msync(start) + obj:/lib/x86_64-linux-gnu/libpthread-2.15.so + obj:/usr/lib/libunwind.so.7.0.0 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm + obj:/usr/lib/libtcmalloc.so.0.1.0 +} +{ + tcmalloc: string + Memcheck:Leak + ... + obj:*tcmalloc* + fun:call_init.part.0 + ... +} { ceph global: deliberate onexit leak Memcheck:Leak @@ -6,21 +26,21 @@ ... } { - ignore all static leveldb leaks + libleveldb: ignore all static leveldb leaks Memcheck:Leak ... fun:*leveldb* ... } { - ignore all dynamic libleveldb leaks + libleveldb: ignore all dynamic libleveldb leaks Memcheck:Leak ... obj:*libleveldb.so* ... } { - ignore libcurl leaks + libcurl: ignore libcurl leaks Memcheck:Leak ... 
fun:*curl_global_init

From 4b3c6d49c62862ac104498e717f1cfcd47534959 Mon Sep 17 00:00:00 2001
From: Sandon Van Ness
Date: Tue, 11 Mar 2014 18:15:12 -0700
Subject: [PATCH 22/77] Handle newer btrfs-tools.

Newer btrfs userland tools need a -f (like xfs) instead of a prompt
for yes. Trusty needs this change.

Signed-off-by: Sandon Van Ness
---
 teuthology/task/ceph.py | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/teuthology/task/ceph.py b/teuthology/task/ceph.py
index 4db72943f..f8836badc 100644
--- a/teuthology/task/ceph.py
+++ b/teuthology/task/ceph.py
@@ -581,7 +581,17 @@ def cluster(ctx, config):
                 ],
             stdout=StringIO(),
             )
-        remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
+
+        try:
+            remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
+        except run.CommandFailedError:
+            # Newer btrfs-tools doesn't prompt for overwrite, use -f
+            if '-f' not in mount_options:
+                mkfs_options.append('-f')
+                mkfs = ['mkfs.%s' % fs] + mkfs_options
+                log.info('%s on %s on %s' % (mkfs, dev, remote))
+            remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
+
         log.info('mount %s on %s -o %s' % (dev, remote, ','.join(mount_options)))
         remote.run(

From 8a8cd1098127805217d69f2c55fe94777e67a9bd Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Tue, 11 Feb 2014 09:44:17 -0800
Subject: [PATCH 23/77] don't use notcmalloc flavor for valgrind

We now have the suppressions to avoid this. Yay!

Signed-off-by: Sage Weil

Conflicts:
	suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml
---
 suites/fs/verify/validater/valgrind.yaml | 3 ---
 suites/rados/verify/validater/valgrind.yaml | 3 ---
 suites/rgw/verify/tasks/rgw_s3tests.yaml | 2 --
 suites/rgw/verify/tasks/rgw_swift.yaml | 2 --
 suites/rgw/verify/validater/valgrind.yaml | 3 ---
 suites/smoke/verify/tasks/rgw_s3tests.yaml | 2 --
 suites/smoke/verify/validater/valgrind.yaml | 3 ---
 7 files changed, 18 deletions(-)

diff --git a/suites/fs/verify/validater/valgrind.yaml b/suites/fs/verify/validater/valgrind.yaml
index c3d3aed48..106932169 100644
--- a/suites/fs/verify/validater/valgrind.yaml
+++ b/suites/fs/verify/validater/valgrind.yaml
@@ -1,7 +1,4 @@
 overrides:
-  install:
-
ceph: - flavor: notcmalloc ceph: valgrind: mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] diff --git a/suites/smoke/verify/tasks/rgw_s3tests.yaml b/suites/smoke/verify/tasks/rgw_s3tests.yaml index cb84246f0..6ff4e8c34 100644 --- a/suites/smoke/verify/tasks/rgw_s3tests.yaml +++ b/suites/smoke/verify/tasks/rgw_s3tests.yaml @@ -1,7 +1,5 @@ tasks: - install: - ceph: - flavor: notcmalloc - ceph: - rgw: client.0: diff --git a/suites/smoke/verify/validater/valgrind.yaml b/suites/smoke/verify/validater/valgrind.yaml index 7b8f7a286..518d72b0f 100644 --- a/suites/smoke/verify/validater/valgrind.yaml +++ b/suites/smoke/verify/validater/valgrind.yaml @@ -1,7 +1,4 @@ overrides: - install: - ceph: - flavor: notcmalloc ceph: valgrind: mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] From 6f4d10072213995233e18b354e0f50b2d8672477 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Sat, 29 Mar 2014 09:38:31 -0700 Subject: [PATCH 24/77] valgrind.supp: be less picky about library versions ...so that this works on trusty Signed-off-by: Sage Weil (cherry picked from commit f895d16c9e2fd59aab446254e53480cdb91092a1) --- teuthology/task/valgrind.supp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/teuthology/task/valgrind.supp b/teuthology/task/valgrind.supp index 7ede033e2..d634334ae 100644 --- a/teuthology/task/valgrind.supp +++ b/teuthology/task/valgrind.supp @@ -2,13 +2,13 @@ tcmalloc: msync heap allocation points to uninit bytes Memcheck:Param msync(start) - obj:/lib/x86_64-linux-gnu/libpthread-2.15.so - obj:/usr/lib/libunwind.so.7.0.0 + obj:/lib/x86_64-linux-gnu/libpthread-* + obj:/usr/lib/libunwind.so.* fun:_ULx86_64_step fun:_Z13GetStackTracePPvii fun:_ZN8tcmalloc8PageHeap8GrowHeapEm fun:_ZN8tcmalloc8PageHeap3NewEm - obj:/usr/lib/libtcmalloc.so.0.1.0 + obj:/usr/lib/libtcmalloc.so.* } { tcmalloc: string From aeb2e214aee269bb6f042f7ea1127c4407379410 Mon Sep 17 00:00:00 2001 From: Sandon Van Ness Date: Tue, 22 Apr 2014 12:35:11 -0700 Subject: [PATCH 25/77] Fix for #8115 Increase boot disk size per #8115 where monitors shut down due to / being full on vm machines. Signed-off-by: Sandon Van Ness --- teuthology/lock.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/teuthology/lock.py b/teuthology/lock.py index b54a9bf96..b31194190 100644 --- a/teuthology/lock.py +++ b/teuthology/lock.py @@ -579,7 +579,7 @@ def create_if_vm(ctx, machine_name): distroversion = default_os_version[distro] file_info = {} - file_info['disk-size'] = lcnfg.get('disk-size', '30G') + file_info['disk-size'] = lcnfg.get('disk-size', '100G') file_info['ram'] = lcnfg.get('ram', '1.9G') file_info['cpus'] = lcnfg.get('cpus', 1) file_info['networks'] = lcnfg.get('networks', From 090b67d9fd619d5906e44f2e63890ab95ef3398d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 28 Apr 2014 14:04:28 -0700 Subject: [PATCH 26/77] Revert "valgrind.supp: be less picky about library versions" This reverts commit f895d16c9e2fd59aab446254e53480cdb91092a1. 
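A note on the lock.py hunk in PATCH 25 above: create_if_vm() fills in
VM parameters from the lock config with fallbacks, and that patch only
raises the disk-size default. A minimal sketch of those defaults, where
lcnfg stands in for the downburst section of the config:

    # Sketch of the fallbacks PATCH 25 adjusts (disk-size: 30G -> 100G).
    # An empty config dict yields the defaults.
    lcnfg = {}

    file_info = {
        'disk-size': lcnfg.get('disk-size', '100G'),
        'ram': lcnfg.get('ram', '1.9G'),
        'cpus': lcnfg.get('cpus', 1),
    }
    print(file_info)  # -> {'disk-size': '100G', 'ram': '1.9G', 'cpus': 1}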
(cherry picked from commit bab84d45abdaac101a517eb85540c706e3f58363) --- teuthology/task/valgrind.supp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/teuthology/task/valgrind.supp b/teuthology/task/valgrind.supp index d634334ae..7ede033e2 100644 --- a/teuthology/task/valgrind.supp +++ b/teuthology/task/valgrind.supp @@ -2,13 +2,13 @@ tcmalloc: msync heap allocation points to uninit bytes Memcheck:Param msync(start) - obj:/lib/x86_64-linux-gnu/libpthread-* - obj:/usr/lib/libunwind.so.* + obj:/lib/x86_64-linux-gnu/libpthread-2.15.so + obj:/usr/lib/libunwind.so.7.0.0 fun:_ULx86_64_step fun:_Z13GetStackTracePPvii fun:_ZN8tcmalloc8PageHeap8GrowHeapEm fun:_ZN8tcmalloc8PageHeap3NewEm - obj:/usr/lib/libtcmalloc.so.* + obj:/usr/lib/libtcmalloc.so.0.1.0 } { tcmalloc: string From aa16ff2b53b778c7066af323e7c221e31c3317bf Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 28 Apr 2014 15:56:57 -0700 Subject: [PATCH 27/77] valgrind: fix tcmalloc suppression for trusty Fixes: #8225 Signed-off-by: Sage Weil (cherry picked from commit f261687f292df47b7a5296814480713c3c3d306f) --- teuthology/task/valgrind.supp | 36 ++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/teuthology/task/valgrind.supp b/teuthology/task/valgrind.supp index 7ede033e2..b497be238 100644 --- a/teuthology/task/valgrind.supp +++ b/teuthology/task/valgrind.supp @@ -1,5 +1,5 @@ { - tcmalloc: msync heap allocation points to uninit bytes + tcmalloc: msync heap allocation points to uninit bytes (precise) Memcheck:Param msync(start) obj:/lib/x86_64-linux-gnu/libpthread-2.15.so @@ -10,6 +10,40 @@ fun:_ZN8tcmalloc8PageHeap3NewEm obj:/usr/lib/libtcmalloc.so.0.1.0 } +{ + tcmalloc: msync heap allocation points to uninit bytes (trusty) + Memcheck:Param + msync(start) + obj:/lib/x86_64-linux-gnu/libpthread-2.19.so + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm + fun:_ZN8tcmalloc15CentralFreeList8PopulateEv + fun:_ZN8tcmalloc15CentralFreeList18FetchFromSpansSafeEv + fun:_ZN8tcmalloc15CentralFreeList11RemoveRangeEPPvS2_i +} +{ + tcmalloc: msync heap allocation points to uninit bytes 2 (trusty) + Memcheck:Param + msync(start) + fun:__msync_nocancel + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm + fun:_ZN8tcmalloc15CentralFreeList8PopulateEv + fun:_ZN8tcmalloc15CentralFreeList18FetchFromSpansSafeEv + fun:_ZN8tcmalloc15CentralFreeList11RemoveRangeEPPvS2_i +} { tcmalloc: string Memcheck:Leak From 6eec6f17032a1b358a5ddd8e654b1524296ea102 Mon Sep 17 00:00:00 2001 From: Zack Cerza Date: Thu, 1 May 2014 11:21:08 -0500 Subject: [PATCH 28/77] Add suite name to job config Signed-off-by: Zack Cerza (cherry picked from commit ab9645f97bcefc43006728b4f7d17322fcc33d82) --- schedule_suite.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/schedule_suite.sh b/schedule_suite.sh index ca620556c..9509af327 100755 --- a/schedule_suite.sh +++ b/schedule_suite.sh @@ -71,6 +71,7 @@ $kernelvalue nuke-on-error: true machine_type: $mtype os_type: $distro 
+suite: $nicesuite tasks: - chef: - clock.check: From f34ebd54349d3ecd6ffd3a598be92a2db02bcc97 Mon Sep 17 00:00:00 2001 From: Zack Cerza Date: Thu, 1 May 2014 11:48:54 -0500 Subject: [PATCH 29/77] Add branch name to job config Signed-off-by: Zack Cerza (cherry picked from commit d1b93530b30e1a9baefd3fc95941090c83fddce1) --- schedule_suite.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/schedule_suite.sh b/schedule_suite.sh index 9509af327..961460563 100755 --- a/schedule_suite.sh +++ b/schedule_suite.sh @@ -71,6 +71,7 @@ $kernelvalue nuke-on-error: true machine_type: $mtype os_type: $distro +branch: $ceph suite: $nicesuite tasks: - chef: From 80b8920853f1182ff18bada67c7afbebc0fd7030 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 12 May 2014 10:55:58 +0100 Subject: [PATCH 30/77] tasks: Use '_' instead of '-' in names Python modules are not allowed to contain hyphens in the name. Using hyphens only works here because we're using low level __import__. Change run_tasks to replace '-' with '_' when reading configs, and rename the task modules to have valid python module names. Aside from general hygiene, the motivation to do this is to allow one task to build on code in another task by importing it. Signed-off-by: John Spray (cherry picked from commit 79dfe2cbefbbf14b69ae10c44a89a4fe45fedbe5) --- teuthology/run_tasks.py | 4 ++++ teuthology/task/{ceph-deploy.py => ceph_deploy.py} | 0 teuthology/task/{ceph-fuse.py => ceph_fuse.py} | 0 teuthology/task/{cifs-mount.py => cifs_mount.py} | 0 teuthology/task/{radosgw-admin.py => radosgw_admin.py} | 0 .../task/{radosgw-admin-rest.py => radosgw_admin_rest.py} | 0 teuthology/task/{radosgw-agent.py => radosgw_agent.py} | 0 teuthology/task/{rest-api.py => rest_api.py} | 0 teuthology/task/{rgw-logsocket.py => rgw_logsocket.py} | 0 9 files changed, 4 insertions(+) rename teuthology/task/{ceph-deploy.py => ceph_deploy.py} (100%) rename teuthology/task/{ceph-fuse.py => ceph_fuse.py} (100%) rename teuthology/task/{cifs-mount.py => cifs_mount.py} (100%) rename teuthology/task/{radosgw-admin.py => radosgw_admin.py} (100%) rename teuthology/task/{radosgw-admin-rest.py => radosgw_admin_rest.py} (100%) rename teuthology/task/{radosgw-agent.py => radosgw_agent.py} (100%) rename teuthology/task/{rest-api.py => rest_api.py} (100%) rename teuthology/task/{rgw-logsocket.py => rgw_logsocket.py} (100%) diff --git a/teuthology/run_tasks.py b/teuthology/run_tasks.py index 8193839a6..6e53023f7 100644 --- a/teuthology/run_tasks.py +++ b/teuthology/run_tasks.py @@ -8,6 +8,10 @@ def run_one_task(taskname, **kwargs): subtask = 'task' if '.' 
in taskname: (submod, subtask) = taskname.rsplit('.', 1) + + # Teuthology configs may refer to modules like ceph_deploy as ceph-deploy + submod = submod.replace('-', '_') + parent = __import__('teuthology.task', globals(), locals(), [submod], 0) mod = getattr(parent, submod) fn = getattr(mod, subtask) diff --git a/teuthology/task/ceph-deploy.py b/teuthology/task/ceph_deploy.py similarity index 100% rename from teuthology/task/ceph-deploy.py rename to teuthology/task/ceph_deploy.py diff --git a/teuthology/task/ceph-fuse.py b/teuthology/task/ceph_fuse.py similarity index 100% rename from teuthology/task/ceph-fuse.py rename to teuthology/task/ceph_fuse.py diff --git a/teuthology/task/cifs-mount.py b/teuthology/task/cifs_mount.py similarity index 100% rename from teuthology/task/cifs-mount.py rename to teuthology/task/cifs_mount.py diff --git a/teuthology/task/radosgw-admin.py b/teuthology/task/radosgw_admin.py similarity index 100% rename from teuthology/task/radosgw-admin.py rename to teuthology/task/radosgw_admin.py diff --git a/teuthology/task/radosgw-admin-rest.py b/teuthology/task/radosgw_admin_rest.py similarity index 100% rename from teuthology/task/radosgw-admin-rest.py rename to teuthology/task/radosgw_admin_rest.py diff --git a/teuthology/task/radosgw-agent.py b/teuthology/task/radosgw_agent.py similarity index 100% rename from teuthology/task/radosgw-agent.py rename to teuthology/task/radosgw_agent.py diff --git a/teuthology/task/rest-api.py b/teuthology/task/rest_api.py similarity index 100% rename from teuthology/task/rest-api.py rename to teuthology/task/rest_api.py diff --git a/teuthology/task/rgw-logsocket.py b/teuthology/task/rgw_logsocket.py similarity index 100% rename from teuthology/task/rgw-logsocket.py rename to teuthology/task/rgw_logsocket.py From 0527a64ae146e85597fd742938016661e6bf96bc Mon Sep 17 00:00:00 2001 From: Yuri Weinstein Date: Wed, 2 Jul 2014 15:21:18 -0700 Subject: [PATCH 31/77] Added dumpling upgrade tests to dumpling branch from master Signed-off-by: Yuri Weinstein --- suites/upgrade/dumpling/fs/% | 0 .../upgrade/dumpling/fs/0-cluster/start.yaml | 17 +++++++++ .../cuttlefish.v0.67.1.yaml | 11 ++++++ .../fs/1-dumpling-install/v0.67.1.yaml | 7 ++++ .../fs/1-dumpling-install/v0.67.3.yaml | 7 ++++ .../fs/1-dumpling-install/v0.67.5.yaml | 7 ++++ .../fs/1-dumpling-install/v0.67.7.yaml | 7 ++++ .../fs/1-dumpling-install/v0.67.9.yaml | 7 ++++ .../dumpling/fs/2-workload/blogbench.yaml | 5 +++ .../upgrade-mds-mon-osd.yaml | 33 +++++++++++++++++ .../upgrade-mon-osd-mds.yaml | 33 +++++++++++++++++ .../upgrade-osd-mon-mds.yaml | 33 +++++++++++++++++ .../dumpling/fs/4-final/monthrash.yaml | 10 ++++++ .../dumpling/fs/4-final/osdthrash.yaml | 17 +++++++++ suites/upgrade/dumpling/rados/% | 0 .../dumpling/rados/0-cluster/start.yaml | 17 +++++++++ .../cuttlefish.v0.67.1.yaml | 11 ++++++ .../rados/1-dumpling-install/v0.67.1.yaml | 7 ++++ .../rados/1-dumpling-install/v0.67.3.yaml | 7 ++++ .../rados/1-dumpling-install/v0.67.5.yaml | 7 ++++ .../rados/1-dumpling-install/v0.67.7.yaml | 7 ++++ .../rados/1-dumpling-install/v0.67.9.yaml | 7 ++++ .../dumpling/rados/2-workload/testrados.yaml | 13 +++++++ .../upgrade-mds-mon-osd.yaml | 33 +++++++++++++++++ .../upgrade-mon-osd-mds.yaml | 33 +++++++++++++++++ .../upgrade-osd-mon-mds.yaml | 35 ++++++++++++++++++ .../dumpling/rados/4-final/monthrash.yaml | 9 +++++ .../dumpling/rados/4-final/osdthrash.yaml | 23 ++++++++++++ suites/upgrade/dumpling/rbd/% | 0 .../upgrade/dumpling/rbd/0-cluster/start.yaml | 17 +++++++++ 
.../cuttlefish.v0.67.1.yaml | 11 ++++++ .../rbd/1-dumpling-install/v0.67.1.yaml | 7 ++++ .../rbd/1-dumpling-install/v0.67.3.yaml | 7 ++++ .../rbd/1-dumpling-install/v0.67.5.yaml | 7 ++++ .../rbd/1-dumpling-install/v0.67.7.yaml | 7 ++++ .../rbd/1-dumpling-install/v0.67.9.yaml | 7 ++++ .../upgrade/dumpling/rbd/2-workload/rbd.yaml | 14 ++++++++ .../upgrade-mds-mon-osd.yaml | 33 +++++++++++++++++ .../upgrade-mon-osd-mds.yaml | 33 +++++++++++++++++ .../upgrade-osd-mon-mds.yaml | 33 +++++++++++++++++ .../dumpling/rbd/4-final/monthrash.yaml | 11 ++++++ .../dumpling/rbd/4-final/osdthrash.yaml | 16 +++++++++ suites/upgrade/dumpling/rgw/% | 0 .../upgrade/dumpling/rgw/0-cluster/start.yaml | 17 +++++++++ .../cuttlefish.v0.67.1.yaml | 11 ++++++ .../rgw/1-dumpling-install/v0.67.1.yaml | 7 ++++ .../rgw/1-dumpling-install/v0.67.3.yaml | 7 ++++ .../rgw/1-dumpling-install/v0.67.5.yaml | 7 ++++ .../rgw/1-dumpling-install/v0.67.7.yaml | 7 ++++ .../rgw/1-dumpling-install/v0.67.9.yaml | 7 ++++ .../dumpling/rgw/2-workload/testrgw.yaml | 7 ++++ .../upgrade-mds-mon-osd.yaml | 36 +++++++++++++++++++ .../upgrade-mon-osd-mds.yaml | 36 +++++++++++++++++++ .../upgrade-osd-mon-mds.yaml | 36 +++++++++++++++++++ .../dumpling/rgw/4-final/monthrash.yaml | 8 +++++ .../dumpling/rgw/4-final/osdthrash.yaml | 15 ++++++++ 56 files changed, 807 insertions(+) create mode 100644 suites/upgrade/dumpling/fs/% create mode 100644 suites/upgrade/dumpling/fs/0-cluster/start.yaml create mode 100644 suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml create mode 100644 suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml create mode 100644 suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml create mode 100644 suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml create mode 100644 suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml create mode 100644 suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.9.yaml create mode 100644 suites/upgrade/dumpling/fs/2-workload/blogbench.yaml create mode 100644 suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml create mode 100644 suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml create mode 100644 suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml create mode 100644 suites/upgrade/dumpling/fs/4-final/monthrash.yaml create mode 100644 suites/upgrade/dumpling/fs/4-final/osdthrash.yaml create mode 100644 suites/upgrade/dumpling/rados/% create mode 100644 suites/upgrade/dumpling/rados/0-cluster/start.yaml create mode 100644 suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml create mode 100644 suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml create mode 100644 suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml create mode 100644 suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml create mode 100644 suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml create mode 100644 suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.9.yaml create mode 100644 suites/upgrade/dumpling/rados/2-workload/testrados.yaml create mode 100644 suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml create mode 100644 suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml create mode 100644 suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml create mode 100644 suites/upgrade/dumpling/rados/4-final/monthrash.yaml create mode 100644 suites/upgrade/dumpling/rados/4-final/osdthrash.yaml create mode 100644 
suites/upgrade/dumpling/rbd/% create mode 100644 suites/upgrade/dumpling/rbd/0-cluster/start.yaml create mode 100644 suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml create mode 100644 suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml create mode 100644 suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml create mode 100644 suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml create mode 100644 suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml create mode 100644 suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.9.yaml create mode 100644 suites/upgrade/dumpling/rbd/2-workload/rbd.yaml create mode 100644 suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml create mode 100644 suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml create mode 100644 suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml create mode 100644 suites/upgrade/dumpling/rbd/4-final/monthrash.yaml create mode 100644 suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml create mode 100644 suites/upgrade/dumpling/rgw/% create mode 100644 suites/upgrade/dumpling/rgw/0-cluster/start.yaml create mode 100644 suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml create mode 100644 suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml create mode 100644 suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml create mode 100644 suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml create mode 100644 suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml create mode 100644 suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.9.yaml create mode 100644 suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml create mode 100644 suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml create mode 100644 suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml create mode 100644 suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml create mode 100644 suites/upgrade/dumpling/rgw/4-final/monthrash.yaml create mode 100644 suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml diff --git a/suites/upgrade/dumpling/fs/% b/suites/upgrade/dumpling/fs/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/dumpling/fs/0-cluster/start.yaml b/suites/upgrade/dumpling/fs/0-cluster/start.yaml new file mode 100644 index 000000000..c1acc4e8a --- /dev/null +++ b/suites/upgrade/dumpling/fs/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml new file mode 100644 index 000000000..032340ba2 --- /dev/null +++ b/suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: cuttlefish +- ceph: +- install.upgrade: + all: + tag: v0.67.1 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml new file mode 100644 index 000000000..a5bf1fa90 --- /dev/null +++ b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.1 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git 
a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml new file mode 100644 index 000000000..d0c186119 --- /dev/null +++ b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.3 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml new file mode 100644 index 000000000..611b6d6b8 --- /dev/null +++ b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml new file mode 100644 index 000000000..7cb8fcc22 --- /dev/null +++ b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.7 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.9.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.9.yaml new file mode 100644 index 000000000..41523ac82 --- /dev/null +++ b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.9.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.9 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/2-workload/blogbench.yaml b/suites/upgrade/dumpling/fs/2-workload/blogbench.yaml new file mode 100644 index 000000000..0cd59eaaf --- /dev/null +++ b/suites/upgrade/dumpling/fs/2-workload/blogbench.yaml @@ -0,0 +1,5 @@ +workload: + workunit: + clients: + all: + - suites/blogbench.sh diff --git a/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 000000000..38bba9189 --- /dev/null +++ b/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 000000000..5b617fdfd --- /dev/null +++ b/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] 
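The 3-upgrade-sequence fragments above (and the parallel ones that follow for
rados, rbd, and rgw) are plain YAML that the runner executes step by step. A
quick way to sanity-check a fragment's restart order is sketched below; the
snippet is illustrative tooling, not part of teuthology, it assumes PyYAML is
installed, and the fragment is abridged from the files above:

    # Parse an abridged upgrade-sequence fragment and list the daemons
    # in restart order. Assumes PyYAML; fragment abridged from above.
    import yaml

    fragment = """
    upgrade-sequence:
      sequential:
      - install.upgrade:
          all:
            branch: dumpling
      - ceph.restart: [mon.a]
      - sleep:
          duration: 60
      - ceph.restart: [osd.0]
    """

    steps = yaml.safe_load(fragment)['upgrade-sequence']['sequential']
    restart_order = []
    for step in steps:
        restart_order.extend(step.get('ceph.restart', []))
    print(restart_order)   # ['mon.a', 'osd.0']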
diff --git a/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 000000000..91c146a20 --- /dev/null +++ b/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/suites/upgrade/dumpling/fs/4-final/monthrash.yaml b/suites/upgrade/dumpling/fs/4-final/monthrash.yaml new file mode 100644 index 000000000..13af446eb --- /dev/null +++ b/suites/upgrade/dumpling/fs/4-final/monthrash.yaml @@ -0,0 +1,10 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- ceph-fuse: +- workunit: + clients: + client.0: + - suites/dbench.sh + diff --git a/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml b/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml new file mode 100644 index 000000000..dbd7191e3 --- /dev/null +++ b/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- ceph-fuse: +- workunit: + clients: + all: + - suites/iogen.sh + diff --git a/suites/upgrade/dumpling/rados/% b/suites/upgrade/dumpling/rados/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/dumpling/rados/0-cluster/start.yaml b/suites/upgrade/dumpling/rados/0-cluster/start.yaml new file mode 100644 index 000000000..c1acc4e8a --- /dev/null +++ b/suites/upgrade/dumpling/rados/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml new file mode 100644 index 000000000..032340ba2 --- /dev/null +++ b/suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: cuttlefish +- ceph: +- install.upgrade: + all: + tag: v0.67.1 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml new file mode 100644 index 000000000..a5bf1fa90 --- /dev/null +++ b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.1 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml new file mode 100644 index 000000000..d0c186119 --- /dev/null +++ b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.3 +- ceph: +- parallel: + 
- workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml new file mode 100644 index 000000000..611b6d6b8 --- /dev/null +++ b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml new file mode 100644 index 000000000..7cb8fcc22 --- /dev/null +++ b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.7 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.9.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.9.yaml new file mode 100644 index 000000000..41523ac82 --- /dev/null +++ b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.9.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.9 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/2-workload/testrados.yaml b/suites/upgrade/dumpling/rados/2-workload/testrados.yaml new file mode 100644 index 000000000..8eaab19fd --- /dev/null +++ b/suites/upgrade/dumpling/rados/2-workload/testrados.yaml @@ -0,0 +1,13 @@ +workload: + rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + diff --git a/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 000000000..38bba9189 --- /dev/null +++ b/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 000000000..5b617fdfd --- /dev/null +++ b/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml 
b/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 000000000..801bab9f1 --- /dev/null +++ b/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,35 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 diff --git a/suites/upgrade/dumpling/rados/4-final/monthrash.yaml b/suites/upgrade/dumpling/rados/4-final/monthrash.yaml new file mode 100644 index 000000000..810ba1b30 --- /dev/null +++ b/suites/upgrade/dumpling/rados/4-final/monthrash.yaml @@ -0,0 +1,9 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + client.0: + - rados/test.sh + diff --git a/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml b/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml new file mode 100644 index 000000000..f81504233 --- /dev/null +++ b/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml @@ -0,0 +1,23 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + diff --git a/suites/upgrade/dumpling/rbd/% b/suites/upgrade/dumpling/rbd/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/dumpling/rbd/0-cluster/start.yaml b/suites/upgrade/dumpling/rbd/0-cluster/start.yaml new file mode 100644 index 000000000..c1acc4e8a --- /dev/null +++ b/suites/upgrade/dumpling/rbd/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml new file mode 100644 index 000000000..032340ba2 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: cuttlefish +- ceph: +- install.upgrade: + all: + tag: v0.67.1 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml new file mode 100644 index 000000000..a5bf1fa90 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.1 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml new file mode 100644 index 000000000..d0c186119 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.3 +- ceph: 
+- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml new file mode 100644 index 000000000..611b6d6b8 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml new file mode 100644 index 000000000..7cb8fcc22 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.7 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.9.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.9.yaml new file mode 100644 index 000000000..41523ac82 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.9.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.9 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/2-workload/rbd.yaml b/suites/upgrade/dumpling/rbd/2-workload/rbd.yaml new file mode 100644 index 000000000..ce2fabe03 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/2-workload/rbd.yaml @@ -0,0 +1,14 @@ +workload: + sequential: + - workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - workunit: + clients: + client.0: + - cls/test_cls_rbd.sh + + diff --git a/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 000000000..38bba9189 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 000000000..5b617fdfd --- /dev/null +++ b/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 
index 000000000..91c146a20 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/suites/upgrade/dumpling/rbd/4-final/monthrash.yaml b/suites/upgrade/dumpling/rbd/4-final/monthrash.yaml new file mode 100644 index 000000000..593191c24 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/4-final/monthrash.yaml @@ -0,0 +1,11 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + client.0: + - rbd/copy.sh + env: + RBD_CREATE_ARGS: --new-format + diff --git a/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml b/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml new file mode 100644 index 000000000..575fd7922 --- /dev/null +++ b/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- workunit: + clients: + client.0: + - rbd/test_lock_fence.sh + diff --git a/suites/upgrade/dumpling/rgw/% b/suites/upgrade/dumpling/rgw/% new file mode 100644 index 000000000..e69de29bb diff --git a/suites/upgrade/dumpling/rgw/0-cluster/start.yaml b/suites/upgrade/dumpling/rgw/0-cluster/start.yaml new file mode 100644 index 000000000..c1acc4e8a --- /dev/null +++ b/suites/upgrade/dumpling/rgw/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml new file mode 100644 index 000000000..032340ba2 --- /dev/null +++ b/suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: cuttlefish +- ceph: +- install.upgrade: + all: + tag: v0.67.1 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml new file mode 100644 index 000000000..a5bf1fa90 --- /dev/null +++ b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.1 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml new file mode 100644 index 000000000..d0c186119 --- /dev/null +++ b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.3 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml new file mode 
100644 index 000000000..611b6d6b8 --- /dev/null +++ b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml new file mode 100644 index 000000000..7cb8fcc22 --- /dev/null +++ b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.7 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.9.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.9.yaml new file mode 100644 index 000000000..41523ac82 --- /dev/null +++ b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.9.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.9 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml b/suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml new file mode 100644 index 000000000..77d7e1653 --- /dev/null +++ b/suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml @@ -0,0 +1,7 @@ +workload: + sequential: + - rgw: [client.0] + - s3tests: + client.0: + rgw_server: client.0 + diff --git a/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 000000000..ff9129046 --- /dev/null +++ b/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 000000000..75face28d --- /dev/null +++ b/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 000000000..a08c669bf --- /dev/null +++ b/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + 
sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/dumpling/rgw/4-final/monthrash.yaml b/suites/upgrade/dumpling/rgw/4-final/monthrash.yaml new file mode 100644 index 000000000..9361edc80 --- /dev/null +++ b/suites/upgrade/dumpling/rgw/4-final/monthrash.yaml @@ -0,0 +1,8 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- swift: + client.0: + rgw_server: client.0 + diff --git a/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml b/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml new file mode 100644 index 000000000..6cf6d861d --- /dev/null +++ b/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- swift: + client.0: + rgw_server: client.0 + From d37c955495d5936a66fb43dbe69d29dbaf57fd1a Mon Sep 17 00:00:00 2001 From: Warren Usui Date: Fri, 28 Mar 2014 16:04:05 -0700 Subject: [PATCH 32/77] Fix kvm issues for Trusty Change kvm reference to qemu-system-x86_64 and use raw format in kvm/qemu command. Tested on both Trusty and Precise. Fixes: 7825 Signed-off-by: Warren Usui (cherry picked from commit eef2bf672b76a6c0243687c7acd20fd99e7dd44a) --- teuthology/task/qemu.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/teuthology/task/qemu.py b/teuthology/task/qemu.py index f69e851ae..bd4fd6b91 100644 --- a/teuthology/task/qemu.py +++ b/teuthology/task/qemu.py @@ -167,7 +167,7 @@ def run_qemu(ctx, config): '{tdir}/archive/coverage'.format(tdir=testdir), '{tdir}/daemon-helper'.format(tdir=testdir), 'term', - 'kvm', '-enable-kvm', '-nographic', + 'qemu-system-x86_64', '-enable-kvm', '-nographic', '-m', str(client_config.get('memory', DEFAULT_MEM)), # base OS device '-drive', @@ -194,7 +194,7 @@ def run_qemu(ctx, config): for i in xrange(client_config.get('num_rbd', DEFAULT_NUM_RBD)): args.extend([ '-drive', - 'file=rbd:rbd/{img}:id={id},format=rbd,if=virtio,cache={cachemode}'.format( + 'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format( img='{client}.{num}'.format(client=client, num=i), id=client[len('client.'):], cachemode=cachemode, From fcc0b2451b47793a64fc4cd4675fef667a4a5b45 Mon Sep 17 00:00:00 2001 From: Josh Durgin Date: Tue, 29 Jul 2014 14:11:38 -0700 Subject: [PATCH 33/77] Don't thrash primary affinity primary affinity does not exist in dumpling. 
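The qemu.py hunk above swaps the bare kvm wrapper for qemu-system-x86_64 and
pins the rbd drive to format=raw. A hedged reconstruction of the resulting
drive argument follows; the helper function is hypothetical, and only the
format string itself comes from the diff:

    # Hypothetical helper rebuilding the drive argument from the
    # qemu.py hunk above; only the format string is taken from the diff.
    def rbd_drive_arg(client, num, cachemode):
        img = '{client}.{num}'.format(client=client, num=num)
        return ('file=rbd:rbd/{img}:id={id},format=raw,if=virtio,'
                'cache={cachemode}').format(
                    img=img,
                    id=client[len('client.'):],
                    cachemode=cachemode,
                )

    print(rbd_drive_arg('client.0', 0, 'writeback'))
    # file=rbd:rbd/client.0.0:id=0,format=raw,if=virtio,cache=writeback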
Fixes: #8726 Signed-off-by: Josh Durgin --- suites/big/rados-thrash/thrashers/default.yaml | 1 + suites/kcephfs/thrash/thrashers/default.yaml | 1 + suites/krbd/thrash/thrashers/default.yaml | 1 + suites/powercycle/osd/powercycle/default.yaml | 1 + suites/rados/singleton/all/thrash-rados.yaml | 1 + suites/rados/thrash/thrashers/default.yaml | 1 + suites/rados/thrash/thrashers/mapgap.yaml | 1 + suites/rados/thrash/thrashers/morepggrow.yaml | 1 + suites/rados/thrash/thrashers/pggrow.yaml | 1 + suites/rados/verify/1thrash/default.yaml | 1 + suites/rbd/thrash/thrashers/default.yaml | 1 + suites/smoke/singleton/all/thrash-rados.yaml | 1 + suites/smoke/thrash/thrashers/default.yaml | 1 + suites/stress/thrash/thrashers/default.yaml | 1 + suites/stress/thrash/thrashers/fast.yaml | 1 + suites/stress/thrash/thrashers/more-down.yaml | 1 + suites/upgrade-cuttlefish/fs/4-final/osdthrash.yaml | 1 + suites/upgrade-cuttlefish/rados-older/6-final/osdthrash.yaml | 1 + suites/upgrade-cuttlefish/rados/4-final/osdthrash.yaml | 1 + suites/upgrade-cuttlefish/rbd/4-final/osdthrash.yaml | 1 + suites/upgrade-cuttlefish/rgw/4-final/osdthrash.yaml | 1 + suites/upgrade-parallel/stress-split/3-thrash/default.yaml | 1 + suites/upgrade/dumpling/fs/4-final/osdthrash.yaml | 1 + suites/upgrade/dumpling/rados/4-final/osdthrash.yaml | 1 + suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml | 1 + suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml | 1 + 26 files changed, 26 insertions(+) diff --git a/suites/big/rados-thrash/thrashers/default.yaml b/suites/big/rados-thrash/thrashers/default.yaml index d67ff20a6..7fc179a59 100644 --- a/suites/big/rados-thrash/thrashers/default.yaml +++ b/suites/big/rados-thrash/thrashers/default.yaml @@ -5,6 +5,7 @@ overrides: - objects unfound and apparently lost tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/kcephfs/thrash/thrashers/default.yaml b/suites/kcephfs/thrash/thrashers/default.yaml index 14d772583..b822d7422 100644 --- a/suites/kcephfs/thrash/thrashers/default.yaml +++ b/suites/kcephfs/thrash/thrashers/default.yaml @@ -5,3 +5,4 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false diff --git a/suites/krbd/thrash/thrashers/default.yaml b/suites/krbd/thrash/thrashers/default.yaml index 14d772583..b822d7422 100644 --- a/suites/krbd/thrash/thrashers/default.yaml +++ b/suites/krbd/thrash/thrashers/default.yaml @@ -5,3 +5,4 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false diff --git a/suites/powercycle/osd/powercycle/default.yaml b/suites/powercycle/osd/powercycle/default.yaml index b632e83e6..6d80a1205 100644 --- a/suites/powercycle/osd/powercycle/default.yaml +++ b/suites/powercycle/osd/powercycle/default.yaml @@ -2,6 +2,7 @@ tasks: - install: - ceph: - thrashosds: + thrash_primary_affinity: false chance_down: 1.0 powercycle: true timeout: 600 diff --git a/suites/rados/singleton/all/thrash-rados.yaml b/suites/rados/singleton/all/thrash-rados.yaml index 4bdcf2265..7d3aac00b 100644 --- a/suites/rados/singleton/all/thrash-rados.yaml +++ b/suites/rados/singleton/all/thrash-rados.yaml @@ -14,6 +14,7 @@ tasks: log-whitelist: - wrongly marked me down - thrashosds: + thrash_primary_affinity: false op_delay: 30 clean_interval: 120 chance_down: .5 diff --git a/suites/rados/thrash/thrashers/default.yaml b/suites/rados/thrash/thrashers/default.yaml index 9435b146a..66ea0a362 100644 --- 
a/suites/rados/thrash/thrashers/default.yaml +++ b/suites/rados/thrash/thrashers/default.yaml @@ -5,6 +5,7 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/rados/thrash/thrashers/mapgap.yaml b/suites/rados/thrash/thrashers/mapgap.yaml index 5053338e9..60aba7e2b 100644 --- a/suites/rados/thrash/thrashers/mapgap.yaml +++ b/suites/rados/thrash/thrashers/mapgap.yaml @@ -12,6 +12,7 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/rados/thrash/thrashers/morepggrow.yaml b/suites/rados/thrash/thrashers/morepggrow.yaml index 93379a82c..944f654de 100644 --- a/suites/rados/thrash/thrashers/morepggrow.yaml +++ b/suites/rados/thrash/thrashers/morepggrow.yaml @@ -5,6 +5,7 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 3 chance_pgpnum_fix: 1 diff --git a/suites/rados/thrash/thrashers/pggrow.yaml b/suites/rados/thrash/thrashers/pggrow.yaml index 6131b0001..d4012f942 100644 --- a/suites/rados/thrash/thrashers/pggrow.yaml +++ b/suites/rados/thrash/thrashers/pggrow.yaml @@ -5,6 +5,7 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 2 chance_pgpnum_fix: 1 diff --git a/suites/rados/verify/1thrash/default.yaml b/suites/rados/verify/1thrash/default.yaml index 9435b146a..66ea0a362 100644 --- a/suites/rados/verify/1thrash/default.yaml +++ b/suites/rados/verify/1thrash/default.yaml @@ -5,6 +5,7 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/rbd/thrash/thrashers/default.yaml b/suites/rbd/thrash/thrashers/default.yaml index 14d7f17c2..04552142a 100644 --- a/suites/rbd/thrash/thrashers/default.yaml +++ b/suites/rbd/thrash/thrashers/default.yaml @@ -5,4 +5,5 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false timeout: 1200 diff --git a/suites/smoke/singleton/all/thrash-rados.yaml b/suites/smoke/singleton/all/thrash-rados.yaml index 157f0f71c..bf63d80ef 100644 --- a/suites/smoke/singleton/all/thrash-rados.yaml +++ b/suites/smoke/singleton/all/thrash-rados.yaml @@ -13,6 +13,7 @@ tasks: log-whitelist: - wrongly marked me down - thrashosds: + thrash_primary_affinity: false op_delay: 30 clean_interval: 120 chance_down: .5 diff --git a/suites/smoke/thrash/thrashers/default.yaml b/suites/smoke/thrash/thrashers/default.yaml index 14d772583..b822d7422 100644 --- a/suites/smoke/thrash/thrashers/default.yaml +++ b/suites/smoke/thrash/thrashers/default.yaml @@ -5,3 +5,4 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false diff --git a/suites/stress/thrash/thrashers/default.yaml b/suites/stress/thrash/thrashers/default.yaml index 14d772583..b822d7422 100644 --- a/suites/stress/thrash/thrashers/default.yaml +++ b/suites/stress/thrash/thrashers/default.yaml @@ -5,3 +5,4 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false diff --git a/suites/stress/thrash/thrashers/fast.yaml 
b/suites/stress/thrash/thrashers/fast.yaml index eea9c06cd..e2209159b 100644 --- a/suites/stress/thrash/thrashers/fast.yaml +++ b/suites/stress/thrash/thrashers/fast.yaml @@ -5,5 +5,6 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false op_delay: 1 chance_down: 10 diff --git a/suites/stress/thrash/thrashers/more-down.yaml b/suites/stress/thrash/thrashers/more-down.yaml index e39098b1c..ca1afc2e7 100644 --- a/suites/stress/thrash/thrashers/more-down.yaml +++ b/suites/stress/thrash/thrashers/more-down.yaml @@ -5,4 +5,5 @@ tasks: - wrongly marked me down - objects unfound and apparently lost - thrashosds: + thrash_primary_affinity: false chance_down: 50 diff --git a/suites/upgrade-cuttlefish/fs/4-final/osdthrash.yaml b/suites/upgrade-cuttlefish/fs/4-final/osdthrash.yaml index b335de392..9c5484254 100644 --- a/suites/upgrade-cuttlefish/fs/4-final/osdthrash.yaml +++ b/suites/upgrade-cuttlefish/fs/4-final/osdthrash.yaml @@ -5,6 +5,7 @@ overrides: - objects unfound and apparently lost tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/upgrade-cuttlefish/rados-older/6-final/osdthrash.yaml b/suites/upgrade-cuttlefish/rados-older/6-final/osdthrash.yaml index ef1bb9c55..32c3b5a1b 100644 --- a/suites/upgrade-cuttlefish/rados-older/6-final/osdthrash.yaml +++ b/suites/upgrade-cuttlefish/rados-older/6-final/osdthrash.yaml @@ -5,6 +5,7 @@ overrides: - objects unfound and apparently lost tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/upgrade-cuttlefish/rados/4-final/osdthrash.yaml b/suites/upgrade-cuttlefish/rados/4-final/osdthrash.yaml index ef1bb9c55..32c3b5a1b 100644 --- a/suites/upgrade-cuttlefish/rados/4-final/osdthrash.yaml +++ b/suites/upgrade-cuttlefish/rados/4-final/osdthrash.yaml @@ -5,6 +5,7 @@ overrides: - objects unfound and apparently lost tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/upgrade-cuttlefish/rbd/4-final/osdthrash.yaml b/suites/upgrade-cuttlefish/rbd/4-final/osdthrash.yaml index dbe419622..636351548 100644 --- a/suites/upgrade-cuttlefish/rbd/4-final/osdthrash.yaml +++ b/suites/upgrade-cuttlefish/rbd/4-final/osdthrash.yaml @@ -5,6 +5,7 @@ overrides: - objects unfound and apparently lost tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/upgrade-cuttlefish/rgw/4-final/osdthrash.yaml b/suites/upgrade-cuttlefish/rgw/4-final/osdthrash.yaml index 44028734d..4f84cdba7 100644 --- a/suites/upgrade-cuttlefish/rgw/4-final/osdthrash.yaml +++ b/suites/upgrade-cuttlefish/rgw/4-final/osdthrash.yaml @@ -5,6 +5,7 @@ overrides: - objects unfound and apparently lost tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/upgrade-parallel/stress-split/3-thrash/default.yaml b/suites/upgrade-parallel/stress-split/3-thrash/default.yaml index d67ff20a6..7fc179a59 100644 --- a/suites/upgrade-parallel/stress-split/3-thrash/default.yaml +++ b/suites/upgrade-parallel/stress-split/3-thrash/default.yaml @@ -5,6 +5,7 @@ overrides: - objects unfound and apparently lost tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml 
b/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml index dbd7191e3..e6420b631 100644 --- a/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml +++ b/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml @@ -6,6 +6,7 @@ overrides: - log bound mismatch tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml b/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml index f81504233..e0a9ec02c 100644 --- a/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml +++ b/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml @@ -6,6 +6,7 @@ overrides: - log bound mismatch tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml b/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml index 575fd7922..d92de013b 100644 --- a/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml +++ b/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml @@ -6,6 +6,7 @@ overrides: - log bound mismatch tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 diff --git a/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml b/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml index 6cf6d861d..7916034cc 100644 --- a/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml +++ b/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml @@ -6,6 +6,7 @@ overrides: - log bound mismatch tasks: - thrashosds: + thrash_primary_affinity: false timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 From ba1e14df60e954bed15514dbe8b63b4ae19f90b1 Mon Sep 17 00:00:00 2001 From: Zack Cerza Date: Mon, 28 Jul 2014 07:58:08 -0600 Subject: [PATCH 34/77] Remove ship_utilities See 2dad906 Signed-off-by: Zack Cerza --- teuthology/task/ceph.py | 49 +--------------------------------- teuthology/task/ceph_deploy.py | 4 +-- 2 files changed, 3 insertions(+), 50 deletions(-) diff --git a/teuthology/task/ceph.py b/teuthology/task/ceph.py index f8836badc..fbe1445ef 100644 --- a/teuthology/task/ceph.py +++ b/teuthology/task/ceph.py @@ -144,52 +144,6 @@ def ceph_log(ctx, config): finally: pass -@contextlib.contextmanager -def ship_utilities(ctx, config): - assert config is None - FILES = ['daemon-helper', 'adjust-ulimits', 'chdir-coredump', - 'valgrind.supp', 'kcon_most'] - testdir = teuthology.get_testdir(ctx) - for filename in FILES: - log.info('Shipping %r...', filename) - src = os.path.join(os.path.dirname(__file__), filename) - dst = os.path.join(testdir, filename) - with file(src, 'rb') as f: - for rem in ctx.cluster.remotes.iterkeys(): - teuthology.write_file( - remote=rem, - path=dst, - data=f, - ) - f.seek(0) - rem.run( - args=[ - 'chmod', - 'a=rx', - '--', - dst, - ], - ) - - try: - yield - finally: - log.info('Removing shipped files: %s...', ' '.join(FILES)) - filenames = ( - os.path.join(testdir, filename) - for filename in FILES - ) - run.wait( - ctx.cluster.run( - args=[ - 'rm', - '-rf', - '--', - ] + list(filenames), - wait=False, - ), - ) - def assign_devs(roles, devs): return dict(zip(roles, devs)) @@ -591,7 +545,7 @@ def cluster(ctx, config): mkfs = ['mkfs.%s' % fs] + mkfs_options log.info('%s on %s on %s' % (mkfs, dev, remote)) remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev]) - + log.info('mount %s on %s -o %s' % (dev, remote, ','.join(mount_options))) remote.run( @@ -1147,7 +1101,6 @@ def task(ctx, config): with contextutil.nested( lambda: ceph_log(ctx=ctx, 
config=None), - lambda: ship_utilities(ctx=ctx, config=None), lambda: valgrind_post(ctx=ctx, config=config), lambda: cluster(ctx=ctx, config=dict( conf=config.get('conf', {}), diff --git a/teuthology/task/ceph_deploy.py b/teuthology/task/ceph_deploy.py index 193e6583e..be4278930 100644 --- a/teuthology/task/ceph_deploy.py +++ b/teuthology/task/ceph_deploy.py @@ -7,7 +7,7 @@ from teuthology import misc as teuthology from teuthology import contextutil -import ceph as ceph_fn +import install as install_fn from ..orchestra import run log = logging.getLogger(__name__) @@ -423,7 +423,7 @@ def task(ctx, config): assert isinstance(config['branch'], dict), 'branch must be a dictionary' with contextutil.nested( - lambda: ceph_fn.ship_utilities(ctx=ctx, config=None), + lambda: install_fn.ship_utilities(ctx=ctx, config=None), lambda: download_ceph_deploy(ctx=ctx, config=config), lambda: build_ceph_cluster(ctx=ctx, config=dict( conf=config.get('conf', {}), From 04ccc3f588442bd22d8a07d42b5da5b9c8fb7aa1 Mon Sep 17 00:00:00 2001 From: Zack Cerza Date: Mon, 28 Jul 2014 08:39:33 -0600 Subject: [PATCH 35/77] adjust-ulimits and daemon-helper are in $PATH Signed-off-by: Zack Cerza --- teuthology/task/admin_socket.py | 2 +- teuthology/task/blktrace.py | 6 +++--- teuthology/task/ceph.py | 22 +++++++++++----------- teuthology/task/ceph_client.py | 2 +- teuthology/task/ceph_fuse.py | 4 ++-- teuthology/task/ceph_manager.py | 8 ++++---- teuthology/task/cram.py | 2 +- teuthology/task/kclient.py | 2 +- teuthology/task/lockfile.py | 20 ++++++++++---------- teuthology/task/omapbench.py | 4 ++-- teuthology/task/osd_backfill.py | 6 +++--- teuthology/task/osd_failsafe_enospc.py | 10 +++++----- teuthology/task/osd_recovery.py | 6 +++--- teuthology/task/qemu.py | 4 ++-- teuthology/task/rados.py | 2 +- teuthology/task/radosbench.py | 14 +++++++------- teuthology/task/radosgw_admin_rest.py | 2 +- teuthology/task/radosgw_agent.py | 12 ++++++------ teuthology/task/rbd.py | 10 +++++----- teuthology/task/rbd_fsx.py | 2 +- teuthology/task/recovery_bench.py | 6 +++--- teuthology/task/rest_api.py | 4 ++-- teuthology/task/restart.py | 2 +- teuthology/task/rgw.py | 12 ++++++------ teuthology/task/s3readwrite.py | 12 ++++++------ teuthology/task/s3roundtrip.py | 4 ++-- teuthology/task/s3tests.py | 4 ++-- teuthology/task/samba.py | 2 +- teuthology/task/swift.py | 4 ++-- teuthology/task/watch_notify_stress.py | 2 +- teuthology/task/workunit.py | 2 +- teuthology/task_util/rados.py | 2 +- teuthology/task_util/rgw.py | 2 +- 33 files changed, 99 insertions(+), 99 deletions(-) diff --git a/teuthology/task/admin_socket.py b/teuthology/task/admin_socket.py index 01c873c84..dbb7a17ce 100644 --- a/teuthology/task/admin_socket.py +++ b/teuthology/task/admin_socket.py @@ -74,7 +74,7 @@ def _socket_command(ctx, remote, socket_path, command, args): proc = remote.run( args=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'ceph', diff --git a/teuthology/task/blktrace.py b/teuthology/task/blktrace.py index 3b9a7acd3..97142519f 100644 --- a/teuthology/task/blktrace.py +++ b/teuthology/task/blktrace.py @@ -3,7 +3,7 @@ from teuthology import misc as teuthology from teuthology import contextutil -from ..orchestra import run +from ..orchestra import run log = logging.getLogger(__name__) blktrace = '/usr/sbin/blktrace' @@ -41,7 +41,7 @@ def execute(ctx, config): 'cd', log_dir, run.Raw(';'), - '{tdir}/daemon-helper'.format(tdir=testdir), + 'daemon-helper', 
daemon_signal, 'sudo', blktrace, @@ -50,7 +50,7 @@ def execute(ctx, config): '-d', dev, ], - wait=False, + wait=False, stdin=run.PIPE, ) procs.append(proc) diff --git a/teuthology/task/ceph.py b/teuthology/task/ceph.py index fbe1445ef..0b5b4fe66 100644 --- a/teuthology/task/ceph.py +++ b/teuthology/task/ceph.py @@ -349,7 +349,7 @@ def cluster(ctx, config): ctx.cluster.only(firstmon).run( args=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'ceph-authtool', @@ -360,7 +360,7 @@ def cluster(ctx, config): ctx.cluster.only(firstmon).run( args=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'ceph-authtool', @@ -388,7 +388,7 @@ def cluster(ctx, config): ctx.cluster.only(firstmon).run( args=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'ceph-authtool', @@ -432,7 +432,7 @@ def cluster(ctx, config): run.wait( mons.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'osdmaptool', @@ -460,7 +460,7 @@ def cluster(ctx, config): '-p', '/var/lib/ceph/mds/ceph-{id}'.format(id=id_), run.Raw('&&'), - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'sudo', @@ -574,7 +574,7 @@ def cluster(ctx, config): remote.run( args=[ 'MALLOC_CHECK_=3', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'sudo', @@ -631,7 +631,7 @@ def cluster(ctx, config): mons.run( args=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'ceph-authtool', @@ -658,7 +658,7 @@ def cluster(ctx, config): ) remote.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'sudo', @@ -837,11 +837,11 @@ def run_daemon(ctx, config, type_): num_active += 1 run_cmd = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'sudo', - '{tdir}/daemon-helper'.format(tdir=testdir), + 'daemon-helper', daemon_signal, ] run_cmd_tail = [ @@ -875,7 +875,7 @@ def run_daemon(ctx, config, type_): (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() mon0_remote.run(args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'ceph', diff --git a/teuthology/task/ceph_client.py b/teuthology/task/ceph_client.py index fd4f8cb2e..afcaf55d4 100644 --- a/teuthology/task/ceph_client.py +++ b/teuthology/task/ceph_client.py @@ -15,7 +15,7 @@ def create_keyring(ctx): client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) remote.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'sudo', diff --git a/teuthology/task/ceph_fuse.py b/teuthology/task/ceph_fuse.py index b0a0fe38b..66eb42863 100644 --- a/teuthology/task/ceph_fuse.py +++ b/teuthology/task/ceph_fuse.py @@ -82,10 +82,10 @@ def task(ctx, config): run_cmd=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), - '{tdir}/daemon-helper'.format(tdir=testdir), + 'daemon-helper', daemon_signal, ] run_cmd_tail=[ diff --git a/teuthology/task/ceph_manager.py b/teuthology/task/ceph_manager.py index 4c904eeef..6b43eff89 100644 --- a/teuthology/task/ceph_manager.py +++ b/teuthology/task/ceph_manager.py @@ -324,7 +324,7 @@ def tmp(x): def 
raw_cluster_cmd(self, *args): testdir = teuthology.get_testdir(self.ctx) ceph_args = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'ceph', @@ -339,7 +339,7 @@ def raw_cluster_cmd(self, *args): def raw_cluster_cmd_result(self, *args): testdir = teuthology.get_testdir(self.ctx) ceph_args = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'ceph', @@ -354,7 +354,7 @@ def raw_cluster_cmd_result(self, *args): def do_rados(self, remote, cmd): testdir = teuthology.get_testdir(self.ctx) pre = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'rados', @@ -412,7 +412,7 @@ def osd_admin_socket(self, osdnum, command, check_status=True): assert remote is not None args=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'ceph', diff --git a/teuthology/task/cram.py b/teuthology/task/cram.py index 4829b8edd..29581af4b 100644 --- a/teuthology/task/cram.py +++ b/teuthology/task/cram.py @@ -113,7 +113,7 @@ def _run_tests(ctx, role): run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)), run.Raw('CEPH_ID="{id}"'.format(id=id_)), run.Raw('PYTHONPATH="$PYTHONPATH:{tdir}/binary/usr/local/lib/python2.7/dist-packages:{tdir}/binary/usr/local/lib/python2.6/dist-packages"'.format(tdir=testdir)), - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), '{tdir}/virtualenv/bin/cram'.format(tdir=testdir), diff --git a/teuthology/task/kclient.py b/teuthology/task/kclient.py index 6fec64618..9fcec524d 100644 --- a/teuthology/task/kclient.py +++ b/teuthology/task/kclient.py @@ -69,7 +69,7 @@ def task(ctx, config): remote.run( args=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), '/sbin/mount.ceph', diff --git a/teuthology/task/lockfile.py b/teuthology/task/lockfile.py index 748b99806..ede39ad1c 100644 --- a/teuthology/task/lockfile.py +++ b/teuthology/task/lockfile.py @@ -34,7 +34,7 @@ def task(ctx, config): {client: client.1, lockfile: testfile, holdtime: 5}, {client: client.2, lockfile: testfile, holdtime: 5, maxwait: 1, expectfail: True}] - + In the past this test would have failed; there was a bug where waitlocks weren't cleaned up if the process failed. More involved scenarios are also possible. 
""" @@ -42,7 +42,7 @@ def task(ctx, config): try: assert isinstance(config, list), \ "task lockfile got invalid config" - + log.info("building executable on each host") buildprocs = list() # build the locker executable on each client @@ -66,7 +66,7 @@ def task(ctx, config): badconfig = True if badconfig: raise KeyError("bad config {op_}".format(op_=op)) - + testdir = teuthology.get_testdir(ctx) clients = set(clients) files = set(files) @@ -76,7 +76,7 @@ def task(ctx, config): log.info("got a client remote") (_, _, client_id) = client.partition('.') filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) - + proc = client_remote.run( args=[ 'mkdir', '-p', '{tdir}/archive/lockfile'.format(tdir=testdir), @@ -94,14 +94,14 @@ def task(ctx, config): ], logger=log.getChild('lockfile_client.{id}'.format(id=client_id)), wait=False - ) + ) log.info('building sclockandhold on client{id}'.format(id=client_id)) buildprocs.append(proc) - + # wait for builds to finish run.wait(buildprocs) log.info('finished building sclockandhold on all clients') - + # create the files to run these locks on client = clients.pop() clients.add(client) @@ -146,7 +146,7 @@ def task(ctx, config): lock_procs.append((greenlet, op)) time.sleep(0.1) # to provide proper ordering #for op in config - + for (greenlet, op) in lock_procs: log.debug('checking lock for op {op_}'.format(op_=op)) result = greenlet.get() @@ -192,10 +192,10 @@ def lock_one(op, ctx): try: proc = client_remote.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), - '{tdir}/daemon-helper'.format(tdir=testdir), + 'daemon-helper', 'kill', '{tdir}/lockfile/sclockandhold'.format(tdir=testdir), filepath, diff --git a/teuthology/task/omapbench.py b/teuthology/task/omapbench.py index 4b46a632d..8e97f3070 100644 --- a/teuthology/task/omapbench.py +++ b/teuthology/task/omapbench.py @@ -53,7 +53,7 @@ def task(ctx, config): proc = remote.run( args=[ "/bin/sh", "-c", - " ".join(['{tdir}/adjust-ulimits', + " ".join(['adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage', 'omapbench', @@ -65,7 +65,7 @@ def task(ctx, config): '--valsize', str(config.get('valsize',1000)), '--inc', str(config.get('increment',10)), '--omaptype', str(config.get('omaptype','uniform')) - ]).format(tdir=testdir), + ]), ], logger=log.getChild('omapbench.{id}'.format(id=id_)), stdin=run.PIPE, diff --git a/teuthology/task/osd_backfill.py b/teuthology/task/osd_backfill.py index 28433026c..eb1a34eba 100644 --- a/teuthology/task/osd_backfill.py +++ b/teuthology/task/osd_backfill.py @@ -11,7 +11,7 @@ def rados_start(ctx, remote, cmd): log.info("rados %s" % ' '.join(cmd)) testdir = teuthology.get_testdir(ctx) pre = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'rados', @@ -33,10 +33,10 @@ def task(ctx, config): 'thrashosds task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) - assert num_osds == 3 + assert num_osds == 3 manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/osd_failsafe_enospc.py b/teuthology/task/osd_failsafe_enospc.py index 92e5af90c..15ae7b542 100644 --- a/teuthology/task/osd_failsafe_enospc.py +++ b/teuthology/task/osd_failsafe_enospc.py @@ -60,7 +60,7 @@ def 
task(ctx, config): proc = mon.run( args=[ - '{tdir}/daemon-helper'.format(tdir=teuthology.get_testdir(ctx)), + 'daemon-helper', 'kill', 'ceph', '-w' ], @@ -87,7 +87,7 @@ def task(ctx, config): proc = mon.run( args=[ - '{tdir}/daemon-helper'.format(tdir=teuthology.get_testdir(ctx)), + 'daemon-helper', 'kill', 'ceph', '-w' ], @@ -128,7 +128,7 @@ def task(ctx, config): proc = mon.run( args=[ - '{tdir}/daemon-helper'.format(tdir=teuthology.get_testdir(ctx)), + 'daemon-helper', 'kill', 'ceph', '-w' ], @@ -156,7 +156,7 @@ def task(ctx, config): proc = mon.run( args=[ - '{tdir}/daemon-helper'.format(tdir=teuthology.get_testdir(ctx)), + 'daemon-helper', 'kill', 'ceph', '-w' ], @@ -186,7 +186,7 @@ def task(ctx, config): proc = mon.run( args=[ - '{tdir}/daemon-helper'.format(tdir=teuthology.get_testdir(ctx)), + 'daemon-helper', 'kill', 'ceph', '-w' ], diff --git a/teuthology/task/osd_recovery.py b/teuthology/task/osd_recovery.py index d813a6782..beb8c081a 100644 --- a/teuthology/task/osd_recovery.py +++ b/teuthology/task/osd_recovery.py @@ -10,7 +10,7 @@ def rados_start(testdir, remote, cmd): log.info("rados %s" % ' '.join(cmd)) pre = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'rados', @@ -33,10 +33,10 @@ def task(ctx, config): testdir = teuthology.get_testdir(ctx) first_mon = teuthology.get_first_mon(ctx, config) (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) - assert num_osds == 3 + assert num_osds == 3 manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/qemu.py b/teuthology/task/qemu.py index bd4fd6b91..2dc08332e 100644 --- a/teuthology/task/qemu.py +++ b/teuthology/task/qemu.py @@ -162,10 +162,10 @@ def run_qemu(ctx, config): base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client) args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), - '{tdir}/daemon-helper'.format(tdir=testdir), + 'daemon-helper', 'term', 'qemu-system-x86_64', '-enable-kvm', '-nographic', '-m', str(client_config.get('memory', DEFAULT_MEM)), diff --git a/teuthology/task/rados.py b/teuthology/task/rados.py index 7bef3e02c..20715cf39 100644 --- a/teuthology/task/rados.py +++ b/teuthology/task/rados.py @@ -57,7 +57,7 @@ def task(ctx, config): op_weights = config.get('op_weights', {}) testdir = teuthology.get_testdir(ctx) args = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'ceph_test_rados', diff --git a/teuthology/task/radosbench.py b/teuthology/task/radosbench.py index 420fa101f..263ca9d6c 100644 --- a/teuthology/task/radosbench.py +++ b/teuthology/task/radosbench.py @@ -45,13 +45,13 @@ def task(ctx, config): proc = remote.run( args=[ "/bin/sh", "-c", - " ".join(['{tdir}/adjust-ulimits', + " ".join(['adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage', 'rados', '--name', role, 'mkpool', str(config.get('pool', 'data')) - ]).format(tdir=testdir), + ]), ], logger=log.getChild('radosbench.{id}'.format(id=id_)), stdin=run.PIPE, @@ -62,14 +62,14 @@ def task(ctx, config): proc = remote.run( args=[ "/bin/sh", "-c", - " ".join(['{tdir}/adjust-ulimits', + " ".join(['adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage', 'rados', '--name', role, - '-p' , str(config.get('pool', 'data')), + '-p', 
str(config.get('pool', 'data')), 'bench', str(config.get('time', 360)), 'write', - ]).format(tdir=testdir), + ]), ], logger=log.getChild('radosbench.{id}'.format(id=id_)), stdin=run.PIPE, @@ -87,13 +87,13 @@ def task(ctx, config): proc = remote.run( args=[ "/bin/sh", "-c", - " ".join(['{tdir}/adjust-ulimits', + " ".join(['adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage', 'rados', '--name', role, 'rmpool', str(config.get('pool', 'data')) - ]).format(tdir=testdir), + ]), ], logger=log.getChild('radosbench.{id}'.format(id=id_)), stdin=run.PIPE, diff --git a/teuthology/task/radosgw_admin_rest.py b/teuthology/task/radosgw_admin_rest.py index 4c57a00fe..25b04a37d 100644 --- a/teuthology/task/radosgw_admin_rest.py +++ b/teuthology/task/radosgw_admin_rest.py @@ -31,7 +31,7 @@ def rgwadmin(ctx, client, cmd): log.info('radosgw-admin: %s' % cmd) testdir = teuthology.get_testdir(ctx) pre = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin', diff --git a/teuthology/task/radosgw_agent.py b/teuthology/task/radosgw_agent.py index a6661b480..34438a6a9 100644 --- a/teuthology/task/radosgw_agent.py +++ b/teuthology/task/radosgw_agent.py @@ -39,7 +39,7 @@ def run_radosgw_agent(ctx, config): remote.run( args=[ 'cd', testdir, run.Raw('&&'), - 'git', 'clone', + 'git', 'clone', '-b', branch, 'https://github.com/ceph/radosgw-agent.git', 'radosgw-agent.{client}'.format(client=client), @@ -74,7 +74,7 @@ def run_radosgw_agent(ctx, config): port = cconf.get('port', 8000) daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port) in_args=[ - '{tdir}/daemon-helper'.format(tdir=testdir), 'kill', + 'daemon-helper', 'kill', '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir, client=client), '-v', @@ -96,7 +96,7 @@ def run_radosgw_agent(ctx, config): # the test server and full/incremental flags are mutually exclusive if sync_scope is None: in_args.append('--test-server-host') - in_args.append('0.0.0.0') + in_args.append('0.0.0.0') in_args.append('--test-server-port') in_args.append(str(port)) log.debug('Starting a sync test server on {client}'.format(client=client)) @@ -124,15 +124,15 @@ def task(ctx, config): to 0.0.0.0. Port defaults to 8000. This must be run on clients that have the correct zone root pools and rgw zone set in ceph.conf, or the task cannot read the region information from the - cluster. + cluster. By default, this task will start an HTTP server that will trigger full - or incremental syncs based on requests made to it. + or incremental syncs based on requests made to it. Alternatively, a single full sync can be triggered by specifying 'sync-scope: full' or a loop of incremental syncs can be triggered by specifying 'sync-scope: incremental' (the loop will sleep '--incremental-sync-delay' seconds between each sync, default is 20 seconds). 
- + An example:: tasks: diff --git a/teuthology/task/rbd.py b/teuthology/task/rbd.py index 2a21e294a..649cb42b7 100644 --- a/teuthology/task/rbd.py +++ b/teuthology/task/rbd.py @@ -54,7 +54,7 @@ def create_image(ctx, config): log.info('Creating image {name} with size {size}'.format(name=name, size=size)) args = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage'.format(tdir=testdir), '{tdir}/archive/coverage'.format(tdir=testdir), 'rbd', @@ -79,7 +79,7 @@ def create_image(ctx, config): (remote,) = ctx.cluster.only(role).remotes.keys() remote.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'rbd', @@ -166,7 +166,7 @@ def dev_create(ctx, config): remote.run( args=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'rbd', @@ -193,7 +193,7 @@ def dev_create(ctx, config): args=[ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'rbd', @@ -418,7 +418,7 @@ def run_xfstests_one_client(ctx, role, properties): # readlink -f in order to get their canonical # pathname (so it matches what the kernel remembers). args = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), '/usr/bin/sudo', diff --git a/teuthology/task/rbd_fsx.py b/teuthology/task/rbd_fsx.py index d841c2ecd..97d285087 100644 --- a/teuthology/task/rbd_fsx.py +++ b/teuthology/task/rbd_fsx.py @@ -42,7 +42,7 @@ def _run_one_client(ctx, config, role): (remote,) = ctx.cluster.only(role).remotes.iterkeys() remote.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'ceph_test_librbd_fsx', diff --git a/teuthology/task/recovery_bench.py b/teuthology/task/recovery_bench.py index 9026716bc..569bba84c 100644 --- a/teuthology/task/recovery_bench.py +++ b/teuthology/task/recovery_bench.py @@ -104,7 +104,7 @@ def do_bench(self): # create the objects osd_remote.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'smalliobench'.format(tdir=testdir), @@ -120,7 +120,7 @@ def do_bench(self): log.info('non-recovery (baseline)') p = osd_remote.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'smalliobench', @@ -142,7 +142,7 @@ def do_bench(self): log.info('recovery active') p = osd_remote.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'smalliobench', diff --git a/teuthology/task/rest_api.py b/teuthology/task/rest_api.py index 252d050bb..817add29f 100644 --- a/teuthology/task/rest_api.py +++ b/teuthology/task/rest_api.py @@ -21,7 +21,7 @@ def run_rest_api_daemon(ctx, api_clients): id_ = whole_id_[len('clients'):] run_cmd = [ 'sudo', - '{tdir}/daemon-helper'.format(tdir=testdir), + 'daemon-helper', 'kill', 'ceph-rest-api', '-n', @@ -98,7 +98,7 @@ def task(ctx, config): rems.run( args=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', coverage_dir, 'ceph-authtool', diff --git 
a/teuthology/task/restart.py b/teuthology/task/restart.py index c06b76d28..7881cca43 100644 --- a/teuthology/task/restart.py +++ b/teuthology/task/restart.py @@ -112,7 +112,7 @@ def task(ctx, config): env_arg = '{var}={val}'.format(var=var, val=quoted_val) args.append(run.Raw(env_arg)) args.extend([ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), '{srcdir}/{c}'.format( diff --git a/teuthology/task/rgw.py b/teuthology/task/rgw.py index 42f4689ce..c2ec706e7 100644 --- a/teuthology/task/rgw.py +++ b/teuthology/task/rgw.py @@ -145,10 +145,10 @@ def start_rgw(ctx, config): log.info('client {client} is id {id}'.format(client=client, id=id_)) run_cmd=[ 'sudo', - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), - '{tdir}/daemon-helper'.format(tdir=testdir), + 'daemon-helper', 'term', ] run_cmd_tail=[ @@ -223,8 +223,8 @@ def start_apache(ctx, config): apache_name = '/usr/sbin/httpd.worker' proc = remote.run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), - '{tdir}/daemon-helper'.format(tdir=testdir), + 'adjust-ulimits', + 'daemon-helper', 'kill', apache_name, '-X', @@ -331,7 +331,7 @@ def fill_in_endpoints(region_info, role_zones, role_endpoints): region, zone, zone_info, _ = role_zones[role] host, port = role_endpoints[role] endpoint = 'http://{host}:{port}/'.format(host=host, port=port) - # check if the region specified under client actually exists + # check if the region specified under client actually exists # in region_info (it should, if properly configured). # If not, throw a reasonable error if region not in region_info: @@ -537,7 +537,7 @@ def task(ctx, config): rgw region: bar rgw zone: bar-secondary rgw region root pool: .rgw.rroot.bar - rgw zone root pool: .rgw.zroot.bar-secondary + rgw zone root pool: .rgw.zroot.bar-secondary - rgw: regions: foo: diff --git a/teuthology/task/s3readwrite.py b/teuthology/task/s3readwrite.py index b2ad6f9a0..d2442fa5f 100644 --- a/teuthology/task/s3readwrite.py +++ b/teuthology/task/s3readwrite.py @@ -72,7 +72,7 @@ def create_users(ctx, config): users = {'s3': 'foo'} cached_client_user_names = dict() for client in config['clients']: - cached_client_user_names[client] = dict() + cached_client_user_names[client] = dict() s3tests_conf = config['s3tests_conf'][client] s3tests_conf.setdefault('readwrite', {}) s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-') @@ -86,13 +86,13 @@ def create_users(ctx, config): rwconf['files'].setdefault('stddev', 500) for section, user in users.iteritems(): _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) - log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'], + log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'], client=client)) - # stash the 'delete_user' flag along with user name for easier cleanup + # stash the 'delete_user' flag along with user name for easier cleanup delete_this_user = True if 'delete_user' in s3tests_conf['s3']: - delete_this_user = s3tests_conf['s3']['delete_user'] + delete_this_user = s3tests_conf['s3']['delete_user'] log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user,client=client)) cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user) @@ -103,7 +103,7 @@ def create_users(ctx, config): else: 
ctx.cluster.only(client).run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin', @@ -126,7 +126,7 @@ def create_users(ctx, config): if delete_this_user: ctx.cluster.only(client).run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin', diff --git a/teuthology/task/s3roundtrip.py b/teuthology/task/s3roundtrip.py index 100d5ef38..c99aeacd7 100644 --- a/teuthology/task/s3roundtrip.py +++ b/teuthology/task/s3roundtrip.py @@ -71,7 +71,7 @@ def create_users(ctx, config): _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) ctx.cluster.only(client).run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin', @@ -92,7 +92,7 @@ def create_users(ctx, config): uid = '{user}.{client}'.format(user=user, client=client) ctx.cluster.only(client).run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin', diff --git a/teuthology/task/s3tests.py b/teuthology/task/s3tests.py index 4d2c40912..e82529621 100644 --- a/teuthology/task/s3tests.py +++ b/teuthology/task/s3tests.py @@ -78,7 +78,7 @@ def create_users(ctx, config): _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) ctx.cluster.only(client).run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin', @@ -99,7 +99,7 @@ def create_users(ctx, config): uid = '{user}.{client}'.format(user=user, client=client) ctx.cluster.only(client).run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin', diff --git a/teuthology/task/samba.py b/teuthology/task/samba.py index d136dcd44..4a59a6040 100644 --- a/teuthology/task/samba.py +++ b/teuthology/task/samba.py @@ -148,7 +148,7 @@ def task(ctx, config): smbd_cmd = [ 'sudo', - '{tdir}/daemon-helper'.format(tdir=testdir), + 'daemon-helper', 'kill', 'nostdin', '/usr/local/samba/sbin/smbd', diff --git a/teuthology/task/swift.py b/teuthology/task/swift.py index 8a0f34342..90c5901d7 100644 --- a/teuthology/task/swift.py +++ b/teuthology/task/swift.py @@ -59,7 +59,7 @@ def create_users(ctx, config): _config_user(testswift_conf, '{user}.{client}'.format(user=user, client=client), user, suffix) ctx.cluster.only(client).run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin', @@ -80,7 +80,7 @@ def create_users(ctx, config): uid = '{user}.{client}'.format(user=user, client=client) ctx.cluster.only(client).run( args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin', diff --git a/teuthology/task/watch_notify_stress.py b/teuthology/task/watch_notify_stress.py index 7a21ad5fb..3d9fce343 100644 --- a/teuthology/task/watch_notify_stress.py +++ b/teuthology/task/watch_notify_stress.py @@ -44,7 +44,7 @@ def task(ctx, config): args =['CEPH_CLIENT_ID={id_}'.format(id_=id_), 'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')), - 
'{tdir}/daemon-helper'.format(tdir=testdir), 'kill', + 'daemon-helper', 'kill', 'multi_stress_watch foo foo' ] diff --git a/teuthology/task/workunit.py b/teuthology/task/workunit.py index 04ebb2aa3..048078e8a 100644 --- a/teuthology/task/workunit.py +++ b/teuthology/task/workunit.py @@ -288,7 +288,7 @@ def _run_tests(ctx, refspec, role, tests, env, subdir=None): env_arg = '{var}={val}'.format(var=var, val=quoted_val) args.append(run.Raw(env_arg)) args.extend([ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), '{srcdir}/{workunit}'.format( diff --git a/teuthology/task_util/rados.py b/teuthology/task_util/rados.py index a9386c75b..1ce4cefd7 100644 --- a/teuthology/task_util/rados.py +++ b/teuthology/task_util/rados.py @@ -8,7 +8,7 @@ def rados(ctx, remote, cmd, wait=True, check_status=False): testdir = teuthology.get_testdir(ctx) log.info("rados %s" % ' '.join(cmd)) pre = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage', '{tdir}/archive/coverage'.format(tdir=testdir), 'rados', diff --git a/teuthology/task_util/rgw.py b/teuthology/task_util/rgw.py index 91458e5cb..82581362e 100644 --- a/teuthology/task_util/rgw.py +++ b/teuthology/task_util/rgw.py @@ -13,7 +13,7 @@ def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False): log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd)) testdir = teuthology.get_testdir(ctx) pre = [ - '{tdir}/adjust-ulimits'.format(tdir=testdir), + 'adjust-ulimits', 'ceph-coverage'.format(tdir=testdir), '{tdir}/archive/coverage'.format(tdir=testdir), 'radosgw-admin'.format(tdir=testdir), From 83a9c414b426bd3d59b69c8e4d552a1535a95a49 Mon Sep 17 00:00:00 2001 From: Zack Cerza Date: Mon, 28 Jul 2014 16:57:19 -0600 Subject: [PATCH 36/77] Use new get_valgrind_args args See 9e03c73 in teuthology Signed-off-by: Zack Cerza --- teuthology/task/ceph.py | 12 +++++++----- teuthology/task/ceph_fuse.py | 11 +++++------ teuthology/task/rgw.py | 11 +++++------ 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/teuthology/task/ceph.py b/teuthology/task/ceph.py index 0b5b4fe66..738ecfb42 100644 --- a/teuthology/task/ceph.py +++ b/teuthology/task/ceph.py @@ -849,17 +849,19 @@ def run_daemon(ctx, config, type_): '-f', '-i', id_] + if type_ in config.get('cpu_profile', []): + profile_path = '/var/log/ceph/profiling-logger/%s.%s.prof' % (type_, id_) + run_cmd.extend([ 'env', 'CPUPROFILE=%s' % profile_path ]) + if config.get('valgrind') is not None: valgrind_args = None if type_ in config['valgrind']: valgrind_args = config['valgrind'][type_] if name in config['valgrind']: valgrind_args = config['valgrind'][name] - run_cmd.extend(teuthology.get_valgrind_args(testdir, name, valgrind_args)) - - if type_ in config.get('cpu_profile', []): - profile_path = '/var/log/ceph/profiling-logger/%s.%s.prof' % (type_, id_) - run_cmd.extend([ 'env', 'CPUPROFILE=%s' % profile_path ]) + run_cmd = teuthology.get_valgrind_args(testdir, name, + run_cmd, + valgrind_args) run_cmd.extend(run_cmd_tail) diff --git a/teuthology/task/ceph_fuse.py b/teuthology/task/ceph_fuse.py index 66eb42863..eeff1c0e2 100644 --- a/teuthology/task/ceph_fuse.py +++ b/teuthology/task/ceph_fuse.py @@ -97,12 +97,11 @@ def task(ctx, config): ] if client_config.get('valgrind') is not None: - run_cmd.extend( - teuthology.get_valgrind_args( - testdir, - 'client.{id}'.format(id=id_), - client_config.get('valgrind'), - ) + run_cmd = teuthology.get_valgrind_args( + testdir, + 
'client.{id}'.format(id=id_), + run_cmd, + client_config.get('valgrind'), ) run_cmd.extend(run_cmd_tail) diff --git a/teuthology/task/rgw.py b/teuthology/task/rgw.py index c2ec706e7..be27c4872 100644 --- a/teuthology/task/rgw.py +++ b/teuthology/task/rgw.py @@ -176,12 +176,11 @@ def start_rgw(ctx, config): run.Raw('2>&1'), ] - run_cmd.extend( - teuthology.get_valgrind_args( - testdir, - client, - client_config.get('valgrind') - ) + run_cmd = teuthology.get_valgrind_args( + testdir, + client, + run_cmd, + client_config.get('valgrind') ) run_cmd.extend(run_cmd_tail) From 81c5e2813af9e2890858dc33b156cb53b2d2d497 Mon Sep 17 00:00:00 2001 From: Zack Cerza Date: Tue, 29 Jul 2014 13:59:32 -0600 Subject: [PATCH 37/77] Add wait_until_fuse_mounted() This was removed in a refactor in master, but it would take lots of work to backport the entire refactor. Signed-off-by: Zack Cerza --- teuthology/task/ceph_fuse.py | 40 +++++++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/teuthology/task/ceph_fuse.py b/teuthology/task/ceph_fuse.py index eeff1c0e2..3f80fcffd 100644 --- a/teuthology/task/ceph_fuse.py +++ b/teuthology/task/ceph_fuse.py @@ -1,8 +1,10 @@ import contextlib import logging import os +import time +from cStringIO import StringIO -from teuthology import misc as teuthology +from teuthology import misc from ..orchestra import run log = logging.getLogger(__name__) @@ -45,18 +47,18 @@ def task(ctx, config): log.info('Mounting ceph-fuse clients...') fuse_daemons = {} - testdir = teuthology.get_testdir(ctx) + testdir = misc.get_testdir(ctx) if config is None: config = dict(('client.{id}'.format(id=id_), None) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')) + for id_ in misc.all_roles_of_type(ctx.cluster, 'client')) elif isinstance(config, list): config = dict((name, None) for name in config) overrides = ctx.config.get('overrides', {}) - teuthology.deep_merge(config, overrides.get('ceph-fuse', {})) + misc.deep_merge(config, overrides.get('ceph-fuse', {})) - clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys())) + clients = list(misc.get_clients(ctx=ctx, roles=config.keys())) for id_, remote in clients: client_config = config.get("client.%s" % id_) @@ -97,7 +99,7 @@ def task(ctx, config): ] if client_config.get('valgrind') is not None: - run_cmd = teuthology.get_valgrind_args( + run_cmd = misc.get_valgrind_args( testdir, 'client.{id}'.format(id=id_), run_cmd, @@ -116,7 +118,7 @@ def task(ctx, config): for id_, remote in clients: mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - teuthology.wait_until_fuse_mounted( + wait_until_fuse_mounted( remote=remote, fuse=fuse_daemons[id_], mountpoint=mnt, @@ -174,3 +176,27 @@ def task(ctx, config): mnt, ], ) + + +def wait_until_fuse_mounted(remote, fuse, mountpoint): + while True: + proc = remote.run( + args=[ + 'stat', + '--file-system', + '--printf=%T\n', + '--', + mountpoint, + ], + stdout=StringIO(), + ) + fstype = proc.stdout.getvalue().rstrip('\n') + if fstype == 'fuseblk': + break + log.debug('ceph-fuse not yet mounted, got fs type {fstype!r}'.format(fstype=fstype)) + + # it shouldn't have exited yet; exposes some trivial problems + assert not fuse.exitstatus.ready() + + time.sleep(5) + log.info('ceph-fuse is mounted on %s', mountpoint) From aa84f26bb1d2e631f811e2b78d384fdfbf71b3e5 Mon Sep 17 00:00:00 2001 From: Zack Cerza Date: Wed, 6 Aug 2014 09:08:36 -0600 Subject: [PATCH 38/77] Move write_secret_file() into task_util/kclient.py Signed-off-by: Zack Cerza --- 
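The get_valgrind_args change in the patches above amounts to a new calling convention: instead of returning just a valgrind prefix for the caller to splice in, the helper now takes the command built so far and returns the fully wrapped command. A minimal sketch of the two styles, assuming only the signatures visible in the diffs themselves::

    # Old convention: the helper returned only the valgrind wrapper, and
    # each caller extended its own command list with it.
    run_cmd.extend(get_valgrind_args(testdir, name, valgrind_args))

    # New convention: the helper receives the partially built command and
    # returns the complete wrapped command, so callers rebind run_cmd.
    run_cmd = get_valgrind_args(testdir, name, run_cmd, valgrind_args)
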
teuthology/misc.py | 15 --------------- teuthology/task/kclient.py | 3 ++- teuthology/task_util/kclient.py | 22 ++++++++++++++++++++++ 3 files changed, 24 insertions(+), 16 deletions(-) create mode 100644 teuthology/task_util/kclient.py diff --git a/teuthology/misc.py b/teuthology/misc.py index 51ddaa88f..6030debce 100644 --- a/teuthology/misc.py +++ b/teuthology/misc.py @@ -706,21 +706,6 @@ def reconnect(ctx, timeout, remotes=None): log.debug('waited {elapsed}'.format(elapsed=str(time.time() - starttime))) time.sleep(1) -def write_secret_file(ctx, remote, role, keyring, filename): - testdir = get_testdir(ctx) - remote.run( - args=[ - '{tdir}/adjust-ulimits'.format(tdir=testdir), - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph-authtool', - '--name={role}'.format(role=role), - '--print-key', - keyring, - run.Raw('>'), - filename, - ], - ) def get_clients(ctx, roles): for role in roles: diff --git a/teuthology/task/kclient.py b/teuthology/task/kclient.py index 9fcec524d..13df5ab64 100644 --- a/teuthology/task/kclient.py +++ b/teuthology/task/kclient.py @@ -3,6 +3,7 @@ import os from teuthology import misc as teuthology +from teuthology.task_util.kclient import write_secret_file log = logging.getLogger(__name__) @@ -55,7 +56,7 @@ def task(ctx, config): keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) secret = '{tdir}/data/client.{id}.secret'.format(tdir=testdir, id=id_) - teuthology.write_secret_file(ctx, remote, 'client.{id}'.format(id=id_), + write_secret_file(ctx, remote, 'client.{id}'.format(id=id_), keyring, secret) remote.run( diff --git a/teuthology/task_util/kclient.py b/teuthology/task_util/kclient.py new file mode 100644 index 000000000..c6a259fc7 --- /dev/null +++ b/teuthology/task_util/kclient.py @@ -0,0 +1,22 @@ +from teuthology.misc import get_testdir +from teuthology.orchestra import run + + +def write_secret_file(ctx, remote, role, keyring, filename): + """ + Stash the keyring in the filename specified.
+ """ + testdir = get_testdir(ctx) + remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph-authtool', + '--name={role}'.format(role=role), + '--print-key', + keyring, + run.Raw('>'), + filename, + ], + ) From 624a8918b9c7969510103b7e100de9463263f68f Mon Sep 17 00:00:00 2001 From: Zack Cerza Date: Thu, 7 Aug 2014 08:24:46 -0600 Subject: [PATCH 39/77] Import teuthology tasks (dumpling branch) Signed-off-by: Zack Cerza --- .gitignore | 12 - README.rst | 490 ---------- bootstrap | 32 - build_qemu_image.sh | 61 -- check-syntax.sh | 18 - cleanup-and-unlock.sh | 4 - cleanup-run.sh | 7 - cleanup-user.sh | 6 - coverage/cov-analyze.sh | 51 - coverage/cov-init.sh | 35 - examples/3node_ceph.yaml | 15 - examples/3node_rgw.yaml | 24 - examples/parallel_example.yaml | 20 - hammer.sh | 31 - requirements.txt | 18 - roles/3-simple.yaml | 4 - roles/overrides.yaml | 10 - schedule_suite.sh | 132 --- setup.py | 32 - {teuthology => tasks}/__init__.py | 0 {teuthology/task => tasks}/admin_socket.py | 0 .../task => tasks}/apache.conf.template | 0 {teuthology/task => tasks}/autotest.py | 0 {teuthology/task => tasks}/blktrace.py | 0 {teuthology/task => tasks}/ceph.py | 0 {teuthology/task => tasks}/ceph_client.py | 0 {teuthology/task => tasks}/ceph_deploy.py | 0 {teuthology/task => tasks}/ceph_fuse.py | 0 {teuthology/task => tasks}/ceph_manager.py | 0 {teuthology/task => tasks}/chef.py | 0 {teuthology/task => tasks}/cifs_mount.py | 0 {teuthology/task => tasks}/cram.py | 0 {teuthology/task => tasks}/die_on_err.py | 0 .../task => tasks}/divergent_priors.py | 0 {teuthology/task => tasks}/dump_stuck.py | 0 .../task => tasks}/filestore_idempotent.py | 0 {teuthology/task => tasks}/kclient.py | 0 {teuthology/task => tasks}/locktest.py | 0 {teuthology/task => tasks}/lost_unfound.py | 0 {teuthology/task => tasks}/manypools.py | 0 {teuthology/task => tasks}/mds_thrash.py | 0 {teuthology/task => tasks}/metadata.yaml | 0 .../task => tasks}/mon_clock_skew_check.py | 0 {teuthology/task => tasks}/mon_recovery.py | 0 {teuthology/task => tasks}/mon_thrash.py | 0 {teuthology/task => tasks}/multibench.py | 0 .../task => tasks}/object_source_down.py | 0 {teuthology/task => tasks}/omapbench.py | 0 {teuthology/task => tasks}/osd_backfill.py | 0 .../task => tasks}/osd_failsafe_enospc.py | 0 {teuthology/task => tasks}/osd_recovery.py | 0 {teuthology/task => tasks}/peer.py | 0 .../task => tasks}/peering_speed_test.py | 0 {teuthology/task => tasks}/qemu.py | 0 {teuthology/task => tasks}/rados.py | 0 {teuthology/task => tasks}/radosbench.py | 0 {teuthology/task => tasks}/radosgw_admin.py | 0 .../task => tasks}/radosgw_admin_rest.py | 0 {teuthology/task => tasks}/radosgw_agent.py | 0 {teuthology/task => tasks}/rbd.py | 0 {teuthology/task => tasks}/rbd_fsx.py | 0 {teuthology/task => tasks}/recovery_bench.py | 0 {teuthology/task => tasks}/repair_test.py | 0 {teuthology/task => tasks}/rest_api.py | 0 {teuthology/task => tasks}/restart.py | 0 {teuthology/task => tasks}/rgw.py | 0 {teuthology/task => tasks}/rgw_logsocket.py | 0 {teuthology/task => tasks}/s3readwrite.py | 0 {teuthology/task => tasks}/s3roundtrip.py | 0 {teuthology/task => tasks}/s3tests.py | 0 {teuthology/task => tasks}/samba.py | 0 {teuthology/task => tasks}/scrub.py | 0 {teuthology/task => tasks}/scrub_test.py | 0 {teuthology/task => tasks}/thrashosds.py | 0 .../task => tasks}/userdata_setup.yaml | 0 .../task => tasks}/userdata_teardown.yaml | 0 .../orchestra => tasks/util}/__init__.py | 0 .../task_util => 
tasks/util}/kclient.py | 0 {teuthology/task_util => tasks/util}/rados.py | 0 {teuthology/task_util => tasks/util}/rgw.py | 0 .../task => tasks}/watch_notify_stress.py | 0 {teuthology/task => tasks}/workunit.py | 0 teuthology/ceph.conf.template | 48 - teuthology/contextutil.py | 43 - teuthology/coverage.py | 244 ----- teuthology/lock.py | 643 ------------- teuthology/locker/api.py | 196 ---- teuthology/locker/config.py | 25 - teuthology/locker/locker.py | 19 - teuthology/lockstatus.py | 26 - teuthology/misc.py | 853 ----------------- teuthology/nuke.py | 476 --------- teuthology/orchestra/cluster.py | 105 -- teuthology/orchestra/connection.py | 49 - teuthology/orchestra/monkey.py | 47 - teuthology/orchestra/remote.py | 259 ----- teuthology/orchestra/run.py | 282 ------ teuthology/orchestra/test/__init__.py | 0 teuthology/orchestra/test/test_cluster.py | 207 ---- teuthology/orchestra/test/test_connection.py | 77 -- teuthology/orchestra/test/test_integration.py | 73 -- teuthology/orchestra/test/test_remote.py | 60 -- teuthology/orchestra/test/test_run.py | 392 -------- teuthology/orchestra/test/util.py | 12 - teuthology/parallel.py | 115 --- teuthology/queue.py | 150 --- teuthology/run.py | 338 ------- teuthology/run_tasks.py | 75 -- teuthology/safepath.py | 42 - teuthology/suite.py | 405 -------- teuthology/task/__init__.py | 0 teuthology/task/adjust-ulimits | 5 - teuthology/task/args.py | 37 - teuthology/task/chdir-coredump | 8 - teuthology/task/clock.py | 93 -- teuthology/task/daemon-helper | 84 -- teuthology/task/edit_sudoers.sh | 10 - teuthology/task/exec.py | 44 - teuthology/task/hadoop.py | 526 ---------- teuthology/task/install.py | 902 ------------------ teuthology/task/interactive.py | 33 - teuthology/task/internal.py | 500 ---------- teuthology/task/kcon_most | 39 - teuthology/task/kcon_most.py | 64 -- teuthology/task/kernel.py | 541 ----------- teuthology/task/knfsd.py | 137 --- teuthology/task/localdir.py | 64 -- teuthology/task/lockfile.py | 230 ----- teuthology/task/mpi.py | 106 -- teuthology/task/nfs.py | 127 --- teuthology/task/nop.py | 10 - teuthology/task/parallel.py | 54 -- teuthology/task/parallel_example.py | 55 -- teuthology/task/pexec.py | 143 --- teuthology/task/proc_thrasher.py | 63 -- teuthology/task/sequential.py | 52 - teuthology/task/sleep.py | 29 - teuthology/task/ssh_keys.py | 163 ---- teuthology/task/swift.py | 235 ----- teuthology/task/tasktest.py | 48 - teuthology/task/timer.py | 43 - teuthology/task/valgrind.supp | 175 ---- teuthology/task_util/__init__.py | 0 teuthology/test/__init__.py | 0 teuthology/test/test_get_distro.py | 29 - teuthology/test/test_misc.py | 29 - teuthology/test/test_safepath.py | 56 -- watch-suite.sh | 4 - 148 files changed, 11001 deletions(-) delete mode 100644 .gitignore delete mode 100644 README.rst delete mode 100755 bootstrap delete mode 100755 build_qemu_image.sh delete mode 100755 check-syntax.sh delete mode 100755 cleanup-and-unlock.sh delete mode 100755 cleanup-run.sh delete mode 100755 cleanup-user.sh delete mode 100755 coverage/cov-analyze.sh delete mode 100755 coverage/cov-init.sh delete mode 100644 examples/3node_ceph.yaml delete mode 100644 examples/3node_rgw.yaml delete mode 100644 examples/parallel_example.yaml delete mode 100755 hammer.sh delete mode 100644 requirements.txt delete mode 100644 roles/3-simple.yaml delete mode 100644 roles/overrides.yaml delete mode 100755 schedule_suite.sh delete mode 100644 setup.py rename {teuthology => tasks}/__init__.py (100%) rename {teuthology/task => tasks}/admin_socket.py (100%) 
rename {teuthology/task => tasks}/apache.conf.template (100%) rename {teuthology/task => tasks}/autotest.py (100%) rename {teuthology/task => tasks}/blktrace.py (100%) rename {teuthology/task => tasks}/ceph.py (100%) rename {teuthology/task => tasks}/ceph_client.py (100%) rename {teuthology/task => tasks}/ceph_deploy.py (100%) rename {teuthology/task => tasks}/ceph_fuse.py (100%) rename {teuthology/task => tasks}/ceph_manager.py (100%) rename {teuthology/task => tasks}/chef.py (100%) rename {teuthology/task => tasks}/cifs_mount.py (100%) rename {teuthology/task => tasks}/cram.py (100%) rename {teuthology/task => tasks}/die_on_err.py (100%) rename {teuthology/task => tasks}/divergent_priors.py (100%) rename {teuthology/task => tasks}/dump_stuck.py (100%) rename {teuthology/task => tasks}/filestore_idempotent.py (100%) rename {teuthology/task => tasks}/kclient.py (100%) rename {teuthology/task => tasks}/locktest.py (100%) rename {teuthology/task => tasks}/lost_unfound.py (100%) rename {teuthology/task => tasks}/manypools.py (100%) rename {teuthology/task => tasks}/mds_thrash.py (100%) rename {teuthology/task => tasks}/metadata.yaml (100%) rename {teuthology/task => tasks}/mon_clock_skew_check.py (100%) rename {teuthology/task => tasks}/mon_recovery.py (100%) rename {teuthology/task => tasks}/mon_thrash.py (100%) rename {teuthology/task => tasks}/multibench.py (100%) rename {teuthology/task => tasks}/object_source_down.py (100%) rename {teuthology/task => tasks}/omapbench.py (100%) rename {teuthology/task => tasks}/osd_backfill.py (100%) rename {teuthology/task => tasks}/osd_failsafe_enospc.py (100%) rename {teuthology/task => tasks}/osd_recovery.py (100%) rename {teuthology/task => tasks}/peer.py (100%) rename {teuthology/task => tasks}/peering_speed_test.py (100%) rename {teuthology/task => tasks}/qemu.py (100%) rename {teuthology/task => tasks}/rados.py (100%) rename {teuthology/task => tasks}/radosbench.py (100%) rename {teuthology/task => tasks}/radosgw_admin.py (100%) rename {teuthology/task => tasks}/radosgw_admin_rest.py (100%) rename {teuthology/task => tasks}/radosgw_agent.py (100%) rename {teuthology/task => tasks}/rbd.py (100%) rename {teuthology/task => tasks}/rbd_fsx.py (100%) rename {teuthology/task => tasks}/recovery_bench.py (100%) rename {teuthology/task => tasks}/repair_test.py (100%) rename {teuthology/task => tasks}/rest_api.py (100%) rename {teuthology/task => tasks}/restart.py (100%) rename {teuthology/task => tasks}/rgw.py (100%) rename {teuthology/task => tasks}/rgw_logsocket.py (100%) rename {teuthology/task => tasks}/s3readwrite.py (100%) rename {teuthology/task => tasks}/s3roundtrip.py (100%) rename {teuthology/task => tasks}/s3tests.py (100%) rename {teuthology/task => tasks}/samba.py (100%) rename {teuthology/task => tasks}/scrub.py (100%) rename {teuthology/task => tasks}/scrub_test.py (100%) rename {teuthology/task => tasks}/thrashosds.py (100%) rename {teuthology/task => tasks}/userdata_setup.yaml (100%) rename {teuthology/task => tasks}/userdata_teardown.yaml (100%) rename {teuthology/orchestra => tasks/util}/__init__.py (100%) rename {teuthology/task_util => tasks/util}/kclient.py (100%) rename {teuthology/task_util => tasks/util}/rados.py (100%) rename {teuthology/task_util => tasks/util}/rgw.py (100%) rename {teuthology/task => tasks}/watch_notify_stress.py (100%) rename {teuthology/task => tasks}/workunit.py (100%) delete mode 100644 teuthology/ceph.conf.template delete mode 100644 teuthology/contextutil.py delete mode 100644 teuthology/coverage.py delete 
mode 100644 teuthology/lock.py delete mode 100644 teuthology/locker/api.py delete mode 100644 teuthology/locker/config.py delete mode 100755 teuthology/locker/locker.py delete mode 100644 teuthology/lockstatus.py delete mode 100644 teuthology/misc.py delete mode 100644 teuthology/nuke.py delete mode 100644 teuthology/orchestra/cluster.py delete mode 100644 teuthology/orchestra/connection.py delete mode 100644 teuthology/orchestra/monkey.py delete mode 100644 teuthology/orchestra/remote.py delete mode 100644 teuthology/orchestra/run.py delete mode 100644 teuthology/orchestra/test/__init__.py delete mode 100644 teuthology/orchestra/test/test_cluster.py delete mode 100644 teuthology/orchestra/test/test_connection.py delete mode 100644 teuthology/orchestra/test/test_integration.py delete mode 100644 teuthology/orchestra/test/test_remote.py delete mode 100644 teuthology/orchestra/test/test_run.py delete mode 100644 teuthology/orchestra/test/util.py delete mode 100644 teuthology/parallel.py delete mode 100644 teuthology/queue.py delete mode 100644 teuthology/run.py delete mode 100644 teuthology/run_tasks.py delete mode 100644 teuthology/safepath.py delete mode 100644 teuthology/suite.py delete mode 100644 teuthology/task/__init__.py delete mode 100755 teuthology/task/adjust-ulimits delete mode 100644 teuthology/task/args.py delete mode 100644 teuthology/task/chdir-coredump delete mode 100644 teuthology/task/clock.py delete mode 100755 teuthology/task/daemon-helper delete mode 100755 teuthology/task/edit_sudoers.sh delete mode 100644 teuthology/task/exec.py delete mode 100644 teuthology/task/hadoop.py delete mode 100644 teuthology/task/install.py delete mode 100644 teuthology/task/interactive.py delete mode 100644 teuthology/task/internal.py delete mode 100755 teuthology/task/kcon_most delete mode 100644 teuthology/task/kcon_most.py delete mode 100644 teuthology/task/kernel.py delete mode 100644 teuthology/task/knfsd.py delete mode 100644 teuthology/task/localdir.py delete mode 100644 teuthology/task/lockfile.py delete mode 100644 teuthology/task/mpi.py delete mode 100644 teuthology/task/nfs.py delete mode 100644 teuthology/task/nop.py delete mode 100644 teuthology/task/parallel.py delete mode 100644 teuthology/task/parallel_example.py delete mode 100644 teuthology/task/pexec.py delete mode 100644 teuthology/task/proc_thrasher.py delete mode 100644 teuthology/task/sequential.py delete mode 100644 teuthology/task/sleep.py delete mode 100644 teuthology/task/ssh_keys.py delete mode 100644 teuthology/task/swift.py delete mode 100644 teuthology/task/tasktest.py delete mode 100644 teuthology/task/timer.py delete mode 100644 teuthology/task/valgrind.supp delete mode 100644 teuthology/task_util/__init__.py delete mode 100644 teuthology/test/__init__.py delete mode 100644 teuthology/test/test_get_distro.py delete mode 100644 teuthology/test/test_misc.py delete mode 100644 teuthology/test/test_safepath.py delete mode 100755 watch-suite.sh diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 05c434708..000000000 --- a/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -*~ -.#* -## the next line needs to start with a backslash to avoid looking like -## a comment -\#*# -.*.swp - -*.pyc -*.pyo - -/*.egg-info -/virtualenv diff --git a/README.rst b/README.rst deleted file mode 100644 index f2d6e3cbc..000000000 --- a/README.rst +++ /dev/null @@ -1,490 +0,0 @@ -================================================== - `Teuthology` -- The Ceph integration test runner 
-================================================== - -The Ceph project needs automated tests. Because Ceph is a highly -distributed system, and has active kernel development, its testing -requirements are quite different from e.g. typical LAMP web -applications. Nothing out there seemed to handle our requirements, -so we wrote our own framework, called `Teuthology`. - - -Overview -======== - -Teuthology runs a given set of Python functions (`tasks`), with an SSH -connection to every host participating in the test. The SSH connection -uses `Paramiko `__, a native Python -client for the SSH2 protocol, and this allows us to e.g. run multiple -commands inside a single SSH connection, to speed up test -execution. Tests can use `gevent `__ to -perform actions concurrently or in the background. - - -Build -===== -``teuthology`` is not meant to be distributed as a library, therefore we depend -on the pinned dependencies listed in ``requirements.txt``, the ``setup.py`` -will not list any and will only be there to install the package entry points -(a.k.a teuthology's scripts). - - -``bootstrap`` for Ubuntu Systems --------------------------------- -A ``boostrap`` script is provided for automated builds/execution of teuthology -itself. You can run it directly **only if you are using Ubuntu**. - -Teuthology uses several Python packages that are not in the standard -library. To make the dependencies easier to get right, we use a -`virtualenv` to manage them. To get started, ensure you have the -``virtualenv`` and ``pip`` programs installed; e.g. on Debian/Ubuntu:: - - sudo apt-get install python-dev python-virtualenv python-pip libevent-dev - -and then run:: - - ./bootstrap - - -osx ---- - -.. note:: These instructions assume you are using `homebrew `_ - -As always, create a ``virtualenv`` specific to ``teuthology`` and make sure it -is activated before proceeding (location doesn't matter, we use an example -location):: - - mkdir ~/.virtualenvs - virtualenv ~/.virtualenvs/teuthology - source ~/.virtualenvs/teuthology/bin/activate - -Install the system dependencies:: - - brew install libvirt mysql libevent - -``libvirt`` does not link the python bindings so you need to do this step -manually:: - - $ cd /Library/Python/{pyversion}/site-packages - $ sudo ln -s /usr/local/Cellar/libvirt/{version}/lib/python{pyversion}/site-packages/* . - -Make sure you are able to import ``libvirt`` without error:: - - python -c "import libvirt" - -Finally, install the teuthology package and ``requirements.txt``:: - - $ python setup.py develop - $ pip install -r requirements.txt - - -Generic install ---------------- -These instructions should help get ``teuthology`` installed properly in -a system that is not OSX or Debian-based. - -Install all the system dependencies needed: - -* mysql client -* libevent -* libvirt (with the Python bindings) - -Install Python packaging tools: - -* pip -* virtualenv - -In some cases, depending on the OS, you will need a python development package -with some build helpers that are required to build packages. In Ubuntu, this is -the ``python-dev`` package. 
- -With a dedicated ``virtualenv`` activated, install the teuthology package and -``requirements.txt``:: - - $ python setup.py develop - $ pip install -r requirements.txt - - -Test configuration -================== - -An integration test run takes three items of configuration: - -- ``targets``: what hosts to run on; this is a dictionary mapping - hosts to ssh host keys, like: - "username@hostname.example.com: ssh-rsa long_hostkey_here" -- ``roles``: how to use the hosts; this is a list of lists, where each - entry lists all the roles to be run on a single host; for example, a - single entry might say ``[mon.1, osd.1]`` -- ``tasks``: how to set up the cluster and what tests to run on it; - see below for examples - -The format for this configuration is `YAML `__, a -structured data format that is still human-readable and editable. - -For example, a full config for a test run that sets up a three-machine -cluster, mounts Ceph via ``ceph-fuse``, and leaves you at an interactive -Python prompt for manual exploration (and enabling you to SSH in to -the nodes & use the live cluster ad hoc), might look like this:: - - roles: - - [mon.0, mds.0, osd.0] - - [mon.1, osd.1] - - [mon.2, client.0] - targets: - ubuntu@host07.example.com: ssh-rsa host07_ssh_key - ubuntu@host08.example.com: ssh-rsa host08_ssh_key - ubuntu@host09.example.com: ssh-rsa host09_ssh_key - tasks: - - install: - - ceph: - - ceph-fuse: [client.0] - - interactive: - -The number of entries under ``roles`` and ``targets`` must match. - -Note the colon after every task name in the ``tasks`` section. - -The ``install`` task needs to precede all other tasks. - -The listed targets need resolvable hostnames. If you do not have a DNS server -running, you can add entries to ``/etc/hosts``. You also need to be able to SSH -in to the listed targets without passphrases, and the remote user needs to have -passphraseless `sudo` access. Note that the ssh keys at the end of the -``targets`` entries are the public ssh keys for the hosts. On Ubuntu, these -are located at /etc/ssh/ssh_host_rsa_key.pub - -If you'd save the above file as ``example.yaml``, you could run -teuthology on it by saying:: - - ./virtualenv/bin/teuthology example.yaml - -You can also pass the ``-v`` option, for more verbose execution. See -``teuthology --help`` for more. - - -Multiple config files ---------------------- - -You can pass multiple files as arguments to ``teuthology``. Each one -will be read as a config file, and their contents will be merged. This -allows you to e.g. share definitions of what a "simple 3 node cluster" -is. The source tree comes with ``roles/3-simple.yaml``, so we could -skip the ``roles`` section in the above ``example.yaml`` and then -run:: - - ./virtualenv/bin/teuthology roles/3-simple.yaml example.yaml - - -Reserving target machines -------------------------- - -Before locking machines will work, you must create a .teuthology.yaml -file in your home directory that sets a lock_server, i.e.:: - - lock_server: http://host.example.com:8080/lock - -Teuthology automatically locks nodes for you if you specify the -``--lock`` option. Without this option, you must specify machines to -run on in a ``targets.yaml`` file, and lock them using -teuthology-lock. - -Note that the default owner of a machine is ``USER@HOST``. -You can override this with the ``--owner`` option when running -teuthology or teuthology-lock. - -With teuthology-lock, you can also add a description, so you can -remember which tests you were running on them. 
This can be done when -locking or unlocking machines, or as a separate action with the -``--update`` option. To lock 3 machines and set a description, run:: - - ./virtualenv/bin/teuthology-lock --lock-many 3 --desc 'test foo' - -If machines become unusable for some reason, you can mark them down:: - - ./virtualenv/bin/teuthology-lock --update --status down machine1 machine2 - -To see the status of all machines, use the ``--list`` option. This can -be restricted to particular machines as well:: - - ./virtualenv/bin/teuthology-lock --list machine1 machine2 - - -Tasks -===== - -A task is a Python module in the ``teuthology.task`` package, with a -callable named ``task``. It gets the following arguments: - -- ``ctx``: a context that is available through the lifetime of the - test run, and has useful attributes such as ``cluster``, letting the - task access the remote hosts. Tasks can also store their internal - state here. (TODO beware namespace collisions.) -- ``config``: the data structure after the colon in the config file, - e.g. for the above ``ceph-fuse`` example, it would be a list like - ``["client.0"]``. - -Tasks can be simple functions, called once in the order they are -listed in ``tasks``. But sometimes, it makes sense for a task to be -able to clean up after itself; for example, unmounting the filesystem -after a test run. A task callable that returns a Python `context -manager -`__ -will have the manager added to a stack, and the stack will be unwound -at the end of the run. This means the cleanup actions are run in -reverse order, both on success and failure. A nice way of writing -context managers is the ``contextlib.contextmanager`` decorator; look -for that string in the existing tasks to see examples, and note where -they use ``yield``. - -Further details on some of the more complex tasks such as install or workunit -can be obtained via python help. For example:: - - >>> import teuthology.task.workunit - >>> help(teuthology.task.workunit) - -displays a page of more documentation and more concrete examples. - -Some of the more important / commonly used tasks include: - -* ``chef``: Run the chef task. -* ``install``: by default, the install task goes to gitbuilder and installs the results of the latest build. You can, however, add additional parameters to the test configuration to cause it to install any branch, SHA, archive or URL. The following are valid parameters. - -- ``branch``: specify a branch (bobtail, cuttlefish...) -- ``flavor``: specify a flavor (next, unstable...). Flavors can be thought of as - subsets of branches. Sometimes (unstable, for example) they may have - a predefined meaning. -- ``project``: specify a project (ceph, samba...) -- ``sha1``: install the build with this sha1 value. -- ``tag``: specify a tag/identifying text for this build (v47.2, v48.1...) -* ``ceph``: Bring up Ceph - -* ``overrides``: override behavior. Typically, this includes sub-tasks being overridden. Sub-tasks can nest further information. For example, overrides of install tasks are project specific, so the following section of a yaml file would cause all ceph installation to default into using the cuttlefish branch:: - - overrides: - install: - ceph: - branch: cuttlefish - -* ``workunit``: workunits are a way of grouping tasks and behavior on targets. -* ``sequential``: group the sub-tasks into a unit where the sub-tasks run sequentially as listed. -* ``parallel``: group the sub-tasks into a unit where the sub-task all run in parallel. - -Sequential and parallel tasks can be nested. 
Tasks run sequentially if not specified. - -The above list is a very incomplete description of the tasks available on -teuthology. The teuthology/task subdirectory contains all the python files -that implement tasks. -Many of these tasks are used to run shell scripts that are defined in the -ceph/ceph-qa-suite. - -Troubleshooting -=============== - -Sometimes when a bug triggers, instead of automatic cleanup, you want -to explore the system as is. Adding a top-level:: - - interactive-on-error: true - -as a config file for ``teuthology`` will make that possible. With that -option, any *task* that fails, will have the ``interactive`` task -called after it. This means that before any cleanup happens, you get a -chance to inspect the system -- both through Teuthology and via extra -SSH connections -- and the cleanup completes only when you choose so. -Just exit the interactive Python session to continue the cleanup. - -Note that this only catches exceptions *between* the tasks. If a task -calls multiple subtasks, e.g. with ``contextutil.nested``, those -cleanups *will* be performed. Later on, we can let tasks communicate -the subtasks they wish to invoke to the top-level runner, avoiding -this issue. - -Test Sandbox Directory -====================== - -Teuthology currently places most test files and mount points in a -sandbox directory, defaulting to ``/home/$USER/cephtest``. To change -the location of the sandbox directory, the following option can be -specified in ``$HOME/.teuthology.yaml``:: - - test_path: - -======= ->>>>>>> 5acc57f... remove basedir/testdir distinction - -VIRTUAL MACHINE SUPPORT -======================= - -Teuthology also supports virtual machines, which can function like -physical machines but differ in the following ways: - -VPSHOST: --------- - -A new entry, vpshost, has been added to the teuthology database of -available machines. For physical machines, this value is null. For -virtual machines, this entry is the name of the physical machine that -that virtual machine resides on. - -There are fixed "slots" for virtual machines that appear in the teuthology -database. These slots have a machine type of vps and can be locked like -any other machine. The existence of a vpshost field is how teuthology -knows whether or not a database entry represents a physical or a virtual -machine. 
- -The following needs to be set in ~/.libvirt/libvirt.conf in order to get the -right virtual machine associations for the Inktank lab:: - - uri_aliases = [ - 'mira001=qemu+ssh://ubuntu@mira001.front.sepia.ceph.com/system?no_tty', - 'mira003=qemu+ssh://ubuntu@mira003.front.sepia.ceph.com/system?no_tty', - 'mira004=qemu+ssh://ubuntu@mira004.front.sepia.ceph.com/system?no_tty', - 'mira006=qemu+ssh://ubuntu@mira006.front.sepia.ceph.com/system?no_tty', - 'mira007=qemu+ssh://ubuntu@mira007.front.sepia.ceph.com/system?no_tty', - 'mira008=qemu+ssh://ubuntu@mira008.front.sepia.ceph.com/system?no_tty', - 'mira009=qemu+ssh://ubuntu@mira009.front.sepia.ceph.com/system?no_tty', - 'mira010=qemu+ssh://ubuntu@mira010.front.sepia.ceph.com/system?no_tty', - 'mira011=qemu+ssh://ubuntu@mira011.front.sepia.ceph.com/system?no_tty', - 'mira013=qemu+ssh://ubuntu@mira013.front.sepia.ceph.com/system?no_tty', - 'mira014=qemu+ssh://ubuntu@mira014.front.sepia.ceph.com/system?no_tty', - 'mira015=qemu+ssh://ubuntu@mira015.front.sepia.ceph.com/system?no_tty', - 'mira017=qemu+ssh://ubuntu@mira017.front.sepia.ceph.com/system?no_tty', - 'mira018=qemu+ssh://ubuntu@mira018.front.sepia.ceph.com/system?no_tty', - 'mira020=qemu+ssh://ubuntu@mira020.front.sepia.ceph.com/system?no_tty', - 'vercoi01=qemu+ssh://ubuntu@vercoi01.front.sepia.ceph.com/system?no_tty', - 'vercoi02=qemu+ssh://ubuntu@vercoi02.front.sepia.ceph.com/system?no_tty', - 'vercoi03=qemu+ssh://ubuntu@vercoi03.front.sepia.ceph.com/system?no_tty', - 'vercoi04=qemu+ssh://ubuntu@vercoi04.front.sepia.ceph.com/system?no_tty', - 'vercoi05=qemu+ssh://ubuntu@vercoi05.front.sepia.ceph.com/system?no_tty', - 'vercoi06=qemu+ssh://ubuntu@vercoi06.front.sepia.ceph.com/system?no_tty', - 'vercoi07=qemu+ssh://ubuntu@vercoi07.front.sepia.ceph.com/system?no_tty', - 'vercoi08=qemu+ssh://ubuntu@vercoi08.front.sepia.ceph.com/system?no_tty', - 'senta01=qemu+ssh://ubuntu@senta01.front.sepia.ceph.com/system?no_tty', - 'senta02=qemu+ssh://ubuntu@senta02.front.sepia.ceph.com/system?no_tty', - 'senta03=qemu+ssh://ubuntu@senta03.front.sepia.ceph.com/system?no_tty', - 'senta04=qemu+ssh://ubuntu@senta04.front.sepia.ceph.com/system?no_tty', - ] - -DOWNBURST: ----------- - -When a virtual machine is locked, downburst is run on that machine to -install a new image. This allows the user to set different virtual -OSes to be installed on the newly created virtual machine. Currently -the default virtual machine is ubuntu (precise). A different vm installation -can be set using the ``--os-type`` option in ``teuthology.lock``. - -When a virtual machine is unlocked, downburst destroys the image on the -machine. - -Temporary yaml files are used to downburst a virtual machine. A typical -yaml file will look like this:: - - downburst: - cpus: 1 - disk-size: 30G - distro: centos - networks: - - {source: front} - ram: 4G - -These values are used by downburst to create the virtual machine. - -HOST KEYS: ----------- - -Because teuthology reinstalls a new machine, a new hostkey is generated. After -locking, once a connection is established to the new machine, -``teuthology-lock`` with the ``--list`` or ``--list-targets`` options will -display the new keys. When vps machines are locked using the ``--lock-many`` -option, a message is displayed indicating that ``--list-targets`` should be run -later. - -CEPH-QA-CHEF: -------------- - -Once teuthology starts after a new vm is installed, teuthology -checks for the existence of ``/ceph-qa-ready``. 
If this file is not -present, ``ceph-qa-chef`` is run when teuthology first comes up. - -ASSUMPTIONS: ------------- - -It is assumed that downburst is on the user's ``$PATH``. - - -Test Suites -=========== - -Most of the current teuthology test suite execution scripts automatically -download their tests from the master branch of the appropriate github -repository. People who want to run experimental test suites usually modify -the download method in the ``teuthology/task`` script to use some other branch -or repository. This should be generalized in later teuthology releases. -Teuthology QA suites can be found in ``src/ceph-qa-suite``. Make sure that this -directory exists in your source tree before running the test suites. - -Each suite name is determined by the name of the directory in ``ceph-qa-suite`` -that contains that suite. The directory contains subdirectories and yaml files, -which, when assembled, produce valid tests that can be run. The test suite -application generates combinations of these files and thus ends up running -a set of tests based off the data in the directory for the suite. - -To run a suite, enter:: - - ./schedule_suite.sh