From 1a5823b4888f611884e649a37f9abc768f6c721e Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Tue, 14 Nov 2017 18:40:35 -0500
Subject: [PATCH 01/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/crawler.py                  |  12 ++-
 crawler/safe_containers_crawler.py  | 162 ++++++++++++++++++++++++++++
 crawler/utils/plugincont/Dockerfile |   0
 3 files changed, 173 insertions(+), 1 deletion(-)
 create mode 100644 crawler/safe_containers_crawler.py
 create mode 100644 crawler/utils/plugincont/Dockerfile

diff --git a/crawler/crawler.py b/crawler/crawler.py
index f4faa8cd..8e53708b 100755
--- a/crawler/crawler.py
+++ b/crawler/crawler.py
@@ -93,11 +93,12 @@ def main():
             Modes.OUTVM,
             Modes.MOUNTPOINT,
             Modes.OUTCONTAINER,
+            Modes.OUTCONTAINERSAFE,
             Modes.MESOS,
         ],
         default=Modes.INVM,
         help='The crawler mode: '
-             '{INVM,OUTVM,MOUNTPOINT,OUTCONTAINER}. '
+             '{INVM,OUTVM,MOUNTPOINT,OUTCONTAINER,OUTCONTAINERSAFE}. '
              'Defaults to INVM',
     )
     parser.add_argument(
@@ -222,6 +223,15 @@ def main():
             host_namespace=args.namespace,
             plugin_places=args.plugin_places,
             options=options)
+    elif args.crawlmode == 'OUTCONTAINERSAFE':
+        crawler = SafeContainersCrawler(
+            features=args.features,
+            environment=args.environment,
+            user_list=args.crawlContainers,
+            host_namespace=args.namespace,
+            plugin_places=args.plugin_places,
+            frequency=args.frequency,
+            options=options)
     else:
         raise NotImplementedError('Invalid crawlmode')
diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
new file mode 100644
index 00000000..beea0761
--- /dev/null
+++ b/crawler/safe_containers_crawler.py
@@ -0,0 +1,162 @@
+import ast
+import sys
+from containers import poll_containers, get_containers
+import plugins_manager
+from base_crawler import BaseCrawler, BaseFrame
+import utils.dockerutils
+
+
+class ContainerFrame(BaseFrame):
+
+    def __init__(self, feature_types, container):
+        BaseFrame.__init__(self, feature_types)
+        self.metadata.update(container.get_metadata_dict())
+        self.metadata['system_type'] = 'container'
+
+
+class SafeContainersCrawler(BaseCrawler):
+
+    def __init__(self,
+                 features=['os', 'cpu'],
+                 environment='cloudsight',
+                 user_list='ALL',
+                 host_namespace='',
+                 plugin_places=['plugins'],
+                 frequency=-1,
+                 options={}):
+
+        BaseCrawler.__init__(
+            self,
+            features=features,
+            plugin_places=plugin_places,
+            options=options)
+        plugins_manager.reload_env_plugin(environment, plugin_places)
+        plugins_manager.reload_container_crawl_plugins(
+            features, plugin_places, options)
+        self.plugins = plugins_manager.get_container_crawl_plugins(features)
+        self.environment = environment
+        self.host_namespace = host_namespace
+        self.user_list = user_list
+        self.frequency = frequency
+
+
+    def create_plugincont(self, guestcont):
+        #TODO: build plugin cont from Dockerfile first
+
+        #plugincont_image = 'plugincont_image'
+        #pip install docker=2.0.0
+        #client.containers.run("ruby", "tail -f /dev/null", pid_mode='container:d98cd4f1e518e671bc376ac429146937fbec9df7dbbfbb389e615a90c23ca27a', detach=True)
+        # maybe userns_mode='host'
+        guestcont_id = guestcont.long_id
+        guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path(guestcont_id)
+        plugincont_image = 'crawler_plugins12'
+        plugincont = None
+        seccomp_profile_path = os.getcwd() + '/utils/plugincont/seccomp-no-ptrace.json'
+        client = docker.from_env()
+        try:
+            plugincont = client.containers.run(
+                image=plugincont_image,
+                name='plugin_cont',
+                user='user1',
+                command="/usr/bin/python2.7 crawler/crawler_lite.py --frequency="+frequency,
+                pid_mode='container:'+guestcont_id,
+                network_mode='container:'+guestcont_id,
+                cap_add=["SYS_PTRACE","DAC_READ_SEARCH"],
+                security_opt=['seccomp='+seccomp_profile_path],
+                volumes={guestcont_rootfs:{'bind':'/rootfs_local','mode':'ro'}},
+                detach=True)
+        except:
+            print sys.exc_info()[0]
+        guestcont.plugincont = plugincont
+
+    def setup_plugincont(self, guestcont):
+        self.create_plugincont(guestcont)
+        if guestcont.plugincont is not None:
+            plugincont_id = guestcont.plugincont.id
+            # TODO:
+
+    # Return list of features after reading frame from plugin cont
+    def get_plugincont_features(self, guestcont):
+        features = []
+        if guestcont.plugincont is None:
+            self.setup_plugincont(guestcont)
+            if guestcont.plugincont is None:
+                return features
+
+        plugincont_id = guestcont.plugincont.id
+        rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id)
+        frame_dir = rootfs+'/home/user1/'
+        frame_list = os.listdir(frame_dir)
+        frame_list.sort(key=int)
+
+        if frame_list != []:
+            try:
+                earliest_frame_file = frame_dir+frame_list[0]
+                fd = open(earliest_frame_file)
+                for feature_line in fd.readlines():
+                    (type, key, val) = feature_line.strip().split()
+                    features.append((key, ast.literal_eval(val), type))
+                fd.close()
+                os.remove(earliest_frame_file)
+            except:
+                print sys.exc_info()[0]
+
+        return features
+
+
+    def crawl_container(self, container, ignore_plugin_exception=True):
+        """
+        Crawls a specific container and returns a Frame for it.
+
+        :param container: a Container object
+        :param ignore_plugin_exception: just ignore exceptions in a plugin
+        :return: a Frame object. The returned frame can have 0 features and
+        still have metadata. This can occur if there were no plugins, or all
+        the plugins raised an exception (and ignore_plugin_exception was True).
+        """
+        frame = ContainerFrame(self.features, container)
+
+        # collect plugin crawl output for privileged plugins run at host
+        for (plugin_obj, plugin_args) in self.plugins:
+            try:
+                frame.add_features(
+                    plugin_obj.crawl(
+                        container_id=container.long_id,
+                        **plugin_args))
+            except Exception as exc:
+                if not ignore_plugin_exception:
+                    raise exc
+
+        # collect plugin crawl output from inside plugin sidecar container
+        try:
+            frame.add_features(self.get_plugincont_features(container))
+        except Exception as exc:
+            if not ignore_plugin_exception:
+                raise exc
+
+        return frame
+
+    def polling_crawl(self, timeout, ignore_plugin_exception=True):
+        """
+        Crawls any container created before `timeout` seconds have elapsed.
+
+        :param timeout: seconds to wait for new containers
+        :param ignore_plugin_exception: just ignore exceptions in a plugin
+        :return: a Frame object
+        """
+        # Not implemented
+        sleep(timeout)
+        return None
+
+    def crawl(self, ignore_plugin_exception=True):
+        """
+        Crawls all containers.
+
+        :param ignore_plugin_exception: just ignore exceptions in a plugin
+        :return: a list generator of Frame objects
+        """
+        containers_list = get_containers(
+            user_list=self.user_list,
+            host_namespace=self.host_namespace)
+        for container in containers_list:
+            yield self.crawl_container(container, ignore_plugin_exception)
diff --git a/crawler/utils/plugincont/Dockerfile b/crawler/utils/plugincont/Dockerfile
new file mode 100644
index 00000000..e69de29b
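Notes on PATCH 01: the mechanism being introduced is a per-guest "plugin" sidecar container that joins the guest's pid and network namespaces and gets the guest's rootfs bind-mounted read-only, so crawler plugins can run without host privileges and still observe the guest. A minimal standalone sketch of that launch with docker-py >= 2.0 (the image name, guest id and rootfs path below are illustrative placeholders, not the values this series uses):

    import docker

    client = docker.from_env()
    guest_id = 'd98cd4f1e518'  # hypothetical guest container id
    guest_rootfs = '/var/lib/docker/aufs/mnt/' + guest_id  # storage-driver dependent

    sidecar = client.containers.run(
        image='plugincont_image',              # illustrative image name
        command='tail -f /dev/null',
        pid_mode='container:' + guest_id,      # join the guest's pid namespace
        network_mode='container:' + guest_id,  # join its network namespace
        cap_add=['SYS_PTRACE', 'DAC_READ_SEARCH'],
        volumes={guest_rootfs: {'bind': '/rootfs_local', 'mode': 'ro'}},
        detach=True)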
From 3fbb25018a4e3e41bbc3ed0876730e2651bc6321 Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Thu, 16 Nov 2017 10:02:06 -0500
Subject: [PATCH 02/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/safe_containers_crawler.py | 59 +++++++++++++++++++++++++++++-
 1 file changed, 58 insertions(+), 1 deletion(-)

diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
index beea0761..98a4dcc1 100644
--- a/crawler/safe_containers_crawler.py
+++ b/crawler/safe_containers_crawler.py
@@ -1,5 +1,7 @@
 import ast
 import sys
+import docker
+import iptc
 from containers import poll_containers, get_containers
 import plugins_manager
 from base_crawler import BaseCrawler, BaseFrame
@@ -41,7 +43,7 @@ def __init__(self,
 
 
     def create_plugincont(self, guestcont):
-        #TODO: build plugin cont from Dockerfile first
+        #TODO: build plugin cont image from Dockerfile first
 
         #plugincont_image = 'plugincont_image'
         #pip install docker=2.0.0
@@ -69,10 +71,65 @@ def create_plugincont(self, guestcont):
         except:
             print sys.exc_info()[0]
         guestcont.plugincont = plugincont
 
+    def _add_iptable_rules(self):
+        # pip install python-iptables
+        rule = iptc.Rule()
+        rule.protocol = "all"
+        match = iptc.Match(rule, "owner")
+        match.uid_owner = "166536" #uid of plugin cont's user1 on host; from docker userns remapping
+        rule.add_match(match)
+        rule.target = iptc.Target(rule, "DROP")
+        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
+        chain.insert_rule(rule)
+        #TODO
+
+    def _setup_netcls_cgroup(self, plugincont_id):
+        try:
+            cgroup_netcls_path = '/sys/fs/cgroup/net_cls/docker/'+plugincont_id
+            tasks_path = cgroup_netcls_path+'/tasks'
+            block_path = cgroup_netcls_path+'/block'
+            block_classid_path = block_path+'/net_cls.classid'
+            block_tasks_path = block_path+'/tasks'
+
+            if not os.path.isdir(block_path):
+                os.makedirs(block_path)
+
+            fd = open(block_classid_path,'w')
+            fd.write('43') #random cgroup net cls id
+            fd.close()
+
+            fd = open(tasks_path,'r')
+            plugincont_pids = fd.readlines() #should be just one pid == plugincont_pid
+            fd.close()
+
+            fd = open(block_tasks_path,'r')
+            for pid in plugincont_pids:
+                fd.write(pid)
+            fd.close()
+        except:
+            print sys.exc_info()[0]
+
+    def set_plugincont_iptables(self, plugincont_id):
+        try:
+            client = docker.APIClient(base_url='unix://var/run/docker.sock')
+            plugincont_pid = client.inspect_container(plugincont_id)['State']['Pid']
+            #netns_path = '/var/run/netns'
+            #if not os.path.isdir(netns_path):
+            #    os.makedirs(netns_path)
+            self._setup_netcls_cgroup(plugincont_id)
+            run_as_another_namespace(plugincont_pid,
+                                     ['net'],
+                                     self._add_iptable_rules)
+
+        except:
+            print sys.exc_info()[0]
+
+
     def setup_plugincont(self, guestcont):
         self.create_plugincont(guestcont)
         if guestcont.plugincont is not None:
             plugincont_id = guestcont.plugincont.id
+            self.set_plugincont_iptables(plugincont_id)
             # TODO:
 
     # Return list of features after reading frame from plugin cont
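Notes on PATCH 02: the sidecar's confinement is a pair of netfilter rules: an OUTPUT rule dropping egress traffic whose socket owner is the sidecar's remapped host uid, plus (completed in the following patches) an INPUT rule dropping ingress to the sidecar's net_cls classid. Nothing in the series deletes these rules when a sidecar goes away; a cleanup sketch with python-iptables, under the assumption that the only owner-matched rules in OUTPUT are the crawler's:

    import iptc

    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'OUTPUT')
    for rule in chain.rules:
        # heuristic: treat any owner-matched rule as one of ours
        if any(m.name == 'owner' for m in rule.matches):
            chain.delete_rule(rule)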
From 12fa3c78912740afc3e62a1dc80d1304fffbea52 Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Thu, 16 Nov 2017 11:05:27 -0500
Subject: [PATCH 03/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/safe_containers_crawler.py | 89 +++++++++++++++++++++---------
 1 file changed, 63 insertions(+), 26 deletions(-)

diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
index 98a4dcc1..6fea0365 100644
--- a/crawler/safe_containers_crawler.py
+++ b/crawler/safe_containers_crawler.py
@@ -40,32 +40,47 @@ def __init__(self,
         self.host_namespace = host_namespace
         self.user_list = user_list
         self.frequency = frequency
+        #magic numbers
+        #self.plugincont_image = 'plugincont_image'
+        self.plugincont_image = 'crawler_plugins12'
+        self.plugincont_name = 'plugin_cont'
+        self.plugincont_username = 'user1'
+        self.plugincont.workdir = '/home/user1/'
+        self.plugincont_seccomp_profile_path = '/utils/plugincont/seccomp-no-ptrace.json'
+        self.plugincont_guestcont_mountpoint = '/rootfs_local'
+        self.plugincont_host_uid = '166536' #from docker userns remapping
+        self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id
 
+    def destroy_plugincont(self, guestcont):
+        client = docker.APIClient(base_url='unix://var/run/docker.sock')
+        plugincont_id = guestcont.plugincont.id
+        client.stop(plugincont_id)
+        client.remove_container(plugincont_id)
+        guestcont.plugincont = None
 
     def create_plugincont(self, guestcont):
         #TODO: build plugin cont image from Dockerfile first
 
-        #plugincont_image = 'plugincont_image'
         #pip install docker=2.0.0
         #client.containers.run("ruby", "tail -f /dev/null", pid_mode='container:d98cd4f1e518e671bc376ac429146937fbec9df7dbbfbb389e615a90c23ca27a', detach=True)
         # maybe userns_mode='host'
         guestcont_id = guestcont.long_id
         guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path(guestcont_id)
-        plugincont_image = 'crawler_plugins12'
         plugincont = None
-        seccomp_profile_path = os.getcwd() + '/utils/plugincont/seccomp-no-ptrace.json'
+        seccomp_profile_path = os.getcwd() + self.plugincont_seccomp_profile_path
         client = docker.from_env()
         try:
             plugincont = client.containers.run(
-                image=plugincont_image,
-                name='plugin_cont',
-                user='user1',
+                image=self.plugincont_image,
+                name=self.plugincont_name,
+                user=self.plugincont_username,
                 command="/usr/bin/python2.7 crawler/crawler_lite.py --frequency="+frequency,
                 pid_mode='container:'+guestcont_id,
                 network_mode='container:'+guestcont_id,
                 cap_add=["SYS_PTRACE","DAC_READ_SEARCH"],
                 security_opt=['seccomp='+seccomp_profile_path],
-                volumes={guestcont_rootfs:{'bind':'/rootfs_local','mode':'ro'}},
+                volumes={guestcont_rootfs:{'bind':self.plugincont_guestcont_mountpoint,'mode':'ro'}},
                 detach=True)
         except:
             print sys.exc_info()[0]
@@ -73,18 +88,35 @@ def create_plugincont(self, guestcont):
 
     def _add_iptable_rules(self):
         # pip install python-iptables
-        rule = iptc.Rule()
-        rule.protocol = "all"
-        match = iptc.Match(rule, "owner")
-        match.uid_owner = "166536" #uid of plugin cont's user1 on host; from docker userns remapping
-        rule.add_match(match)
-        rule.target = iptc.Target(rule, "DROP")
-        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
-        chain.insert_rule(rule)
-        #TODO
+        retVal = 0
+        try:
+            rule = iptc.Rule()
+            match = iptc.Match(rule, "owner")
+            match.uid_owner = self.plugincont_host_uid
+            rule.add_match(match)
+            rule.dst = "!localhost"
+            rule.protocol = "all"
+            rule.target = iptc.Target(rule, "DROP")
+            chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
+            chain.insert_rule(rule)
+
+            rule = iptc.Rule()
+            match = iptc.Match(rule, "cgroup")
+            match.cgroup = self.plugincont_cgroup_netclsid)
+            rule.add_match(match)
+            rule.src = "!localhost"
+            rule.target = iptc.Target(rule, "DROP")
+            chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
+            chain.insert_rule(rule)
+        except:
+            print sys.exc_info()[0]
+            retVal = -1
+        return retVal
 
     def _setup_netcls_cgroup(self, plugincont_id):
+        retVal = 0
         try:
+            #TODO cgroup path
             cgroup_netcls_path = '/sys/fs/cgroup/net_cls/docker/'+plugincont_id
             tasks_path = cgroup_netcls_path+'/tasks'
             block_path = cgroup_netcls_path+'/block'
             block_classid_path = block_path+'/net_cls.classid'
             block_tasks_path = block_path+'/tasks'
@@ -95,7 +127,7 @@ def _setup_netcls_cgroup(self, plugincont_id):
                 os.makedirs(block_path)
 
             fd = open(block_classid_path,'w')
-            fd.write('43') #random cgroup net cls id
+            fd.write(self.plugincont_cgroup_netclsid)
             fd.close()
 
             fd = open(tasks_path,'r')
@@ -108,29 +140,34 @@ def _setup_netcls_cgroup(self, plugincont_id):
             fd.close()
         except:
             print sys.exc_info()[0]
+            retVal = -1
+        return retVal
 
     def set_plugincont_iptables(self, plugincont_id):
+        retVal = 0
         try:
             client = docker.APIClient(base_url='unix://var/run/docker.sock')
             plugincont_pid = client.inspect_container(plugincont_id)['State']['Pid']
             #netns_path = '/var/run/netns'
             #if not os.path.isdir(netns_path):
             #    os.makedirs(netns_path)
-            self._setup_netcls_cgroup(plugincont_id)
-            run_as_another_namespace(plugincont_pid,
-                                     ['net'],
-                                     self._add_iptable_rules)
-
+            retVal = self._setup_netcls_cgroup(plugincont_id, plugincont_pid)
+            if retVal == 0:
+                retVal = run_as_another_namespace(plugincont_pid,
+                                                  ['net'],
+                                                  self._add_iptable_rules)
         except:
             print sys.exc_info()[0]
-
+            retVal = -1
+        return retVal
 
     def setup_plugincont(self, guestcont):
         self.create_plugincont(guestcont)
         if guestcont.plugincont is not None:
             plugincont_id = guestcont.plugincont.id
-            self.set_plugincont_iptables(plugincont_id)
-            # TODO:
+            if self.set_plugincont_iptables(plugincont_id) != 0:
+                self.destroy_plugincont(guestcont)
+                guestcont.plugincont = None
 
     # Return list of features after reading frame from plugin cont
     def get_plugincont_features(self, guestcont):
@@ -142,7 +179,7 @@ def get_plugincont_features(self, guestcont):
 
         plugincont_id = guestcont.plugincont.id
         rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id)
-        frame_dir = rootfs+'/home/user1/'
+        frame_dir = rootfs+self.plugincont.workdir
         frame_list = os.listdir(frame_dir)
         frame_list.sort(key=int)

From b8081d5afbe85590f0c7adf448373d8529b46f02 Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Thu, 16 Nov 2017 16:24:15 -0500
Subject: [PATCH 04/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/safe_containers_crawler.py | 45 ++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 15 deletions(-)

diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
index 6fea0365..201a26ed 100644
--- a/crawler/safe_containers_crawler.py
+++ b/crawler/safe_containers_crawler.py
@@ -2,11 +2,11 @@
 import sys
 import docker
 import iptc
-from containers import poll_containers, get_containers
 import plugins_manager
-from base_crawler import BaseCrawler, BaseFrame
 import utils.dockerutils
-
+from base_crawler import BaseCrawler, BaseFrame
+from containers import poll_containers, get_containers
+from utils.crawler_exceptions import ContainerWithoutCgroups
 
 class ContainerFrame(BaseFrame):
@@ -45,7 +45,7 @@ def __init__(self,
         self.plugincont_image = 'crawler_plugins12'
         self.plugincont_name = 'plugin_cont'
         self.plugincont_username = 'user1'
-        self.plugincont.workdir = '/home/user1/'
+        self.plugincont.workdir = '/home/user1/features/'
         self.plugincont_seccomp_profile_path = '/utils/plugincont/seccomp-no-ptrace.json'
         self.plugincont_guestcont_mountpoint = '/rootfs_local'
         self.plugincont_host_uid = '166536' #from docker userns remapping
@@ -102,7 +102,7 @@ def _add_iptable_rules(self):
 
         rule = iptc.Rule()
         match = iptc.Match(rule, "cgroup")
-        match.cgroup = self.plugincont_cgroup_netclsid)
+        match.cgroup = self.plugincont_cgroup_netclsid
         rule.add_match(match)
         rule.src = "!localhost"
         rule.target = iptc.Target(rule, "DROP")
@@ -113,11 +113,27 @@ def _add_iptable_rules(self):
         return retVal
 
+    def _get_cgroup_dir(self, devlist=[]):
+        for dev in devlist:
+            paths = [os.path.join('/cgroup/', dev),
+                     os.path.join('/sys/fs/cgroup/', dev)]
+            for path in paths:
+                if os.path.ismount(path):
+                    return path
+
+            # Try getting the mount point from /proc/mounts
+            for l in open('/proc/mounts', 'r'):
+                _type, mnt, _, _, _, _ = l.split(' ')
+                if _type == 'cgroup' and mnt.endswith('cgroup/' + dev):
+                    return mnt
+
+        raise ContainerWithoutCgroups('Can not find the cgroup dir')
+
     def _setup_netcls_cgroup(self, plugincont_id):
         retVal = 0
         try:
-            #TODO cgroup path
-            cgroup_netcls_path = '/sys/fs/cgroup/net_cls/docker/'+plugincont_id
+            # cgroup_netcls_path = '/sys/fs/cgroup/net_cls/docker/'+plugincont_id
+            cgroup_netcls_path = _get_cgroup_dir(['net_cls','net_cls,net_prio'])+'/docker/'+plugincont_id
             tasks_path = cgroup_netcls_path+'/tasks'
             block_path = cgroup_netcls_path+'/block'
             block_classid_path = block_path+'/net_cls.classid'
@@ -180,11 +196,10 @@ def get_plugincont_features(self, guestcont):
         plugincont_id = guestcont.plugincont.id
         rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id)
         frame_dir = rootfs+self.plugincont.workdir
-        frame_list = os.listdir(frame_dir)
-        frame_list.sort(key=int)
-
-        if frame_list != []:
-            try:
+        try:
+            frame_list = os.listdir(frame_dir)
+            frame_list.sort(key=int)
+            if frame_list != []:
                 earliest_frame_file = frame_dir+frame_list[0]
                 fd = open(earliest_frame_file)
                 for feature_line in fd.readlines():
@@ -192,8 +207,8 @@ def get_plugincont_features(self, guestcont):
                 fd.close()
                 os.remove(earliest_frame_file)
-            except:
-                print sys.exc_info()[0]
+        except:
+            print sys.exc_info()[0]
 
         return features
@@ -239,7 +254,7 @@ def polling_crawl(self, timeout, ignore_plugin_exception=True):
         :return: a Frame object
         """
         # Not implemented
-        sleep(timeout)
+        time.sleep(timeout)
         return None
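Notes on PATCH 04: the INPUT-side rule needs a way to recognize the sidecar's traffic, and that is what the net_cls classid provides: packets from every task inside the tagged sub-cgroup carry the classid that the iptables cgroup match filters on. A condensed sketch of that tagging step under the cgroup-v1 layout assumed above (the path layout and classid value are the series' own magic numbers):

    import os

    def tag_container_tasks(cgroup_netcls_path, classid='43'):
        # create a sub-cgroup, assign it a classid, then move the
        # container's tasks into it so their packets carry the label
        block = os.path.join(cgroup_netcls_path, 'block')
        if not os.path.isdir(block):
            os.makedirs(block)
        with open(os.path.join(block, 'net_cls.classid'), 'w') as f:
            f.write(classid)
        with open(os.path.join(cgroup_netcls_path, 'tasks')) as f:
            pids = f.readlines()
        with open(os.path.join(block, 'tasks'), 'w') as f:
            for pid in pids:
                f.write(pid)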
From 630d9d35b824658939370f23557c48447b9c2e58 Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Fri, 17 Nov 2017 18:06:01 -0500
Subject: [PATCH 05/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/crawler.py                 |  1 +
 crawler/crawlmodes.py              |  1 +
 crawler/dockercontainer.py         |  2 +-
 crawler/safe_containers_crawler.py | 41 ++++++++++++++++++++--------
 crawler/utils/dockerutils.py       | 38 +++++++++++++++------------
 5 files changed, 56 insertions(+), 27 deletions(-)

diff --git a/crawler/crawler.py b/crawler/crawler.py
index 8e53708b..3508d756 100755
--- a/crawler/crawler.py
+++ b/crawler/crawler.py
@@ -7,6 +7,7 @@
 
 from worker import Worker
 from containers_crawler import ContainersCrawler
+from safe_containers_crawler import SafeContainersCrawler
 from utils import misc
 from crawlmodes import Modes
 from emitters_manager import EmittersManager
diff --git a/crawler/crawlmodes.py b/crawler/crawlmodes.py
index ce91ed83..251fa8d8 100644
--- a/crawler/crawlmodes.py
+++ b/crawler/crawlmodes.py
@@ -4,4 +4,5 @@
     OUTVM='OUTVM',
     MOUNTPOINT='MOUNTPOINT',
     OUTCONTAINER='OUTCONTAINER',
+    OUTCONTAINERSAFE='OUTCONTAINERSAFE',
     MESOS='MESOS')
diff --git a/crawler/dockercontainer.py b/crawler/dockercontainer.py
index 7f9c8eed..bd26e809 100644
--- a/crawler/dockercontainer.py
+++ b/crawler/dockercontainer.py
@@ -159,7 +159,7 @@ def __init__(
         self.volumes = inspect.get('Volumes')
         self.image_name = inspect['Config']['Image']
         self.inspect = inspect
-
+        self.plugincont = None
         self.process_namespace = (process_namespace or
                                   namespace.get_pid_namespace(self.pid))
diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
index 201a26ed..e34d4412 100644
--- a/crawler/safe_containers_crawler.py
+++ b/crawler/safe_containers_crawler.py
@@ -1,5 +1,8 @@
 import ast
+import os
 import sys
+import time
+import json
 import docker
 import iptc
 import plugins_manager
@@ -42,11 +45,11 @@ def __init__(self,
         self.frequency = frequency
         #magic numbers
         #self.plugincont_image = 'plugincont_image'
-        self.plugincont_image = 'crawler_plugins12'
+        self.plugincont_image = 'crawler_plugins15'
         self.plugincont_name = 'plugin_cont'
         self.plugincont_username = 'user1'
-        self.plugincont.workdir = '/home/user1/features/'
-        self.plugincont_seccomp_profile_path = '/utils/plugincont/seccomp-no-ptrace.json'
+        self.plugincont_workdir = '/home/user1/features/'
+        self.plugincont_seccomp_profile_path = os.getcwd() + '/crawler/utils/plugincont/seccomp-no-ptrace.json'
         self.plugincont_guestcont_mountpoint = '/rootfs_local'
         self.plugincont_host_uid = '166536' #from docker userns remapping
         self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id
@@ -68,21 +71,25 @@ def create_plugincont(self, guestcont):
         guestcont_id = guestcont.long_id
         guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path(guestcont_id)
         plugincont = None
-        seccomp_profile_path = os.getcwd() + self.plugincont_seccomp_profile_path
+        seccomp_attr = json.dumps(json.load(open(self.plugincont_seccomp_profile_path)))
+        #secomp_profile_path = os.getcwd() + self.plugincont_seccomp_profile_path
         client = docker.from_env()
         try:
             plugincont = client.containers.run(
                 image=self.plugincont_image,
-                name=self.plugincont_name,
+                #name=self.plugincont_name,
                 user=self.plugincont_username,
-                command="/usr/bin/python2.7 crawler/crawler_lite.py --frequency="+frequency,
+                command="/usr/bin/python2.7 /crawler/crawler/crawler_lite.py --frequency="+str(self.frequency),
+                #command="tail -f /dev/null",
                 pid_mode='container:'+guestcont_id,
                 network_mode='container:'+guestcont_id,
                 cap_add=["SYS_PTRACE","DAC_READ_SEARCH"],
-                security_opt=['seccomp='+seccomp_profile_path],
+                #security_opt=['seccomp:'+seccomp_profile_path],
+                security_opt=['seccomp:'+seccomp_attr],
                 volumes={guestcont_rootfs:{'bind':self.plugincont_guestcont_mountpoint,'mode':'ro'}},
                 detach=True)
-        except:
+        except Exception as exc:
+            print exc
             print sys.exc_info()[0]
         guestcont.plugincont = plugincont
@@ -182,7 +189,8 @@ def setup_plugincont(self, guestcont):
         if guestcont.plugincont is not None:
             plugincont_id = guestcont.plugincont.id
             if self.set_plugincont_iptables(plugincont_id) != 0:
-                self.destroy_plugincont(guestcont)
+                #TODO: uncomment following
+                #self.destroy_plugincont(guestcont)
                 guestcont.plugincont = None
@@ -195,7 +203,7 @@ def get_plugincont_features(self, guestcont):
 
         plugincont_id = guestcont.plugincont.id
         rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id)
-        frame_dir = rootfs+self.plugincont.workdir
+        frame_dir = rootfs+self.plugincont_workdir
         try:
             frame_list = os.listdir(frame_dir)
             frame_list.sort(key=int)
@@ -214,6 +222,17 @@ def get_plugincont_features(self, guestcont):
 
     def crawl_container(self, container, ignore_plugin_exception=True):
+        frame = ContainerFrame(self.features, container)
+        try:
+            import pdb
+            pdb.set_trace()
+            frame.add_features(self.get_plugincont_features(container))
+        except Exception as exc:
+            if not ignore_plugin_exception:
+                raise exc
+        return frame
+
+    def crawl_container_org(self, container, ignore_plugin_exception=True):
         """
         Crawls a specific container and returns a Frame for it.
@@ -238,6 +257,8 @@ def crawl_container_org(self, container, ignore_plugin_exception=True):
 
         # collect plugin crawl output from inside plugin sidecar container
         try:
+            #import pdb
+            #pdb.set_trace()
             frame.add_features(self.get_plugincont_features(container))
         except Exception as exc:
             if not ignore_plugin_exception:
diff --git a/crawler/utils/dockerutils.py b/crawler/utils/dockerutils.py
index d94e85db..da065370 100644
--- a/crawler/utils/dockerutils.py
+++ b/crawler/utils/dockerutils.py
@@ -30,8 +30,9 @@ def exec_dockerps():
     This call executes the `docker inspect` command every time it is invoked.
     """
     try:
-        client = docker.Client(
-            base_url='unix://var/run/docker.sock', version='auto')
+        # client = docker.Client(
+        #     base_url='unix://var/run/docker.sock', version='auto')
+        client = docker.APIClient(base_url='unix://var/run/docker.sock')
         containers = client.containers()
         inspect_arr = []
         for container in containers:
@@ -46,8 +47,9 @@ def exec_docker_history(long_id):
     try:
-        client = docker.Client(base_url='unix://var/run/docker.sock',
-                               version='auto')
+        # client = docker.Client(base_url='unix://var/run/docker.sock',
+        #                        version='auto')
+        client = docker.APIClient(base_url='unix://var/run/docker.sock')
         image = client.inspect_container(long_id)['Image']
         history = client.history(image)
         return history
@@ -70,8 +72,9 @@ def _reformat_inspect(inspect):
 
 def exec_dockerinspect(long_id):
     try:
-        client = docker.Client(
-            base_url='unix://var/run/docker.sock', version='auto')
+        # client = docker.Client(
+        #     base_url='unix://var/run/docker.sock', version='auto')
+        client = docker.APIClient(base_url='unix://var/run/docker.sock')
         inspect = client.inspect_container(long_id)
         _reformat_inspect(inspect)
     except docker.errors.DockerException as e:
@@ -107,8 +110,9 @@ def _get_docker_storage_driver():
 
     # Step 1, get it from "docker info"
     try:
-        client = docker.Client(
-            base_url='unix://var/run/docker.sock', version='auto')
+        # client = docker.Client(
+        #     base_url='unix://var/run/docker.sock', version='auto')
+        client = docker.APIClient(base_url='unix://var/run/docker.sock')
         driver = client.info()['Driver']
     except (docker.errors.DockerException, KeyError):
         pass  # try to continue with the default of 'devicemapper'
@@ -193,8 +197,9 @@ def _get_docker_server_version():
     """Run the `docker info` command to get server version
     """
     try:
-        client = docker.Client(
-            base_url='unix://var/run/docker.sock', version='auto')
+        # client = docker.Client(
+        #     base_url='unix://var/run/docker.sock', version='auto')
+        client = docker.APIClient(base_url='unix://var/run/docker.sock')
         return client.version()['Version']
     except (docker.errors.DockerException, KeyError) as e:
         logger.warning(str(e))
@@ -292,7 +297,7 @@ def _get_container_rootfs_path_aufs(long_id, inspect=None):
     if VERSION_SPEC.match(semantic_version.Version(_fix_version(
             server_version))):
         aufs_path = None
-        mountid_path = ('/var/lib/docker/image/aufs/layerdb/mounts/' +
+        mountid_path = ('/var/lib/docker/165536.165536/image/aufs/layerdb/mounts/' +
                         long_id + '/mount-id')
         try:
             with open(mountid_path, 'r') as f:
@@ -301,11 +306,11 @@ def _get_container_rootfs_path_aufs(long_id, inspect=None):
             logger.warning(str(e))
         if not aufs_path:
             raise DockerutilsException('Failed to get rootfs on aufs')
-        rootfs_path = '/var/lib/docker/aufs/mnt/' + aufs_path
+        rootfs_path = '/var/lib/docker/165536.165536/aufs/mnt/' + aufs_path
     else:
         rootfs_path = None
-        for _path in ['/var/lib/docker/aufs/mnt/' + long_id,
-                      '/var/lib/docker/aufs/diff/' + long_id]:
+        for _path in ['/var/lib/docker/165536.165536/aufs/mnt/' + long_id,
+                      '/var/lib/docker/165536.165536/aufs/diff/' + long_id]:
             if os.path.isdir(_path) and os.listdir(_path):
                 rootfs_path = _path
                 break
@@ -383,8 +388,9 @@ def get_docker_container_rootfs_path(long_id, inspect=None):
 
 def poll_container_create_events(timeout=0.1):
     try:
-        client = docker.Client(base_url='unix://var/run/docker.sock',
-                               version='auto')
+        # client = docker.Client(base_url='unix://var/run/docker.sock',
+        #                        version='auto')
+        client = docker.APIClient(base_url='unix://var/run/docker.sock')
         filters = dict()
         filters['type'] = 'container'
         filters['event'] = 'start'
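Notes on PATCH 05: docker-py 2.x split the old docker.Client into a high-level client (docker.from_env() / docker.DockerClient) and the low-level docker.APIClient that this patch switches to. A minimal sketch of the two styles (the container id is illustrative):

    import docker

    # high-level: container objects with attributes and methods
    client = docker.from_env()
    for cont in client.containers.list():
        print cont.short_id, cont.status

    # low-level: thin wrapper over the REST endpoints, plain dict results
    api = docker.APIClient(base_url='unix://var/run/docker.sock')
    pid = api.inspect_container('my_container')['State']['Pid']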
From 5e9aee4cc34c5c3002ddf87f1226717881fc844c Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Mon, 20 Nov 2017 15:47:47 -0500
Subject: [PATCH 06/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/containers.py              | 18 +++++---
 crawler/safe_containers_crawler.py | 71 ++++++++++++++++++------------
 2 files changed, 55 insertions(+), 34 deletions(-)

diff --git a/crawler/containers.py b/crawler/containers.py
index 1c10cde7..22a229f2 100644
--- a/crawler/containers.py
+++ b/crawler/containers.py
@@ -10,7 +10,8 @@
 
 def list_all_containers(user_list='ALL', host_namespace='',
-                        ignore_raw_containers=True):
+                        ignore_raw_containers=True,
+                        group_by_pid_namespace=True):
     """
     Returns a list of all running containers in the host.
@@ -24,10 +25,13 @@ def list_all_containers(user_list='ALL', host_namespace='',
 
     for _container in get_docker_containers(host_namespace=host_namespace,
                                             user_list=user_list):
-        curr_ns = _container.process_namespace
-        if curr_ns not in visited_ns:
-            visited_ns.add(curr_ns)
+        if group_by_pid_namespace is False:
             yield _container
+        else:
+            curr_ns = _container.process_namespace
+            if curr_ns not in visited_ns:
+                visited_ns.add(curr_ns)
+                yield _container
 
     # XXX get list of rkt containers
@@ -62,7 +66,8 @@ def get_containers(
         environment='cloudsight',
         host_namespace=misc.get_host_ipaddr(),
         user_list='ALL',
-        ignore_raw_containers=True
+        ignore_raw_containers=True,
+        group_by_pid_namespace=True
 ):
     """
     Returns a list of all containers running in the host.
@@ -79,7 +84,8 @@ def get_containers(
     """
     filtered_list = []
     containers_list = list_all_containers(user_list, host_namespace,
-                                          ignore_raw_containers)
+                                          ignore_raw_containers,
+                                          group_by_pid_namespace)
     for _container in containers_list:
         default_environment = 'cloudsight'
         if (environment != default_environment and
diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
index e34d4412..4ce87731 100644
--- a/crawler/safe_containers_crawler.py
+++ b/crawler/safe_containers_crawler.py
@@ -10,6 +10,7 @@
 import plugins_manager
 import utils.dockerutils
 from base_crawler import BaseCrawler, BaseFrame
 from containers import poll_containers, get_containers
 from utils.crawler_exceptions import ContainerWithoutCgroups
+from utils.namespace import run_as_another_namespace
 
 class ContainerFrame(BaseFrame):
@@ -43,10 +44,11 @@ def __init__(self,
         self.host_namespace = host_namespace
         self.user_list = user_list
         self.frequency = frequency
+        self.pluginconts = dict()
         #magic numbers
         #self.plugincont_image = 'plugincont_image'
-        self.plugincont_image = 'crawler_plugins15'
-        self.plugincont_name = 'plugin_cont'
+        self.plugincont_image = 'crawler_plugins16'
+        self.plugincont_name_prefix = 'plugin_cont'
         self.plugincont_username = 'user1'
         self.plugincont_workdir = '/home/user1/features/'
         self.plugincont_seccomp_profile_path = os.getcwd() + '/crawler/utils/plugincont/seccomp-no-ptrace.json'
@@ -54,13 +56,11 @@ def __init__(self,
         self.plugincont_host_uid = '166536' #from docker userns remapping
         self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id
 
-
-    def destroy_plugincont(self, guestcont):
+    def destroy_cont(self, cont_id):
         client = docker.APIClient(base_url='unix://var/run/docker.sock')
-        plugincont_id = guestcont.plugincont.id
-        client.stop(plugincont_id)
-        client.remove_container(plugincont_id)
-        guestcont.plugincont = None
+        client.stop(cont_id)
+        client.remove_container(cont_id)
 
     def create_plugincont(self, guestcont):
         #TODO: build plugin cont image from Dockerfile first
@@ -71,13 +71,15 @@ def create_plugincont(self, guestcont):
         guestcont_id = guestcont.long_id
         guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path(guestcont_id)
         plugincont = None
+        plugincont_name = self.plugincont_name_prefix+'_'+guestcont_id
         seccomp_attr = json.dumps(json.load(open(self.plugincont_seccomp_profile_path)))
         #secomp_profile_path = os.getcwd() + self.plugincont_seccomp_profile_path
         client = docker.from_env()
         try:
+            self.destroy_cont(plugincont_name)
             plugincont = client.containers.run(
                 image=self.plugincont_image,
-                #name=self.plugincont_name,
+                name=plugincont_name,
                 user=self.plugincont_username,
                 command="/usr/bin/python2.7 /crawler/crawler/crawler_lite.py --frequency="+str(self.frequency),
                 #command="tail -f /dev/null",
                 pid_mode='container:'+guestcont_id,
                 network_mode='container:'+guestcont_id,
                 cap_add=["SYS_PTRACE","DAC_READ_SEARCH"],
                 #security_opt=['seccomp:'+seccomp_profile_path],
                 security_opt=['seccomp:'+seccomp_attr],
                 volumes={guestcont_rootfs:{'bind':self.plugincont_guestcont_mountpoint,'mode':'ro'}},
                 detach=True)
+            time.sleep(5)
         except Exception as exc:
             print exc
             print sys.exc_info()[0]
+
+        self.pluginconts[str(guestcont_id)] = plugincont
         guestcont.plugincont = plugincont
 
     def _add_iptable_rules(self):
         rule.target = iptc.Target(rule, "DROP")
         chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
         chain.insert_rule(rule)
-    except:
+    except Exception as exc:
+        print exc
         print sys.exc_info()[0]
         retVal = -1
     return retVal
@@ -140,7 +146,7 @@ def _setup_netcls_cgroup(self, plugincont_id):
         retVal = 0
         try:
             # cgroup_netcls_path = '/sys/fs/cgroup/net_cls/docker/'+plugincont_id
-            cgroup_netcls_path = _get_cgroup_dir(['net_cls','net_cls,net_prio'])+'/docker/'+plugincont_id
+            cgroup_netcls_path = self._get_cgroup_dir(['net_cls','net_cls,net_prio'])+'/docker/'+plugincont_id
             tasks_path = cgroup_netcls_path+'/tasks'
             block_path = cgroup_netcls_path+'/block'
             block_classid_path = block_path+'/net_cls.classid'
             block_tasks_path = block_path+'/tasks'
@@ -157,11 +163,12 @@ def _setup_netcls_cgroup(self, plugincont_id):
             plugincont_pids = fd.readlines() #should be just one pid == plugincont_pid
             fd.close()
 
-            fd = open(block_tasks_path,'r')
+            fd = open(block_tasks_path,'w')
             for pid in plugincont_pids:
                 fd.write(pid)
             fd.close()
-        except:
+        except Exception as exc:
+            print exc
             print sys.exc_info()[0]
             retVal = -1
         return retVal
@@ -174,27 +181,35 @@ def set_plugincont_iptables(self, plugincont_id):
             #netns_path = '/var/run/netns'
             #if not os.path.isdir(netns_path):
             #    os.makedirs(netns_path)
-            retVal = self._setup_netcls_cgroup(plugincont_id, plugincont_pid)
-            if retVal == 0:
-                retVal = run_as_another_namespace(plugincont_pid,
-                                                  ['net'],
-                                                  self._add_iptable_rules)
-        except:
+            retVal = self._setup_netcls_cgroup(plugincont_id)
+            #if retVal == 0:
+            #    retVal = run_as_another_namespace(plugincont_pid,
+            #                                      ['net'],
+            #                                      self._add_iptable_rules)
+        except Exception as exc:
+            print exc
             print sys.exc_info()[0]
             retVal = -1
         return retVal
 
     def setup_plugincont(self, guestcont):
+        guestcont_id = str(guestcont.long_id)
+        if guestcont_id in self.pluginconts.keys():
+            guestcont.plugincont = self.pluginconts[guestcont_id]
+            return
+
         self.create_plugincont(guestcont)
         if guestcont.plugincont is not None:
             plugincont_id = guestcont.plugincont.id
             if self.set_plugincont_iptables(plugincont_id) != 0:
                 #TODO: uncomment following
-                #self.destroy_plugincont(guestcont)
+                #self.destroy_cont(plugincont_id)
                 guestcont.plugincont = None
 
     # Return list of features after reading frame from plugin cont
     def get_plugincont_features(self, guestcont):
+        #import pdb
+        #pdb.set_trace()
         features = []
         if guestcont.plugincont is None:
             self.setup_plugincont(guestcont)
             if guestcont.plugincont is None:
                 return features
 
         plugincont_id = guestcont.plugincont.id
         rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id)
+        print plugincont_id, rootfs
         frame_dir = rootfs+self.plugincont_workdir
         try:
             frame_list = os.listdir(frame_dir)
@@ -211,7 +227,8 @@ def get_plugincont_features(self, guestcont):
                 earliest_frame_file = frame_dir+frame_list[0]
                 fd = open(earliest_frame_file)
                 for feature_line in fd.readlines():
-                    (type, key, val) = feature_line.strip().split()
+                    (type, key, val) = feature_line.strip().split('\t')
                     features.append((key, ast.literal_eval(val), type))
                 fd.close()
                 os.remove(earliest_frame_file)
-        except:
+        except Exception as exc:
+            print exc
             print sys.exc_info()[0]
 
         return features
@@ -287,6 +300,8 @@ def crawl(self, ignore_plugin_exception=True):
         containers_list = get_containers(
             user_list=self.user_list,
-            host_namespace=self.host_namespace)
+            host_namespace=self.host_namespace,
+            group_by_pid_namespace=False)
         for container in containers_list:
-            yield self.crawl_container(container, ignore_plugin_exception)
+            if not container.name.startswith(self.plugincont_name_prefix):
+                yield self.crawl_container(container, ignore_plugin_exception)

From a198f04a5607ec78ee61fe2c571f2af008cb20c7 Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Mon, 20 Nov 2017 16:30:45 -0500
Subject: [PATCH 07/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/safe_containers_crawler.py | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
index 4ce87731..7799144a 100644
--- a/crawler/safe_containers_crawler.py
+++ b/crawler/safe_containers_crawler.py
@@ -47,7 +47,7 @@ def __init__(self,
         self.pluginconts = dict()
         #magic numbers
         #self.plugincont_image = 'plugincont_image'
-        self.plugincont_image = 'crawler_plugins16'
+        self.plugincont_image = 'crawler_plugins17'
         self.plugincont_name_prefix = 'plugin_cont'
         self.plugincont_username = 'user1'
         self.plugincont_workdir = '/home/user1/features/'
@@ -106,8 +106,7 @@ def _add_iptable_rules(self):
         match = iptc.Match(rule, "owner")
         match.uid_owner = self.plugincont_host_uid
         rule.add_match(match)
-        rule.dst = "!localhost"
-        rule.protocol = "all"
+        rule.dst = "!127.0.0.1"
         rule.target = iptc.Target(rule, "DROP")
         chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
         chain.insert_rule(rule)
@@ -116,7 +115,7 @@ def _add_iptable_rules(self):
         match = iptc.Match(rule, "cgroup")
         match.cgroup = self.plugincont_cgroup_netclsid
         rule.add_match(match)
-        rule.src = "!localhost"
+        rule.src = "!127.0.0.1"
         rule.target = iptc.Target(rule, "DROP")
         chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
         chain.insert_rule(rule)
@@ -182,10 +181,10 @@ def set_plugincont_iptables(self, plugincont_id):
         #if not os.path.isdir(netns_path):
         #    os.makedirs(netns_path)
         retVal = self._setup_netcls_cgroup(plugincont_id)
-        #if retVal == 0:
-        #    retVal = run_as_another_namespace(plugincont_pid,
-        #                                      ['net'],
-        #                                      self._add_iptable_rules)
+        if retVal == 0:
+            retVal = run_as_another_namespace(str(plugincont_pid),
+                                              ['net'],
+                                              self._add_iptable_rules)
     except Exception as exc:
         print exc
         print sys.exc_info()[0]
@@ -228,7 +227,7 @@ def get_plugincont_features(self, guestcont):
             fd = open(earliest_frame_file)
             for feature_line in fd.readlines():
                 (type, key, val) = feature_line.strip().split('\t')
-                features.append((key, ast.literal_eval(val), type))
+                features.append((ast.literal_eval(key), ast.literal_eval(val), type))
             fd.close()
             os.remove(earliest_frame_file)
         except Exception as exc:
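Notes on PATCH 07: with the tab-separated format and literal-parsed keys settled here, the frame files the sidecar leaves behind follow a simple contract: one file per frame, integer filenames so the oldest sorts first, and one (type, key, value) line per feature with key and value written as Python literals. A standalone sketch of the consuming side:

    import ast
    import os

    def read_earliest_frame(frame_dir):
        frames = sorted(os.listdir(frame_dir), key=int)
        if not frames:
            return []
        path = os.path.join(frame_dir, frames[0])
        features = []
        with open(path) as fd:
            for line in fd:
                ftype, key, val = line.rstrip('\n').split('\t')
                features.append((ast.literal_eval(key),
                                 ast.literal_eval(val), ftype))
        os.remove(path)  # consume the frame so the next one becomes oldest
        return features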
From c19ff9600c305c4d721a78a7323cb250cffacc70 Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Mon, 20 Nov 2017 18:55:06 -0500
Subject: [PATCH 08/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/safe_containers_crawler.py | 31 +++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
index 7799144a..512eb07e 100644
--- a/crawler/safe_containers_crawler.py
+++ b/crawler/safe_containers_crawler.py
@@ -57,10 +57,19 @@ def __init__(self,
         self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id
 
-    def destroy_cont(self, cont_id):
+    def destroy_cont(self, id=None, name=None):
         client = docker.APIClient(base_url='unix://var/run/docker.sock')
-        client.stop(cont_id)
-        client.remove_container(cont_id)
+        if name is None and id is None:
+            return
+        if name is not None:
+            _id = name
+            filter = {'name':name}
+        else:
+            _id = id
+            filter = {'id':id}
+        if client.containers(all=True,filters=filter) != []:
+            client.stop(_id)
+            client.remove_container(_id)
 
     def create_plugincont(self, guestcont):
         #TODO: build plugin cont image from Dockerfile first
@@ -85,7 +94,7 @@ def create_plugincont(self, guestcont):
         client = docker.from_env()
         try:
-            self.destroy_cont(plugincont_name)
+            self.destroy_cont(name=plugincont_name)
             plugincont = client.containers.run(
                 image=self.plugincont_image,
                 name=plugincont_name,
@@ -191,6 +200,13 @@ def set_plugincont_iptables(self, plugincont_id):
             retVal = -1
         return retVal
 
+    def destroy_plugincont(self, guestcont):
+        guestcont_id = str(guestcont.long_id)
+        plugincont_id = guestcont.plugincont.id
+        self.destroy_cont(id=plugincont_id)
+        guestcont.plugincont = None
+        self.pluginconts.pop(str(guestcont_id))
+
     def setup_plugincont(self, guestcont):
         guestcont_id = str(guestcont.long_id)
         if guestcont_id in self.pluginconts.keys():
@@ -201,9 +217,7 @@ def setup_plugincont(self, guestcont):
         self.create_plugincont(guestcont)
         if guestcont.plugincont is not None:
             plugincont_id = guestcont.plugincont.id
             if self.set_plugincont_iptables(plugincont_id) != 0:
-                #TODO: uncomment following
-                #self.destroy_cont(plugincont_id)
-                guestcont.plugincont = None
+                self.destroy_plugincont(guestcont)
 
     # Return list of features after reading frame from plugin cont
     def get_plugincont_features(self, guestcont):
@@ -228,7 +242,6 @@ def get_plugincont_features(self, guestcont):
 
         plugincont_id = guestcont.plugincont.id
         rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id)
-        print plugincont_id, rootfs
         frame_dir = rootfs+self.plugincont_workdir
         try:
             frame_list = os.listdir(frame_dir)

From 1acc702936668d5f9bd3d75cd036438d89f70590 Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Mon, 20 Nov 2017 19:14:19 -0500
Subject: [PATCH 09/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/safe_containers_crawler.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
index 512eb07e..f8d6f581 100644
--- a/crawler/safe_containers_crawler.py
+++ b/crawler/safe_containers_crawler.py
@@ -47,7 +47,7 @@ def __init__(self,
         self.pluginconts = dict()
         #magic numbers
         #self.plugincont_image = 'plugincont_image'
-        self.plugincont_image = 'crawler_plugins17'
+        self.plugincont_image = 'crawler_plugins18'
         self.plugincont_name_prefix = 'plugin_cont'
         self.plugincont_username = 'user1'
         self.plugincont_workdir = '/home/user1/features/'

From 9b1e46f3a2a8f7f83f52812c277b82019cdb6f5b Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Wed, 22 Nov 2017 10:41:41 -0500
Subject: [PATCH 10/47] plugincont wip

Signed-off-by: Sahil Suneja
---
 crawler/plugin_containers_manager.py | 199 +++++++++++++++++++++++++++
 crawler/safe_containers_crawler.py   | 186 +------------------------
 2 files changed, 204 insertions(+), 181 deletions(-)
 create mode 100644 crawler/plugin_containers_manager.py

diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py
new file mode 100644
index 00000000..353cfe3a
--- /dev/null
+++ b/crawler/plugin_containers_manager.py
@@ -0,0 +1,199 @@
+import ast
+import os
+import sys
+import time
+import json
+import docker
+import iptc
+import plugins_manager
+import utils.dockerutils
+from base_crawler import BaseCrawler, BaseFrame
+from containers import poll_containers, get_containers
+from utils.crawler_exceptions import ContainerWithoutCgroups
+from utils.namespace import run_as_another_namespace
+
+class PluginContainersManager():
+
+    def __init__(self, frequency=-1):
+        self.frequency = frequency
+        self.pluginconts = dict()
+        #self.plugincont_image = 'plugincont_image'
+        self.plugincont_image = 'crawler_plugins18'
+        self.plugincont_name_prefix = 'plugin_cont'
+        self.plugincont_username = 'user1'
+        self.plugincont_framedir = '/home/user1/features/'
+        self.plugincont_seccomp_profile_path = os.getcwd() + '/crawler/utils/plugincont/seccomp-no-ptrace.json'
+        self.plugincont_guestcont_mountpoint = '/rootfs_local'
+        self.plugincont_host_uid = '166536' #from docker userns remapping
+        self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id
+
+    def get_plugincont_framedir(self, guestcont):
+        frame_dir = None
+        if guestcont is not None and guestcont.plugincont is not None:
+            plugincont_id = guestcont.plugincont.id
+            rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id)
+            frame_dir = rootfs+self.plugincont_framedir
+        return frame_dir
+
+    def destroy_cont(self, id=None, name=None):
+        client = docker.APIClient(base_url='unix://var/run/docker.sock')
+        if name is None and id is None:
+            return
+        if name is not None:
+            _id = name
+            filter = {'name':name}
+        else:
+            _id = id
+            filter = {'id':id}
+        if client.containers(all=True,filters=filter) != []:
+            client.stop(_id)
+            client.remove_container(_id)
+
+    def create_plugincont(self, guestcont):
+        #TODO: build plugin cont image from Dockerfile first
+
+        #pip install docker=2.0.0
+        #client.containers.run("ruby", "tail -f /dev/null", pid_mode='container:d98cd4f1e518e671bc376ac429146937fbec9df7dbbfbb389e615a90c23ca27a', detach=True)
+        # maybe userns_mode='host'
+        guestcont_id = guestcont.long_id
+        guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path(guestcont_id)
+        plugincont = None
+        plugincont_name = self.plugincont_name_prefix+'_'+guestcont_id
+        seccomp_attr = json.dumps(json.load(open(self.plugincont_seccomp_profile_path)))
+        #secomp_profile_path = os.getcwd() + self.plugincont_seccomp_profile_path
+        client = docker.from_env()
+        try:
+            self.destroy_cont(name=plugincont_name)
+            plugincont = client.containers.run(
+                image=self.plugincont_image,
+                name=plugincont_name,
+                user=self.plugincont_username,
+                command="/usr/bin/python2.7 /crawler/crawler/crawler_lite.py --frequency="+str(self.frequency),
+                #command="tail -f /dev/null",
+                pid_mode='container:'+guestcont_id,
+                network_mode='container:'+guestcont_id,
+                cap_add=["SYS_PTRACE","DAC_READ_SEARCH"],
+                #security_opt=['seccomp:'+seccomp_profile_path],
+                security_opt=['seccomp:'+seccomp_attr],
+                volumes={guestcont_rootfs:{'bind':self.plugincont_guestcont_mountpoint,'mode':'ro'}},
+                detach=True)
+            time.sleep(5)
+        except Exception as exc:
+            print exc
+            print sys.exc_info()[0]
+
+        self.pluginconts[str(guestcont_id)] = plugincont
+        guestcont.plugincont = plugincont
+
+    def _add_iptable_rules(self):
+        # pip install python-iptables
+        retVal = 0
+        try:
+            rule = iptc.Rule()
+            match = iptc.Match(rule, "owner")
+            match.uid_owner = self.plugincont_host_uid
+            rule.add_match(match)
+            rule.dst = "!127.0.0.1"
+            rule.target = iptc.Target(rule, "DROP")
+            chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
+            chain.insert_rule(rule)
+
+            rule = iptc.Rule()
+            match = iptc.Match(rule, "cgroup")
+            match.cgroup = self.plugincont_cgroup_netclsid
+            rule.add_match(match)
+            rule.src = "!127.0.0.1"
+            rule.target = iptc.Target(rule, "DROP")
+            chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
+            chain.insert_rule(rule)
+        except Exception as exc:
+            print exc
+            print sys.exc_info()[0]
+            retVal = -1
+        return retVal
+
+    def _get_cgroup_dir(self, devlist=[]):
+        for dev in devlist:
+            paths = [os.path.join('/cgroup/', dev),
+                     os.path.join('/sys/fs/cgroup/', dev)]
+            for path in paths:
+                if os.path.ismount(path):
+                    return path
+
+            # Try getting the mount point from /proc/mounts
+            for l in open('/proc/mounts', 'r'):
+                _type, mnt, _, _, _, _ = l.split(' ')
+                if _type == 'cgroup' and mnt.endswith('cgroup/' + dev):
+                    return mnt
+
+        raise ContainerWithoutCgroups('Can not find the cgroup dir')
+
+    def _setup_netcls_cgroup(self, plugincont_id):
+        retVal = 0
+        try:
+            # cgroup_netcls_path = '/sys/fs/cgroup/net_cls/docker/'+plugincont_id
+            cgroup_netcls_path = self._get_cgroup_dir(['net_cls','net_cls,net_prio'])+'/docker/'+plugincont_id
+            tasks_path = cgroup_netcls_path+'/tasks'
+            block_path = cgroup_netcls_path+'/block'
+            block_classid_path = block_path+'/net_cls.classid'
+            block_tasks_path = block_path+'/tasks'
+
+            if not os.path.isdir(block_path):
+                os.makedirs(block_path)
+
+            fd = open(block_classid_path,'w')
+            fd.write(self.plugincont_cgroup_netclsid)
+            fd.close()
+
+            fd = open(tasks_path,'r')
+            plugincont_pids = fd.readlines() #should be just one pid == plugincont_pid
+            fd.close()
+
+            fd = open(block_tasks_path,'w')
+            for pid in plugincont_pids:
+                fd.write(pid)
+            fd.close()
+        except Exception as exc:
+            print exc
+            print sys.exc_info()[0]
+            retVal = -1
+        return retVal
+
+    def set_plugincont_iptables(self, plugincont_id):
+        retVal = 0
+        try:
+            client = docker.APIClient(base_url='unix://var/run/docker.sock')
+            plugincont_pid = client.inspect_container(plugincont_id)['State']['Pid']
+            #netns_path = '/var/run/netns'
+            #if not os.path.isdir(netns_path):
+            #    os.makedirs(netns_path)
+            retVal = self._setup_netcls_cgroup(plugincont_id)
+            if retVal == 0:
+                retVal = run_as_another_namespace(str(plugincont_pid),
+                                                  ['net'],
+                                                  self._add_iptable_rules)
+        except Exception as exc:
+            print exc
+            print sys.exc_info()[0]
+            retVal = -1
+        return retVal
+
+    def destroy_plugincont(self, guestcont):
+        guestcont_id = str(guestcont.long_id)
+        plugincont_id = guestcont.plugincont.id
+        self.destroy_cont(id=plugincont_id)
+        guestcont.plugincont = None
+        self.pluginconts.pop(str(guestcont_id))
+
+    def setup_plugincont(self, guestcont):
+        guestcont_id = str(guestcont.long_id)
+        if guestcont_id in self.pluginconts.keys():
+            guestcont.plugincont = self.pluginconts[guestcont_id]
+            return
+
+        self.create_plugincont(guestcont)
+        if guestcont.plugincont is not None:
+            plugincont_id = guestcont.plugincont.id
+            if self.set_plugincont_iptables(plugincont_id) != 0:
+                self.destroy_plugincont(guestcont)
+
diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py
index f8d6f581..80f820e4 100644
--- a/crawler/safe_containers_crawler.py
+++ b/crawler/safe_containers_crawler.py
@@ -8,6 +8,7 @@
 import plugins_manager
 import utils.dockerutils
 from base_crawler import BaseCrawler, BaseFrame
+from plugin_containers_manager import PluginContainersManager
 from containers import poll_containers, get_containers
 from utils.crawler_exceptions import ContainerWithoutCgroups
 from utils.namespace import run_as_another_namespace
 
 class ContainerFrame(BaseFrame):
@@ -43,195 +44,18 @@ def __init__(self,
         self.environment = environment
         self.host_namespace = host_namespace
         self.user_list = user_list
-        self.frequency = frequency
-        self.pluginconts = dict()
-        #magic numbers
-        #self.plugincont_image = 'plugincont_image'
-        self.plugincont_image = 'crawler_plugins18'
-        self.plugincont_name_prefix = 'plugin_cont'
-        self.plugincont_username = 'user1'
-        self.plugincont_workdir = '/home/user1/features/'
-        self.plugincont_seccomp_profile_path = os.getcwd() + '/crawler/utils/plugincont/seccomp-no-ptrace.json'
-        self.plugincont_guestcont_mountpoint = '/rootfs_local'
-        self.plugincont_host_uid = '166536' #from docker userns remapping
-        self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id
-
+        self.pluginconts_manager = PluginContainersManager(frequency)
 
-    def destroy_cont(self, id=None, name=None):
-        client = docker.APIClient(base_url='unix://var/run/docker.sock')
-        if name is None and id is None:
-            return
-        if name is not None:
-            _id = name
-            filter = {'name':name}
-        else:
-            _id = id
-            filter = {'id':id}
-        if client.containers(all=True,filters=filter) != []:
-            client.stop(_id)
-            client.remove_container(_id)
-
-    def create_plugincont(self, guestcont):
-        #TODO: build plugin cont image from Dockerfile first
-
-        #pip install docker=2.0.0
-        #client.containers.run("ruby", "tail -f /dev/null", pid_mode='container:d98cd4f1e518e671bc376ac429146937fbec9df7dbbfbb389e615a90c23ca27a', detach=True)
-        # maybe userns_mode='host'
-        guestcont_id = guestcont.long_id
-        guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path(guestcont_id)
-        plugincont = None
-        plugincont_name = self.plugincont_name_prefix+'_'+guestcont_id
-        seccomp_attr = json.dumps(json.load(open(self.plugincont_seccomp_profile_path)))
-        #secomp_profile_path = os.getcwd() + self.plugincont_seccomp_profile_path
-        client = docker.from_env()
-        try:
-            self.destroy_cont(name=plugincont_name)
-            plugincont = client.containers.run(
-                image=self.plugincont_image,
-                name=plugincont_name,
-                user=self.plugincont_username,
-                command="/usr/bin/python2.7 /crawler/crawler/crawler_lite.py --frequency="+str(self.frequency),
-                #command="tail -f /dev/null",
-                pid_mode='container:'+guestcont_id,
-                network_mode='container:'+guestcont_id,
-                cap_add=["SYS_PTRACE","DAC_READ_SEARCH"],
-                #security_opt=['seccomp:'+seccomp_profile_path],
-                security_opt=['seccomp:'+seccomp_attr],
-                volumes={guestcont_rootfs:{'bind':self.plugincont_guestcont_mountpoint,'mode':'ro'}},
-                detach=True)
-            time.sleep(5)
-        except Exception as exc:
-            print exc
-            print sys.exc_info()[0]
-
-        self.pluginconts[str(guestcont_id)] = plugincont
-        guestcont.plugincont = plugincont
-
-    def _add_iptable_rules(self):
-        # pip install python-iptables
-        retVal = 0
-        try:
-            rule = iptc.Rule()
-            match = iptc.Match(rule, "owner")
-            match.uid_owner = self.plugincont_host_uid
-            rule.add_match(match)
-            rule.dst = "!127.0.0.1"
-            rule.target = iptc.Target(rule, "DROP")
-            chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT")
-            chain.insert_rule(rule)
-
-            rule = iptc.Rule()
-            match = iptc.Match(rule, "cgroup")
-            match.cgroup = self.plugincont_cgroup_netclsid
-            rule.add_match(match)
-            rule.src = "!127.0.0.1"
-            rule.target = iptc.Target(rule, "DROP")
-            chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
-            chain.insert_rule(rule)
-        except Exception as exc:
-            print exc
-            print sys.exc_info()[0]
-            retVal = -1
-        return retVal
-
-    def _get_cgroup_dir(self, devlist=[]):
-        for dev in devlist:
-            paths = [os.path.join('/cgroup/', dev),
-                     os.path.join('/sys/fs/cgroup/', dev)]
-            for path in paths:
-                if os.path.ismount(path):
-                    return path
-
-            # Try getting the mount point from /proc/mounts
-            for l in open('/proc/mounts', 'r'):
-                _type, mnt, _, _, _, _ = l.split(' ')
-                if _type == 'cgroup' and mnt.endswith('cgroup/' + dev):
-                    return mnt
-
-        raise ContainerWithoutCgroups('Can not find the cgroup dir')
-
-    def _setup_netcls_cgroup(self, plugincont_id):
-        retVal = 0
-        try:
-            # cgroup_netcls_path = '/sys/fs/cgroup/net_cls/docker/'+plugincont_id
-            cgroup_netcls_path = self._get_cgroup_dir(['net_cls','net_cls,net_prio'])+'/docker/'+plugincont_id
-            tasks_path = cgroup_netcls_path+'/tasks'
-            block_path = cgroup_netcls_path+'/block'
-            block_classid_path = block_path+'/net_cls.classid'
-            block_tasks_path = block_path+'/tasks'
-
-            if not os.path.isdir(block_path):
-                os.makedirs(block_path)
-
-            fd = open(block_classid_path,'w')
-            fd.write(self.plugincont_cgroup_netclsid)
-            fd.close()
-
-            fd = open(tasks_path,'r')
-            plugincont_pids = fd.readlines() #should be just one pid == plugincont_pid
-            fd.close()
-
-            fd = open(block_tasks_path,'w')
-            for pid in plugincont_pids:
-                fd.write(pid)
-            fd.close()
-        except Exception as exc:
-            print exc
-            print sys.exc_info()[0]
-            retVal = -1
-        return retVal
-
-    def set_plugincont_iptables(self, plugincont_id):
-        retVal = 0
-        try:
-            client = docker.APIClient(base_url='unix://var/run/docker.sock')
-            plugincont_pid = client.inspect_container(plugincont_id)['State']['Pid']
-            #netns_path = '/var/run/netns'
-            #if not os.path.isdir(netns_path):
-            #    os.makedirs(netns_path)
-            retVal = self._setup_netcls_cgroup(plugincont_id)
-            if retVal == 0:
-                retVal = run_as_another_namespace(str(plugincont_pid),
-                                                  ['net'],
-                                                  self._add_iptable_rules)
-        except Exception as exc:
-            print exc
-            print sys.exc_info()[0]
-            retVal = -1
-        return retVal
-
-    def destroy_plugincont(self, guestcont):
-        guestcont_id = str(guestcont.long_id)
-        plugincont_id = guestcont.plugincont.id
-        self.destroy_cont(id=plugincont_id)
-        guestcont.plugincont = None
-        self.pluginconts.pop(str(guestcont_id))
-
-    def setup_plugincont(self, guestcont):
-        guestcont_id = str(guestcont.long_id)
-        if guestcont_id in self.pluginconts.keys():
-            guestcont.plugincont = self.pluginconts[guestcont_id]
-            return
-
-        self.create_plugincont(guestcont)
-        if guestcont.plugincont is not None:
-            plugincont_id = guestcont.plugincont.id
-            if self.set_plugincont_iptables(plugincont_id) != 0:
-                self.destroy_plugincont(guestcont)
 
     # Return list of features after reading frame from plugin cont
     def get_plugincont_features(self, guestcont):
         #import pdb
         #pdb.set_trace()
         features = []
         if guestcont.plugincont is None:
-            self.setup_plugincont(guestcont)
+            self.pluginconts_manager.setup_plugincont(guestcont)
             if guestcont.plugincont is None:
                 return features
-
-        plugincont_id = guestcont.plugincont.id
-        rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id)
-        frame_dir = rootfs+self.plugincont_workdir
+        frame_dir = self.pluginconts_manager.get_plugincont_framedir(guestcont)
         try:
             frame_list = os.listdir(frame_dir)
             frame_list.sort(key=int)
@@ -315,5 +139,5 @@ def crawl(self, ignore_plugin_exception=True):
             host_namespace=self.host_namespace,
             group_by_pid_namespace=False)
         for container in containers_list:
-            if not container.name.startswith(self.plugincont_name_prefix):
+            if not container.name.startswith(self.pluginconts_manager.plugincont_name_prefix):
                 yield self.crawl_container(container, ignore_plugin_exception)

From 2523ca8996c7d63e4ad257a5bfb0dd246d61568f Mon Sep 17 00:00:00 2001
From: Sahil Suneja
Date: Mon, 27 Nov 2017 20:14:02 -0500
Subject: [PATCH 11/47] plugincont wip

Signed-off-by: Sahil Suneja
---
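This patch vendors a trimmed copy of the crawler tree, including the crawler_lite.py entry point that the plugin container runs, into the new plugincont_img directory. For reference, the writer side of the frame-file contract that the host-side reader expects; a sketch only, not the actual crawler_lite.py:

    import os

    def emit_frame(frame_dir, seq, features):
        # integer filename so the reader's sort(key=int) orders frames;
        # write to a temp name, then rename, so a frame never appears
        # half-written to the host-side reader
        tmp = os.path.join(frame_dir, '.%d.tmp' % seq)
        with open(tmp, 'w') as fd:
            for ftype, key, val in features:
                fd.write('%s\t%r\t%r\n' % (ftype, key, val))
        os.rename(tmp, os.path.join(frame_dir, str(seq)))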
crawler/plugin_containers_manager.py | 64 ++- .../plugincont/plugincont_img/Dockerfile | 32 ++ .../plugincont_img/crawler/.gitignore | 8 + .../plugincont_img/crawler/__init__.py | 2 + .../plugincont_img/crawler/base_crawler.py | 70 +++ .../plugincont_img/crawler/config_parser.py | 37 ++ .../crawler/config_spec_and_defaults.conf | 10 + .../plugincont_img/crawler/container.py | 114 ++++ .../plugincont_img/crawler/containers.py | 91 ++++ .../crawler/containers_crawler.py | 87 +++ .../crawler/containers_logs_linker.py | 123 +++++ .../plugincont_img/crawler/crawler.conf | 80 +++ .../plugincont_img/crawler/crawler.py | 239 ++++++++ .../plugincont_img/crawler/crawler_lite.py | 180 +++++++ .../crawler/crawler_lite_back.py | 11 + .../plugincont_img/crawler/crawlmodes.py | 7 + .../plugincont_img/crawler/dockercontainer.py | 510 ++++++++++++++++++ .../crawler/emitters_manager.py | 66 +++ .../plugincont_img/crawler/formatters.py | 140 +++++ .../plugincont_img/crawler/host_crawler.py | 42 ++ .../plugincont_img/crawler/icrawl_plugin.py | 73 +++ .../plugincont_img/crawler/iemit_plugin.py | 57 ++ .../crawler/plugins/__init__.py | 2 + .../crawler/plugins/applications/__init__.py | 2 + .../plugins/applications/apache/__init__.py | 2 + .../apache/apache_container_crawler.plugin | 9 + .../apache/apache_container_crawler.py | 51 ++ .../applications/apache/apache_crawler.py | 75 +++ .../apache/apache_host_crawler.plugin | 9 + .../apache/apache_host_crawler.py | 21 + .../plugins/applications/apache/feature.py | 46 ++ .../plugins/applications/db2/__init__.py | 2 + .../db2/db2_container_crawler.plugin | 14 + .../applications/db2/db2_container_crawler.py | 65 +++ .../plugins/applications/db2/db2_crawler.py | 158 ++++++ .../applications/db2/db2_host_crawler.plugin | 14 + .../applications/db2/db2_host_crawler.py | 39 ++ .../plugins/applications/db2/feature.py | 37 ++ .../plugins/applications/liberty/__init__.py | 2 + .../plugins/applications/liberty/feature.py | 45 ++ .../liberty/liberty_container_crawler.plugin | 13 + .../liberty/liberty_container_crawler.py | 57 ++ .../applications/liberty/liberty_crawler.py | 243 +++++++++ .../liberty/liberty_host_crawler.plugin | 13 + .../liberty/liberty_host_crawler.py | 32 ++ .../plugins/applications/nginx/__init__.py | 2 + .../plugins/applications/nginx/feature.py | 22 + .../nginx/nginx_container_crawler.plugin | 9 + .../nginx/nginx_container_crawler.py | 48 ++ .../applications/nginx/nginx_crawler.py | 34 ++ .../nginx/nginx_host_crawler.plugin | 9 + .../applications/nginx/nginx_host_crawler.py | 21 + .../plugins/applications/redis/__init__.py | 2 + .../plugins/applications/redis/feature.py | 174 ++++++ .../redis/redis_container_crawler.plugin | 8 + .../redis/redis_container_crawler.py | 57 ++ .../redis/redis_host_crawler.plugin | 8 + .../applications/redis/redis_host_crawler.py | 37 ++ .../plugins/applications/tomcat/__init__.py | 2 + .../plugins/applications/tomcat/feature.py | 41 ++ .../tomcat/tomcat_container_crawler.plugin | 12 + .../tomcat/tomcat_container_crawler.py | 57 ++ .../applications/tomcat/tomcat_crawler.py | 82 +++ .../tomcat/tomcat_host_crawler.plugin | 12 + .../tomcat/tomcat_host_crawler.py | 32 ++ .../crawler/plugins/emitters/__init__.py} | 0 .../crawler/plugins/emitters/base_emitter.py | 27 + .../plugins/emitters/base_http_emitter.py | 80 +++ .../plugins/emitters/file_emitter.plugin | 8 + .../crawler/plugins/emitters/file_emitter.py | 46 ++ .../plugins/emitters/fluentd_emitter.plugin | 8 + .../plugins/emitters/fluentd_emitter.py | 98 ++++ 
.../plugins/emitters/http_emitter.plugin | 8 + .../crawler/plugins/emitters/http_emitter.py | 12 + .../plugins/emitters/https_emitter.plugin | 8 + .../crawler/plugins/emitters/https_emitter.py | 12 + .../plugins/emitters/kafka_emitter.plugin | 8 + .../crawler/plugins/emitters/kafka_emitter.py | 71 +++ .../emitters/mtgraphite_emitter.plugin | 8 + .../plugins/emitters/mtgraphite_emitter.py | 43 ++ .../plugins/emitters/sas_emitter.plugin | 14 + .../crawler/plugins/emitters/sas_emitter.py | 146 +++++ .../plugins/emitters/stdout_emitter.plugin | 8 + .../plugins/emitters/stdout_emitter.py | 40 ++ .../cloudsight_environment.plugin | 8 + .../environments/cloudsight_environment.py | 42 ++ .../kubernetes_environment.plugin | 8 + .../environments/kubernetes_environment.py | 69 +++ .../plugincont_img/crawler/plugins/sahil.py | 11 + .../crawler/plugins/systems/__init__.py | 0 .../plugincont_img/crawler/plugins/systems/c | Bin 0 -> 120 bytes .../systems/config_container_crawler.plugin | 8 + .../systems/config_container_crawler.py | 66 +++ .../systems/config_host_crawler.plugin | 8 + .../plugins/systems/config_host_crawler.py | 43 ++ .../connection_container_crawler.plugin | 8 + .../systems/connection_container_crawler.py | 24 + .../systems/connection_host_crawler.plugin | 8 + .../systems/connection_host_crawler.py | 17 + .../systems/connection_vm_crawler.plugin | 8 + .../plugins/systems/connection_vm_crawler.py | 50 ++ .../systems/cpu_container_crawler.plugin | 8 + .../plugins/systems/cpu_container_crawler.py | 166 ++++++ .../plugins/systems/cpu_host_crawler.plugin | 8 + .../plugins/systems/cpu_host_crawler.py | 31 ++ .../plugins/systems/cpu_vm_crawler.plugin | 8 + .../crawler/plugins/systems/cpu_vm_crawler.py | 20 + .../systems/ctprobe_container_crawler.plugin | 8 + .../systems/ctprobe_container_crawler.py | 438 +++++++++++++++ .../systems/disk_container_crawler.plugin | 8 + .../plugins/systems/disk_container_crawler.py | 24 + .../plugins/systems/disk_host_crawler.plugin | 8 + .../plugins/systems/disk_host_crawler.py | 17 + .../plugins/systems/disk_vm_crawler.plugin | 8 + .../plugins/systems/disk_vm_crawler.py | 19 + .../dockerhistory_container_crawler.plugin | 8 + .../dockerhistory_container_crawler.py | 17 + .../dockerinspect_container_crawler.plugin | 8 + .../dockerinspect_container_crawler.py | 16 + .../systems/dockerps_host_crawler.plugin | 8 + .../plugins/systems/dockerps_host_crawler.py | 27 + .../systems/file_container_crawler.plugin | 8 + .../plugins/systems/file_container_crawler.py | 54 ++ .../plugins/systems/file_host_crawler.plugin | 8 + .../plugins/systems/file_host_crawler.py | 26 + .../systems/fprobe_container_crawler.plugin | 8 + .../systems/fprobe_container_crawler.py | 478 ++++++++++++++++ .../interface_container_crawler.plugin | 8 + .../systems/interface_container_crawler.py | 83 +++ .../systems/interface_host_crawler.plugin | 8 + .../plugins/systems/interface_host_crawler.py | 73 +++ .../systems/interface_vm_crawler.plugin | 8 + .../plugins/systems/interface_vm_crawler.py | 83 +++ .../systems/load_container_crawler.plugin | 8 + .../plugins/systems/load_container_crawler.py | 31 ++ .../plugins/systems/load_host_crawler.plugin | 8 + .../plugins/systems/load_host_crawler.py | 24 + .../plugins/systems/load_vm_crawler.plugin | 8 + .../plugins/systems/load_vm_crawler.py | 18 + .../systems/memory_container_crawler.plugin | 8 + .../systems/memory_container_crawler.py | 67 +++ .../systems/memory_host_crawler.plugin | 8 + .../plugins/systems/memory_host_crawler.py | 29 + 
.../plugins/systems/memory_vm_crawler.plugin | 8 + .../plugins/systems/memory_vm_crawler.py | 37 ++ .../systems/metric_container_crawler.plugin | 8 + .../systems/metric_container_crawler.py | 24 + .../systems/metric_host_crawler.plugin | 8 + .../plugins/systems/metric_host_crawler.py | 17 + .../plugins/systems/metric_vm_crawler.plugin | 8 + .../plugins/systems/metric_vm_crawler.py | 132 +++++ .../systems/os_container_crawler.plugin | 10 + .../plugins/systems/os_container_crawler.py | 27 + .../plugins/systems/os_host_crawler.plugin | 8 + .../plugins/systems/os_host_crawler.py | 14 + .../plugins/systems/os_vm_crawler.plugin | 8 + .../crawler/plugins/systems/os_vm_crawler.py | 41 ++ .../systems/package_container_crawler.plugin | 8 + .../systems/package_container_crawler.py | 48 ++ .../systems/package_host_crawler.plugin | 8 + .../plugins/systems/package_host_crawler.py | 15 + .../systems/process_container_crawler.plugin | 8 + .../systems/process_container_crawler.py | 97 ++++ .../systems/process_container_crawler.py.org | 99 ++++ .../systems/process_host_crawler.plugin | 8 + .../plugins/systems/process_host_crawler.py | 87 +++ .../plugins/systems/process_vm_crawler.plugin | 8 + .../plugins/systems/process_vm_crawler.py | 98 ++++ .../pythonpackage_container_crawler.plugin | 12 + .../pythonpackage_container_crawler.py | 114 ++++ .../rubypackage_container_crawler.plugin | 12 + .../systems/rubypackage_container_crawler.py | 90 ++++ .../rubypackage_container_crawler.py.org | 94 ++++ .../plugincont_img/crawler/plugins_manager.py | 267 +++++++++ .../crawler/runtime_environment.py | 58 ++ .../plugincont_img/crawler/utils/__init__.py | 0 .../crawler/utils/config_utils.py | 102 ++++ .../crawler/utils/connection_utils.py | 65 +++ .../crawler/utils/crawler_exceptions.py | 122 +++++ .../crawler/utils/disk_utils.py | 21 + .../crawler/utils/dockerevent.py | 23 + .../crawler/utils/dockerutils.py | 409 ++++++++++++++ .../plugincont_img/crawler/utils/ethtool.py | 92 ++++ .../plugincont_img/crawler/utils/features.py | 117 ++++ .../crawler/utils/file_utils.py | 164 ++++++ .../plugincont_img/crawler/utils/mesos.py | 90 ++++ .../crawler/utils/metric_utils.py | 76 +++ .../plugincont_img/crawler/utils/misc.py | 253 +++++++++ .../crawler/utils/mtgraphite.py | 297 ++++++++++ .../plugincont_img/crawler/utils/namespace.py | 256 +++++++++ .../plugincont_img/crawler/utils/os_utils.py | 68 +++ .../plugincont_img/crawler/utils/osinfo.py | 120 +++++ .../crawler/utils/package_utils.py | 193 +++++++ .../crawler/utils/process_utils.py | 136 +++++ .../crawler/utils/socket_utils.py | 34 ++ .../crawler/utils/timeout_utils.py | 21 + .../plugincont_img/crawler/virtual_machine.py | 75 +++ .../plugincont_img/crawler/vms_crawler.py | 83 +++ .../plugincont_img/crawler/worker.py | 87 +++ .../python-conntrackprobe_0.2.1-1_all.deb | Bin 0 -> 11952 bytes ...ython-socket-datacollector_0.1.4-1_all.deb | Bin 0 -> 4402 bytes .../softflowd_0.9.9902-1_amd64.deb | Bin 0 -> 39838 bytes .../plugincont_img/requirements.txt | 14 + requirements.txt | 3 +- 204 files changed, 10981 insertions(+), 16 deletions(-) create mode 100644 crawler/utils/plugincont/plugincont_img/Dockerfile create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/.gitignore create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/base_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/config_parser.py create mode 100644 
crawler/utils/plugincont/plugincont_img/crawler/config_spec_and_defaults.conf create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/container.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/containers.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/containers_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/containers_logs_linker.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/crawler.conf create mode 100755 crawler/utils/plugincont/plugincont_img/crawler/crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/crawler_lite_back.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/crawlmodes.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/dockercontainer.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/emitters_manager.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/formatters.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/icrawl_plugin.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/iemit_plugin.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/feature.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/feature.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/feature.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_container_crawler.plugin create mode 100644 
crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/feature.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/feature.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/feature.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_host_crawler.py rename crawler/utils/plugincont/{Dockerfile => plugincont_img/crawler/plugins/emitters/__init__.py} (100%) create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/base_emitter.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/base_http_emitter.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/file_emitter.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/file_emitter.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/fluentd_emitter.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/fluentd_emitter.py create mode 100644 
crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/http_emitter.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/http_emitter.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/https_emitter.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/https_emitter.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/kafka_emitter.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/kafka_emitter.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/mtgraphite_emitter.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/mtgraphite_emitter.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/sas_emitter.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/sas_emitter.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/stdout_emitter.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/stdout_emitter.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/cloudsight_environment.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/cloudsight_environment.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/kubernetes_environment.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/kubernetes_environment.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/sahil.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/c create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_vm_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_vm_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_host_crawler.py create mode 100644 
crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_vm_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_vm_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/ctprobe_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/ctprobe_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_vm_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_vm_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerhistory_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerhistory_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerinspect_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerinspect_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerps_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerps_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/fprobe_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/fprobe_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_vm_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_vm_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_host_crawler.py create mode 100644 
crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_vm_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_vm_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_vm_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_vm_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_vm_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_vm_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_vm_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_vm_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.py.org create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_host_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_host_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_vm_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_vm_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.plugin create mode 100644 
crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py.org create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins_manager.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/runtime_environment.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/__init__.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/config_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/connection_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/crawler_exceptions.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/disk_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/dockerevent.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/dockerutils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/ethtool.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/features.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/file_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/mesos.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/metric_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/misc.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/mtgraphite.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/namespace.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/os_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/osinfo.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/package_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/process_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/socket_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/utils/timeout_utils.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/virtual_machine.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/vms_crawler.py create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/worker.py create mode 100644 crawler/utils/plugincont/plugincont_img/dependencies/python-conntrackprobe_0.2.1-1_all.deb create mode 100644 crawler/utils/plugincont/plugincont_img/dependencies/python-socket-datacollector_0.1.4-1_all.deb create mode 100644 crawler/utils/plugincont/plugincont_img/dependencies/softflowd_0.9.9902-1_amd64.deb create mode 100644 crawler/utils/plugincont/plugincont_img/requirements.txt diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 353cfe3a..e26106f3 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -5,6 +5,7 @@ import json import docker import iptc +import ctypes import plugins_manager import utils.dockerutils from base_crawler import BaseCrawler, BaseFrame @@ -17,15 +18,20 @@ class PluginContainersManager(): def 
__init__(self, frequency=-1): self.frequency = frequency self.pluginconts = dict() - #self.plugincont_image = 'plugincont_image' - self.plugincont_image = 'crawler_plugins18' + self.plugincont_image = 'plugincont_image' + #self.plugincont_image = 'crawler_plugins18' self.plugincont_name_prefix = 'plugin_cont' self.plugincont_username = 'user1' self.plugincont_framedir = '/home/user1/features/' + self.plugincont_py_path = '/usr/bin/python2.7' self.plugincont_seccomp_profile_path = os.getcwd() + '/crawler/utils/plugincont/seccomp-no-ptrace.json' + self.plugincont_image_path = os.getcwd() + '/crawler/utils/plugincont/plugincont_img' self.plugincont_guestcont_mountpoint = '/rootfs_local' self.plugincont_host_uid = '166536' #from docker userns remapping self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id + self.docker_client = docker.from_env() + self.docker_APIclient = docker.APIClient(base_url='unix://var/run/docker.sock') + self.build_plugincont_img() def get_plugincont_framedir(self, guestcont): frame_dir = None @@ -36,7 +42,7 @@ def get_plugincont_framedir(self, guestcont): return frame_dir def destroy_cont(self, id=None, name=None): - client = docker.APIClient(base_url='unix://var/run/docker.sock') + client = self.docker_APIClient if name is None and id is None: return if name is not None: @@ -49,19 +55,18 @@ def destroy_cont(self, id=None, name=None): client.stop(_id) client.remove_container(_id) - def create_plugincont(self, guestcont): - #TODO: build plugin cont image from Dockerfile first + def build_plugincont_img(self): + build_status = list(self.docker_APIclient.build(path=self.plugincont_image_path, tag=self.plugincont_image)) + assert 'Successfully built' in build_status[-1] - #pip install docker=2.0.0 - #client.containers.run("ruby", "tail -f /dev/null", pid_mode='container:d98cd4f1e518e671bc376ac429146937fbec9df7dbbfbb389e615a90c23ca27a', detach=True) - # maybe userns_mode='host' + def create_plugincont(self, guestcont): guestcont_id = guestcont.long_id guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path(guestcont_id) plugincont = None plugincont_name = self.plugincont_name_prefix+'_'+guestcont_id seccomp_attr = json.dumps(json.load(open(self.plugincont_seccomp_profile_path))) #secomp_profile_path = os.getcwd() + self.plugincont_seccomp_profile_path - client = docker.from_env() + client = self.docker_client try: self.destroy_cont(name=plugincont_name) plugincont = client.containers.run( @@ -86,7 +91,6 @@ def create_plugincont(self, guestcont): guestcont.plugincont = plugincont def _add_iptable_rules(self): - # pip install python-iptables retVal = 0 try: rule = iptc.Rule() @@ -162,7 +166,7 @@ def _setup_netcls_cgroup(self, plugincont_id): def set_plugincont_iptables(self, plugincont_id): retVal = 0 try: - client = docker.APIClient(base_url='unix://var/run/docker.sock') + client = self.docker_APIClient plugincont_pid = client.inspect_container(plugincont_id)['State']['Pid'] #netns_path = '/var/run/netns' #if not os.path.isdir(netns_path): @@ -185,6 +189,28 @@ def destroy_plugincont(self, guestcont): guestcont.plugincont = None self.pluginconts.pop(str(guestcont_id)) + def set_plugincont_py_cap(self, plugincont_id): + retVal = 0 + verify = False + try: + rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id) + py_path = rootfs+self.plugincont_py_path + libcap = ctypes.cdll.LoadLibrary("libcap.so") + caps = libcap.cap_from_text('cap_dac_read_search,cap_sys_chroot,cap_sys_ptrace+ep') + retVal = libcap.cap_set_file(py_path,caps) + if 
verify is True: + libcap.cap_to_text.restype = ctypes.c_char_p + caps_set = libcap.cap_get_file(py_path,caps) + caps_set_str = libcap.cap_to_text(caps_set, None) + assert 'cap_dac_read_search' in caps_set_str + assert 'cap_sys_chroot' in caps_set_str + assert 'cap_sys_ptrace' in caps_set_str + except Exception as exc: + print exc + print sys.exc_info()[0] + retVal = -1 + return retVal + def setup_plugincont(self, guestcont): guestcont_id = str(guestcont.long_id) if guestcont_id in self.pluginconts.keys(): @@ -192,8 +218,16 @@ def setup_plugincont(self, guestcont): return self.create_plugincont(guestcont) - if guestcont.plugincont is not None: - plugincont_id = guestcont.plugincont.id - if self.set_plugincont_iptables(plugincont_id) != 0: - self.destroy_plugincont(guestcont) + + if guestcont.plugincont is None: + return + + plugincont_id = guestcont.plugincont.id + if self.set_plugincont_iptables(plugincont_id) != 0: + self.destroy_plugincont(guestcont) + return + + if self.set_plugincont_py_cap(plugincont_id) != 0: + self.destroy_plugincont(guestcont) + return diff --git a/crawler/utils/plugincont/plugincont_img/Dockerfile b/crawler/utils/plugincont/plugincont_img/Dockerfile new file mode 100644 index 00000000..e8c58652 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/Dockerfile @@ -0,0 +1,32 @@ +FROM python:2.7 + +WORKDIR /crawler + +COPY requirements.txt /crawler/requirements.txt +RUN pip install -r requirements.txt + +COPY \ + dependencies/python-socket-datacollector_0.1.4-1_all.deb \ + dependencies/softflowd_0.9.9902-1_amd64.deb \ + dependencies/python-conntrackprobe_0.2.1-1_all.deb \ + /tmp/ + +RUN dpkg -i /tmp/python-socket-datacollector_*_all.deb && \ + apt-get -y update && \ + apt-get -y install libpcap0.8 && \ + dpkg -i /tmp/softflowd_0.9.*_amd64.deb && \ + pip install pyroute2 py-radix requests-unixsocket json-rpc && \ + dpkg -i /tmp/python-conntrackprobe_*_all.deb && \ + rm -f /tmp/*.deb + +ENV PYTHONPATH=/usr/lib/python2.7/dist-packages:/usr/local/lib/python2.7/site-packages + +ADD crawler /crawler + +RUN groupadd -r user1 -g 1000 && \ + useradd -u 1000 -m user1 -g user1 && \ + usermod -a -G user1 user1 && \ + chsh -s /bin/bash user1 + +RUN sed -i s/" and isfile_strict(file):"/:/ /usr/local/lib/python2.7/site-packages/psutil/_pslinux.py + diff --git a/crawler/utils/plugincont/plugincont_img/crawler/.gitignore b/crawler/utils/plugincont/plugincont_img/crawler/.gitignore new file mode 100644 index 00000000..cff5a5e1 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/.gitignore @@ -0,0 +1,8 @@ +deprecated/ +*.pyc +binaries/ +kafka-producer.py +timeout.py +alchemy.py +*.json +*.sh diff --git a/crawler/utils/plugincont/plugincont_img/crawler/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/__init__.py new file mode 100644 index 00000000..836e3e88 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- diff --git a/crawler/utils/plugincont/plugincont_img/crawler/base_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/base_crawler.py new file mode 100644 index 00000000..a7fe255c --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/base_crawler.py @@ -0,0 +1,70 @@ +import time +import uuid + + +class BaseFrame: + + def __init__(self, feature_types): + """ + + :param feature_types: list of feature types, e.g. ['os','cpu']. + This list is just used to describe the features in a frame. 
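A usage sketch, with made-up feature values:

            frame = BaseFrame(feature_types=['os', 'cpu'])
            frame.add_feature('os', 'linux', {'boottime': 1510700000})
            print frame  # one (type, key, value) tuple per line
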
No + checks are made to verify that all features in this list + have an actual feature in .data + """ + self.data = [] + self.metadata = {} + self.metadata['features'] = ','.join(map(str, feature_types)) + self.metadata['timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S%z') + self.metadata['uuid'] = str(uuid.uuid4()) + self.num_features = 0 + + def add_features(self, features=[]): + """features is a list of (str, FeatureObject, str)""" + self.data.extend(features) + self.num_features += len(features) + + def add_feature(self, feature_type, feature_key, feature_value): + self.data.append((feature_type, feature_key, feature_value)) + self.num_features += 1 + + def __str__(self): + return '\n'.join(str(feature) for feature in self.data) + + +class BaseCrawler: + + def __init__(self, + features=['os', 'cpu'], + plugin_places=['plugins'], + options={}): + """ + Store and check the types of the arguments. + + :param frequency: Sleep seconds between iterations + """ + self.features = features + self.plugin_places = plugin_places + self.options = options + + def crawl(self, ignore_plugin_exception=True): + """ + Crawl to get a list of snapshot frames for all systems. + + :param ignore_plugin_exception: ignore exceptions raised on a plugin + :return: a list generator of Frame objects + """ + raise NotImplementedError('crawl method implementation is missing.') + + def polling_crawl(self, timeout, ignore_plugin_exception=True): + """ + Crawl to get a snapshot frame of any new system created before + `timeout` seconds. + + :param timeout: seconds to wait for new systems + :param ignore_plugin_exception: ignore exceptions raised on a plugin + :return: a Frame object or None if no system was created. + """ + if timeout > 0: + time.sleep(timeout) + return None diff --git a/crawler/utils/plugincont/plugincont_img/crawler/config_parser.py b/crawler/utils/plugincont/plugincont_img/crawler/config_parser.py new file mode 100644 index 00000000..9e0eb9d8 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/config_parser.py @@ -0,0 +1,37 @@ +import logging + +from configobj import ConfigObj +from validate import Validator + +from utils import misc + +CONFIG_SPEC_PATH = 'config_spec_and_defaults.conf' + +_config = None + +logger = logging.getLogger('crawlutils') + + +def parse_crawler_config(config_path='crawler.conf'): + global _config + + # 1. get configs + _config = ConfigObj(infile=misc.execution_path(config_path), + configspec=misc.execution_path(CONFIG_SPEC_PATH)) + + # Configspec is not being used currently + # but keeping validate() and apply_user_args() for future. + # Essentially NOP right now + + # 2. 
apply defaults + vdt = Validator() + _config.validate(vdt) + + +def get_config(): + global _config + + if not _config: + parse_crawler_config() + + return _config diff --git a/crawler/utils/plugincont/plugincont_img/crawler/config_spec_and_defaults.conf b/crawler/utils/plugincont/plugincont_img/crawler/config_spec_and_defaults.conf new file mode 100644 index 00000000..f314c8e0 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/config_spec_and_defaults.conf @@ -0,0 +1,10 @@ +[ general ] +features_to_crawl = string_list(default=list('os', 'cpu')) +environment = string(min=1, max=30, default='cloudsight') + +plugin_places = string_list(default=list('plugins')) +compress = boolean(default=False) + +link_container_log_files = boolean(default=False) +default_mountpoint = string(default='/') +docker_containers_list = string(default='ALL') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/container.py b/crawler/utils/plugincont/plugincont_img/crawler/container.py new file mode 100644 index 00000000..31a41d0f --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/container.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import logging +import os + +import psutil + +from utils import misc, namespace + +logger = logging.getLogger('crawlutils') + + +def list_raw_containers(user_list='ALL'): + """ + A running container is defined as a group of processes with the + `pid` namespace different to the `init` process `pid` namespace. + """ + init_ns = namespace.get_pid_namespace(1) + for p in psutil.process_iter(): + pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid) + if pid == 1 or pid == '1': + + # don't confuse the init process as a container + + continue + if user_list not in ['ALL', 'all', 'All']: + if str(pid) not in user_list: + + # skip containers not in the list + + continue + if misc.process_is_crawler(pid): + + # don't confuse the crawler process with a container + + continue + curr_ns = namespace.get_pid_namespace(pid) + if not curr_ns: + + # invalid container + + continue + if curr_ns == init_ns: + continue + yield Container(pid, curr_ns) + + +class Container(object): + + """ + This class abstracts a running Linux container. 
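    Detection of such raw containers (see list_raw_containers() above)
    hinges on pid-namespace comparison. On a modern Linux that boils down
    to reading namespace identities from procfs, roughly as in this
    sketch (it assumes utils.namespace performs an equivalent
    /proc-based check):

        import os
        init_ns = os.stat('/proc/1/ns/pid').st_ino     # init's pid namespace
        cand_ns = os.stat('/proc/4242/ns/pid').st_ino  # candidate process
        looks_containerized = cand_ns != init_ns
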
+ """ + + def __init__(self, pid, process_namespace=None): + self.pid = str(pid) + self.short_id = str(hash(pid)) + self.long_id = str(hash(pid)) + self.name = str(pid) + self.namespace = str(pid) + self.image = None + self.root_fs = None + self.log_prefix = None + self.log_file_list = None + self.process_namespace = (process_namespace or + namespace.get_pid_namespace(pid)) + + def __eq__(self, other): + """ + A container is equal to another if they have the same PID + """ + if isinstance(other, Container): + return self.pid == other.pid + else: + return False + + def __hash__(self): + return 1 + + def __ne__(self, other): + return not self.__eq__(other) + + def is_docker_container(self): + return False + + def __str__(self): + return str(self.__dict__) + + def get_metadata_dict(self): + metadata = { + 'namespace': self.namespace, + 'container_long_id': self.long_id, + 'container_short_id': self.short_id, + 'container_name': self.name, + 'container_image': self.image, + 'emit_shortname': self.short_id, + } + return metadata + + def get_memory_cgroup_path(self, node='memory.stat'): + raise NotImplementedError() + + def get_cpu_cgroup_path(self, node='cpuacct.usage'): + raise NotImplementedError() + + def is_running(self): + return os.path.exists('/proc/' + self.pid) + + def link_logfiles(self): + # no-op + pass + + def unlink_logfiles(self): + # no-op + pass diff --git a/crawler/utils/plugincont/plugincont_img/crawler/containers.py b/crawler/utils/plugincont/plugincont_img/crawler/containers.py new file mode 100644 index 00000000..1c10cde7 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/containers.py @@ -0,0 +1,91 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import logging + +import container +from utils import misc +from dockercontainer import get_docker_containers, poll_docker_containers + +logger = logging.getLogger('crawlutils') + + +def list_all_containers(user_list='ALL', host_namespace='', + ignore_raw_containers=True): + """ + Returns a list of all running containers in the host. + + :param user_list: list of Docker container IDs. TODO: include rkt Ids. + :param host_namespace: string representing the host name (e.g. host IP) + :param ignore_raw_containers: if True, only include Docker or rkt. + An example of a non-docker container is a chromium-browser process. + :return: a list of Container objects + """ + visited_ns = set() # visited PID namespaces + + for _container in get_docker_containers(host_namespace=host_namespace, + user_list=user_list): + curr_ns = _container.process_namespace + if curr_ns not in visited_ns: + visited_ns.add(curr_ns) + yield _container + + # XXX get list of rkt containers + + if ignore_raw_containers: + return + + for _container in container.list_raw_containers(user_list): + curr_ns = _container.process_namespace + if curr_ns not in visited_ns: + visited_ns.add(curr_ns) + yield _container + + +def poll_containers(timeout, user_list='ALL', host_namespace='', + ignore_raw_containers=True): + """ + Returns a list of all running containers in the host. + + :param timeout: seconds to wait for a new container + :param user_list: list of Docker container IDs. TODO: include rkt Ids. + :param host_namespace: string representing the host name (e.g. host IP) + :param ignore_raw_containers: if True, only include Docker or rkt. + An example of a non-docker container is a chromium-browser process. 
+ :return: a list of Container objects + """ + # XXX: we only support polling docker containers + return poll_docker_containers(timeout, user_list=user_list, + host_namespace=host_namespace) + + +def get_containers( + environment='cloudsight', + host_namespace=misc.get_host_ipaddr(), + user_list='ALL', + ignore_raw_containers=True +): + """ + Returns a list of all containers running in the host. + + XXX This list excludes non-docker containers when running in non-cloudsight + environment. TODO: fix this weird behaviour. + + :param environment: this defines how the name (namespace) is constructed. + :param host_namespace: string representing the host name (e.g. host IP) + :param user_list: list of Docker container IDs. TODO: include rkt. + :param ignore_raw_containers: if True, only include Docker or rkt. + An example of a non-docker container is a chromium-browser process. + :return: a list of Container objects. + """ + filtered_list = [] + containers_list = list_all_containers(user_list, host_namespace, + ignore_raw_containers) + for _container in containers_list: + default_environment = 'cloudsight' + if (environment != default_environment and + not _container.is_docker_container()): + continue + + filtered_list.append(_container) + + return filtered_list diff --git a/crawler/utils/plugincont/plugincont_img/crawler/containers_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/containers_crawler.py new file mode 100644 index 00000000..d1305210 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/containers_crawler.py @@ -0,0 +1,87 @@ +from containers import poll_containers, get_containers +import plugins_manager +from base_crawler import BaseCrawler, BaseFrame + + +class ContainerFrame(BaseFrame): + + def __init__(self, feature_types, container): + BaseFrame.__init__(self, feature_types) + self.metadata.update(container.get_metadata_dict()) + self.metadata['system_type'] = 'container' + + +class ContainersCrawler(BaseCrawler): + + def __init__(self, + features=['os', 'cpu'], + environment='cloudsight', + user_list='ALL', + host_namespace='', + plugin_places=['plugins'], + options={}): + + BaseCrawler.__init__( + self, + features=features, + plugin_places=plugin_places, + options=options) + plugins_manager.reload_env_plugin(environment, plugin_places) + plugins_manager.reload_container_crawl_plugins( + features, plugin_places, options) + self.plugins = plugins_manager.get_container_crawl_plugins(features) + self.environment = environment + self.host_namespace = host_namespace + self.user_list = user_list + + def crawl_container(self, container, ignore_plugin_exception=True): + """ + Crawls a specific container and returns a Frame for it. + + :param container: a Container object + :param ignore_plugin_exception: just ignore exceptions in a plugin + :return: a Frame object. The returned frame can have 0 features and + still have metadata. This can occur if there were no plugins, or all + the plugins raised an exception (and ignore_plugin_exception was True). + """ + frame = ContainerFrame(self.features, container) + for (plugin_obj, plugin_args) in self.plugins: + try: + frame.add_features( + plugin_obj.crawl( + container_id=container.long_id, + **plugin_args)) + except Exception as exc: + if not ignore_plugin_exception: + raise exc + return frame + + def polling_crawl(self, timeout, ignore_plugin_exception=True): + """ + Crawls any container created before `timeout` seconds have elapsed. 
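        A typical driver pairs this with crawl(), roughly as in this sketch:

            crawler = ContainersCrawler(features=['os', 'cpu'])
            for frame in crawler.crawl():  # full sweep of running containers
                print frame.num_features
            frame = crawler.polling_crawl(timeout=5)  # then wait for newcomers
            if frame is not None:
                print frame.metadata['container_name']
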
+ + :param timeout: seconds to wait for new containers + :param ignore_plugin_exception: just ignore exceptions in a plugin + :return: a Frame object + """ + container = poll_containers( + timeout, + user_list=self.user_list, + host_namespace=self.host_namespace) + if container: + return self.crawl_container(container, ignore_plugin_exception) + + return None + + def crawl(self, ignore_plugin_exception=True): + """ + Crawls all containers. + + :param ignore_plugin_exception: just ignore exceptions in a plugin + :return: a list generator of Frame objects + """ + containers_list = get_containers( + user_list=self.user_list, + host_namespace=self.host_namespace) + for container in containers_list: + yield self.crawl_container(container, ignore_plugin_exception) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/containers_logs_linker.py b/crawler/utils/plugincont/plugincont_img/crawler/containers_logs_linker.py new file mode 100644 index 00000000..0e9d6c07 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/containers_logs_linker.py @@ -0,0 +1,123 @@ +import argparse +import os + +from base_crawler import BaseCrawler +from worker import Worker +from containers import get_containers +from utils import misc + + +class DockerContainersLogsLinker(BaseCrawler): + """ + Class used to maintain symlinks to container log files. The idea with this + is to symlink all log files of interest (from all containers of interest) + to some known directory in the host. Then point some log collector like + logstash to it (and get container logs). + """ + + def __init__(self, + environment='cloudsight', + user_list='ALL', + host_namespace=''): + self.containers_list = set() + self.new = set() + self.deleted = set() + self.environment = environment + self.host_namespace = host_namespace + self.user_list = user_list + + def update_containers_list(self): + """ + Actually poll for new containers. This updates the list of new and + deleted containers, in self.new and self.deleted. + :return: None + """ + curr_containers = set( + get_containers( + environment=self.environment, + user_list=self.user_list, + host_namespace=self.host_namespace)) + self.new = curr_containers - self.containers_list + self.deleted = self.containers_list - curr_containers + self.containers_list = curr_containers + + def link_containers(self): + for container in self.deleted: + container.unlink_logfiles() + for container in self.new: + container.link_logfiles() + + def crawl(self): + self.update_containers_list() + self.link_containers() + return [] + + +if __name__ == '__main__': + + euid = os.geteuid() + if euid != 0: + print 'Need to run this as root.' + exit(1) + + parser = argparse.ArgumentParser() + parser.add_argument( + '--namespace', + dest='namespace', + type=str, + nargs='?', + default=misc.get_host_ipaddr(), + help='Data source this crawler is associated with. Defaults to ' + '/localhost', + ) + parser.add_argument( + '--frequency', + dest='frequency', + type=int, + default=-1, + help='Target time period for iterations. Defaults to -1 which ' + 'means only run one iteration.' + ) + parser.add_argument('--logfile', dest='logfile', type=str, + default='crawler.log', + help='Logfile path. Defaults to crawler.log' + ) + parser.add_argument( + '--crawlContainers', + dest='crawlContainers', + type=str, + nargs='?', + default='ALL', + help='List of containers to crawl as a list of Docker container IDs. ' + 'If this is not passed, then just the host is crawled. 
' 'Alternatively the word "ALL" can be used to crawl every ' 'container. "ALL" will crawl all namespaces including the host ' 'itself. This option is only valid for INVM crawl mode. Example: ' '--crawlContainers 5f3380d2319e,681be3e32661', ) + parser.add_argument( + '--environment', + dest='environment', + type=str, + default='cloudsight', + help='This specifies some environment-specific behavior, like how ' + 'to name a container. The way to add a new behavior is by ' + 'implementing a plugin (see plugins/cloudsight_environment.py ' + 'as an example). Defaults to "cloudsight".', + ) + + misc.setup_logger('crawlutils', 'linker.log') + misc.setup_logger('yapsy', 'yapsy.log') + args = parser.parse_args() + crawler = DockerContainersLogsLinker(environment=args.environment, + user_list=args.crawlContainers, + host_namespace=args.namespace) + + worker = Worker(emitters=None, + frequency=args.frequency, + crawler=crawler) + + try: + worker.run() + except KeyboardInterrupt: + pass diff --git a/crawler/utils/plugincont/plugincont_img/crawler/crawler.conf b/crawler/utils/plugincont/plugincont_img/crawler/crawler.conf new file mode 100644 index 00000000..88b042c6 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/crawler.conf @@ -0,0 +1,80 @@ +[ general ] + #enabled_plugins = os_container, cpu_container + #enabled_emitter_plugins = Stdout Emitter, File Emitter +[ crawlers ] + + [[ os_container ]] + target = CONTAINER + + [[ process_container ]] + + [[ os_vm ]] + + [[ process_vm ]] + + [[ os_host ]] + + [[ process_host ]] + + [[ ruby_pkg ]] + + [[ python_pkg ]] + avoid_setns = False + + [[ fprobe_container ]] + # parameters for softflowd timeouts + maxlife_timeout = 5 + + # flow probe must create the chosen netflow version + netflow_version = 10 + + # The directory where all the flow probe's output data will be written to + fprobe_output_dir = /tmp/crawler-fprobe + + # The filename pattern of the files that the data collector will produce + # container-id, pid, and timestamp will be replaced with concrete values + output_filepattern = fprobe-{ifname}-{timestamp} + + # The user to switch socket-datafile collector to in order to + # drop root privileges + fprobe_user = nobody + + # Terminate the started netflow probe process when terminating the crawler; + # this is useful when running the crawler as a process and all started + # flow probe processes should automatically terminate, thus ending to + # produce further data; set to 'false' or '0' to disable, enable otherwise; + # the default value is 'false' + terminate_fprobe = 1 + + # Berkeley packet filter for the probe + fprobe_bpf = (tcp[tcpflags] & (tcp-syn|tcp-ack|tcp-fin) != 0) or not tcp + + [[ ctprobe_container ]] + + # The user to switch socket-datafile collector and conntrackprobe to + # in order to drop root privileges + ctprobe_user = nobody + + # The directory where all the probe's output data will be written to + ctprobe_output_dir = /tmp/crawler-ctprobe + + # The filename pattern of the files that the data collector will produce + # container-id, pid, and timestamp will be replaced with concrete values + output_filepattern = fprobe-{ifname}-{timestamp} + +[ emitters ] + + [[ Stdout Emitter ]] + arg_from_conf = 1 + format = csv + + [[ File Emitter ]] + url = file://tmp/crawler-out + format = csv + arg_from_conf = 2 + + [[ SAS Https Emitter ]] + token_filepath = /etc/sas-secrets/token + access_group_filepath = /etc/sas-secrets/access_group + cloudoe_filepath = /etc/sas-secrets/cloudoe + ssl_verification = False diff
--git a/crawler/utils/plugincont/plugincont_img/crawler/crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/crawler.py new file mode 100755 index 00000000..f4faa8cd --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/crawler.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +import argparse +import json +import os + +from worker import Worker +from containers_crawler import ContainersCrawler +from utils import misc +from crawlmodes import Modes +from emitters_manager import EmittersManager +from host_crawler import HostCrawler +from vms_crawler import VirtualMachinesCrawler + +logger = None + + +def csv_list(string): + return string.split(',') + + +def json_parser(string): + return json.loads(string) + + +def main(): + + euid = os.geteuid() + if euid != 0: + print 'Need to run this as root.' + exit(1) + + parser = argparse.ArgumentParser() + parser.add_argument( + '--options', + dest='options', + type=json_parser, + default={}, + help='JSON dict of crawler options used to be passed as arguments' + 'to the crawler plugins.' + ) + parser.add_argument( + '--url', + dest='url', + type=csv_list, + default=['stdout://'], + help='Send the snapshot data to URL. Defaults to the console.', + ) + parser.add_argument( + '--namespace', + dest='namespace', + type=str, + nargs='?', + default=misc.get_host_ipaddr(), + help='Data source this crawler is associated with. Defaults to ' + '/localhost', + ) + parser.add_argument( + '--features', + dest='features', + type=csv_list, + default=['os', 'cpu'], + help='Comma-separated list of feature-types to crawl. Defaults to ' + 'os,cpu', + ) + parser.add_argument( + '--frequency', + dest='frequency', + type=int, + default=-1, + help='Target time period for iterations. Defaults to -1 which ' + 'means only run one iteration.' + ) + parser.add_argument( + '--compress', + dest='compress', + action='store_true', + default=False, + help='Whether to GZIP-compress the output frame data, must be one of ' + '{true,false}. Defaults to false', + ) + parser.add_argument('--logfile', dest='logfile', type=str, + default='crawler.log', + help='Logfile path. Defaults to crawler.log' + ) + parser.add_argument( + '--crawlmode', + dest='crawlmode', + type=str, + choices=[ + Modes.INVM, + Modes.OUTVM, + Modes.MOUNTPOINT, + Modes.OUTCONTAINER, + Modes.MESOS, + ], + default=Modes.INVM, + help='The crawler mode: ' + '{INVM,OUTVM,MOUNTPOINT,OUTCONTAINER}. ' + 'Defaults to INVM', + ) + parser.add_argument( + '--mountpoint', + dest='mountpoint', + type=str, + default='/', + help='Mountpoint location used as the / for features like packages,' + 'files, config' + ) + parser.add_argument( + '--format', + dest='format', + type=str, + default='csv', + choices=['csv', 'graphite', 'json', 'logstash'], + help='Emitted data format.', + ) + parser.add_argument( + '--crawlContainers', + dest='crawlContainers', + type=str, + nargs='?', + default='ALL', + help='List of containers to crawl as a list of Docker container IDs' + '(only Docker is supported at the moment). ' 'Defaults to all ' + 'running containers. 
Example: --crawlContainers aaa,bbb', + ) + parser.add_argument( + '--crawlVMs', + dest='vm_descs_list', + nargs='+', + default='ALL', + help='List of VMs to crawl' + 'Default is \'ALL\' VMs' + 'Currently need following as input for each VM' + '\'vm_name, kernel_version_long, linux_flavour, arch\'' + 'Auto kernel version detection in future, when only vm names' + '(\'ALL\' by default) would need to be passed' + 'Example --crawlVM' + 'vm1,3.13.0-24-generic_3.13.0-24.x86_64,ubuntu,x86_64' + 'vm2,4.0.3.x86_64,vanilla,x86_64', + ) + parser.add_argument( + '--environment', + dest='environment', + type=str, + default='cloudsight', + help='This speficies some environment specific behavior, like how ' + 'to name a container. The way to add a new behavior is by ' + 'implementing a plugin (see plugins/cloudsight_environment.py ' + 'as an example. Defaults to "cloudsight".', + ) + parser.add_argument( + '--plugins', + dest='plugin_places', + type=csv_list, + default=['plugins'], + help='This is a comma separated list of directories where to find ' + 'plugins. Each path can be an absolute, or a relative to the ' + 'location of the crawler.py. Default is "plugins"', + ) + parser.add_argument( + '--numprocesses', + dest='numprocesses', + type=int, + default=1, + help='Number of processes used for container crawling. Defaults ' + 'to the number of cores. NOT SUPPORTED.' + ) + parser.add_argument( + '--extraMetadata', + dest='extraMetadata', + type=json_parser, + default={}, + help='Json with data to annotate all features. It can be used ' + 'to append a set of system identifiers to the metadata feature ' + 'and if the --extraMetadataForAll' + ) + parser.add_argument( + '--avoidSetns', + dest='avoid_setns', + action='store_true', + default=False, + help='Avoids the use of the setns() syscall to crawl containers. ' + 'Some features like process will not work with this option. 
' + 'Only applies to the OUTCONTAINER mode' + ) + + args = parser.parse_args() + misc.setup_logger('crawlutils', args.logfile) + misc.setup_logger('yapsy', 'yapsy.log') + + options = args.options + options['avoid_setns'] = args.avoid_setns + options['mountpoint'] = args.mountpoint + + emitters = EmittersManager(urls=args.url, + format=args.format, + compress=args.compress, + extra_metadata=args.extraMetadata, + plugin_places=args.plugin_places) + + if args.crawlmode == 'OUTCONTAINER': + crawler = ContainersCrawler( + features=args.features, + environment=args.environment, + user_list=args.crawlContainers, + host_namespace=args.namespace, + plugin_places=args.plugin_places, + options=options) + elif args.crawlmode == 'INVM' or args.crawlmode == 'MOUNTPOINT': + crawler = HostCrawler( + features=args.features, + namespace=args.namespace, + plugin_places=args.plugin_places, + options=options) + elif args.crawlmode == 'OUTVM': + crawler = VirtualMachinesCrawler( + features=args.features, + user_list=args.vm_descs_list, + host_namespace=args.namespace, + plugin_places=args.plugin_places, + options=options) + else: + raise NotImplementedError('Invalid crawlmode') + + worker = Worker(emitters=emitters, + frequency=args.frequency, + crawler=crawler) + + try: + worker.run() + except KeyboardInterrupt: + pass + + +if __name__ == '__main__': + main() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py new file mode 100644 index 00000000..d175aef9 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py @@ -0,0 +1,180 @@ +import os +import sys +import inspect +import imp +import time +import argparse +import shutil +import cStringIO +import json +from icrawl_plugin import IContainerCrawler + +plugins_dir = '/crawler/crawler/plugins/systems/' # might eventually become /home/user1/crawler/plugins/... +guestcont_plugins_file = '/rootfs_local/crawlplugins' +plugins_file = '/rootfs_local/crawlplugins' # should eventually be /home/user1/crawlplugins +frame_dir = '/home/user1/features/' +plugin_objs = [] +active_plugins = [] +frquency = -1 +next_iteration_time = None + +def get_plugin_obj(plugin_name): + plugin_module_name = plugin_name.strip()+'_container_crawler' + plugin_filename = plugin_name.strip()+'_container_crawler.py' + for filename in os.listdir(plugins_dir): + if plugin_filename == filename: + plugin_module = imp.load_source(plugin_module_name, plugins_dir+plugin_filename) + plugin_classes = inspect.getmembers(plugin_module, inspect.isclass) + for plugin_class_name, plugin_class in plugin_classes: + if plugin_class_name is not 'IContainerCrawler' and issubclass(plugin_class, IContainerCrawler): + plugin_obj = plugin_class() + return plugin_obj + break + +def run_plugins_org(): + # import pdb + # pdb.set_trace() + plugin_names = tuple(open('/crawlercmd/crawlplugins','r')) + for plugin_name in plugin_names: + print plugin_name + plugin_obj = get_plugin_obj(plugin_name) + print plugin_obj.get_feature() + try: + for i in plugin_obj.crawl('some_cont_id',avoid_setns=False): + print i + except: + print sys.exc_info()[0] + +def parse_args(): + global frequency + parser = argparse.ArgumentParser() + parser.add_argument( + '--frequency', + dest='frequency', + type=int, + default=-1, + help='Target time period for iterations. Defaults to -1 which ' + 'means only run one iteration.' 
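+        # Illustrative invocation (assumed; mirrors how the plugin sidecar
+        # is expected to launch this script):
+        #   python2.7 crawler/crawler_lite.py --frequency=5
+        # frequency < 0 runs a single iteration, 0 iterates with no sleep,
+        # and > 0 is the target period in seconds (see
+        # _get_next_iteration_time() below).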
+ ) + args = parser.parse_args() + frequency = args.frequency + +def _get_next_iteration_time(snapshot_time): + """ + Returns the number of seconds to sleep before the next iteration. + :param snapshot_time: Start timestamp of the current iteration. + :return: Seconds to sleep as a float. + """ + global next_iteration_time + if frequency == 0: + return 0 + + if next_iteration_time is None: + next_iteration_time = snapshot_time + frequency + else: + next_iteration_time += frequency + + while next_iteration_time + frequency < time.time(): + next_iteration_time += frequency + + time_to_sleep = next_iteration_time - time.time() + return time_to_sleep + +def format(frame): + """ + Writes frame data and metadata into iostream in csv format. + + :param iostream: a CStringIO used to buffer the formatted features. + :param frame: a BaseFrame object to be written into iostream + :return: None + """ + iostream = cStringIO.StringIO() + for (key, val, feature_type) in frame: + if not isinstance(val, dict): + val = val._asdict() + iostream.write('%s\t%s\t%s\n' % ( + feature_type, json.dumps(key), + json.dumps(val, separators=(',', ':')))) + return iostream + +def iterate(snapshot_time=0, timeout=0): + if timeout > 0: + time.sleep(timeout) + try: + reload_plugins() + frame_file = frame_dir+str(int(snapshot_time)) + fd = open(frame_file,'w') + for plugin_obj in plugin_objs: + plugin_crawl_output = plugin_obj.crawl('some_cont_id',avoid_setns=False) + iostream = format(plugin_crawl_output) + iostream.seek(0) + shutil.copyfileobj(iostream, fd) + fd.close() + except: + print sys.exc_info()[0] + +def run_plugins(): + if os.path.isdir(frame_dir): + shutil.rmtree(frame_dir) + os.makedirs(frame_dir) + time_to_sleep = 0 + while True: + snapshot_time = time.time() + iterate(snapshot_time,time_to_sleep) + # Frequency < 0 means only one run. + if frequency < 0: + break + time_to_sleep = _get_next_iteration_time(snapshot_time) + +def get_plugin_external(url): + # download tar or .plugin+.py files from url + # put inside plugins_dir == crawler/plugins/ + # do pip install requirements.txt for plugin + # add plugin name to plugins_file /home/user1/crawlplugins + # TODO + pass + +def get_plugin_local(plugin_name): + # collect plugin using plugin_name from a central crawler-specific repo. 
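+    # One possible (assumed) shape for the pip/registration steps listed
+    # here, if a fetched plugin ships a requirements.txt next to its files:
+    #   import subprocess
+    #   req = os.path.join(plugins_dir, 'requirements.txt')
+    #   if os.path.exists(req):
+    #       subprocess.check_call(['pip', 'install', '-r', req])
+    #   with open(plugins_file, 'a') as f:
+    #       f.write(plugin_name.strip() + '\n')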
+ # put inside plugins_dir == crawler/plugins/ + # do pip install requirements.txt for plugin + # add plugin name to plugins_file /home/user1/crawlplugins + # central repo plugins can also be preloaded in plugin cont + # TODO + pass + +def gather_plugins(): + if not os.path.exists(guestcont_plugins_file): + return + + fd = open(guestcont_plugins_file,'r') + for plugin_line in fd.readlines(): + if plugin_line.startswith('http'): + get_plugin_external(plugin_line) + else: + get_plugin_local(plugin_line) + fd.close() + + global plugin_objs + global active_plugins + plugin_names = tuple(open(plugins_file,'r')) + for plugin_name in plugin_names: + if plugin_name in active_plugins: + continue + plugin_obj = get_plugin_obj(plugin_name) + if plugin_obj is not None: + print plugin_name, plugin_obj.get_feature() + plugin_objs.append(plugin_obj) + active_plugins.append(plugin_name) + +def reload_plugins(): + gather_plugins() + +def sleep_forever(): + while True: + time.sleep(10) + +parse_args() +gather_plugins() +run_plugins() +sleep_forever() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite_back.py b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite_back.py new file mode 100644 index 00000000..bc34dd88 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite_back.py @@ -0,0 +1,11 @@ +import os + +plugin_names = tuple(open('/crawlercmd/crawlplugins','r')) +for plugin_name in plugin_names: + plugin_file = plugin_name.strip()+'_container_crawler.py' + plugin_module = plugin_name.strip()+'_container_crawler' + for filename in os.listdir('/crawler/crawler/plugins/systems'): + if filename == plugin_file: + print filename + import plugin_module + diff --git a/crawler/utils/plugincont/plugincont_img/crawler/crawlmodes.py b/crawler/utils/plugincont/plugincont_img/crawler/crawlmodes.py new file mode 100644 index 00000000..ce91ed83 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/crawlmodes.py @@ -0,0 +1,7 @@ +from utils.misc import enum + +Modes = enum(INVM='INVM', + OUTVM='OUTVM', + MOUNTPOINT='MOUNTPOINT', + OUTCONTAINER='OUTCONTAINER', + MESOS='MESOS') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/dockercontainer.py b/crawler/utils/plugincont/plugincont_img/crawler/dockercontainer.py new file mode 100644 index 00000000..7d8ca25a --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/dockercontainer.py @@ -0,0 +1,510 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import glob +import json +import logging +import os +import shutil + +from requests.exceptions import HTTPError + +import plugins_manager +from container import Container +from utils import misc, namespace + +from utils.crawler_exceptions import (ContainerInvalidEnvironment, + ContainerNonExistent, + DockerutilsNoJsonLog, + DockerutilsException, + ContainerWithoutCgroups) +from utils.dockerutils import (exec_dockerps, + get_docker_container_json_logs_path, + get_docker_container_rootfs_path, + exec_dockerinspect, + poll_container_create_events) + +logger = logging.getLogger('crawlutils') + +HOST_LOG_BASEDIR = '/var/log/crawler_container_logs/' +LOG_TYPES_FILE = 'd464347c-3b99-11e5-b0e9-062dcffc249f.type-mapping' +DEFAULT_LOG_FILES = [{'name': '/var/log/messages', + 'type': None}, + {'name': '/etc/csf_env.properties', + 'type': None}, ] + + +def get_docker_containers(user_list=None, host_namespace=''): + """ + Get the list of running Docker containers, as `DockerContainer` objects. + This is basically polling. 
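+    An illustrative use (container id made up):
+
+        for c in get_docker_containers(user_list='5f3380d2319e'):
+            print c.short_id, c.name
+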
Ideally, we should subscribe to Docker + events so we can keep the containers list up to date without having to + poll like this. + + :param host_namespace: string representing the host name (e.g. host IP) + :param user_list: list of Docker container IDs. `None` means all + containers. + :return: a list of DockerContainer objects + """ + for inspect in exec_dockerps(): + long_id = inspect['Id'] + + if user_list not in ['ALL', 'all', 'All', None]: + user_ctrs = [cid[:12] for cid in user_list.split(',')] + short_id = long_id[:12] + if short_id not in user_ctrs: + continue + + try: + c = DockerContainer(long_id, inspect=inspect, + host_namespace=host_namespace) + if c.namespace: + yield c + except ContainerInvalidEnvironment as e: + logger.exception(e) + + +def poll_docker_containers(timeout, user_list=None, host_namespace=''): + """ + Get the first container created before `timeout` seconds have elapsed. + + :param timeout: seconds to wait for a new container. + :param host_namespace: string representing the host name (e.g. host IP) + :param user_list: list of Docker container IDs. `None` means all + containers. + :return: a DockerContainer object (just the first container created). + """ + if timeout <= 0: + return None + + try: + cEvent = poll_container_create_events(timeout) + + if not cEvent: + return None + c = DockerContainer(cEvent.get_containerid(), inspect=None, + host_namespace=host_namespace) + if c.namespace: + return c + except ContainerInvalidEnvironment as e: + logger.exception(e) + + +class LogFileLink(): + """ + If `host_log_dir is not None`, then we should prefix `dest` with + `host_log_dir`. + """ + + def __init__(self, name=None, type=None, source=None, + dest=None, host_log_dir=None): + self.name = name + self.type = type + self.source = source + self.dest = dest + self.host_log_dir = host_log_dir + + def __str__(self): + return "%s: %s --> %s" % (self.name, self.source, self.dest) + + def get_dest(self): + if self.host_log_dir: + return misc.join_abs_paths(self.host_log_dir, self.dest) + return self.dest + + +class DockerContainer(Container): + + DOCKER_JSON_LOG_FILE = "docker.log" + + def __init__( + self, + long_id, + inspect=None, + host_namespace='', + process_namespace=None, + ): + + # Some quick sanity checks + if not isinstance(long_id, basestring): + raise TypeError('long_id should be a string') + if inspect and not isinstance(inspect, dict): + raise TypeError('inspect should be a dict.') + + if not inspect: + try: + inspect = exec_dockerinspect(long_id) + except HTTPError: + raise ContainerNonExistent('No docker container with ID: %s' + % long_id) + + state = inspect['State'] + self.image = inspect['Image'] + + assert(long_id == inspect['Id']) + self.long_id = long_id + self.host_namespace = host_namespace + self.pid = str(state['Pid']) + self.name = inspect['Name'] + self.running = state['Running'] + self.created = inspect['Created'] + self.network_settings = inspect['NetworkSettings'] + self.cmd = inspect['Config']['Cmd'] + self.mounts = inspect.get('Mounts') + self.volumes = inspect.get('Volumes') + self.image_name = inspect['Config']['Image'] + self.inspect = inspect + + self.process_namespace = (process_namespace or + namespace.get_pid_namespace(self.pid)) + + # This short ID is mainly used for logging purposes + self.short_id = long_id[:12] + + # Docker prepends a '/' to the name. Let's remove it. 
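+        # e.g. docker inspect reports Name as '/nginx1' (name made up);
+        # after this step self.name is just 'nginx1'.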
+ if self.name[0] == '/': + self.name = self.name[1:] + + self._set_image_fields(inspect.get('RepoTag', '')) + self._set_mounts_list() + + try: + self.root_fs = get_docker_container_rootfs_path(self.long_id) + except (HTTPError, RuntimeError, DockerutilsException) as e: + logger.exception(e) + self.root_fs = None + + self._set_logs_list_input() + self._set_environment_specific_options() + self._set_logs_list() + + def _set_image_fields(self, repo_tag): + """ + This function parses the image repository:tag string to try + to get info like the registry, and the "owner_namespace". + This "owner_namespace" field is not exactly officially a docker + concept, but it usually points to the owner of the image. + """ + self.docker_image_long_name = repo_tag + self.docker_image_short_name = os.path.basename(repo_tag) + if (':' in repo_tag) and ('/' not in repo_tag.rsplit(':', 1)[1]): + self.docker_image_tag = repo_tag.rsplit(':', 1)[1] + else: + self.docker_image_tag = '' + self.docker_image_registry = os.path.dirname(repo_tag).split('/')[0] + try: + # This is the 'abc' in 'registry/abc/bla:latest' + self.owner_namespace = os.path.dirname(repo_tag).split('/', 1)[1] + except IndexError: + self.owner_namespace = '' + + def is_docker_container(self): + return True + + def get_container_ip(self): + ip = self.inspect['NetworkSettings'][ + 'Networks']['bridge']['IPAddress'] + return ip + + def get_container_ports(self): + ports = [] + for item in self.inspect['Config']['ExposedPorts'].keys(): + ports.append(item.split('/')[0]) + return ports + + def get_metadata_dict(self): + metadata = super(DockerContainer, self).get_metadata_dict() + metadata['owner_namespace'] = self.owner_namespace + metadata['docker_image_long_name'] = self.docker_image_long_name + metadata['docker_image_short_name'] = self.docker_image_short_name + metadata['docker_image_tag'] = self.docker_image_tag + metadata['docker_image_registry'] = self.docker_image_registry + + return metadata + + def _set_environment_specific_options(self): + """ + This function is used to setup these environment specific fields: + namespace, log_prefix, and logfile_links. + """ + + logger.info('setup_namespace_and_metadata: long_id=' + + self.long_id) + + try: + _options = { + 'root_fs': self.root_fs, + 'type': 'docker', + 'name': self.name, + 'host_namespace': self.host_namespace, + 'container_logs': DEFAULT_LOG_FILES} + env = plugins_manager.get_runtime_env_plugin() + namespace = env.get_container_namespace( + self.long_id, _options) + if not namespace: + _env = env.get_environment_name() + logger.warning('Container %s does not have %s ' + 'metadata.' % (self.short_id, _env)) + raise ContainerInvalidEnvironment('') + self.namespace = namespace + + self.log_prefix = env.get_container_log_prefix( + self.long_id, _options) + + self.logs_list_input.extend([LogFileLink(name=log['name']) + for log in + env.get_container_log_file_list( + self.long_id, _options)]) + except ValueError: + # XXX-kollerr: plugins are not supposed to throw ValueError + logger.warning('Container %s does not have a valid alchemy ' + 'metadata json file.' 
% self.short_id) + raise ContainerInvalidEnvironment() + + def _set_mounts_list(self): + """ + Create self.mounts out of Volumes for old versions of Docker + """ + + if not self.mounts and self.volumes: + self.mounts = [{'Destination': vol, + 'Source': self.volumes[vol]} + for vol in self.volumes] + elif not self.mounts and not self.volumes: + self.mounts = [] + + # Find the mount point of the specified cgroup + + def _get_cgroup_dir(self, devlist=[]): + for dev in devlist: + paths = [os.path.join('/cgroup/', dev), + os.path.join('/sys/fs/cgroup/', dev)] + for path in paths: + if os.path.ismount(path): + return path + + # Try getting the mount point from /proc/mounts + for l in open('/proc/mounts', 'r'): + _type, mnt, _, _, _, _ = l.split(' ') + if _type == 'cgroup' and mnt.endswith('cgroup/' + dev): + return mnt + + raise ContainerWithoutCgroups('Can not find the cgroup dir') + + def get_memory_cgroup_path(self, node='memory.stat'): + return os.path.join(self._get_cgroup_dir(['memory']), 'docker', + self.long_id, node) + + def get_cpu_cgroup_path(self, node='cpuacct.usage'): + # In kernels 4.x, the node is actually called 'cpu,cpuacct' + cgroup_dir = self._get_cgroup_dir(['cpuacct', 'cpu,cpuacct']) + return os.path.join(cgroup_dir, 'docker', self.long_id, node) + + def __str__(self): + return str(self.__dict__) + + def link_logfiles(self): + + host_log_dir = self._get_logfiles_links_dest(HOST_LOG_BASEDIR) + + logger.debug('Linking log files for container %s' % self.short_id) + + # create an empty dir for the container logs + + if not os.path.exists(host_log_dir): + os.makedirs(host_log_dir) + + # Create a symlink from src to dst + + for log in self.logs_list: + dest = log.get_dest() + try: + if not os.path.exists(log.source): + logger.debug( + 'Log file %s does not exist, but linking it anyway' + % log.source) + dest_dir = os.path.dirname(dest) + if not os.path.exists(dest_dir): + os.makedirs(dest_dir) + os.symlink(log.source, dest) + logger.info( + 'Linking container %s %s logfile %s -> %s' % + (self.short_id, log.name, log.source, dest)) + except (OSError, IOError) as e: + logger.debug(e) + logger.debug('Link already exists: %s -> %s' + % (log.source, dest)) + except Exception as e: + logger.warning(e) + + # Keep record of what is linked in a file. + + try: + types_host_log_path = os.path.join(host_log_dir, + LOG_TYPES_FILE) + with open(types_host_log_path, 'w') as outfile: + logs_dict = [{'name': log.name, 'type': log.type} + for log in self.logs_list] + json.dump(logs_dict, outfile) + except (OSError, IOError) as e: + # Not a critical error: move on + logger.exception(e) + + def unlink_logfiles(self): + + host_log_dir = self._get_logfiles_links_dest(HOST_LOG_BASEDIR) + + logger.info('Un-linking log files for container %s.' + % self.short_id) + + logger.info('Trying to delete this directory and its symlinks: %s.' + % host_log_dir) + assert(host_log_dir.startswith('/var/log/crawler_container_logs/')) + + try: + shutil.rmtree(host_log_dir) + except (IOError, OSError) as exc: + logger.error('Could not delete directory %s: %s' % + (host_log_dir, exc)) + + def _parse_user_input_logs(self, var='LOG_LOCATIONS'): + """ + The user can provide a list of logfiles in a container for us + to maintain links to. This list of log files is passed as with + the `var` environment variable. 
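+        Example (illustrative): a container started with
+            docker run -e LOG_LOCATIONS=/var/log/messages,/var/log/app.log ...
+        gets both paths parsed out here and linked under the host log dir.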
+ """ + + container = self + logs = [] # list of LogFileLink's + try: + logs = [LogFileLink(name=name) for name in + misc.get_process_env(container.pid)[var].split(',')] + except (IOError, KeyError, ValueError) as e: + logger.debug('There is a problem with the env. variables: %s' % e) + return logs + + def _set_logs_list_input(self): + """ + Sets the list of container logs that we should maintain links for. + + The paths are relative to the filesystem of the container. For example + the path for /var/log/messages in the container will be just + /var/log/messages in this list. + """ + + self.logs_list_input = self._parse_user_input_logs(var='LOG_LOCATIONS') + + def _expand_and_map_log_link(self, log, host_log_dir, rootfs_path): + """ + Returns a list of LogFileLinks with all the fields set after + expanding the globs and mapping mount points. + """ + _logs = [] + if not self.mounts: + source = misc.join_abs_paths(rootfs_path, log.name) + if "*" in source: + _logs = [LogFileLink(name=log.name, + source=s, + type=log.type, + dest=s.split(rootfs_path, 1)[1], + host_log_dir=host_log_dir) + for s in glob.glob(source)] + else: + _logs = [LogFileLink(name=log.name, + type=log.type, + source=source, + dest=log.name, + host_log_dir=host_log_dir)] + + for mount in self.mounts: + mount_src = mount['Source'] + mount_dst = mount['Destination'] + if log.name.startswith(mount['Destination']): + source = log.name.replace(mount_dst, mount_src) + if "*" in source: + _logs = [LogFileLink(name=log.name, + source=s, + type=log.type, + dest=s.replace(mount_src, + mount_dst), + host_log_dir=host_log_dir) + for s in glob.glob(source)] + else: + _logs = [LogFileLink(name=log.name, + source=source, + dest=log.name, + type=log.type, + host_log_dir=host_log_dir)] + else: + source = misc.join_abs_paths(rootfs_path, log.name) + if "*" in source: + _logs = [LogFileLink(name=log.name, + source=s, + type=log.type, + dest=s.split(rootfs_path, 1)[1], + host_log_dir=host_log_dir) + for s in glob.glob(source)] + else: + _logs = [LogFileLink(name=log.name, + source=source, + dest=log.name, + type=log.type, + host_log_dir=host_log_dir)] + return _logs + + def _set_logs_list(self): + """ + Initializes the LogFileLinks list in `self.logs_list` + """ + + host_log_dir = self._get_logfiles_links_dest(HOST_LOG_BASEDIR) + + self.logs_list = [] + + rootfs_path = self.root_fs + if not rootfs_path: + logger.warning( + 'Container %s does not have a rootfs_path set' % + self.short_id) + return + + # remove relative paths + for log in self.logs_list_input: + # remove relative paths + if (not os.path.isabs(log.name)) or ('../' in log.name): + logger.warning('User provided a log file path that is not ' + 'absolute: %s' % log.name) + continue + + _logs = self._expand_and_map_log_link(log, + host_log_dir, + rootfs_path) + for log in _logs: + if log not in self.logs_list: + self.logs_list.append(log) + + logger.debug('logmap %s' % self.logs_list) + + # Link the container json log file name if there is one + + try: + docker_log_source = get_docker_container_json_logs_path( + self.long_id, self.inspect) + docker_log_dest = os.path.join(host_log_dir, + self.DOCKER_JSON_LOG_FILE) + self.logs_list.append(LogFileLink(name=self.DOCKER_JSON_LOG_FILE, + type=None, + source=docker_log_source, + dest=docker_log_dest)) + except DockerutilsNoJsonLog as e: + logger.exception(e) + + def _get_logfiles_links_dest( + self, + host_log_basedir + ): + """ + Returns the path in the host file system where the container's log + files should be linked to. 
+ """ + + return os.path.join(host_log_basedir, self.log_prefix) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/emitters_manager.py b/crawler/utils/plugincont/plugincont_img/crawler/emitters_manager.py new file mode 100644 index 00000000..f8ca5988 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/emitters_manager.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import logging + +import plugins_manager +from base_crawler import BaseFrame +from utils.crawler_exceptions import EmitterUnsupportedProtocol + +logger = logging.getLogger('crawlutils') + + +class EmittersManager: + + """ + Class that manages a list of formatter and emitter objects, one per url. + The formatter takes a frame and writes it into an iostream, and the + emitter takes the iostream and emits it. + + This class should be instantiated at the beginning of the program, + and emit() should be called for each frame. + """ + + def __init__( + self, + urls, + format='csv', + compress=False, + extra_metadata={}, + plugin_places=['plugins'] + ): + """ + Initializes a list of emitter objects; also stores all the args. + + :param urls: list of URLs to send to + :param format: format of each feature string + :param compress: gzip each emitter frame or not + :param extra_metadata: dict added to the metadata of each frame + """ + self.extra_metadata = extra_metadata + self.compress = compress + + # Create a list of Emitter objects based on urls + self.emitter_plugins = plugins_manager.get_emitter_plugins( + urls, + format, + plugin_places) + if not self.emitter_plugins: + raise EmitterUnsupportedProtocol('Emit protocols not supported') + + def emit(self, frame, snapshot_num=0): + """ + Sends a frame to the URLs specified at __init__ + + :param frame: frame of type BaseFrame + :param snapshot_num: iteration count (from worker.py). This is just + used to differentiate successive frame files (when url is file://). + :return: None + """ + if not isinstance(frame, BaseFrame): + raise TypeError('frame is not of type BaseFrame') + + metadata = frame.metadata + metadata.update(self.extra_metadata) + for (emitter_obj, emitter_args) in self.emitter_plugins: + emitter_obj.emit(frame, self.compress, + metadata, snapshot_num, **(emitter_args or {})) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/formatters.py b/crawler/utils/plugincont/plugincont_img/crawler/formatters.py new file mode 100644 index 00000000..babd5a06 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/formatters.py @@ -0,0 +1,140 @@ +import json +import time +from morph import flatten + + +def write_in_csv_format(iostream, frame): + """ + Writes frame data and metadata into iostream in csv format. + + :param iostream: a CStringIO used to buffer the formatted features. + :param frame: a BaseFrame object to be written into iostream + :return: None + """ + iostream.write('%s\t%s\t%s\n' % + ('metadata', json.dumps('metadata'), + json.dumps(frame.metadata, separators=(',', ':')))) + for (key, val, feature_type) in frame.data: + if not isinstance(val, dict): + val = val._asdict() + iostream.write('%s\t%s\t%s\n' % ( + feature_type, json.dumps(key), + json.dumps(val, separators=(',', ':')))) + + +def write_in_json_format(iostream, frame): + """ + Writes frame data and metadata into iostream in json format. + + :param iostream: a CStringIO used to buffer the formatted features. 
+ :param frame: a BaseFrame object to be written into iostream + :return: None + """ + iostream.write('%s\n' % json.dumps(frame.metadata)) + for (key, val, feature_type) in frame.data: + if not isinstance(val, dict): + val = val._asdict() + val['feature_type'] = feature_type + val['namespace'] = frame.metadata.get('namespace', '') + iostream.write('%s\n' % json.dumps(val)) + + +def write_in_logstash_format(iostream, frame): + """ + Writes frame data and meta data in json format. + Similar to write_in_json_format, but this method concatenate them + in to a single json object. + + :param iostream: a CStringIO used to buffer the formatted features. + :param frame: a BaseFrame Object to be written into iostream + :return: None + """ + payload = {} + payload['metadata'] = frame.metadata + for (key, val, feature_type) in frame.data: + if not isinstance(val, dict): + val = val._asdict() + if feature_type not in payload: + payload[feature_type] = {} + payload[feature_type][key] = val + iostream.write('%s\n' % json.dumps(payload)) + + +def write_in_graphite_format(iostream, frame): + """ + Writes frame data and metadata into iostream in graphite format. + + :param iostream: a CStringIO used to buffer the formatted features. + :param frame: a BaseFrame object to be written into iostream + :return: None + """ + namespace = frame.metadata.get('namespace', '') + timestamp = frame.metadata.get('timestamp', '') + for (key, val, feature_type) in frame.data: + if not isinstance(val, dict): + val = val._asdict() + write_feature_in_graphite_format(iostream, namespace, timestamp, + key, val, feature_type) + + +def write_feature_in_graphite_format(iostream, namespace, timestamp, + feature_key, feature_val, + feature_type): + """ + Write a feature in graphite format into iostream. The graphite format + looks like this, one line per metric value: + + [namespace].[feature_key].[metric] [value] [timestamp]\r\n + [namespace].[feature_key].[metric] [value] [timestamp]\r\n + [namespace].[feature_key].[metric] [value] [timestamp]\r\n + + This function converts a feature into that string and writes it into + the iostream. + + :param namespace: Frame namespace for this feature + :param timestamp: From frame metadata, fmt: %Y-%m-%dT%H:%M:%S%z + :param feature_type: + :param feature_key: + :param feature_val: + :param iostream: a CStringIO used to buffer the formatted features. 
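+        (written as one JSON object per line: the frame metadata first, then
+        each feature value with 'feature_type' and 'namespace' merged in)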
+ :return: None + """ + # to convert 2017-02-07T13:20:15-0500 to 1486491615 (=epoch) + # for python >=3.2, following works + # time.strptime(timestamp,'%Y-%m-%dT%H:%M:%S%z'), + # but previous pyhon versions don't respect %z timezone info, + # so skipping time zone conversion assuming + # timezone during str timestamp creation in metadata is same for reverse + + timestamp = time.mktime(time.strptime(timestamp[:-5], '%Y-%m-%dT%H:%M:%S')) + + items = flatten(feature_val).items() + if isinstance(namespace, dict): + namespace = json.dumps(namespace) + else: + namespace = namespace.replace('/', '.') + + for (metric, value) in items: + try: + # Only emit values that we can cast as floats + value = float(value) + except (TypeError, ValueError): + continue + + metric = metric.replace('(', '_').replace(')', '') + metric = metric.replace(' ', '_').replace('-', '_') + metric = metric.replace('/', '_').replace('\\', '_') + + feature_key = feature_key.replace('_', '-') + if 'cpu' in feature_key or 'memory' in feature_key: + metric = metric.replace('_', '-') + if 'if' in metric: + metric = metric.replace('_tx', '.tx') + metric = metric.replace('_rx', '.rx') + if feature_key == 'load': + feature_key = 'load.load' + feature_key = feature_key.replace('/', '$') + + tmp_message = '%s.%s.%s %f %d\r\n' % (namespace, feature_key, + metric, value, timestamp) + iostream.write(tmp_message) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/host_crawler.py new file mode 100644 index 00000000..422daa99 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/host_crawler.py @@ -0,0 +1,42 @@ +import plugins_manager +from base_crawler import BaseCrawler, BaseFrame + + +class HostFrame(BaseFrame): + + def __init__(self, feature_types, namespace): + BaseFrame.__init__(self, feature_types) + self.metadata['namespace'] = namespace + self.metadata['system_type'] = 'host' + + +class HostCrawler(BaseCrawler): + + def __init__(self, + features=['os', 'cpu'], namespace='', + plugin_places=['plugins'], options={}): + BaseCrawler.__init__( + self, + features=features, + plugin_places=plugin_places) + plugins_manager.reload_host_crawl_plugins( + features, plugin_places, options) + self.plugins = plugins_manager.get_host_crawl_plugins( + features=features) + self.namespace = namespace + + def crawl(self, ignore_plugin_exception=True): + """ + Crawl the host with all the plugins loaded on __init__ + + :param ignore_plugin_exception: just ignore exceptions on a plugin + :return: a list generator with a frame object + """ + frame = HostFrame(self.features, self.namespace) + for (plugin_obj, plugin_args) in self.plugins: + try: + frame.add_features(plugin_obj.crawl(**plugin_args)) + except Exception as exc: + if not ignore_plugin_exception: + raise exc + yield frame diff --git a/crawler/utils/plugincont/plugincont_img/crawler/icrawl_plugin.py b/crawler/utils/plugincont/plugincont_img/crawler/icrawl_plugin.py new file mode 100644 index 00000000..7e9fcaa7 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/icrawl_plugin.py @@ -0,0 +1,73 @@ +from yapsy.IPlugin import IPlugin + + +class IContainerCrawler(IPlugin): + + """ + Crawler plugin interface + + Subclasses of this class can be used to implement crawling functions + for different systems. + """ + + def crawl(self, container_id): + """ + Crawling function that should return a list of features for + `container_id`. 
This function is called once for every container + at every crawling interval. + """ + raise NotImplementedError() + + def get_feature(self): + """ + Returns the feature type as a string. + """ + raise NotImplementedError() + + +class IVMCrawler(IPlugin): + + """ + Crawler plugin interface + + Subclasses of this class can be used to implement crawling functions + for different systems. + """ + + def crawl(self, vm_desc): + """ + Crawling function that should return a list of features for + `vm_desc`. This should change to 'vm_name' after auto kernel version + detection. This function is called once for every VM + at every crawling interval. + """ + raise NotImplementedError() + + def get_feature(self): + """ + Returns the feature type as a string. + """ + raise NotImplementedError() + + +class IHostCrawler(IPlugin): + + """ + Crawler plugin interface + + Subclasses of this class can be used to implement crawling functions + for different host features (e.g. processes running in the host). + """ + + def crawl(self): + """ + Crawling function that should return a list of features for the host. + This function is called once at every crawling interval. + """ + raise NotImplementedError() + + def get_feature(self): + """ + Returns the feature type as a string. + """ + raise NotImplementedError() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/iemit_plugin.py b/crawler/utils/plugincont/plugincont_img/crawler/iemit_plugin.py new file mode 100644 index 00000000..7bf0c597 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/iemit_plugin.py @@ -0,0 +1,57 @@ +import cStringIO +from yapsy.IPlugin import IPlugin +from formatters import (write_in_csv_format, + write_in_json_format, + write_in_graphite_format, + write_in_logstash_format) +from utils.crawler_exceptions import (EmitterUnsupportedFormat) + + +class IEmitter(IPlugin): + + """ + Base emitter class from which emitters like FileEmitter, StdoutEmitter + should inherit. The main idea is that all emitters get a url, and should + implement an emit() function given an iostream (a buffer with the features + to emit). + """ + + def init(self, url, timeout=1, max_retries=5, emit_format='csv'): + self.url = url + self.timeout = timeout + self.max_retries = max_retries + self.emit_per_line = False + + self.supported_formats = {'csv': write_in_csv_format, + 'graphite': write_in_graphite_format, + 'json': write_in_json_format, + 'logstash': write_in_logstash_format} + + if emit_format in self.supported_formats: + self.formatter = self.supported_formats[emit_format] + else: + raise EmitterUnsupportedFormat('Not supported: %s' % emit_format) + + def get_emitter_protocol(self): + raise NotImplementedError() + + def format(self, frame): + # this writes the frame metadata and data into iostream + # Pass iostream to the emitters so they can send its content to their + # respective url + iostream = cStringIO.StringIO() + self.formatter(iostream, frame) + return iostream + + def emit(self, frame, compress=False, + metadata={}, snapshot_num=0, **kwargs): + """ + + :param iostream: a CStringIO used to buffer the formatted features. 
+ :param compress: + :param metadata: + :param snapshot_num: + :return: + """ + # this formats and emits an input frame + raise NotImplementedError() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/__init__.py new file mode 100644 index 00000000..836e3e88 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/__init__.py new file mode 100644 index 00000000..836e3e88 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/__init__.py new file mode 100644 index 00000000..836e3e88 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_container_crawler.plugin new file mode 100644 index 00000000..c32738ca --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_container_crawler.plugin @@ -0,0 +1,9 @@ +[Core] +Name = application_apache_container +Module = apache_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Apache httpd server" + diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_container_crawler.py new file mode 100644 index 00000000..d7c0eb75 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_container_crawler.py @@ -0,0 +1,51 @@ +import logging + + +import dockercontainer +from icrawl_plugin import IContainerCrawler +from plugins.applications.apache import apache_crawler +from utils.crawler_exceptions import CrawlError + +logger = logging.getLogger('crawlutils') + + +class ApacheContainerCrawler(IContainerCrawler): + feature_type = 'application' + feature_key = 'apache' + + def get_feature(self): + return self.feature_key + + def crawl(self, container_id=None, **kwargs): + + c = dockercontainer.DockerContainer(container_id) + + # check image name + if c.image_name.find("httpd") == -1: + + logger.error("%s is not %s container", + c.image_name, + self.feature_key) + raise CrawlError("%s does not have expected name for %s (name=%s)", + container_id, + self.feature_key, + c.image_name) + + # extract IP and Port information + ip = c.get_container_ip() + ports = c.get_container_ports() + + # crawl all candidate ports + for port in ports: + try: + metrics = apache_crawler.retrieve_metrics(ip, port) + except CrawlError: + logger.error("can't find metrics endpoint at http://%s:%s", + ip, + port) + continue + return [(self.feature_key, metrics, self.feature_type)] + + raise CrawlError("%s has no accessible endpoint for %s", + container_id, + self.feature_key) diff --git 
a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_crawler.py new file mode 100644 index 00000000..a6aa4cd1 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_crawler.py @@ -0,0 +1,75 @@ +import urllib2 +from plugins.applications.apache import feature +from collections import defaultdict +from utils.crawler_exceptions import CrawlError + + +def retrieve_status_page(host, port): + statusPage = "http://%s:%s/server-status?auto" % (host, port) + req = urllib2.Request(statusPage) + response = urllib2.urlopen(req) + return response.read() + + +def parse_score_board(line, stats): + switch = { + "_": 'waiting_for_connection', + "S": 'starting_up', + "R": 'reading_request', + "W": 'sending_reply', + "K": 'keepalive_read', + "D": 'dns_lookup', + "C": 'closing_connection', + "L": 'logging', + "G": 'graceful_finishing', + "I": 'idle_worker_cleanup', + } + res = line.split(': ') + + workcounts = defaultdict(int) + for i in res[1]: + workcounts[i] += 1 + + for x, y in workcounts.iteritems(): + stats[switch.get(x)] = str(y) + + +def retrieve_metrics(host='localhost', port=80): + try: + status = retrieve_status_page(host, port).splitlines() + except Exception: + raise CrawlError("can't access to http://%s:%s", + host, port) + switch = { + "Total kBytes": 'Total_kBytes', + "Total Accesses": 'Total_Accesses', + "BusyWorkers": "BusyWorkers", + "IdleWorkers": "IdleWorkers", + "BytesPerSec": "BytesPerSec", + "BytesPerReq": "BytesPerReq", + "ReqPerSec": "ReqPerSec", + "Uptime": "Uptime" + } + + stats = {} + + for line in status: + if "Scoreboard" in line: + parse_score_board(line, stats) + + else: + res = line.split(': ') + if res[0] in switch: + stats[switch.get(res[0])] = res[1] + + feature_attributes = feature.ApacheFeature + + if len(stats) == 0: + raise CrawlError("failure to parse http://%s:%s", host, port) + + for name in feature_attributes._fields: + if name not in stats: + stats[name] = '0' + + feature_attributes = feature.get_feature(stats) + return feature_attributes diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_host_crawler.plugin new file mode 100644 index 00000000..9a72f283 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_host_crawler.plugin @@ -0,0 +1,9 @@ +[Core] +Name = application_apache_host +Module = apache_host_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Apache httpd server" + diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_host_crawler.py new file mode 100644 index 00000000..9c7a7bc8 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/apache_host_crawler.py @@ -0,0 +1,21 @@ +from icrawl_plugin import IHostCrawler +from plugins.applications.apache import apache_crawler +import logging + +logger = logging.getLogger('crawlutils') + + +class ApacheHostCrawler(IHostCrawler): + feature_type = 'application' + feature_key = 'apache' + default_port = 80 + + def get_feature(self): + return self.feature_key + + def crawl(self): + metrics = apache_crawler.retrieve_metrics( + host='localhost', + 
port=self.default_port + ) + return [(self.feature_key, metrics, self.feature_type)] diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/feature.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/feature.py new file mode 100644 index 00000000..cc88c6a2 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/apache/feature.py @@ -0,0 +1,46 @@ +from collections import namedtuple + + +def get_feature(stats): + feature_attributes = ApacheFeature( + stats['BusyWorkers'], + stats['IdleWorkers'], + stats['waiting_for_connection'], + stats['starting_up'], + stats['reading_request'], + stats['sending_reply'], + stats['keepalive_read'], + stats['dns_lookup'], + stats['closing_connection'], + stats['logging'], + stats['graceful_finishing'], + stats['idle_worker_cleanup'], + stats['BytesPerSec'], + stats['BytesPerReq'], + stats['ReqPerSec'], + stats['Uptime'], + stats['Total_kBytes'], + stats['Total_Accesses'] + ) + return feature_attributes + +ApacheFeature = namedtuple('ApacheFeature', [ + 'BusyWorkers', + 'IdleWorkers', + 'waiting_for_connection', + 'starting_up', + 'reading_request', + 'sending_reply', + 'keepalive_read', + 'dns_lookup', + 'closing_connection', + 'logging', + 'graceful_finishing', + 'idle_worker_cleanup', + 'BytesPerSec', + 'BytesPerReq', + 'ReqPerSec', + 'Uptime', + 'Total_kBytes', + 'Total_Accesses' +]) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/__init__.py new file mode 100644 index 00000000..836e3e88 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_container_crawler.plugin new file mode 100644 index 00000000..7639fcdb --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_container_crawler.plugin @@ -0,0 +1,14 @@ +[Core] +Name = application_db2_container +Module = db2_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "DB2 server" + +[Options] +user = administrator user name, Default is db2inst1 +password = administrator password, Default is db2inst1-pwd +db = db name to connect, Default is sample + diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_container_crawler.py new file mode 100644 index 00000000..7843b291 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_container_crawler.py @@ -0,0 +1,65 @@ +import logging + +import dockercontainer +from icrawl_plugin import IContainerCrawler +from plugins.applications.db2 import db2_crawler +from utils.crawler_exceptions import CrawlError + +logger = logging.getLogger('crawlutils') + + +class DB2ContainerCrawler(IContainerCrawler): + feature_type = 'application' + feature_key = 'db2' + + def get_feature(self): + return self.feature_key + + def crawl(self, container_id=None, **kwargs): + password = "db2inst1" + user = "db2inst1-pwd" + db = "sample" + + if "password" in kwargs: + password = kwargs["password"] + + if "user" in kwargs: + user = 
kwargs["user"] + + if "db" in kwargs: + db = kwargs["db"] + + c = dockercontainer.DockerContainer(container_id) + + # check image name + if c.image_name.find(self.feature_key) == -1: + logger.error("%s is not %s container", + c.image_name, + self.feature_key) + raise CrawlError("%s does not have expected name for %s (name=%s)", + container_id, + self.feature_key, + c.image_name) + + # extract IP and Port information + ip = c.get_container_ip() + ports = c.get_container_ports() + + # crawl all candidate ports + for each_port in ports: + try: + metrics = db2_crawler.retrieve_metrics( + host=ip, + user=user, + password=password, + db=db, + ) + except CrawlError: + logger.error("can't find metrics endpoint at %s db %s", + ip, db) + continue + return [(self.feature_key, metrics, self.feature_type)] + + raise CrawlError("%s has no accessible endpoint for %s", + container_id, + self.feature_key) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_crawler.py new file mode 100644 index 00000000..24f4323b --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_crawler.py @@ -0,0 +1,158 @@ +import logging +from plugins.applications.db2 import feature +from utils.crawler_exceptions import CrawlError + +logger = logging.getLogger('crawlutils') + + +def retrieve_metrics(host='localhost', + user='db2inst1', password='db2inst1-pwd', + db='sample'): + import pip + pip.main(['install', 'ibm_db']) + import ibm_db_dbi + import ibm_db + + sql_list = ["SELECT db_size FROM systools.stmg_dbsize_info", + "SELECT db_capacity FROM systools.stmg_dbsize_info", + "select service_level concat ' FP'" + "concat fixpack_num from sysibmadm.env_inst_info", + "select inst_name from sysibmadm.env_inst_info", + "Select PRODUCT_NAME from sysibmadm.snapdbm", + "Select DB_NAME from sysibmadm.snapdb", + "Select SERVICE_LEVEL from sysibmadm.snapdbm", + "Select REM_CONS_IN + LOCAL_CONS from sysibmadm.snapdbm", + "Select sum(POOL_CUR_SIZE) from sysibmadm.SNAPDBM_MEMORY_POOL", + "Select TOTAL_CONS from sysibmadm.snapdb", + "Select TOTAL_LOG_USED *1. / " + "TOTAL_LOG_AVAILABLE * 100. from sysibmadm.snapdb", + "Select NUM_INDOUBT_TRANS from sysibmadm.snapdb", + "Select X_LOCK_ESCALS from sysibmadm.snapdb", + "Select LOCK_ESCALS from sysibmadm.snapdb", + "Select LOCK_TIMEOUTS from sysibmadm.snapdb", + "Select DEADLOCKS from sysibmadm.snapdb", + "Select LAST_BACKUP from sysibmadm.snapdb", + "Select DB_STATUS from sysibmadm.snapdb", + "select DB2_STATUS from sysibmadm.snapdbm", + "select case POOL_INDEX_L_READS when 0 then 1 else " + "(POOL_INDEX_L_READS * 1. - POOL_INDEX_P_READS * 1.) / " + "POOL_INDEX_L_READS end * 100. from sysibmadm.snapdb", + "select case POOL_DATA_L_READS when 0 then 1 else " + "(POOL_DATA_L_READS * 1. - POOL_DATA_P_READS * 1.) / " + "POOL_DATA_L_READS end *100. from sysibmadm.snapdb", + "select case TOTAL_SORTS when 0 then 0 else SORT_OVERFLOWS " + "*1. / TOTAL_SORTS *1. end * 100. 
from sysibmadm.snapdb", + "select COALESCE(AGENTS_WAITING_TOP,0) from sysibmadm.snapdbm", + "Select ROWS_UPDATED from sysibmadm.snapdb", + "Select ROWS_INSERTED from sysibmadm.snapdb", + "Select ROWS_SELECTED from sysibmadm.snapdb", + "Select ROWS_DELETED from sysibmadm.snapdb", + "Select SELECT_SQL_STMTS from sysibmadm.snapdb", + "Select STATIC_SQL_STMTS from sysibmadm.snapdb", + "Select DYNAMIC_SQL_STMTS from sysibmadm.snapdb", + "Select ROLLBACK_SQL_STMTS from sysibmadm.snapdb", + "Select COMMIT_SQL_STMTS from sysibmadm.snapdb", + "select case POOL_TEMP_INDEX_L_READS when 0 then 1 " + "else (POOL_TEMP_INDEX_L_READS * 1. - " + "POOL_TEMP_INDEX_P_READS * 1.) / POOL_TEMP_INDEX_L_READS end " + "* 100 from sysibmadm.snapdb", + "select case POOL_TEMP_DATA_L_READS when 0 then 1 else " + "(POOL_TEMP_DATA_L_READS * 1. - POOL_TEMP_DATA_P_READS * 1.) /" + " POOL_TEMP_DATA_L_READS end * 100. from sysibmadm.snapdb" + ] + + sql_stats = ["dbSize", + "dbCapacity", + "dbVersion", + "instanceName", + "productName", + "dbName", + "serviceLevel", + "instanceConn", + "instanceUsedMem", + "dbConn", + "usedLog", + "transcationInDoubt", + "xlocksEscalation", + "locksEscalation", + "locksTimeOut", + "deadLock", + "lastBackupTime", + "dbStatus", + "instanceStatus", + "bpIndexHitRatio", + "bpDatahitRatio", + "sortsInOverflow", + "agetnsWait", + "updateRows", + "insertRows", + "selectedRows", + "deleteRows", + "selects", + "selectSQLs", + "dynamicSQLs", + "rollbacks", + "commits", + "bpTempIndexHitRatio", + "bpTempDataHitRatio" + ] + + sql_stats_list = {} + + try: + ibm_db_conn = ibm_db.connect("DATABASE=" + db + + ";HOSTNAME=" + host + + ";UID=" + user + + ";PWD="+password+";", "", "") + conn = ibm_db_dbi.Connection(ibm_db_conn) + except: + raise CrawlError("cannot connect to database," + " db: %s, host: %s ", db, host) + + c = conn.cursor() + + i = 0 + for sql in sql_list: + try: + c.execute(sql) + except: + raise CrawlError("cannot execute sql %s", sql) + sql_stats_list[sql_stats[i]] = str(c.fetchone()[0]) + i += 1 + + db2_attributes = feature.DB2Feature( + sql_stats_list["dbCapacity"], + sql_stats_list["dbVersion"], + sql_stats_list["instanceName"], + sql_stats_list["productName"], + sql_stats_list["dbName"], + sql_stats_list["serviceLevel"], + sql_stats_list["instanceConn"], + sql_stats_list["instanceUsedMem"], + sql_stats_list["dbConn"], + sql_stats_list["usedLog"], + sql_stats_list["transcationInDoubt"], + sql_stats_list["xlocksEscalation"], + sql_stats_list["locksEscalation"], + sql_stats_list["locksTimeOut"], + sql_stats_list["deadLock"], + sql_stats_list["lastBackupTime"], + sql_stats_list["dbStatus"], + sql_stats_list["instanceStatus"], + sql_stats_list["bpIndexHitRatio"], + sql_stats_list["bpDatahitRatio"], + sql_stats_list["sortsInOverflow"], + sql_stats_list["agetnsWait"], + sql_stats_list["updateRows"], + sql_stats_list["insertRows"], + sql_stats_list["selectedRows"], + sql_stats_list["deleteRows"], + sql_stats_list["selects"], + sql_stats_list["selectSQLs"], + sql_stats_list["dynamicSQLs"], + sql_stats_list["rollbacks"], + sql_stats_list["commits"], + sql_stats_list["bpTempIndexHitRatio"], + sql_stats_list["bpTempDataHitRatio"] + ) + return db2_attributes diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_host_crawler.plugin new file mode 100644 index 00000000..87e917eb --- /dev/null +++ 
b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_host_crawler.plugin
@@ -0,0 +1,14 @@
+[Core]
+Name = application_db2_host
+Module = db2_host_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "DB2 server"
+
+[Options]
+user = administrator user name, Default is db2inst1
+password = administrator password, Default is db2inst1-pwd
+db = db name to connect, Default is sample
+
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_host_crawler.py
new file mode 100644
index 00000000..cd7f8a36
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/db2_host_crawler.py
@@ -0,0 +1,39 @@
+from icrawl_plugin import IHostCrawler
+from plugins.applications.db2 import db2_crawler
+from utils.crawler_exceptions import CrawlError
+import logging
+
+logger = logging.getLogger('crawlutils')
+
+
+class DB2HostCrawler(IHostCrawler):
+    feature_type = 'application'
+    feature_key = 'db2'
+
+    def get_feature(self):
+        return self.feature_key
+
+    def crawl(self, **options):
+        password = "db2inst1-pwd"
+        user = "db2inst1"
+        db = "sample"
+
+        if "password" in options:
+            password = options["password"]
+
+        if "user" in options:
+            user = options["user"]
+
+        if "db" in options:
+            db = options["db"]
+
+        try:
+            metrics = db2_crawler.retrieve_metrics(
+                host="localhost",
+                user=user,
+                password=password,
+                db=db
+            )
+            return [(self.feature_key, metrics, self.feature_type)]
+        except:
+            raise CrawlError("cannot retrieve metrics from db %s", db)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/feature.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/feature.py
new file mode 100644
index 00000000..766ea8f6
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/db2/feature.py
@@ -0,0 +1,37 @@
+from collections import namedtuple
+
+DB2Feature = namedtuple('DB2Feature', [
+    "dbCapacity",
+    "dbVersion",
+    "instanceName",
+    "productName",
+    "dbName",
+    "serviceLevel",
+    "instanceConn",
+    "instanceUsedMem",
+    "dbConn",
+    "usedLog",
+    "transcationInDoubt",
+    "xlocksEscalation",
+    "locksEscalation",
+    "locksTimeOut",
+    "deadLock",
+    "lastBackupTime",
+    "dbStatus",
+    "instanceStatus",
+    "bpIndexHitRatio",
+    "bpDatahitRatio",
+    "sortsInOverflow",
+    "agetnsWait",
+    "updateRows",
+    "insertRows",
+    "selectedRows",
+    "deleteRows",
+    "selects",
+    "selectSQLs",
+    "dynamicSQLs",
+    "rollbacks",
+    "commits",
+    "bpTempIndexHitRatio",
+    "bpTempDataHitRatio"
+])
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/__init__.py
new file mode 100644
index 00000000..836e3e88
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/__init__.py
@@ -0,0 +1,2 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/feature.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/feature.py
new file mode 100644
index 00000000..0cf6c75c
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/feature.py
@@ -0,0 +1,45 @@
+from collections import namedtuple
+
+LibertyServletFeature = namedtuple('LibertyServletFeature', [
+    'name',
+    'appName',
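+    # the remaining four fields come from the ResponseTimeDetails MBean
+    # attribute (count/mean/maximumValue/minimumValue); see
+    # get_servlet_stats() in liberty_crawler.py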
'reqCount', + 'responseMean', + 'responseMax', + 'responseMin' + ]) + +LibertyJVMFeature = namedtuple('LibertyJVMFeature', [ + 'heap', + 'freeMemory', + 'usedMemory', + 'processCPU', + 'gcCount', + 'gcTime', + 'upTime' + ]) + +LibertyThreadFeature = namedtuple('LibertyThreadFeature', [ + 'activeThreads', + 'poolSize', + 'poolName' + ]) + +LibertySessionFeature = namedtuple('LibertySessionFeature', [ + 'name', + 'createCount', + 'liveCount', + 'activeCount', + 'invalidatedCount', + 'invalidatedCountByTimeout', + ]) + +LibertyMongoConnectionFeature = namedtuple('LibertyMongoConnectionFeature', [ + 'checkedOutCount', + 'waitQueueSize', + 'maxSize', + 'minSize', + 'host', + 'port', + 'size', + ]) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_container_crawler.plugin new file mode 100644 index 00000000..3df968c3 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_container_crawler.plugin @@ -0,0 +1,13 @@ +[Core] +Name = application_liberty_container +Module = liberty_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = Liberty crawling function for containers on the host + +[Options] +user = administrator user name, Default is user +password = administrator password, Default is password + diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_container_crawler.py new file mode 100644 index 00000000..098ec01a --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_container_crawler.py @@ -0,0 +1,57 @@ +import logging + +import dockercontainer +from icrawl_plugin import IContainerCrawler +from plugins.applications.liberty import liberty_crawler +from utils.crawler_exceptions import CrawlError + +logger = logging.getLogger('crawlutils') + + +class LibertyContainerCrawler(IContainerCrawler): + feature_type = 'application' + feature_key = 'liberty' + default_port = 9443 + + def get_feature(self): + return self.feature_key + + def crawl(self, container_id=None, **kwargs): + password = "password" + user = "user" + + if "password" in kwargs: + password = kwargs["password"] + + if "user" in kwargs: + user = kwargs["user"] + + c = dockercontainer.DockerContainer(container_id) + + # check image name + if c.image_name.find(self.feature_key) == -1: + logger.error("%s is not %s container", + c.image_name, + self.feature_key) + raise CrawlError("%s does not have expected name for %s (name=%s)", + container_id, + self.feature_key, + c.image_name) + + # extract IP and Port information + ip = c.get_container_ip() + ports = c.get_container_ports() + + # crawl all candidate ports + for each_port in ports: + return liberty_crawler.retrieve_metrics( + host=ip, + port=each_port, + user=user, + password=password, + feature_type=self.feature_type + ) + + raise CrawlError("%s has no accessible endpoint for %s", + container_id, + self.feature_key) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_crawler.py new file mode 100644 index 00000000..f2a58425 --- /dev/null +++ 
b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_crawler.py
@@ -0,0 +1,243 @@
+import urllib2
+import ssl
+import json
+import re
+from plugins.applications.liberty import feature
+from utils.crawler_exceptions import CrawlError
+
+
+def retrieve_status_page(user, password, url):
+
+    try:
+        ssl._create_unverified_context
+    except AttributeError:
+        pass
+    else:
+        ssl._create_default_https_context = ssl._create_unverified_context
+
+    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
+    password_mgr.add_password(None, url, user, password)
+    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
+    opener = urllib2.build_opener(handler)
+    urllib2.install_opener(opener)
+
+    req = urllib2.Request(url)
+    try:
+        response = urllib2.urlopen(req)
+        return response.read()
+    except Exception:
+        raise CrawlError("cannot access %s", url)
+
+
+def get_url(json_array, className):
+    urllist = []
+
+    for each_json in json_array:
+        if each_json.get("className") == className:
+            urllist.append(each_json.get("URL"))
+
+    return urllist
+
+
+def get_url_and_name(json_array, className):
+    url_name_list = []
+    r = re.compile("name=(.+)")
+    for each_json in json_array:
+
+        if each_json.get("className") == className:
+            m = r.search(each_json.get("objectName"))
+            if m:
+                url_name_list.append([each_json.get("URL"), m.group(1)])
+            else:
+                url_name_list.append([each_json.get("URL"), 0])
+
+    return url_name_list
+
+
+def servlet_get_url(attribute_array, name):
+    for attribute in attribute_array:
+        if attribute.get("name") == name:
+            return attribute.get("URL")
+
+
+def get_servlet_stats(base_url, url, user, password):
+    monitor_status = json.loads(retrieve_status_page(
+        user, password, base_url+url))
+    serv_stats = {}
+
+    attribute_array = monitor_status.get("attributes")
+    servlet_url = servlet_get_url(attribute_array, "ResponseTimeDetails")
+    servlet_status = json.loads(retrieve_status_page(
+        user, password, base_url+servlet_url))
+
+    serv_stats["reqCount"] = servlet_status.get("value").get("count")
+    serv_stats["responseMean"] = servlet_status.get("value").get("mean")
+    serv_stats["responseMax"] = servlet_status.get("value").get("maximumValue")
+    serv_stats["responseMin"] = servlet_status.get("value").get("minimumValue")
+
+    servlet_url = servlet_get_url(attribute_array, "ServletName")
+    servlet_status = json.loads(retrieve_status_page(
+        user, password, base_url + servlet_url))
+    serv_stats["name"] = servlet_status.get("value")
+
+    servlet_url = servlet_get_url(attribute_array, "AppName")
+    servlet_status = json.loads(retrieve_status_page(
+        user, password, base_url + servlet_url))
+    serv_stats["appName"] = servlet_status.get("value")
+    return serv_stats
+
+
+def get_jvm_stats(base_url, url, user, password):
+    monitor_status = json.loads(retrieve_status_page(
+        user, password, base_url+url))
+    jvm_stats = {}
+
+    attribute_array = monitor_status.get("attributes")
+    stats_name_array = ["Heap", "FreeMemory", "UsedMemory",
+                        "ProcessCPU", "GcCount", "GcTime", "UpTime"]
+    for stat_name in stats_name_array:
+        jvm_url = servlet_get_url(attribute_array, stat_name)
+        jvm_status = json.loads(retrieve_status_page(
+            user, password, base_url+jvm_url))
+        jvm_stats[stat_name] = jvm_status.get("value")
+
+    return jvm_stats
+
+
+def get_thread_stats(base_url, url, user, password):
+    monitor_status = json.loads(retrieve_status_page(
+        user, password, base_url+url))
+    thread_stats = {}
+
+    attribute_array = monitor_status.get("attributes")
+    stats_name_array = ["ActiveThreads",
"PoolSize", "PoolName"] + for stat_name in stats_name_array: + thread_url = servlet_get_url(attribute_array, stat_name) + thread_status = json.loads(retrieve_status_page( + user, password, base_url+thread_url)) + thread_stats[stat_name] = thread_status.get("value") + + return thread_stats + + +def get_session_stats(base_url, url, user, password): + monitor_status = json.loads(retrieve_status_page( + user, password, base_url+url)) + session_stats = {} + + attribute_array = monitor_status.get("attributes") + session_name_array = ["CreateCount", "LiveCount", "ActiveCount", + "InvalidatedCount", "InvalidatedCountbyTimeout"] + for stat_name in session_name_array: + session_url = servlet_get_url(attribute_array, stat_name) + session_status = json.loads(retrieve_status_page( + user, password, base_url+session_url)) + session_stats[stat_name] = session_status.get("value") + + return session_stats + + +def get_mongo_connection_stats(base_url, url, user, password): + monitor_status = json.loads(retrieve_status_page( + user, password, base_url+url)) + connection_stats = {} + + attribute_array = monitor_status.get("attributes") + connection_name_array = ["CheckedOutCount", "WaitQueueSize", + "MinSize", "MaxSize", "Size", "Host", "Port"] + for stat_name in connection_name_array: + connection_url = servlet_get_url(attribute_array, stat_name) + connection_status = json.loads(retrieve_status_page( + user, password, base_url+connection_url)) + connection_stats[stat_name] = connection_status.get("value") + + return connection_stats + + +def retrieve_metrics(host='localhost', port=9443, + user='user', password='password', + feature_type='application'): + url = "https://%s:%s/IBMJMXConnectorREST/mbeans/" % (host, port) + + status = retrieve_status_page(user, password, url) + json_obj = json.loads(status) + base_url = "https://%s:%s" % (host, port) + + mbeans_url_array = get_url(json_obj, + "com.ibm.ws.webcontainer.monitor.ServletStats") + for url in mbeans_url_array: + serv_stats = get_servlet_stats(base_url, url, user, password) + servlet_attributes = feature.LibertyServletFeature( + serv_stats.get("name"), + serv_stats.get("appName"), + serv_stats.get("reqCount"), + serv_stats.get("responseMean"), + serv_stats.get("responseMax"), + serv_stats.get("responseMin") + ) + yield ('liberty_servlet_status', servlet_attributes, feature_type) + + mbeans_url_array = get_url(json_obj, "com.ibm.ws.monitors.helper.JvmStats") + + for url in mbeans_url_array: + jvm_stats = get_jvm_stats(base_url, url, user, password) + jvm_attributes = feature.LibertyJVMFeature( + jvm_stats.get("Heap"), + jvm_stats.get("FreeMemory"), + jvm_stats.get("UsedMemory"), + jvm_stats.get("ProcessCPU"), + jvm_stats.get("GcCount"), + jvm_stats.get("GcTime"), + jvm_stats.get("UpTime") + ) + yield ('liberty_jvm_status', jvm_attributes, feature_type) + + mbeans_url_array = get_url(json_obj, + "com.ibm.ws.monitors.helper.ThreadPoolStats") + + for url in mbeans_url_array: + thread_stats = get_thread_stats(base_url, url, user, password) + thread_attributes = feature.LibertyThreadFeature( + thread_stats.get("ActiveThreads"), + thread_stats.get("PoolSize"), + thread_stats.get("PoolName") + ) + yield ('liberty_thread_status', thread_attributes, feature_type) + + mbeans_url_name_array = get_url_and_name(json_obj, + "com.ibm.ws.session.monitor" + ".SessionStats") + + for url_name in mbeans_url_name_array: + session_stats = get_session_stats(base_url, + url_name[0], user, password) + session_attributes = feature.LibertySessionFeature( + url_name[1], + 
session_stats.get("CreateCount"), + session_stats.get("LiveCount"), + session_stats.get("ActiveCount"), + session_stats.get("InvalidatedCount"), + session_stats.get("InvalidatedCountbyTimeout"), + ) + yield ('liberty_session_status', session_attributes, feature_type) + + mbeans_url_name_array = get_url_and_name(json_obj, + "com.mongodb.management" + ".ConnectionPoolStatistics") + + for url_name in mbeans_url_name_array: + connection_stats = get_mongo_connection_stats(base_url, + url_name[0], + user, password) + + connection_attributes = feature.LibertyMongoConnectionFeature( + connection_stats.get("CheckedOutCount"), + connection_stats.get("WaitQueueSize"), + connection_stats.get("MaxSize"), + connection_stats.get("MinSize"), + connection_stats.get("Host"), + connection_stats.get("Port"), + connection_stats.get("Size") + ) + yield ('liberty_mongo_connection_status', + connection_attributes, feature_type) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_host_crawler.plugin new file mode 100644 index 00000000..a5419ec8 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_host_crawler.plugin @@ -0,0 +1,13 @@ +[Core] +Name = application_liberty_host +Module = liberty_host_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "WAS liberty server" + +[Options] +user = administrator user name, Default is user +password = administrator password, Default is password + diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_host_crawler.py new file mode 100644 index 00000000..136c1506 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/liberty/liberty_host_crawler.py @@ -0,0 +1,32 @@ +from icrawl_plugin import IHostCrawler +from plugins.applications.liberty import liberty_crawler +import logging + +logger = logging.getLogger('crawlutils') + + +class LibertyHostCrawler(IHostCrawler): + feature_type = 'application' + feature_key = 'liberty' + default_port = 9443 + + def get_feature(self): + return self.feature_key + + def crawl(self, **options): + password = "password" + user = "user" + + if "password" in options: + password = options["password"] + + if "user" in options: + user = options["user"] + + return liberty_crawler.retrieve_metrics( + host='localhost', + port=self.default_port, + user=user, + password=password, + feature_type=self.feature_type + ) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/__init__.py new file mode 100644 index 00000000..836e3e88 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/feature.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/feature.py new file mode 100644 index 00000000..16b72ec0 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/feature.py @@ -0,0 +1,22 @@ +from collections import namedtuple + + +def get_feature(match1, match2, match3): + feature_attributes = 
NginxFeature(
+        int(match1.group(1)),
+        int(match2.group(1)),
+        int(match2.group(3)),
+        int(match3.group(1)),
+        int(match3.group(2)),
+        int(match3.group(3))
+    )
+    return feature_attributes
+
+NginxFeature = namedtuple('NginxFeature', [
+    'Connections',
+    'Accepted',
+    'Requests',
+    'Reading',
+    'Writing',
+    'Waiting'
+])
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_container_crawler.plugin
new file mode 100644
index 00000000..cbe9ab0a
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_container_crawler.plugin
@@ -0,0 +1,9 @@
+[Core]
+Name = application_nginx_container
+Module = nginx_container_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "nginx httpd server"
+
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_container_crawler.py
new file mode 100644
index 00000000..006ffab4
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_container_crawler.py
@@ -0,0 +1,48 @@
+import logging
+
+import dockercontainer
+from icrawl_plugin import IContainerCrawler
+from plugins.applications.nginx import nginx_crawler
+from utils.crawler_exceptions import CrawlError
+
+logger = logging.getLogger('crawlutils')
+
+
+class NginxContainerCrawler(IContainerCrawler):
+    feature_type = 'application'
+    feature_key = 'nginx'
+
+    def get_feature(self):
+        return self.feature_key
+
+    def crawl(self, container_id=None, **kwargs):
+        c = dockercontainer.DockerContainer(container_id)
+
+        # check image name
+        if c.image_name.find(self.feature_key) == -1:
+            logger.error("%s is not %s container",
+                         c.image_name,
+                         self.feature_key)
+            raise CrawlError("%s does not have expected name for %s (name=%s)",
+                             container_id,
+                             self.feature_key,
+                             c.image_name)
+
+        # extract IP and Port information
+        ip = c.get_container_ip()
+        ports = c.get_container_ports()
+
+        # crawl all candidate ports
+        for port in ports:
+            try:
+                metrics = nginx_crawler.retrieve_metrics(ip, port)
+            except CrawlError:
+                logger.error("can't find metrics endpoint at http://%s:%s",
+                             ip,
+                             port)
+                continue
+            return [(self.feature_key, metrics, self.feature_type)]
+
+        raise CrawlError("%s has no accessible endpoint for %s",
+                         container_id,
+                         self.feature_key)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_crawler.py
new file mode 100644
index 00000000..b4e0536a
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_crawler.py
@@ -0,0 +1,34 @@
+import re
+import urllib2
+
+from plugins.applications.nginx import feature
+from utils.crawler_exceptions import CrawlError
+
+
+def retrieve_status_page(host, port):
+    status_page = "http://%s:%s/nginx_status" % (host, port)
+    req = urllib2.Request(status_page)
+    response = urllib2.urlopen(req)
+    return response.read()
+
+
+def retrieve_metrics(host='localhost', port=80):
+    try:
+        status = retrieve_status_page(host, port)
+    except Exception:
+        raise CrawlError("cannot access http://%s:%s",
+                         host, port)
+
+    match1 = re.search(r'Active connections:\s+(\d+)', status)
+    match2 = re.search(r'\s*(\d+)\s+(\d+)\s+(\d+)',
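+                       # the stub_status line after "server accepts
+                       # handled requests" holds three counters;
+                       # get_feature() uses group(1) (accepts) and
+                       # group(3) (total requests)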
status) + match3 = re.search(r'Reading:\s*(\d+)\s*Writing:\s*(\d+)\s*' + 'Waiting:\s*(\d+)', status) + + try: + feature_attributes = feature.get_feature( + match1, + match2, + match3) + return feature_attributes + except Exception: + raise CrawlError("failure to parse http://%s:%s", host, port) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_host_crawler.plugin new file mode 100644 index 00000000..0bb1d099 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_host_crawler.plugin @@ -0,0 +1,9 @@ +[Core] +Name = application_nginx_host +Module = nginx_host_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "nginx httpd server" + diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_host_crawler.py new file mode 100644 index 00000000..ce237e92 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/nginx/nginx_host_crawler.py @@ -0,0 +1,21 @@ +from icrawl_plugin import IHostCrawler +from plugins.applications.nginx import nginx_crawler +import logging + +logger = logging.getLogger('crawlutils') + + +class NginxHostCrawler(IHostCrawler): + feature_type = 'application' + feature_key = 'nginx' + default_port = 80 + + def get_feature(self): + return self.feature_key + + def crawl(self): + metrics = nginx_crawler.retrieve_metrics( + host='localhost', + port=self.default_port + ) + return [(self.feature_key, metrics, self.feature_type)] diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/__init__.py new file mode 100644 index 00000000..836e3e88 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/feature.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/feature.py new file mode 100644 index 00000000..a3e263fc --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/feature.py @@ -0,0 +1,174 @@ +from collections import namedtuple + + +def create_feature(metrics): + fields = RedisFeature._fields + + for field_name in fields: + if field_name not in metrics: + metrics[field_name] = "" + + feature_attributes = RedisFeature( + metrics['aof_current_rewrite_time_sec'], + metrics['aof_enabled'], + metrics['aof_last_bgrewrite_status'], + metrics['aof_last_rewrite_time_sec'], + metrics['aof_last_write_status'], + metrics['aof_rewrite_in_progress'], + metrics['aof_rewrite_scheduled'], + metrics['arch_bits'], + metrics['blocked_clients'], + metrics['client_biggest_input_buf'], + metrics['client_longest_output_list'], + metrics['cluster_enabled'], + metrics['config_file'], + metrics['connected_clients'], + metrics['connected_slaves'], + metrics['evicted_keys'], + metrics['executable'], + metrics['expired_keys'], + metrics['gcc_version'], + metrics['hz'], + metrics['instantaneous_input_kbps'], + metrics['instantaneous_ops_per_sec'], + metrics['instantaneous_output_kbps'], + metrics['keyspace_hits'], + metrics['keyspace_misses'], + 
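+        # the remaining arguments mirror the redis INFO keys, in the
+        # same alphabetical order as the RedisFeature fields below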
metrics['latest_fork_usec'], + metrics['loading'], + metrics['lru_clock'], + metrics['master_repl_offset'], + metrics['maxmemory'], + metrics['maxmemory_human'], + metrics['maxmemory_policy'], + metrics['mem_allocator'], + metrics['mem_fragmentation_ratio'], + metrics['migrate_cached_sockets'], + metrics['multiplexing_api'], + metrics['os'], + metrics['process_id'], + metrics['pubsub_channels'], + metrics['pubsub_patterns'], + metrics['rdb_bgsave_in_progress'], + metrics['rdb_changes_since_last_save'], + metrics['rdb_current_bgsave_time_sec'], + metrics['rdb_last_bgsave_status'], + metrics['rdb_last_bgsave_time_sec'], + metrics['rdb_last_save_time'], + metrics['redis_build_id'], + metrics['redis_git_dirty'], + metrics['redis_git_sha1'], + metrics['redis_mode'], + metrics['redis_version'], + metrics['rejected_connections'], + metrics['repl_backlog_active'], + metrics['repl_backlog_first_byte_offset'], + metrics['repl_backlog_histlen'], + metrics['repl_backlog_size'], + metrics['role'], + metrics['run_id'], + metrics['sync_full'], + metrics['sync_partial_err'], + metrics['sync_partial_ok'], + metrics['tcp_port'], + metrics['total_commands_processed'], + metrics['total_connections_received'], + metrics['total_net_input_bytes'], + metrics['total_net_output_bytes'], + metrics['total_system_memory'], + metrics['total_system_memory_human'], + metrics['uptime_in_days'], + metrics['uptime_in_seconds'], + metrics['used_cpu_sys'], + metrics['used_cpu_sys_children'], + metrics['used_cpu_user'], + metrics['used_cpu_user_children'], + metrics['used_memory'], + metrics['used_memory_peak'], + metrics['used_memory_peak_human'], + metrics['used_memory_rss'], + metrics['used_memory_rss_human'] + ) + return feature_attributes + +RedisFeature = namedtuple('RedisFeature', [ + 'aof_current_rewrite_time_sec', + 'aof_enabled', + 'aof_last_bgrewrite_status', + 'aof_last_rewrite_time_sec', + 'aof_last_write_status', + 'aof_rewrite_in_progress', + 'aof_rewrite_scheduled', + 'arch_bits', + 'blocked_clients', + 'client_biggest_input_buf', + 'client_longest_output_list', + 'cluster_enabled', + 'config_file', + 'connected_clients', + 'connected_slaves', + 'evicted_keys', + 'executable', + 'expired_keys', + 'gcc_version', + 'hz', + 'instantaneous_input_kbps', + 'instantaneous_ops_per_sec', + 'instantaneous_output_kbps', + 'keyspace_hits', + 'keyspace_misses', + 'latest_fork_usec', + 'loading', + 'lru_clock', + 'master_repl_offset', + 'maxmemory', + 'maxmemory_human', + 'maxmemory_policy', + 'mem_allocator', + 'mem_fragmentation_ratio', + 'migrate_cached_sockets', + 'multiplexing_api', + 'os', + 'process_id', + 'pubsub_channels', + 'pubsub_patterns', + 'rdb_bgsave_in_progress', + 'rdb_changes_since_last_save', + 'rdb_current_bgsave_time_sec', + 'rdb_last_bgsave_status', + 'rdb_last_bgsave_time_sec', + 'rdb_last_save_time', + 'redis_build_id', + 'redis_git_dirty', + 'redis_git_sha1', + 'redis_mode', + 'redis_version', + 'rejected_connections', + 'repl_backlog_active', + 'repl_backlog_first_byte_offset', + 'repl_backlog_histlen', + 'repl_backlog_size', + 'role', + 'run_id', + 'sync_full', + 'sync_partial_err', + 'sync_partial_ok', + 'tcp_port', + 'total_commands_processed', + 'total_connections_received', + 'total_net_input_bytes', + 'total_net_output_bytes', + 'total_system_memory', + 'total_system_memory_human', + 'uptime_in_days', + 'uptime_in_seconds', + 'used_cpu_sys', + 'used_cpu_sys_children', + 'used_cpu_user', + 'used_cpu_user_children', + 'used_memory', + 'used_memory_peak', + 'used_memory_peak_human', + 
'used_memory_rss', + 'used_memory_rss_human', +]) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_container_crawler.plugin new file mode 100644 index 00000000..51aba1af --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = application_redis_container +Module = redis_container_crawler + +[Documentation] +Author = Tatsuhiro Chiba (chiba@jp.ibm.com) +Version = 0.1 +Description = Redis crawling function for containers on the host diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_container_crawler.py new file mode 100644 index 00000000..f5d5314d --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_container_crawler.py @@ -0,0 +1,57 @@ +from icrawl_plugin import IContainerCrawler +from plugins.applications.redis import feature +import dockercontainer +from requests.exceptions import ConnectionError +import logging + + +logger = logging.getLogger('crawlutils') + + +class RedisContainerCrawler(IContainerCrawler): + ''' + Crawling app provided metrics for redis container on docker. + Usually redis listens on port 6379. + ''' + + feature_type = "application" + feature_key = "redis" + default_port = 6379 + + def get_feature(self): + return self.feature_key + + def crawl(self, container_id=None, **kwargs): + + import pip + pip.main(['install', 'redis']) + import redis + + # only crawl redis container. Otherwise, quit. 
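+        # verify the image name, resolve the container's IP and
+        # published ports (defaulting to 6379), then return features
+        # from the first port that answers INFO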
+        c = dockercontainer.DockerContainer(container_id)
+        if c.image_name.find(self.feature_key) == -1:
+            logger.debug("%s is not %s container" %
+                         (c.image_name, self.feature_key))
+            raise NameError("this is not a target crawl container")
+
+        # extract IP and Port information
+        ip = c.get_container_ip()
+        ports = c.get_container_ports()
+
+        # set default port number
+        if len(ports) == 0:
+            ports.append(self.default_port)
+
+        # query all available ports
+        for port in ports:
+            client = redis.Redis(host=ip, port=port)
+            try:
+                metrics = client.info()
+            except ConnectionError:
+                logger.info("redis does not listen on port:%d", port)
+                continue
+            feature_attributes = feature.create_feature(metrics)
+            return [(self.feature_key, feature_attributes, self.feature_type)]
+
+        # no port was reachable
+        raise ConnectionError("no listening ports")
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_host_crawler.plugin
new file mode 100644
index 00000000..c9e55de7
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_host_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = application_redis_host
+Module = redis_host_crawler
+
+[Documentation]
+Author = Tatsuhiro Chiba (chiba@jp.ibm.com)
+Version = 0.1
+Description = Redis crawling function for the local host
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_host_crawler.py
new file mode 100644
index 00000000..697d24c1
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/redis/redis_host_crawler.py
@@ -0,0 +1,37 @@
+from icrawl_plugin import IHostCrawler
+from plugins.applications.redis import feature
+from requests.exceptions import ConnectionError
+import logging
+
+logger = logging.getLogger('crawlutils')
+
+
+class RedisHostCrawler(IHostCrawler):
+    '''
+    Crawling app provided metrics for redis on host.
+    Usually redis listens on port 6379.
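+
+    crawl() returns a single-entry list of the form
+    ('redis', RedisFeature(...), 'application').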
+    '''
+
+    feature_type = "application"
+    feature_key = "redis"
+    default_port = 6379
+
+    def get_feature(self):
+        return self.feature_key
+
+    # TODO: prepare a useful way to set host/port
+    def crawl(self, root_dir='/', **kwargs):
+        import pip
+        pip.main(['install', 'redis'])
+        import redis
+
+        try:
+            client = redis.Redis(host='localhost', port=self.default_port)
+            metrics = client.info()
+        except ConnectionError:
+            logger.info("redis does not listen on port:%d", self.default_port)
+            raise ConnectionError("no listener at %d" % self.default_port)
+
+        feature_attributes = feature.create_feature(metrics)
+
+        return [(self.feature_key, feature_attributes, self.feature_type)]
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/__init__.py
new file mode 100644
index 00000000..836e3e88
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/__init__.py
@@ -0,0 +1,2 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/feature.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/feature.py
new file mode 100644
index 00000000..8fb45603
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/feature.py
@@ -0,0 +1,41 @@
+from collections import namedtuple
+
+
+TomcatJVMFeature = namedtuple('TomcatJVMFeature', [
+    'free',
+    'total',
+    'max'
+    ])
+
+TomcatMemoryFeature = namedtuple('TomcatMemoryFeature', [
+    'name',
+    'type',
+    'initial',
+    'committed',
+    'maximum',
+    'used'
+    ])
+
+TomcatConnectorFeature = namedtuple('TomcatConnectorFeature', [
+    'connector',
+    'maxThread',
+    'currentThread',
+    'currentThreadBusy',
+    'requestMaxTime',
+    'processingTime',
+    'requestCount',
+    'errorCount',
+    'byteReceived',
+    'byteSent'
+    ])
+
+TomcatWorkerFeature = namedtuple('TomcatWorkerFeature', [
+    'connector',
+    'stage',
+    'time',
+    'byteSent',
+    'byteReceived',
+    'client',
+    'vhost',
+    'request'
+    ])
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_container_crawler.plugin
new file mode 100644
index 00000000..571695b6
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_container_crawler.plugin
@@ -0,0 +1,12 @@
+[Core]
+Name = application_tomcat_container
+Module = tomcat_container_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Tomcat crawling function for containers on the host
+
+[Options]
+user = administrator user name, Default is tomcat
+password = administrator password, Default is password
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_container_crawler.py
new file mode 100644
index 00000000..69fea3d0
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_container_crawler.py
@@ -0,0 +1,57 @@
+import logging
+
+import dockercontainer
+from icrawl_plugin import IContainerCrawler
+from plugins.applications.tomcat import tomcat_crawler
+from utils.crawler_exceptions import CrawlError
+
+logger = logging.getLogger('crawlutils')
+
+
+class TomcatContainerCrawler(IContainerCrawler):
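+    '''
+    Collects JVM, memory pool, connector and worker metrics from a
+    Tomcat container through the manager status XML endpoint (see
+    tomcat_crawler.retrieve_metrics).
+    '''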
+    feature_type = 'application'
+    feature_key = 'tomcat'
+    default_port = 8080
+
+    def get_feature(self):
+        return self.feature_key
+
+    def crawl(self, container_id=None, **kwargs):
+        password = "password"
+        user = "tomcat"
+
+        if "password" in kwargs:
+            password = kwargs["password"]
+
+        if "user" in kwargs:
+            user = kwargs["user"]
+
+        c = dockercontainer.DockerContainer(container_id)
+
+        # check image name
+        if c.image_name.find(self.feature_key) == -1:
+            logger.error("%s is not %s container",
+                         c.image_name,
+                         self.feature_key)
+            raise CrawlError("%s does not have expected name for %s (name=%s)",
+                             container_id,
+                             self.feature_key,
+                             c.image_name)
+
+        # extract IP and Port information
+        ip = c.get_container_ip()
+        ports = c.get_container_ports()
+
+        # crawl all candidate ports
+        for each_port in ports:
+            return tomcat_crawler.retrieve_metrics(
+                host=ip,
+                port=each_port,
+                user=user,
+                password=password,
+                feature_type=self.feature_type
+            )
+
+        raise CrawlError("%s has no accessible endpoint for %s",
+                         container_id,
+                         self.feature_key)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_crawler.py
new file mode 100644
index 00000000..9511ba70
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_crawler.py
@@ -0,0 +1,82 @@
+import urllib2
+from plugins.applications.tomcat import feature
+from xml.etree import ElementTree
+from utils.crawler_exceptions import CrawlError
+
+
+def retrieve_status_page(hostname, port, user, password):
+    statusPage = "http://%s:%s/manager/status?XML=true" % (hostname, port)
+
+    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
+    password_mgr.add_password(None, statusPage, user, password)
+    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
+    opener = urllib2.build_opener(handler)
+    urllib2.install_opener(opener)
+
+    req = urllib2.Request(statusPage)
+    try:
+        response = urllib2.urlopen(req)
+        return response.read()
+    except Exception:
+        raise CrawlError("cannot access http://%s:%s",
+                         hostname, port)
+
+
+def retrieve_metrics(host='localhost', port=8080,
+                     user='tomcat', password='password',
+                     feature_type='application'):
+
+    status = retrieve_status_page(host, port, user, password)
+    tree = ElementTree.XML(status)
+    memoryNode = tree.find('jvm/memory')
+    jvm_attributes = feature.TomcatJVMFeature(
+        memoryNode.get("free"),
+        memoryNode.get("total"),
+        memoryNode.get("max")
+    )
+
+    yield('tomcat_jvm', jvm_attributes, feature_type)
+
+    for node in tree.iter('memorypool'):
+        memory_pool_attributes = feature.TomcatMemoryFeature(
+            node.get("name"),
+            node.get("type"),
+            node.get("usageInit"),
+            node.get("usageCommitted"),
+            node.get("usageMax"),
+            node.get("usageUsed")
+        )
+        yield('tomcat_memory', memory_pool_attributes, feature_type)
+
+    ConnectorNode = tree.iter('connector')
+    for node in ConnectorNode:
+        threadInfo = node.find("threadInfo")
+        reqInfo = node.find("requestInfo")
+
+        connector_feature_attributes = feature.TomcatConnectorFeature(
+            node.get("name"),
+            threadInfo.get("maxThreads"),
+            threadInfo.get("currentThreadCount"),
+            threadInfo.get("currentThreadsBusy"),
+            reqInfo.get("maxTime"),
+            reqInfo.get("processingTime"),
+            reqInfo.get("requestCount"),
+            reqInfo.get("errorCount"),
+            reqInfo.get("bytesReceived"),
+            reqInfo.get("bytesSent")
+        )
+        yield('tomcat_connector', connector_feature_attributes, feature_type)
+
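+        # each <connector> may also carry <worker> elements describing
+        # in-flight requests; emit one 'tomcat_worker' feature per worker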
+        workNode = node.iter("worker")
+        for work in workNode:
+            worker_feature_attributes = feature.TomcatWorkerFeature(
+                node.get("name"),
+                work.get("stage"),
+                work.get("requestProcessingTime"),
+                work.get("requestBytesSent"),
+                work.get("requestBytesReceived"),
+                work.get("remoteAddr"),
+                work.get("virtualHost"),
+                work.get("currentUri")
+            )
+            yield('tomcat_worker', worker_feature_attributes, feature_type)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_host_crawler.plugin
new file mode 100644
index 00000000..3869c3f5
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_host_crawler.plugin
@@ -0,0 +1,12 @@
+[Core]
+Name = application_tomcat_host
+Module = tomcat_host_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "Apache Tomcat server"
+
+[Options]
+user = administrator user name, Default is tomcat
+password = administrator password, Default is password
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_host_crawler.py
new file mode 100644
index 00000000..3091971e
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/applications/tomcat/tomcat_host_crawler.py
@@ -0,0 +1,32 @@
+from icrawl_plugin import IHostCrawler
+from plugins.applications.tomcat import tomcat_crawler
+import logging
+
+logger = logging.getLogger('crawlutils')
+
+
+class TomcatHostCrawler(IHostCrawler):
+    feature_type = 'application'
+    feature_key = 'tomcat'
+    default_port = 8080
+
+    def get_feature(self):
+        return self.feature_key
+
+    def crawl(self, **options):
+        password = "password"
+        user = "tomcat"
+
+        if "password" in options:
+            password = options["password"]
+
+        if "user" in options:
+            user = options["user"]
+
+        return tomcat_crawler.retrieve_metrics(
+            host='localhost',
+            port=self.default_port,
+            user=user,
+            password=password,
+            feature_type=self.feature_type
+        )
diff --git a/crawler/utils/plugincont/Dockerfile b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/__init__.py
similarity index 100%
rename from crawler/utils/plugincont/Dockerfile
rename to crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/__init__.py
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/base_emitter.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/base_emitter.py
new file mode 100644
index 00000000..ab65ba6f
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/base_emitter.py
@@ -0,0 +1,27 @@
+
+class BaseEmitter:
+    """
+    Base emitter class from which emitters like FileEmitter, StdoutEmitter
+    should inherit. The main idea is that all emitters get a url, and should
+    implement an emit() function given an iostream (a buffer with the features
+    to emit).
+    """
+
+    def __init__(self, url, timeout=1, max_retries=5,
+                 emit_per_line=False):
+        self.url = url
+        self.timeout = timeout
+        self.max_retries = max_retries
+        self.emit_per_line = emit_per_line
+
+    def emit(self, iostream, compress=False,
+             metadata={}, snapshot_num=0):
+        """
+
+        :param iostream: a CStringIO used to buffer the formatted features.
+        :param compress:
+        :param metadata:
+        :param snapshot_num:
+        :return:
+        """
+        pass
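+
+# A minimal emitter honoring this contract might look like the sketch
+# below (illustration only; a hypothetical StdoutEmitter, named after the
+# class docstring's example, assuming nothing beyond the BaseEmitter
+# interface above):
+#
+#     class StdoutEmitter(BaseEmitter):
+#         def emit(self, iostream, compress=False,
+#                  metadata={}, snapshot_num=0):
+#             # print the buffered, formatted features
+#             print(iostream.getvalue())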
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/base_http_emitter.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/base_http_emitter.py
new file mode 100644
index 00000000..a6f142e2
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/base_http_emitter.py
@@ -0,0 +1,80 @@
+import logging
+import time
+
+import requests
+
+from iemit_plugin import IEmitter
+
+logger = logging.getLogger('crawlutils')
+
+
+class BaseHttpEmitter:
+    """
+    Base emitter class for the HTTP/HTTPS protocols.
+    The HTTP and HTTPS emitters inherit init(), emit() and post() from
+    this class; subclasses must implement get_emitter_protocol().
+    """
+
+    def get_emitter_protocol(self):
+        raise NotImplementedError("Use http or https emitter plugin instead.")
+
+    def init(self, url, timeout=1, max_retries=5, emit_format='csv'):
+        IEmitter.init(self, url,
+                      timeout=timeout,
+                      max_retries=max_retries,
+                      emit_format=emit_format)
+        if emit_format == 'json':
+            self.emit_per_line = True
+
+    def emit(self, frame, compress=False,
+             metadata={}, snapshot_num=0, **kwargs):
+        """
+
+        :param frame: a frame containing extracted features
+        :param compress:
+        :param metadata:
+        :param snapshot_num:
+        :return: None
+        """
+        iostream = self.format(frame)
+        if compress:
+            proto = self.get_emitter_protocol()
+            raise NotImplementedError(
+                '%s emitter does not support gzip.' % proto
+            )
+        if self.emit_per_line:
+            iostream.seek(0)
+            for line in iostream.readlines():
+                self.post(line, metadata)
+        else:
+            self.post(iostream.getvalue(), metadata)
+
+    def post(self, content='', metadata={}):
+        headers = {'content-type': 'application/csv'}
+        params = {}
+        for attempt in range(self.max_retries):
+            try:
+                response = requests.post(self.url, headers=headers,
+                                         params=params,
+                                         data=content)
+            except requests.exceptions.ChunkedEncodingError as e:
+                logger.exception(e)
+                logger.error(
+                    "POST to %s resulted in exception (attempt %d of %d), "
+                    "exiting." % (self.url, attempt + 1, self.max_retries))
+                break
+            except requests.exceptions.RequestException as e:
+                logger.exception(e)
+                logger.error(
+                    "POST to %s resulted in exception (attempt %d of %d)" %
+                    (self.url, attempt + 1, self.max_retries))
+                time.sleep(2.0 ** attempt * 0.1)
+                continue
+            if response.status_code != requests.codes.ok:
+                logger.error("POST to %s resulted in status code %s: %s "
+                             "(attempt %d of %d)" %
+                             (self.url, str(response.status_code),
+                              response.text, attempt + 1, self.max_retries))
+                time.sleep(2.0 ** attempt * 0.1)
+            else:
+                break
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/file_emitter.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/file_emitter.plugin
new file mode 100644
index 00000000..fbb901bd
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/file_emitter.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = File Emitter
+Module = file_emitter
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Plugin to emit frame to file
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/file_emitter.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/file_emitter.py
new file mode 100644
index 00000000..85cb0988
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/file_emitter.py
@@ -0,0 +1,46 @@
+import gzip
+import shutil
+
+from iemit_plugin import IEmitter
+
+
+class FileEmitter(IEmitter):
+
+    """
+    Emitter to file. This creates one file per frame. The file names
+    are the ones in the url. For example: for file:///tmp/a the file for
+    the first frame would be /tmp/a.0 for a host, and /tmp/a.xyz.0 for a
+    container with id xyz.
+    """
+
+    def get_emitter_protocol(self):
+        return 'file'
+
+    def emit(self, frame, compress=False,
+             metadata={}, snapshot_num=0, **kwargs):
+        """
+
+        :param frame: a frame containing extracted features
+        :param compress:
+        :param metadata:
+        :param snapshot_num:
+        :return:
+        """
+        iostream = self.format(frame)
+        output_path = self.url[len('file://'):]
+        short_name = metadata.get('emit_shortname', None)
+        if not short_name:
+            file_suffix = str(snapshot_num)
+        else:
+            file_suffix = '{0}.{1}'.format(short_name, snapshot_num)
+        output_path = '{0}.{1}'.format(output_path, file_suffix)
+        output_path += '.gz' if compress else ''
+
+        with open(output_path, 'w') as fd:
+            if compress:
+                gzip_file = gzip.GzipFile(fileobj=fd, mode='w')
+                gzip_file.write(iostream.getvalue())
+                gzip_file.close()
+            else:
+                iostream.seek(0)
+                shutil.copyfileobj(iostream, fd)
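+
+# Naming sketch (hypothetical calls; assumes an already-formatted frame
+# and the IEmitter init() plumbing from iemit_plugin, which is outside
+# this diff):
+#
+#     emitter = FileEmitter()
+#     emitter.init(url='file:///tmp/a')
+#     emitter.emit(frame, snapshot_num=0)  # host frame -> /tmp/a.0
+#     emitter.emit(frame, metadata={'emit_shortname': 'xyz'},
+#                  snapshot_num=0)         # container xyz -> /tmp/a.xyz.0
+#     emitter.emit(frame, compress=True,
+#                  snapshot_num=1)         # gzipped -> /tmp/a.1.gz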
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/fluentd_emitter.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/fluentd_emitter.plugin
new file mode 100644
index 00000000..9c80ca91
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/fluentd_emitter.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = Fluentd Emitter
+Module = fluentd_emitter
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Plugin to emit frame to Fluentd
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/fluentd_emitter.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/fluentd_emitter.py
new file mode 100644
index 00000000..1fa4ed34
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/fluentd_emitter.py
@@ -0,0 +1,98 @@
+import logging
+
+from iemit_plugin import IEmitter
+from utils.crawler_exceptions import EmitterUnsupportedFormat
+from utils.misc import call_with_retries
+from fluent import sender
+import time
+
+logger = logging.getLogger('crawlutils')
+
+
+class FluentdEmitter(IEmitter):
+
+    def get_emitter_protocol(self):
+        return 'fluentd'
+
+    def init(self, url, timeout=1, max_retries=5, emit_format='fluentd'):
+        self.url = url
+        self.timeout = timeout
+        self.max_retries = max_retries
+        self.emit_per_line = True
+
+        if emit_format != 'json':
+            raise EmitterUnsupportedFormat('Not supported: %s' % emit_format)
+
+        try:
+            # assume the URL for the fluentd engine is of the form
+            # fluentd://IP:PORT
+            host, port = url[len('fluentd://'):].split(':')
+        except (KeyError, TypeError) as exc:
+            logger.warn('Can not parse the url provided.')
+            raise exc
+
+        self.fluentd_sender = None
+
+        call_with_retries(self.connect_to_fluentd_engine,
+                          max_retries=self.max_retries,
+                          _args=tuple((host, int(port))))
+
+    def connect_to_fluentd_engine(self, host, port):
+        self.fluentd_sender = sender.FluentSender(
+            'crawler', host=host, port=port)
+        if self.fluentd_sender.socket is None:
+            raise Exception
+
+    def get_json_item(self, frame):
+        yield frame.metadata
+        for (key, val, feature_type) in frame.data:
+            output = dict()
+            if not isinstance(val, dict):
+                val = val._asdict()
+            output['feature_type'] = feature_type
+            output['feature_key'] = key
+            output['feature_val'] = val
+            yield output
+
+    def emit_frame_atonce(self, tag, timestamp, frame):
+        combined_dict = dict()
+        item_count = 0
+
+        for json_item in self.get_json_item(frame):
+            key = 'feature' + str(item_count)
+            combined_dict[key] = json_item
+            item_count += 1
+
+        self._emit(tag, timestamp, combined_dict)
+
+    def _emit(self, tag, timestamp, item):
+        self.fluentd_sender.emit_with_time(tag, timestamp, item)
+        if self.fluentd_sender.last_error is not None:
+            self.fluentd_sender.clear_last_error()
+            raise Exception
+
+    def emit(self, frame, compress=False,
+             metadata={}, snapshot_num=0,
**kwargs): + """ + + :param compress: + :param metadata: + :param snapshot_num: + :return: + """ + if compress: + raise NotImplementedError('Compress not implemented.') + + tag = frame.metadata.get('namespace', '') + timestamp = frame.metadata.get('timestamp', '') + timestamp = time.mktime( + time.strptime(timestamp[:-5], '%Y-%m-%dT%H:%M:%S')) + + if self.emit_per_line: + for json_item in self.get_json_item(frame): + call_with_retries(self._emit, + max_retries=self.max_retries, + _args=tuple((tag, timestamp, json_item))) + else: + call_with_retries(self.emit_frame_atonce, + max_retries=self.max_retries, + _args=tuple((tag, timestamp, frame))) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/http_emitter.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/http_emitter.plugin new file mode 100644 index 00000000..c2d7a759 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/http_emitter.plugin @@ -0,0 +1,8 @@ +[Core] +Name = Http Emitter +Module = http_emitter + +[Documentation] +Author = IBM +Version = 0.1 +Description = Plugin to post frame data to http server diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/http_emitter.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/http_emitter.py new file mode 100644 index 00000000..58731f10 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/http_emitter.py @@ -0,0 +1,12 @@ +import logging + +from iemit_plugin import IEmitter +from plugins.emitters.base_http_emitter import BaseHttpEmitter + +logger = logging.getLogger('crawlutils') + + +class HttpEmitter(BaseHttpEmitter, IEmitter): + + def get_emitter_protocol(self): + return 'http' diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/https_emitter.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/https_emitter.plugin new file mode 100644 index 00000000..bb4a44e3 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/https_emitter.plugin @@ -0,0 +1,8 @@ +[Core] +Name = Https Emitter +Module = https_emitter + +[Documentation] +Author = IBM +Version = 0.1 +Description = Plugin to post frame data to https server diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/https_emitter.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/https_emitter.py new file mode 100644 index 00000000..d43a29fb --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/https_emitter.py @@ -0,0 +1,12 @@ +import logging + +from iemit_plugin import IEmitter +from plugins.emitters.base_http_emitter import BaseHttpEmitter + +logger = logging.getLogger('crawlutils') + + +class HttpsEmitter(BaseHttpEmitter, IEmitter): + + def get_emitter_protocol(self): + return 'https' diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/kafka_emitter.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/kafka_emitter.plugin new file mode 100644 index 00000000..e917d99c --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/kafka_emitter.plugin @@ -0,0 +1,8 @@ +[Core] +Name = Kafka Emitter +Module = kafka_emitter + +[Documentation] +Author = IBM +Version = 0.1 +Description = Plugin to emit frame over kafka diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/kafka_emitter.py 
b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/kafka_emitter.py new file mode 100644 index 00000000..b633e49a --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/kafka_emitter.py @@ -0,0 +1,71 @@ +import logging + +import kafka as kafka_python +import pykafka + +from iemit_plugin import IEmitter +from utils.misc import (NullHandler, call_with_retries) + +logger = logging.getLogger('crawlutils') +# Kafka logs too much +logging.getLogger('kafka').addHandler(NullHandler()) + + +class KafkaEmitter(IEmitter): + + def get_emitter_protocol(self): + return 'kafka' + + def init(self, url, timeout=1, max_retries=10, emit_format='csv'): + IEmitter.init(self, url, + timeout=timeout, + max_retries=max_retries, + emit_format=emit_format) + + if emit_format == 'json': + self.emit_per_line = True + + try: + broker, topic = url[len('kafka://'):].split('/') + except (KeyError, TypeError) as exc: + logger.warn('Can not parse the url provided.') + raise exc + + self.client = None + self.producer = None + + call_with_retries(self.connect_to_broker, + max_retries=self.max_retries, + _args=tuple((broker, topic))) + + def connect_to_broker(self, broker, topic): + kafka_python_client = kafka_python.SimpleClient(broker) + kafka_python_client.ensure_topic_exists(topic) + + self.client = pykafka.KafkaClient(hosts=broker) + self.producer = self.client.topics[topic].get_producer() + + def emit(self, frame, compress=False, + metadata={}, snapshot_num=0, **kwargs): + """ + + :param compress: + :param metadata: + :param snapshot_num: + :return: + """ + iostream = self.format(frame) + if compress: + raise NotImplementedError('Compress not implemented.') + + if self.emit_per_line: + iostream.seek(0) + for line in iostream.readlines(): + call_with_retries(lambda io: self.producer.produce([line]), + max_retries=self.max_retries, + _args=tuple([iostream])) + else: + call_with_retries( + lambda io: self.producer.produce([io.getvalue()]), + max_retries=self.max_retries, + _args=tuple([iostream])) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/mtgraphite_emitter.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/mtgraphite_emitter.plugin new file mode 100644 index 00000000..af6da177 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/mtgraphite_emitter.plugin @@ -0,0 +1,8 @@ +[Core] +Name = MTGraphite Emitter +Module = mtgraphite_emitter + +[Documentation] +Author = IBM +Version = 0.1 +Description = Plugin to emit frame to MTGraphite server diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/mtgraphite_emitter.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/mtgraphite_emitter.py new file mode 100644 index 00000000..1d841e5a --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/mtgraphite_emitter.py @@ -0,0 +1,43 @@ +import logging + +from iemit_plugin import IEmitter +from utils.mtgraphite import MTGraphiteClient +from formatters import write_in_graphite_format +from utils.crawler_exceptions import EmitterUnsupportedFormat + +logger = logging.getLogger('crawlutils') + + +class MtGraphiteEmitter(IEmitter): + + def get_emitter_protocol(self): + return 'mtgraphite' + + def init(self, url, timeout=1, max_retries=5, emit_format='graphite'): + self.url = url + self.timeout = timeout + self.max_retries = max_retries + self.emit_per_line = True + + if emit_format != 'graphite': + raise EmitterUnsupportedFormat('Not 
supported: %s' % emit_format)
+
+        self.formatter = write_in_graphite_format
+        self.mtgraphite_client = MTGraphiteClient(self.url)
+
+    def emit(self, frame, compress=False,
+             metadata={}, snapshot_num=0, **kwargs):
+        """
+
+        :param frame: a frame containing extracted features
+        :param compress: ignored; this emitter does not compress
+        :param metadata:
+        :param snapshot_num:
+        :return: None
+        """
+        iostream = self.format(frame)
+        if self.emit_per_line:
+            iostream.seek(0)
+            num = self.mtgraphite_client.send_messages(iostream.readlines())
+        else:
+            num = self.mtgraphite_client.send_messages([iostream.getvalue()])
+        logger.debug('Pushed %d messages to mtgraphite queue' % num)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/sas_emitter.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/sas_emitter.plugin
new file mode 100644
index 00000000..8134d8f3
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/sas_emitter.plugin
@@ -0,0 +1,14 @@
+[Core]
+Name = SAS Https Emitter
+Module = sas_emitter
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Plugin to post frame data to SAS (security analytics service) https server
+
+[Options]
+token_filepath = /etc/sas-secrets/token
+access_group_filepath = /etc/sas-secrets/access_group
+cloudoe_filepath = /etc/sas-secrets/cloudoe
+ssl_verification = False
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/sas_emitter.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/sas_emitter.py
new file mode 100644
index 00000000..0943e1b3
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/sas_emitter.py
@@ -0,0 +1,146 @@
+import logging
+import os
+import json
+import time
+
+import requests
+
+from iemit_plugin import IEmitter
+from plugins.emitters.base_http_emitter import BaseHttpEmitter
+from utils.crawler_exceptions import EmitterUnsupportedFormat
+
+logger = logging.getLogger('crawlutils')
+
+
+class SasEmitter(BaseHttpEmitter, IEmitter):
+
+    def get_emitter_protocol(self):
+        return 'sas'
+
+    def init(self, url, timeout=1, max_retries=5, emit_format='csv'):
+        IEmitter.init(self, url,
+                      timeout=timeout,
+                      max_retries=max_retries,
+                      emit_format=emit_format)
+        if emit_format != 'csv':
+            raise EmitterUnsupportedFormat('Not supported: %s' % emit_format)
+
+    def emit(self, frame, compress=False,
+             metadata={}, snapshot_num=0, **kwargs):
+        """
+
+        :param frame: a frame containing extracted features
+        :param compress:
+        :param metadata:
+        :param snapshot_num:
+        :return: None
+        """
+        self.token_filepath = kwargs.get("token_filepath", "")
+        self.access_group_filepath = kwargs.get("access_group_filepath", "")
+        self.cloudoe_filepath = kwargs.get("cloudoe_filepath", "")
+        self.ssl_verification = kwargs.get("ssl_verification", "")
+
+        iostream = self.format(frame)
+        if compress:
+            proto = self.get_emitter_protocol()
+            raise NotImplementedError(
+                '%s emitter does not support gzip.' % proto
+            )
+        if self.emit_per_line:
+            iostream.seek(0)
+            for line in iostream.readlines():
+                self.post(line, metadata)
+        else:
+            self.post(iostream.getvalue(), metadata)
+
+    '''
+    This function retrieves SAS token information from k8s secrets.
+    The current model of secret deployment in k8s is to mount the
+    'secret' inside the crawler container.
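+    With the default [Options] above this means the following files
+    are expected to be mounted into the container (illustrative
+    layout; the actual paths come from the plugin options):
+        /etc/sas-secrets/token         -> X-Auth-Token header
+        /etc/sas-secrets/access_group  -> access_group request param
+        /etc/sas-secrets/cloudoe       -> Cloud-OE-ID header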
+    '''
+    def get_sas_tokens(self):
+        assert os.path.exists(self.token_filepath)
+        assert os.path.exists(self.access_group_filepath)
+        assert os.path.exists(self.cloudoe_filepath)
+
+        with open(self.access_group_filepath) as fp:
+            access_group = fp.read().rstrip('\n')
+
+        with open(self.cloudoe_filepath) as fp:
+            cloudoe = fp.read().rstrip('\n')
+
+        with open(self.token_filepath) as fp:
+            token = fp.read().rstrip('\n')
+
+        return (token, cloudoe, access_group)
+
+    '''
+    SAS requires the following crawl metadata about the entity
+    being crawled:
+    - timestamp
+    - namespace
+    - features
+    - source type
+    This function parses the crawl metadata feature and
+    extracts this information.
+    '''
+    def __parse_crawl_metadata(self, content=''):
+        metadata_str = content.split('\n')[0].split()[2]
+        metadata_json = json.loads(metadata_str)
+        timestamp = metadata_json.get('timestamp', '')
+        namespace = metadata_json.get('namespace', '')
+        features = metadata_json.get('features', '')
+        system_type = metadata_json.get('system_type', '')
+
+        return (namespace, timestamp, features, system_type)
+
+    def post(self, content='', metadata={}):
+        (namespace, timestamp, features, system_type) =\
+            self.__parse_crawl_metadata(content)
+        (token, cloudoe, access_group) = self.get_sas_tokens()
+        headers = {'content-type': 'application/csv'}
+        headers.update({'Cloud-OE-ID': cloudoe})
+        headers.update({'X-Auth-Token': token})
+
+        params = {}
+        params.update({'access_group': access_group})
+        params.update({'namespace': namespace})
+        params.update({'features': features})
+        params.update({'timestamp': timestamp})
+        params.update({'source_type': system_type})
+
+        self.url = self.url.replace('sas:', 'https:')
+
+        verify = True
+        if self.ssl_verification == "False":
+            verify = False
+
+        for attempt in range(self.max_retries):
+            try:
+                response = requests.post(self.url, headers=headers,
+                                         params=params,
+                                         data=content, verify=verify)
+            except requests.exceptions.ChunkedEncodingError as e:
+                logger.exception(e)
+                logger.error(
+                    "POST to %s resulted in exception (attempt %d of %d), "
+                    "Exiting."
% (self.url, attempt + 1, self.max_retries)) + break + except requests.exceptions.RequestException as e: + logger.exception(e) + logger.error( + "POST to %s resulted in exception (attempt %d of %d)" % + (self.url, attempt + 1, self.max_retries)) + time.sleep(2.0 ** attempt * 0.1) + continue + if response.status_code != requests.codes.ok: + logger.error("POST to %s resulted in status code %s: %s " + "(attempt %d of %d)" % + (self.url, str(response.status_code), + response.text, attempt + 1, self.max_retries)) + time.sleep(2.0 ** attempt * 0.1) + else: + break diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/stdout_emitter.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/stdout_emitter.plugin new file mode 100644 index 00000000..d00b6a34 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/stdout_emitter.plugin @@ -0,0 +1,8 @@ +[Core] +Name = Stdout Emitter +Module = stdout_emitter + +[Documentation] +Author = IBM +Version = 0.1 +Description = Plugin to emit frame to console diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/stdout_emitter.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/stdout_emitter.py new file mode 100644 index 00000000..88c52c75 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/emitters/stdout_emitter.py @@ -0,0 +1,40 @@ +import cStringIO +import gzip +import sys + +from iemit_plugin import IEmitter + + +class StdoutEmitter(IEmitter): + + def get_emitter_protocol(self): + return 'stdout' + + def emit(self, frame, compress=False, + metadata={}, snapshot_num=0, **kwargs): + """ + + :param iostream: a CStringIO used to buffer the formatted features. + :param compress: + :param metadata: + :param snapshot_num: + :return: + """ + iostream = self.format(frame) + if self.emit_per_line: + iostream.seek(0) + for line in iostream.readlines(): + self.emit_string(line, compress) + else: + self.emit_string(iostream.getvalue().strip(), compress) + + def emit_string(self, string, compress): + if compress: + tempio = cStringIO.StringIO() + gzip_file = gzip.GzipFile(fileobj=tempio, mode='w') + gzip_file.write(string) + gzip_file.close() + print tempio.getvalue() + else: + print "%s" % string + sys.stdout.flush() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/cloudsight_environment.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/cloudsight_environment.plugin new file mode 100644 index 00000000..e83c226e --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/cloudsight_environment.plugin @@ -0,0 +1,8 @@ +[Core] +Name = Cloudsight Environment +Module = cloudsight_environment + +[Documentation] +Author = IBM +Version = 0.1 +Description = Default environment diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/cloudsight_environment.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/cloudsight_environment.py new file mode 100644 index 00000000..c2a73560 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/cloudsight_environment.py @@ -0,0 +1,42 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +import os +import logging +import copy + +from runtime_environment import IRuntimeEnvironment + +logger = logging.getLogger('crawlutils') + + +class CloudsightEnvironment(IRuntimeEnvironment): + name = 'cloudsight' + + def get_environment_name(self): + return 
self.name
+
+    def get_container_namespace(self, long_id, options):
+        assert isinstance(long_id, (str, unicode)), 'long_id is not a string'
+        assert 'name' in options and 'host_namespace' in options
+        name = options['name']
+        name = (name if len(name) > 0 else long_id[:12])
+        name = (name[1:] if name[0] == '/' else name)
+        return options['host_namespace'] + '/' + name
+
+    def get_container_log_file_list(self, long_id, options):
+        assert isinstance(long_id, (str, unicode)), 'long_id is not a string'
+        assert 'container_logs' in options
+        container_logs = copy.deepcopy(options['container_logs'])
+        # iterate over a copy so removing entries does not skip any
+        for log in list(container_logs):
+            name = log['name']
+            if not os.path.isabs(name) or '..' in name:
+                container_logs.remove(log)
+                logger.warning(
+                    'User provided a log file path that is not absolute '
+                    'or contains "..": %s' % name)
+        return container_logs
+
+    def get_container_log_prefix(self, long_id, options):
+        assert isinstance(long_id, (str, unicode)), 'long_id is not a string'
+        return self.get_container_namespace(long_id, options)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/kubernetes_environment.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/kubernetes_environment.plugin
new file mode 100644
index 00000000..400cd125
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/kubernetes_environment.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = Kubernetes Environment
+Module = kubernetes_environment
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Kubernetes environment
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/kubernetes_environment.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/kubernetes_environment.py
new file mode 100644
index 00000000..5a38cf6c
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/environments/kubernetes_environment.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+import logging
+import copy
+
+from runtime_environment import IRuntimeEnvironment
+from utils.dockerutils import exec_dockerinspect
+
+logger = logging.getLogger('crawlutils')
+
+META_CONFIG = 'Config'
+META_LABELS = 'Labels'
+META_UUID = 'Id'
+META_HOSTNAME = 'Hostname'
+
+K8S_NS_LABEL = "io.kubernetes.pod.namespace"
+K8S_POD_LABEL = "io.kubernetes.pod.name"
+K8S_CONTAINER_NAME_LABEL = "io.kubernetes.container.name"
+
+CRAWLER_NAMESPACE_FORMAT = "{K8S_NS}/{K8S_POD}/{K8S_CONT_NAME}/{K8S_CONT_ID}"
+
+
+class KubernetesEnvironment(IRuntimeEnvironment):
+    name = 'kubernetes'
+
+    def get_environment_name(self):
+        return self.name
+
+    def get_container_namespace(self, long_id, options):
+        assert isinstance(long_id, (str, unicode)), 'long_id is not a string'
+        crawler_k8s_ns = ""
+        container_meta = exec_dockerinspect(long_id)
+        try:
+            labels = container_meta.get(META_CONFIG).get(META_LABELS)
+            if labels:
+                crawler_k8s_ns = CRAWLER_NAMESPACE_FORMAT.format(
+                    K8S_NS=labels.get(K8S_NS_LABEL, ""),
+                    K8S_POD=labels.get(K8S_POD_LABEL, ""),
+                    K8S_CONT_NAME=labels.get(K8S_CONTAINER_NAME_LABEL, ""),
+                    K8S_CONT_ID=long_id)
+        except (KeyError, AttributeError):
+            # .get(META_CONFIG) returns None when the key is absent, so
+            # the chained .get() raises AttributeError rather than KeyError
+            logger.error('Error retrieving container labels for: %s' %
+                         long_id)
+
+        return crawler_k8s_ns
+
+    def get_container_log_file_list(self, long_id, options):
+        assert isinstance(long_id, (str, unicode)), 'long_id is not a string'
+        assert 'container_logs' in options
+        container_logs = copy.deepcopy(options['container_logs'])
+        # iterate over a copy so removing entries does not skip any
+        for log in list(container_logs):
+            name = log['name']
+            if not os.path.isabs(name) or '..' in name:
+                container_logs.remove(log)
+                logger.warning(
+                    'User provided a log file path that is not absolute '
+                    'or contains "..": %s' % name)
+        return container_logs
+
+    def get_container_log_prefix(self, long_id, options):
+        assert isinstance(long_id, (str, unicode)), 'long_id is not a string'
+        assert 'name' in options and 'host_namespace' in options
+        name = options['name']
+        name = (name if len(name) > 0 else long_id[:12])
+        name = (name[1:] if name[0] == '/' else name)
+        return options['host_namespace'] + '/' + name
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/sahil.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/sahil.py
new file mode 100644
index 00000000..aa2dc124
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/sahil.py
@@ -0,0 +1,11 @@
+import subprocess
+
+proc = subprocess.Popen(
+    ['python', '-c', 'import pkg_resources; pkgs = [ (p.key, p.version) for p in pkg_resources.working_set]; print pkgs'],
+    #['sh', '-c', 'pip list'],
+    stdout=subprocess.PIPE,
+    stderr=subprocess.PIPE)
+output, err = proc.communicate()
+
+if output:
+    print output
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/c b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/c
new file mode 100644
index 0000000000000000000000000000000000000000..b4d2a4459eb4f333cde633f9354aeebdcd4f83e8
GIT binary patch
literal 120
[base85 data garbled; the rest of this binary hunk plus the header and
opening lines of the cpu_container_crawler.py diff are missing, and the
text resumes inside its crawl() method]
+            if usage_percent > 100.0:
+                usage_percent = 100.0
+            idle = 100.0 - usage_percent
+
+            # Approximation 1
+
+            user_plus_sys_hz = cpu_user_system['user'] \
+                + cpu_user_system['system']
+            if user_plus_sys_hz == 0:
+                # Fake value to avoid divide by zero.
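+                # Any small non-zero value works here: the counters are
+                # only used for the user/system ratio below. E.g. (numbers
+                # assumed) usage_percent=50 with user=300 and system=100
+                # jiffies splits into user = 50 * 300/400 = 37.5 and
+                # system = 50 * 100/400 = 12.5.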
+ user_plus_sys_hz = 0.1 + user = usage_percent * (cpu_user_system['user'] / + user_plus_sys_hz) + system = usage_percent * (cpu_user_system['system'] / + user_plus_sys_hz) + + # Approximation 2 + + nice = host_cpu_feature[index][1] + wait = host_cpu_feature[index][3] + interrupt = host_cpu_feature[index][5] + steal = host_cpu_feature[index][6] + feature_key = '{0}-{1}'.format('cpu', index) + feature_attributes = CpuFeature( + idle, + nice, + user, + wait, + system, + interrupt, + steal, + usage_percent, + ) + yield (feature_key, feature_attributes, 'cpu') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_host_crawler.plugin new file mode 100644 index 00000000..f993c9ca --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_host_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = cpu_host +Module = cpu_host_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Cpu crawling function for hosts" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_host_crawler.py new file mode 100644 index 00000000..449d1595 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_host_crawler.py @@ -0,0 +1,31 @@ +import logging + +import psutil + +from icrawl_plugin import IHostCrawler +from utils.features import CpuFeature + +logger = logging.getLogger('crawlutils') + + +class CpuHostCrawler(IHostCrawler): + + def get_feature(self): + return 'cpu' + + def crawl(self, **kwargs): + logger.debug('Crawling %s' % (self.get_feature())) + + for (idx, cpu) in enumerate(psutil.cpu_times_percent(percpu=True)): + feature_attributes = CpuFeature( + cpu.idle, + cpu.nice, + cpu.user, + cpu.iowait, + cpu.system, + cpu.irq, + cpu.steal, + 100 - int(cpu.idle), + ) + feature_key = '{0}-{1}'.format('cpu', idx) + yield (feature_key, feature_attributes, 'cpu') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_vm_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_vm_crawler.plugin new file mode 100644 index 00000000..cf30eb4e --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_vm_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = cpu_vm +Module = cpu_vm_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Cpu crawling function for VMs" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_vm_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_vm_crawler.py new file mode 100644 index 00000000..042aa0d2 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_vm_crawler.py @@ -0,0 +1,20 @@ +from icrawl_plugin import IVMCrawler +import logging + +# External dependencies that must be pip install'ed separately + +try: + import psvmi +except ImportError: + psvmi = None + +logger = logging.getLogger('crawlutils') + + +class cpu_vm_crawler(IVMCrawler): + + def get_feature(self): + return 'cpu' + + def crawl(self, vm_desc, **kwargs): + raise NotImplementedError('Unsupported crawl mode') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/ctprobe_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/ctprobe_container_crawler.plugin new file mode 100644 index 
00000000..1d6b8ae6 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/ctprobe_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = ctprobe_container +Module = ctprobe_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = Crawling function for containers to start conntrackprobe diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/ctprobe_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/ctprobe_container_crawler.py new file mode 100644 index 00000000..5f3864fa --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/ctprobe_container_crawler.py @@ -0,0 +1,438 @@ + +import errno +import glob +import json +import logging +import os +import pwd +import signal +import time + +from collections import namedtuple + +import netifaces +import psutil +import utils.dockerutils +import requests_unixsocket + +from icrawl_plugin import IContainerCrawler +from utils.ethtool import ethtool_get_peer_ifindex +from utils.namespace import run_as_another_namespace +from utils.process_utils import start_child +from utils.socket_utils import if_indextoname + +logger = logging.getLogger('crawlutils') + +PeerInterface = namedtuple('PeerInterface', ['peer_ifindex', 'ip_addresses']) +NetlinkFeature = namedtuple('NetlinkFeature', ['data']) + +DEFAULT_UNIX_PATH = '/var/run/conntrackprobe.sock' + + +class ConntrackProbeClient(object): + """ Client class for talking to the conntrack probe """ + def __init__(self, sockpath=DEFAULT_UNIX_PATH): + self.sockpath = sockpath + + def add_collector(self, url, ipaddresses, ifname): + """ + Add a collector for the given IP addresses and tied to the given + interface. + """ + code, content = self.send_request('add_collector', + [url, ipaddresses, ifname]) + if code == 200: + return True + else: + raise Exception('HTTP Error %d: %s' % (code, content['error'])) + + def send_request(self, method, params): + req = { + 'jsonrpc': '2.0', + 'method': method, + 'params': params, + 'id': 1, + } + sp = self.sockpath.replace('/', '%2f') + session = requests_unixsocket.Session() + r = session.get('http+unix://%s' % sp, data=json.dumps(req)) + + return r.status_code, json.loads(r.content) + + +class CTProbeContainerCrawler(IContainerCrawler): + # Class for acquiring netlink data via a conntrackprobe + + BIND_ADDRESS = '127.0.0.1' + STALE_FILE_TIMEOUT = 3600 + + # whether the conntrackprobe process has been started + ctprobe_pid = 0 + + # Interfaces for which conntrackprobe has been configured. + # This is a list of interfaces for which conntrackprobe + # has been configured. + ifaces_monitored = [] + + # Since we don't get notified when a container dies + # we need to periodically check the interfaces on the host + # against those in ctprobes_monitored. + next_cleanup = 0 + + def get_feature(self): + return 'ctprobe' + + def setup_outputdir(self, output_dir, uid, gid): + """ + If necessary create or change ownership of the output directory. 
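+        Returns True on success, False if the directory could not be
+        created or chowned to the given uid/gid.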
+ """ + if not os.path.exists(output_dir): + try: + os.makedirs(output_dir) + except Exception as ex: + logger.error('Could not created dir %s : %s' % + (output_dir, str(ex))) + return False + + try: + os.chown(output_dir, uid, gid) + except Exception as ex: + logger.error('Could not change ownership of %s: %s' % + (output_dir, str(ex))) + return False + + return True + + def _get_user(self, **kwargs): + """ Get the deprivileged user we are supposed to use """ + ctprobe_user = kwargs.get('ctprobe_user', 'nobody') + try: + passwd = pwd.getpwnam(ctprobe_user) + return ctprobe_user, passwd + except Exception as ex: + logger.error('Could not find user %s on this system: %s' % + (ctprobe_user, ex)) + return ctprobe_user, None + + def start_ctprobe(self, sockpath=DEFAULT_UNIX_PATH, **kwargs): + """ + Start the conntrackprobe process; + use the bindaddr and port as the collector. + This function returns the process ID of the started process + and an errcode (errno) in case an error was encountered in + the start_child function. + """ + ctprobe_user, passwd = self._get_user(**kwargs) + if not passwd: + return -1, errno.ENOENT + + params = ['conntrackprobe', + '--unix', sockpath, + '--user', ctprobe_user, + '--logfile', '/var/log/conntrackprobe.log'] + + try: + pid, errcode = start_child(params, [], [0, 1, 2], + [], + setsid=False, + max_close_fd=128) + logger.info('Started conntrackprobe as pid %d' % pid) + except Exception: + pid = -1 + errcode = errno.EINVAL + + return pid, errcode + + def terminate_ctprobe(self, pid): + """ + Terminate the conntrackprobe process given its PID + """ + proc = psutil.Process(pid=pid) + if proc and proc.name() == 'conntrackprobe': + os.kill(pid, signal.SIGKILL) + CTProbeContainerCrawler.ifaces_monitored = [] + + def check_ctprobe_alive(self, pid): + """ + Check whether the conntrackprobe with the given PID is still running + Returns True if the conntrackprobe is still alive, false otherwise. + """ + gone = False + try: + proc = psutil.Process(pid=pid) + if not proc or proc.name() != 'conntrackprobe': + gone = True + except Exception: + gone = True + + if gone: + CTProbeContainerCrawler.ifaces_monitored = [] + return not gone + + def configure_ctprobe(self, ipaddresses, ifname, filepath, **kwargs): + """ + Configure the CTprobe to listen for data from the current + container and have it write the data to files specific to + that container. + """ + coll = 'file+json://%s' % filepath + + cpc = ConntrackProbeClient(DEFAULT_UNIX_PATH) + try: + cpc.add_collector(coll, ipaddresses, ifname) + except Exception as ex: + logger.error('Could not add collector: %s' % ex) + return False + + return True + + def start_netlink_collection(self, ifname, ip_addresses, container_id, + **kwargs): + """ + Start the collector and program conntrackprobe. 
Return False in case + of an error, True otherwise + """ + + ctprobe_user, passwd = self._get_user(**kwargs) + if not passwd: + return False + + ctprobe_output_dir = kwargs.get('ctprobe_output_dir', + '/tmp/crawler-ctprobe') + if not self.setup_outputdir(ctprobe_output_dir, passwd.pw_uid, + passwd.pw_gid): + return False + + filepattern = kwargs.get('output_filepattern', + 'conntrack-{ifname}-{timestamp}') + filepath = '%s/%s' % (ctprobe_output_dir, filepattern) + + success = self.configure_ctprobe(ip_addresses, ifname, + filepath, **kwargs) + if not success: + logger.warn('Terminating malfunctioning conntrackprobe') + self.terminate_ctprobe(CTProbeContainerCrawler.ctprobe_pid) + # setting the PID to zero will cause it to be restarted + # upon next crawl() + CTProbeContainerCrawler.ctprobe_pid = 0 + + return success + + def cleanup(self, **kwargs): + """ + Check the available interfaces on the host versus those ones we + have flow probes running and remove those where the interface has + disappeared. We clean up the files with netlink data that were + written for those interfaces. + """ + devices = netifaces.interfaces() + + lst = [] + + for ifname in CTProbeContainerCrawler.ifaces_monitored: + if ifname not in devices: + self.remove_datafiles(ifname, **kwargs) + else: + lst.append(ifname) + + CTProbeContainerCrawler.ifaces_monitored = lst + + @classmethod + def remove_old_files(cls, **kwargs): + """ + Remove all old files that the crawler would never pick up. + """ + now = time.time() + output_dir = kwargs.get('ctprobe_output_dir', '/tmp/crawler-ctprobe') + + for filename in glob.glob('%s/*' % output_dir): + try: + statbuf = os.stat(filename) + # files older than 1 hour are removed + if statbuf.st_mtime + \ + CTProbeContainerCrawler.STALE_FILE_TIMEOUT < now: + os.remove(filename) + except Exception: + continue + + def crawl(self, container_id, avoid_setns=False, **kwargs): + """ + Start flow probe + data collector pairs on the interfaces of + the given container; collect the files that the collector + wrote and return their content. + """ + if not self.check_ctprobe_alive(CTProbeContainerCrawler.ctprobe_pid): + CTProbeContainerCrawler.ctprobe_pid = 0 + + if CTProbeContainerCrawler.ctprobe_pid == 0: + pid, errcode = self.start_ctprobe(**kwargs) + CTProbeContainerCrawler.ctprobe_pid = pid + if pid < 0: + logger.info('Starting conntrackprobe failed: %s' % + errcode) + + if CTProbeContainerCrawler.ctprobe_pid < 0: + return + + if time.time() > CTProbeContainerCrawler.next_cleanup: + # we won't run the cleanup of old files the first time + # but let the crawler do one full round of picking up + # relevant files and then only we do a proper cleaning + if CTProbeContainerCrawler.next_cleanup > 0: + CTProbeContainerCrawler.remove_old_files(**kwargs) + + self.cleanup(**kwargs) + CTProbeContainerCrawler.next_cleanup = time.time() + 30 + + ifnames = self.start_container_ctprobes(container_id, avoid_setns, + **kwargs) + + return self.collect_files(container_id, ifnames, **kwargs) + + def create_filenamepattern(self, **kwargs): + """ + Create the filename pattern for the files where the + socket-datacollector writes its data into. 
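+        Illustrative example, assuming the default options: for
+        ifname=eth0 and timestamp=1510700000 this resolves to
+        /tmp/crawler-ctprobe/conntrack-eth0-1510700000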
+ """ + output_dir = kwargs.get('ctprobe_output_dir', '/tmp/crawler-ctprobe') + filepattern = kwargs.get('output_filepattern', + 'conntrack-{ifname}-{timestamp}') + filenamepattern = os.path.join(output_dir, filepattern) + + return filenamepattern.format(**kwargs) + + def remove_datafiles(self, ifname, **kwargs): + """ + Remove conntrack netlink data files that belong to an interface + """ + kwargs.update({ + 'container-id': '*', + 'ifname': ifname, + 'pid': '*', + 'timestamp': '*', + }) + filenamepattern = self.create_filenamepattern(**kwargs) + + for filename in glob.glob(filenamepattern): + try: + os.remove(filename) + except Exception: + pass + + def collect_files(self, container_id, ifnames, **kwargs): + """ + Collect the files with netlink data for the given interface + and container_id; + remove the files after reading their content + """ + for ifname in ifnames: + kwargs.update({ + 'container-id': container_id, + 'ifname': ifname, + 'pid': '*', + 'timestamp': '*', + }) + filenamepattern = self.create_filenamepattern(**kwargs) + + globs = glob.glob(filenamepattern) + for filename in globs: + # skip over files currently being written + if filename.endswith(".tmp"): + continue + try: + with open(filename, 'r') as f: + raw = f.read() + data = json.loads(raw) + except Exception as ex: + logger.info('Error reading datafile: %s' % ex) + continue + + try: + os.remove(filename) + except Exception as ex: + logger.info('Error removing datafile: %s' % ex) + continue + + feature_key = '{0}-{1}'.format('netlink', ifname) + + yield (feature_key, NetlinkFeature( + data + ), 'netlink') + + def start_container_ctprobes(self, container_id, avoid_setns=False, + **kwargs): + """ + Unless flow probes are already running on the interfaces of the + given container, we start them. 
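+        Interfaces that do not have an IP address assigned yet are
+        skipped and picked up on a later crawl. Returns the list of
+        interface names found for the container.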
+ """ + inspect = utils.dockerutils.exec_dockerinspect(container_id) + state = inspect['State'] + pid = str(state['Pid']) + + if avoid_setns: + raise NotImplementedError('avoidsetns mode not implemented') + + ifnames = [] + + try: + peers = run_as_another_namespace(pid, + ['net'], + self._crawl_in_system) + for peer in peers or []: + # in rare cases we get an interface without IP address + # assigned ot it, yet; we skip it for now and try again + # on the next crawl + if len(peer.ip_addresses) == 0: + continue + + try: + ifname = if_indextoname(peer.peer_ifindex) + except Exception: + continue + + ifnames.append(ifname) + + if ifname not in CTProbeContainerCrawler.ifaces_monitored: + ok = self.start_netlink_collection(ifname, + peer.ip_addresses, + container_id, + **kwargs) + if ok: + CTProbeContainerCrawler.ifaces_monitored.append(ifname) + except Exception as ex: + logger.info("Error: %s" % str(ex)) + + return ifnames + + def get_ifaddresses(self, ifname): + """ + Get the list of IPv4 addresses on an interface name; in + case none could be found yet, wait a bit and try again + """ + + for ctr in range(0, 4): + res = [] + + for data in netifaces.ifaddresses(ifname).get(2, []): + addr = data.get('addr') + if addr: + res.append(addr) + if len(res): + break + time.sleep(0.01) + + return res + + def _crawl_in_system(self): + for ifname in netifaces.interfaces(): + if ifname == 'lo': + continue + + try: + peer_ifindex = ethtool_get_peer_ifindex(ifname) + except Exception: + peer_ifindex = -1 + + if peer_ifindex >= 0: + yield PeerInterface(peer_ifindex, + self.get_ifaddresses(ifname)) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.plugin new file mode 100644 index 00000000..732d9da1 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = disk_container +Module = disk_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Disk crawling function for containers" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.py new file mode 100644 index 00000000..e6d1fa4e --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.py @@ -0,0 +1,24 @@ +import logging + +import utils.dockerutils +from icrawl_plugin import IContainerCrawler +from utils.disk_utils import crawl_disk_partitions +from utils.namespace import run_as_another_namespace, ALL_NAMESPACES + +logger = logging.getLogger('crawlutils') + + +class DiskContainerCrawler(IContainerCrawler): + + def get_feature(self): + return 'disk' + + def crawl(self, container_id, avoid_setns=False, **kwargs): + logger.debug( + 'Crawling %s for container %s' % + (self.get_feature(), container_id)) + + if avoid_setns: + raise NotImplementedError('avoidsetns mode not implemented') + else: # in all other cases, including wrong mode set + return crawl_disk_partitions() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_host_crawler.plugin new file mode 100644 index 00000000..95d243fb --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_host_crawler.plugin @@ -0,0 
+1,8 @@ +[Core] +Name = disk_host +Module = disk_host_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Disk crawling function for hosts" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_host_crawler.py new file mode 100644 index 00000000..4523779b --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_host_crawler.py @@ -0,0 +1,17 @@ +import logging + +from icrawl_plugin import IHostCrawler +from utils.disk_utils import crawl_disk_partitions + +logger = logging.getLogger('crawlutils') + + +class DiskHostCrawler(IHostCrawler): + + def get_feature(self): + return 'disk' + + def crawl(self, **kwargs): + logger.debug('Crawling %s' % (self.get_feature())) + + return crawl_disk_partitions() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_vm_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_vm_crawler.plugin new file mode 100644 index 00000000..65607cda --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_vm_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = disk_vm +Module = disk_vm_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Disk crawling function for VMs" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_vm_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_vm_crawler.py new file mode 100644 index 00000000..a8030f48 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_vm_crawler.py @@ -0,0 +1,19 @@ +from icrawl_plugin import IVMCrawler + +import logging + +try: + import psvmi +except ImportError: + psvmi = None + +logger = logging.getLogger('crawlutils') + + +class disk_vm_crawler(IVMCrawler): + + def get_feature(self): + return 'disk' + + def crawl(self, vm_desc, **kwargs): + raise NotImplementedError() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerhistory_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerhistory_container_crawler.plugin new file mode 100644 index 00000000..9bccdfd7 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerhistory_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = dockerhistory_container +Module = dockerhistory_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Dockerhistory crawling function for containers" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerhistory_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerhistory_container_crawler.py new file mode 100644 index 00000000..8d5be3a1 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerhistory_container_crawler.py @@ -0,0 +1,17 @@ +from utils.dockerutils import exec_docker_history +from icrawl_plugin import IContainerCrawler + +import logging + +logger = logging.getLogger('crawlutils') + + +class DockerhistoryContainerCrawler(IContainerCrawler): + + def get_feature(self): + return 'dockerhistory' + + def crawl(self, container_id, avoid_setns=False, **kwargs): + history = exec_docker_history(container_id) + image_id = history[0]['Id'] + yield (image_id, {'history': history}, 'dockerhistory') diff --git 
a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerinspect_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerinspect_container_crawler.plugin new file mode 100644 index 00000000..d62ffbb6 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerinspect_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = dockerinspect_container +Module = dockerinspect_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Dockerinspect crawling function for containers" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerinspect_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerinspect_container_crawler.py new file mode 100644 index 00000000..0e851660 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerinspect_container_crawler.py @@ -0,0 +1,16 @@ +from utils.dockerutils import exec_dockerinspect +from icrawl_plugin import IContainerCrawler + +import logging + +logger = logging.getLogger('crawlutils') + + +class DockerinspectContainerCrawler(IContainerCrawler): + + def get_feature(self): + return 'dockerinspect' + + def crawl(self, container_id, avoid_setns=False, **kwargs): + inspect = exec_dockerinspect(container_id) + yield (container_id, inspect, 'dockerinspect') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerps_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerps_host_crawler.plugin new file mode 100644 index 00000000..3fca91e6 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerps_host_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = dockerps_host +Module = dockerps_host_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Dockerps crawling function for hosts" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerps_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerps_host_crawler.py new file mode 100644 index 00000000..b92c954a --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/dockerps_host_crawler.py @@ -0,0 +1,27 @@ +import logging + +from icrawl_plugin import IHostCrawler +from utils.dockerutils import exec_dockerps +from utils.features import DockerPSFeature + +logger = logging.getLogger('crawlutils') + + +class DockerpsHostCrawler(IHostCrawler): + + def get_feature(self): + return 'dockerps' + + def crawl(self, **kwargs): + logger.debug('Crawling %s' % (self.get_feature())) + + for inspect in exec_dockerps(): + yield (inspect['Id'], DockerPSFeature._make([ + inspect['State']['Running'], + 0, + inspect['Image'], + [], + inspect['Config']['Cmd'], + inspect['Name'], + inspect['Id'], + ]), 'dockerps') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_container_crawler.plugin new file mode 100644 index 00000000..ad8133f9 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = file_container +Module = file_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = File crawling function for containers diff --git 
a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_container_crawler.py new file mode 100644 index 00000000..49ca65c4 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_container_crawler.py @@ -0,0 +1,54 @@ +import logging + +import utils.dockerutils +import utils.misc +from icrawl_plugin import IContainerCrawler +from utils.file_utils import crawl_files +from utils.namespace import run_as_another_namespace + +logger = logging.getLogger('crawlutils') + + +class FileContainerCrawler(IContainerCrawler): + + def get_feature(self): + return 'file' + + def crawl( + self, + container_id=None, + avoid_setns=False, + root_dir='/', + exclude_dirs=[ + '/boot', + '/dev', + '/proc', + '/sys', + '/mnt', + '/tmp', + '/var/cache', + '/usr/share/man', + '/usr/share/doc', + '/usr/share/mime'], + **kwargs): + inspect = utils.dockerutils.exec_dockerinspect(container_id) + state = inspect['State'] + pid = str(state['Pid']) + logger.debug('Crawling file for container %s' % container_id) + + if avoid_setns: + rootfs_dir = utils.dockerutils.get_docker_container_rootfs_path( + container_id) + exclude_dirs = [utils.misc.join_abs_paths(rootfs_dir, d) + for d in exclude_dirs] + return crawl_files( + root_dir=utils.misc.join_abs_paths(rootfs_dir, root_dir), + exclude_dirs=exclude_dirs, + root_dir_alias=root_dir) + else: # in all other cases, including wrong mode set + return run_as_another_namespace(pid, + ['mnt'], + crawl_files, + root_dir, + exclude_dirs, + None) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_host_crawler.plugin new file mode 100644 index 00000000..08996c84 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_host_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = file_host +Module = file_host_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = File crawling function for the local host diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_host_crawler.py new file mode 100644 index 00000000..c176a72a --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/file_host_crawler.py @@ -0,0 +1,26 @@ +from icrawl_plugin import IHostCrawler +from utils.file_utils import crawl_files + + +class FileHostCrawler(IHostCrawler): + + def get_feature(self): + return 'file' + + def crawl( + self, + root_dir='/', + exclude_dirs=[ + '/boot', + '/dev', + '/proc', + '/sys', + '/mnt', + '/tmp', + '/var/cache', + '/usr/share/man', + '/usr/share/doc', + '/usr/share/mime'], + **kwargs): + return crawl_files(root_dir=root_dir, + exclude_dirs=exclude_dirs) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/fprobe_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/fprobe_container_crawler.plugin new file mode 100644 index 00000000..6ade3330 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/fprobe_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = fprobe_container +Module = fprobe_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = Crawling function for containers to start fprobe diff --git 
a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/fprobe_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/fprobe_container_crawler.py new file mode 100644 index 00000000..ed13898c --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/fprobe_container_crawler.py @@ -0,0 +1,478 @@ + +import errno +import glob +import json +import logging +import os +import pwd +import signal +import time +import pdb + +from collections import namedtuple + +import netifaces +import psutil + +from icrawl_plugin import IContainerCrawler +from utils.ethtool import ethtool_get_peer_ifindex +from utils.misc import get_uint_arg +from utils.process_utils import start_child +from utils.socket_utils import open_udp_port, if_indextoname + +logger = logging.getLogger('crawlutils') + +PeerInterface = namedtuple('PeerInterface', ['peer_ifindex', 'ip_addresses']) +Interface = namedtuple('Interface', ['ifname', 'ip_addresses']) +NetflowFeature = namedtuple('NetflowFeature', ['data']) +fprobe_out_dir = '/home/user1/fprobe-out' + + +class FprobeContainerCrawler(IContainerCrawler): + # Class for acquiring netflow data via a 'flow probe' (softflowd) + + BIND_ADDRESS = '127.0.0.1' + STALE_FILE_TIMEOUT = 3600 + + # Interface where netflow probes were started on. + # This is a map with interface names and softflowd process IDs + fprobes_started = {} + + # Since we don't get notified when a container dies + # we need to periodically check the interfaces on the host + # against those in fprobes_started. + next_cleanup = 0 + + def get_feature(self): + return 'fprobe' + + @staticmethod + def is_my_fprobe(proc): + """ + Check whether the given process is an softflowd that was started by + this plugin. We only recognize softflowd with target address for + the collector being 127.0.0.1.We determine the parameter passed + after '-i', which is the name of the interface. + + Return the interface on which it is running on, None otherwise + """ + if proc.name() == 'softflowd': + params = proc.cmdline() + targetaddress = params[-1].split(':')[0] + if targetaddress == FprobeContainerCrawler.BIND_ADDRESS: + try: + i = params.index('-i') + logger.info('softflowd running on iface %s (pid=%s)' % + (params[i+1], proc.pid)) + return params[i+1] + except: + pass + return None + + @staticmethod + def is_my_fprobe_by_pid(pid): + """ + Given a pid, check whether 'my' flow probe is running there. Return + the name of the interface for which the flow probe is running, + None otherwise. + """ + try: + proc = psutil.Process(pid=pid) + return FprobeContainerCrawler.is_my_fprobe(proc) + except: + return None + + @staticmethod + def interfaces_with_fprobes(): + """ + Get a set of interfaces for which flow probe is already running + We walk the list of processes and check the 'softflowd' ones + and record those that could have been started by this plugin. + """ + res = {} + + for proc in psutil.process_iter(): + ifname = FprobeContainerCrawler.is_my_fprobe(proc) + if ifname: + res[ifname] = proc.pid + + return res + + def setup_outputdir(self, output_dir, uid, gid): + """ + If necessary create or change ownership of the output directory. 
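+        The directory defaults to /home/user1/fprobe-out and is chowned
+        to the deprivileged fprobe user so that the collector can write
+        there.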
+ """ + if not os.path.exists(output_dir): + try: + os.makedirs(output_dir) + except Exception as ex: + logger.error('Could not created dir %s : %s' % + (output_dir, str(ex))) + return False + + try: + os.chown(output_dir, uid, gid) + except Exception as ex: + logger.error('Could not change ownership of %s: %s' % + (output_dir, str(ex))) + return False + + return True + + def start_fprobe(self, ifname, user, bindaddr, port, **kwargs): + """ + Start the flow probe process on the given interface; + use the bindaddr and port as the collector. + This function returns the process ID of the started process + and an errcode (errno) in case an error was encountered in + the start_child function. + """ + maxlife_timeout = get_uint_arg('maxlife_timeout', 30, **kwargs) + netflow_version = get_uint_arg('netflow_version', 5, **kwargs) + if netflow_version not in [1, 5, 9, 10]: + logger.info('Unsupported netflow version was chosen: %d' % + netflow_version) + netflow_version = 5 + + terminate_process = kwargs.get('terminate_fprobe', 'FALSE').upper() + #setsid = terminate_process in ['0', 'FALSE'] + setsid = False + fprobe_bpf = kwargs.get('fprobe_bpf', '') + + params = ['softflowd', + '-i', ifname, + '-v', '%d' % netflow_version, + '-d', + '-t', 'maxlife=%d' % maxlife_timeout, + '-n', '%s:%d' % (bindaddr, port), + '-c', '/home/user1/softflowd.ctl'] + if len(fprobe_bpf.strip()): + params.insert(1, fprobe_bpf) + if netflow_version == 10: + params.insert(1, '-b') + try: + pid, errcode = start_child(params, [], [0, 1, 2], + [signal.SIGCHLD], + setsid=setsid, + max_close_fd=128) + logger.info('Started softflowd as pid %d' % pid) + except: + pid = -1 + errcode = errno.EINVAL + + return pid, errcode + + def start_collector(self, user, socket, output_dir, watch_pid, metadata, + **kwargs): + """ + Start the collector process; have it drop privileges by + switching to the given user; have it write the data to the + output_dir and use a filename pattern given by + filenamepattern; have it watch the process with the given + watch_pid + """ + #pdb.set_trace() + filepattern = kwargs.get('output_filepattern', + 'fprobe-{ifname}-{timestamp}') + + # '--user', user, + params = ['socket-datacollector', + '--sockfd', str(socket.fileno()), + '--dir', output_dir, + '--filepattern', filepattern, + '--watch-pid', str(watch_pid), + '--metadata', json.dumps(metadata), + '--md-filter', 'ip-addresses'] + #params = ['sh', '-c', + # '/usr/bin/python /usr/bin/socket-datacollector --sockfd ' + str(socket.fileno()) + ' --dir ' + output_dir + ' --filepattern ' + filepattern + ' --watch-pid ' + str(watch_pid) + ' --metadata ' + json.dumps(metadata) + ' --md-filter ip-addresses &'] + try: + pid, errcode = start_child(params, [socket.fileno()], [], + [signal.SIGCHLD], + setsid=True, + max_close_fd=128) + logger.info('Started collector as pid %d' % pid) + except: + pid = -1 + errcode = errno.EINVAL + + return pid, errcode + + def start_netflow_collection(self, ifname, ip_addresses, container_id, + **kwargs): + """ + Start the collector and the softflowd. Return None in case of an + error, the process ID of softflowd otherwise + + Note: Fprobe will terminate when the container ends. The collector + watches the softflowd via its PID and will terminate once + softflowd is gone. To enable this, we have to start the + collector after softflowd. Since this is relatively quick, + we won't miss any netflow packets in the collector. 
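+        Illustrative flow, using the helpers in this module:
+        open_udp_port() reserves a local UDP port, start_fprobe()
+        points softflowd at 127.0.0.1:<port>, and start_collector()
+        inherits the bound socket fd and writes the received datagrams
+        under fprobe_output_dir.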
+ """ + #pdb.set_trace() + fprobe_user = kwargs.get('fprobe_user', 'user1') + try: + passwd = pwd.getpwnam(fprobe_user) + except Exception as ex: + logger.error('Could not find user %s on this system: %s' % + (fprobe_user, str(ex))) + return None + + fprobe_output_dir = kwargs.get('fprobe_output_dir', + fprobe_out_dir) + if not self.setup_outputdir(fprobe_output_dir, passwd.pw_uid, + passwd.pw_gid): + return None + + # Find an open port; we pass the port number for the flow probe and the + # file descriptor of the listening socket to the collector + bindaddr = FprobeContainerCrawler.BIND_ADDRESS + sock, port = open_udp_port(bindaddr, 40000, 65535) + if not sock: + return None + + #pdb.set_trace() + fprobe_pid, errcode = self.start_fprobe(ifname, fprobe_user, + bindaddr, port, + **kwargs) + + if fprobe_pid < 0: + logger.error('Could not start softflowd: %s' % + os.strerror(errcode)) + sock.close() + return None + + metadata = { + 'ifname': ifname, + 'ip-addresses': ip_addresses, + } + + #pdb.set_trace() + collector_pid, errcode = self.start_collector(fprobe_user, sock, + fprobe_output_dir, + fprobe_pid, + metadata, + **kwargs) + + sock.close() + + if collector_pid == -1: + logger.error('Could not start collector: %s' % + os.strerror(errcode)) + os.kill(fprobe_pid, signal.SIGKILL) + return None + + return fprobe_pid + + def cleanup(self, **kwargs): + """ + Check the available interfaces on the host versus those ones we + have flow probes running and remove those where the interface has + disappeared. We clean up the files with netflow data that were + written for those interfaces. + """ + devices = netifaces.interfaces() + + for ifname in FprobeContainerCrawler.fprobes_started.keys(): + if ifname not in devices: + del FprobeContainerCrawler.fprobes_started[ifname] + self.remove_datafiles(ifname, **kwargs) + + @classmethod + def remove_old_files(cls, **kwargs): + """ + Remove all old files that the crawler would never pick up. + """ + now = time.time() + output_dir = kwargs.get('fprobe_output_dir', fprobe_out_dir) + + for filename in glob.glob('%s/*' % output_dir): + try: + statbuf = os.stat(filename) + # files older than 1 hour are removed + if statbuf.st_mtime + \ + FprobeContainerCrawler.STALE_FILE_TIMEOUT < now: + os.remove(filename) + except: + continue + + def crawl(self, container_id, avoid_setns=False, **kwargs): + """ + Start flow probe + data collector pairs on the interfaces of + the given container; collect the files that the collector + wrote and return their content. + """ + if time.time() > FprobeContainerCrawler.next_cleanup: + # we won't run the cleanup of old files the first time + # but let the crawler do one full round of picking up + # relevant files and then only we do a proper cleaning + if FprobeContainerCrawler.next_cleanup > 0: + FprobeContainerCrawler.remove_old_files(**kwargs) + + self.cleanup(**kwargs) + FprobeContainerCrawler.next_cleanup = time.time() + 30 + + ifnames = self.start_container_fprobes(container_id, avoid_setns, + **kwargs) + + return self.collect_files(container_id, ifnames, **kwargs) + + def create_filenamepattern(self, **kwargs): + """ + Create the filename pattern for the files where the + socket-datacollector writes its data into. 
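+        remove_datafiles() and collect_files() call this with '*' for
+        the variable fields and glob for the result, e.g.
+        /home/user1/fprobe-out/fprobe-eth0-* (illustrative).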
+ """ + output_dir = kwargs.get('fprobe_output_dir', fprobe_out_dir) + filepattern = kwargs.get('output_filepattern', + 'fprobe-{ifname}-{timestamp}') + filenamepattern = os.path.join(output_dir, filepattern) + + return filenamepattern.format(**kwargs) + + def remove_datafiles(self, ifname, **kwargs): + """ + Remove netflow data files that belong to an interface + """ + kwargs.update({ + 'container-id': '*', + 'ifname': ifname, + 'pid': '*', + 'timestamp': '*', + }) + filenamepattern = self.create_filenamepattern(**kwargs) + + for filename in glob.glob(filenamepattern): + try: + os.remove(filename) + except: + pass + + def collect_files(self, container_id, ifnames, **kwargs): + """ + Collect the files with netflow data for the given interface + and container_id; + remove the files after reading their content + """ + for ifname in ifnames: + kwargs.update({ + 'container-id': container_id, + 'ifname': ifname, + 'pid': '*', + 'timestamp': '*', + }) + filenamepattern = self.create_filenamepattern(**kwargs) + + globs = glob.glob(filenamepattern) + for filename in globs: + # skip over files currently being written + if filename.endswith(".tmp"): + continue + try: + with open(filename, 'r') as f: + raw = f.read() + data = json.loads(raw) + except Exception as ex: + logger.info('Error reading datafile: %s' % str(ex)) + continue + + try: + os.remove(filename) + except Exception as ex: + logger.info('Error removing datafile: %s' % str(ex)) + continue + + feature_key = '{0}-{1}'.format('fprobe', ifname) + + yield (feature_key, NetflowFeature( + data + ), 'fprobe') + + def need_start_fprobe(self, ifname): + """ + Check whether we need to start a flow probe on this interface + We need to start it + - if no softflowd process is running on it. + - if the process id now represents a different process + (pid reused) + """ + pid = FprobeContainerCrawler.fprobes_started.get(ifname) + if not pid: + return True + if ifname != FprobeContainerCrawler.is_my_fprobe_by_pid(pid): + # something different runs under this pid... + del FprobeContainerCrawler.fprobes_started[ifname] + return True + return False + + def start_container_fprobes(self, container_id, avoid_setns=False, + **kwargs): + """ + Unless flow probes are already running on the interfaces of the + given container, we start them. 
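+        Returns the list of interface names found for the container;
+        interfaces without an IP address yet are skipped and retried
+        on the next crawl.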
+ """ + if avoid_setns: + raise NotImplementedError('avoidsetns mode not implemented') + + ifnames = [] + + try: + ifaces = self._crawl_in_system() + for iface in ifaces: + # in rare cases we get an interface without IP address + # assigned ot it, yet; we skip it for now and try again + # on the next crawl + if len(iface.ip_addresses) == 0: + continue + + ifname = iface.ifname + ifnames.append(ifname) + + if self.need_start_fprobe(ifname): + logger.info('Need to start softflowd on %s' % ifname) + pid = self.start_netflow_collection(ifname, + iface.ip_addresses, + container_id, + **kwargs) + if pid: + FprobeContainerCrawler.fprobes_started[ifname] = pid + except Exception as ex: + logger.info("Error: %s" % str(ex)) + + return ifnames + + def get_ifaddresses(self, ifname): + """ + Get the list of IPv4 addresses on an interface name; in + case none could be found yet, wait a bit and try again + """ + + for ctr in range(0, 4): + res = [] + + for data in netifaces.ifaddresses(ifname).get(2, []): + addr = data.get('addr') + if addr: + res.append(addr) + if len(res): + break + time.sleep(0.01) + + return res + + def _crawl_in_system(self): + for ifname in netifaces.interfaces(): + if ifname == 'lo': + continue + + #try: + # peer_ifindex = ethtool_get_peer_ifindex(ifname) + #except Exception: + # peer_ifindex = -1 + + #if peer_ifindex >= 0: + # yield PeerInterface(peer_ifindex, + # self.get_ifaddresses(ifname)) + yield Interface(ifname,self.get_ifaddresses(ifname)) + +FprobeContainerCrawler.fprobes_started = \ + FprobeContainerCrawler.interfaces_with_fprobes() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.plugin new file mode 100644 index 00000000..85396963 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = interface_container +Module = interface_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Interface crawling function for containers" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.py new file mode 100644 index 00000000..a0b42022 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.py @@ -0,0 +1,83 @@ +import logging +import time + +import psutil + +from dockercontainer import DockerContainer +from icrawl_plugin import IContainerCrawler +from utils.features import InterfaceFeature +from utils.namespace import run_as_another_namespace + +logger = logging.getLogger('crawlutils') + + +class InterfaceContainerCrawler(IContainerCrawler): + + """ + To calculate rates like packets sent per second, we need to + store the last measurement. We store it in this dictionary. 
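+    Worked example (numbers assumed): if a crawl sees 3000 packets
+    sent and the previous crawl 30 seconds earlier saw 0, the emitted
+    packets-sent rate is (3000 - 0) / 30 = 100 per second. The first
+    crawl of an interface reports all-zero rates.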
+ """ + + def __init__(self): + self._cached_values = {} + + def _cache_put_value(self, key, value): + self._cached_values[key] = (value, time.time()) + + def _cache_get_value(self, key): + if key in self._cached_values: + return self._cached_values[key] + else: + return None, None + + def _crawl_interface_counters(self): + _counters = psutil.net_io_counters(pernic=True) + for ifname in _counters: + interface = _counters[ifname] + curr_count = [ + interface.bytes_sent, + interface.bytes_recv, + interface.packets_sent, + interface.packets_recv, + interface.errout, + interface.errin, + ] + yield (ifname, curr_count) + + def get_feature(self): + return 'interface' + + def crawl(self, container_id, avoid_setns=False, **kwargs): + + logger.debug( + 'Crawling %s for container %s' % + (self.get_feature(), container_id)) + + if avoid_setns: + raise NotImplementedError('avoidsetns mode not implemented') + else: + interfaces = self._crawl_interface_counters() + + for (ifname, curr_count) in interfaces: + feature_key = '{0}-{1}'.format('interface', ifname) + + cache_key = '{0}-{1}-{2}'.format(container_id, + container_id, + feature_key) + + (prev_count, prev_time) = self._cache_get_value(cache_key) + self._cache_put_value(cache_key, curr_count) + + if prev_count and prev_time: + d = time.time() - prev_time + diff = [(a - b) / d for (a, b) in zip(curr_count, + prev_count)] + else: + + # first measurement + + diff = [0] * 6 + + feature_attributes = InterfaceFeature._make(diff) + + yield (feature_key, feature_attributes, 'interface') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_host_crawler.plugin new file mode 100644 index 00000000..2b30607f --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_host_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = interface_host +Module = interface_host_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Interface crawling function for hosts" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_host_crawler.py new file mode 100644 index 00000000..01cb47d6 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_host_crawler.py @@ -0,0 +1,73 @@ +import logging +import time + +import psutil + +from icrawl_plugin import IHostCrawler +from utils.features import InterfaceFeature + +logger = logging.getLogger('crawlutils') + + +class InterfaceHostCrawler(IHostCrawler): + + """ + To calculate rates like packets sent per second, we need to + store the last measurement. We store it in this dictionary. 
+ """ + + def __init__(self): + self._cached_values = {} + + def _cache_put_value(self, key, value): + self._cached_values[key] = (value, time.time()) + + def _cache_get_value(self, key): + if key in self._cached_values: + return self._cached_values[key] + else: + return None, None + + def _crawl_interface_counters(self): + _counters = psutil.net_io_counters(pernic=True) + for ifname in _counters: + interface = _counters[ifname] + curr_count = [ + interface.bytes_sent, + interface.bytes_recv, + interface.packets_sent, + interface.packets_recv, + interface.errout, + interface.errin, + ] + yield (ifname, curr_count) + + def get_feature(self): + return 'interface' + + def crawl(self, **kwargs): + + logger.debug('Crawling %s' % self.get_feature()) + + interfaces = self._crawl_interface_counters() + + for (ifname, curr_count) in interfaces: + feature_key = '{0}-{1}'.format('interface', ifname) + cache_key = '{0}-{1}'.format('INVM', feature_key) + + (prev_count, prev_time) = self._cache_get_value(cache_key) + self._cache_put_value(cache_key, curr_count) + + if prev_count and prev_time: + d = time.time() - prev_time + diff = [(a - b) / d for (a, b) in zip(curr_count, + prev_count)] + else: + + # first measurement + + diff = [0] * 6 + + feature_attributes = InterfaceFeature._make(diff) + + yield (feature_key, feature_attributes, 'interface') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_vm_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_vm_crawler.plugin new file mode 100644 index 00000000..4c685f98 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_vm_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = interface_vm +Module = interface_vm_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Interface crawling function for VMs" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_vm_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_vm_crawler.py new file mode 100644 index 00000000..69186de8 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_vm_crawler.py @@ -0,0 +1,83 @@ +import logging +import time + +from icrawl_plugin import IVMCrawler +from utils.features import InterfaceFeature + +try: + import psvmi +except ImportError: + psvmi = None + + +logger = logging.getLogger('crawlutils') + + +class InterfaceVmCrawler(IVMCrawler): + + """ + To calculate rates like packets sent per second, we need to + store the last measurement. We store it in this dictionary. 
+ """ + + def __init__(self): + self._cached_values = {} + + def _cache_put_value(self, key, value): + self._cached_values[key] = (value, time.time()) + + def _cache_get_value(self, key): + if key in self._cached_values: + return self._cached_values[key] + else: + return None, None + + def _crawl_interface_counters(self, vm_context): + for interface in psvmi.interface_iter(vm_context): + curr_count = [ + interface.bytes_sent, + interface.bytes_recv, + interface.packets_sent, + interface.packets_recv, + interface.errout, + interface.errin, + ] + yield (interface.ifname, curr_count) + + def get_feature(self): + return 'interface' + + def crawl(self, vm_desc, **kwargs): + + logger.debug('Crawling %s' % self.get_feature()) + + if psvmi is None: + raise NotImplementedError() + else: + (domain_name, kernel_version, distro, arch) = vm_desc + # XXX: this has to be read from some cache instead of + # instead of once per plugin/feature + vm_context = psvmi.context_init( + domain_name, domain_name, kernel_version, distro, arch) + interfaces = self._crawl_interface_counters(vm_context) + + for (interface_name, curr_count) in interfaces: + feature_key = '{0}-{1}'.format('interface', interface_name) + cache_key = '{0}-{1}'.format('OUTVM', feature_key) + + (prev_count, prev_time) = self._cache_get_value(cache_key) + self._cache_put_value(cache_key, curr_count) + + if prev_count and prev_time: + d = time.time() - prev_time + diff = [(a - b) / d for (a, b) in zip(curr_count, + prev_count)] + else: + + # first measurement + + diff = [0] * 6 + + feature_attributes = InterfaceFeature._make(diff) + + yield (feature_key, feature_attributes, 'interface') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.plugin new file mode 100644 index 00000000..eafdc984 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = load_container +Module = load_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "Load crawling function for containers" diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.py new file mode 100644 index 00000000..bcb10111 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.py @@ -0,0 +1,31 @@ +import logging +import os + +from dockercontainer import DockerContainer +from icrawl_plugin import IContainerCrawler +from utils.features import LoadFeature +from utils.namespace import run_as_another_namespace, ALL_NAMESPACES + +logger = logging.getLogger('crawlutils') + + +class LoadContainerCrawler(IContainerCrawler): + + def get_feature(self): + return 'load' + + def crawl_load(self): + load = os.getloadavg() + feature_key = 'load' + feature_attributes = LoadFeature(load[0], load[1], load[1]) + yield (feature_key, feature_attributes, 'load') + + def crawl(self, container_id, avoid_setns=False, **kwargs): + logger.debug( + 'Crawling %s for container %s' % + (self.get_feature(), container_id)) + + if avoid_setns: + raise NotImplementedError() + else: # in all other cases, including wrong mode set + return self.crawl_load() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_host_crawler.plugin 
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_host_crawler.plugin
new file mode 100644
index 00000000..05571103
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_host_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = load_host
+Module = load_host_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "Load crawling function for hosts"
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_host_crawler.py
new file mode 100644
index 00000000..24fcd531
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_host_crawler.py
@@ -0,0 +1,24 @@
+import logging
+import os
+
+from icrawl_plugin import IHostCrawler
+from utils.features import LoadFeature
+
+logger = logging.getLogger('crawlutils')
+
+
+class LoadHostCrawler(IHostCrawler):
+
+    def get_feature(self):
+        return 'load'
+
+    def crawl_load(self):
+        load = os.getloadavg()
+        feature_key = 'load'
+        feature_attributes = LoadFeature(load[0], load[1], load[2])
+        yield (feature_key, feature_attributes, 'load')
+
+    def crawl(self, **kwargs):
+        logger.debug('Crawling %s' % (self.get_feature()))
+
+        return self.crawl_load()
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_vm_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_vm_crawler.plugin
new file mode 100644
index 00000000..38c932eb
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_vm_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = load_vm
+Module = load_vm_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "Load crawling function for VMs"
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_vm_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_vm_crawler.py
new file mode 100644
index 00000000..49b2dbbc
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_vm_crawler.py
@@ -0,0 +1,18 @@
+from icrawl_plugin import IVMCrawler
+import logging
+
+try:
+    import psvmi
+except ImportError:
+    psvmi = None
+
+logger = logging.getLogger('crawlutils')
+
+
+class load_vm_crawler(IVMCrawler):
+
+    def get_feature(self):
+        return 'load'
+
+    def crawl(self, vm_desc, **kwargs):
+        raise NotImplementedError()
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.plugin
new file mode 100644
index 00000000..9bd753fe
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = memory_container
+Module = memory_container_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "Memory crawling function for containers"
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py
new file mode 100644
index 00000000..038a58b8
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py
@@ -0,0 +1,67 @@
+import logging
+import os
+import psutil
+
+from dockercontainer import DockerContainer
+from icrawl_plugin import IContainerCrawler
+from utils.features import MemoryFeature
+
+logger = logging.getLogger('crawlutils')
+
+
+class MemoryContainerCrawler(IContainerCrawler):
+
+    def get_feature(self):
+        return 'memory'
+
+    def _get_cgroup_dir(self, devlist=[]):
+        for dev in devlist:
+            paths = [os.path.join('/cgroup/', dev),
+                     os.path.join('/sys/fs/cgroup/', dev)]
+            for path in paths:
+                if os.path.ismount(path):
+                    return path
+
+            # Try getting the mount point from /proc/mounts
+            for l in open('/proc/mounts', 'r'):
+                _type, mnt, _, _, _, _ = l.split(' ')
+                if _type == 'cgroup' and mnt.endswith('cgroup/' + dev):
+                    return mnt
+
+        raise ValueError('Cannot find the cgroup dir')
+
+    def get_memory_cgroup_path(self, node='memory.stat'):
+        return os.path.join(self._get_cgroup_dir(['memory']), node)
+
+    def crawl(self, container_id, avoid_setns=False, **kwargs):
+
+        used = buffered = cached = free = 'unknown'
+        with open(self.get_memory_cgroup_path('memory.stat'), 'r') as f:
+            for line in f:
+                (key, value) = line.strip().split(' ')
+                if key == 'total_cache':
+                    cached = int(value)
+                if key == 'total_active_file':
+                    buffered = int(value)
+
+        with open(self.get_memory_cgroup_path(
+                'memory.limit_in_bytes'), 'r') as f:
+            limit = int(f.readline().strip())
+
+        with open(self.get_memory_cgroup_path(
+                'memory.usage_in_bytes'), 'r') as f:
+            used = int(f.readline().strip())
+
+        host_free = psutil.virtual_memory().free
+        container_total = used + min(host_free, limit - used)
+        free = container_total - used
+
+        if 'unknown' not in [used, free] and (free + used) > 0:
+            util_percentage = float(used) / (free + used) * 100.0
+        else:
+            util_percentage = 'unknown'
+
+        return [('memory', MemoryFeature(used, buffered,
+                                         cached, free, util_percentage),
+                 'memory')]
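+
+# Worked example (made-up numbers) of the accounting in crawl() above: the
+# memory a container can still use is bounded both by its cgroup limit and
+# by what the host can actually hand out.
+if __name__ == '__main__':
+    used = 200 * 1024 * 1024        # memory.usage_in_bytes
+    limit = 1024 * 1024 * 1024      # memory.limit_in_bytes
+    host_free = 300 * 1024 * 1024   # psutil.virtual_memory().free
+
+    container_total = used + min(host_free, limit - used)
+    free = container_total - used   # capped by host_free here
+    util_percentage = float(used) / (free + used) * 100.0
+    print((free, util_percentage))  # -> (314572800, 40.0)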
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_host_crawler.plugin
new file mode 100644
index 00000000..5cd3d585
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_host_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = memory_host
+Module = memory_host_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "Memory crawling function for hosts"
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_host_crawler.py
new file mode 100644
index 00000000..4454c7ee
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_host_crawler.py
@@ -0,0 +1,29 @@
+import logging
+
+import psutil
+
+from icrawl_plugin import IHostCrawler
+from utils.features import MemoryFeature
+
+logger = logging.getLogger('crawlutils')
+
+
+class MemoryHostCrawler(IHostCrawler):
+
+    def get_feature(self):
+        return 'memory'
+
+    def crawl(self, **kwargs):
+        logger.debug('Crawling %s' % (self.get_feature()))
+
+        vm = psutil.virtual_memory()
+
+        if (vm.free + vm.used) > 0:
+            util_percentage = float(vm.used) / (vm.free + vm.used) * 100.0
+        else:
+            util_percentage = 'unknown'
+
+        feature_attributes = MemoryFeature(vm.used, vm.buffers, vm.cached,
+                                           vm.free, util_percentage)
+
+        return [('memory', feature_attributes, 'memory')]
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_vm_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_vm_crawler.plugin
new file mode 100644
index 00000000..8f851230
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_vm_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = memory_vm
+Module = memory_vm_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "Memory crawling function for VMs"
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_vm_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_vm_crawler.py
new file mode 100644
index 00000000..b1eb3a56
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_vm_crawler.py
@@ -0,0 +1,37 @@
+import logging
+
+from icrawl_plugin import IVMCrawler
+from utils.features import MemoryFeature
+
+try:
+    import psvmi
+except ImportError:
+    psvmi = None
+
+logger = logging.getLogger('crawlutils')
+
+
+class MemoryVmCrawler(IVMCrawler):
+
+    def get_feature(self):
+        return 'memory'
+
+    def crawl(self, vm_desc, **kwargs):
+        if psvmi is None:
+            raise NotImplementedError()
+        else:
+            (domain_name, kernel_version, distro, arch) = vm_desc
+            # XXX: this has to be read from some cache instead of
+            # being initialized once per plugin/feature
+            vm_context = psvmi.context_init(
+                domain_name, domain_name, kernel_version, distro, arch)
+
+            sysmem = psvmi.system_memory_info(vm_context)
+            feature_attributes = MemoryFeature(
+                sysmem.memory_used,
+                sysmem.memory_buffered,
+                sysmem.memory_cached,
+                sysmem.memory_free,
+                (sysmem.memory_used * 100 / (sysmem.memory_used +
+                                             sysmem.memory_free)))
+            return [('memory', feature_attributes, 'memory')]
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.plugin
new file mode 100644
index 00000000..7a16da31
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = metric_container
+Module = metric_container_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "Metric crawling function for containers"
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.py
new file mode 100644
index 00000000..96495fcf
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.py
@@ -0,0 +1,24 @@
+import logging
+
+import utils.dockerutils
+from icrawl_plugin import IContainerCrawler
+from utils.metric_utils import crawl_metrics
+from utils.namespace import run_as_another_namespace, ALL_NAMESPACES
+
+logger = logging.getLogger('crawlutils')
+
+
+class MetricContainerCrawler(IContainerCrawler):
+
+    def get_feature(self):
+        return 'metric'
+
+    def crawl(self, container_id, avoid_setns=False, **kwargs):
+        logger.debug(
+            'Crawling %s for container %s' %
+            (self.get_feature(), container_id))
+
+        if avoid_setns:
+            raise NotImplementedError('avoidsetns mode not implemented')
+        else:  # in all other cases, including wrong mode set
+            return list(crawl_metrics())
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_host_crawler.plugin
new file mode 100644
index 00000000..2bf62970
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_host_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = metric_host
+Module = metric_host_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "Metric crawling function for hosts"
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_host_crawler.py
new file mode 100644
index 00000000..336a3628
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_host_crawler.py
@@ -0,0 +1,17 @@
+import logging
+
+from icrawl_plugin import IHostCrawler
+from utils.metric_utils import crawl_metrics
+
+logger = logging.getLogger('crawlutils')
+
+
+class MetricHostCrawler(IHostCrawler):
+
+    def get_feature(self):
+        return 'metric'
+
+    def crawl(self, **kwargs):
+        logger.debug('Crawling %s' % (self.get_feature()))
+
+        return crawl_metrics()
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_vm_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_vm_crawler.plugin
new file mode 100644
index 00000000..39e39f18
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_vm_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = metric_vm
+Module = metric_vm_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "Metric crawling function for VMs"
+ """ + + def __init__(self): + self._cached_values = {} + + def _cache_put_value(self, key, value): + self._cached_values[key] = (value, time.time()) + + def _cache_get_value(self, key): + if key in self._cached_values: + return self._cached_values[key] + else: + return None, None + + def _crawl_metrics_cpu_percent(self, process): + p = process + cpu_percent = 0 + + feature_key = '{0}-{1}'.format('process', p.ident()) + cache_key = '{0}-{1}'.format('OUTVM', feature_key) + + curr_proc_cpu_time, curr_sys_cpu_time = p.get_cpu_times() + + (cputimeList, timestamp) = self._cache_get_value(cache_key) + self._cache_put_value( + cache_key, [curr_proc_cpu_time, curr_sys_cpu_time]) + + if cputimeList is not None: + prev_proc_cpu_time = cputimeList[0] + prev_sys_cpu_time = cputimeList[1] + + if prev_proc_cpu_time and prev_sys_cpu_time: + if curr_proc_cpu_time == -1 or prev_proc_cpu_time == -1: + cpu_percent = -1 # unsupported for this VM + else: + if curr_sys_cpu_time == prev_sys_cpu_time: + cpu_percent = 0 + else: + cpu_percent = (float(curr_proc_cpu_time - + prev_proc_cpu_time) * 100 / + float(curr_sys_cpu_time - + prev_sys_cpu_time)) + + return cpu_percent + + def crawl(self, vm_desc, **kwargs): + + created_since = -1 + logger.debug('Crawling Metrics') + + if psvmi is None: + raise NotImplementedError() + else: + (domain_name, kernel_version, distro, arch) = vm_desc + # XXX: this has to be read from some cache instead of + # instead of once per plugin/feature + vm_context = psvmi.context_init( + domain_name, domain_name, kernel_version, distro, arch) + list = psvmi.process_iter(vm_context) + + for p in list: + create_time = ( + p.create_time() if hasattr( + p.create_time, + '__call__') else p.create_time) + if create_time <= created_since: + continue + + name = (p.name() if hasattr(p.name, '__call__' + ) else p.name) + pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid) + status = (p.status() if hasattr(p.status, '__call__' + ) else p.status) + if status == psutil.STATUS_ZOMBIE: + continue + username = ( + p.username() if hasattr( + p.username, + '__call__') else p.username) + meminfo = ( + p.get_memory_info() if hasattr( + p.get_memory_info, + '__call__') else p.memory_info) + ioinfo = ( + p.get_io_counters() if hasattr( + p.get_io_counters, + '__call__') else p.io_counters) + + cpu_percent = self._crawl_metrics_cpu_percent(p) + + memory_percent = ( + p.get_memory_percent() if hasattr( + p.get_memory_percent, + '__call__') else p.memory_percent) + + feature_key = '{0}/{1}'.format(name, pid) + yield (feature_key, MetricFeature( + round(cpu_percent, 2), + round(memory_percent, 2), + name, + pid, + ioinfo.read_bytes, + meminfo.rss, + str(status), + username, + meminfo.vms, + ioinfo.write_bytes, + ), 'metric') + + def get_feature(self): + return 'metric' diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.plugin new file mode 100644 index 00000000..a99ea6fc --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.plugin @@ -0,0 +1,10 @@ +[Core] +Name = os_container +Module = os_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = "OS crawling function for containers. Takes following optional arguments: + crawl_mode = {MOUNPOINT, OUTCONTAINER}. 
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.plugin
new file mode 100644
index 00000000..a99ea6fc
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.plugin
@@ -0,0 +1,10 @@
+[Core]
+Name = os_container
+Module = os_container_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = "OS crawling function for containers. Takes the following optional arguments:
+    crawl_mode = {MOUNTPOINT, OUTCONTAINER}. The former uses the docker rootfs, the latter setns()"
+
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py
new file mode 100644
index 00000000..903e0ec7
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py
@@ -0,0 +1,27 @@
+import logging
+import os
+import utils.dockerutils
+from icrawl_plugin import IContainerCrawler
+from utils.namespace import run_as_another_namespace, ALL_NAMESPACES
+from utils.os_utils import crawl_os, crawl_os_mountpoint
+
+logger = logging.getLogger('crawlutils')
+
+
+class OSContainerCrawler(IContainerCrawler):
+
+    def get_feature(self):
+        return 'os'
+
+    def crawl(self, container_id, avoid_setns=False, **kwargs):
+        logger.debug('Crawling OS for container %s' % container_id)
+
+        if avoid_setns:
+            return crawl_os_mountpoint('/rootfs_local')
+        else:  # in all other cases, including wrong mode set
+            real_root = os.open('/', os.O_RDONLY)
+            os.chroot('/rootfs_local')
+            os_info = crawl_os()
+            os.fchdir(real_root)
+            os.chroot('.')
+            return os_info
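+
+# The open('/') / chroot / fchdir / chroot('.') sequence in crawl() above is
+# the usual "enter the container rootfs, work, escape back" pattern. A
+# hedged, reusable sketch (the helper name is ours, not part of this
+# module's API); the try/finally ensures we escape even if the crawl fails:
+import contextlib
+
+
+@contextlib.contextmanager
+def chrooted(rootfs):
+    real_root = os.open('/', os.O_RDONLY)
+    os.chroot(rootfs)
+    try:
+        yield
+    finally:
+        os.fchdir(real_root)
+        os.chroot('.')
+        os.close(real_root)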
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_host_crawler.plugin
new file mode 100644
index 00000000..2c2b7125
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_host_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = os_host
+Module = os_host_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = OS crawling function for the local host
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_host_crawler.py
new file mode 100644
index 00000000..552d0b8f
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_host_crawler.py
@@ -0,0 +1,14 @@
+from icrawl_plugin import IHostCrawler
+from utils.os_utils import crawl_os, crawl_os_mountpoint
+
+
+class OSHostCrawler(IHostCrawler):
+
+    def get_feature(self):
+        return 'os'
+
+    def crawl(self, root_dir='/', **kwargs):
+        if root_dir == '/':
+            return crawl_os()
+        else:
+            return crawl_os_mountpoint(root_dir)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_vm_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_vm_crawler.plugin
new file mode 100644
index 00000000..92db3d40
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_vm_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = os_vm
+Module = os_vm_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = OS crawling function for VMs
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_vm_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_vm_crawler.py
new file mode 100644
index 00000000..bda8a380
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_vm_crawler.py
@@ -0,0 +1,41 @@
+import logging
+
+from icrawl_plugin import IVMCrawler
+from utils.features import OSFeature
+
+# External dependencies that must be pip install'ed separately
+
+try:
+    import psvmi
+except ImportError:
+    psvmi = None
+
+logger = logging.getLogger('crawlutils')
+
+
+class os_vm_crawler(IVMCrawler):
+
+    def get_feature(self):
+        return 'os'
+
+    def crawl(self, vm_desc, **kwargs):
+        if psvmi is None:
+            raise NotImplementedError()
+        else:
+            (domain_name, kernel_version, distro, arch) = vm_desc
+            # XXX: not good, context_init was being done once per VM in the
+            # previous monolithic model, now it's once per plugin/feature
+            vm_context = psvmi.context_init(
+                domain_name, domain_name, kernel_version, distro, arch)
+            sys = psvmi.system_info(vm_context)
+            feature_attributes = OSFeature(
+                sys.boottime,
+                'unknown',
+                sys.ipaddr,
+                sys.ostype,
+                sys.osversion,
+                sys.osrelease,
+                sys.osplatform
+            )
+            feature_key = sys.ostype
+            return [(feature_key, feature_attributes, 'os')]
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.plugin
new file mode 100644
index 00000000..0335c437
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = package_container
+Module = package_container_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Package crawling function for containers
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py
new file mode 100644
index 00000000..5562ec47
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py
@@ -0,0 +1,48 @@
+import logging
+import os
+
+from icrawl_plugin import IContainerCrawler
+from utils.crawler_exceptions import CrawlError
+from utils.dockerutils import (exec_dockerinspect,
+                               get_docker_container_rootfs_path)
+from utils.misc import join_abs_paths
+from utils.namespace import run_as_another_namespace, ALL_NAMESPACES
+from utils.package_utils import crawl_packages
+
+logger = logging.getLogger('crawlutils')
+
+
+class PackageContainerCrawler(IContainerCrawler):
+
+    def get_feature(self):
+        return 'package'
+
+    def crawl(self, container_id=None, avoid_setns=False,
+              root_dir='/', **kwargs):
+        logger.debug('Crawling packages for container %s' % container_id)
+
+        if avoid_setns:
+            rootfs_dir = '/rootfs_local'
+            return crawl_packages(
+                root_dir=join_abs_paths(rootfs_dir, root_dir),
+                reload_needed=True)
+        else:  # in all other cases, including wrong mode set
+            try:
+                logger.debug('in package plugin')
+                real_root = os.open('/', os.O_RDONLY)
+                os.chroot('/rootfs_local')
+                pkg_list = list(crawl_packages(None, root_dir, 0, False))
+                os.fchdir(real_root)
+                os.chroot('.')
+                return pkg_list
+            except CrawlError:
+
+                # Retry the crawl avoiding the setns() syscall. This is
+                # needed for PPC where we can not jump into the container
+                # and run its apt or rpm commands.
+
+                logger.debug('Got CrawlError in package plugin')
+                rootfs_dir = '/rootfs_local'
+                return crawl_packages(
+                    root_dir=join_abs_paths(rootfs_dir, root_dir),
+                    reload_needed=True)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_host_crawler.plugin
new file mode 100644
index 00000000..b0ad3d74
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_host_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = package_host
+Module = package_host_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Package crawling function for hosts
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_host_crawler.py
new file mode 100644
index 00000000..4460c83b
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_host_crawler.py
@@ -0,0 +1,15 @@
+import logging
+
+from icrawl_plugin import IHostCrawler
+from utils.package_utils import crawl_packages
+
+logger = logging.getLogger('crawlutils')
+
+
+class PackageHostCrawler(IHostCrawler):
+
+    def get_feature(self):
+        return 'package'
+
+    def crawl(self, root_dir='/', **kwargs):
+        return crawl_packages(root_dir=root_dir, reload_needed=False)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.plugin
new file mode 100644
index 00000000..5e3761de
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = process_container
+Module = process_container_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Process crawling function for containers
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.py
new file mode 100644
index 00000000..7a901b9d
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.py
@@ -0,0 +1,97 @@
+import logging
+
+import psutil
+
+from icrawl_plugin import IContainerCrawler
+from utils.features import ProcessFeature
+
+logger = logging.getLogger('crawlutils')
+
+
+class ProcessContainerCrawler(IContainerCrawler):
+
+    def get_feature(self):
+        return 'process'
+
+    def crawl(self, container_id, avoid_setns=False, **kwargs):
+        if avoid_setns:
+            raise NotImplementedError()
+        return self._crawl_in_system()
+
+    def _crawl_in_system(self):
+        created_since = -1
+        for p in psutil.process_iter():
+            create_time = (
+                p.create_time() if hasattr(
+                    p.create_time,
+                    '__call__') else p.create_time)
+            if create_time <= created_since:
+                continue
+            yield self._crawl_single_process(p)
+
+    def _crawl_single_process(self, p):
+        """Returns a ProcessFeature"""
+        create_time = (
+            p.create_time() if hasattr(
+                p.create_time,
+                '__call__') else p.create_time)
+
+        name = (p.name() if hasattr(p.name, '__call__'
+                                    ) else p.name)
+        cmdline = (p.cmdline() if hasattr(p.cmdline, '__call__'
+                                          ) else p.cmdline)
+        pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid)
+        status = (p.status() if hasattr(p.status, '__call__'
+                                        ) else p.status)
+        if status == psutil.STATUS_ZOMBIE:
+            cwd = 'unknown'  # invalid
+        else:
+            try:
+                cwd = (p.cwd() if hasattr(p, 'cwd') and
+                       hasattr(p.cwd, '__call__') else p.getcwd())
+            except Exception:
+                logger.error('Error crawling process %s for cwd'
+                             % pid, exc_info=True)
+                cwd = 'unknown'
+        ppid = (p.ppid() if hasattr(p.ppid, '__call__'
+                                    ) else p.ppid)
+        try:
+            if (hasattr(p, 'num_threads') and
+                    hasattr(p.num_threads, '__call__')):
+                num_threads = p.num_threads()
+            else:
+                num_threads = p.get_num_threads()
+        except Exception:
+            num_threads = 'unknown'
+
+        try:
+            username = (p.username() if hasattr(p, 'username') and
+                        hasattr(p.username, '__call__') else
+                        p.username)
+        except Exception:
+            username = 'unknown'
+
+        if username == 'nobody':
+            return
+
+        openfiles = []
+        try:
+            for f in p.get_open_files():
+                openfiles.append(f.path)
+            openfiles.sort()
+        except psutil.AccessDenied:
+            logger.debug('got psutil.AccessDenied')
+            openfiles = []
+
+        feature_key = '{0}/{1}'.format(name, pid)
+        return (feature_key, ProcessFeature(
+            str(' '.join(cmdline)),
+            create_time,
+            cwd,
+            name,
+            openfiles,
+            pid,
+            ppid,
+            num_threads,
+            username,
+        ), 'process')
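+
+# The repeated "call it if it is a method, read it if it is a property"
+# expressions above exist because psutil renamed accessors across versions
+# (e.g. p.name was a plain attribute before psutil 2.0). A small helper
+# (the name is ours, not psutil API) captures the pattern:
+def _maybe_call(attr):
+    return attr() if hasattr(attr, '__call__') else attr
+
+# e.g.: name = _maybe_call(p.name); pid = _maybe_call(p.pid)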
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.py.org b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.py.org
new file mode 100644
index 00000000..28332d29
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_container_crawler.py.org
@@ -0,0 +1,99 @@
+import logging
+
+import psutil
+
+import utils.dockerutils
+from icrawl_plugin import IContainerCrawler
+from utils.features import ProcessFeature
+from utils.namespace import run_as_another_namespace, ALL_NAMESPACES
+
+logger = logging.getLogger('crawlutils')
+
+
+class ProcessContainerCrawler(IContainerCrawler):
+
+    def get_feature(self):
+        return 'process'
+
+    def crawl(self, container_id, avoid_setns=False, **kwargs):
+        inspect = utils.dockerutils.exec_dockerinspect(container_id)
+        state = inspect['State']
+        pid = str(state['Pid'])
+        logger.debug('Crawling Processes for container %s' % container_id)
+
+        if avoid_setns:
+            raise NotImplementedError()
+
+        return run_as_another_namespace(pid,
+                                        ALL_NAMESPACES,
+                                        self._crawl_in_system)
+
+    def _crawl_in_system(self):
+        created_since = -1
+        for p in psutil.process_iter():
+            create_time = (
+                p.create_time() if hasattr(
+                    p.create_time,
+                    '__call__') else p.create_time)
+            if create_time <= created_since:
+                continue
+            yield self._crawl_single_process(p)
+
+    def _crawl_single_process(self, p):
+        """Returns a ProcessFeature"""
+        create_time = (
+            p.create_time() if hasattr(
+                p.create_time,
+                '__call__') else p.create_time)
+
+        name = (p.name() if hasattr(p.name, '__call__'
+                                    ) else p.name)
+        cmdline = (p.cmdline() if hasattr(p.cmdline, '__call__'
+                                          ) else p.cmdline)
+        pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid)
+        status = (p.status() if hasattr(p.status, '__call__'
+                                        ) else p.status)
+        if status == psutil.STATUS_ZOMBIE:
+            cwd = 'unknown'  # invalid
+        else:
+            try:
+                cwd = (p.cwd() if hasattr(p, 'cwd') and
+                       hasattr(p.cwd, '__call__') else p.getcwd())
+            except Exception:
+                logger.error('Error crawling process %s for cwd'
+                             % pid, exc_info=True)
+                cwd = 'unknown'
+        ppid = (p.ppid() if hasattr(p.ppid, '__call__'
+                                    ) else p.ppid)
+        try:
+            if (hasattr(p, 'num_threads') and
+                    hasattr(p.num_threads, '__call__')):
+                num_threads = p.num_threads()
+            else:
+                num_threads = p.get_num_threads()
+        except:
+            num_threads = 'unknown'
+
+        try:
+            username = (p.username() if hasattr(p, 'username') and
+                        hasattr(p.username, '__call__') else
+                        p.username)
+        except:
+            username = 'unknown'
+
+        openfiles = []
+        for f in p.get_open_files():
+            openfiles.append(f.path)
+        openfiles.sort()
+        feature_key = '{0}/{1}'.format(name, pid)
+        return (feature_key, ProcessFeature(
+            str(' '.join(cmdline)),
+            create_time,
+            cwd,
+            name,
+            openfiles,
+            pid,
+            ppid,
+            num_threads,
+            username,
+        ), 'process')
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_host_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_host_crawler.plugin
new file mode 100644
index 00000000..2c14e8ef
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_host_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = process_host
+Module = process_host_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Process crawling function for the local host
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_host_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_host_crawler.py
new file mode 100644
index 00000000..27714b99
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_host_crawler.py
@@ -0,0 +1,87 @@
+import logging
+
+import psutil
+
+from icrawl_plugin import IHostCrawler
+from utils.features import ProcessFeature
+
+logger = logging.getLogger('crawlutils')
+
+
+class ProcessHostCrawler(IHostCrawler):
+
+    def get_feature(self):
+        return 'process'
+
+    def crawl(self, **kwargs):
+        return self._crawl_in_system()
+
+    def _crawl_in_system(self):
+        created_since = -1
+        for p in psutil.process_iter():
+            create_time = (
+                p.create_time() if hasattr(
+                    p.create_time,
+                    '__call__') else p.create_time)
+            if create_time <= created_since:
+                continue
+            yield self._crawl_single_process(p)
+
+    def _crawl_single_process(self, p):
+        """Returns a ProcessFeature"""
+        create_time = (
+            p.create_time() if hasattr(
+                p.create_time,
+                '__call__') else p.create_time)
+
+        name = (p.name() if hasattr(p.name, '__call__'
+                                    ) else p.name)
+        cmdline = (p.cmdline() if hasattr(p.cmdline, '__call__'
+                                          ) else p.cmdline)
+        pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid)
+        status = (p.status() if hasattr(p.status, '__call__'
+                                        ) else p.status)
+        if status == psutil.STATUS_ZOMBIE:
+            cwd = 'unknown'  # invalid
+        else:
+            try:
+                cwd = (p.cwd() if hasattr(p, 'cwd') and
+                       hasattr(p.cwd, '__call__') else p.getcwd())
+            except Exception:
+                logger.error('Error crawling process %s for cwd'
+                             % pid, exc_info=True)
+                cwd = 'unknown'
+        ppid = (p.ppid() if hasattr(p.ppid, '__call__'
+                                    ) else p.ppid)
+        try:
+            if (hasattr(p, 'num_threads') and
+                    hasattr(p.num_threads, '__call__')):
+                num_threads = p.num_threads()
+            else:
+                num_threads = p.get_num_threads()
+        except Exception:
+            num_threads = 'unknown'
+
+        try:
+            username = (p.username() if hasattr(p, 'username') and
+                        hasattr(p.username, '__call__') else
+                        p.username)
+        except Exception:
+            username = 'unknown'
+
+        openfiles = []
+        for f in p.get_open_files():
+            openfiles.append(f.path)
+        openfiles.sort()
+        feature_key = '{0}/{1}'.format(name, pid)
+        return (feature_key, ProcessFeature(
+            str(' '.join(cmdline)),
+            create_time,
+            cwd,
+            name,
+            openfiles,
+            pid,
+            ppid,
+            num_threads,
+            username,
+        ), 'process')
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_vm_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_vm_crawler.plugin
new file mode 100644
index 00000000..7ba6c0c0
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_vm_crawler.plugin
@@ -0,0 +1,8 @@
+[Core]
+Name = process_vm
+Module = process_vm_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Process crawling function for VMs
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_vm_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_vm_crawler.py
new file mode 100644
index 00000000..8ee595b7
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/process_vm_crawler.py
@@ -0,0 +1,98 @@
+import logging
+
+import psutil
+
+from icrawl_plugin import IVMCrawler
+from utils.features import ProcessFeature
+
+try:
+    import psvmi
+except ImportError:
+    psvmi = None
+
+logger = logging.getLogger('crawlutils')
+
+
+class process_vm_crawler(IVMCrawler):
+
+    def get_feature(self):
+        return 'process'
+
+    def crawl(self, vm_desc, **kwargs):
+        if psvmi is None:
+            raise NotImplementedError()
+        else:
+            (domain_name, kernel_version, distro, arch) = vm_desc
+            # XXX: this has to be read from some cache instead of
+            # being initialized once per plugin/feature
+            vm_context = psvmi.context_init(
+                domain_name, domain_name, kernel_version, distro, arch)
+
+        created_since = -1
+        for p in psvmi.process_iter(vm_context):
+            create_time = (
+                p.create_time() if hasattr(
+                    p.create_time,
+                    '__call__') else p.create_time)
+            if create_time <= created_since:
+                continue
+            yield self._crawl_single_process(p)
+
+    def _crawl_single_process(self, p):
+        """Returns a ProcessFeature"""
+        create_time = (
+            p.create_time() if hasattr(
+                p.create_time,
+                '__call__') else p.create_time)
+
+        name = (p.name() if hasattr(p.name, '__call__'
+                                    ) else p.name)
+        cmdline = (p.cmdline() if hasattr(p.cmdline, '__call__'
+                                          ) else p.cmdline)
+        pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid)
+        status = (p.status() if hasattr(p.status, '__call__'
+                                        ) else p.status)
+        if status == psutil.STATUS_ZOMBIE:
+            cwd = 'unknown'  # invalid
+        else:
+            try:
+                cwd = (p.cwd() if hasattr(p, 'cwd') and
+                       hasattr(p.cwd, '__call__') else p.getcwd())
+            except Exception:
+                logger.error('Error crawling process %s for cwd'
+                             % pid, exc_info=True)
+                cwd = 'unknown'
+        ppid = (p.ppid() if hasattr(p.ppid, '__call__'
+                                    ) else p.ppid)
+        try:
+            if (hasattr(p, 'num_threads') and
+                    hasattr(p.num_threads, '__call__')):
+                num_threads = p.num_threads()
+            else:
+                num_threads = p.get_num_threads()
+        except Exception:
+            num_threads = 'unknown'
+
+        try:
+            username = (p.username() if hasattr(p, 'username') and
+                        hasattr(p.username, '__call__') else
+                        p.username)
+        except Exception:
+            username = 'unknown'
+
+        openfiles = []
+        for f in p.get_open_files():
+            openfiles.append(f.path)
+        openfiles.sort()
+        feature_key = '{0}/{1}'.format(name, pid)
+        return (feature_key, ProcessFeature(
+            str(' '.join(cmdline)),
+            create_time,
+            cwd,
+            name,
+            openfiles,
+            pid,
+            ppid,
+            num_threads,
+            username,
+        ), 'process')
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.plugin
new file mode 100644
index 00000000..9bf66e80
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.plugin
@@ -0,0 +1,12 @@
+[Core]
+Name = python_pkg
+Module = pythonpackage_container_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Crawler for Python PyPI packages for containers and images
+Format = [(pkg_name, pkg_version)]
+
+[Options]
+avoid_setns = True|False. Default is True.
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py
new file mode 100644
index 00000000..35c4d39c
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py
@@ -0,0 +1,114 @@
+import logging
+import os
+import re
+import subprocess
+
+import utils.dockerutils
+
+from icrawl_plugin import IContainerCrawler
+
+logger = logging.getLogger('crawlutils')
+
+
+class PythonPackageCrawler(IContainerCrawler):
+
+    def get_feature(self):
+        return 'python-package'
+
+    def _crawl_files(self, path, extensions):
+        output = []
+        if os.path.isdir(path):
+            for (root_dirpath, dirs, files) in os.walk(path):
+                output += [
+                    f for ext in extensions for f in files if f.endswith(ext)]
+                output += [
+                    d for ext in extensions for d in dirs if d.endswith(ext)]
+        return output
+
+    def _get_packages_by_extension(self, mountpoint):
+        candidate_paths = [
+            "usr/lib/",
+            "usr/share/",
+            "usr/local/lib/",
+            "usr/local/share/",
+            "usr/local/bundle/",
+            "var/lib/"]
+
+        packages = []
+
+        for path in candidate_paths:
+            path = os.path.join(mountpoint, path)
+            packages += self._crawl_files(path, ['.egg-info', '.dist-info'])
+
+        for pkg in packages:
+            pkg_name = None
+            name_parts = re.match(
+                r'(.*)-([\d\.]*)(\.egg-info|\.dist-info)', pkg)
+            if name_parts is not None:
+                pkg_name = name_parts.group(1)
+                pkg_version = name_parts.group(2)
+            else:
+                name_parts = re.match(r'(.*)(\.egg-info|\.dist-info)', pkg)
+                if name_parts is not None:
+                    pkg_name = name_parts.group(1)
+                    pkg_version = 'unknown'
+                    # TODO: get the version from the 'Version:' field in such
+                    # files, e.g.
+                    # /usr/lib/python2.7/argparse.egg-info: Version: 1.2.1
+            if pkg_name is not None:
+                yield (
+                    pkg_name,
+                    {"pkgname": pkg_name, "pkgversion": pkg_version},
+                    'python-package')
+
+    def _get_packages_by_cmd(self):
+        # pkg_resources.working_set gives better coverage than
+        # pip list, pip freeze or pip.get_installed_distributions(),
+        # but the following throws a child exception from
+        # namespace.py:run_as_another_namespace()
+        # with "ERROR string index out of range",
+        # although it works fine in a standalone python file:
+        # ['python', '-c', 'import pkg_resources; pkgs =
+        #  [ (p.key, p.version) for p in pkg_resources.working_set];
+        #  print pkgs'],
+
+        proc = subprocess.Popen(
+            ['sh', '-c', ' export LC_ALL=C; pip list'],
+            # otherwise pip says locale.Error: unsupported locale setting
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        output, err = proc.communicate()
+
+        if output:
+            pkg_list = output.strip('\n')
+            for pkg in pkg_list.split('\n'):
+                pkg_name = pkg.split()[0]
+                pkg_version = pkg.split()[1][1:-1]
+                yield (
+                    pkg_name,
+                    {"pkgname": pkg_name, "pkgversion": pkg_version},
+                    'python-package')
+
+    def _crawl_without_setns(self, container_id):
+        return self._get_packages_by_extension('/rootfs_local')
+
+    def _crawl_in_system(self):
+        real_root = os.open('/', os.O_RDONLY)
+        os.chroot('/rootfs_local')
+
+        if self.get_packages_generic is True:
+            mountpoint = '/'
+            pkg_list = list(self._get_packages_by_extension(mountpoint))
+        else:
+            pkg_list = list(self._get_packages_by_cmd())
+
+        os.fchdir(real_root)
+        os.chroot('.')
+        return pkg_list
+
+    def crawl(self, container_id, avoid_setns=False, **kwargs):
+
+        if avoid_setns:
+            return self._crawl_without_setns(container_id)
+        else:  # in all other cases, including wrong mode set
+            self.get_packages_generic = False  # can be made an arg to crawl()
+            return self._crawl_in_system()
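+
+# Illustrative check (made-up package name) of the metadata-directory
+# parsing used in _get_packages_by_extension() above:
+if __name__ == '__main__':
+    m = re.match(r'(.*)-([\d\.]*)(\.egg-info|\.dist-info)',
+                 'requests-2.9.1.egg-info')
+    print((m.group(1), m.group(2)))  # -> ('requests', '2.9.1')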
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.plugin
new file mode 100644
index 00000000..d89d3dcf
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.plugin
@@ -0,0 +1,12 @@
+[Core]
+Name = ruby_pkg
+Module = rubypackage_container_crawler
+
+[Documentation]
+Author = IBM
+Version = 0.1
+Description = Crawler for Ruby GEM packages for containers and images
+Format = [(pkg_name, pkg_version)]
+
+[Options]
+avoid_setns = True|False. Default is True.
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py
new file mode 100644
index 00000000..0bbf1c8b
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py
@@ -0,0 +1,90 @@
+import logging
+import os
+import re
+import subprocess
+
+from icrawl_plugin import IContainerCrawler
+
+logger = logging.getLogger('crawlutils')
+
+
+class RubyPackageCrawler(IContainerCrawler):
+
+    def get_feature(self):
+        return 'ruby-package'
+
+    def _crawl_files(self, path, extension):
+        output = []
+        if os.path.isdir(path):
+            for (root_dirpath, dirs, files) in os.walk(path):
+                output += [f for f in files if f.endswith(extension)]
+        return output
+
+    def _get_packages_by_extension(self, mountpoint):
+        candidate_paths = [
+            "usr/lib/",
+            "usr/share/",
+            "usr/local/lib/",
+            "usr/local/share/",
+            "usr/local/bundle/",
+            "var/lib/"]
+
+        packages = []
+
+        for path in candidate_paths:
+            path = os.path.join(mountpoint, path)
+            packages += self._crawl_files(path, ".gemspec")
+
+        for pkg in packages:
+            name_parts = re.match(r'(.*)-([\d\.]*)(\.gemspec)', pkg)
+            if name_parts is not None:
+                pkg_name = name_parts.group(1)
+                pkg_version = name_parts.group(2)
+                yield (
+                    pkg_name,
+                    {"pkgname": pkg_name, "pkgversion": pkg_version},
+                    'ruby-package')
+
+    def _get_packages_by_cmd(self):
+        proc = subprocess.Popen(
+            ['sh', '-c', 'gem list'],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        output, err = proc.communicate()
+
+        if output:
+            pkg_list = output.strip('\n')
+            if pkg_list:
+                for pkg in pkg_list.split('\n'):
+                    pkg_name = pkg.split()[0]
+                    pkg_versions = re.findall(r'[\d\.]+', pkg)
+                    for pkg_version in pkg_versions:
+                        yield (
+                            pkg_name,
+                            {"pkgname": pkg_name, "pkgversion": pkg_version},
+                            'ruby-package')
+
+    def _crawl_without_setns(self, container_id):
+        return self._get_packages_by_extension('/rootfs_local')
+
+    def _crawl_in_system(self):
+        real_root = os.open('/', os.O_RDONLY)
+        os.chroot('/rootfs_local')
+
+        if self.get_packages_generic is True:
+            mountpoint = '/'
+            pkg_list = list(self._get_packages_by_extension(mountpoint))
+        else:
+            pkg_list = list(self._get_packages_by_cmd())
+
+        os.fchdir(real_root)
+        os.chroot('.')
+
+        return pkg_list
+
+    def crawl(self, container_id, avoid_setns=False, **kwargs):
+        if avoid_setns:
+            return self._crawl_without_setns(container_id)
+        else:  # in all other cases, including wrong mode set
+            self.get_packages_generic = False  # can be made an arg to crawl()
+            return self._crawl_in_system()
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py.org b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py.org
new file mode 100644
index 00000000..7cd351da
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py.org
@@ -0,0 +1,94 @@
+import logging
+import os
+import re
+import subprocess
+
+import utils.dockerutils
+
+from icrawl_plugin import IContainerCrawler
+from utils.namespace import run_as_another_namespace, ALL_NAMESPACES
+
+logger = logging.getLogger('crawlutils')
+
+
+class RubyPackageCrawler(IContainerCrawler):
+
+    def get_feature(self):
+        return 'ruby-package'
+
+    def _crawl_files(self, path, extension):
+        output = []
+        if os.path.isdir(path):
+            for (root_dirpath, dirs, files) in os.walk(path):
+                output += [f for f in files if f.endswith(extension)]
+        return output
+
+    def _get_packages_by_extension(self, mountpoint):
+        candidate_paths = [
+            "usr/lib/",
+            "usr/share/",
+            "usr/local/lib/",
+            "usr/local/share/",
+            "usr/local/bundle/",
+            "var/lib/"]
+
+        packages = []
+
+        for path in candidate_paths:
+            path = os.path.join(mountpoint, path)
+            packages += self._crawl_files(path, ".gemspec")
+
+        for pkg in packages:
+            name_parts = re.match(r'(.*)-([\d\.]*)(\.gemspec)', pkg)
+            if name_parts is not None:
+                pkg_name = name_parts.group(1)
+                pkg_version = name_parts.group(2)
+                yield (
+                    pkg_name,
+                    {"pkgname": pkg_name, "pkgversion": pkg_version},
+                    'ruby-package')
+
+    def _get_packages_by_cmd(self):
+        proc = subprocess.Popen(
+            ['sh', '-c', 'gem list'],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        output, err = proc.communicate()
+
+        if output:
+            pkg_list = output.strip('\n')
+            if pkg_list:
+                for pkg in pkg_list.split('\n'):
+                    pkg_name = pkg.split()[0]
+                    pkg_versions = re.findall(r'[\d\.]+', pkg)
+                    for pkg_version in pkg_versions:
+                        yield (
+                            pkg_name,
+                            {"pkgname": pkg_name, "pkgversion": pkg_version},
+                            'ruby-package')
+
+    def _crawl_without_setns(self, container_id):
+        mountpoint = utils.dockerutils.get_docker_container_rootfs_path(
+            container_id)
+        return self._get_packages_by_extension(mountpoint)
+
+    def _crawl_in_system(self):
+        if self.get_packages_generic is True:
+            mountpoint = '/'
+            return self._get_packages_by_extension(mountpoint)
+        else:
+            return self._get_packages_by_cmd()
+
+    def crawl(self, container_id, avoid_setns=False, **kwargs):
+        inspect = utils.dockerutils.exec_dockerinspect(container_id)
+        state = inspect['State']
+        pid = str(state['Pid'])
+        logger.debug('Crawling OS for container %s' % container_id)
+
+        if avoid_setns:
+            return self._crawl_without_setns(container_id)
+        else:  # in all other cases, including wrong mode set
+            self.get_packages_generic = False  # can be made an arg to crawl()
+            return run_as_another_namespace(pid,
+                                            ALL_NAMESPACES,
+                                            self._crawl_in_system)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins_manager.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins_manager.py
new file mode 100644
index 00000000..9425a061
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins_manager.py
@@ -0,0 +1,267 @@
+import logging
+
+from yapsy.PluginManager import PluginManager
+import urlparse
+import config_parser
+from icrawl_plugin import IContainerCrawler, IVMCrawler, IHostCrawler
+from iemit_plugin import IEmitter
+from runtime_environment import IRuntimeEnvironment
+from utils import misc
+from utils.crawler_exceptions import RuntimeEnvironmentPluginNotFound
+
+logger = logging.getLogger('crawlutils')
+
+# default runtime environment: cloudsight and plugins in 'plugins/'
+runtime_env = None
+
+container_crawl_plugins = []
+vm_crawl_plugins = []
+host_crawl_plugins = []
+emitter_plugins = []
+
+# XXX make this a class
+
+
+def get_plugins(
+        category_filter={},
+        plugin_places=['plugins']):
+
+    pm = PluginManager(plugin_info_ext='plugin')
+
+    # Normalize the paths to the location of this file.
+    # XXX-ricarkol: there has to be a better way to do this.
+    plugin_places = [misc.execution_path(x) for x in plugin_places]
+
+    pm.setPluginPlaces(plugin_places)
+    pm.setCategoriesFilter(category_filter)
+    pm.collectPlugins()
+    return pm.getAllPlugins()
+
+
+def get_emitter_plugin_args(plugin, config):
+    plugin_args = {}
+    if plugin.name in config['emitters']:
+        plugin_args = config['emitters'][plugin.name]
+    return plugin_args
+
+
+def load_emitter_plugins(urls=['stdout://'],
+                         format='csv',
+                         plugin_places=['plugins']):
+    category_filter = {"emitter": IEmitter}
+
+    # getting all emitter plugins from crawler/plugins/emitters/*
+    all_emitter_plugins = get_plugins(category_filter, plugin_places)
+
+    # getting enabled emitter plugins from the crawler.conf file
+    conf_enabled_plugins = []
+    config = config_parser.get_config()
+    if 'enabled_emitter_plugins' in config['general']:
+        conf_enabled_plugins = config['general']['enabled_emitter_plugins']
+        if 'ALL' in conf_enabled_plugins:
+            conf_enabled_plugins = [p for p in config['emitters']]
+
+    for plugin in all_emitter_plugins:
+        plugin_obj = plugin.plugin_object
+        found_plugin = False
+        # iterate over CLI provided emitters
+        for url in urls:
+            parsed = urlparse.urlparse(url)
+            proto = parsed.scheme
+            if plugin_obj.get_emitter_protocol() == proto:
+                plugin_args = get_emitter_plugin_args(plugin, config)
+                plugin_obj.init(url, emit_format=format)
+                yield (plugin_obj, plugin_args)
+                found_plugin = True
+        if found_plugin is True:
+            continue
+        # iterate over conf provided emitters
+        if plugin.name in conf_enabled_plugins:
+            plugin_args = get_emitter_plugin_args(plugin, config)
+            plugin_obj.init(url=plugin_args.get('url', 'missing_url'),
+                            emit_format=plugin_args.get(
+                                'format', 'missing_format'))
+            yield (plugin_obj, plugin_args)
+
+    # Note 1: 'same' emitters would either be picked from the CLI
+    # (preference 1) or from crawler.conf (preference 2), not both.
+    # Note 2: this does not allow different 'same' emitters to have
+    # different args.
+    # Note 3: this does not properly process multiple 'same' emitter plugins
+    # inside crawler.conf, e.g.: two 'File Emitters'.
+
+
+def get_emitter_plugins(urls=['stdout://'],
+                        format='csv',
+                        plugin_places=['plugins']):
+    global emitter_plugins
+    if not emitter_plugins:
+        emitter_plugins = list(
+            load_emitter_plugins(urls=urls,
+                                 format=format,
+                                 plugin_places=plugin_places))
+    return emitter_plugins
+
+
+def reload_env_plugin(environment='cloudsight', plugin_places=['plugins']):
+    global runtime_env
+
+    category_filter = {"env": IRuntimeEnvironment}
+    env_plugins = get_plugins(category_filter, plugin_places)
+
+    for plugin in env_plugins:
+        plugin_obj = plugin.plugin_object
+        if plugin_obj.get_environment_name() == environment:
+            runtime_env = plugin_obj
+            break
+
+    if runtime_env is None:
+        raise RuntimeEnvironmentPluginNotFound('Could not find a valid "%s" '
+                                               'environment plugin at %s' %
+                                               (environment, plugin_places))
+    return runtime_env
+
+
+def get_runtime_env_plugin():
+    global runtime_env
+    if not runtime_env:
+        runtime_env = reload_env_plugin()
+ return runtime_env + + +def get_plugin_args(plugin, config, options): + plugin_args = {} + + if plugin.name in config['crawlers']: + plugin_args = config['crawlers'][plugin.name] + if 'avoid_setns' in plugin_args: + plugin_args['avoid_setns'] = plugin_args.as_bool('avoid_setns') + + is_feature_crawler = getattr(plugin.plugin_object, 'get_feature', None) + if is_feature_crawler is not None: + feature = plugin.plugin_object.get_feature() + if feature in options: + for arg in options[feature]: + plugin_args[arg] = options[feature][arg] + # the alternative: plugin_args = options.get(feature) + # might overwrite options from crawler.conf + + try: + if options['avoid_setns'] is True: + plugin_args['avoid_setns'] = options['avoid_setns'] + if options['mountpoint'] != '/': + plugin_args['root_dir'] = options['mountpoint'] + except KeyError as exc: + logger.warning( + 'Can not apply users --options configuration: %s' % exc) + + return plugin_args + + +def load_crawl_plugins( + category_filter={}, + features=['os', 'cpu'], + plugin_places=['plugins'], + options={}): + + crawl_plugins = get_plugins(category_filter, plugin_places) + config = config_parser.get_config() + + enabled_plugins = [] + if 'enabled_plugins' in config['general']: + enabled_plugins = config['general']['enabled_plugins'] + if 'ALL' in enabled_plugins: + enabled_plugins = [p for p in config['crawlers']] + # Reading from 'crawlers' section inside crawler.conf + # Alternatively, 'ALL' can be made to signify + # all crawlers in plugins/* + + for plugin in crawl_plugins: + if ((plugin.name in enabled_plugins) or ( + plugin.plugin_object.get_feature() in features)): + plugin_args = get_plugin_args(plugin, config, options) + yield (plugin.plugin_object, plugin_args) + + +def reload_container_crawl_plugins( + features=['os', 'cpu'], + plugin_places=['plugins'], + options={}): + global container_crawl_plugins + + container_crawl_plugins = list( + load_crawl_plugins( + category_filter={ + "crawler": IContainerCrawler}, + features=features, + plugin_places=plugin_places, + options=options)) + + +def reload_vm_crawl_plugins( + features=['os', 'cpu'], + plugin_places=['plugins'], + options={}): + global vm_crawl_plugins + + vm_crawl_plugins = list( + load_crawl_plugins( + category_filter={ + "crawler": IVMCrawler}, + features=features, + plugin_places=plugin_places, + options=options)) + + +def reload_host_crawl_plugins( + features=['os', 'cpu'], + plugin_places=['plugins'], + options={}): + global host_crawl_plugins + + host_crawl_plugins = list( + load_crawl_plugins( + category_filter={ + "crawler": IHostCrawler}, + features=features, + plugin_places=plugin_places, + options=options)) + + +def get_container_crawl_plugins( + features=[ + 'package', + 'os', + 'process', + 'file', + 'config']): + global container_crawl_plugins + if not container_crawl_plugins: + reload_container_crawl_plugins(features=features) + return container_crawl_plugins + + +def get_vm_crawl_plugins( + features=[ + 'package', + 'os', + 'process', + 'file', + 'config']): + global vm_crawl_plugins + if not vm_crawl_plugins: + reload_vm_crawl_plugins(features=features) + return vm_crawl_plugins + + +def get_host_crawl_plugins( + features=[ + 'package', + 'os', + 'process', + 'file', + 'config']): + global host_crawl_plugins + if not host_crawl_plugins: + reload_host_crawl_plugins(features=features) + return host_crawl_plugins diff --git a/crawler/utils/plugincont/plugincont_img/crawler/runtime_environment.py 
b/crawler/utils/plugincont/plugincont_img/crawler/runtime_environment.py new file mode 100644 index 00000000..14b00d7a --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/runtime_environment.py @@ -0,0 +1,58 @@ +from yapsy.IPlugin import IPlugin + + +class IRuntimeEnvironment(IPlugin): + + """ + Runtime Environment Plugin Interface + + Subclasses of this class can be used to specify environment specific + parameters for the crawls. These include: how to name a container, how to + link the container logs in the host (i.e. the --linkContainerLogs arg). + """ + # TODO-ricarkol: only applies to containers at the moment. + # TODO-ricarkol: options should define an actual explicit list of params. + + def get_environment_name(self): + """Returns a unique string that identifies this environment + """ + raise NotImplementedError() + + def get_container_namespace(self, long_id, options): + """ + Specifies how to create the namespace of a container. This is a string + that uniquely identifies a container instance. The default + implementation, class CloudsightEnvironment, uses + /, but some organizations might prefer something + else like: //. This is done by + implementing the get_container_namespace() method. + + :param long_id: The container ID. + :param options: Dictionary with "options". XXX-ricarkol should define + an actual explicit list of params. + """ + raise NotImplementedError() + + def get_container_log_file_list(self, long_id, options): + """ + Specifies what are the containers logs linked in the host (i.e. the + --linkContainerLogs arg). The default implementation, class + CloudsightEnvironment, uses the list in defaults.py:default_log_files. + + :param long_id: The container ID. + :param options: Dictionary with "options". + """ + raise NotImplementedError() + + def get_container_log_prefix(self, long_id, options): + """ + Specifies where are the containers logs linked in the host (i.e. the + --linkContainerLogs arg). By default, a container log like /log/a.log + is linked to ///log/a.log, but + it might be desirable to specify another way of constructing this path. + This is done by implementing the get_container_log_prefix() function. + + :param long_id: The container ID. + :param options: Dictionary with "options". + """ + raise NotImplementedError() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/__init__.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/config_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/config_utils.py new file mode 100644 index 00000000..9cbe6d3f --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/config_utils.py @@ -0,0 +1,102 @@ +import codecs +import fnmatch +import logging +import os +import re + +import utils.misc +from utils.features import ConfigFeature + +logger = logging.getLogger('crawlutils') + + +def crawl_config_files( + root_dir='/', + exclude_dirs=[], + root_dir_alias=None, + known_config_files=[], + discover_config_files=False, + accessed_since=0 +): + + saved_args = locals() + logger.debug('Crawling config files: %s' % (saved_args)) + + if not os.path.isdir(root_dir): + return + + root_dir_alias = root_dir_alias or root_dir + exclude_dirs = [utils.misc.join_abs_paths(root_dir, d) for d in + exclude_dirs] + exclude_regex = r'|'.join([fnmatch.translate(d) for d in + exclude_dirs]) or r'$.' 
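+    # note: r'$.' is a regex that can never match, so an empty exclude
+    # list effectively excludes nothing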
+ known_config_files[:] = [utils.misc.join_abs_paths(root_dir, f) for f in + known_config_files] + known_config_files[:] = [f for f in known_config_files + if not re.match(exclude_regex, f)] + config_file_set = set() + for fpath in known_config_files: + if os.path.exists(fpath): + lstat = os.lstat(fpath) + if (lstat.st_atime > accessed_since or + lstat.st_ctime > accessed_since): + config_file_set.add(fpath) + + if discover_config_files: + discover_config_file_paths(accessed_since, config_file_set, + exclude_regex, root_dir) + + for fpath in config_file_set: + (_, fname) = os.path.split(fpath) + # realpath sanitizes the path a bit, for example: '//abc/' to '/abc/' + frelpath = os.path.realpath(fpath.replace(root_dir, root_dir_alias, 1)) + with codecs.open(filename=fpath, mode='r', + encoding='utf-8', errors='ignore') as \ + config_file: + + # Encode the contents of config_file as utf-8. + + yield (frelpath, ConfigFeature(fname, + config_file.read(), + frelpath), 'config') + + +def discover_config_file_paths(accessed_since, config_file_set, + exclude_regex, root_dir): + # Walk the directory hierarchy starting at 'root_dir' in BFS + # order looking for config files. + for (root_dirpath, dirs, files) in os.walk(root_dir): + dirs[:] = [os.path.join(root_dirpath, d) for d in + dirs] + dirs[:] = [d for d in dirs + if not re.match(exclude_regex, d)] + files = [os.path.join(root_dirpath, f) for f in + files] + files = [f for f in files + if not re.match(exclude_regex, f)] + for fpath in files: + if os.path.exists(fpath) \ + and _is_config_file(fpath): + lstat = os.lstat(fpath) + if lstat.st_atime > accessed_since \ + or lstat.st_ctime > accessed_since: + config_file_set.add(fpath) + + +def _is_config_file(fpath): + (_, ext) = os.path.splitext(fpath) + if os.path.isfile(fpath) and ext in [ + '.xml', + '.ini', + '.properties', + '.conf', + '.cnf', + '.cfg', + '.cf', + '.config', + '.allow', + '.deny', + '.lst', + ] and os.path.getsize(fpath) <= 204800: + return True + return False diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/connection_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/connection_utils.py new file mode 100644 index 00000000..3550dd65 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/connection_utils.py @@ -0,0 +1,65 @@ +import psutil + +from utils.features import ConnectionFeature + + +def crawl_connections(): + created_since = -1 + + proc_list = psutil.process_iter() + + for p in proc_list: + pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid) + status = (p.status() if hasattr(p.status, '__call__' + ) else p.status) + if status == psutil.STATUS_ZOMBIE: + continue + + create_time = ( + p.create_time() if hasattr( + p.create_time, + '__call__') else p.create_time) + name = (p.name() if hasattr(p.name, '__call__') else p.name) + + if create_time <= created_since: + continue + for conn in p.get_connections(): + yield crawl_single_connection(conn, pid, name) + + +def crawl_single_connection(c, pid, name): + """Returns a ConnectionFeature""" + try: + (localipaddr, localport) = c.laddr[:] + except: + + # Older version of psutil uses local_address instead of + # laddr. + + (localipaddr, localport) = c.local_address[:] + try: + if c.raddr: + (remoteipaddr, remoteport) = c.raddr[:] + else: + (remoteipaddr, remoteport) = (None, None) + except: + + # Older version of psutil uses remote_address instead + # of raddr. 
+
+        if c.remote_address:
+            (remoteipaddr, remoteport) = \
+                c.remote_address[:]
+        else:
+            (remoteipaddr, remoteport) = (None, None)
+    feature_key = '{0}/{1}/{2}'.format(pid,
+                                       localipaddr, localport)
+    return (feature_key, ConnectionFeature(
+        localipaddr,
+        localport,
+        name,
+        pid,
+        remoteipaddr,
+        remoteport,
+        str(c.status),
+    ), 'connection')
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/crawler_exceptions.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/crawler_exceptions.py
new file mode 100644
index 00000000..aef4bc0b
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/crawler_exceptions.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+
+class CrawlError(Exception):
+
+    """Indicates some error during crawling."""
+
+    pass
+
+
+class CrawlTimeoutError(CrawlError):
+
+    """Indicates that a crawl timed out."""
+
+    pass
+
+
+class CrawlUnsupportedPackageManager(CrawlError):
+
+    """Could not detect the package manager."""
+
+    pass
+
+
+class ContainerInvalidEnvironment(Exception):
+
+    """Indicates that the environment can not be applied to the operation."""
+
+    pass
+
+
+class ContainerNonExistent(Exception):
+
+    """The container does not exist."""
+
+    pass
+
+
+class ContainerWithoutCgroups(Exception):
+
+    """Can not find the cgroup node for a container."""
+
+    pass
+
+
+class DockerutilsException(Exception):
+
+    """Exception from the dockerutils module."""
+
+    pass
+
+
+class DockerutilsNoJsonLog(DockerutilsException):
+
+    """Could not find the json log for the container. Most likely because the
+    docker logging driver is not json-file."""
+
+    pass
+
+
+class AlchemyInvalidMetadata(ContainerInvalidEnvironment):
+
+    """Invalid or non-present alchemy metadata file."""
+
+    pass
+
+
+class AlchemyInvalidContainer(ContainerInvalidEnvironment):
+
+    """Invalid alchemy container."""
+
+    pass
+
+
+class RuntimeEnvironmentPluginNotFound(Exception):
+
+    """Invalid or non-present plugin for the given environment."""
+
+    pass
+
+
+class EmitterUnsupportedProtocol(Exception):
+
+    """User requested an unsupported protocol for the frame emission"""
+
+    pass
+
+
+class EmitterUnsupportedFormat(Exception):
+
+    """User requested an unsupported format for the emitted frame"""
+
+    pass
+
+
+class EmitterBadURL(Exception):
+
+    """The emit URL is invalid"""
+
+    pass
+
+
+class EmitterEmitTimeout(Exception):
+
+    """The emit timed out"""
+
+    pass
+
+
+class MTGraphiteInvalidTenant(Exception):
+
+    """Invalid MTGraphite tenant"""
+
+    pass
+
+
+class NamespaceFailedSetns(Exception):
+
+    """Failed to attach (setns) to a namespace."""
+
+    pass
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/disk_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/disk_utils.py
new file mode 100644
index 00000000..c8221cce
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/disk_utils.py
@@ -0,0 +1,21 @@
+import psutil
+
+from utils.features import DiskFeature
+
+
+def crawl_disk_partitions():
+    partitions = []
+    for partition in psutil.disk_partitions(all=True):
+        try:
+            pdiskusage = psutil.disk_usage(partition.mountpoint)
+            partitions.append((partition.mountpoint, DiskFeature(
+                partition.device,
+                100.0 - pdiskusage.percent,
+                partition.fstype,
+                partition.mountpoint,
+                partition.opts,
+                pdiskusage.total,
+            ), 'disk'))
+        except OSError:
+            continue
+    return partitions
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/dockerevent.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/dockerevent.py
new file mode 100644 index 00000000..96083be0 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/dockerevent.py @@ -0,0 +1,23 @@ +""" +Docker container event object +""" + + +class DockerContainerEvent(object): + def __init__(self, contId, imgId, event, etime): + self.contId = contId + self.imgId = imgId + self.event = event + self.eventTime = etime + + def get_containerid(self): + return self.contId + + def get_imgageid(self): + return self.imgId + + def get_event(self): + return self.event + + def get_eventTime(self): + return self.eventTime diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/dockerutils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/dockerutils.py new file mode 100644 index 00000000..d94e85db --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/dockerutils.py @@ -0,0 +1,409 @@ +#!usr/bin/python +# -*- coding: utf-8 -*- +import logging +import os + +import dateutil.parser as dp +import docker +import semantic_version +import itertools +import re + +from utils import misc +from crawler_exceptions import (DockerutilsNoJsonLog, + DockerutilsException) +from timeout_utils import (Timeout, TimeoutError) +from dockerevent import DockerContainerEvent + +# version at which docker image layer organization changed +VERSION_SPEC = semantic_version.Spec('>=1.10.0') + +logger = logging.getLogger('crawlutils') + +SUPPORTED_DRIVERS = ['btrfs', 'devicemapper', 'aufs', 'vfs'] + + +def exec_dockerps(): + """ + Returns a list of docker inspect jsons, one for each container. + + This call executes the `docker inspect` command every time it is invoked. + """ + try: + client = docker.Client( + base_url='unix://var/run/docker.sock', version='auto') + containers = client.containers() + inspect_arr = [] + for container in containers: + inspect = exec_dockerinspect(container['Id']) + inspect_arr.append(inspect) + except docker.errors.DockerException as e: + logger.warning(str(e)) + raise DockerutilsException('Failed to exec dockerps') + + return inspect_arr + + +def exec_docker_history(long_id): + try: + client = docker.Client(base_url='unix://var/run/docker.sock', + version='auto') + image = client.inspect_container(long_id)['Image'] + history = client.history(image) + return history + except docker.errors.DockerException as e: + logger.warning(str(e)) + raise DockerutilsException('Failed to exec dockerhistory') + + +def _reformat_inspect(inspect): + """Fixes some basic issues with the inspect json returned by docker. + """ + # For some reason, Docker inspect sometimes returns the pid in scientific + # notation. 
+ inspect['State']['Pid'] = '%.0f' % float(inspect['State']['Pid']) + + docker_datetime = dp.parse(inspect['Created']) + epoch_seconds = docker_datetime.strftime('%s') + inspect['Created'] = epoch_seconds + + +def exec_dockerinspect(long_id): + try: + client = docker.Client( + base_url='unix://var/run/docker.sock', version='auto') + inspect = client.inspect_container(long_id) + _reformat_inspect(inspect) + except docker.errors.DockerException as e: + logger.warning(str(e)) + raise DockerutilsException('Failed to exec dockerinspect') + + try: + # get the first RepoTag + inspect['RepoTag'] = client.inspect_image( + inspect['Image'])['RepoTags'][0] + except (docker.errors.DockerException, KeyError, IndexError): + inspect['RepoTag'] = '' + + return inspect + + +def _get_docker_storage_driver_using_proc_mounts(): + for l in open('/proc/mounts', 'r'): + _, mnt, _, _, _, _ = l.split(' ') + for driver in SUPPORTED_DRIVERS: + if mnt == '/var/lib/docker/' + driver: + return driver + raise OSError('Could not find the driver in /proc/mounts') + + +def _get_docker_storage_driver(): + """ + We will try several steps in order to ensure that we return + one of the 4 types (btrfs, devicemapper, aufs, vfs). + """ + driver = None + + # Step 1, get it from "docker info" + + try: + client = docker.Client( + base_url='unix://var/run/docker.sock', version='auto') + driver = client.info()['Driver'] + except (docker.errors.DockerException, KeyError): + pass # try to continue with the default of 'devicemapper' + + if driver in SUPPORTED_DRIVERS: + return driver + + # Step 2, get it from /proc/mounts + + try: + driver = _get_docker_storage_driver_using_proc_mounts() + except (OSError, IOError): + logger.debug('Could not read /proc/mounts') + + if driver in SUPPORTED_DRIVERS: + return driver + + # Step 3, we default to "devicemapper" (last resort) + + if driver not in SUPPORTED_DRIVERS: + + driver = 'devicemapper' + + return driver + + +def get_docker_container_json_logs_path(long_id, inspect=None): + """ + Returns the path to a container (with ID=long_id) docker logs file in the + docker host file system. + + There are 2 big potential problems with this: + + 1. This assumes that the docker Logging Driver is `json-file`. Other + drivers are detailed here: + https://docs.docker.com/engine/reference/logging/overview/ + + 2. This is an abstraction violation as we are breaking the Docker + abstraction barrier. But, it is so incredibly useful to do this kind of + introspection that we are willing to pay the price. + """ + # First try is the default location + + path = '/var/lib/docker/containers/%s/%s-json.log' % (long_id, + long_id) + if os.path.isfile(path): + return path + + # Second try is to get docker inspect LogPath + + if not inspect: + inspect = exec_dockerinspect(long_id) + + path = None + try: + path = inspect['LogPath'] + except KeyError: + pass + + if path and os.path.isfile(path): + return path + + # Third try is to guess the LogPath based on the HostnamePath + + path = None + try: + path = inspect['HostnamePath'] + path = os.path.join(os.path.dirname(path), '%s-json.log' + % long_id) + except KeyError: + pass + + if path and os.path.isfile(path): + return path + + raise DockerutilsNoJsonLog( + 'Container %s does not have a json log.' 
% + long_id) + + +def _get_docker_server_version(): + """Run the `docker info` command to get server version + """ + try: + client = docker.Client( + base_url='unix://var/run/docker.sock', version='auto') + return client.version()['Version'] + except (docker.errors.DockerException, KeyError) as e: + logger.warning(str(e)) + raise DockerutilsException('Failed to get the docker version') + + +try: + server_version = _get_docker_server_version() + driver = _get_docker_storage_driver() +except DockerutilsException: + server_version = None + driver = None + + +def _get_container_rootfs_path_dm(long_id, inspect=None): + + if not inspect: + inspect = exec_dockerinspect(long_id) + + pid = str(inspect['State']['Pid']) + + rootfs_path = None + device = None + try: + with open('/proc/' + pid + '/mounts', 'r') as f: + for line in f: + _device, _mountpoint, _, _, _, _ = line.split() + if _mountpoint == '/' and _device != 'rootfs': + device = _device + with open('/proc/mounts', 'r') as f: + for line in f: + _device, _mountpoint, _, _, _, _ = line.split() + if device in line and _mountpoint != '/': + rootfs_path = _mountpoint + break + except IOError as e: + logger.warning(str(e)) + if not rootfs_path or rootfs_path == '/': + raise DockerutilsException('Failed to get rootfs on devicemapper') + + return rootfs_path + '/rootfs' + + +def _fix_version(v): + # removing leading zeroes from docker version + # which are not liked by semantic_version + version_parts = re.match(r'(\d+).(\d+).(\d+)', v) + if version_parts is not None: + fixed_v = '' + for item in version_parts.groups(): + if len(item) > 1 and item.startswith('0'): + item = item[1:] + fixed_v = fixed_v + item + '.' + return fixed_v[:-1] + + +def _get_container_rootfs_path_btrfs(long_id, inspect=None): + + rootfs_path = None + + if VERSION_SPEC.match(semantic_version.Version(_fix_version( + server_version))): + btrfs_path = None + mountid_path = ('/var/lib/docker/image/btrfs/layerdb/mounts/' + + long_id + '/mount-id') + try: + with open(mountid_path, 'r') as f: + btrfs_path = f.read().strip() + except IOError as e: + logger.warning(str(e)) + if not btrfs_path: + raise DockerutilsException('Failed to get rootfs on btrfs') + rootfs_path = '/var/lib/docker/btrfs/subvolumes/' + btrfs_path + else: + btrfs_path = None + try: + for submodule in misc.btrfs_list_subvolumes('/var/lib/docker'): + _, _, _, _, _, _, _, _, mountpoint = submodule + if (long_id in mountpoint) and ('init' not in mountpoint): + btrfs_path = mountpoint + break + except RuntimeError: + pass + if not btrfs_path: + raise DockerutilsException('Failed to get rootfs on btrfs') + rootfs_path = '/var/lib/docker/' + btrfs_path + + return rootfs_path + + +def _get_container_rootfs_path_aufs(long_id, inspect=None): + + rootfs_path = None + + if VERSION_SPEC.match(semantic_version.Version(_fix_version( + server_version))): + aufs_path = None + mountid_path = ('/var/lib/docker/image/aufs/layerdb/mounts/' + + long_id + '/mount-id') + try: + with open(mountid_path, 'r') as f: + aufs_path = f.read().strip() + except IOError as e: + logger.warning(str(e)) + if not aufs_path: + raise DockerutilsException('Failed to get rootfs on aufs') + rootfs_path = '/var/lib/docker/aufs/mnt/' + aufs_path + else: + rootfs_path = None + for _path in ['/var/lib/docker/aufs/mnt/' + long_id, + '/var/lib/docker/aufs/diff/' + long_id]: + if os.path.isdir(_path) and os.listdir(_path): + rootfs_path = _path + break + if not rootfs_path: + raise DockerutilsException('Failed to get rootfs on aufs') + + return rootfs_path + + 
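+# Illustrative sketch (not part of this patch): for docker >= 1.10.0 the
+# btrfs, aufs and vfs branches all perform the same layerdb lookup, which
+# could be factored into a helper along these lines:
+#
+#   def _read_layerdb_mount_id(driver_name, long_id):
+#       # e.g. /var/lib/docker/image/aufs/layerdb/mounts/<id>/mount-id
+#       path = ('/var/lib/docker/image/%s/layerdb/mounts/%s/mount-id'
+#               % (driver_name, long_id))
+#       with open(path) as f:
+#           return f.read().strip()
+
+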
+def _get_container_rootfs_path_vfs(long_id, inspect=None):
+
+    rootfs_path = None
+
+    vfs_path = None
+    mountid_path = ('/var/lib/docker/image/vfs/layerdb/mounts/' +
+                    long_id + '/mount-id')
+    try:
+        with open(mountid_path, 'r') as f:
+            vfs_path = f.read().strip()
+    except IOError as e:
+        logger.warning(str(e))
+    if not vfs_path:
+        raise DockerutilsException('Failed to get rootfs on vfs')
+
+    rootfs_path = '/var/lib/docker/vfs/dir/' + vfs_path
+
+    return rootfs_path
+
+
+def get_docker_container_rootfs_path(long_id, inspect=None):
+    """
+    Returns the path to a container root (with ID=long_id) in the docker host
+    file system.
+
+    This is an abstraction violation as we are breaking the Docker abstraction
+    barrier. But, it is so incredibly useful to do this kind of introspection
+    that we are willing to pay the price.
+
+    FIXME The mount has to be a `shared mount`, otherwise the container
+    rootfs will not be accessible from the host. As an example, in Docker v
+    1.7.1 the daemon is started like this:
+
+        unshare -m -- /usr/bin/docker -d
+
+    This means that for a device mapper driver, whenever the docker daemon
+    mounts a dm device, this mount will only be accessible to the docker
+    daemon and containers.
+    """
+    global server_version
+    global driver
+
+    rootfs_path = None
+
+    if (not server_version) or (not driver):
+        raise DockerutilsException('Unsupported docker storage driver.')
+
+    # should be debug, for now info
+    logger.info('get_docker_container_rootfs_path: long_id=' +
+                long_id + ', driver=' + driver +
+                ', server_version=' + server_version)
+
+    if driver == 'devicemapper':
+        rootfs_path = _get_container_rootfs_path_dm(long_id, inspect)
+    elif driver == 'btrfs':
+        rootfs_path = _get_container_rootfs_path_btrfs(long_id, inspect)
+    elif driver == 'aufs':
+        rootfs_path = _get_container_rootfs_path_aufs(long_id, inspect)
+    elif driver == 'vfs':
+        rootfs_path = _get_container_rootfs_path_vfs(long_id, inspect)
+    else:
+        raise DockerutilsException('Unsupported docker storage driver.')
+
+    return rootfs_path
+
+
+def poll_container_create_events(timeout=0.1):
+    try:
+        client = docker.Client(base_url='unix://var/run/docker.sock',
+                               version='auto')
+        filters = dict()
+        filters['type'] = 'container'
+        filters['event'] = 'start'
+        events = client.events(filters=filters, decode=True)
+        with Timeout(seconds=timeout):
+            # we are expecting a single event
+            event = list(itertools.islice(events, 1))[0]
+
+        containerid = event['id']
+        imageid = event['from']
+        epochtime = event['time']
+        cEvent = DockerContainerEvent(containerid, imageid,
+                                      event['Action'], epochtime)
+        return cEvent
+    except docker.errors.DockerException as e:
+        logger.warning(str(e))
+        raise DockerutilsException('Failed to poll container create events')
+    except TimeoutError:
+        logger.info("Container event timeout")
+        pass
+
+    return None
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/ethtool.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/ethtool.py
new file mode 100644
index 00000000..9a84a667
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/ethtool.py
@@ -0,0 +1,92 @@
+
+import array
+import fcntl
+import socket
+import struct
+
+SIOCETHTOOL = 0x8946
+
+ETHTOOL_GSET = 0x00000001
+ETHTOOL_GSTRINGS = 0x0000001b
+ETHTOOL_GSTATS = 0x0000001d
+ETHTOOL_GSSET_INFO = 0x00000037
+
+ETH_SS_STATS = 1
+
+
+def stripped(name):
+    return "".join(i for i in name if 31 < ord(i) < 127)
+
+
+def ethtool_get_stats(nic):
+    # any AF_INET datagram socket can serve as the handle for the
+    # SIOCETHTOOL ioctls below
+    sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+
ecmd_sset_info = array.array('B', struct.pack('@IIQI', + ETHTOOL_GSSET_INFO, + 0, + 1 << ETH_SS_STATS, + 0)) + ifreq = struct.pack('@16sP16x', nic, ecmd_sset_info.buffer_info()[0]) + try: + fcntl.ioctl(sockfd, SIOCETHTOOL, ifreq) + except IOError as err: + raise err + res = ecmd_sset_info.tostring() + _, _, _, n_stats = struct.unpack('IIQI', res) + + if not n_stats: + return {} + + ecmd_gstrings = array.array('B', struct.pack('@III%ds' % (n_stats * 32), + ETHTOOL_GSTRINGS, + ETH_SS_STATS, + 0, + '\x00' * 32 * n_stats)) + ifreq = struct.pack('@16sP16x', nic, ecmd_gstrings.buffer_info()[0]) + try: + fcntl.ioctl(sockfd, SIOCETHTOOL, ifreq) + except IOError as err: + raise err + + gstrings = ecmd_gstrings.tostring() + name = gstrings[12:32].strip() + + # Get the peer ifindex number + ecmd_gstats = array.array('B', struct.pack('@II%ds' % (n_stats * 8), + ETHTOOL_GSTATS, + ETH_SS_STATS, + '\x00' * 8 * n_stats)) + ifreq = struct.pack('@16sP16x', nic, ecmd_gstats.buffer_info()[0]) + try: + fcntl.ioctl(sockfd, SIOCETHTOOL, ifreq) + except IOError as err: + raise err + + gstats = ecmd_gstats.tostring() + + res = {} + gstrings_idx = 12 + gstats_idx = 8 + + while n_stats > 0: + name = stripped(gstrings[gstrings_idx:gstrings_idx + 32]) + gstrings_idx += 32 + value, = struct.unpack('@Q', gstats[gstats_idx:gstats_idx + 8]) + gstats_idx += 8 + res[name] = value + n_stats -= 1 + + return res + + +def ethtool_get_peer_ifindex(nic): + """ + Get the interface index of the peer device of a veth device. + Returns a positive number in case the peer device's interface + index could be determined, a negative value otherwise. + """ + try: + res = ethtool_get_stats(nic) + return int(res.get('peer_ifindex', -1)) + except: + return -2 diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/features.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/features.py new file mode 100644 index 00000000..d55f2895 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/features.py @@ -0,0 +1,117 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from collections import namedtuple + +OSFeature = namedtuple('OSFeature', [ + 'boottime', + 'uptime', + 'ipaddr', + 'os', + 'os_version', + 'os_kernel', + 'architecture', +]) +FileFeature = namedtuple('FileFeature', [ + 'atime', + 'ctime', + 'gid', + 'linksto', + 'mode', + 'mtime', + 'name', + 'path', + 'size', + 'type', + 'uid', +]) +ConfigFeature = namedtuple('ConfigFeature', ['name', 'content', 'path']) +DiskFeature = namedtuple('DiskFeature', [ + 'partitionname', + 'freepct', + 'fstype', + 'mountpt', + 'mountopts', + 'partitionsize', +]) +ProcessFeature = namedtuple('ProcessFeature', [ + 'cmd', + 'created', + 'cwd', + 'pname', + 'openfiles', + 'pid', + 'ppid', + 'threads', + 'user', +]) +MetricFeature = namedtuple('MetricFeature', [ + 'cpupct', + 'mempct', + 'pname', + 'pid', + 'read', + 'rss', + 'status', + 'user', + 'vms', + 'write', +]) +ConnectionFeature = namedtuple('ConnectionFeature', [ + 'localipaddr', + 'localport', + 'pname', + 'pid', + 'remoteipaddr', + 'remoteport', + 'connstatus', +]) +PackageFeature = namedtuple('PackageFeature', ['installed', 'pkgname', + 'pkgsize', 'pkgversion', + 'pkgarchitecture']) +MemoryFeature = namedtuple('MemoryFeature', [ + 'memory_used', + 'memory_buffered', + 'memory_cached', + 'memory_free', + 'memory_util_percentage' +]) +CpuFeature = namedtuple('CpuFeature', [ + 'cpu_idle', + 'cpu_nice', + 'cpu_user', + 'cpu_wait', + 'cpu_system', + 'cpu_interrupt', + 'cpu_steal', + 'cpu_util', +]) +InterfaceFeature = 
namedtuple('InterfaceFeature', [ + 'if_octets_tx', + 'if_octets_rx', + 'if_packets_tx', + 'if_packets_rx', + 'if_errors_tx', + 'if_errors_rx', +]) +LoadFeature = namedtuple('LoadFeature', ['shortterm', 'midterm', + 'longterm']) +DockerPSFeature = namedtuple('DockerPSFeature', [ + 'Status', + 'Created', + 'Image', + 'Ports', + 'Command', + 'Names', + 'Id', +]) +DockerHistoryFeature = namedtuple('DockerHistoryFeature', ['history']) +ModuleFeature = namedtuple('ModuleFeature', ['name', 'state']) +CpuHwFeature = namedtuple('CpuHwFeature', [ + 'cpu_family', + 'cpu_vendor', + 'cpu_model', + 'cpu_vedor_id', + 'cpu_module_id', + 'cpu_khz', + 'cpu_cache_size_kb', + 'cpu_num_cores']) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/file_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/file_utils.py new file mode 100644 index 00000000..889232df --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/file_utils.py @@ -0,0 +1,164 @@ +import fnmatch +import logging +import os +import re +import stat + +from utils.features import FileFeature + +logger = logging.getLogger('crawlutils') + + +def crawl_files( + root_dir='/', + exclude_dirs=[], + root_dir_alias=None, + accessed_since=0): + + if not os.path.isdir(root_dir): + return + + saved_args = locals() + logger.debug('crawl_files: %s' % (saved_args)) + + assert os.path.isdir(root_dir) + if root_dir_alias is None: + root_dir_alias = root_dir + exclude_dirs = [os.path.join(root_dir, d) for d in + exclude_dirs] + exclude_regex = r'|'.join([fnmatch.translate(d) + for d in exclude_dirs]) or r'$.' + + # walk the directory hierarchy starting at 'root_dir' in BFS + # order + + feature = _crawl_file(root_dir, root_dir, + root_dir_alias) + if feature and (feature.ctime > accessed_since or + feature.atime > accessed_since): + yield (feature.path, feature, 'file') + for (root_dirpath, dirs, files) in os.walk(root_dir): + dirs[:] = [os.path.join(root_dirpath, d) for d in + dirs] + dirs[:] = [d for d in dirs + if not re.match(exclude_regex, d)] + files = [os.path.join(root_dirpath, f) for f in + files] + files = [f for f in files + if not re.match(exclude_regex, f)] + for fpath in files: + feature = _crawl_file(root_dir, fpath, + root_dir_alias) + if feature and (feature.ctime > accessed_since or + feature.atime > accessed_since): + yield (feature.path, feature, 'file') + for fpath in dirs: + feature = _crawl_file(root_dir, fpath, + root_dir_alias) + if feature and (feature.ctime > accessed_since or + feature.atime > accessed_since): + yield (feature.path, feature, 'file') + + +def _filetype(fpath, fperm): + modebit = fperm[0] + ftype = { + 'l': 'link', + '-': 'file', + 'b': 'block', + 'd': 'dir', + 'c': 'char', + 'p': 'pipe', + }.get(modebit) + return ftype + +_filemode_table = ( + ( + (stat.S_IFLNK, 'l'), + (stat.S_IFREG, '-'), + (stat.S_IFBLK, 'b'), + (stat.S_IFDIR, 'd'), + (stat.S_IFCHR, 'c'), + (stat.S_IFIFO, 'p'), + ), + ((stat.S_IRUSR, 'r'), ), + ((stat.S_IWUSR, 'w'), ), + ((stat.S_IXUSR | stat.S_ISUID, 's'), (stat.S_ISUID, 'S'), + (stat.S_IXUSR, 'x')), + ((stat.S_IRGRP, 'r'), ), + ((stat.S_IWGRP, 'w'), ), + ((stat.S_IXGRP | stat.S_ISGID, 's'), (stat.S_ISGID, 'S'), + (stat.S_IXGRP, 'x')), + ((stat.S_IROTH, 'r'), ), + ((stat.S_IWOTH, 'w'), ), + ((stat.S_IXOTH | stat.S_ISVTX, 't'), (stat.S_ISVTX, 'T'), + (stat.S_IXOTH, 'x')), +) + + +def _fileperm(mode): + + # Convert a file's mode to a string of the form '-rwxrwxrwx' + + perm = [] + for table in _filemode_table: + for (bit, char) in table: + if mode & bit == 
bit:
+                perm.append(char)
+                break
+        else:
+            perm.append('-')
+    return ''.join(perm)
+
+
+def _is_executable(fpath):
+    return os.access(fpath, os.X_OK)
+
+# crawl a single file
+
+
+def _crawl_file(
+    root_dir,
+    fpath,
+    root_dir_alias,
+):
+    lstat = os.lstat(fpath)
+    fmode = lstat.st_mode
+    fperm = _fileperm(fmode)
+    ftype = _filetype(fpath, fperm)
+    flinksto = None
+    if ftype == 'link':
+        try:
+
+            # This has to be an absolute path, not a root-relative path
+
+            flinksto = os.readlink(fpath)
+        except:
+            logger.error('Error reading linksto info for file %s'
+                         % fpath, exc_info=True)
+    fgroup = lstat.st_gid
+    fuser = lstat.st_uid
+
+    # This replaces `/<root_dir>/a/b/c` with `/<root_dir_alias>/a/b/c`
+
+    frelpath = os.path.join(root_dir_alias,
+                            os.path.relpath(fpath, root_dir))
+
+    # This converts something like `/.` to `/`
+
+    frelpath = os.path.normpath(frelpath)
+
+    (_, fname) = os.path.split(frelpath)
+    return FileFeature(
+        lstat.st_atime,
+        lstat.st_ctime,
+        fgroup,
+        flinksto,
+        fmode,
+        lstat.st_mtime,
+        fname,
+        frelpath,
+        lstat.st_size,
+        ftype,
+        fuser,
+    )
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/mesos.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/mesos.py
new file mode 100644
index 00000000..6dbcf8c9
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/mesos.py
@@ -0,0 +1,90 @@
+#! /usr/bin/python
+# Copyright 2015 Ray Rodriguez
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
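+
+# Crawls mesos-master metrics; by default fetch_stats() below reads
+# http://localhost:5050/metrics/snapshot.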
+
+import json
+import urllib2
+import logging
+import logging.handlers
+import collections
+import os
+
+logger = None
+PREFIX = "mesos-master"
+MESOS_INSTANCE = ""
+MESOS_HOST = "localhost"
+MESOS_PORT = 5050
+MESOS_VERSION = "0.22.0"
+MESOS_URL = ""
+VERBOSE_LOGGING = False
+
+CONFIGS = []
+
+Stat = collections.namedtuple('Stat', ('type', 'path'))
+
+logger = logging.getLogger('crawlutils')
+
+
+def configure_crawler_mesos(inurl):
+    logger.debug('Mesos url %s' % inurl)
+    CONFIGS.append({
+        'mesos_url': inurl
+    })
+
+
+def fetch_stats(mesos_version):
+    if CONFIGS == []:
+        CONFIGS.append({
+            'mesos_url': 'http://localhost:5050/metrics/snapshot'
+        })
+    logger.debug('connecting to %s' % CONFIGS[0]['mesos_url'])
+    try:
+        result = json.loads(
+            urllib2.urlopen(CONFIGS[0]['mesos_url'], timeout=10).read())
+    except urllib2.URLError:
+        logger.exception('Exception opening mesos url %s',
+                         CONFIGS[0]['mesos_url'])
+        return None
+    logger.debug('mesos_stats %s' % result)
+    return result
+
+
+def setup_logger(logger_name, logfile='crawler.log', process_id=None):
+    _logger = logging.getLogger(logger_name)
+    _logger.setLevel(logging.DEBUG)
+    (logfile_name, logfile_extension) = os.path.splitext(logfile)
+    if process_id is None:
+        fname = logfile
+    else:
+        fname = '{0}-{1}{2}'.format(logfile_name, process_id,
+                                    logfile_extension)
+    h = logging.handlers.RotatingFileHandler(filename=fname,
+                                             maxBytes=10e6, backupCount=1)
+    f = logging.Formatter(
+        '%(asctime)s %(processName)-10s %(levelname)-8s %(message)s')
+    h.setFormatter(f)
+    _logger.addHandler(h)
+
+
+def log_verbose(enabled, msg):
+    if not enabled:
+        return
+    logger.debug('mesos-master plugin [verbose]: %s' % msg)
+
+
+def snapshot_crawler_mesos_frame(inurl='http://localhost:9092'):
+    setup_logger('crawler-mesos', 'crawler-mesos.log')
+    mesos_version = MESOS_VERSION
+    configure_crawler_mesos(inurl)
+
+    return fetch_stats(mesos_version)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/metric_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/metric_utils.py
new file mode 100644
index 00000000..9c08c656
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/metric_utils.py
@@ -0,0 +1,76 @@
+import os
+import psutil
+from collections import namedtuple
+from utils.features import MetricFeature
+
+
+def _crawl_metrics_cpu_percent(process):
+    cpu_percent = (
+        process.get_cpu_percent(
+            interval=0) if hasattr(
+            process.get_cpu_percent,
+            '__call__') else process.cpu_percent)
+    return cpu_percent
+
+
+def crawl_metrics():
+    created_since = -1
+
+    for p in psutil.process_iter():
+        create_time = (
+            p.create_time() if hasattr(
+                p.create_time,
+                '__call__') else p.create_time)
+        if create_time <= created_since:
+            continue
+
+        name = (p.name() if hasattr(p.name, '__call__'
+                                    ) else p.name)
+        pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid)
+        status = (p.status() if hasattr(p.status, '__call__'
+                                        ) else p.status)
+        if status == psutil.STATUS_ZOMBIE:
+            continue
+        username = (
+            p.username() if hasattr(
+                p.username,
+                '__call__') else p.username)
+        meminfo = (
+            p.get_memory_info() if hasattr(
+                p.get_memory_info,
+                '__call__') else p.memory_info)
+        try:
+            ioinfo = (
+                p.get_io_counters() if hasattr(
+                    p.get_io_counters,
+                    '__call__') else p.io_counters)
+        except psutil.AccessDenied:
+            selfpid = os.getpid()
+            if pid != selfpid:
+                # http://lukasz.langa.pl/5/error-opening-file-for-reading-permission-denied/
+                print "got psutil.AccessDenied for pid:", pid
+            ioinfo = namedtuple('ioinfo', ['read_count', 'write_count',
'read_bytes', 'write_bytes']) + ioinfo.read_bytes = 0 + ioinfo.write_bytes = 0 + + cpu_percent = _crawl_metrics_cpu_percent(p) + + memory_percent = ( + p.get_memory_percent() if hasattr( + p.get_memory_percent, + '__call__') else p.memory_percent) + + feature_key = '{0}/{1}'.format(name, pid) + yield (feature_key, MetricFeature( + round(cpu_percent, 2), + round(memory_percent, 2), + name, + pid, + ioinfo.read_bytes, + meminfo.rss, + str(status), + username, + meminfo.vms, + ioinfo.write_bytes, + ), 'metric') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/misc.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/misc.py new file mode 100644 index 00000000..e666c02c --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/misc.py @@ -0,0 +1,253 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import os +import sys +import inspect +import socket +import subprocess +import psutil +import logging +import logging.handlers +import time +import random + +# Additional modules + +# External dependencies that must be pip install'ed separately + +from netifaces import interfaces, ifaddresses, AF_INET + +logger = logging.getLogger('crawlutils') + + +def setup_logger(logger_name, logfile='crawler.log'): + """ + Setup a logger node called logger_name with rotation every 10MBs. + + :param logger_name: logger node + :param logfile: filename for the log + :return: a logger object + """ + _logger = logging.getLogger(logger_name) + _logger.setLevel(logging.INFO) + h = logging.handlers.RotatingFileHandler(filename=logfile, + maxBytes=10e6, backupCount=1) + f = logging.Formatter( + '%(asctime)s %(processName)-10s %(levelname)-8s %(message)s') + h.setFormatter(f) + _logger.addHandler(h) + return _logger + + +def subprocess_run(cmd, ignore_failure=False, shell=True): + """ + Runs cmd_string as a shell command. It returns stdout as a string, and + raises RuntimeError if the return code is not equal to `good_rc`. + + It returns the tuple: (stdout, stderr, returncode) + Can raise AttributeError or RuntimeError: + """ + try: + proc = subprocess.Popen( + cmd, + shell=shell, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = proc.communicate() + rc = proc.returncode + + except OSError as exc: + raise RuntimeError('Failed to run ' + cmd + ': [Errno: %d] ' % + exc.errno + exc.strerror + ' [Exception: ' + + type(exc).__name__ + ']') + if (not ignore_failure) and (rc != 0): + raise RuntimeError('(%s) failed with rc=%s: %s' % + (cmd, rc, err)) + return out + + +def enum(**enums): + return type('Enum', (), enums) + + +def get_process_env(pid=1): + """the environment settings from the processes perpective, + @return C{dict} + """ + + try: + pid = int(pid) + except ValueError: + raise TypeError('pid has to be an integer') + + env = {} + envlist = open('/proc/%s/environ' % pid).read().split('\000') + for e in envlist: + (k, _, v) = e.partition('=') + (k, v) = (k.strip(), v.strip()) + if not k: + continue + env[k] = v + return env + + +def process_is_crawler(pid): + """This is really checking if proc is the current process. 
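+    It does so by comparing the process's cmdline with our own.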
+ """ + try: + pid = int(pid) + except ValueError: + raise TypeError('pid has to be an integer') + + try: + proc = psutil.Process(pid) + cmdline = (proc.cmdline() if hasattr(proc.cmdline, '__call__' + ) else proc.cmdline) + # curr is the crawler process + + curr = psutil.Process(os.getpid()) + curr_cmdline = ( + curr.cmdline() if hasattr( + curr.cmdline, + '__call__') else curr.cmdline) + if cmdline == curr_cmdline: + return True + + # Process not found + return False + except psutil.NoSuchProcess: + # If the process does not exist, then it's definitely not the crawler + return False + except psutil.AccessDenied: + # If we don't have permissions to see that process details, then it can + # not be this process. + return False + + +class NullHandler(logging.Handler): + + def emit(self, record): + pass + + +# try to determine this host's IP address + +def get_host_ipaddr(): + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + s.connect(('www.ibm.com', 9)) + return s.getsockname()[0] + except socket.error: + return socket.gethostname() + finally: + del s + + +def get_host_ip4_addresses(): + ip_list = [] + for interface in interfaces(): + if AF_INET in ifaddresses(interface): + for link in ifaddresses(interface)[AF_INET]: + ip_list.append(link['addr']) + return ip_list + + +# Find the mountpoint of a given path + +def find_mount_point(path): + path = os.path.abspath(path) + while not os.path.ismount(path): + path = os.path.dirname(path) + return path + + +def join_abs_paths(root, appended_root): + """ Join absolute paths: appended_root is appended after root + """ + if not os.path.isabs(appended_root): + appended_root = '/' + appended_root + return os.path.normpath(os.path.join(root, + os.path.relpath(appended_root, '/'))) + + +def is_process_running(pid): + """ Check For the existence of a unix pid. + """ + try: + pid = int(pid) + except ValueError: + raise TypeError('pid has to be an integer') + + try: + os.kill(pid, 0) + except OSError as exc: + if 'not permitted' in str(exc): + return True + return False + else: + return True + + +def execution_path(filename): + # if filename is an absolute path, os.path.join will return filename + return os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))), + filename) + + +def btrfs_list_subvolumes(path): + out = subprocess_run('btrfs subvolume list ' + path) + + for line in out.strip().split('\n'): + submodule = line.split() + if len(submodule) != 9: + raise RuntimeError('Expecting the output of `btrfs subvolume` to' + ' have 9 columns. Received this: %s' % line) + yield submodule + + +def call_with_retries(function, max_retries=10, + exception_types=(Exception), + _args=(), _kwargs={}): + """ + Call `function` with up to `max_retries` retries. A retry is only + performed if the exception thrown is in `exception_types`. + + :param function: Function to be called. + :param max_retries: Max number of retries. For example if retries is 1, + then a failing function will be called twice before exiting with the + latest exception thrown. + :param exception_types: List of exceptions for which `function` will + be retried. + :param _args: List of args passed to the called function. + :param _kwargs: Key value arguments passed to the called function. + :return: Return value of `function`. 
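+
+    Illustrative example (hypothetical usage, retrying a flaky docker call):
+
+        call_with_retries(exec_dockerinspect, max_retries=3,
+                          exception_types=(DockerutilsException,),
+                          _args=(container_id,))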
+    """
+    assert max_retries >= 0
+
+    retries = 0
+    last_exc = Exception('Unknown exception')
+    while retries <= max_retries:
+        try:
+            return function(*_args, **_kwargs)
+        except exception_types as exc:
+            retries += 1
+            wait = 2.0 ** retries * 0.1 + (random.randint(0, 1000) / 1000)
+            time.sleep(wait)
+            last_exc = exc
+    raise last_exc
+
+
+def get_uint_arg(name, default, **kwargs):
+    """
+    Get an unsigned int argument. Return the default value
+    if no parameter with the given name can be found.
+    """
+    try:
+        val = int(kwargs.get(name, default))
+        if val < 0:
+            logger.error('Parameter %s must not be negative' % name)
+            val = default
+        return val
+    except:
+        logger.error('Parameter %s is not an integer' % name)
+        return default
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/mtgraphite.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/mtgraphite.py
new file mode 100644
index 00000000..21277618
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/mtgraphite.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+import logging
+import socket
+import ssl
+import struct
+import time
+import re
+
+from crawler_exceptions import MTGraphiteInvalidTenant
+
+# This code is based upon the Kafka producer/client classes
+
+logger = logging.getLogger('crawlutils')
+
+DEFAULT_SOCKET_TIMEOUT_SECONDS = 120
+
+
+class MTGraphiteClient(object):
+
+    """
+    Client for batching and sending metrics to an MTGraphite
+    (multi-tenant Graphite) endpoint.
+    """
+
+    def __init__(
+        self,
+        host_url,
+        batch_send_every_t=5,
+        batch_send_every_n=1000,
+    ):
+        self.host_url = host_url
+
+        # A MTGraphite URL should look like:
+        # mtgraphite://<host>:<port>/<tenant>:<password>
+
+        regex = re.compile(
+            r'^mtgraphite://([^:/]+):([^:/]+)/([^:/]+):([^:/]+)$')
+        if not regex.match(host_url):
+            raise ValueError('The MTGraphite URL is invalid. It should be:'
+                             ' mtgraphite://<host>:<port>/<tenant>:<password>')
+
+        matches = regex.search(host_url)
+        self.host = matches.group(1)
+        self.port = matches.group(2)
+        self.tenant = matches.group(3)
+        self.password = matches.group(4)
+
+        # create a connection only when we need it, but keep it alive
+
+        self.conn = None
+        self.socket = None
+        self.batch_send_every_n = batch_send_every_n
+        self.batch_send_every_t = batch_send_every_t
+        self.msgset = []
+        self.next_timeout = time.time() + batch_send_every_t
+
+    #
+    # Private API #
+    #
+
+    def _create_identification_message(self, self_identifier):
+        identification_message = """"""
+        identification_message += '1I'
+        identification_message += chr(len(self_identifier))
+        identification_message += self_identifier
+        return identification_message
+
+    def _create_authentication_msg(
+            self,
+            tenant,
+            password,
+            supertenant=True):
+        authentication_message = """"""
+        if supertenant:
+            authentication_message += '2S'
+        else:
+            authentication_message += '2T'
+        authentication_message += chr(len(tenant))
+        authentication_message += tenant
+        authentication_message += \
+            chr(len(password))
+        authentication_message += password
+        return authentication_message
+
+    def _send_and_check_identification_message(self, identification_message):
+        identification_message_sent = self.conn.write(identification_message)
+
+        if identification_message_sent != len(identification_message):
+            logger.warning(
+                'Identification message not sent properly, returned '
+                'len = %d', identification_message_sent)
+            return False
+        else:
+            return True
+
+    def _send_and_check_authentication_message(self, authentication_message):
+        authentication_message_sent = self.conn.write(authentication_message)
+        logger.info(
+            'Sent authentication with mtgraphite, returned length = '
+            '%d' %
authentication_message_sent) + if authentication_message_sent != len(authentication_message): + raise RuntimeError('failed to send tenant/password') + chunk = self.conn.read(6) # Expecting "1A" + code = bytearray(chunk)[:2] + + logger.info('MTGraphite authentication server response of %s' + % code) + if code == '0A': + raise MTGraphiteInvalidTenant('Invalid password') + + def _get_socket(self): + '''Get or create a connection to a broker using host and port''' + if self.conn is not None: + return self.conn + + logger.debug('Creating a new socket with _get_socket()') + while self.conn is None: + try: + self.sequence = 1 # start with 1 as last_ack = 0 + self.socket = socket.socket(socket.AF_INET, + socket.SOCK_STREAM) + self.socket.settimeout(DEFAULT_SOCKET_TIMEOUT_SECONDS) + self.conn = ssl.wrap_socket(self.socket, + cert_reqs=ssl.CERT_NONE) + self.conn.connect((self.host, int(self.port))) + + # We send this identifier message so that the server-side can + # identify this specific crawler in the logs (its behind + # load-balancer so it never sees our source-ip without this). + + self_identifier = str(self.conn.getsockname()[0]) + logger.debug('self_identifier = %s', self_identifier) + identification_message = self._create_identification_message( + self_identifier) + self._send_and_check_identification_message( + identification_message) + + msg = self._create_authentication_msg(self.tenant, + self.password, + supertenant=True) + # We first try with a super tenant account. + try: + self._send_and_check_authentication_message(msg) + except Exception as e: + logger.info("Attempting to log in as tenant") + msg = self._create_authentication_msg(self.tenant, + self.password, + supertenant=False) + self._send_and_check_authentication_message(msg) + return self.conn + + except Exception as e: + logger.exception(e) + if self.conn: + self.conn.close() + self.conn = None + time.sleep(2) # sleep for 2 seconds for now + raise e + + def _write_messages_no_retries(self, msgset): + s = self._get_socket() + messages_string = bytearray('1W') + messages_string.extend(bytearray(struct.pack('!I', + len(msgset)))) + for m in msgset: + if m == msgset[0]: + + # logger.debug the first message + + logger.debug(m.strip()) + messages_string.extend('1M') + messages_string.extend(bytearray(struct.pack('!I', + self.sequence))) + messages_string.extend(bytearray(struct.pack('!I', len(m)))) + messages_string.extend(m) + self.sequence += 1 + len_to_send = len(messages_string) + len_sent = 0 + while len_sent < len_to_send: + t = time.time() * 1000 + logger.debug( + 'About to write to the socket (already sent %d out of %d ' + 'bytes)' % (len_sent, len_to_send)) + written = s.write(buffer(messages_string, len_sent)) + write_time = time.time() * 1000 - t + logger.debug('Written %d bytes to socket in %f ms' + % (written, write_time)) + if written == 0: + raise RuntimeError('socket connection broken') + self.close() + return False + len_sent += written + logger.debug('Waiting for response from mtgraphite server') + chunk = s.read(6) # Expecting "1A"+4byte_num_of_metrics_received + code = bytearray(chunk)[:2] + logger.debug('MTGraphite server response of %s' + % bytearray(chunk).strip()) + if code == '1A': + logger.info('Confirmed write to mtgraphite socket.') + return True + + def _write_messages(self, msgset, max_emit_retries=10): + msg_sent = False + retries = 0 + while not msg_sent and retries <= max_emit_retries: + try: + retries += 1 + self._write_messages_no_retries(msgset) + msg_sent = True + except Exception: + if retries 
<= max_emit_retries: + + # Wait for (2^retries * 100) milliseconds + + wait_time = 2.0 ** retries * 0.1 + logger.error( + 'Could not connect to the mtgraphite server.Retry in ' + '%f seconds.' % wait_time) + + # The connection will be created again by + # _write_messages_no_retries(). + + self.close() + time.sleep(wait_time) + else: + logger.error('Bail out on sending to mtgraphite server' + ) + raise + + # + # Public API # + # + + def close(self): + if self.conn: + try: + self.conn.close() + except Exception as e: + logger.exception(e) + self.conn = None + + def send_messages(self, messages): + """ + Helper method to send produce requests + @param: *messages, one or more message payloads -- type str + @returns: # of messages sent + raises on error + """ + + # Guarantee that messages is actually a list or tuple (should always be + # true) + + if not isinstance(messages, (list, tuple)): + raise TypeError('messages is not a list or tuple!') + + # Raise TypeError if any message is not encoded as a str + + for m in messages: + if not isinstance(m, str): + raise TypeError('all produce message payloads must be type str' + ) + + logger.debug("""""") + logger.debug('New message:') + logger.debug('len(msgset)=%d, batch_every_n=%d, time=%d, ' + 'next_timeout=%d' % (len(self.msgset), + self.batch_send_every_n, + time.time(), + self.next_timeout)) + if messages: + self.msgset.extend(messages) + if len(self.msgset) >= self.batch_send_every_n or time.time() \ + > self.next_timeout: + self._write_messages(self.msgset) + self.msgset = [] + self.next_timeout = time.time() + self.batch_send_every_t + + return len(messages) + + def construct_message(self, space_id, group_id, metric_type, value, + timestamp=None): + """ + Message constructor. Creates a message that you can then append to a + list and send using send_messages. + + params: + :param string space_id: space id (you can get this via logmet) + :param string group_id: group id to access the metric + :param string metric_type: type of metric (e.g., cpu, memory) + :param int value: value of the metric + :param int timestamp: None by default. If left as None, the current + time is used instead. + + returns: a string that contains the message you want to send. 
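+
+        Example (illustrative): construct_message('spc', 'grp', 'cpu', 42,
+        1500000000) returns 'spc.grp.cpu 42 1500000000\r\n'.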
+ """ + return '%s.%s.%s %d %d\r\n' % (space_id, group_id, metric_type, + value, timestamp or int(time.time())) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/namespace.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/namespace.py new file mode 100644 index 00000000..7077e186 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/namespace.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import os +import multiprocessing +import Queue +import logging +import sys +import types +import signal +import ctypes +from crawler_exceptions import (CrawlTimeoutError, + CrawlError, + NamespaceFailedSetns) + +logger = logging.getLogger('crawlutils') + +try: + libc = ctypes.CDLL('libc.so.6') +except Exception as e: + logger.warning('Can not crawl containers as there is no libc: %s' % e) + libc = None + + +ALL_NAMESPACES = 'user pid uts ipc net mnt'.split() +IN_PROCESS_TIMEOUT = 30 + + +def get_errno_msg(): + try: + libc.__errno_location.restype = ctypes.POINTER(ctypes.c_int) + errno = libc.__errno_location().contents.value + errno_msg = os.strerror(errno) + return errno_msg + except (OSError, AttributeError): + # Getting an error while trying to get the errorno + return 'unknown error' + + +def get_libc(): + global libc + return libc + + +def get_pid_namespace(pid): + try: + ns = os.stat('/proc/' + str(pid) + '/ns/pid').st_ino + return ns + except OSError: + logger.debug('There is no container with pid=%s running.' + % pid) + return None + + +class ProcessContext: + + def __init__(self, pid, namespaces): + self.namespaces = namespaces + self.pid = pid + self.host_ns_fds = {} + self.container_ns_fds = {} + self.host_cwd = os.getcwd() + open_process_namespaces('self', self.host_ns_fds, + self.namespaces) + open_process_namespaces(self.pid, self.container_ns_fds, + self.namespaces) + + def attach(self): + # Disable logging just to be sure log rotation does not happen in + # the container. + logging.disable(logging.CRITICAL) + attach_to_process_namespaces(self.container_ns_fds, self.namespaces) + + def detach(self): + try: + # Re-attach to the process original namespaces. 
+ attach_to_process_namespaces(self.host_ns_fds, + self.namespaces) + # We are now in host context + os.chdir(self.host_cwd) + close_process_namespaces(self.container_ns_fds, + self.namespaces) + close_process_namespaces(self.host_ns_fds, self.namespaces) + finally: + # Enable logging again + logging.disable(logging.NOTSET) + + +def run_as_another_namespace( + pid, + namespaces, + function, + *args, + **kwargs +): + hack_to_pre_load_modules() + + _args = (pid, namespaces, function) + _kwargs = {'_args': tuple(args), '_kwargs': dict(kwargs)} + return run_as_another_process(_run_as_another_namespace, _args, _kwargs) + + +def run_as_another_process(function, _args=(), _kwargs={}): + try: + queue = multiprocessing.Queue(2 ** 15) + except OSError: + # try again with a smaller queue + queue = multiprocessing.Queue(2 ** 14) + + child_process = multiprocessing.Process( + target=_function_wrapper, + args=(queue, function), + kwargs={'_args': _args, '_kwargs': _kwargs}) + child_process.start() + + child_exception, result = None, None + try: + (result, child_exception) = queue.get(timeout=IN_PROCESS_TIMEOUT) + except Queue.Empty: + child_exception = CrawlTimeoutError() + except Exception as exc: + logger.warn(exc) + + child_process.join(IN_PROCESS_TIMEOUT) + + # The join failed and the process might still be alive + + if child_process.is_alive(): + errmsg = ('Timed out waiting for process %d to exit.' % + child_process.pid) + queue.close() + os.kill(child_process.pid, 9) + logger.error(errmsg) + raise CrawlTimeoutError(errmsg) + + if result is None: + if child_exception: + raise child_exception + raise CrawlError('Unknown crawl error.') + return result + + +def _function_wrapper( + queue, + function, + _args=(), + _kwargs={} +): + """ + Function to be used by run_as_another_process to wrap `function` + and call it with _args and _kwargs. `queue` is used to get the result + and any exception raised. + :param queue: + :param function: + :param _args: + :param _kwargs: + :return: + """ + + # Die if the parent dies + PR_SET_PDEATHSIG = 1 + get_libc().prctl(PR_SET_PDEATHSIG, signal.SIGHUP) + + def signal_handler_sighup(*args): + logger.warning('Crawler parent process died, so exiting... Bye!') + queue.close() + exit(1) + + signal.signal(signal.SIGHUP, signal_handler_sighup) + + try: + result = function(*_args, **_kwargs) + + # if res is a generator (i.e. 
function uses yield) + + if isinstance(result, types.GeneratorType): + result = list(result) + queue.put((result, None)) + queue.close() + sys.exit(0) + except Exception as e: + queue.put((None, e)) + queue.close() + sys.exit(1) + + +def _run_as_another_namespace( + pid, + namespaces, + function, + _args=(), + _kwargs={} +): + + # os.closerange(1, 1000) + context = ProcessContext(pid, namespaces) + context.attach() + try: + return run_as_another_process(function, _args, _kwargs) + finally: + context.detach() + + +def hack_to_pre_load_modules(): + queue = multiprocessing.Queue() + + def foo(queue): + queue.put('dummy') + pass + + p = multiprocessing.Process(target=foo, args=(queue, )) + p.start() + queue.get() + p.join() + + +def open_process_namespaces(pid, namespace_fd, namespaces): + for ct_ns in namespaces: + ns_path = os.path.join('/proc', pid, 'ns', ct_ns) + # arg 0 means readonly + namespace_fd[ct_ns] = get_libc().open(ns_path, 0) + if namespace_fd[ct_ns] == -1: + errno_msg = get_errno_msg() + error_msg = 'Opening the %s namespace file failed: %s' \ + % (ct_ns, errno_msg) + logger.warning(error_msg) + raise NamespaceFailedSetns(error_msg) + + +def close_process_namespaces(namespace_fd, namespaces): + for ct_ns in namespaces: + r = get_libc().close(namespace_fd[ct_ns]) + if r == -1: + errno_msg = get_errno_msg() + error_msg = ('Could not close the %s ' + 'namespace (fd=%s): %s' % + (ct_ns, namespace_fd[ct_ns], errno_msg)) + logger.warning(error_msg) + + +def attach_to_process_namespaces(namespace_fd, ct_namespaces): + for ct_ns in ct_namespaces: + if hasattr(get_libc(), 'setns'): + r = get_libc().setns(namespace_fd[ct_ns], 0) + else: + # The Linux kernel ABI should be stable enough + __NR_setns = 308 + r = get_libc().syscall(__NR_setns, namespace_fd[ct_ns], 0) + if r == -1: + errno_msg = get_errno_msg() + error_msg = ('Could not attach to the container %s ' + 'namespace (fd=%s): %s' % + (ct_ns, namespace_fd[ct_ns], errno_msg)) + logger.warning(error_msg) + if ct_ns == 'user': + continue + raise NamespaceFailedSetns(error_msg) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/os_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/os_utils.py new file mode 100644 index 00000000..7329b1db --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/os_utils.py @@ -0,0 +1,68 @@ +import logging +import platform +import time + +import psutil + +import utils.misc +from utils import osinfo +from utils.features import OSFeature + +logger = logging.getLogger('crawlutils') + + +def crawl_os(): + feature_key = platform.system().lower() + try: + os_kernel = platform.platform() + except: + os_kernel = 'unknown' + + result = osinfo.get_osinfo(mount_point='/') + if result: + os_distro = result['os'] + os_version = result['version'] + else: + os_distro = 'unknown' + os_version = 'unknown' + + ips = utils.misc.get_host_ip4_addresses() + + #boot_time = psutil.boot_time() + #uptime = int(time.time()) - boot_time + boot_time = 'unknown' + uptime = 'unknown' + feature_attributes = OSFeature( + boot_time, + uptime, + ips, + os_distro, + os_version, + os_kernel, + platform.machine() + ) + + return [(feature_key, feature_attributes, 'os')] + + +def crawl_os_mountpoint(mountpoint='/'): + result = osinfo.get_osinfo(mount_point=mountpoint) + if result: + os_distro = result['os'] + os_version = result['version'] + else: + os_distro = 'unknown' + os_version = 'unknown' + + feature_key = 'linux' + feature_attributes = OSFeature( # boot time unknown for img + # live IP 
unknown for img + 'unsupported', + 'unsupported', + '0.0.0.0', + os_distro, + os_version, + 'unknown', + 'unknown' + ) + return [(feature_key, feature_attributes, 'os')] diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/osinfo.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/osinfo.py new file mode 100644 index 00000000..61754860 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/osinfo.py @@ -0,0 +1,120 @@ +import os +import re + +LSB_RELEASE = 'etc/lsb-release' +OS_RELEASE = 'etc/os-release' +USR_OS_RELEASE = 'usr/lib/os-release' +APT_SOURCES = 'etc/apt/sources.list' +REDHAT_RELEASE = 'etc/redhat-release' +CENTOS_RELEASE = 'etc/centos-release' +SYSTEM_RELEASE = 'etc/system-release' + +REDHAT_RE = re.compile(r'red hat enterprise linux .* release (\d+(\.\d)?).*') +CENTOS_RE = re.compile(r'centos (?:linux )?release (\d+(\.\d)?).*') + + +def _get_file_name(mount_point, filename): + if mount_point: + return os.path.join(mount_point, filename) + return os.path.join('/', filename) + + +def parse_lsb_release(data): + result = {} + for line in data: + if line.startswith('DISTRIB_ID'): + result['os'] = line.strip().split('=')[1].lower() + if line.startswith('DISTRIB_RELEASE'): + result['version'] = line.strip().split('=')[1].lower() + return result + + +def parse_os_release(data): + result = {} + for line in data: + if line.startswith('ID='): + result['os'] = line.strip().split('=')[1].lower().strip('"') + if line.startswith('VERSION_ID'): + result['version'] = line.strip().split('=')[1].lower().strip('"') + return result + + +def parse_redhat_release(data): + result = {} + for line in data: + match = REDHAT_RE.match(line.lower()) + if match: + result['os'] = 'rhel' + result['version'] = match.group(1) + return result + + +def parse_centos_release(data): + result = {} + for line in data: + match = CENTOS_RE.match(line.lower()) + if match: + result['os'] = 'centos' + result['version'] = match.group(1) + return result + + +def parse_redhat_centos_release(data): + for line in data: + if 'centos' in line.lower(): + return parse_centos_release(data) + elif 'red hat' in line.lower(): + return parse_redhat_release(data) + return {} + + +def get_osinfo_from_redhat_centos(mount_point='/'): + + try: + with open(_get_file_name(mount_point, CENTOS_RELEASE), 'r') as lsbp: + return parse_redhat_centos_release(lsbp.readlines()) + except IOError: + try: + with open(_get_file_name(mount_point, + REDHAT_RELEASE), 'r') as lsbp: + return parse_redhat_centos_release(lsbp.readlines()) + except IOError: + try: + with open(_get_file_name(mount_point, + SYSTEM_RELEASE), 'r') as lsbp: + return parse_redhat_centos_release(lsbp.readlines()) + except IOError: + return {} + + +def get_osinfo_from_lsb_release(mount_point='/'): + try: + with open(_get_file_name(mount_point, LSB_RELEASE), 'r') as lsbp: + return parse_lsb_release(lsbp.readlines()) + except IOError: + return {} + + +def get_osinfo_from_os_release(mount_point='/'): + try: + with open(_get_file_name(mount_point, OS_RELEASE), 'r') as lsbp: + return parse_os_release(lsbp.readlines()) + except IOError: + try: + with open(USR_OS_RELEASE, 'r') as lsbp: + return parse_os_release(lsbp.readlines()) + except IOError: + return {} + + +def get_osinfo(mount_point='/'): + + result = get_osinfo_from_lsb_release(mount_point) + if result: + return result + + result = get_osinfo_from_os_release(mount_point) + if result: + return result + + return get_osinfo_from_redhat_centos(mount_point) diff --git 
a/crawler/utils/plugincont/plugincont_img/crawler/utils/package_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/package_utils.py
new file mode 100644
index 00000000..12f4ba68
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/package_utils.py
@@ -0,0 +1,193 @@
+import logging
+import os
+import shutil
+import tempfile
+
+from crawler_exceptions import CrawlError, CrawlUnsupportedPackageManager
+from utils import osinfo
+from utils.features import PackageFeature
+from utils.misc import subprocess_run
+
+logger = logging.getLogger('crawlutils')
+
+
+def get_dpkg_packages(
+        root_dir='/',
+        dbpath='var/lib/dpkg',
+        installed_since=0):
+
+    if os.path.isabs(dbpath):
+        logger.warning(
+            'dbpath: ' +
+            dbpath +
+            ' is an absolute path. Ignoring prefix: ' +
+            root_dir +
+            '.')
+
+    # Update for a different root.
+
+    dbpath = os.path.join(root_dir, dbpath)
+
+    output = subprocess_run(['dpkg-query', '-W',
+                             '--admindir={0}'.format(dbpath),
+                             '-f=${Package}|${Version}'
+                             '|${Architecture}|${Installed-Size}\n'],
+                            shell=False)
+    dpkglist = output.strip('\n')
+    if dpkglist:
+        for dpkginfo in dpkglist.split('\n'):
+            (name, version, architecture, size) = dpkginfo.split(r'|')
+
+            # dpkg does not provide any installtime field
+            # feature_key = '{0}/{1}'.format(name, version) -->
+            # changed to below per Suriya's request
+
+            feature_key = '{0}'.format(name, version)
+            yield (feature_key, PackageFeature(None, name,
+                                               size, version,
+                                               architecture))
+
+
+def get_rpm_packages(
+        root_dir='/',
+        dbpath='var/lib/rpm',
+        installed_since=0,
+        reload_needed=False):
+
+    if os.path.isabs(dbpath):
+        logger.warning(
+            'dbpath: ' +
+            dbpath +
+            ' is an absolute path. Ignoring prefix: ' +
+            root_dir +
+            '.')
+
+    # Update for a different root.
+
+    dbpath = os.path.join(root_dir, dbpath)
+
+    try:
+        if reload_needed:
+            reloaded_db_dir = tempfile.mkdtemp()
+            _rpm_reload_db(root_dir, dbpath, reloaded_db_dir)
+            dbpath = reloaded_db_dir
+
+        output = subprocess_run(['rpm',
+                                 '--dbpath',
+                                 dbpath,
+                                 '-qa',
+                                 '--queryformat',
+                                 '%{installtime}|%{name}|%{version}'
+                                 '-%{release}|%{arch}|%{size}\n'],
+                                shell=False,
+                                ignore_failure=True)
+        # We ignore failures because sometimes rpm returns rc=1 but still
+        # outputs all the data.
+        rpmlist = output.strip('\n')
+    finally:
+        if reload_needed:
+            logger.debug('Deleting directory: %s' % (reloaded_db_dir))
+            shutil.rmtree(reloaded_db_dir)
+
+    if rpmlist:
+        for rpminfo in rpmlist.split('\n'):
+            (installtime, name, version, architecture, size) = \
+                rpminfo.split(r'|')
+
+            # Compare the install time as an int; skip the filter for
+            # values that do not parse as integers.
+
+            try:
+                if int(installtime) <= installed_since:
+                    continue
+            except ValueError:
+                pass
+
+            # feature_key = '{0}/{1}'.format(name, version) -->
+            # changed to below per Suriya's request
+
+            feature_key = '{0}'.format(name, version)
+            yield (feature_key,
+                   PackageFeature(installtime,
+                                  name, size, version, architecture))
+
+
+def _rpm_reload_db(
+        root_dir='/',
+        dbpath='var/lib/rpm',
+        reloaded_db_dir='/tmp/'):
+    """
+    Dumps and reloads the rpm database.
+
+    Returns the path to the new rpm database, or raises RuntimeError if the
+    dump and load commands failed.
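+
+    Illustrative call (paths are hypothetical):
+        _rpm_reload_db('/', '/var/lib/rpm', '/tmp/rpmdb') dumps
+        /var/lib/rpm/Packages and loads it into /tmp/rpmdb/Packages.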
+ """ + + try: + dump_dir = tempfile.mkdtemp() + + subprocess_run(['/usr/bin/db_dump', + os.path.join(dbpath, 'Packages'), + '-f', + os.path.join(dump_dir, 'Packages')], + shell=False) + subprocess_run(['/usr/bin/db_load', + '-f', + os.path.join(dump_dir, 'Packages'), + os.path.join(reloaded_db_dir, 'Packages')], + shell=False) + finally: + logger.debug('Deleting directory: %s' % (dump_dir)) + shutil.rmtree(dump_dir) + + return reloaded_db_dir + + +def crawl_packages( + dbpath=None, + root_dir='/', + installed_since=0, + reload_needed=True): + + # package attributes: ["installed", "name", "size", "version"] + + logger.debug('Crawling Packages') + + pkg_manager = _get_package_manager(root_dir) + + try: + if pkg_manager == 'dpkg': + dbpath = dbpath or 'var/lib/dpkg' + for (key, feature) in get_dpkg_packages( + root_dir, dbpath, installed_since): + yield (key, feature, 'package') + elif pkg_manager == 'rpm': + dbpath = dbpath or 'var/lib/rpm' + for (key, feature) in get_rpm_packages( + root_dir, dbpath, installed_since, reload_needed): + yield (key, feature, 'package') + else: + logger.warning('Unsupported package manager for Linux distro') + except Exception as e: + logger.error('Error crawling packages', + exc_info=True) + raise CrawlError(e) + + +def _get_package_manager(root_dir): + result = osinfo.get_osinfo(mount_point=root_dir) + if result: + os_distro = result['os'] + else: + raise CrawlUnsupportedPackageManager() + + pkg_manager = None + if os_distro in ['ubuntu', 'debian']: + pkg_manager = 'dpkg' + elif os_distro in ['redhat', 'red hat', 'rhel', 'fedora', 'centos']: + pkg_manager = 'rpm' + elif os.path.exists(os.path.join(root_dir, 'var/lib/dpkg')): + pkg_manager = 'dpkg' + elif os.path.exists(os.path.join(root_dir, 'var/lib/rpm')): + pkg_manager = 'rpm' + return pkg_manager diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/process_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/process_utils.py new file mode 100644 index 00000000..492970bc --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/process_utils.py @@ -0,0 +1,136 @@ + +import fcntl +import os +import signal +import struct +import subprocess + +_SC_OPEN_MAX = 4 + + +# Flake 8's complexity 10 limit requires odd code changes; so skip +# its QA here +# flake8: noqa + + +def _close_fds(keep_fds, max_close_fd=None): + """ + Have a process close all file descriptors except for stderr, stdout, + and stdin and those ones in the keep_fds list + The maximum file descriptor to close can be provided to avoid long + delays; this max_fd value depends on the program being used and could + be a low number if the program does not have many file descriptors + """ + maxfd = os.sysconf(_SC_OPEN_MAX) + if max_close_fd: + maxfd = min(maxfd, max_close_fd) + + for fd in range(3, maxfd): + if fd in keep_fds: + continue + try: + os.close(fd) + except: + pass + + +def start_child1(params, pass_fds, null_fds, ign_sigs, setsid=False, + max_close_fd=None, **kwargs): + errcode = 0 + try: + process = subprocess.Popen(params, **kwargs) + pid = process.pid + except OSError as err: + errcode = err.errno + stdout, stderr = process.communicate() + assert process.returncode == 0 + return pid, errcode + +def start_child(params, pass_fds, null_fds, ign_sigs, setsid=False, + max_close_fd=None, **kwargs): + """ + Start a child process without leaking file descriptors of the + current process. We pass a list of file descriptors to the + child process and close all other ones. 
We redirect a list of + null_fds (typically stderr, stdout, stdin) to /dev/null. + + This function is a wrapper for subprocess.Popen(). + + @params: start the process with the given parameters. + @pass_fds: a list of file descriptors to pass to the child process + close all file descriptors not in this list starting + at file descriptor '3'. + @null_fds: a list of file descriptors to redirect to /dev/null; + a typical list here would be 0, 1, and 2 for + stdin, stdout, and stderr + @ign_sigs: a list of signals to ignore + @set_sid: whether to call os.setsid() + @max_close_fd: max. number of file descriptors to close; + can be a low number in case program doesn't + typically have many open file descriptors; + @**kwargs: kwargs to pass to subprocess.Popen() + + This function returns the process ID of the process that + was started and an error code. In case of success the process + ID is a positive number, -1 otherwise. The error code indicates + the errno returned from subprocess.Popen() + + """ + rfd, wfd = os.pipe() + + try: + pid = os.fork() + except OSError as err: + os.close(rfd) + os.close(wfd) + return -1, err.errno + + if pid == 0: + # child + os.close(rfd) + flags = fcntl.fcntl(wfd, fcntl.F_GETFD) + fcntl.fcntl(wfd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) + + if len(null_fds): + nullfd = os.open('/dev/null', os.O_RDWR) + for fd in null_fds: + os.dup2(nullfd, fd) + os.close(nullfd) + + keep_fds = pass_fds + keep_fds.extend(null_fds) + keep_fds.append(wfd) + + _close_fds(keep_fds, max_close_fd=max_close_fd) + + for ign_sig in ign_sigs: + signal.signal(ign_sig, signal.SIG_IGN) + if setsid: + os.setsid() + + errcode = 0 + pid = -1 + + try: + process = subprocess.Popen(params, **kwargs) + pid = process.pid + except OSError as err: + errcode = err.errno + + data = struct.pack('ii', pid, errcode) + os.write(wfd, data) + + os._exit(0) + else: + os.close(wfd) + + try: + message = os.read(rfd, 8) + pid, errcode, = struct.unpack('ii', message) + except: + pid = -1 + os.close(rfd) + # wait for child process to _exit() + os.waitpid(-1, 0) + + return pid, errcode diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/socket_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/socket_utils.py new file mode 100644 index 00000000..0a1ee5e4 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/socket_utils.py @@ -0,0 +1,34 @@ +import ctypes.util +import errno +import os +import socket + +libc = ctypes.CDLL(ctypes.util.find_library('c')) + + +def if_indextoname(ifindex): + libc.if_indextoname.argtypes = [ctypes.c_uint32, ctypes.c_char_p] + libc.if_indextoname.restype = ctypes.c_char_p + + ifname = ctypes.create_string_buffer(16) + ifname = libc.if_indextoname(ifindex, ifname) + if not ifname: + err = errno.ENXIO + raise OSError(err, os.strerror(err)) + return ifname + + +def open_udp_port(bindaddr, min, max): + """ + Try to open a UDP listening port in the given range + """ + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + for port in range(min, max + 1): + try: + sock.bind((bindaddr, port)) + return sock, port + except: + pass + + sock.close() + return None, None diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/timeout_utils.py b/crawler/utils/plugincont/plugincont_img/crawler/utils/timeout_utils.py new file mode 100644 index 00000000..ec6b4d3f --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/timeout_utils.py @@ -0,0 +1,21 @@ +import signal + + +class TimeoutError(Exception): + pass + + +class Timeout: 
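+    """
+    Context manager that raises TimeoutError once its body has been
+    running for more than `seconds`. It relies on SIGALRM, so it must be
+    used from the main thread. A minimal usage sketch (the body shown is
+    a placeholder):
+
+        with Timeout(seconds=5, error_message='crawl timed out'):
+            do_slow_crawl()
+    """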
+    def __init__(self, seconds=0.1, error_message='Timeout'):
+        self.seconds = seconds
+        self.error_message = error_message
+
+    def handle_timeout(self, signum, frame):
+        raise TimeoutError(self.error_message)
+
+    def __enter__(self):
+        signal.signal(signal.SIGALRM, self.handle_timeout)
+        signal.setitimer(signal.ITIMER_REAL, self.seconds)
+
+    def __exit__(self, type, value, traceback):
+        signal.setitimer(signal.ITIMER_REAL, 0)
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/virtual_machine.py b/crawler/utils/plugincont/plugincont_img/crawler/virtual_machine.py
new file mode 100644
index 00000000..2ff40824
--- /dev/null
+++ b/crawler/utils/plugincont/plugincont_img/crawler/virtual_machine.py
@@ -0,0 +1,75 @@
+import psutil
+
+
+def get_virtual_machines(user_list=[], host_namespace=''):
+    """
+    Returns the list of Virtual Machines running in the system.
+
+    XXX: Only QEMU VMs are supported at the moment; this includes
+    kvm and non-kvm VMs.
+
+    :param user_list: a list of VM descriptor strings 'name,kernel,distro,arch'
+    :return: A list of VirtualMachine objects
+    """
+    if not user_list:
+        raise NotImplementedError(
+            'Discovery of virtual machines is not supported')
+
+    vms = []
+    for vm_desc in user_list:
+        try:
+            name, kernel, distro, arch = vm_desc.split(',')
+            vms.append(QemuVirtualMachine(name, kernel, distro, arch,
+                                          host_namespace=host_namespace))
+        except (ValueError, KeyError):
+            continue
+    return vms
+
+
+class VirtualMachine():
+
+    def __init__(self, name, kernel, distro, arch, host_namespace=''):
+        self.name = name
+        self.namespace = host_namespace + '/' + name
+        self.kernel = kernel
+        self.distro = distro
+        self.arch = arch
+        self.pid = 0
+
+    def get_vm_desc(self):
+        """
+        Returns a tuple of strings, which together identify a VM
+
+        XXX: make this a dictionary
+
+        :return: a VM descriptor to be passed to the VM crawl plugins and used
+        to identify the VM.
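+
+        For example (values are illustrative):
+            ('1234', '4.4.0-generic', 'ubuntu', 'x86_64')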
+ """ + return str(self.pid), self.kernel, self.distro, self.arch + + def get_metadata_dict(self): + return {'namespace': self.namespace, + 'name': self.name, + 'emit_shortname': self.name} + + +class QemuVirtualMachine(VirtualMachine): + + def __init__(self, name, kernel, distro, arch, host_namespace='', + pid=None): + VirtualMachine.__init__(self, name, kernel, distro, arch, + host_namespace=host_namespace) + + if pid is None: + # Find the pid of the QEMU process running virtual machine `name` + self.pid = None + for proc in psutil.process_iter(): + if 'qemu' in proc.name(): + line = proc.cmdline() + if name == line[line.index('-name') + 1]: + self.pid = proc.pid + + if self.pid is None: + raise ValueError('no VM with vm_name: %s' % name) + else: + self.pid = pid diff --git a/crawler/utils/plugincont/plugincont_img/crawler/vms_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/vms_crawler.py new file mode 100644 index 00000000..fc2e12e1 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/vms_crawler.py @@ -0,0 +1,83 @@ +import plugins_manager +from base_crawler import BaseCrawler, BaseFrame +from virtual_machine import get_virtual_machines + + +class VirtualMachineFrame(BaseFrame): + + def __init__(self, feature_types, vm): + BaseFrame.__init__(self, feature_types) + self.metadata.update(vm.get_metadata_dict()) + self.metadata['system_type'] = 'vm' + + +class VirtualMachinesCrawler(BaseCrawler): + + def __init__(self, + features=['os', 'cpu'], + user_list=[], + host_namespace='', + plugin_places=['plugins'], + options={}): + + BaseCrawler.__init__( + self, + features=features, + plugin_places=plugin_places, + options=options) + self.vms_list = [] + plugins_manager.reload_vm_crawl_plugins( + features, plugin_places, options) + self.plugins = plugins_manager.get_vm_crawl_plugins(features) + self.host_namespace = host_namespace + self.user_list = user_list + + def update_vms_list(self): + """ + Updates the self.vms_list. + + :return: None + """ + self.vms_list = get_virtual_machines( + user_list=self.user_list, + host_namespace=self.host_namespace) + + def crawl_vm(self, vm, ignore_plugin_exception=True): + """ + Crawls a specific vm and returns a Frame for it. + + :param vm: a VirtualMachine object + :param ignore_plugin_exception: just ignore exceptions on a plugin + :return: a Frame object. The returned frame can have 0 features and + still have metadata. This can occur if there were no plugins, or all + the plugins raised an exception (and ignore_plugin_exception was True). + """ + frame = VirtualMachineFrame(self.features, vm) + for (plugin_obj, plugin_args) in self.plugins: + try: + frame.add_features(plugin_obj.crawl(vm_desc=vm.get_vm_desc(), + **plugin_args)) + except Exception as exc: + if not ignore_plugin_exception: + raise exc + return frame + + def crawl_vms(self, ignore_plugin_exception=True): + """ + Crawl all vms stored in self.vms_list + + :param ignore_plugin_exception: just ignore exceptions in a plugin + :return: a list generator of Frame objects + """ + for vm in self.vms_list: + yield self.crawl_vm(vm, ignore_plugin_exception) + + def crawl(self, ignore_plugin_exception=True): + """ + Crawl all vms running in the system. 
+ + :param ignore_plugin_exception: just ignore exceptions in a plugin + :return: a list generator of Frame objects + """ + self.update_vms_list() + return self.crawl_vms(ignore_plugin_exception) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/worker.py b/crawler/utils/plugincont/plugincont_img/crawler/worker.py new file mode 100644 index 00000000..f4d0aa7b --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/worker.py @@ -0,0 +1,87 @@ +import time + + +class Worker: + + def __init__(self, + emitters=None, + frequency=-1, + crawler=None): + """ + Store and check the types of the arguments. + + :param emitters: EmittersManager that holds the list of Emitters. + If it is None, then no emit is done. + :param frequency: Sleep seconds between iterations + """ + self.iter_count = 0 + self.frequency = frequency + self.next_iteration_time = None + self.emitters = emitters + self.crawler = crawler + + def iterate(self, timeout=0): + """ + Function called at each iteration. + + Side effects: increments iter_count + + :param timeout: seconds to wait for polling crawls. If 0, then + just use the regular crawl() method and do not poll. + :return: None + """ + + # Start by polling new systems created within `timeout` seconds + end_time = time.time() + timeout + while timeout > 0: + # If polling is not implemented, this is a sleep(timeout) + frame = self.crawler.polling_crawl(timeout) + if frame and self.emitters: + self.emitters.emit(frame, snapshot_num=self.iter_count) + timeout = end_time - time.time() + # just used for output purposes + self.iter_count += 1 + + # Crawl all systems now + for frame in self.crawler.crawl(): + if self.emitters is not None: + self.emitters.emit(frame, snapshot_num=self.iter_count) + + # just used for output purposes + self.iter_count += 1 + + def _get_next_iteration_time(self, snapshot_time): + """ + Returns the number of seconds to sleep before the next iteration. + + :param snapshot_time: Start timestamp of the current iteration. + :return: Seconds to sleep as a float. + """ + if self.frequency == 0: + return 0 + + if self.next_iteration_time is None: + self.next_iteration_time = snapshot_time + self.frequency + else: + self.next_iteration_time += self.frequency + + while self.next_iteration_time + self.frequency < time.time(): + self.next_iteration_time += self.frequency + + time_to_sleep = self.next_iteration_time - time.time() + return time_to_sleep + + def run(self): + """ + Main crawler loop. Each iteration is one crawl and a sleep. + + :return: None + """ + time_to_sleep = 0 + while True: + snapshot_time = time.time() + self.iterate(time_to_sleep) + # Frequency < 0 means only one run. 
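+            # frequency < 0: crawl exactly once; frequency == 0: iterate
+            # back-to-back; frequency == N: aim to start iterations N
+            # seconds apart (see _get_next_iteration_time()).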
+            if self.frequency < 0:
+                break
+            time_to_sleep = self._get_next_iteration_time(snapshot_time)
diff --git a/crawler/utils/plugincont/plugincont_img/dependencies/python-conntrackprobe_0.2.1-1_all.deb b/crawler/utils/plugincont/plugincont_img/dependencies/python-conntrackprobe_0.2.1-1_all.deb
new file mode 100644
index 0000000000000000000000000000000000000000..1cbf3427f79c095526e18557d4e20c2a34a7bf31
GIT binary patch
literal 11952
[11952 bytes of base85-encoded binary payload omitted]
zaFj|)efe!tD|dm%&WNeMJ8=iI50(nckODesS`1FJOuk7ydbm#)c+(7 zq+P7J9d$*w;%-O#NRNGx%h7JG!2)I#@m>WJg$lJ7dV%bd3IdhMO1XIz8*Kd>a8 z+uf1d|1hY=)?HL(8$)bzY6It36oBM!?tIk!voROOgy$`YDkjk;eyB}!#YBRtYJ{*M zWU9dKTQ8^LecxB_1^MRs+m;l77C56t^U>k_jPmpk!__^{gLuKRSv&J`Lr`!nG!%jX z{dm{~+964<{(z`e@i%l3IOB7T{c&8V+!VoDAZBNghx+6LM4a^LOKm1%=8U?$twEsL zK7O&|B(JYwAz$NzunwTn8S8iJAy{PORa8yg*XpENEL1dJKxVQrM+qIUTWT}I;>wG7 zDZ5jty8A0cWg(!2SsrQq5!{}MNBh4g2jX$J7OR>0B5Er|MEYpC=8eq^Hfup}Pevr; zN=6{0ta%7%DMJNDEoCgKACZ-Lh*N#eOcot9+Dwty9s)y#4WoWm)w71B(atbKa6lEU zTy-0KraHm3GyCX9rkfF$4G49&$F!j0O0ie8-C~NKzvs=&ff#N9#p2-r?c@i1m%6SWoxLO`f1``$dRq{soB zz(yP8Jhy#x{(V?zrV%MdpYA0wk9;=Og6|W84)X2KV+MRuZC)7B?8ENun=XZ_%;^Ob zdB<(>TxTgjzt9|D+XOH1-oWPmOF7w3^xE&pg?PJtC3u&KKOAPBi*vFZ~_P%=`N4xcd3HsSal@k3jLI}hy&AYkvc}zRE~1K zdYx@DiOld-<{$7fyvy*QPd#cgUGTHEFhJPZ-2E>#t((4CWQ;M^rp~l(mgZFx?K1Ef z4;YKV$p-~c!-$H$m>ixUJtk+QKpwViHB^1&?h|iCGlRqcHq}qMo-Dbm&~-D3$m?P- zEJd<5P&~Q09URZB!tlB*UBs!>BwC#-T`I6fYOGzJ!ZhEN1qibk(CkP0L-9os+pEB; zeuxXx_ZQ}WOCid9u619c4?`!Ij%_%ep@8$3<$-ana6UB&OrCN7Kl{(Ds2>fH(dub)UC2zNeNd`W{L1r0JGqLL!w!536mNhn0EsEt=TyYwRkV8ZdGkw>;!mFbY9Ksto7V~K z720PdvEWBx5)322F`{B-&uY5&%@nXIQ!;nO%;Yhs;=R*%EO(5XXX}+vxVfP~WA#rZ z0LXu9XXvjyFb7xxCGqg=L`zwSlU*%gs zpygb~zH=1U7}@f)Y6%`>ktiNyX<-|nI>D-9hK`Y^&GX%y<~sC(5okdNk17f zkXC@ZmoMsbYyBHcj3J|~={q6a-N_t><#Pj-3w)I zqhX+*_b-_pHDs}pyJyp$kt*5tQb_)1MO2m%iU~sLRlU+tpg}&DVLV)0l%hW5Z5!7c z%WmuPa^9H@M;$e7*WG0pL(226IGf9Qk(dJy63v+JU&7<+jgy+|#tSTSp|R+z(>}7l z?gW?Xrck1yDAR%51!$;MESCBp6s9X~k>7HixlI_)C>2c#CJ&1%f&ixFYnbUij%o1U zMEwW4Bflm6Q4R9Gs1eSAci)MSL|WEvEx+#H@@)R}y&Y<3Q%gD2O1s zzn+HK18t76fr{=!1cc>Iw67wYn_V)^1(>5z`R~92X#{Xc$5i^2%UdbCHS)g*xZ%Di zA&wNRQ9OqaG(kg!6rH=nzxHC$Y7`;t80gfI4@*FG~AsO1R za^wv;DvqVnq{VzMyH+JDW|d3rc8)}TemL4gzOa9!a~o?&xsDjWUzEV5{BH4f&00hr z*O7VVNi;h>VPWto&uW5M0ib;+xJN0l9UN+D-Z>vt=Z?Ge;hpoGS%6?ct z$s&~;2{eHp9G0lbW1)Gim&NK@4E0DQdQiPOIG?^Wh5jj|B6S#5{@KPQA=3Q@Lw%xS@7qgS!xrm*=&!t40C zm{S0N44thH74hshQeFyR2#;myvcjHFgBp*$>>I1Ze7iLCmDdam2UV_%;1l@+v=@h< zQWce9n<~pMy?IMdy-HG>?ilwY@+7S4K&cU|t?630N>H3U^oiv1h+q#UK*r45qB~ks z!n?LS?rgwSwp5>=LhOnkFwZm!lK;Hajacs7zw=sX%0^TsmCY{C4lhWLx{D7)kA)J! z;zeYkqqaLmHL0XB`jgn4jTIcKh`IwF7Wkr*7cSaOq)}+AJINr5jr`WAz?L<374qP8 zL8+*C^fus?hr#xRdCedS|5qg!17C`(vX{2Zf8s+x7p)L*kMcO)Xo{@Z)Bwi|cpfkF zvOJX%V(kFmgGkO1kzzjcIaf2jDS5v1RZDR8PcB#~@fkLx1#OS|(`wxOwzh@58-qe{NLksp2DXZDu7&Sz*DW@fO=$-L&B zE`-aDotp_-$viuzpRm7)7?d3cX+K#tvYEsp*$$5N)mG2-X>@A~=ow|Xb^@nr)GhoC z<+W{9;85U_V*_Fx+D0coeJ8);gqcB`wUrdh5(V8r{UADvgD~>xGdSmip`^iuIoF%o zzar*(3flEmOior!0`N$7wTnDz04}zQC3W@5UpY8neS6PJIxi4iKh?Nxb!S9-&x?EC zxi@6#353t-uT%Rygo@^_C_|;QHUP(>9Tak(%FbQSdRA|<;hN%v0lX+}p7^^_<#sN> zR3bc{(@#FL(-ZgOVid6Bu^yLgql7ay{9G)-=;&Cp)TQEOyztz|-mrIX?_N{)ru=#X znh*4O+M-}Rxs&e5zTf*{6^p1rKW2#&V|qY$-YM+@`JmRdwA*>2+^is6M96)HH=vw? 
zeGoppe9hL!0=q{{_c0Z6yr!2hCB10==cKIn)d#lFH$Y)xK2^PAa+{7*LXqXl!&`nw zOdZfLLf;ioCDvIN=ETD+$Dmn!O)ey^>`h~Z6__j3@?D^>|Cr?LFsxYE?z3WAkhKcQ zgJb;Y>|XIqLk`<(#1C#RpQ93gwhUX6^Q{6vmt5dS9=A;s!Z^!+$i#?ZgNS#11aE(w z>v@5}x({-MGCtM8!zhU8{_#~3@3iL*AA9h})T2$EN!_a_^1>q^Wor$rA1_()cC2fi ziJ?<=Q+!ID8mi=m{=BFnP@VP<-9#1}$S7V;mtPEsfc{tsT933cW%${%D)y>ZFp!*M zaPaSjluZbV)5PQIaWh_>aBNytQplyY;`6t~`5%A8!X|uuYiQbsXQbC8On)e%-mq&C z_fR7pK%D}mG+o}WFJp`SbbD=!Z-l*r8_2upbs!YYi8`%<2~}mXhf?2xg)rh5O+qTG z-lS>KZ!;#EG109Wz3b+D*>)llKZEsjspau$3jGOT9E^dwprpojV?ePU^uJ!HwLyt1 z33XZd5q?k%ioxrEqXboPO^?9gIqq{Y1yd958kMa1r70|Vur}L%y3Ya*wPv1DGWmfJ zmYl|pR|A+$+seKi`ZuW`5O;I2iG^BKQ&jOA!M;&8+T_xkg!+mT%qIW{Is4}^d@@Br z&d{|lx;hn0UJQjIzQd?UhaGvXEpONx21xqw`uL@G7f~nvTe~ht3rg8dEs1an9DKfy zCURvDEEl_HHwN?y7HTKJ`j)N%R~V+X!wfzJj{EcBHnP0f!mi%>xD55^1J;UueIQ$~ ze2!Z!x@42Q91ZkzGdfqyd6rk3;J#v(dxn&nQ7B*5WxJ4fWqCq=vNg-Oe`HBfbI^U5 z@e#U1J4RDLvNI+_E?=m+(51vQ)iT46Ll6^?6}gbk8V{S6z4-u;@r95=44j1(hll>% zP5^s4LT2u-9JMU(@BB>XMg?WB^dRt#`^U|!!V^FtXIQa8JLO8eFSG&>?lm_~uTcj* z3H`hGkFdW9;B6tp2l-~mlER0m)CpAsUpITCBs3d@Z_9`V&fw*(qRx#r4HsA?V7Md( zL}oAb!Y$@v{crNWgGX^*Hngu5E_u>qp&UZ$`{Ex(Z;d=cSuf8pukCPF{gWsSq*F}aSN3zm2SXd zby3yuQ6LP*Z&MGPfYGJHLvQ3w$eV8h)?8&5?TT{RDLI@X&6e&newlJ89nIYmWBpnb z%6lBnQGI&W?+nNWUgjMV)PO*S~A%x&oaO8#$=N^P467;Uez$&Qf34PaU z#)C=My{ZY)@+*aeQUM#1q`~Bsz7<&p@;f#S`j5ovLsdB|>Xbsl>~|usre-JymX0|u z7@~67(qPKnNB;m%(G*hr2?BF_-?CIDkdNc$!5kM0F z5k#GD>BsD-@kfE_H~obW)wg|!6$qGB`>&DsFbvG>9-;56lP;X#!@16GA(2l+E_*4(i(c~uKoM7mhfV+S` z8|>+B^X@gp09fOH_lTSHaO)$>U`Ps^j%<7DaH{t-+q>A~*%eI%e&O6^EoA(3+!VmI zJ`N0%7C6{ot3%}k_=5Nd91AwRiHxV5E;?W?1Vc&M7d(&hB#H%&EJIDItuXJJ zE3taV2EX~9%tQ?O;CmiCZiLu;O?ZZ|Vb| zNPL!`u_$X;Rcm7Ib~e}8AIyHJdmb0Ytvw=t5tvXO!@CXewnAIFF>OeZA1o(*# z(fCbR>O9d4X~RN^DYf!Fj^640$1yZjZ9~-ROly8e%F^!(3W8>%uBw={0}X5+?PUs)@{8{$rf#{iN!WvxBEBcR%?SIjI1e zzX^3vaS`weK$hdxJmbYIqSIl9COKF;{qi*|C*S;vx$9^ZW+Q+p#yVzmJ(kr_L_cnK k05h^+<^TWy literal 0 HcmV?d00001 diff --git a/crawler/utils/plugincont/plugincont_img/requirements.txt b/crawler/utils/plugincont/plugincont_img/requirements.txt new file mode 100644 index 00000000..7fe20159 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/requirements.txt @@ -0,0 +1,14 @@ +psutil==2.1.3 +requests>=2.7.13 +netifaces==0.10.4 +kafka-python==1.3.1 +pykafka==1.1.0 +kafka==1.3.3 +docker-py==1.10.6 +python-dateutil==2.4.2 +semantic_version==2.5.0 +Yapsy==1.11.223 +configobj==4.7.0 +morph==0.1.2 +fluent-logger==0.4.6 +requests_unixsocket==0.1.5 diff --git a/requirements.txt b/requirements.txt index 7fe20159..fe9370eb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ netifaces==0.10.4 kafka-python==1.3.1 pykafka==1.1.0 kafka==1.3.3 -docker-py==1.10.6 +docker=2.0.0 python-dateutil==2.4.2 semantic_version==2.5.0 Yapsy==1.11.223 @@ -12,3 +12,4 @@ configobj==4.7.0 morph==0.1.2 fluent-logger==0.4.6 requests_unixsocket==0.1.5 +python-iptables From e31e55fdac7e733f81db0d2a94d192fca9ca54a9 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 28 Nov 2017 14:14:06 -0500 Subject: [PATCH 12/47] plugincont wip Signed-off-by: Sahil Suneja --- Dockerfile | 1 + crawler/plugin_containers_manager.py | 93 ++++++++++--------- crawler/safe_containers_crawler.py | 11 ++- crawler/utils/dockerutils.py | 20 +++- .../plugincont_img/crawler/crawler_lite.py | 2 +- 5 files changed, 79 insertions(+), 48 deletions(-) diff --git a/Dockerfile b/Dockerfile index d5549a73..65fddb4a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,6 +14,7 @@ COPY \ RUN dpkg -i /tmp/python-socket-datacollector_*_all.deb && \ apt-get -y update && \ apt-get -y install libpcap0.8 && \ + 
apt-get -y install libpcap-dev && \ dpkg -i /tmp/softflowd_0.9.*_amd64.deb && \ pip install pyroute2 py-radix requests-unixsocket json-rpc && \ dpkg -i /tmp/python-conntrackprobe_*_all.deb && \ diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index e26106f3..e1d8d56e 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -19,7 +19,6 @@ def __init__(self, frequency=-1): self.frequency = frequency self.pluginconts = dict() self.plugincont_image = 'plugincont_image' - #self.plugincont_image = 'crawler_plugins18' self.plugincont_name_prefix = 'plugin_cont' self.plugincont_username = 'user1' self.plugincont_framedir = '/home/user1/features/' @@ -31,18 +30,11 @@ def __init__(self, frequency=-1): self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id self.docker_client = docker.from_env() self.docker_APIclient = docker.APIClient(base_url='unix://var/run/docker.sock') - self.build_plugincont_img() - - def get_plugincont_framedir(self, guestcont): - frame_dir = None - if guestcont is not None and guestcont.plugincont is not None: - plugincont_id = guestcont.plugincont.id - rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id) - frame_dir = rootfs+self.plugincont_framedir - return frame_dir + if self.build_plugincont_img() != 0: + raise ValueError('Failed to build image') def destroy_cont(self, id=None, name=None): - client = self.docker_APIClient + client = self.docker_APIclient if name is None and id is None: return if name is not None: @@ -54,10 +46,56 @@ def destroy_cont(self, id=None, name=None): if client.containers(all=True,filters=filter) != []: client.stop(_id) client.remove_container(_id) + + def set_plugincont_py_cap(self, plugincont_id): + retVal = 0 + verify = False + try: + rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id) + py_path = rootfs+self.plugincont_py_path + libcap = ctypes.cdll.LoadLibrary("libcap.so") + caps = libcap.cap_from_text('cap_dac_read_search,cap_sys_chroot,cap_sys_ptrace+ep') + retVal = libcap.cap_set_file(py_path,caps) + if verify is True: + libcap.cap_to_text.restype = ctypes.c_char_p + caps_set = libcap.cap_get_file(py_path,caps) + caps_set_str = libcap.cap_to_text(caps_set, None) + assert 'cap_dac_read_search' in caps_set_str + assert 'cap_sys_chroot' in caps_set_str + assert 'cap_sys_ptrace' in caps_set_str + except Exception as exc: + print exc + print sys.exc_info()[0] + retVal = -1 + return retVal def build_plugincont_img(self): + retVal = 0 build_status = list(self.docker_APIclient.build(path=self.plugincont_image_path, tag=self.plugincont_image)) assert 'Successfully built' in build_status[-1] + try: + plugincont = self.docker_client.containers.run( + image=self.plugincont_image, + command="tail -f /dev/null", + detach=True) + time.sleep(5) + retVal = self.set_plugincont_py_cap(plugincont.id) + if retVal == 0: + self.docker_APIclient.commit(plugincont.id,repository=self.plugincont_image) + self.destroy_cont(id=plugincont.id) + except Exception as exc: + print exc + print sys.exc_info()[0] + retVal = -1 + return retVal + + def get_plugincont_framedir(self, guestcont): + frame_dir = None + if guestcont is not None and guestcont.plugincont is not None: + plugincont_id = guestcont.plugincont.id + rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id) + frame_dir = rootfs+self.plugincont_framedir + return frame_dir def create_plugincont(self, guestcont): guestcont_id = guestcont.long_id @@ -65,7 +103,6 @@ 
def create_plugincont(self, guestcont): plugincont = None plugincont_name = self.plugincont_name_prefix+'_'+guestcont_id seccomp_attr = json.dumps(json.load(open(self.plugincont_seccomp_profile_path))) - #secomp_profile_path = os.getcwd() + self.plugincont_seccomp_profile_path client = self.docker_client try: self.destroy_cont(name=plugincont_name) @@ -73,12 +110,10 @@ def create_plugincont(self, guestcont): image=self.plugincont_image, name=plugincont_name, user=self.plugincont_username, - command="/usr/bin/python2.7 /crawler/crawler/crawler_lite.py --frequency="+str(self.frequency), - #command="tail -f /dev/null", + command="/usr/bin/python2.7 /crawler/crawler_lite.py --frequency="+str(self.frequency), pid_mode='container:'+guestcont_id, network_mode='container:'+guestcont_id, cap_add=["SYS_PTRACE","DAC_READ_SEARCH"], - #security_opt=['seccomp:'+seccomp_profile_path], security_opt=['seccomp:'+seccomp_attr], volumes={guestcont_rootfs:{'bind':self.plugincont_guestcont_mountpoint,'mode':'ro'}}, detach=True) @@ -166,7 +201,7 @@ def _setup_netcls_cgroup(self, plugincont_id): def set_plugincont_iptables(self, plugincont_id): retVal = 0 try: - client = self.docker_APIClient + client = self.docker_APIclient plugincont_pid = client.inspect_container(plugincont_id)['State']['Pid'] #netns_path = '/var/run/netns' #if not os.path.isdir(netns_path): @@ -189,28 +224,6 @@ def destroy_plugincont(self, guestcont): guestcont.plugincont = None self.pluginconts.pop(str(guestcont_id)) - def set_plugincont_py_cap(self, plugincont_id): - retVal = 0 - verify = False - try: - rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id) - py_path = rootfs+self.plugincont_py_path - libcap = ctypes.cdll.LoadLibrary("libcap.so") - caps = libcap.cap_from_text('cap_dac_read_search,cap_sys_chroot,cap_sys_ptrace+ep') - retVal = libcap.cap_set_file(py_path,caps) - if verify is True: - libcap.cap_to_text.restype = ctypes.c_char_p - caps_set = libcap.cap_get_file(py_path,caps) - caps_set_str = libcap.cap_to_text(caps_set, None) - assert 'cap_dac_read_search' in caps_set_str - assert 'cap_sys_chroot' in caps_set_str - assert 'cap_sys_ptrace' in caps_set_str - except Exception as exc: - print exc - print sys.exc_info()[0] - retVal = -1 - return retVal - def setup_plugincont(self, guestcont): guestcont_id = str(guestcont.long_id) if guestcont_id in self.pluginconts.keys(): @@ -218,7 +231,6 @@ def setup_plugincont(self, guestcont): return self.create_plugincont(guestcont) - if guestcont.plugincont is None: return @@ -227,7 +239,4 @@ def setup_plugincont(self, guestcont): self.destroy_plugincont(guestcont) return - if self.set_plugincont_py_cap(plugincont_id) != 0: - self.destroy_plugincont(guestcont) - return diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py index 80f820e4..1c92ef33 100644 --- a/crawler/safe_containers_crawler.py +++ b/crawler/safe_containers_crawler.py @@ -44,13 +44,20 @@ def __init__(self, self.environment = environment self.host_namespace = host_namespace self.user_list = user_list - self.pluginconts_manager = PluginContainersManager(frequency) + self.pluginconts_manager = None + try: + self.pluginconts_manager = PluginContainersManager(frequency) + except ValueError as err: + print(err.args) # Return list of features after reading frame from plugin cont def get_plugincont_features(self, guestcont): #import pdb #pdb.set_trace() features = [] + if self.pluginconts_manager is None: + return features + if guestcont.plugincont is None: 
self.pluginconts_manager.setup_plugincont(guestcont) if guestcont.plugincont is None: @@ -134,6 +141,8 @@ def crawl(self, ignore_plugin_exception=True): :param ignore_plugin_exception: just ignore exceptions in a plugin :return: a list generator of Frame objects """ + if self.pluginconts_manager is None: + return containers_list = get_containers( user_list=self.user_list, host_namespace=self.host_namespace, diff --git a/crawler/utils/dockerutils.py b/crawler/utils/dockerutils.py index da065370..52766d7b 100644 --- a/crawler/utils/dockerutils.py +++ b/crawler/utils/dockerutils.py @@ -290,14 +290,26 @@ def _get_container_rootfs_path_btrfs(long_id, inspect=None): return rootfs_path +def _get_docker_root_dir(): + try: + client = docker.from_env() + docker_info = client.info() + root_dir = str(docker_info['DockerRootDir']) + return root_dir + except docker.errors.APIError as e: + logger.warning(str(e)) + raise DockerutilsException('Failed to get docker info') + def _get_container_rootfs_path_aufs(long_id, inspect=None): rootfs_path = None + root_dir_prefix = _get_docker_root_dir() + if VERSION_SPEC.match(semantic_version.Version(_fix_version( server_version))): aufs_path = None - mountid_path = ('/var/lib/docker/165536.165536/image/aufs/layerdb/mounts/' + + mountid_path = (root_dir_prefix + '/image/aufs/layerdb/mounts/' + long_id + '/mount-id') try: with open(mountid_path, 'r') as f: @@ -306,11 +318,11 @@ def _get_container_rootfs_path_aufs(long_id, inspect=None): logger.warning(str(e)) if not aufs_path: raise DockerutilsException('Failed to get rootfs on aufs') - rootfs_path = '/var/lib/docker/165536.165536/aufs/mnt/' + aufs_path + rootfs_path = root_dir_prefix + '/aufs/mnt/' + aufs_path else: rootfs_path = None - for _path in ['/var/lib/docker/165536.165536/aufs/mnt/' + long_id, - '/var/lib/docker/165536.165536/aufs/diff/' + long_id]: + for _path in [root_dir_prefix + '/aufs/mnt/' + long_id, + root_dir_prefix + '/aufs/diff/' + long_id]: if os.path.isdir(_path) and os.listdir(_path): rootfs_path = _path break diff --git a/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py index d175aef9..25a1c8b8 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py @@ -9,7 +9,7 @@ import json from icrawl_plugin import IContainerCrawler -plugins_dir = '/crawler/crawler/plugins/systems/' # might eventually become /home/user1/crawler/plugins/... +plugins_dir = '/crawler/plugins/systems/' # might eventually become /home/user1/crawler/plugins/... 
guestcont_plugins_file = '/rootfs_local/crawlplugins' plugins_file = '/rootfs_local/crawlplugins' # should eventually be /home/user1/crawlplugins frame_dir = '/home/user1/features/' From 45360fb0f62ffaa70f0c4cbbbbe70648532473b2 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 30 Nov 2017 12:15:43 -0500 Subject: [PATCH 13/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 47 ++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index e1d8d56e..123638f4 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -12,6 +12,7 @@ from containers import poll_containers, get_containers from utils.crawler_exceptions import ContainerWithoutCgroups from utils.namespace import run_as_another_namespace +from dockerutils import _get_docker_root_dir class PluginContainersManager(): @@ -26,13 +27,55 @@ def __init__(self, frequency=-1): self.plugincont_seccomp_profile_path = os.getcwd() + '/crawler/utils/plugincont/seccomp-no-ptrace.json' self.plugincont_image_path = os.getcwd() + '/crawler/utils/plugincont/plugincont_img' self.plugincont_guestcont_mountpoint = '/rootfs_local' - self.plugincont_host_uid = '166536' #from docker userns remapping - self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id self.docker_client = docker.from_env() self.docker_APIclient = docker.APIClient(base_url='unix://var/run/docker.sock') + if self.get_plugincont_host_uid() == -1: + raise ValueError('Failed to verify docker userns-remap settings') + if self.get_plugincont_cgroup_netclsid() == -1: + raise ValueError('Failed to set cgroup netclsid') if self.build_plugincont_img() != 0: raise ValueError('Failed to build image') + def isInt(s): + try: + int(s) + return True + except ValueError: + return False + + def get_plugincont_host_uid(self): + # from docker userns remapping + try: + docker_root_dir = _get_docker_root_dir() # /var/lib/docker/165536.16553 + leaf_dir = docker_root_dir.split('/')[-1] # 165536.165536 + possible_uid = leaf_dir.split('.')[0] # 165536 + if isInt(possible_uid) is True: + self.plugincont_host_uid = int(possible_uid) + except Exception as exc: + print exc + print sys.exc_info()[0] + self.plugincont_host_uid = -1 + + def get_plugincont_cgroup_netclsid(self): + # self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id + res_clsid = -1 + try: + cgroup_netcls_path = self._get_cgroup_dir(['net_cls','net_cls,net_prio']) + for root, dirs, files in os.walk(cgroup_netcls_path): + for file in files: + if file.endswith('net_cls.classid'): + fd = open(root+'/'+file,'r') + clsid = int(fd.readline()) + if res_clsid <= clsid: + res_clsid = clsid + 1 + fd.close() + res_clsid = res_clsid + 2 + except Exception as exc: + print exc + print sys.exc_info()[0] + res_clsid = -1 + self.plugincont_cgroup_netclsid = res_clsid + def destroy_cont(self, id=None, name=None): client = self.docker_APIclient if name is None and id is None: From 2934140d595407864e35d636752a7e961992b02b Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 30 Nov 2017 12:20:09 -0500 Subject: [PATCH 14/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 123638f4..57e5c91b 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py 
@@ -65,7 +65,7 @@ def get_plugincont_cgroup_netclsid(self): for file in files: if file.endswith('net_cls.classid'): fd = open(root+'/'+file,'r') - clsid = int(fd.readline()) + clsid = int(fd.readline(), 16) if res_clsid <= clsid: res_clsid = clsid + 1 fd.close() From 0afc1b28a00890835f1ef0cb969180693366d913 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 30 Nov 2017 14:30:56 -0500 Subject: [PATCH 15/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 62 ++++++++++++++++--- .../{Dockerfile => Dockerfile.template} | 5 -- 2 files changed, 52 insertions(+), 15 deletions(-) rename crawler/utils/plugincont/plugincont_img/{Dockerfile => Dockerfile.template} (85%) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 57e5c91b..823baf27 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -21,7 +21,7 @@ def __init__(self, frequency=-1): self.pluginconts = dict() self.plugincont_image = 'plugincont_image' self.plugincont_name_prefix = 'plugin_cont' - self.plugincont_username = 'user1' + self.plugincont_username = 'plugincont_user' self.plugincont_framedir = '/home/user1/features/' self.plugincont_py_path = '/usr/bin/python2.7' self.plugincont_seccomp_profile_path = os.getcwd() + '/crawler/utils/plugincont/seccomp-no-ptrace.json' @@ -29,34 +29,76 @@ def __init__(self, frequency=-1): self.plugincont_guestcont_mountpoint = '/rootfs_local' self.docker_client = docker.from_env() self.docker_APIclient = docker.APIClient(base_url='unix://var/run/docker.sock') - if self.get_plugincont_host_uid() == -1: + if self.set_plugincont_uid() == -1: raise ValueError('Failed to verify docker userns-remap settings') - if self.get_plugincont_cgroup_netclsid() == -1: + if self.set_plugincont_cgroup_netclsid() == -1: raise ValueError('Failed to set cgroup netclsid') if self.build_plugincont_img() != 0: raise ValueError('Failed to build image') - def isInt(s): + def is_int(self, s): try: int(s) return True except ValueError: return False - def get_plugincont_host_uid(self): - # from docker userns remapping + def _get_next_uid(self): + # TODO: check uid is within [UID_MIN,UID_MAX] in /etc/login.defs + # alternate approach: useradd nextid; id -u nextid; userdel nextid + # assumption the unused uid on host is also an unused uid in container + uid = 1010 + uids_in_use = [] try: + fd = open('/etc/passwd','r') + for users in fd.readlines(): + uids_in_use.append(users.split(':')[2]) + fd.close() + while str(uid) in uids_in_use: + uid = uid + 1 + except Exception as exc: + print exc + print sys.exc_info()[0] + uid = -1 + return uid + + def set_plugincont_dockerfile_uid(self, uid): + retVal = 0 + uid = str(uid) + user = self.plugincont_username + try: + shutil.copyfile(self.plugincont_image_path+'/Dockerfile.template', self.plugincont_image_path+'/Dockerfile') + fd = open(self.plugincont_image_path+'/Dockerfile','w') + fd.write('RUN groupadd -r ' + user + ' -g ' + uid) + fd.write('RUN useradd -u ' + uid + ' -m ' + user + ' -g ' + user) + fd.write('RUN usermod -a -G ' + user + ' ' + user) + fd.write('RUN chsh -s /bin/bash ' + user) + fd.close() + except Exception as exc: + print exc + print sys.exc_info()[0] + retVal = -1 + return retVal + + def set_plugincont_uid(self): + self.plugincont_host_uid = -1 + try: + uid = self._get_next_uid() + if uid <= 0: + return + if self.set_plugincont_dockerfile_uid(uid) != 0: + return docker_root_dir = _get_docker_root_dir() # /var/lib/docker/165536.16553 leaf_dir = 
docker_root_dir.split('/')[-1] # 165536.165536 - possible_uid = leaf_dir.split('.')[0] # 165536 - if isInt(possible_uid) is True: - self.plugincont_host_uid = int(possible_uid) + possible_sub_uid = leaf_dir.split('.')[0] # 165536 + if self.is_int(possible_sub_uid) is True: # from docker userns remapping + self.plugincont_host_uid = int(possible_sub_uid) + uid except Exception as exc: print exc print sys.exc_info()[0] self.plugincont_host_uid = -1 - def get_plugincont_cgroup_netclsid(self): + def set_plugincont_cgroup_netclsid(self): # self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id res_clsid = -1 try: diff --git a/crawler/utils/plugincont/plugincont_img/Dockerfile b/crawler/utils/plugincont/plugincont_img/Dockerfile.template similarity index 85% rename from crawler/utils/plugincont/plugincont_img/Dockerfile rename to crawler/utils/plugincont/plugincont_img/Dockerfile.template index e8c58652..956222a8 100644 --- a/crawler/utils/plugincont/plugincont_img/Dockerfile +++ b/crawler/utils/plugincont/plugincont_img/Dockerfile.template @@ -23,10 +23,5 @@ ENV PYTHONPATH=/usr/lib/python2.7/dist-packages:/usr/local/lib/python2.7/site-pa ADD crawler /crawler -RUN groupadd -r user1 -g 1000 && \ - useradd -u 1000 -m user1 -g user1 && \ - usermod -a -G user1 user1 && \ - chsh -s /bin/bash user1 - RUN sed -i s/" and isfile_strict(file):"/:/ /usr/local/lib/python2.7/site-packages/psutil/_pslinux.py From fd7a7e72cfbf917f9f57d8904efd0afba814d0ee Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 30 Nov 2017 15:46:13 -0500 Subject: [PATCH 16/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 54 ++++++++----------- .../plugincont_img/crawler/crawler_lite.py | 2 +- 2 files changed, 24 insertions(+), 32 deletions(-) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 823baf27..789955b3 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -5,6 +5,7 @@ import json import docker import iptc +import shutil import ctypes import plugins_manager import utils.dockerutils @@ -12,7 +13,6 @@ from containers import poll_containers, get_containers from utils.crawler_exceptions import ContainerWithoutCgroups from utils.namespace import run_as_another_namespace -from dockerutils import _get_docker_root_dir class PluginContainersManager(): @@ -22,7 +22,7 @@ def __init__(self, frequency=-1): self.plugincont_image = 'plugincont_image' self.plugincont_name_prefix = 'plugin_cont' self.plugincont_username = 'plugincont_user' - self.plugincont_framedir = '/home/user1/features/' + self.plugincont_framedir = '/home/' + self.plugincont_username + '/features/' self.plugincont_py_path = '/usr/bin/python2.7' self.plugincont_seccomp_profile_path = os.getcwd() + '/crawler/utils/plugincont/seccomp-no-ptrace.json' self.plugincont_image_path = os.getcwd() + '/crawler/utils/plugincont/plugincont_img' @@ -47,6 +47,7 @@ def _get_next_uid(self): # TODO: check uid is within [UID_MIN,UID_MAX] in /etc/login.defs # alternate approach: useradd nextid; id -u nextid; userdel nextid # assumption the unused uid on host is also an unused uid in container + # exact ranges maybe found in /etc/subuid uid = 1010 uids_in_use = [] try: @@ -57,8 +58,7 @@ def _get_next_uid(self): while str(uid) in uids_in_use: uid = uid + 1 except Exception as exc: - print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno uid = -1 return uid @@ -68,15 +68,14 @@ def 
set_plugincont_dockerfile_uid(self, uid): user = self.plugincont_username try: shutil.copyfile(self.plugincont_image_path+'/Dockerfile.template', self.plugincont_image_path+'/Dockerfile') - fd = open(self.plugincont_image_path+'/Dockerfile','w') - fd.write('RUN groupadd -r ' + user + ' -g ' + uid) - fd.write('RUN useradd -u ' + uid + ' -m ' + user + ' -g ' + user) - fd.write('RUN usermod -a -G ' + user + ' ' + user) - fd.write('RUN chsh -s /bin/bash ' + user) + fd = open(self.plugincont_image_path+'/Dockerfile','a') + fd.write('RUN groupadd -r ' + user + ' -g ' + uid + '\n') + fd.write('RUN useradd -u ' + uid + ' -m ' + user + ' -g ' + user + '\n') + fd.write('RUN usermod -a -G ' + user + ' ' + user + '\n') + fd.write('RUN chsh -s /bin/bash ' + user + '\n') fd.close() except Exception as exc: - print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal @@ -88,14 +87,14 @@ def set_plugincont_uid(self): return if self.set_plugincont_dockerfile_uid(uid) != 0: return - docker_root_dir = _get_docker_root_dir() # /var/lib/docker/165536.16553 + self.plugincont_host_uid = uid + docker_root_dir = utils.dockerutils._get_docker_root_dir() # /var/lib/docker/165536.16553 leaf_dir = docker_root_dir.split('/')[-1] # 165536.165536 possible_sub_uid = leaf_dir.split('.')[0] # 165536 if self.is_int(possible_sub_uid) is True: # from docker userns remapping self.plugincont_host_uid = int(possible_sub_uid) + uid except Exception as exc: - print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno self.plugincont_host_uid = -1 def set_plugincont_cgroup_netclsid(self): @@ -113,8 +112,7 @@ def set_plugincont_cgroup_netclsid(self): fd.close() res_clsid = res_clsid + 2 except Exception as exc: - print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno res_clsid = -1 self.plugincont_cgroup_netclsid = res_clsid @@ -149,8 +147,7 @@ def set_plugincont_py_cap(self, plugincont_id): assert 'cap_sys_chroot' in caps_set_str assert 'cap_sys_ptrace' in caps_set_str except Exception as exc: - print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal @@ -169,8 +166,7 @@ def build_plugincont_img(self): self.docker_APIclient.commit(plugincont.id,repository=self.plugincont_image) self.destroy_cont(id=plugincont.id) except Exception as exc: - print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal @@ -204,8 +200,7 @@ def create_plugincont(self, guestcont): detach=True) time.sleep(5) except Exception as exc: - print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno self.pluginconts[str(guestcont_id)] = plugincont guestcont.plugincont = plugincont @@ -215,7 +210,7 @@ def _add_iptable_rules(self): try: rule = iptc.Rule() match = iptc.Match(rule, "owner") - match.uid_owner = self.plugincont_host_uid + match.uid_owner = str(self.plugincont_host_uid) rule.add_match(match) rule.dst = "!127.0.0.1" rule.target = iptc.Target(rule, "DROP") @@ -224,15 +219,14 @@ def _add_iptable_rules(self): rule = iptc.Rule() match = iptc.Match(rule, "cgroup") - match.cgroup = self.plugincont_cgroup_netclsid + match.cgroup = str(self.plugincont_cgroup_netclsid) rule.add_match(match) rule.src = "!127.0.0.1" rule.target = iptc.Target(rule, "DROP") chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT") chain.insert_rule(rule) except Exception as exc: 
- print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal @@ -266,7 +260,7 @@ def _setup_netcls_cgroup(self, plugincont_id): os.makedirs(block_path) fd = open(block_classid_path,'w') - fd.write(self.plugincont_cgroup_netclsid) + fd.write(str(self.plugincont_cgroup_netclsid)) fd.close() fd = open(tasks_path,'r') @@ -278,8 +272,7 @@ def _setup_netcls_cgroup(self, plugincont_id): fd.write(pid) fd.close() except Exception as exc: - print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal @@ -297,8 +290,7 @@ def set_plugincont_iptables(self, plugincont_id): ['net'], self._add_iptable_rules) except Exception as exc: - print exc - print sys.exc_info()[0] + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal diff --git a/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py index 25a1c8b8..2a9823b7 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py @@ -12,7 +12,7 @@ plugins_dir = '/crawler/plugins/systems/' # might eventually become /home/user1/crawler/plugins/... guestcont_plugins_file = '/rootfs_local/crawlplugins' plugins_file = '/rootfs_local/crawlplugins' # should eventually be /home/user1/crawlplugins -frame_dir = '/home/user1/features/' +frame_dir = os.path.expanduser('~') + '/features/' # '/home/plugincont_user/features/' plugin_objs = [] active_plugins = [] frquency = -1 From 7548b03782994a98f4c198dd8b64a04f82c58f1e Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 30 Nov 2017 19:29:39 -0500 Subject: [PATCH 17/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/safe_containers_crawler.py | 4 +- crawler/utils/plugincont/namespace.py | 268 ++++++++++++++++++++++++++ 2 files changed, 270 insertions(+), 2 deletions(-) create mode 100644 crawler/utils/plugincont/namespace.py diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py index 1c92ef33..6f2bb46b 100644 --- a/crawler/safe_containers_crawler.py +++ b/crawler/safe_containers_crawler.py @@ -81,7 +81,7 @@ def get_plugincont_features(self, guestcont): return features - def crawl_container(self, container, ignore_plugin_exception=True): + def crawl_container_mini(self, container, ignore_plugin_exception=True): frame = ContainerFrame(self.features, container) try: frame.add_features(self.get_plugincont_features(container)) @@ -90,7 +90,7 @@ def crawl_container(self, container, ignore_plugin_exception=True): raise exc return frame - def crawl_container_org(self, container, ignore_plugin_exception=True): + def crawl_container(self, container, ignore_plugin_exception=True): """ Crawls a specific container and returns a Frame for it. 
diff --git a/crawler/utils/plugincont/namespace.py b/crawler/utils/plugincont/namespace.py new file mode 100644 index 00000000..50d533c6 --- /dev/null +++ b/crawler/utils/plugincont/namespace.py @@ -0,0 +1,268 @@ +#This namespace.py is needed for setns() in userns-remap world +#!/usr/bin/python +# -*- coding: utf-8 -*- +import os +import multiprocessing +import Queue +import logging +import sys +import types +import signal +import ctypes +import misc +import traceback +import time +from crawler_exceptions import CrawlTimeoutError, CrawlError + +logger = logging.getLogger('crawlutils') + +try: + libc = ctypes.CDLL('libc.so.6') +except Exception as e: + libc = None + +ALL_NAMESPACES = [ + 'user', + 'pid', + 'uts', + 'ipc', + 'net', + 'mnt', +] + +IN_CONTAINER_TIMEOUT = 300 + +def get_pid_namespace(pid): + try: + ns = os.stat('/proc/' + str(pid) + '/ns/pid').st_ino + return ns + except Exception: + logger.debug('There is no container with pid=%s running.' + % pid) + return None + + +class ProcessContext: + + def __init__(self, pid, namespaces): + self.namespaces = namespaces + self.pid = pid + + def attach(self): + # Just to be sure log rotation does not happen in the container + + logging.disable(logging.CRITICAL) + + self.container_ns_fds = {} + try: + open_process_namespaces(self.pid, self.container_ns_fds, + self.namespaces) + except Exception as e: + logging.disable(logging.NOTSET) + logger.debug(e) + try: + close_process_namespaces(self.host_ns_fds, self.namespaces) + except Exception as e: + logger.warning('Could not close the namespaces: %s' % e) + raise + + try: + attach_to_process_namespaces(self.container_ns_fds, + self.namespaces) + except Exception as e: + logging.disable(logging.NOTSET) + error_msg = ('Could not attach to a pid={pid} namespace, Exception: {exc}'.format( + pid=self.pid, exc=e)) + logger.error(error_msg) + raise + +def run_as_another_namespace( + pid, + namespaces, + function, + *args, + **kwargs +): + + # Create the queue and its pipes before attaching to the container mnt namespace + queue = multiprocessing.Queue(2 ** 15) + + context = ProcessContext(pid, namespaces) + + # Fork before attaching to the container mnt namespace to drop to a single thread + child_process = multiprocessing.Process(target=_run_as_another_namespace_executor, + args=(queue, context, pid, function, args), + kwargs=kwargs) + child_process.start() + + grandchild_exception = None + try: + (result, grandchild_exception) = queue.get(timeout=IN_CONTAINER_TIMEOUT) + except Queue.Empty: + grandchild_exception = CrawlTimeoutError('Timed out waiting for response from crawler process') + except Exception: + result = None + if grandchild_exception: + result = None + + child_process.join(1) + # If the join timed out the process might still be alive + if child_process.is_alive(): + errmsg = ('Timed out waiting for process %d to exit.' % + child_process.pid) + queue.close() + os.kill(child_process.pid, 9) + logger.error(errmsg) + raise CrawlTimeoutError(errmsg) + + if result is None: + if grandchild_exception: + raise grandchild_exception + raise CrawlError('Unknown crawl error.') + return result + +def signal_handler_sighup(*args): + logger.warning('Crawler parent process died, so exiting... 
Bye!') + exit(1) + +def cache_modules_from_crawler_mnt_namespace(): + prime_process = multiprocessing.Process(target=time.sleep, args=(1,)) + prime_process.start() + prime_process.is_alive() + prime_process.join(0.001) + prime_process.terminate() + prime_process.join() + prime_process.is_alive() + del prime_process + prime_queue = multiprocessing.Queue(2 ** 15) + prime_queue.put('something') + prime_queue.get() + prime_queue.close() + prime_queue.join_thread() + del prime_queue + +def wait_for_linux_thread_cleanup(expected_threads): + start_time = os.times()[4] + while True: + task_count = len(os.listdir('/proc/{}/task'.format(os.getpid()))) + if task_count > expected_threads: + time.sleep(0.001) + else: + break + logger.debug('Waited {} seconds for Linux to cleanup terminated threads'.format(os.times()[4] - start_time)) + +def _run_as_another_namespace_executor(queue, context, pid, function, args, **kwargs): + # Die if the parent dies + PR_SET_PDEATHSIG = 1 + libc.prctl(PR_SET_PDEATHSIG, signal.SIGHUP) + signal.signal(signal.SIGHUP, signal_handler_sighup) + + cache_modules_from_crawler_mnt_namespace() + wait_for_linux_thread_cleanup(1) + try: + context.attach() + except Exception as e: + queue.put((None, e)) + sys.exit(1) + + try: + grandchild_process = multiprocessing.Process( + name='crawler-%s' % pid, + target=function_wrapper, + args=(queue, function, args), + kwargs=kwargs) + grandchild_process.start() + except OSError: + sys.exit(1) + + grandchild_process.join(IN_CONTAINER_TIMEOUT) + # If the join timed out the process might still be alive + if grandchild_process.is_alive(): + os.kill(grandchild_process.pid, 9) + sys.exit(1) + +def function_wrapper( + queue, + function, + *args, + **kwargs +): + + # Die if the parent dies + PR_SET_PDEATHSIG = 1 + libc.prctl(PR_SET_PDEATHSIG, signal.SIGHUP) + signal.signal(signal.SIGHUP, signal_handler_sighup) + + result = None + try: + args = args[0] + result = function(*args) + + # if res is a generator (i.e. 
function uses yield) + if isinstance(result, types.GeneratorType): + result = list(result) + + queue.put((result, None)) + queue.close() + sys.exit(0) + except Exception as e: + e.traceback = traceback.format_exc() + queue.put((None, e)) + queue.close() + sys.exit(1) + +def open_process_namespaces(pid, namespace_fd, namespaces): + for ct_ns in namespaces: + try: + + # arg 0 means readonly + namespace_fd[ct_ns] = libc.open('/proc/' + str(pid) + '/ns/' + ct_ns, 0) + if namespace_fd[ct_ns] == -1: + errno_msg = get_errno_msg(libc) + error_msg = 'Opening the %s namespace file failed: %s' % (ct_ns, errno_msg) + logger.warning(error_msg) + raise OSError('Failed to open {ns} namespace of {pid}: {err}'.format(ns=ct_ns, pid=pid, err=error_msg)) + except Exception as e: + error_msg = 'The open() syscall failed with: %s' % e + logger.warning(error_msg) + raise + +def close_process_namespaces(namespace_fd, namespaces): + for ct_ns in namespaces: + try: + libc.close(namespace_fd[ct_ns]) + except Exception as e: + error_msg = 'The close() syscall failed with: %s' % e + logger.warning(error_msg) + +def attach_to_process_namespaces(namespace_fd, ct_namespaces): + for ct_ns in ct_namespaces: + try: + if hasattr(libc, 'setns'): + r = libc.setns(namespace_fd[ct_ns], 0) + else: + # The Linux kernel ABI should be stable enough + __NR_setns = 308 + r = libc.syscall(__NR_setns, namespace_fd[ct_ns], 0) + if r == -1: + errno_msg = get_errno_msg(libc) + error_msg = ('Could not attach to the container %s ' + 'namespace (fd=%s): %s' % + (ct_ns, namespace_fd[ct_ns], errno_msg)) + logger.warning(error_msg) + raise OSError('Failed to attach to {ns} namespace of {fd}: {err}'.format(ns=ct_ns, fd=namespace_fd[ct_ns], err=error_msg)) + except Exception as e: + error_msg = 'The setns() syscall failed with: %s' % e + logger.warning(error_msg) + logger.exception(e) + raise + +def get_errno_msg(libc): + try: + import ctypes + libc.__errno_location.restype = ctypes.POINTER(ctypes.c_int) + errno = libc.__errno_location().contents.value + errno_msg = os.strerror(errno) + return errno_msg + except Exception: + return 'unknown error' From c099c66c42647bfa378c3748837061e90a9d5231 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Fri, 1 Dec 2017 14:42:22 -0500 Subject: [PATCH 18/47] plugincont wip Signed-off-by: Sahil Suneja --- .cache/v/cache/lastfailed | 1 - Dockerfile | 2 +- crawler/crawler.conf | 4 +- .../pythonpackage_container_crawler.plugin | 2 +- .../pythonpackage_container_crawler.py | 6 +-- .../rubypackage_container_crawler.plugin | 2 +- .../systems/rubypackage_container_crawler.py | 6 +-- crawler/utils/dockerutils.py | 24 +++------ .../plugincont_img/crawler/crawler.conf | 4 +- .../pythonpackage_container_crawler.plugin | 2 +- .../pythonpackage_container_crawler.py | 8 +-- .../rubypackage_container_crawler.plugin | 2 +- .../systems/rubypackage_container_crawler.py | 6 +-- requirements.txt | 2 +- .../test_functional_containers_crawler.py | 2 +- tests/functional/test_functional_ctprobe.py | 2 +- .../test_functional_dockerevents.py | 2 +- .../functional/test_functional_dockerutils.py | 2 +- tests/functional/test_functional_fprobe.py | 2 +- .../test_functional_k8s_environment.py | 2 +- .../functional/test_functional_logs_linker.py | 2 +- tests/functional/test_functional_namespace.py | 2 +- tests/functional/test_functional_plugins.py | 2 +- tests/unit/test_dockerutils.py | 54 +++++++++---------- 24 files changed, 65 insertions(+), 78 deletions(-) delete mode 100644 .cache/v/cache/lastfailed diff --git a/.cache/v/cache/lastfailed 
b/.cache/v/cache/lastfailed deleted file mode 100644 index 9e26dfee..00000000 --- a/.cache/v/cache/lastfailed +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 65fddb4a..265ac7d4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,7 +14,7 @@ COPY \ RUN dpkg -i /tmp/python-socket-datacollector_*_all.deb && \ apt-get -y update && \ apt-get -y install libpcap0.8 && \ - apt-get -y install libpcap-dev && \ + apt-get -y install libcap-dev && \ dpkg -i /tmp/softflowd_0.9.*_amd64.deb && \ pip install pyroute2 py-radix requests-unixsocket json-rpc && \ dpkg -i /tmp/python-conntrackprobe_*_all.deb && \ diff --git a/crawler/crawler.conf b/crawler/crawler.conf index 046fc28c..54bd55bb 100644 --- a/crawler/crawler.conf +++ b/crawler/crawler.conf @@ -17,9 +17,9 @@ [[ process_host ]] - [[ ruby_pkg ]] + [[ rubypackage ]] - [[ python_pkg ]] + [[ pythonpackage ]] avoid_setns = False [[ fprobe_container ]] diff --git a/crawler/plugins/systems/pythonpackage_container_crawler.plugin b/crawler/plugins/systems/pythonpackage_container_crawler.plugin index 9bf66e80..0d45bb0b 100644 --- a/crawler/plugins/systems/pythonpackage_container_crawler.plugin +++ b/crawler/plugins/systems/pythonpackage_container_crawler.plugin @@ -1,5 +1,5 @@ [Core] -Name = python_pkg +Name = pythonpackage Module = pythonpackage_container_crawler [Documentation] diff --git a/crawler/plugins/systems/pythonpackage_container_crawler.py b/crawler/plugins/systems/pythonpackage_container_crawler.py index 45f2af31..8e6b7663 100644 --- a/crawler/plugins/systems/pythonpackage_container_crawler.py +++ b/crawler/plugins/systems/pythonpackage_container_crawler.py @@ -14,7 +14,7 @@ class PythonPackageCrawler(IContainerCrawler): def get_feature(self): - return 'python-package' + return 'pythonpackage' def _crawl_files(self, path, extensions): output = [] @@ -59,7 +59,7 @@ def _get_packages_by_extension(self, mountpoint): yield ( pkg_name, {"pkgname": pkg_name, "pkgversion": pkg_version}, - 'python-package') + 'pythonpackage') def _get_packages_by_cmd(self): # better coverage with pkg_resources.working_set than @@ -87,7 +87,7 @@ def _get_packages_by_cmd(self): yield ( pkg_name, {"pkgname": pkg_name, "pkgversion": pkg_version}, - 'python-package') + 'pythonpackage') def _crawl_without_setns(self, container_id): mountpoint = utils.dockerutils.get_docker_container_rootfs_path( diff --git a/crawler/plugins/systems/rubypackage_container_crawler.plugin b/crawler/plugins/systems/rubypackage_container_crawler.plugin index d89d3dcf..a4a38dc2 100644 --- a/crawler/plugins/systems/rubypackage_container_crawler.plugin +++ b/crawler/plugins/systems/rubypackage_container_crawler.plugin @@ -1,5 +1,5 @@ [Core] -Name = ruby_pkg +Name = rubypackage Module = rubypackage_container_crawler [Documentation] diff --git a/crawler/plugins/systems/rubypackage_container_crawler.py b/crawler/plugins/systems/rubypackage_container_crawler.py index 7cd351da..3ef7a990 100644 --- a/crawler/plugins/systems/rubypackage_container_crawler.py +++ b/crawler/plugins/systems/rubypackage_container_crawler.py @@ -14,7 +14,7 @@ class RubyPackageCrawler(IContainerCrawler): def get_feature(self): - return 'ruby-package' + return 'rubypackage' def _crawl_files(self, path, extension): output = [] @@ -46,7 +46,7 @@ def _get_packages_by_extension(self, mountpoint): yield ( pkg_name, {"pkgname": pkg_name, "pkgversion": pkg_version}, - 'ruby-package') + 'rubypackage') def _get_packages_by_cmd(self): proc = subprocess.Popen( @@ -65,7 +65,7 @@ def 
_get_packages_by_cmd(self): yield ( pkg_name, {"pkgname": pkg_name, "pkgversion": pkg_version}, - 'ruby-package') + 'rubypackage') def _crawl_without_setns(self, container_id): mountpoint = utils.dockerutils.get_docker_container_rootfs_path( diff --git a/crawler/utils/dockerutils.py b/crawler/utils/dockerutils.py index 52766d7b..66116cef 100644 --- a/crawler/utils/dockerutils.py +++ b/crawler/utils/dockerutils.py @@ -30,9 +30,7 @@ def exec_dockerps(): This call executes the `docker inspect` command every time it is invoked. """ try: - # client = docker.Client( - # base_url='unix://var/run/docker.sock', version='auto') - client = docker.APIClient(base_url='unix://var/run/docker.sock') + client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') containers = client.containers() inspect_arr = [] for container in containers: @@ -47,9 +45,7 @@ def exec_dockerps(): def exec_docker_history(long_id): try: - # client = docker.Client(base_url='unix://var/run/docker.sock', - # version='auto') - client = docker.APIClient(base_url='unix://var/run/docker.sock') + client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') image = client.inspect_container(long_id)['Image'] history = client.history(image) return history @@ -72,9 +68,7 @@ def _reformat_inspect(inspect): def exec_dockerinspect(long_id): try: - # client = docker.Client( - # base_url='unix://var/run/docker.sock', version='auto') - client = docker.APIClient(base_url='unix://var/run/docker.sock') + client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') inspect = client.inspect_container(long_id) _reformat_inspect(inspect) except docker.errors.DockerException as e: @@ -110,9 +104,7 @@ def _get_docker_storage_driver(): # Step 1, get it from "docker info" try: - # client = docker.Client( - # base_url='unix://var/run/docker.sock', version='auto') - client = docker.APIClient(base_url='unix://var/run/docker.sock') + client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') driver = client.info()['Driver'] except (docker.errors.DockerException, KeyError): pass # try to continue with the default of 'devicemapper' @@ -197,9 +189,7 @@ def _get_docker_server_version(): """Run the `docker info` command to get server version """ try: - # client = docker.Client( - # base_url='unix://var/run/docker.sock', version='auto') - client = docker.APIClient(base_url='unix://var/run/docker.sock') + client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') return client.version()['Version'] except (docker.errors.DockerException, KeyError) as e: logger.warning(str(e)) @@ -400,9 +390,7 @@ def get_docker_container_rootfs_path(long_id, inspect=None): def poll_container_create_events(timeout=0.1): try: - # client = docker.Client(base_url='unix://var/run/docker.sock', - # version='auto') - client = docker.APIClient(base_url='unix://var/run/docker.sock') + client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') filters = dict() filters['type'] = 'container' filters['event'] = 'start' diff --git a/crawler/utils/plugincont/plugincont_img/crawler/crawler.conf b/crawler/utils/plugincont/plugincont_img/crawler/crawler.conf index 88b042c6..591c6e1f 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/crawler.conf +++ b/crawler/utils/plugincont/plugincont_img/crawler/crawler.conf @@ -16,9 +16,9 @@ [[ process_host ]] - [[ ruby_pkg ]] + [[ rubypackage ]] - [[ python_pkg ]] + [[ pythonpackage ]] avoid_setns = False [[ fprobe_container ]] 
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.plugin index 9bf66e80..0d45bb0b 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.plugin +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.plugin @@ -1,5 +1,5 @@ [Core] -Name = python_pkg +Name = pythonpackage Module = pythonpackage_container_crawler [Documentation] diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py index 35c4d39c..32a466e8 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py @@ -13,7 +13,7 @@ class PythonPackageCrawler(IContainerCrawler): def get_feature(self): - return 'python-package' + return 'pythonpackage' def _crawl_files(self, path, extensions): output = [] @@ -58,7 +58,7 @@ def _get_packages_by_extension(self, mountpoint): yield ( pkg_name, {"pkgname": pkg_name, "pkgversion": pkg_version}, - 'python-package') + 'pythonpackage') def _get_packages_by_cmd(self): # better coverage with pkg_resources.working_set than @@ -86,7 +86,7 @@ def _get_packages_by_cmd(self): yield ( pkg_name, {"pkgname": pkg_name, "pkgversion": pkg_version}, - 'python-package') + 'pythonpackage') def _crawl_without_setns(self, container_id): return self._get_packages_by_extension('/rootfs_local') @@ -110,5 +110,5 @@ def crawl(self, container_id, avoid_setns=False, **kwargs): if avoid_setns: return self._crawl_without_setns(container_id) else: # in all other cases, including wrong mode set - self.get_packages_generic = False # can be made an arg to crawl() + self.get_packages_generic = True # can be made an arg to crawl() return self._crawl_in_system() diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.plugin index d89d3dcf..a4a38dc2 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.plugin +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.plugin @@ -1,5 +1,5 @@ [Core] -Name = ruby_pkg +Name = rubypackage Module = rubypackage_container_crawler [Documentation] diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py index 0bbf1c8b..3984dc8a 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py @@ -11,7 +11,7 @@ class RubyPackageCrawler(IContainerCrawler): def get_feature(self): - return 'ruby-package' + return 'rubypackage' def _crawl_files(self, path, extension): output = [] @@ -43,7 +43,7 @@ def _get_packages_by_extension(self, mountpoint): yield ( pkg_name, {"pkgname": pkg_name, "pkgversion": pkg_version}, - 'ruby-package') + 'rubypackage') def _get_packages_by_cmd(self): proc = 
subprocess.Popen( @@ -62,7 +62,7 @@ def _get_packages_by_cmd(self): yield ( pkg_name, {"pkgname": pkg_name, "pkgversion": pkg_version}, - 'ruby-package') + 'rubypackage') def _crawl_without_setns(self, container_id): return self._get_packages_by_extension('/rootfs_local') diff --git a/requirements.txt b/requirements.txt index fe9370eb..3da5b256 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,4 +12,4 @@ configobj==4.7.0 morph==0.1.2 fluent-logger==0.4.6 requests_unixsocket==0.1.5 -python-iptables +python-iptables==0.12.0 diff --git a/tests/functional/test_functional_containers_crawler.py b/tests/functional/test_functional_containers_crawler.py index 26f322e1..8ad8423d 100644 --- a/tests/functional/test_functional_containers_crawler.py +++ b/tests/functional/test_functional_containers_crawler.py @@ -31,7 +31,7 @@ def setUp(self): ch.setFormatter(formatter) root.addHandler(ch) - self.docker = docker.Client(base_url='unix://var/run/docker.sock', + self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') try: if len(self.docker.containers()) != 0: diff --git a/tests/functional/test_functional_ctprobe.py b/tests/functional/test_functional_ctprobe.py index 379103e0..79192602 100644 --- a/tests/functional/test_functional_ctprobe.py +++ b/tests/functional/test_functional_ctprobe.py @@ -90,7 +90,7 @@ class CtprobeFunctionalTests(unittest.TestCase): image_name = 'alpine:latest' def setUp(self): - self.docker = docker.Client( + self.docker = docker.APIClient( base_url='unix://var/run/docker.sock', version='auto') try: if len(self.docker.containers()) != 0: diff --git a/tests/functional/test_functional_dockerevents.py b/tests/functional/test_functional_dockerevents.py index aa320c8a..32dc853f 100644 --- a/tests/functional/test_functional_dockerevents.py +++ b/tests/functional/test_functional_dockerevents.py @@ -20,7 +20,7 @@ class CrawlerDockerEventTests(unittest.TestCase): def setUp(self): - self.docker = docker.Client( + self.docker = docker.APIClient( base_url='unix://var/run/docker.sock', version='auto') try: if len(self.docker.containers()) != 0: diff --git a/tests/functional/test_functional_dockerutils.py b/tests/functional/test_functional_dockerutils.py index 7dd2b1e2..576f0ef6 100644 --- a/tests/functional/test_functional_dockerutils.py +++ b/tests/functional/test_functional_dockerutils.py @@ -22,7 +22,7 @@ class DockerUtilsTests(unittest.TestCase): long_image_name = 'docker.io/alpine:latest' def setUp(self): - self.docker = docker.Client( + self.docker = docker.APIClient( base_url='unix://var/run/docker.sock', version='auto') try: if len(self.docker.containers()) != 0: diff --git a/tests/functional/test_functional_fprobe.py b/tests/functional/test_functional_fprobe.py index d0160e77..0584ead5 100644 --- a/tests/functional/test_functional_fprobe.py +++ b/tests/functional/test_functional_fprobe.py @@ -97,7 +97,7 @@ class FprobeFunctionalTests(unittest.TestCase): image_name = 'alpine:latest' def setUp(self): - self.docker = docker.Client( + self.docker = docker.APIClient( base_url='unix://var/run/docker.sock', version='auto') try: if len(self.docker.containers()) != 0: diff --git a/tests/functional/test_functional_k8s_environment.py b/tests/functional/test_functional_k8s_environment.py index 2984357e..ee66588a 100644 --- a/tests/functional/test_functional_k8s_environment.py +++ b/tests/functional/test_functional_k8s_environment.py @@ -36,7 +36,7 @@ def setUp(self): ch.setFormatter(formatter) root.addHandler(ch) - self.docker = 
docker.Client(base_url='unix://var/run/docker.sock', + self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') self.k8s_labels = dict() self.k8s_labels[CONT_NAME] = "simson" diff --git a/tests/functional/test_functional_logs_linker.py b/tests/functional/test_functional_logs_linker.py index d1b02da7..90e23fab 100644 --- a/tests/functional/test_functional_logs_linker.py +++ b/tests/functional/test_functional_logs_linker.py @@ -36,7 +36,7 @@ def setUp(self): pass def startContainer(self): - self.docker = docker.Client( + self.docker = docker.APIClient( base_url='unix://var/run/docker.sock', version='auto') self.docker.pull(repository='ubuntu', tag='latest') self.container = self.docker.create_container( diff --git a/tests/functional/test_functional_namespace.py b/tests/functional/test_functional_namespace.py index 50baee2d..95514a88 100644 --- a/tests/functional/test_functional_namespace.py +++ b/tests/functional/test_functional_namespace.py @@ -48,7 +48,7 @@ class NamespaceLibTests(unittest.TestCase): image_name = 'alpine:latest' def setUp(self): - self.docker = docker.Client( + self.docker = docker.APIClient( base_url='unix://var/run/docker.sock', version='auto') try: if len(self.docker.containers()) != 0: diff --git a/tests/functional/test_functional_plugins.py b/tests/functional/test_functional_plugins.py index 8fd9fc69..59bdd269 100644 --- a/tests/functional/test_functional_plugins.py +++ b/tests/functional/test_functional_plugins.py @@ -21,7 +21,7 @@ class HostAndContainerPluginsFunctionalTests(unittest.TestCase): image_name = 'alpine:latest' def setUp(self): - self.docker = docker.Client( + self.docker = docker.APIClient( base_url='unix://var/run/docker.sock', version='auto') try: if len(self.docker.containers()) != 0: diff --git a/tests/unit/test_dockerutils.py b/tests/unit/test_dockerutils.py index f2aa03ee..09c82f40 100644 --- a/tests/unit/test_dockerutils.py +++ b/tests/unit/test_dockerutils.py @@ -88,7 +88,7 @@ def setUp(self): def tearDown(self): pass - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) def test_exec_dockerps(self, *args): for c in utils.dockerutils.exec_dockerps(): @@ -117,7 +117,7 @@ def test_exec_dockerps(self, *args): 'HostIp': ''}]}}, 'Id': 'good_id'} - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.exec_dockerinspect', side_effect=throw_docker_exception) @@ -125,19 +125,19 @@ def test_exec_dockerps_failure(self, *args): with self.assertRaises(DockerutilsException): utils.dockerutils.exec_dockerps() - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) def test_exec_docker_history(self, *args): h = utils.dockerutils.exec_docker_history('ididid') assert h == [{'History': 'xxx'}] - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=throw_docker_exception) def test_exec_docker_history_failure(self, *args): with self.assertRaises(DockerutilsException): utils.dockerutils.exec_docker_history('ididid') - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) def test_exec_docker_inspect(self, *args): i = 
utils.dockerutils.exec_dockerinspect('ididid') @@ -165,13 +165,13 @@ def test_exec_docker_inspect(self, *args): 'HostIp': ''}]}}, 'Id': 'good_id'} - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=throw_docker_exception) def test_exec_docker_inspect_failure(self, *args): with self.assertRaises(DockerutilsException): utils.dockerutils.exec_dockerinspect('ididid') - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=throw_docker_exception) @mock.patch('utils.dockerutils.open') def test_get_docker_storage_driver_step1a(self, mock_open, mock_client): @@ -185,32 +185,32 @@ def test_get_docker_storage_driver_step1a(self, mock_open, mock_client): mock_open.return_value = open('tests/unit/proc_mounts_btrfs') assert utils.dockerutils._get_docker_storage_driver() == 'btrfs' - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=throw_io_error) def test_get_docker_storage_driver_step2(self, mock_open, mock_client): assert utils.dockerutils._get_docker_storage_driver() == 'btrfs' - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=throw_docker_exception) @mock.patch('utils.dockerutils.open', side_effect=throw_io_error) def test_get_docker_storage_driver_failure(self, mock_open, mock_client): assert utils.dockerutils._get_docker_storage_driver() == 'devicemapper' - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) def test_get_docker_server_version(self, mock_client): assert utils.dockerutils._get_docker_server_version() == '1.10.1' - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=throw_docker_exception) def test_get_docker_server_version_failure(self, mock_client): with self.assertRaises(DockerutilsException): utils.dockerutils._get_docker_server_version() - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch( 'crawler.utils.dockerutils.os.path.isfile', @@ -221,7 +221,7 @@ def test_get_json_logs_path_from_path(self, mock_isfile, mock_client): assert utils.dockerutils.get_docker_container_json_logs_path( 'id') == '/var/lib/docker/containers/id/id-json.log' - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.os.path.isfile', side_effect=lambda p: @@ -230,7 +230,7 @@ def test_get_json_logs_path_from_daemon(self, mock_isfile, mock_client): assert utils.dockerutils.get_docker_container_json_logs_path( 'id') == '/a/b/c/log.json' - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.os.path.isfile', side_effect=lambda p: False) @@ -238,7 +238,7 @@ def test_get_json_logs_path_failure(self, mock_isfile, mock_client): with self.assertRaises(DockerutilsNoJsonLog): utils.dockerutils.get_docker_container_json_logs_path('id') - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda 
base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=throw_io_error) @@ -248,7 +248,7 @@ def test_get_rootfs_not_supported_driver_failure( with self.assertRaises(DockerutilsException): utils.dockerutils.get_docker_container_rootfs_path('id') - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=[open('tests/unit/proc_pid_mounts_devicemapper'), @@ -260,7 +260,7 @@ def test_get_rootfs_devicemapper(self, mock_open, mock_client): "65fe676c24fe1faea1f06e222cc3811cc" "9b651c381702ca4f787ffe562a5e39b/rootfs") - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=throw_io_error) @@ -278,7 +278,7 @@ def test_get_rootfs_devicemapper_failure(self, mock_open, mock_client): 'level', '5', 'path', 'sub1/abcde/sub2'), ] ) - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) def test_get_rootfs_btrfs_v1_8(self, mock_client, mock_list): utils.dockerutils.driver = 'btrfs' @@ -288,7 +288,7 @@ def test_get_rootfs_btrfs_v1_8(self, mock_client, mock_list): @mock.patch('utils.dockerutils.misc.btrfs_list_subvolumes', side_effect=throw_runtime_error) - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) def test_get_rootfs_btrfs_v1_8_failure(self, mock_client, mock_list): utils.dockerutils.driver = 'btrfs' @@ -296,7 +296,7 @@ def test_get_rootfs_btrfs_v1_8_failure(self, mock_client, mock_list): with self.assertRaises(DockerutilsException): utils.dockerutils.get_docker_container_rootfs_path('abcde') - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=[open('tests/unit/btrfs_mount_init-id')]) @@ -306,7 +306,7 @@ def test_get_rootfs_btrfs_v1_10(self, mock_open, mock_client): assert utils.dockerutils.get_docker_container_rootfs_path( 'id') == '/var/lib/docker/btrfs/subvolumes/vol1/id/rootfs-a-b-c' - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=throw_io_error) @@ -320,7 +320,7 @@ def test_get_rootfs_btrfs_v1_10_failure(self, mock_open, mock_client): side_effect=lambda d: True) @mock.patch('utils.dockerutils.os.listdir', side_effect=lambda d: ['usr', 'boot', 'var']) - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) def test_get_rootfs_aufs_v1_8(self, *args): utils.dockerutils.driver = 'aufs' @@ -332,7 +332,7 @@ def test_get_rootfs_aufs_v1_8(self, *args): side_effect=lambda d: False) @mock.patch('utils.dockerutils.os.listdir', side_effect=lambda d: ['usr', 'boot', 'var']) - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) def test_get_rootfs_aufs_v1_8_failure(self, *args): utils.dockerutils.driver = 'aufs' @@ -340,7 +340,7 @@ def test_get_rootfs_aufs_v1_8_failure(self, *args): with 
self.assertRaises(DockerutilsException): utils.dockerutils.get_docker_container_rootfs_path('abcde') - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=[open('tests/unit/aufs_mount_init-id')]) @@ -350,7 +350,7 @@ def test_get_rootfs_aufs_v1_10(self, *args): assert utils.dockerutils.get_docker_container_rootfs_path( 'abcde') == '/var/lib/docker/aufs/mnt/vol1/id/rootfs-a-b-c' - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=throw_io_error) @@ -360,7 +360,7 @@ def test_get_rootfs_aufs_v1_10_failure(self, *args): with self.assertRaises(DockerutilsException): utils.dockerutils.get_docker_container_rootfs_path('abcde') - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=[open('tests/unit/vfs_mount_init-id')]) @@ -370,7 +370,7 @@ def test_get_rootfs_vfs_v1_10(self, *args): assert utils.dockerutils.get_docker_container_rootfs_path( 'abcde') == '/var/lib/docker/vfs/dir/vol1/id/rootfs-a-b-c' - @mock.patch('utils.dockerutils.docker.Client', + @mock.patch('utils.dockerutils.docker.APIClient', side_effect=lambda base_url, version: MockedClient()) @mock.patch('utils.dockerutils.open', side_effect=throw_io_error) From 1b8ca7e7eb81fc06f205b784dc6e2088a0479e9e Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Fri, 1 Dec 2017 16:29:59 -0500 Subject: [PATCH 19/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 5 +- crawler/safe_containers_crawler.py | 2 - .../crawler/utils/package_utils.py | 2 +- .../test_functional_safecontainers_crawler.py | 191 ++++++++++++++++++ 4 files changed, 195 insertions(+), 5 deletions(-) create mode 100644 tests/functional/test_functional_safecontainers_crawler.py diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 789955b3..2055de19 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -84,9 +84,9 @@ def set_plugincont_uid(self): try: uid = self._get_next_uid() if uid <= 0: - return + return -1 if self.set_plugincont_dockerfile_uid(uid) != 0: - return + return -1 self.plugincont_host_uid = uid docker_root_dir = utils.dockerutils._get_docker_root_dir() # /var/lib/docker/165536.16553 leaf_dir = docker_root_dir.split('/')[-1] # 165536.165536 @@ -96,6 +96,7 @@ def set_plugincont_uid(self): except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno self.plugincont_host_uid = -1 + return self.plugincont_host_uid def set_plugincont_cgroup_netclsid(self): # self.plugincont_cgroup_netclsid = '43' #random cgroup net cls id diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py index 6f2bb46b..a629f7a9 100644 --- a/crawler/safe_containers_crawler.py +++ b/crawler/safe_containers_crawler.py @@ -52,8 +52,6 @@ def __init__(self, # Return list of features after reading frame from plugin cont def get_plugincont_features(self, guestcont): - #import pdb - #pdb.set_trace() features = [] if self.pluginconts_manager is None: return features diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/package_utils.py 
b/crawler/utils/plugincont/plugincont_img/crawler/utils/package_utils.py index 12f4ba68..b031c42b 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/utils/package_utils.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/package_utils.py @@ -43,7 +43,7 @@ def get_dpkg_packages( # changed to below per Suriya's request feature_key = '{0}'.format(name, version) - yield (feature_key, PackageFeature(None, name, + yield (feature_key, PackageFeature("null", name, size, version, architecture)) diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py new file mode 100644 index 00000000..283779cb --- /dev/null +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -0,0 +1,191 @@ +import unittest +import docker +import requests.exceptions +import tempfile +import os +import time +import shutil +import subprocess +import sys +import pykafka + +# Tests for crawlers in kraken crawlers configuration. + +from safe_containers_crawler import SafeContainersCrawler +from worker import Worker +from emitters_manager import EmittersManager +from utils.dockerutils import get_docker_container_rootfs_path + +import logging + +# Tests conducted with a single container running. + + +class SafeContainersCrawlerTests(unittest.TestCase): + + def setUp(self): + root = logging.getLogger() + root.setLevel(logging.INFO) + ch = logging.StreamHandler(sys.stdout) + ch.setLevel(logging.INFO) + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + ch.setFormatter(formatter) + root.addHandler(ch) + + self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', + version='auto') + try: + if len(self.docker.containers()) != 0: + raise Exception( + "Sorry, this test requires a machine with no docker" + "containers running.") + except requests.exceptions.ConnectionError: + print ("Error connecting to docker daemon, are you in the docker" + "group? 
You need to be in the docker group.") + + self.start_crawled_container() + + # start a kakfa+zookeeper container to send data to (to test our + # kafka emitter) + self.start_kafka_container() + + def start_kafka_container(self): + self.docker.pull(repository='spotify/kafka', tag='latest') + self.kafka_container = self.docker.create_container( + image='spotify/kafka', ports=[9092, 2181], + host_config=self.docker.create_host_config(port_bindings={ + 9092: 9092, + 2181: 2181 + }), + environment={'ADVERTISED_HOST': 'localhost', + 'ADVERTISED_PORT': '9092'}) + self.docker.start(container=self.kafka_container['Id']) + + def start_crawled_container(self): + # start a container to be crawled + self.docker.pull(repository='ruby', tag='latest') + self.container = self.docker.create_container( + image='ruby:latest', command='tail -f /dev/null', + ports=[8192], + host_config=self.docker.create_host_config(port_bindings={ + 8192: 8192, + }), + environment={'ADVERTISED_HOST': 'localhost', + 'ADVERTISED_PORT': '8192'}) + self.tempd = tempfile.mkdtemp(prefix='crawlertest.') + self.docker.start(container=self.container['Id']) + time.sleep(5) + rootfs = get_docker_container_rootfs_path(self.container['Id']) + fd = open(rootfs+'/crawlplugins','w') + fd.write('cpu\n') + fd.write('os\n') + fd.write('memory\n') + fd.write('interface\n') + fd.write('process\n') + fd.write('pythonpackage\n') + fd.close() + + def tearDown(self): + self.remove_crawled_container() + self.remove_kafka_container() + + shutil.rmtree(self.tempd) + + def remove_kafka_container(self): + self.docker.stop(container=self.kafka_container['Id']) + self.docker.remove_container(container=self.kafka_container['Id']) + + def remove_crawled_container(self): + self.docker.stop(container=self.container['Id']) + self.docker.remove_container(container=self.container['Id']) + + def testCrawlContainer1(self): + crawler = SafeContainersCrawler(features=[],user_list=self.container['Id']) + frames = list(crawler.crawl()) + output = str(frames[0]) + print output # only printed if the test fails + assert 'interface-lo' in output + assert 'if_octets_tx' in output + assert 'cpu-0' in output + assert 'cpu_nice' in output + assert 'memory' in output + assert 'memory_buffered' in output + assert 'os' in output + assert 'linux' in output + assert 'process' in output + assert 'tail' in output + assert 'plugincont_user' in output + assert 'pythonpackage' in output + assert 'Python' in output + + def testCrawlContainer2(self): + env = os.environ.copy() + mypath = os.path.dirname(os.path.realpath(__file__)) + os.makedirs(self.tempd + '/out') + + # crawler itself needs to be root + process = subprocess.Popen( + [ + '/usr/bin/python', mypath + '/../../crawler/crawler.py', + '--url', 'file://' + self.tempd + '/out/crawler', + '--features', 'none', + '--crawlContainers', self.container['Id'], + '--format', 'graphite', + '--crawlmode', 'OUTCONTAINERSAFE', + ], + env=env) + stdout, stderr = process.communicate() + assert process.returncode == 0 + + print stderr + print stdout + + subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) + + files = os.listdir(self.tempd + '/out') + assert len(files) == 1 + + f = open(self.tempd + '/out/' + files[0], 'r') + output = f.read() + print output # only printed if the test fails + assert 'interface-lo.if_octets.tx' in output + assert 'cpu-0.cpu-idle' in output + assert 'memory.memory-used' in output + assert 'apt.pkgsize' in output + f.close() + + def testCrawlContainerKafka(self): + env = os.environ.copy() + mypath = 
os.path.dirname(os.path.realpath(__file__)) + os.makedirs(self.tempd + '/out') + + # crawler itself needs to be root + process = subprocess.Popen( + [ + '/usr/bin/python', mypath + '/../../crawler/crawler.py', + '--url', 'kafka://localhost:9092/test', + '--features', 'os,process', + '--crawlContainers', self.container['Id'], + '--crawlmode', 'OUTCONTAINER', + '--numprocesses', '1' + ], + env=env) + stdout, stderr = process.communicate() + assert process.returncode == 0 + + print stderr + print stdout + + kafka = pykafka.KafkaClient(hosts='localhost:9092') + topic = kafka.topics['test'] + consumer = topic.get_simple_consumer() + message = consumer.consume() + assert '"cmd":"tail -f /dev/null"' in message.value + + def _testCrawlContainerKafka(self): + # TODO: verify sandbox restraints here + pass + +if __name__ == '__main__': + unittest.main() From 7dd606bd63ad701f165ac9e6cdaf95a85bf8315c Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Fri, 1 Dec 2017 20:01:26 -0500 Subject: [PATCH 20/47] plugincont wip Signed-off-by: Sahil Suneja --- .../systems/evil_container_crawler.plugin | 8 ++ .../plugins/systems/evil_container_crawler.py | 120 ++++++++++++++++++ .../test_functional_safecontainers_crawler.py | 80 +++++++++++- 3 files changed, 205 insertions(+), 3 deletions(-) create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.plugin create mode 100644 crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.py diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.plugin b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.plugin new file mode 100644 index 00000000..c97c9cf9 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.plugin @@ -0,0 +1,8 @@ +[Core] +Name = evil +Module = evil_container_crawler + +[Documentation] +Author = IBM +Version = 0.1 +Description = This plugin's operations should not succeed when run inside sandbox diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.py new file mode 100644 index 00000000..254def5b --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.py @@ -0,0 +1,120 @@ +import logging + +import psutil + +from icrawl_plugin import IContainerCrawler + +logger = logging.getLogger('crawlutils') + + +class EvilContainerCrawler(IContainerCrawler): + + def get_feature(self): + return 'evil' + + def crawl(self, container_id, avoid_setns=False, **kwargs): + if avoid_setns: + raise NotImplementedError() + return self.crawl_in_system() + + def crawl_in_system(self): + return self.kill_proc() + + def kill_proc(self): + for p in psutil.process_iter(): + status = (p.status() if hasattr(p.status, '__call__' + ) else p.status) + if status == psutil.STATUS_ZOMBIE: + continue + name = (p.name() if hasattr(p.name, '__call__' + ) else p.name) + pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid) + try: + username = (p.username() if hasattr(p, 'username') and + hasattr(p.username, '__call__') else + p.username) + if username == 'plugincont_user': + continue + p.kill() + except psutil.AccessDenied: + yield ( + name, + {"pid": pid, "username": username, "killstatus": "expected_failed"}, + 'evil' + ) + break + except: + continue + yield ( + name, + {"pid": pid, "username": 
username, "killstatus": "unexpected_succeeded"}, + 'evil' + ) + break + + def _crawl_single_process(self, p): + """Returns a ProcessFeature""" + create_time = ( + p.create_time() if hasattr( + p.create_time, + '__call__') else p.create_time) + + name = (p.name() if hasattr(p.name, '__call__' + ) else p.name) + cmdline = (p.cmdline() if hasattr(p.cmdline, '__call__' + ) else p.cmdline) + pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid) + status = (p.status() if hasattr(p.status, '__call__' + ) else p.status) + if status == psutil.STATUS_ZOMBIE: + cwd = 'unknown' # invalid + else: + try: + cwd = (p.cwd() if hasattr(p, 'cwd') and + hasattr(p.cwd, '__call__') else p.getcwd()) + except Exception: + logger.error('Error crawling process %s for cwd' + % pid, exc_info=True) + cwd = 'unknown' + ppid = (p.ppid() if hasattr(p.ppid, '__call__' + ) else p.ppid) + try: + if (hasattr(p, 'num_threads') and + hasattr(p.num_threads, '__call__')): + num_threads = p.num_threads() + else: + num_threads = p.get_num_threads() + except: + num_threads = 'unknown' + + try: + username = (p.username() if hasattr(p, 'username') and + hasattr(p.username, '__call__') else + p.username) + except: + username = 'unknown' + + if username == 'nobody': + return + + openfiles = [] + try: + for f in p.get_open_files(): + openfiles.append(f.path) + openfiles.sort() + except psutil.AccessDenied: + print "got psutil.AccessDenied" + openfiles = [] + + feature_key = '{0}/{1}'.format(name, pid) + return (feature_key, ProcessFeature( + str(' '.join(cmdline)), + create_time, + cwd, + name, + openfiles, + pid, + ppid, + num_threads, + username, + ), 'process') diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index 283779cb..ccf90979 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -152,7 +152,46 @@ def testCrawlContainer2(self): assert 'interface-lo.if_octets.tx' in output assert 'cpu-0.cpu-idle' in output assert 'memory.memory-used' in output - assert 'apt.pkgsize' in output + f.close() + + def testCrawlContainerNoPlugins(self): + rootfs = get_docker_container_rootfs_path(self.container['Id']) + fd = open(rootfs+'/crawlplugins','w') + fd.write('noplugin\n') + fd.close() + + env = os.environ.copy() + mypath = os.path.dirname(os.path.realpath(__file__)) + os.makedirs(self.tempd + '/out') + + # crawler itself needs to be root + process = subprocess.Popen( + [ + '/usr/bin/python', mypath + '/../../crawler/crawler.py', + '--url', 'file://' + self.tempd + '/out/crawler', + '--features', 'none', + '--crawlContainers', self.container['Id'], + '--crawlmode', 'OUTCONTAINERSAFE', + ], + env=env) + stdout, stderr = process.communicate() + assert process.returncode == 0 + + print stderr + print stdout + + subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) + + files = os.listdir(self.tempd + '/out') + assert len(files) == 1 + + f = open(self.tempd + '/out/' + files[0], 'r') + output = f.read() + print output # only printed if the test fails + assert 'metadata' in output + assert 'interface-lo' not in output + assert 'cpu-0' not in output + assert 'memory' not in output f.close() def testCrawlContainerKafka(self): @@ -183,9 +222,44 @@ def testCrawlContainerKafka(self): message = consumer.consume() assert '"cmd":"tail -f /dev/null"' in message.value - def _testCrawlContainerKafka(self): + def testCrawlContainerBadPlugin(self): # TODO: verify sandbox restraints here - 
pass + rootfs = get_docker_container_rootfs_path(self.container['Id']) + fd = open(rootfs+'/crawlplugins','w') + fd.write('evil\n') + fd.close() + + env = os.environ.copy() + mypath = os.path.dirname(os.path.realpath(__file__)) + os.makedirs(self.tempd + '/out') + + # crawler itself needs to be root + process = subprocess.Popen( + [ + '/usr/bin/python', mypath + '/../../crawler/crawler.py', + '--url', 'file://' + self.tempd + '/out/crawler', + '--features', 'none', + '--crawlContainers', self.container['Id'], + '--crawlmode', 'OUTCONTAINERSAFE', + ], + env=env) + stdout, stderr = process.communicate() + assert process.returncode == 0 + + print stderr + print stdout + + subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) + + files = os.listdir(self.tempd + '/out') + assert len(files) == 1 + + f = open(self.tempd + '/out/' + files[0], 'r') + output = f.read() + print output # only printed if the test fails + assert 'killstatus' in output + assert 'expected_failed' in output + f.close() if __name__ == '__main__': unittest.main() From c4823aab83b51149305ea1744d7d40d95c88cd08 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 13:52:01 -0500 Subject: [PATCH 21/47] safe plugin mode with tests Signed-off-by: Sahil Suneja --- .coveragerc | 1 + crawler/containers.py | 2 +- crawler/plugin_containers_manager.py | 208 ++++++++++-------- crawler/safe_containers_crawler.py | 30 ++- crawler/utils/dockerutils.py | 19 +- .../systems/config_container_crawler.py | 10 +- .../plugins/systems/evil_container_crawler.py | 185 ++++++++++------ .../systems/memory_container_crawler.py | 2 +- .../crawler/utils/metric_utils.py | 4 +- .../plugincont_img/requirements.txt.template | 14 ++ .../plugincont_img/requirements.txt.testing | 15 ++ .../test_functional_containers_crawler.py | 6 +- .../test_functional_k8s_environment.py | 8 +- .../test_functional_safecontainers_crawler.py | 62 ++++-- tox.ini | 2 + 15 files changed, 347 insertions(+), 221 deletions(-) create mode 100644 crawler/utils/plugincont/plugincont_img/requirements.txt.template create mode 100644 crawler/utils/plugincont/plugincont_img/requirements.txt.testing create mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc index 46041110..8c2c9130 100644 --- a/.coveragerc +++ b/.coveragerc @@ -10,3 +10,4 @@ exclude_lines = ignore_errors = True omit = tests/* + crawler/utils/plugincont/* diff --git a/crawler/containers.py b/crawler/containers.py index 22a229f2..28d10df6 100644 --- a/crawler/containers.py +++ b/crawler/containers.py @@ -27,7 +27,7 @@ def list_all_containers(user_list='ALL', host_namespace='', user_list=user_list): if group_by_pid_namespace is False: yield _container - else: + else: curr_ns = _container.process_namespace if curr_ns not in visited_ns: visited_ns.add(curr_ns) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 2055de19..8ea21368 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -1,4 +1,3 @@ -import ast import os import sys import time @@ -7,13 +6,11 @@ import iptc import shutil import ctypes -import plugins_manager import utils.dockerutils -from base_crawler import BaseCrawler, BaseFrame -from containers import poll_containers, get_containers from utils.crawler_exceptions import ContainerWithoutCgroups from utils.namespace import run_as_another_namespace + class PluginContainersManager(): def __init__(self, frequency=-1): @@ -22,13 +19,17 @@ def __init__(self, frequency=-1): self.plugincont_image = 'plugincont_image' 
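        # One sidecar per crawled guest (see create_plugincont below): it is
        # named '<plugincont_name_prefix>_<guest long id>', runs
        # crawler_lite.py as plugincont_username, and drops one frame file
        # per interval under plugincont_framedir; the host-side crawler
        # reads each frame back through the sidecar's rootfs and deletes it.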
self.plugincont_name_prefix = 'plugin_cont' self.plugincont_username = 'plugincont_user' - self.plugincont_framedir = '/home/' + self.plugincont_username + '/features/' + self.plugincont_framedir = '/home/' + \ + self.plugincont_username + '/features/' self.plugincont_py_path = '/usr/bin/python2.7' - self.plugincont_seccomp_profile_path = os.getcwd() + '/crawler/utils/plugincont/seccomp-no-ptrace.json' - self.plugincont_image_path = os.getcwd() + '/crawler/utils/plugincont/plugincont_img' + self.plugincont_seccomp_profile_path = os.getcwd( + ) + '/crawler/utils/plugincont/seccomp-no-ptrace.json' + self.plugincont_image_path = os.getcwd() + \ + '/crawler/utils/plugincont/plugincont_img' self.plugincont_guestcont_mountpoint = '/rootfs_local' self.docker_client = docker.from_env() - self.docker_APIclient = docker.APIClient(base_url='unix://var/run/docker.sock') + self.docker_APIclient = docker.APIClient( + base_url='unix://var/run/docker.sock') if self.set_plugincont_uid() == -1: raise ValueError('Failed to verify docker userns-remap settings') if self.set_plugincont_cgroup_netclsid() == -1: @@ -37,7 +38,7 @@ def __init__(self, frequency=-1): raise ValueError('Failed to build image') def is_int(self, s): - try: + try: int(s) return True except ValueError: @@ -51,30 +52,33 @@ def _get_next_uid(self): uid = 1010 uids_in_use = [] try: - fd = open('/etc/passwd','r') + fd = open('/etc/passwd', 'r') for users in fd.readlines(): uids_in_use.append(users.split(':')[2]) - fd.close() + fd.close() while str(uid) in uids_in_use: uid = uid + 1 - except Exception as exc: + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno uid = -1 - return uid + return uid def set_plugincont_dockerfile_uid(self, uid): retVal = 0 uid = str(uid) user = self.plugincont_username try: - shutil.copyfile(self.plugincont_image_path+'/Dockerfile.template', self.plugincont_image_path+'/Dockerfile') - fd = open(self.plugincont_image_path+'/Dockerfile','a') + shutil.copyfile( + self.plugincont_image_path + '/Dockerfile.template', + self.plugincont_image_path + '/Dockerfile') + fd = open(self.plugincont_image_path + '/Dockerfile', 'a') fd.write('RUN groupadd -r ' + user + ' -g ' + uid + '\n') - fd.write('RUN useradd -u ' + uid + ' -m ' + user + ' -g ' + user + '\n') + fd.write('RUN useradd -u ' + uid + + ' -m ' + user + ' -g ' + user + '\n') fd.write('RUN usermod -a -G ' + user + ' ' + user + '\n') fd.write('RUN chsh -s /bin/bash ' + user + '\n') fd.close() - except Exception as exc: + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal @@ -88,31 +92,33 @@ def set_plugincont_uid(self): if self.set_plugincont_dockerfile_uid(uid) != 0: return -1 self.plugincont_host_uid = uid - docker_root_dir = utils.dockerutils._get_docker_root_dir() # /var/lib/docker/165536.16553 + # /var/lib/docker/165536.16553 from docker userns remapping + docker_root_dir = utils.dockerutils._get_docker_root_dir() leaf_dir = docker_root_dir.split('/')[-1] # 165536.165536 possible_sub_uid = leaf_dir.split('.')[0] # 165536 - if self.is_int(possible_sub_uid) is True: # from docker userns remapping + if self.is_int(possible_sub_uid) is True: self.plugincont_host_uid = int(possible_sub_uid) + uid - except Exception as exc: + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno self.plugincont_host_uid = -1 - return self.plugincont_host_uid + return self.plugincont_host_uid def set_plugincont_cgroup_netclsid(self): # self.plugincont_cgroup_netclsid = 
'43' #random cgroup net cls id res_clsid = -1 try: - cgroup_netcls_path = self._get_cgroup_dir(['net_cls','net_cls,net_prio']) - for root, dirs, files in os.walk(cgroup_netcls_path): + cgroup_netcls_path = self._get_cgroup_dir( + ['net_cls', 'net_cls,net_prio']) + for root, dirs, files in os.walk(cgroup_netcls_path): for file in files: if file.endswith('net_cls.classid'): - fd = open(root+'/'+file,'r') + fd = open(root + '/' + file, 'r') clsid = int(fd.readline(), 16) if res_clsid <= clsid: res_clsid = clsid + 1 fd.close() - res_clsid = res_clsid + 2 - except Exception as exc: + res_clsid = res_clsid + 2 + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno res_clsid = -1 self.plugincont_cgroup_netclsid = res_clsid @@ -123,86 +129,97 @@ def destroy_cont(self, id=None, name=None): return if name is not None: _id = name - filter = {'name':name} + filter = {'name': name} else: _id = id - filter = {'id':id} - if client.containers(all=True,filters=filter) != []: + filter = {'id': id} + if client.containers(all=True, filters=filter) != []: client.stop(_id) client.remove_container(_id) - + def set_plugincont_py_cap(self, plugincont_id): retVal = 0 verify = False try: - rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id) - py_path = rootfs+self.plugincont_py_path + rootfs = utils.dockerutils.get_docker_container_rootfs_path( + plugincont_id) + py_path = rootfs + self.plugincont_py_path libcap = ctypes.cdll.LoadLibrary("libcap.so") - caps = libcap.cap_from_text('cap_dac_read_search,cap_sys_chroot,cap_sys_ptrace+ep') - retVal = libcap.cap_set_file(py_path,caps) + caps = libcap.cap_from_text( + 'cap_dac_read_search,cap_sys_chroot,cap_sys_ptrace+ep') + retVal = libcap.cap_set_file(py_path, caps) if verify is True: libcap.cap_to_text.restype = ctypes.c_char_p - caps_set = libcap.cap_get_file(py_path,caps) + caps_set = libcap.cap_get_file(py_path, caps) caps_set_str = libcap.cap_to_text(caps_set, None) assert 'cap_dac_read_search' in caps_set_str assert 'cap_sys_chroot' in caps_set_str assert 'cap_sys_ptrace' in caps_set_str - except Exception as exc: + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 - return retVal + return retVal def build_plugincont_img(self): retVal = 0 - build_status = list(self.docker_APIclient.build(path=self.plugincont_image_path, tag=self.plugincont_image)) + build_status = list(self.docker_APIclient.build( + path=self.plugincont_image_path, tag=self.plugincont_image)) assert 'Successfully built' in build_status[-1] try: plugincont = self.docker_client.containers.run( - image=self.plugincont_image, + image=self.plugincont_image, command="tail -f /dev/null", detach=True) - time.sleep(5) + time.sleep(5) retVal = self.set_plugincont_py_cap(plugincont.id) if retVal == 0: - self.docker_APIclient.commit(plugincont.id,repository=self.plugincont_image) - self.destroy_cont(id=plugincont.id) - except Exception as exc: + self.docker_APIclient.commit( + plugincont.id, repository=self.plugincont_image) + self.destroy_cont(id=plugincont.id) + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 - return retVal - + return retVal + def get_plugincont_framedir(self, guestcont): frame_dir = None if guestcont is not None and guestcont.plugincont is not None: plugincont_id = guestcont.plugincont.id - rootfs = utils.dockerutils.get_docker_container_rootfs_path(plugincont_id) - frame_dir = rootfs+self.plugincont_framedir - return frame_dir + rootfs = 
utils.dockerutils.get_docker_container_rootfs_path( + plugincont_id) + frame_dir = rootfs + self.plugincont_framedir + return frame_dir def create_plugincont(self, guestcont): guestcont_id = guestcont.long_id - guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path(guestcont_id) + guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path( + guestcont_id) plugincont = None - plugincont_name = self.plugincont_name_prefix+'_'+guestcont_id - seccomp_attr = json.dumps(json.load(open(self.plugincont_seccomp_profile_path))) + plugincont_name = self.plugincont_name_prefix + '_' + guestcont_id + seccomp_attr = json.dumps( + json.load(open(self.plugincont_seccomp_profile_path))) client = self.docker_client try: self.destroy_cont(name=plugincont_name) plugincont = client.containers.run( - image=self.plugincont_image, + image=self.plugincont_image, name=plugincont_name, user=self.plugincont_username, - command="/usr/bin/python2.7 /crawler/crawler_lite.py --frequency="+str(self.frequency), - pid_mode='container:'+guestcont_id, - network_mode='container:'+guestcont_id, - cap_add=["SYS_PTRACE","DAC_READ_SEARCH"], - security_opt=['seccomp:'+seccomp_attr], - volumes={guestcont_rootfs:{'bind':self.plugincont_guestcont_mountpoint,'mode':'ro'}}, + command="/usr/bin/python2.7 /crawler/crawler_lite.py " + "--frequency=" + str(self.frequency), + pid_mode='container:' + guestcont_id, + network_mode='container:' + guestcont_id, + cap_add=["SYS_PTRACE", "DAC_READ_SEARCH"], + security_opt=['seccomp:' + seccomp_attr], + volumes={ + guestcont_rootfs: { + 'bind': self.plugincont_guestcont_mountpoint, + 'mode': 'ro'}}, detach=True) - time.sleep(5) - except Exception as exc: + time.sleep(5) + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno - + self.pluginconts[str(guestcont_id)] = plugincont guestcont.plugincont = plugincont @@ -217,16 +234,16 @@ def _add_iptable_rules(self): rule.target = iptc.Target(rule, "DROP") chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT") chain.insert_rule(rule) - + rule = iptc.Rule() match = iptc.Match(rule, "cgroup") - match.cgroup = str(self.plugincont_cgroup_netclsid) + match.cgroup = str(self.plugincont_cgroup_netclsid) rule.add_match(match) rule.src = "!127.0.0.1" rule.target = iptc.Target(rule, "DROP") chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT") chain.insert_rule(rule) - except Exception as exc: + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal @@ -250,55 +267,56 @@ def _get_cgroup_dir(self, devlist=[]): def _setup_netcls_cgroup(self, plugincont_id): retVal = 0 try: - # cgroup_netcls_path = '/sys/fs/cgroup/net_cls/docker/'+plugincont_id - cgroup_netcls_path = self._get_cgroup_dir(['net_cls','net_cls,net_prio'])+'/docker/'+plugincont_id - tasks_path = cgroup_netcls_path+'/tasks' - block_path = cgroup_netcls_path+'/block' - block_classid_path = block_path+'/net_cls.classid' - block_tasks_path = block_path+'/tasks' - + # cgroup_netcls_path = + # '/sys/fs/cgroup/net_cls/docker/'+plugincont_id + cgroup_netcls_path = self._get_cgroup_dir( + ['net_cls', 'net_cls,net_prio']) + '/docker/' + plugincont_id + tasks_path = cgroup_netcls_path + '/tasks' + block_path = cgroup_netcls_path + '/block' + block_classid_path = block_path + '/net_cls.classid' + block_tasks_path = block_path + '/tasks' + if not os.path.isdir(block_path): os.makedirs(block_path) - - fd = open(block_classid_path,'w') + + fd = open(block_classid_path, 'w') 
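        # Tag the tasks moved into this sub-cgroup with the reserved
        # classid; the iptables rules from _add_iptable_rules() match on
        # that cgroup id and DROP non-loopback traffic in both directions,
        # so a plugin sharing the guest's network namespace still cannot
        # reach the network.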
fd.write(str(self.plugincont_cgroup_netclsid)) fd.close() - - fd = open(tasks_path,'r') - plugincont_pids = fd.readlines() #should be just one pid == plugincont_pid + + fd = open(tasks_path, 'r') + plugincont_pids = fd.readlines() + # should be just one pid == plugincont_pid fd.close() - - fd = open(block_tasks_path,'w') + + fd = open(block_tasks_path, 'w') for pid in plugincont_pids: fd.write(pid) fd.close() - except Exception as exc: + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 - return retVal - + return retVal + def set_plugincont_iptables(self, plugincont_id): retVal = 0 try: client = self.docker_APIclient - plugincont_pid = client.inspect_container(plugincont_id)['State']['Pid'] - #netns_path = '/var/run/netns' - #if not os.path.isdir(netns_path): - # os.makedirs(netns_path) + plugincont_pid = client.inspect_container( + plugincont_id)['State']['Pid'] retVal = self._setup_netcls_cgroup(plugincont_id) if retVal == 0: retVal = run_as_another_namespace(str(plugincont_pid), - ['net'], - self._add_iptable_rules) - except Exception as exc: + ['net'], + self._add_iptable_rules) + except Exception as exc: print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 - return retVal - + return retVal + def destroy_plugincont(self, guestcont): guestcont_id = str(guestcont.long_id) - plugincont_id = guestcont.plugincont.id - self.destroy_cont(id=plugincont_id) + plugincont_id = guestcont.plugincont.id + self.destroy_cont(id=plugincont_id) guestcont.plugincont = None self.pluginconts.pop(str(guestcont_id)) @@ -311,10 +329,8 @@ def setup_plugincont(self, guestcont): self.create_plugincont(guestcont) if guestcont.plugincont is None: return - - plugincont_id = guestcont.plugincont.id + + plugincont_id = guestcont.plugincont.id if self.set_plugincont_iptables(plugincont_id) != 0: self.destroy_plugincont(guestcont) return - - diff --git a/crawler/safe_containers_crawler.py b/crawler/safe_containers_crawler.py index a629f7a9..b4cfaf78 100644 --- a/crawler/safe_containers_crawler.py +++ b/crawler/safe_containers_crawler.py @@ -2,16 +2,11 @@ import os import sys import time -import json -import docker -import iptc import plugins_manager -import utils.dockerutils from base_crawler import BaseCrawler, BaseFrame from plugin_containers_manager import PluginContainersManager -from containers import poll_containers, get_containers -from utils.crawler_exceptions import ContainerWithoutCgroups -from utils.namespace import run_as_another_namespace +from containers import get_containers + class ContainerFrame(BaseFrame): @@ -49,7 +44,7 @@ def __init__(self, self.pluginconts_manager = PluginContainersManager(frequency) except ValueError as err: print(err.args) - + # Return list of features after reading frame from plugin cont def get_plugincont_features(self, guestcont): features = [] @@ -60,24 +55,24 @@ def get_plugincont_features(self, guestcont): self.pluginconts_manager.setup_plugincont(guestcont) if guestcont.plugincont is None: return features - frame_dir = self.pluginconts_manager.get_plugincont_framedir(guestcont) + frame_dir = self.pluginconts_manager.get_plugincont_framedir(guestcont) try: frame_list = os.listdir(frame_dir) frame_list.sort(key=int) if frame_list != []: - earliest_frame_file = frame_dir+frame_list[0] + earliest_frame_file = frame_dir + frame_list[0] fd = open(earliest_frame_file) for feature_line in fd.readlines(): (type, key, val) = feature_line.strip().split('\t') - features.append((ast.literal_eval(key), 
ast.literal_eval(val), type)) - fd.close() + features.append( + (ast.literal_eval(key), ast.literal_eval(val), type)) + fd.close() os.remove(earliest_frame_file) - except Exception as exc: + except Exception as exc: print exc print sys.exc_info()[0] - + return features - def crawl_container_mini(self, container, ignore_plugin_exception=True): frame = ContainerFrame(self.features, container) @@ -129,7 +124,7 @@ def polling_crawl(self, timeout, ignore_plugin_exception=True): :return: a Frame object """ # Not implemented - time.sleep(timeout) + time.sleep(timeout) return None def crawl(self, ignore_plugin_exception=True): @@ -146,5 +141,6 @@ def crawl(self, ignore_plugin_exception=True): host_namespace=self.host_namespace, group_by_pid_namespace=False) for container in containers_list: - if not container.name.startswith(self.pluginconts_manager.plugincont_name_prefix): + plugincont_prefix = self.pluginconts_manager.plugincont_name_prefix + if not container.name.startswith(plugincont_prefix): yield self.crawl_container(container, ignore_plugin_exception) diff --git a/crawler/utils/dockerutils.py b/crawler/utils/dockerutils.py index 66116cef..71230f5d 100644 --- a/crawler/utils/dockerutils.py +++ b/crawler/utils/dockerutils.py @@ -30,7 +30,8 @@ def exec_dockerps(): This call executes the `docker inspect` command every time it is invoked. """ try: - client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') + client = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') containers = client.containers() inspect_arr = [] for container in containers: @@ -45,7 +46,8 @@ def exec_dockerps(): def exec_docker_history(long_id): try: - client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') + client = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') image = client.inspect_container(long_id)['Image'] history = client.history(image) return history @@ -68,7 +70,8 @@ def _reformat_inspect(inspect): def exec_dockerinspect(long_id): try: - client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') + client = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') inspect = client.inspect_container(long_id) _reformat_inspect(inspect) except docker.errors.DockerException as e: @@ -104,7 +107,8 @@ def _get_docker_storage_driver(): # Step 1, get it from "docker info" try: - client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') + client = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') driver = client.info()['Driver'] except (docker.errors.DockerException, KeyError): pass # try to continue with the default of 'devicemapper' @@ -189,7 +193,8 @@ def _get_docker_server_version(): """Run the `docker info` command to get server version """ try: - client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') + client = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') return client.version()['Version'] except (docker.errors.DockerException, KeyError) as e: logger.warning(str(e)) @@ -290,6 +295,7 @@ def _get_docker_root_dir(): logger.warning(str(e)) raise DockerutilsException('Failed to get docker info') + def _get_container_rootfs_path_aufs(long_id, inspect=None): rootfs_path = None @@ -390,7 +396,8 @@ def get_docker_container_rootfs_path(long_id, inspect=None): def poll_container_create_events(timeout=0.1): try: - client = docker.APIClient(base_url='unix://var/run/docker.sock', 
version='auto') + client = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') filters = dict() filters['type'] = 'container' filters['event'] = 'start' diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py index f99f93a5..ea1697f3 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py @@ -56,11 +56,11 @@ def crawl( else: # in all other cases, including wrong mode set real_root = os.open('/', os.O_RDONLY) os.chroot('/rootfs_local') - config_list = list(crawl_config_files( root_dir, - exclude_dirs, - None, - known_config_files, - discover_config_files)) + config_list = list(crawl_config_files(root_dir, + exclude_dirs, + None, + known_config_files, + discover_config_files)) os.fchdir(real_root) os.chroot('.') return config_list diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.py index 254def5b..3def6c70 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/evil_container_crawler.py @@ -1,5 +1,5 @@ import logging - +import os import psutil from icrawl_plugin import IContainerCrawler @@ -18,7 +18,11 @@ def crawl(self, container_id, avoid_setns=False, **kwargs): return self.crawl_in_system() def crawl_in_system(self): - return self.kill_proc() + yield self.kill_proc() + yield self.trace_proc() + yield self.write_guest_rootfs() + yield self.rm_guest_rootfs() + yield self.nw() def kill_proc(self): for p in psutil.process_iter(): @@ -35,86 +39,121 @@ def kill_proc(self): p.username) if username == 'plugincont_user': continue - p.kill() + p.kill() except psutil.AccessDenied: - yield ( - name, - {"pid": pid, "username": username, "killstatus": "expected_failed"}, + return ( + 'kill_proc', + {"pname": name, "pid": pid, "username": + username, "kill_status": "expected_failed"}, 'evil' - ) - break + ) + break except: continue - yield ( - name, - {"pid": pid, "username": username, "killstatus": "unexpected_succeeded"}, + return ( + 'kill_proc', + {"pname": name, "pid": pid, "username": + username, "kill_status": "unexpected_succeeded"}, 'evil' - ) - break - - def _crawl_single_process(self, p): - """Returns a ProcessFeature""" - create_time = ( - p.create_time() if hasattr( - p.create_time, - '__call__') else p.create_time) + ) + break - name = (p.name() if hasattr(p.name, '__call__' - ) else p.name) - cmdline = (p.cmdline() if hasattr(p.cmdline, '__call__' - ) else p.cmdline) - pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid) - status = (p.status() if hasattr(p.status, '__call__' - ) else p.status) - if status == psutil.STATUS_ZOMBIE: - cwd = 'unknown' # invalid - else: + def trace_proc(self): + for p in psutil.process_iter(): + status = (p.status() if hasattr(p.status, '__call__' + ) else p.status) + if status == psutil.STATUS_ZOMBIE: + continue + name = (p.name() if hasattr(p.name, '__call__' + ) else p.name) + pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid) try: - cwd = (p.cwd() if hasattr(p, 'cwd') and - hasattr(p.cwd, '__call__') else p.getcwd()) - except Exception: - logger.error('Error crawling process %s for 
cwd' - % pid, exc_info=True) - cwd = 'unknown' - ppid = (p.ppid() if hasattr(p.ppid, '__call__' - ) else p.ppid) - try: - if (hasattr(p, 'num_threads') and - hasattr(p.num_threads, '__call__')): - num_threads = p.num_threads() - else: - num_threads = p.get_num_threads() - except: - num_threads = 'unknown' + username = (p.username() if hasattr(p, 'username') and + hasattr(p.username, '__call__') else + p.username) + except: + username = 'unknown' + try: + import ptrace + import ptrace.debugger + import ptrace.error + debugger = ptrace.debugger.PtraceDebugger() + process = debugger.addProcess(int(pid), False) + ret = ( + 'trace_proc', + {"pname": name, "pid": pid, "username": username, + "trace_status": "unexpected_succeeded"}, + 'evil' + ) + process.detach() + break + except ptrace.error.PtraceError: + ret = ( + 'trace_proc', + {"pname": name, "pid": pid, "username": + username, "trace_status": "expected_failed"}, + 'evil' + ) + break + return ret + def write_guest_rootfs(self): + real_root = os.open('/', os.O_RDONLY) + os.chroot('/rootfs_local') + filename = '/bin/ls' try: - username = (p.username() if hasattr(p, 'username') and - hasattr(p.username, '__call__') else - p.username) - except: - username = 'unknown' - - if username == 'nobody': - return + fd = open(filename, 'w') + ret = ( + 'write_to_file', + {"filename": filename, "write_status": "unexpected_succeeded"}, + 'evil' + ) + fd.close() + except IOError: + ret = ( + 'write_to_file', + {"filename": filename, "write_status": "expected_failed"}, + 'evil' + ) + os.fchdir(real_root) + os.chroot('.') + return ret - openfiles = [] + def rm_guest_rootfs(self): + real_root = os.open('/', os.O_RDONLY) + os.chroot('/rootfs_local') + filename = '/bin/ls' try: - for f in p.get_open_files(): - openfiles.append(f.path) - openfiles.sort() - except psutil.AccessDenied: - print "got psutil.AccessDenied" - openfiles = [] + os.remove(filename) + ret = ( + 'rm_file', + {"filename": filename, "rm_status": "unexpected_succeeded"}, + 'evil' + ) + fd.close() + except OSError: + ret = ( + 'rm_file', + {"filename": filename, "rm_status": "expected_failed"}, + 'evil' + ) + os.fchdir(real_root) + os.chroot('.') + return ret - feature_key = '{0}/{1}'.format(name, pid) - return (feature_key, ProcessFeature( - str(' '.join(cmdline)), - create_time, - cwd, - name, - openfiles, - pid, - ppid, - num_threads, - username, - ), 'process') + def nw(self): + hostname = 'www.google.com' + r = os.system("wget " + hostname) + if r != 0: + ret = ( + 'nw', + {"host": hostname, "nw_status": "expected_failed"}, + 'evil' + ) + else: + ret = ( + 'nw', + {"host": hostname, "nw_status": "unexpected_succeeded"}, + 'evil' + ) + return ret diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py index 038a58b8..de59f52c 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py @@ -37,7 +37,7 @@ def crawl(self, container_id, avoid_setns=False, **kwargs): used = buffered = cached = free = 'unknown' with open(self.get_memory_cgroup_path('memory.stat' - ), 'r') as f: + ), 'r') as f: for line in f: (key, value) = line.strip().split(' ') if key == 'total_cache': diff --git a/crawler/utils/plugincont/plugincont_img/crawler/utils/metric_utils.py 
b/crawler/utils/plugincont/plugincont_img/crawler/utils/metric_utils.py index 9c08c656..ec30064c 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/utils/metric_utils.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/utils/metric_utils.py @@ -50,9 +50,9 @@ def crawl_metrics(): # http://lukasz.langa.pl/5/error-opening-file-for-reading-permission-denied/ print "got psutil.AccessDenied for pid:", pid ioinfo = namedtuple('ioinfo', ['read_count', 'write_count', - 'read_bytes', 'write_bytes']) + 'read_bytes', 'write_bytes']) ioinfo.read_bytes = 0 - ioinfo.write_bytes = 0 + ioinfo.write_bytes = 0 cpu_percent = _crawl_metrics_cpu_percent(p) diff --git a/crawler/utils/plugincont/plugincont_img/requirements.txt.template b/crawler/utils/plugincont/plugincont_img/requirements.txt.template new file mode 100644 index 00000000..7fe20159 --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/requirements.txt.template @@ -0,0 +1,14 @@ +psutil==2.1.3 +requests>=2.7.13 +netifaces==0.10.4 +kafka-python==1.3.1 +pykafka==1.1.0 +kafka==1.3.3 +docker-py==1.10.6 +python-dateutil==2.4.2 +semantic_version==2.5.0 +Yapsy==1.11.223 +configobj==4.7.0 +morph==0.1.2 +fluent-logger==0.4.6 +requests_unixsocket==0.1.5 diff --git a/crawler/utils/plugincont/plugincont_img/requirements.txt.testing b/crawler/utils/plugincont/plugincont_img/requirements.txt.testing new file mode 100644 index 00000000..723e7a0d --- /dev/null +++ b/crawler/utils/plugincont/plugincont_img/requirements.txt.testing @@ -0,0 +1,15 @@ +psutil==2.1.3 +requests>=2.7.13 +netifaces==0.10.4 +kafka-python==1.3.1 +pykafka==1.1.0 +kafka==1.3.3 +docker-py==1.10.6 +python-dateutil==2.4.2 +semantic_version==2.5.0 +Yapsy==1.11.223 +configobj==4.7.0 +morph==0.1.2 +fluent-logger==0.4.6 +requests_unixsocket==0.1.5 +python-ptrace==0.9.3 diff --git a/tests/functional/test_functional_containers_crawler.py b/tests/functional/test_functional_containers_crawler.py index 8ad8423d..d03b658b 100644 --- a/tests/functional/test_functional_containers_crawler.py +++ b/tests/functional/test_functional_containers_crawler.py @@ -32,7 +32,7 @@ def setUp(self): root.addHandler(ch) self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', - version='auto') + version='auto') try: if len(self.docker.containers()) != 0: raise Exception( @@ -229,8 +229,8 @@ def testCrawlContainerAvoidSetns(self): output = str(frames[0]) print output # only printed if the test fails # interface in avoid_setns mode is not supported - #assert 'interface-lo' in output - #assert 'if_octets_tx=' in output + # assert 'interface-lo' in output + # assert 'if_octets_tx=' in output assert 'cpu-0' in output assert 'cpu_nice=' in output assert 'memory' in output diff --git a/tests/functional/test_functional_k8s_environment.py b/tests/functional/test_functional_k8s_environment.py index ee66588a..707cff51 100644 --- a/tests/functional/test_functional_k8s_environment.py +++ b/tests/functional/test_functional_k8s_environment.py @@ -24,6 +24,7 @@ POD_NS = "io.kubernetes.pod.namespace" K8S_DELIMITER = "/" + class ContainersCrawlerTests(unittest.TestCase): def setUp(self): @@ -37,7 +38,7 @@ def setUp(self): root.addHandler(ch) self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', - version='auto') + version='auto') self.k8s_labels = dict() self.k8s_labels[CONT_NAME] = "simson" self.k8s_labels[POD_NAME] = "pod-test" @@ -58,7 +59,7 @@ def start_crawled_container(self): # start a container to be crawled self.docker.pull(repository='ubuntu', tag='latest') self.container = 
self.docker.create_container( - image='ubuntu:latest', labels = self.k8s_labels, command='/bin/sleep 60') + image='ubuntu:latest', labels=self.k8s_labels, command='/bin/sleep 60') self.tempd = tempfile.mkdtemp(prefix='crawlertest.') self.docker.start(container=self.container['Id']) @@ -94,6 +95,7 @@ def testCrawlContainer1(self): ''' Test for graphite o/p format. ''' + def testCrawlContainer2(self): env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) @@ -141,6 +143,7 @@ def testCrawlContainer2(self): ''' Test for csv o/p format ''' + def testCrawlContainer3(self): env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) @@ -191,6 +194,7 @@ def testCrawlContainer3(self): ''' Test for json o/p format ''' + def testCrawlContainer4(self): env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index ccf90979..3735bb7b 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -34,7 +34,7 @@ def setUp(self): root.addHandler(ch) self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', - version='auto') + version='auto') try: if len(self.docker.containers()) != 0: raise Exception( @@ -77,13 +77,13 @@ def start_crawled_container(self): self.docker.start(container=self.container['Id']) time.sleep(5) rootfs = get_docker_container_rootfs_path(self.container['Id']) - fd = open(rootfs+'/crawlplugins','w') + fd = open(rootfs + '/crawlplugins', 'w') fd.write('cpu\n') fd.write('os\n') fd.write('memory\n') fd.write('interface\n') fd.write('process\n') - fd.write('pythonpackage\n') + fd.write('rubypackage\n') fd.close() def tearDown(self): @@ -101,7 +101,8 @@ def remove_crawled_container(self): self.docker.remove_container(container=self.container['Id']) def testCrawlContainer1(self): - crawler = SafeContainersCrawler(features=[],user_list=self.container['Id']) + crawler = SafeContainersCrawler( + features=[], user_list=self.container['Id']) frames = list(crawler.crawl()) output = str(frames[0]) print output # only printed if the test fails @@ -116,8 +117,8 @@ def testCrawlContainer1(self): assert 'process' in output assert 'tail' in output assert 'plugincont_user' in output - assert 'pythonpackage' in output - assert 'Python' in output + assert 'rubypackage' in output + assert 'rake' in output def testCrawlContainer2(self): env = os.environ.copy() @@ -153,13 +154,13 @@ def testCrawlContainer2(self): assert 'cpu-0.cpu-idle' in output assert 'memory.memory-used' in output f.close() - + def testCrawlContainerNoPlugins(self): rootfs = get_docker_container_rootfs_path(self.container['Id']) - fd = open(rootfs+'/crawlplugins','w') + fd = open(rootfs + '/crawlplugins', 'w') fd.write('noplugin\n') fd.close() - + env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) os.makedirs(self.tempd + '/out') @@ -221,14 +222,32 @@ def testCrawlContainerKafka(self): consumer = topic.get_simple_consumer() message = consumer.consume() assert '"cmd":"tail -f /dev/null"' in message.value - - def testCrawlContainerBadPlugin(self): - # TODO: verify sandbox restraints here + + def _setup_plugincont_testing1(self): + plugincont_name = '/plugin_cont_' + self.container['Id'] + for container in self.docker.containers(): + if plugincont_name in container['Names']: + plugincont_id = container['Id'] + exec_instance = 
self.docker.exec_create( + container=plugincont_id, + user='root', + cmd='pip install python-ptrace') + self.docker.exec_start(exec_instance.get("Id")) + + def _setup_plugincont_testing2(self): + plugincont_image_path = os.getcwd() + \ + '/crawler/utils/plugincont/plugincont_img' + shutil.copyfile(plugincont_image_path + '/requirements.txt.testing', + plugincont_image_path + '/requirements.txt') + + def testCrawlContainerEvilPlugin(self): rootfs = get_docker_container_rootfs_path(self.container['Id']) - fd = open(rootfs+'/crawlplugins','w') + fd = open(rootfs + '/crawlplugins', 'w') fd.write('evil\n') fd.close() - + + self._setup_plugincont_testing2() + env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) os.makedirs(self.tempd + '/out') @@ -257,9 +276,21 @@ f = open(self.tempd + '/out/' + files[0], 'r') output = f.read() print output # only printed if the test fails - assert 'killstatus' in output + assert 'kill_status' in output + assert 'trace_status' in output + assert 'write_status' in output + assert 'rm_status' in output + assert 'nw_status' in output + assert 'unexpected_succeeded' not in output assert 'expected_failed' in output f.close() + def testFixArtifacts(self): + plugincont_image_path = os.getcwd() + \ + '/crawler/utils/plugincont/plugincont_img' + shutil.copyfile(plugincont_image_path + '/requirements.txt.template', + plugincont_image_path + '/requirements.txt') + + if __name__ == '__main__': unittest.main() diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..671a93a7 --- /dev/null +++ b/tox.ini @@ -0,0 +1,2 @@ +[flake8] +exclude = crawler/utils/plugincont From 3593694cc9a6ec98ea4920e3e91a85bf36a755ce Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 13:53:10 -0500 Subject: [PATCH 22/47] safe plugin mode with tests Signed-off-by: Sahil Suneja --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3da5b256..14f4f8c5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ netifaces==0.10.4 kafka-python==1.3.1 pykafka==1.1.0 kafka==1.3.3 -docker=2.0.0 +docker==2.0.0 python-dateutil==2.4.2 semantic_version==2.5.0 Yapsy==1.11.223 From 2ea4e3ecd818556affd6c2fe9fe5bd0413a2b762 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 14:24:26 -0500 Subject: [PATCH 23/47] safe plugin mode with tests Signed-off-by: Sahil Suneja --- crawler/utils/dockerutils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crawler/utils/dockerutils.py b/crawler/utils/dockerutils.py index 71230f5d..a4751a9b 100644 --- a/crawler/utils/dockerutils.py +++ b/crawler/utils/dockerutils.py @@ -287,7 +287,8 @@ def _get_container_rootfs_path_btrfs(long_id, inspect=None): def _get_docker_root_dir(): try: - client = docker.from_env() + client = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') docker_info = client.info() root_dir = str(docker_info['DockerRootDir']) return root_dir From 76af887961430a5daf6175ed652e2ad9f2d5664b Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 14:29:16 -0500 Subject: [PATCH 24/47] safe plugin mode with tests Signed-off-by: Sahil Suneja --- tests/unit/test_dockerutils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/test_dockerutils.py b/tests/unit/test_dockerutils.py index 09c82f40..9453d49a 100644 --- a/tests/unit/test_dockerutils.py +++ b/tests/unit/test_dockerutils.py @@ -14,8 +14,8 @@
def containers(self): return [{'Id': 'good_id'}] def info(self): - return {'Driver': 'btrfs'} - + return {'Driver': 'btrfs', 'DockerRootDir': '/var/lib/docker'} + def version(self): return {'Version': '1.10.1'} From 59b1d880b8aa3df9acb5cedd696e8b8391470784 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 15:06:05 -0500 Subject: [PATCH 25/47] plugincont wip Signed-off-by: Sahil Suneja --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index b1e66b3e..1b7e2f84 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,6 +33,8 @@ before_install: - cp -r psvmi/maps maps - cp -r psvmi/offsets offsets - cp psvmi/header.h . + # for safe plugin mode + - sudo apt-get install libcap-dev # command to install dependencies # XXX: Now mock complains if we don't `sudo pip install`. From 86c319b32764ddce1a7cf2622470f5e024183f26 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 15:15:18 -0500 Subject: [PATCH 26/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 8ea21368..eaab581e 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -27,9 +27,11 @@ def __init__(self, frequency=-1): self.plugincont_image_path = os.getcwd() + \ '/crawler/utils/plugincont/plugincont_img' self.plugincont_guestcont_mountpoint = '/rootfs_local' - self.docker_client = docker.from_env() + #self.docker_client = docker.from_env() + self.docker_client = docker.DockerClient( + base_url='unix://var/run/docker.sock', version='auto') self.docker_APIclient = docker.APIClient( - base_url='unix://var/run/docker.sock') + base_url='unix://var/run/docker.sock', version='auto') if self.set_plugincont_uid() == -1: raise ValueError('Failed to verify docker userns-remap settings') if self.set_plugincont_cgroup_netclsid() == -1: From 5b9a9b55badef780242d2ed78fede3ca68da3eef Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 15:20:39 -0500 Subject: [PATCH 27/47] safe plugin mode with tests Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 1 - 1 file changed, 1 deletion(-) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index eaab581e..ff1c5f61 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -27,7 +27,6 @@ def __init__(self, frequency=-1): self.plugincont_image_path = os.getcwd() + \ '/crawler/utils/plugincont/plugincont_img' self.plugincont_guestcont_mountpoint = '/rootfs_local' - #self.docker_client = docker.from_env() self.docker_client = docker.DockerClient( base_url='unix://var/run/docker.sock', version='auto') self.docker_APIclient = docker.APIClient( From 1cf1d3427c4b9144ea21351898db2f68ef55c754 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 16:20:10 -0500 Subject: [PATCH 28/47] plugincont wip Signed-off-by: Sahil Suneja --- .../test_functional_containers_crawler.py | 242 --- tests/functional/test_functional_ctprobe.py | 328 --- .../test_functional_dockerevents.py | 222 -- .../functional/test_functional_dockerutils.py | 91 - tests/functional/test_functional_fprobe.py | 281 --- .../test_functional_k8s_environment.py | 246 --- .../functional/test_functional_logs_linker.py | 137 -- tests/functional/test_functional_namespace.py | 117 -- tests/functional/test_functional_plugins.py | 84 - 
.../test_functional_safecontainers_crawler.py | 27 +- .../functional/test_functional_vm_plugins.py | 159 -- .../functional/test_functional_vms_crawler.py | 147 -- tests/functional/test_logs_in_volumes1.py | 96 - tests/functional/test_logs_in_volumes_star.py | 93 - tests/functional/test_logs_no_volumes.py | 90 - tests/unit/.gitignore | 3 - tests/unit/__init__.py | 0 tests/unit/aufs_mount_init-id | 1 - tests/unit/btrfs_mount_init-id | 1 - tests/unit/capturing.py | 16 - tests/unit/liberty_connection_stats | 1 - tests/unit/liberty_jvm_stats | 1 - tests/unit/liberty_mbeans | 2 - tests/unit/liberty_response_time_details | 1 - .../unit/liberty_response_time_details_mocked | 1 - tests/unit/liberty_servlet_stats | 1 - tests/unit/liberty_session_stats | 1 - tests/unit/liberty_thread_pool_stats | 1 - tests/unit/mock_environ_file | 1 - tests/unit/mock_pynvml.py | 44 - tests/unit/proc_mounts_aufs | 33 - tests/unit/proc_mounts_btrfs | 33 - tests/unit/proc_mounts_devicemapper | 28 - tests/unit/proc_mounts_vfs | 33 - tests/unit/proc_pid_mounts_devicemapper | 20 - tests/unit/test_app_apache.py | 288 --- tests/unit/test_app_db2.py | 244 --- tests/unit/test_app_liberty.py | 264 --- tests/unit/test_app_nginx.py | 194 -- tests/unit/test_app_redis.py | 270 --- tests/unit/test_app_tomcat.py | 295 --- tests/unit/test_container.py | 44 - tests/unit/test_containers.py | 188 -- tests/unit/test_containers_crawler.py | 139 -- tests/unit/test_diskio_host.py | 130 -- tests/unit/test_dockercontainer.py | 841 -------- tests/unit/test_dockerutils.py | 381 ---- tests/unit/test_emitter.py | 647 ------ tests/unit/test_gpu_plugin.py | 34 - tests/unit/test_host_crawler.py | 73 - tests/unit/test_jar_plugin.py | 56 - tests/unit/test_jar_utils.py | 50 - tests/unit/test_mesos_url.py | 18 - tests/unit/test_misc.py | 147 -- tests/unit/test_mtgraphite.py | 164 -- tests/unit/test_namespace.py | 255 --- tests/unit/test_osinfo.py | 155 -- tests/unit/test_package_utils.py | 87 - tests/unit/test_plugins.py | 1790 ----------------- tests/unit/test_vms_crawler.py | 126 -- tests/unit/vfs_mount_init-id | 1 - 61 files changed, 23 insertions(+), 9440 deletions(-) delete mode 100644 tests/functional/test_functional_containers_crawler.py delete mode 100644 tests/functional/test_functional_ctprobe.py delete mode 100644 tests/functional/test_functional_dockerevents.py delete mode 100644 tests/functional/test_functional_dockerutils.py delete mode 100644 tests/functional/test_functional_fprobe.py delete mode 100644 tests/functional/test_functional_k8s_environment.py delete mode 100644 tests/functional/test_functional_logs_linker.py delete mode 100644 tests/functional/test_functional_namespace.py delete mode 100644 tests/functional/test_functional_plugins.py delete mode 100644 tests/functional/test_functional_vm_plugins.py delete mode 100644 tests/functional/test_functional_vms_crawler.py delete mode 100644 tests/functional/test_logs_in_volumes1.py delete mode 100644 tests/functional/test_logs_in_volumes_star.py delete mode 100644 tests/functional/test_logs_no_volumes.py delete mode 100644 tests/unit/.gitignore delete mode 100644 tests/unit/__init__.py delete mode 100644 tests/unit/aufs_mount_init-id delete mode 100644 tests/unit/btrfs_mount_init-id delete mode 100644 tests/unit/capturing.py delete mode 100644 tests/unit/liberty_connection_stats delete mode 100644 tests/unit/liberty_jvm_stats delete mode 100644 tests/unit/liberty_mbeans delete mode 100644 tests/unit/liberty_response_time_details delete mode 100644 
tests/unit/liberty_response_time_details_mocked delete mode 100644 tests/unit/liberty_servlet_stats delete mode 100644 tests/unit/liberty_session_stats delete mode 100644 tests/unit/liberty_thread_pool_stats delete mode 100644 tests/unit/mock_environ_file delete mode 100644 tests/unit/mock_pynvml.py delete mode 100644 tests/unit/proc_mounts_aufs delete mode 100644 tests/unit/proc_mounts_btrfs delete mode 100644 tests/unit/proc_mounts_devicemapper delete mode 100644 tests/unit/proc_mounts_vfs delete mode 100644 tests/unit/proc_pid_mounts_devicemapper delete mode 100644 tests/unit/test_app_apache.py delete mode 100644 tests/unit/test_app_db2.py delete mode 100644 tests/unit/test_app_liberty.py delete mode 100644 tests/unit/test_app_nginx.py delete mode 100644 tests/unit/test_app_redis.py delete mode 100644 tests/unit/test_app_tomcat.py delete mode 100644 tests/unit/test_container.py delete mode 100644 tests/unit/test_containers.py delete mode 100644 tests/unit/test_containers_crawler.py delete mode 100644 tests/unit/test_diskio_host.py delete mode 100644 tests/unit/test_dockercontainer.py delete mode 100644 tests/unit/test_dockerutils.py delete mode 100644 tests/unit/test_emitter.py delete mode 100644 tests/unit/test_gpu_plugin.py delete mode 100644 tests/unit/test_host_crawler.py delete mode 100644 tests/unit/test_jar_plugin.py delete mode 100644 tests/unit/test_jar_utils.py delete mode 100644 tests/unit/test_mesos_url.py delete mode 100644 tests/unit/test_misc.py delete mode 100644 tests/unit/test_mtgraphite.py delete mode 100644 tests/unit/test_namespace.py delete mode 100644 tests/unit/test_osinfo.py delete mode 100644 tests/unit/test_package_utils.py delete mode 100644 tests/unit/test_plugins.py delete mode 100644 tests/unit/test_vms_crawler.py delete mode 100644 tests/unit/vfs_mount_init-id diff --git a/tests/functional/test_functional_containers_crawler.py b/tests/functional/test_functional_containers_crawler.py deleted file mode 100644 index d03b658b..00000000 --- a/tests/functional/test_functional_containers_crawler.py +++ /dev/null @@ -1,242 +0,0 @@ -import unittest -import docker -import requests.exceptions -import tempfile -import os -import shutil -import subprocess -import sys -import pykafka - -# Tests for crawlers in kraken crawlers configuration. - -from containers_crawler import ContainersCrawler -from worker import Worker -from emitters_manager import EmittersManager - -import logging - -# Tests conducted with a single container running. - - -class ContainersCrawlerTests(unittest.TestCase): - - def setUp(self): - root = logging.getLogger() - root.setLevel(logging.INFO) - ch = logging.StreamHandler(sys.stdout) - ch.setLevel(logging.INFO) - formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s') - ch.setFormatter(formatter) - root.addHandler(ch) - - self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', - version='auto') - try: - if len(self.docker.containers()) != 0: - raise Exception( - "Sorry, this test requires a machine with no docker" - "containers running.") - except requests.exceptions.ConnectionError: - print ("Error connecting to docker daemon, are you in the docker" - "group? 
You need to be in the docker group.") - - self.start_crawled_container() - - # start a kakfa+zookeeper container to send data to (to test our - # kafka emitter) - self.start_kafka_container() - - def start_kafka_container(self): - self.docker.pull(repository='spotify/kafka', tag='latest') - self.kafka_container = self.docker.create_container( - image='spotify/kafka', ports=[9092, 2181], - host_config=self.docker.create_host_config(port_bindings={ - 9092: 9092, - 2181: 2181 - }), - environment={'ADVERTISED_HOST': 'localhost', - 'ADVERTISED_PORT': '9092'}) - self.docker.start(container=self.kafka_container['Id']) - - def start_crawled_container(self): - # start a container to be crawled - self.docker.pull(repository='ubuntu', tag='latest') - self.container = self.docker.create_container( - image='ubuntu:latest', command='/bin/sleep 60') - self.tempd = tempfile.mkdtemp(prefix='crawlertest.') - self.docker.start(container=self.container['Id']) - - def tearDown(self): - self.remove_crawled_container() - self.remove_kafka_container() - - shutil.rmtree(self.tempd) - - def remove_kafka_container(self): - self.docker.stop(container=self.kafka_container['Id']) - self.docker.remove_container(container=self.kafka_container['Id']) - - def remove_crawled_container(self): - self.docker.stop(container=self.container['Id']) - self.docker.remove_container(container=self.container['Id']) - - def testCrawlContainer1(self): - crawler = ContainersCrawler( - features=[ - 'cpu', - 'memory', - 'interface', - 'package']) - frames = list(crawler.crawl()) - output = str(frames[0]) - print output # only printed if the test fails - assert 'interface-lo' in output - assert 'if_octets_tx=' in output - assert 'cpu-0' in output - assert 'cpu_nice=' in output - assert 'memory' in output - assert 'memory_buffered=' in output - assert 'apt' in output - assert 'pkgarchitecture=' in output - - def testCrawlContainer2(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - # crawler itself needs to be root - process = subprocess.Popen( - [ - '/usr/bin/python', mypath + '/../../crawler/crawler.py', - '--url', 'file://' + self.tempd + '/out/crawler', - '--features', 'cpu,memory,interface,package', - '--crawlContainers', self.container['Id'], - '--format', 'graphite', - '--crawlmode', 'OUTCONTAINER', - '--numprocesses', '1' - ], - env=env) - stdout, stderr = process.communicate() - assert process.returncode == 0 - - print stderr - print stdout - - subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) - - files = os.listdir(self.tempd + '/out') - assert len(files) == 1 - - f = open(self.tempd + '/out/' + files[0], 'r') - output = f.read() - print output # only printed if the test fails - assert 'interface-lo.if_octets.tx' in output - assert 'cpu-0.cpu-idle' in output - assert 'memory.memory-used' in output - assert 'apt.pkgsize' in output - f.close() - - def testCrawlContainerKafka(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - # crawler itself needs to be root - process = subprocess.Popen( - [ - '/usr/bin/python', mypath + '/../../crawler/crawler.py', - '--url', 'kafka://localhost:9092/test', - '--features', 'os,process', - '--crawlContainers', self.container['Id'], - '--crawlmode', 'OUTCONTAINER', - '--numprocesses', '1' - ], - env=env) - stdout, stderr = process.communicate() - assert process.returncode == 0 - - print stderr - print stdout - - kafka = 
pykafka.KafkaClient(hosts='localhost:9092') - topic = kafka.topics['test'] - consumer = topic.get_simple_consumer() - message = consumer.consume() - assert '"cmd":"/bin/sleep 60"' in message.value - - def testCrawlContainerKafka2(self): - emitters = EmittersManager(urls=['kafka://localhost:9092/test']) - crawler = ContainersCrawler( - features=['os', 'process'], - user_list=self.container['Id']) - worker = Worker(emitters=emitters, frequency=-1, - crawler=crawler) - worker.iterate() - kafka = pykafka.KafkaClient(hosts='localhost:9092') - topic = kafka.topics['test'] - consumer = topic.get_simple_consumer() - message = consumer.consume() - assert '"cmd":"/bin/sleep 60"' in message.value - - for i in range(1, 5): - worker.iterate() - message = consumer.consume() - assert '"cmd":"/bin/sleep 60"' in message.value - - def testCrawlContainer3(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - # crawler itself needs to be root - process = subprocess.Popen( - [ - '/usr/bin/python', mypath + '/../../crawler/crawler.py', - '--url', 'file://' + self.tempd + '/out/crawler', - '--features', 'os,process', - '--crawlContainers', self.container['Id'], - '--crawlmode', 'OUTCONTAINER', - '--numprocesses', '1' - ], - env=env) - stdout, stderr = process.communicate() - assert process.returncode == 0 - - print stderr - print stdout - - subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) - - files = os.listdir(self.tempd + '/out') - assert len(files) == 1 - - f = open(self.tempd + '/out/' + files[0], 'r') - output = f.read() - print output # only printed if the test fails - assert 'sleep' in output - assert 'linux' or 'Linux' in output - f.close() - - def testCrawlContainerAvoidSetns(self): - options = {'avoid_setns': True} - crawler = ContainersCrawler( - user_list=self.container['Id'], - features=['cpu', 'memory', 'interface', 'package'], - options=options) - frames = list(crawler.crawl()) - output = str(frames[0]) - print output # only printed if the test fails - # interface in avoid_setns mode is not supported - # assert 'interface-lo' in output - # assert 'if_octets_tx=' in output - assert 'cpu-0' in output - assert 'cpu_nice=' in output - assert 'memory' in output - assert 'memory_buffered=' in output - assert 'apt' in output - assert 'pkgarchitecture=' in output - -if __name__ == '__main__': - unittest.main() diff --git a/tests/functional/test_functional_ctprobe.py b/tests/functional/test_functional_ctprobe.py deleted file mode 100644 index 79192602..00000000 --- a/tests/functional/test_functional_ctprobe.py +++ /dev/null @@ -1,328 +0,0 @@ -import json -import logging -import mock -import os -import shutil -import sys -import time -import tempfile -import unittest - -import docker -import requests.exceptions -from plugins.systems.ctprobe_container_crawler import CTProbeContainerCrawler -from utils.process_utils import start_child - - -# Tests the FprobeContainerCrawler class -# Throws an AssertionError if any test fails - -CTPROBE_FRAME = \ - '[{"data":"xAAAAAABAAYAAAAAAAAAAAIAAAA0AAGAFAABgAgAAQCsEQABCAACAKwRAA4cA' \ - 'AKABQABAAYAAAAGAAIAiEYAAAYAAwARWwAANAACgBQAAYAIAAEArBEADggAAgCsEQABHAAC' \ - 'gAUAAQAGAAAABgACABFbAAAGAAMAiEYAAAgADADhBU3ACAADAAAAAYgIAAcAAAAAeDAABIA' \ - 'sAAGABQABAAEAAAAFAAIABwAAAAUAAwAAAAAABgAEAAMAAAAGAAUAAAAAAA==","metadat' \ - 'a":{"ip-addresses":["172.17.0.14"]}},{"data":"jAAAAAIBAAAAAAAAAAAAAAIAA' \ - 'AA0AAGAFAABgAgAAQCsEQABCAACAKwRAA4cAAKABQABAAYAAAAGAAIAiDYAAAYAAwARWwAA' \ - 
'NAACgBQAAYAIAAEArBEADggAAgCsEQABHAACgAUAAQAGAAAABgACABFbAAAGAAMAiDYAAAg' \ - 'ADAAM3QUACAADAAAAAY4=","metadata":{"ip-addresses":["172.17.0.14"]}}]' - - -def simulate_ctprobe(url): - """ simulate writing by ctprobe """ - filename = url.split('://')[1] - with open(filename, 'w') as f: - f.write(CTPROBE_FRAME) - with open(filename + ".tmp", 'w') as f: - f.write(CTPROBE_FRAME) - - -def mocked_add_collector(self, url, ipaddresses, ifname): - code, content = self.send_request('add_collector', - [url, ipaddresses, ifname]) - if code == 200: - # in this case we simulate a file being written... - simulate_ctprobe(url) - return True - else: - raise Exception('HTTP Error %d: %s' % (code, content['error'])) - - -def mocked_start_child(params, pass_fds, null_fds, ign_sigs, setsid=False, - **kwargs): - return start_child(['sleep', '1'], pass_fds, null_fds, ign_sigs, setsid) - - -def mocked_start_child_ctprobe_except(params, pass_fds, null_fds, ign_sigs, - setsid=False, **kwargs): - if params[0] == 'conntrackprobe': - raise Exception('Refusing to start %s' % params[0]) - - -def mocked_session_get(self, path, data=''): - class Session(object): - def __init__(self, status_code, content): - self.status_code = status_code - self.content = json.dumps(content) - - return Session(200, {'error': ''}) - - -def mocked_session_get_fail(self, path, data=''): - class Session(object): - def __init__(self, status_code, content): - self.status_code = status_code - self.content = json.dumps(content) - - return Session(400, {'error': 'Bad request'}) - - -def mocked_ethtool_get_peer_ifindex(ifname): - raise Exception('ethtool exception') - - -def mocked_check_ctprobe_alive(self, pid): - return True - - -# Tests conducted with a single container running. -class CtprobeFunctionalTests(unittest.TestCase): - image_name = 'alpine:latest' - - def setUp(self): - self.docker = docker.APIClient( - base_url='unix://var/run/docker.sock', version='auto') - try: - if len(self.docker.containers()) != 0: - raise Exception( - "Sorry, this test requires a machine with no docker" - "containers running.") - except requests.exceptions.ConnectionError: - print ("Error connecting to docker daemon, are you in the docker" - "group? You need to be in the docker group.") - - self.docker.pull(repository='alpine', tag='latest') - self.container = self.docker.create_container( - image=self.image_name, command='ping -w 30 8.8.8.8') - self.tempd = tempfile.mkdtemp(prefix='crawlertest.') - self.docker.start(container=self.container['Id']) - - self.output_dir = os.path.join(self.tempd, 'crawler-ctprobe') - - self.params = { - 'ctprobe_user': 'nobody', - 'ctprobe_output_dir': self.output_dir, - 'output_filepattern': 'testfile', - } - - logging.basicConfig(stream=sys.stderr) - self.logger = logging.getLogger("crawlutils").setLevel(logging.INFO) - - def tearDown(self): - self.docker.stop(container=self.container['Id']) - self.docker.remove_container(container=self.container['Id']) - - shutil.rmtree(self.tempd) - CTProbeContainerCrawler.ctprobe_pid = 0 - CTProbeContainerCrawler.ifaces_monitored = [] - - @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', - mocked_start_child) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'requests_unixsocket.Session.get', mocked_session_get) - @mock.patch('plugins.systems.ctprobe_container_crawler.' 
- 'ConntrackProbeClient.add_collector', mocked_add_collector) - def test_crawl_outcontainer_ctprobe(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: expecting collector output') - - num = len(CTProbeContainerCrawler.ifaces_monitored) - - ctc = CTProbeContainerCrawler() - assert ctc.get_feature() == 'ctprobe' - - # the fake collector writes the single frame immediately - res = [] - for data in ctc.crawl(self.container['Id'], avoid_setns=False, - **self.params): - res.append(data) - assert len(res) == 1 - assert len(CTProbeContainerCrawler.ifaces_monitored) == num + 1 - - @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', - mocked_start_child) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'requests_unixsocket.Session.get', mocked_session_get_fail) - def test_start_netlink_collection_fault1(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: collector cannot be configured') - - ctc = CTProbeContainerCrawler() - assert ctc.get_feature() == 'ctprobe' - - # with ctprobe failing to start, we won't get data - res = [] - for data in ctc.crawl(self.container['Id'], avoid_setns=False, - **self.params): - res.append(data) - assert len(res) == 0 - assert len(CTProbeContainerCrawler.ifaces_monitored) == 0 - - @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', - mocked_start_child) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'requests_unixsocket.Session.get', mocked_session_get) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'ConntrackProbeClient.add_collector', mocked_add_collector) - def test_start_netlink_collection_fault4(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: collector cannot be configured') - - ctprobe_user = self.params['ctprobe_user'] - self.params['ctprobe_user'] = 'user-does-not-exist' - - ctc = CTProbeContainerCrawler() - assert ctc.get_feature() == 'ctprobe' - - # with ctprobe failing to start, we won't get data - assert not ctc.crawl(self.container['Id'], avoid_setns=False, - **self.params) - assert len(CTProbeContainerCrawler.ifaces_monitored) == 0 - - self.params['ctprobe_user'] = ctprobe_user - - @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', - mocked_start_child_ctprobe_except) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'requests_unixsocket.Session.get', mocked_session_get) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'ConntrackProbeClient.add_collector', mocked_add_collector) - def test_start_netlink_collection_fault5(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: conntrackprobe fails to start') - - ctc = CTProbeContainerCrawler() - assert ctc.get_feature() == 'ctprobe' - - assert not ctc.crawl(self.container['Id'], avoid_setns=False, - **self.params) - assert len(CTProbeContainerCrawler.ifaces_monitored) == 0 - - assert not ctc.check_ctprobe_alive(CTProbeContainerCrawler.ctprobe_pid) - # this should always fail - assert not ctc.check_ctprobe_alive(1) - - @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', - mocked_start_child) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'requests_unixsocket.Session.get', mocked_session_get) - @mock.patch('plugins.systems.ctprobe_container_crawler.' 
- 'ethtool_get_peer_ifindex', mocked_ethtool_get_peer_ifindex) - def test_start_netlink_collection_fault6(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: ethtool throws an error') - - ctc = CTProbeContainerCrawler() - assert ctc.get_feature() == 'ctprobe' - - # with ctprobe failing to start, we won't get data - res = [] - for data in ctc.crawl(self.container['Id'], avoid_setns=False, - **self.params): - res.append(data) - assert len(res) == 0 - assert len(CTProbeContainerCrawler.ifaces_monitored) == 0 - - @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', - mocked_start_child) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'requests_unixsocket.Session.get', mocked_session_get) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'ConntrackProbeClient.add_collector', mocked_add_collector) - @mock.patch('plugins.systems.ctprobe_container_crawler.' - 'CTProbeContainerCrawler.check_ctprobe_alive', - mocked_check_ctprobe_alive) - def test_remove_datafiles(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: datafiles of disappeared interface ' - 'being removed') - - ctc = CTProbeContainerCrawler() - assert ctc.get_feature() == 'ctprobe' - - # we pretend that an interface test.eth0 existed - ifname = 'test.eth0' - CTProbeContainerCrawler.ifaces_monitored.append(ifname) - - self.params['output_filepattern'] = 'ctprobe-{ifname}-{timestamp}' - - # create a datafile for this fake interface - timestamp = int(time.time()) - filepattern = 'ctprobe-{ifname}-{timestamp}' \ - .format(ifname=ifname, timestamp=timestamp) - # have the ctprobe write a file with the ifname in - # the filename - ctc.setup_outputdir(self.output_dir, os.getuid(), os.getgid()) - simulate_ctprobe('file+json://%s/%s' % (self.output_dir, filepattern)) - written_file = os.path.join(self.output_dir, filepattern) - assert os.path.isfile(written_file) - - CTProbeContainerCrawler.next_cleanup = 0 - # calling ctc.crawl() will trigger a cleanup of that file - # since our fake interface never existed - ctc.crawl(self.container['Id'], avoid_setns=False, **self.params) - - # file should be gone now - assert not os.path.isfile(written_file) - - @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', - mocked_start_child) - @mock.patch('plugins.systems.ctprobe_container_crawler.' 
- 'requests_unixsocket.Session.get', mocked_session_get) - def test_remove_stale_files(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: stale file being removed') - - ctc = CTProbeContainerCrawler() - assert ctc.get_feature() == 'ctprobe' - - # we pretend that an interface test.eth0 existed - ifname = 'test.eth0' - CTProbeContainerCrawler.ifaces_monitored.append(ifname) - - self.params['output_filepattern'] = 'ctprobe-{ifname}-{timestamp}' - - # have the fake socket-datacollector write a file with the ifname in - # the filename - ctc.setup_outputdir(self.output_dir, os.getuid(), os.getgid()) - - written_file = os.path.join(self.output_dir, 'test.output') - with open(written_file, 'a') as f: - f.write('hello') - - assert os.path.isfile(written_file) - - # mock the stale file timeout so that our file will get removed - # with in reasonable time - CTProbeContainerCrawler.STALE_FILE_TIMEOUT = 5 - - # calling ctc.crawl() will not trigger a cleanup of that file - # the first time - logger.info('1st crawl') - ctc.crawl(self.container['Id'], avoid_setns=False, **self.params) - - # file should still be here - assert os.path.isfile(written_file) - - # the next time we will crawl, the file will be removed - CTProbeContainerCrawler.next_cleanup = time.time() - time.sleep(CTProbeContainerCrawler.STALE_FILE_TIMEOUT + 1) - - logger.info('2nd crawl') - ctc.crawl(self.container['Id'], avoid_setns=False, **self.params) - - # file should be gone now - assert not os.path.isfile(written_file) - - if __name__ == '__main__': - unittest.main() diff --git a/tests/functional/test_functional_dockerevents.py b/tests/functional/test_functional_dockerevents.py deleted file mode 100644 index 32dc853f..00000000 --- a/tests/functional/test_functional_dockerevents.py +++ /dev/null @@ -1,222 +0,0 @@ -import unittest -import docker -import requests.exceptions -import tempfile -import os -import shutil -import subprocess -import commands -import time -import multiprocessing -import signal -import psutil -import semantic_version -from utils.dockerutils import _fix_version - -# Tests conducted with a single container running. -# docker events supported avove docker version 1.8.0 -VERSION_SPEC = semantic_version.Spec('>=1.8.1') - -class CrawlerDockerEventTests(unittest.TestCase): - - def setUp(self): - self.docker = docker.APIClient( - base_url='unix://var/run/docker.sock', version='auto') - try: - if len(self.docker.containers()) != 0: - raise Exception( - "Sorry, this test requires a machine with no docker containers running.") - except requests.exceptions.ConnectionError as e: - print "Error connecting to docker daemon, are you in the docker group? You need to be in the docker group." 
- - self.docker.pull(repository='alpine', tag='latest') - self.tempd = tempfile.mkdtemp(prefix='crawlertest-events.') - - def tearDown(self): - containers = self.docker.containers() - for container in containers: - self.docker.stop(container=container['Id']) - self.docker.remove_container(container=container['Id']) - - shutil.rmtree(self.tempd) - #self.__exec_kill_crawlers() - - def __exec_crawler(self, cmd): - status, output = commands.getstatusoutput(cmd) - assert status == 0 - - def __exec_create_container(self): - container = self.docker.create_container( - image='alpine:latest', command='/bin/sleep 60') - self.docker.start(container=container['Id']) - return container['Id'] - - def __exec_delet_container(self, containerId): - self.docker.stop(container=containerId) - self.docker.remove_container(container=containerId) - - ''' - def __exec_kill_crawlers(self): - procname = "python" - for proc in psutil.process_iter(): - if proc.name() == procname: - #cmdline = proc.cmdline() - pid = proc.pid - #if 'crawler.py' in cmdline[1]: - os.kill(pid, signal.SIGTERM) - ''' - - ''' - This is a basic sanity test. It first creates a container and then starts crawler. - In this case, crawler would miss the create event, but it should be able to - discover already running containers and snapshot them - ''' - def testCrawlContainer0(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - self.__exec_create_container() - - # crawler itself needs to be root - process = subprocess.Popen( - [ - '/usr/bin/python', mypath + '/../../crawler/crawler.py', - '--url', 'file://' + self.tempd + '/out/crawler', - '--features', 'cpu,memory,interface', - '--crawlContainers', 'ALL', - '--format', 'graphite', - '--crawlmode', 'OUTCONTAINER', - '--numprocesses', '1' - ], - env=env) - stdout, stderr = process.communicate() - assert process.returncode == 0 - - subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) - - files = os.listdir(self.tempd + '/out') - assert len(files) == 1 - - f = open(self.tempd + '/out/' + files[0], 'r') - output = f.read() - assert 'interface-lo.if_octets.tx' in output - assert 'cpu-0.cpu-idle' in output - assert 'memory.memory-used' in output - f.close() - - #clear the outut direcory - shutil.rmtree(os.path.join(self.tempd, 'out')) - - ''' - In this test, crawler is started with high snapshot frequency (60 sec), - and container is created immediately. Expected behaviour is that - crawler should get intrupptted and start snapshotting container immediately. 
- - ''' - def testCrawlContainer1(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - # crawler itself needs to be root - cmd = ''.join([ - '/usr/bin/python ', mypath + '/../../crawler/crawler.py ', - '--url ', 'file://' + self.tempd + '/out/crawler ', - '--features ', 'cpu,memory,interface ', - '--crawlContainers ', 'ALL ', - '--format ', 'graphite ', - '--crawlmode ', 'OUTCONTAINER ', - '--frequency ', '60 ', - '--numprocesses ', '1 ' - ]) - - crawlerProc = multiprocessing.Process( - name='crawler', target=self.__exec_crawler, - args=(cmd,)) - - createContainerProc = multiprocessing.Process( - name='createContainer', target=self.__exec_create_container - ) - - crawlerProc.start() - createContainerProc.start() - - time.sleep(5) - - subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) - - files = os.listdir(self.tempd + '/out') - assert len(files) == 1 - - f = open(self.tempd + '/out/' + files[0], 'r') - output = f.read() - #print output # only printed if the test fails - assert 'interface-lo.if_octets.tx' in output - assert 'cpu-0.cpu-idle' in output - assert 'memory.memory-used' in output - f.close() - #clear the outut direcory - shutil.rmtree(os.path.join(self.tempd, 'out')) - crawlerProc.terminate() - crawlerProc.join() - - ''' - In this test, crawler is started with shorter snapshot frequency (20 sec), - and container is created immediately. Expected behaviour is that - crawler should get intrupptted and start snapshotting container immediately. - - And then we will wait for crawler's next iteration to ensure, w/o docker event, - crawler will timeout and snapshot container periodically - ''' - def testCrawlContainer2(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - # crawler itself needs to be root - cmd = ''.join([ - '/usr/bin/python ', mypath + '/../../crawler/crawler.py ', - '--url ', 'file://' + self.tempd + '/out/crawler ', - '--features ', 'cpu,memory,interface ', - '--crawlContainers ', 'ALL ', - '--format ', 'graphite ', - '--crawlmode ', 'OUTCONTAINER ', - '--frequency ', '20 ', - '--numprocesses ', '1 ' - ]) - - crawlerProc = multiprocessing.Process( - name='crawler', target=self.__exec_crawler, - args=(cmd,)) - - createContainerProc = multiprocessing.Process( - name='createContainer', target=self.__exec_create_container - ) - - crawlerProc.start() - createContainerProc.start() - - time.sleep(30) - - subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) - - files = os.listdir(self.tempd + '/out') - docker_server_version = self.docker.version()['Version'] - if VERSION_SPEC.match(semantic_version.Version(_fix_version(docker_server_version))): - assert len(files) == 2 - - f = open(self.tempd + '/out/' + files[0], 'r') - output = f.read() - #print output # only printed if the test fails - assert 'interface-lo.if_octets.tx' in output - assert 'cpu-0.cpu-idle' in output - assert 'memory.memory-used' in output - f.close() - #clear the outut direcory - shutil.rmtree(os.path.join(self.tempd, 'out')) - crawlerProc.terminate() - crawlerProc.join() - -if __name__ == '__main__': - unittest.main() diff --git a/tests/functional/test_functional_dockerutils.py b/tests/functional/test_functional_dockerutils.py deleted file mode 100644 index 576f0ef6..00000000 --- a/tests/functional/test_functional_dockerutils.py +++ /dev/null @@ -1,91 +0,0 @@ -import logging -import unittest -import docker -import requests.exceptions -import tempfile 
-import shutil - -from utils.dockerutils import ( - exec_dockerps, - exec_docker_history, - exec_dockerinspect, - _get_docker_server_version, - _fix_version, - get_docker_container_rootfs_path -) - -# Tests conducted with a single container running. - - -class DockerUtilsTests(unittest.TestCase): - image_name = 'alpine:latest' - long_image_name = 'docker.io/alpine:latest' - - def setUp(self): - self.docker = docker.APIClient( - base_url='unix://var/run/docker.sock', version='auto') - try: - if len(self.docker.containers()) != 0: - raise Exception( - "Sorry, this test requires a machine with no docker" - "containers running.") - except requests.exceptions.ConnectionError: - print ("Error connecting to docker daemon, are you in the docker" - "group? You need to be in the docker group.") - - self.docker.pull(repository='alpine', tag='latest') - self.container = self.docker.create_container( - image=self.image_name, command='/bin/sleep 60') - self.tempd = tempfile.mkdtemp(prefix='crawlertest.') - self.docker.start(container=self.container['Id']) - - def tearDown(self): - self.docker.stop(container=self.container['Id']) - self.docker.remove_container(container=self.container['Id']) - - shutil.rmtree(self.tempd) - - def test_fix_version(self): - import semantic_version - ver = u'17.03.01-ce' - fixed_ver = _fix_version(ver) - assert fixed_ver == u'17.3.1' - VERSION_SPEC = semantic_version.Spec('>=1.10.0') - assert VERSION_SPEC.match(semantic_version.Version(fixed_ver)) is True - - def test_docker_version(self): - ver = _get_docker_server_version() - import re - pattern = re.compile("^[0-9]+\.[0-9]+\.[0-9]+") - assert pattern.match(ver) - - def test_dockerps(self): - for inspect in exec_dockerps(): - c_long_id = inspect['Id'] - break # there should only be one container anyway - assert self.container['Id'] == c_long_id - - def test_docker_history(self): - history = exec_docker_history(self.container['Id']) - print history[0] - assert self.image_name in history[0][ - 'Tags'] or self.long_image_name in history[0]['Tags'] - - def test_dockerinspect(self): - inspect = exec_dockerinspect(self.container['Id']) - print inspect - assert self.container['Id'] == inspect['Id'] - - def test_get_container_rootfs(self): - root = get_docker_container_rootfs_path(self.container['Id']) - print root - assert root.startswith('/var/lib/docker') - - if __name__ == '__main__': - logging.basicConfig( - filename='test_dockerutils.log', - filemode='a', - format='%(asctime)s %(levelname)s : %(message)s', - level=logging.DEBUG) - - unittest.main() diff --git a/tests/functional/test_functional_fprobe.py b/tests/functional/test_functional_fprobe.py deleted file mode 100644 index 0584ead5..00000000 --- a/tests/functional/test_functional_fprobe.py +++ /dev/null @@ -1,281 +0,0 @@ -import logging -import mock -import os -import shutil -import sys -import time -import tempfile -import unittest - -import docker -import requests.exceptions -from plugins.systems.fprobe_container_crawler import FprobeContainerCrawler -from utils.process_utils import start_child - - -# Tests the FprobeContainerCrawler class -# Throws an AssertionError if any test fails - -FPROBE_FRAME = \ - '[{"data": "AAUACD6AE4dYsG5IAAGSWAAABngAAAAArBA3AqwQNwEAAAAAAAAAAAAAAAQAA'\ - 'AFiPn/cGT5/3Bsfkez+ABsGAAAAAAAAAAAArBA3AawQNwIAAAAAAAAAAAAAAAYAAAHDPn/cF'\ - 'j5/3BfcUh+QABsGAAAAAAAAAAAArBA3AgoKCgEAAAAAAAAAAAAAAAYAAAFiPn/dmj5//Q2TJ'\ - 'gG7ABgGAAAAAAAAAAAArBA3AawQNwIAAAAAAAAAAAAAAAYAAAHDPn/cGT5/3BvcUx+QABsGA'\ - 
'AAAAAAAAAAArBA3AqwQNwEAAAAAAAAAAAAAAAQAAAFhPn/cGT5/3BsfkNxTABsGAAAAAAAAA'\ - 'AAArBA3AawQNwIAAAAAAAAAAAAAAAYAAAG9Pn/cGT5/3Bvs/h+RABsGAAAAAAAAAAAArBA3A'\ - 'qwQNwEAAAAAAAAAAAAAAAQAAAFhPn/cFj5/3BgfkNxSABsGAAAAAAAAAAAACgoKAawQNwIAA'\ - 'AAAAAAAAAAAAAsAABn8Pn/dfj5//Q0Bu5MmABgGAAAAAAAAAAAA", "metadata": {"send'\ - 'er": "127.0.0.1", "timestamp": 1487957576.000248, "ifname": "vethcfd6842'\ - '", "sport": 46246, "ip-addresses": ["172.16.55.2"], "container-id": "5f2'\ - 'e9fb6168da249e1ef215c41c1454e921a7e4ee722d85191d3027703ea613e"}}]' - - -def simulate_socket_datacollector(params): - """ simulate writing by the socket-datacollector """ - dir_idx = params.index('--dir') - assert dir_idx > 0 - output_dir = params[dir_idx + 1] - - filepattern_idx = params.index('--filepattern') - assert filepattern_idx > 0 - filepattern = params[filepattern_idx + 1] - - filename = os.path.join(output_dir, filepattern) - with open(filename, 'w') as f: - f.write(FPROBE_FRAME) - print 'Write file %s' % filename - with open(filename + ".tmp", 'w') as f: - f.write(FPROBE_FRAME) - - -def mocked_start_child(params, pass_fds, null_fds, ign_sigs, setsid=False, - **kwargs): - if params[0] == 'socket-datacollector': - # in case the socket-datacollector is started, we just write - # the frame without actually starting that program. - simulate_socket_datacollector(params) - - # return appropriate values - return start_child(['sleep', '1'], pass_fds, null_fds, ign_sigs, setsid) - - -def mocked_start_child_fprobe_fail(params, pass_fds, null_fds, ign_sigs, - setsid=False, **kwargs): - if params[0] == 'softflowd': - return start_child(['___no_such_file'], pass_fds, null_fds, ign_sigs, - setsid, **kwargs) - return start_child(['sleep', '1'], pass_fds, null_fds, ign_sigs, setsid, - **kwargs) - - -def mocked_start_child_collector_fail(params, pass_fds, null_fds, ign_sigs, - setsid=False, **kwargs): - if params[0] == 'socket-datacollector': - return start_child(['___no_such_file'], pass_fds, null_fds, ign_sigs, - setsid, **kwargs) - return start_child(['sleep', '1'], pass_fds, null_fds, ign_sigs, - setsid, **kwargs) - - -def mocked_psutil_process_iter(): - class MyProcess(object): - def __init__(self, _name, _cmdline, _pid): - self._name = _name - self._cmdline = _cmdline - self.pid = _pid - - def name(self): - return self._name - - def cmdline(self): - return self._cmdline - yield MyProcess('softflowd', ['-i', 'test.eth0', '127.0.0.1:1234'], 11111) - - -# Tests conducted with a single container running. -class FprobeFunctionalTests(unittest.TestCase): - image_name = 'alpine:latest' - - def setUp(self): - self.docker = docker.APIClient( - base_url='unix://var/run/docker.sock', version='auto') - try: - if len(self.docker.containers()) != 0: - raise Exception( - "Sorry, this test requires a machine with no docker" - "containers running.") - except requests.exceptions.ConnectionError: - print ("Error connecting to docker daemon, are you in the docker" - "group? 
You need to be in the docker group.") - - self.docker.pull(repository='alpine', tag='latest') - self.container = self.docker.create_container( - image=self.image_name, command='ping -w 30 8.8.8.8') - self.tempd = tempfile.mkdtemp(prefix='crawlertest.') - self.docker.start(container=self.container['Id']) - - self.output_dir = os.path.join(self.tempd, 'crawler-fprobe') - - self.params = { - 'fprobe_user': 'nobody', - 'fprobe_output_dir': self.output_dir, - 'output_filepattern': 'testfile', - 'netflow_version': 10, - } - - logging.basicConfig(stream=sys.stderr) - self.logger = logging.getLogger("crawlutils").setLevel(logging.INFO) - - def tearDown(self): - self.docker.stop(container=self.container['Id']) - self.docker.remove_container(container=self.container['Id']) - - shutil.rmtree(self.tempd) - - @mock.patch('plugins.systems.fprobe_container_crawler.start_child', - mocked_start_child) - def test_crawl_outcontainer_fprobe(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: expecting collector output') - - fc = FprobeContainerCrawler() - assert fc.get_feature() == 'fprobe' - - # the fake collector writes the single frame immediately - res = [] - for data in fc.crawl(self.container['Id'], avoid_setns=False, - **self.params): - res.append(data) - assert len(res) == 1 - - @mock.patch('plugins.systems.fprobe_container_crawler.start_child', - mocked_start_child_fprobe_fail) - def test_start_netflow_collection_fault1(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: fprobe fails to start') - - fc = FprobeContainerCrawler() - assert fc.get_feature() == 'fprobe' - - # with fprobe failing to start, we won't get data - res = [] - for data in fc.crawl(self.container['Id'], avoid_setns=False, - **self.params): - res.append(data) - assert len(res) == 0 - - @mock.patch('plugins.systems.fprobe_container_crawler.start_child', - mocked_start_child_collector_fail) - def test_start_netflow_collection_fault2(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: collector fails to start') - - fc = FprobeContainerCrawler() - assert fc.get_feature() == 'fprobe' - - # with fprobe failing to start, we won't get data - res = [] - for data in fc.crawl(self.container['Id'], avoid_setns=False, - **self.params): - res.append(data) - assert len(res) == 0 - - @mock.patch('plugins.systems.fprobe_container_crawler.start_child', - mocked_start_child) - def test_remove_datafiles(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: datafiles of disappeared interface ' - 'being removed') - - fc = FprobeContainerCrawler() - assert fc.get_feature() == 'fprobe' - - # we pretend that an interface test.eth0 existed - ifname = 'test.eth0' - FprobeContainerCrawler.fprobes_started[ifname] = 1234 - - self.params['output_filepattern'] = 'fprobe-{ifname}-{timestamp}' - - # create a datafile for this fake interface - timestamp = int(time.time()) - filepattern = 'fprobe-{ifname}-{timestamp}'.format(ifname=ifname, - timestamp=timestamp) - params = [ - 'socket-datacollector', - '--dir', self.output_dir, - '--filepattern', filepattern, - ] - - # have the fake socket-datacollector write a file with the ifname in - # the filename - fc.setup_outputdir(self.output_dir, os.getuid(), os.getgid()) - simulate_socket_datacollector(params) - written_file = os.path.join(self.output_dir, filepattern) - assert os.path.isfile(written_file) - - FprobeContainerCrawler.next_cleanup = 0 - # calling fc.crawl() will trigger a cleanup of that file - # since 
our fake interface never existed - fc.crawl(self.container['Id'], avoid_setns=False, **self.params) - - # file should be gone now - assert not os.path.isfile(written_file) - - @mock.patch('plugins.systems.fprobe_container_crawler.psutil.process_iter', - mocked_psutil_process_iter) - def test_interfaces_with_fprobes(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: determine interfaces on which flow probes ' - 'are running') - s = FprobeContainerCrawler.interfaces_with_fprobes() - assert 'test.eth0' in s.keys() - - @mock.patch('plugins.systems.fprobe_container_crawler.start_child', - mocked_start_child) - def test_remove_stale_files(self): - logger = logging.getLogger("crawlutils") - logger.info('>>> Testcase: stale file being removed') - - fc = FprobeContainerCrawler() - assert fc.get_feature() == 'fprobe' - - # we pretend that an interface test.eth0 existed - ifname = 'test.eth0' - FprobeContainerCrawler.fprobes_started[ifname] = 1234 - - self.params['output_filepattern'] = 'fprobe-{ifname}-{timestamp}' - - # have the fake socket-datacollector write a file with the ifname in - # the filename - fc.setup_outputdir(self.output_dir, os.getuid(), os.getgid()) - - written_file = os.path.join(self.output_dir, 'test.output') - with open(written_file, 'a') as f: - f.write('hello') - - assert os.path.isfile(written_file) - - # mock the stale file timeout so that our file will get removed - # with in reasonable time - FprobeContainerCrawler.STALE_FILE_TIMEOUT = 5 - - # calling fc.crawl() will not trigger a cleanup of that file - # the first time - logger.info('1st crawl') - fc.crawl(self.container['Id'], avoid_setns=False, **self.params) - - # file should still be here - assert os.path.isfile(written_file) - - # the next time we will crawl, the file will be removed - FprobeContainerCrawler.next_cleanup = time.time() - time.sleep(FprobeContainerCrawler.STALE_FILE_TIMEOUT + 1) - - logger.info('2nd crawl') - fc.crawl(self.container['Id'], avoid_setns=False, **self.params) - - # file should be gone now - assert not os.path.isfile(written_file) - - if __name__ == '__main__': - unittest.main() diff --git a/tests/functional/test_functional_k8s_environment.py b/tests/functional/test_functional_k8s_environment.py deleted file mode 100644 index 707cff51..00000000 --- a/tests/functional/test_functional_k8s_environment.py +++ /dev/null @@ -1,246 +0,0 @@ -import unittest -import docker -import requests.exceptions -import tempfile -import os -import shutil -import subprocess -import sys -import json - -# Tests for crawlers in kubernetes crawlers configuration. - -from containers_crawler import ContainersCrawler -from worker import Worker -from emitters_manager import EmittersManager - -import logging - -# Tests conducted with a single container running. 
- -CONT_NAME = "io.kubernetes.container.name" -POD_NAME = "io.kubernetes.pod.name" -POD_UID = "io.kubernetes.pod.uid" -POD_NS = "io.kubernetes.pod.namespace" -K8S_DELIMITER = "/" - - -class ContainersCrawlerTests(unittest.TestCase): - - def setUp(self): - root = logging.getLogger() - root.setLevel(logging.INFO) - ch = logging.StreamHandler(sys.stdout) - ch.setLevel(logging.INFO) - formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s') - ch.setFormatter(formatter) - root.addHandler(ch) - - self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', - version='auto') - self.k8s_labels = dict() - self.k8s_labels[CONT_NAME] = "simson" - self.k8s_labels[POD_NAME] = "pod-test" - self.k8s_labels[POD_UID] = "pod-123" - self.k8s_labels[POD_NS] = "devtest" - try: - if len(self.docker.containers()) != 0: - raise Exception( - "Sorry, this test requires a machine with no docker" - "containers running.") - except requests.exceptions.ConnectionError: - print ("Error connecting to docker daemon, are you in the docker" - "group? You need to be in the docker group.") - - self.start_crawled_container() - - def start_crawled_container(self): - # start a container to be crawled - self.docker.pull(repository='ubuntu', tag='latest') - self.container = self.docker.create_container( - image='ubuntu:latest', labels=self.k8s_labels, command='/bin/sleep 60') - self.tempd = tempfile.mkdtemp(prefix='crawlertest.') - self.docker.start(container=self.container['Id']) - - def tearDown(self): - self.remove_crawled_container() - - shutil.rmtree(self.tempd) - - def remove_crawled_container(self): - self.docker.stop(container=self.container['Id']) - self.docker.remove_container(container=self.container['Id']) - - def testCrawlContainer1(self): - crawler = ContainersCrawler( - features=[ - 'cpu', - 'memory', - 'interface', - 'package'], - environment='kubernetes') - frames = list(crawler.crawl()) - output = str(frames[0]) - print output # only printed if the test fails - assert 'interface-lo' in output - assert 'if_octets_tx=' in output - assert 'cpu-0' in output - assert 'cpu_nice=' in output - assert 'memory' in output - assert 'memory_buffered=' in output - assert 'apt' in output - assert 'pkgarchitecture=' in output - - ''' - Test for graphite o/p format. 
- ''' - - def testCrawlContainer2(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - # crawler itself needs to be root - process = subprocess.Popen( - [ - '/usr/bin/python', mypath + '/../../crawler/crawler.py', - '--url', 'file://' + self.tempd + '/out/crawler', - '--features', 'cpu,memory,interface', - '--crawlContainers', self.container['Id'], - '--format', 'graphite', - '--crawlmode', 'OUTCONTAINER', - '--environment', 'kubernetes', - '--numprocesses', '1' - ], - env=env) - stdout, stderr = process.communicate() - assert process.returncode == 0 - - print stderr - print stdout - - subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) - - files = os.listdir(self.tempd + '/out') - assert len(files) == 1 - - f = open(self.tempd + '/out/' + files[0], 'r') - output = f.read() - print output # only printed if the test fails - sample_out = output.split('\n')[0] - print sample_out - namespace_parts = sample_out.split(".")[:4] - assert len(namespace_parts) == 4 - assert namespace_parts[0] == self.k8s_labels[POD_NS] - assert namespace_parts[1] == self.k8s_labels[POD_NAME] - assert namespace_parts[2] == self.k8s_labels[CONT_NAME] - assert 'interface-lo.if_octets.tx' in output - assert 'cpu-0.cpu-idle' in output - assert 'memory.memory-used' in output - f.close() - - ''' - Test for csv o/p format - ''' - - def testCrawlContainer3(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - # crawler itself needs to be root - process = subprocess.Popen( - [ - '/usr/bin/python', mypath + '/../../crawler/crawler.py', - '--url', 'file://' + self.tempd + '/out/crawler', - '--features', 'cpu,memory,interface', - '--crawlContainers', self.container['Id'], - '--format', 'csv', - '--crawlmode', 'OUTCONTAINER', - '--environment', 'kubernetes', - '--numprocesses', '1' - ], - env=env) - stdout, stderr = process.communicate() - assert process.returncode == 0 - - print stderr - print stdout - - subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) - - files = os.listdir(self.tempd + '/out') - assert len(files) == 1 - - f = open(self.tempd + '/out/' + files[0], 'r') - output = f.read() - print output # only printed if the test fails - metadata_frame = output.split('\n')[0] - metadata_str = metadata_frame.split()[2] - metadata_json = json.loads(metadata_str) - namespace_str = metadata_json['namespace'] - assert namespace_str - namespace_parts = namespace_str.split(K8S_DELIMITER) - assert len(namespace_parts) == 4 - assert namespace_parts[0] == self.k8s_labels[POD_NS] - assert namespace_parts[1] == self.k8s_labels[POD_NAME] - assert namespace_parts[2] == self.k8s_labels[CONT_NAME] - assert 'interface-lo' in output - assert 'cpu-0' in output - assert 'memory' in output - f.close() - - ''' - Test for json o/p format - ''' - - def testCrawlContainer4(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - # crawler itself needs to be root - process = subprocess.Popen( - [ - '/usr/bin/python', mypath + '/../../crawler/crawler.py', - '--url', 'file://' + self.tempd + '/out/crawler', - '--features', 'cpu,memory,interface', - '--crawlContainers', self.container['Id'], - '--format', 'json', - '--crawlmode', 'OUTCONTAINER', - '--environment', 'kubernetes', - '--numprocesses', '1' - ], - env=env) - stdout, stderr = process.communicate() - assert process.returncode == 0 - - print stderr - print stdout - - 
subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) - - files = os.listdir(self.tempd + '/out') - assert len(files) == 1 - - f = open(self.tempd + '/out/' + files[0], 'r') - output = f.read() - print output # only printed if the test fails - sample_out = output.split('\n')[0] - metadata_json = json.loads(sample_out) - namespace_str = metadata_json['namespace'] - assert namespace_str - namespace_parts = namespace_str.split(K8S_DELIMITER) - assert len(namespace_parts) == 4 - assert namespace_parts[0] == self.k8s_labels[POD_NS] - assert namespace_parts[1] == self.k8s_labels[POD_NAME] - assert namespace_parts[2] == self.k8s_labels[CONT_NAME] - assert 'memory_used' in output - assert 'if_octets_tx' in output - assert 'cpu_idle' in output - f.close() - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/functional/test_functional_logs_linker.py b/tests/functional/test_functional_logs_linker.py deleted file mode 100644 index 90e23fab..00000000 --- a/tests/functional/test_functional_logs_linker.py +++ /dev/null @@ -1,137 +0,0 @@ -import unittest -import docker -import os -import shutil -import sys -import subprocess -import plugins_manager - -from containers_logs_linker import DockerContainersLogsLinker -from worker import Worker -from dockercontainer import HOST_LOG_BASEDIR -from utils.misc import get_host_ipaddr - -import logging - - -class LogsLinkerTests(unittest.TestCase): - - def setUp(self): - root = logging.getLogger() - root.setLevel(logging.INFO) - ch = logging.StreamHandler(sys.stdout) - ch.setLevel(logging.INFO) - formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s') - ch.setFormatter(formatter) - root.addHandler(ch) - plugins_manager.runtime_env = None - self.container = {} - self.container_name = 'LogLinkerContainer' - self.host_namespace = get_host_ipaddr() - try: - shutil.rmtree(os.path.join(HOST_LOG_BASEDIR, self.host_namespace, - self.container_name)) - except OSError: - pass - - def startContainer(self): - self.docker = docker.APIClient( - base_url='unix://var/run/docker.sock', version='auto') - self.docker.pull(repository='ubuntu', tag='latest') - self.container = self.docker.create_container( - image='ubuntu:latest', - command='bash -c "echo hi ; echo hi > /var/log/messages; /bin/sleep 120"', - name=self.container_name) - self.docker.start(container=self.container['Id']) - - def tearDown(self): - try: - self.removeContainer() - shutil.rmtree(os.path.join(HOST_LOG_BASEDIR, self.host_namespace, - self.container_name)) - except Exception: - pass - - def removeContainer(self): - self.docker.stop(container=self.container['Id']) - self.docker.remove_container(container=self.container['Id']) - - def testLinkUnlinkContainer(self): - docker_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace, - self.container_name, 'docker.log') - messages_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace, - self.container_name, 'var/log/messages') - crawler = DockerContainersLogsLinker( - environment='cloudsight', - user_list='ALL', - host_namespace=self.host_namespace) - worker = Worker(crawler=crawler) - - self.startContainer() - worker.iterate() - with open(docker_log, 'r') as log: - assert 'hi' in log.read() - with open(messages_log, 'r') as log: - assert 'hi' in log.read() - assert os.path.exists(docker_log) - assert os.path.exists(messages_log) - assert os.path.islink(docker_log) - assert os.path.islink(messages_log) - - self.removeContainer() - worker.iterate() - assert not os.path.exists(docker_log) - assert not 
os.path.exists(messages_log) - assert not os.path.islink(docker_log) - assert not os.path.islink(messages_log) - - self.startContainer() - worker.iterate() - assert os.path.exists(docker_log) - with open(docker_log, 'r') as log: - assert 'hi' in log.read() - with open(messages_log, 'r') as log: - assert 'hi' in log.read() - assert os.path.exists(messages_log) - assert os.path.islink(docker_log) - assert os.path.islink(messages_log) - - self.removeContainer() - - def testLinkUnlinkContainerCli(self): - docker_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace, - self.container_name, 'docker.log') - messages_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace, - self.container_name, 'var/log/messages') - - self.startContainer() - - # crawler itself needs to be root - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - process = subprocess.Popen( - [ - '/usr/bin/python', mypath + '/../../crawler/containers_logs_linker.py' - ], - env=env) - stdout, stderr = process.communicate() - assert process.returncode == 0 - - print stderr - print stdout - - with open(docker_log, 'r') as log: - assert 'hi' in log.read() - with open(messages_log, 'r') as log: - assert 'hi' in log.read() - assert os.path.exists(docker_log) - assert os.path.exists(messages_log) - assert os.path.islink(docker_log) - assert os.path.islink(messages_log) - - self.removeContainer() - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/functional/test_functional_namespace.py b/tests/functional/test_functional_namespace.py deleted file mode 100644 index 95514a88..00000000 --- a/tests/functional/test_functional_namespace.py +++ /dev/null @@ -1,117 +0,0 @@ -import logging -import shutil -import sys -import tempfile -import time -import unittest - -import docker -import requests.exceptions - -from utils.crawler_exceptions import CrawlTimeoutError -from utils.namespace import run_as_another_namespace - -all_namespaces = ["user", "pid", "uts", "ipc", "net", "mnt"] - - -# Functions used to test the library -def func_args(arg1, arg2): - return "test %s %s" % (arg1, arg2) - -def func_kwargs(arg1='a', arg2='b'): - return "test %s %s" % (arg1, arg2) - -def func_mixed_args(arg1, arg2='b'): - return "test %s %s" % (arg1, arg2) - -def func_no_args(arg="default"): - return "test %s" % (arg) - - -class FooError(Exception): - pass - - -def func_crash(arg, *args, **kwargs): - print locals() - raise FooError("oops") - - -def func_infinite_loop(arg): - while True: - time.sleep(1) - -# Tests conducted with a single container running. - - -class NamespaceLibTests(unittest.TestCase): - image_name = 'alpine:latest' - - def setUp(self): - self.docker = docker.APIClient( - base_url='unix://var/run/docker.sock', version='auto') - try: - if len(self.docker.containers()) != 0: - raise Exception( - "Sorry, this test requires a machine with no docker" - "containers running.") - except requests.exceptions.ConnectionError: - print ("Error connecting to docker daemon, are you in the docker" - "group? 
You need to be in the docker group.") - - self.docker.pull(repository='alpine', tag='latest') - self.container = self.docker.create_container( - image=self.image_name, command='/bin/sleep 300') - self.tempd = tempfile.mkdtemp(prefix='crawlertest.') - self.docker.start(container=self.container['Id']) - inspect = self.docker.inspect_container(self.container['Id']) - print inspect - self.pid = str(inspect['State']['Pid']) - - def tearDown(self): - self.docker.stop(container=self.container['Id']) - self.docker.remove_container(container=self.container['Id']) - - shutil.rmtree(self.tempd) - - def test_run_as_another_namespace_function_args(self): - res = run_as_another_namespace( - self.pid, all_namespaces, func_args, "arg1", "arg2") - assert res == "test arg1 arg2" - print sys._getframe().f_code.co_name, 1 - - def test_run_as_another_namespace_function_kwargs(self): - res = run_as_another_namespace( - self.pid, all_namespaces, func_kwargs, arg1="arg1", arg2="arg2") - assert res == "test arg1 arg2" - print sys._getframe().f_code.co_name, 1 - - def test_run_as_another_namespace_function_mixed_args(self): - res = run_as_another_namespace( - self.pid, all_namespaces, func_mixed_args, "arg1", arg2="arg2") - assert res == "test arg1 arg2" - print sys._getframe().f_code.co_name, 1 - - def test_run_as_another_namespace_simple_function_no_args(self): - res = run_as_another_namespace(self.pid, all_namespaces, func_no_args) - assert res == "test default" - print sys._getframe().f_code.co_name, 1 - - def test_run_as_another_namespace_crashing_function(self): - with self.assertRaises(FooError): - run_as_another_namespace( - self.pid, all_namespaces, func_crash, "arg") - - def test_run_as_another_namespace_infinite_loop_function(self): - with self.assertRaises(CrawlTimeoutError): - run_as_another_namespace( - self.pid, all_namespaces, func_infinite_loop, "arg") - - if __name__ == '__main__': - logging.basicConfig( - filename='test_namespace.log', - filemode='a', - format='%(asctime)s %(levelname)s : %(message)s', - level=logging.DEBUG) - - unittest.main() diff --git a/tests/functional/test_functional_plugins.py b/tests/functional/test_functional_plugins.py deleted file mode 100644 index 59bdd269..00000000 --- a/tests/functional/test_functional_plugins.py +++ /dev/null @@ -1,84 +0,0 @@ -import shutil -import tempfile -import unittest - -import docker -import requests.exceptions -from plugins.systems.cpu_container_crawler import CpuContainerCrawler -from plugins.systems.cpu_host_crawler import CpuHostCrawler -from plugins.systems.memory_container_crawler import MemoryContainerCrawler -from plugins.systems.memory_host_crawler import MemoryHostCrawler -from plugins.systems.os_container_crawler import OSContainerCrawler -from plugins.systems.process_container_crawler import ProcessContainerCrawler - - -# Tests the FeaturesCrawler class -# Throws an AssertionError if any test fails - - -# Tests conducted with a single container running. -class HostAndContainerPluginsFunctionalTests(unittest.TestCase): - image_name = 'alpine:latest' - - def setUp(self): - self.docker = docker.APIClient( - base_url='unix://var/run/docker.sock', version='auto') - try: - if len(self.docker.containers()) != 0: - raise Exception( - "Sorry, this test requires a machine with no docker" - "containers running.") - except requests.exceptions.ConnectionError: - print ("Error connecting to docker daemon, are you in the docker" - "group? 
You need to be in the docker group.") - - self.docker.pull(repository='alpine', tag='latest') - self.container = self.docker.create_container( - image=self.image_name, command='/bin/sleep 60') - self.tempd = tempfile.mkdtemp(prefix='crawlertest.') - self.docker.start(container=self.container['Id']) - - def tearDown(self): - self.docker.stop(container=self.container['Id']) - self.docker.remove_container(container=self.container['Id']) - - shutil.rmtree(self.tempd) - - def test_crawl_invm_cpu(self): - fc = CpuHostCrawler() - cores = len(list(fc.crawl())) - assert cores > 0 - - def test_crawl_invm_mem(self): - fc = MemoryHostCrawler() - cores = len(list(fc.crawl())) - assert cores > 0 - - def test_crawl_outcontainer_cpu(self): - fc = CpuContainerCrawler() - for key, feature, t in fc.crawl(self.container['Id']): - print key, feature - cores = len(list(fc.crawl(self.container['Id']))) - assert cores > 0 - - def test_crawl_outcontainer_os(self): - fc = OSContainerCrawler() - assert len(list(fc.crawl(self.container['Id']))) == 1 - - def test_crawl_outcontainer_processes(self): - fc = ProcessContainerCrawler() - # sleep + crawler - assert len(list(fc.crawl(self.container['Id']))) == 2 - - def test_crawl_outcontainer_processes_mmapfiles(self): - fc = ProcessContainerCrawler() - output = "%s" % list(fc.crawl(self.container['Id'], get_mmap_files='True')) - assert '/bin/busybox' in output - - def test_crawl_outcontainer_mem(self): - fc = MemoryContainerCrawler() - output = "%s" % list(fc.crawl(self.container['Id'])) - assert 'memory_used' in output - - if __name__ == '__main__': - unittest.main() diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index 3735bb7b..af33f32d 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -100,7 +100,7 @@ def remove_crawled_container(self): self.docker.stop(container=self.container['Id']) self.docker.remove_container(container=self.container['Id']) - def testCrawlContainer1(self): + def _testCrawlContainer1(self): crawler = SafeContainersCrawler( features=[], user_list=self.container['Id']) frames = list(crawler.crawl()) @@ -120,7 +120,7 @@ def testCrawlContainer1(self): assert 'rubypackage' in output assert 'rake' in output - def testCrawlContainer2(self): + def _testCrawlContainer2(self): env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) os.makedirs(self.tempd + '/out') @@ -136,6 +136,7 @@ def testCrawlContainer2(self): '--crawlmode', 'OUTCONTAINERSAFE', ], env=env) + time.sleep(30) stdout, stderr = process.communicate() assert process.returncode == 0 @@ -175,6 +176,7 @@ def testCrawlContainerNoPlugins(self): '--crawlmode', 'OUTCONTAINERSAFE', ], env=env) + time.sleep(30) stdout, stderr = process.communicate() assert process.returncode == 0 @@ -205,12 +207,14 @@ def testCrawlContainerKafka(self): [ '/usr/bin/python', mypath + '/../../crawler/crawler.py', '--url', 'kafka://localhost:9092/test', - '--features', 'os,process', + '--features', 'none', '--crawlContainers', self.container['Id'], - '--crawlmode', 'OUTCONTAINER', + '--crawlmode', 'OUTCONTAINERSAFE', '--numprocesses', '1' ], env=env) + time.sleep(30) + stdout, stderr = process.communicate() assert process.returncode == 0 @@ -221,7 +225,21 @@ def testCrawlContainerKafka(self): topic = kafka.topics['test'] consumer = topic.get_simple_consumer() message = consumer.consume() + print message.value assert '"cmd":"tail -f 
/dev/null"' in message.value + assert 'interface-lo' in message.value + assert 'if_octets_tx' in message.value + assert 'cpu-0' in message.value + assert 'cpu_nice' in message.value + assert 'memory' in message.value + assert 'memory_buffered' in message.value + assert 'os' in message.value + assert 'linux' in message.value + assert 'process' in message.value + assert 'tail' in message.value + assert 'plugincont_user' in message.value + assert 'rubypackage' in message.value + assert 'rake' in message.value def _setup_plugincont_testing1(self): plugincont_name = '/plugin_cont_' + self.container['Id'] @@ -262,6 +280,7 @@ def testCrawlContainerEvilPlugin(self): '--crawlmode', 'OUTCONTAINERSAFE', ], env=env) + time.sleep(30) stdout, stderr = process.communicate() assert process.returncode == 0 diff --git a/tests/functional/test_functional_vm_plugins.py b/tests/functional/test_functional_vm_plugins.py deleted file mode 100644 index 3f049342..00000000 --- a/tests/functional/test_functional_vm_plugins.py +++ /dev/null @@ -1,159 +0,0 @@ -import subprocess -import time -import unittest - -from plugins.systems.connection_vm_crawler import ConnectionVmCrawler -from plugins.systems.interface_vm_crawler import InterfaceVmCrawler -from plugins.systems.memory_vm_crawler import MemoryVmCrawler -from plugins.systems.metric_vm_crawler import MetricVmCrawler -from plugins.systems.os_vm_crawler import os_vm_crawler - -from plugins.systems.process_vm_crawler import process_vm_crawler -from utils.features import ( - ProcessFeature, - MetricFeature, - MemoryFeature, -) - - -# Tests the FeaturesCrawler class -# Throws an AssertionError if any test fails - -class VmPluginsFunctionalTests(unittest.TestCase): - - SETUP_ONCE = False - vm_descs = [['vm2', '4.0.3.x86_64', 'vanilla', 'x86_64'], - ['vm3', '3.2.0-101-generic_3.2.0-101.x86_64', - 'ubuntu', 'x86_64'], - ['vm4', '3.13.0-24-generic_3.13.0-24.x86_64', - 'ubuntu', 'x86_64'] - ] - - def create_vm_via_bash(self, vmID): - qemu_out_file = "/tmp/psvmi_qemu_out" - serial = "file:" + qemu_out_file - - vmlinuz = "psvmi/tests/vmlinuz/vmlinuz-" + vmID[1] - vm_name = vmID[0] - - disk_file = "psvmi/tests/" + vm_name + "disk.qow2" - subprocess.call(["cp", "psvmi/tests/disk.qcow2", disk_file]) - disk = "format=raw,file=" + disk_file - - qemu_cmd = subprocess.Popen( - ("qemu-system-x86_64", - "-kernel", - vmlinuz, - "-append", - ("init=psvmi_test_init root=/dev/sda console=ttyAMA0 " - "console=ttyS0"), - "-name", - vm_name, - "-m", - "512", - "-smp", - "1", - "-drive", - disk, - "-display", - "none", - "-serial", - serial)) - - vmID.append(str(qemu_cmd.pid)) # vmID[4]=qemu_pid - - # ugly way to figure out if a VM has booted, could not pipe output - # from qemu properly - vm_ready = False - - while True: - time.sleep(4) - - fr = open(qemu_out_file, "r") - for line in fr.readlines(): - if "Mounted root" in line: - time.sleep(3) - vm_ready = True - break - fr.close() - - if vm_ready is True: - break - - def setUp(self): - if VmPluginsFunctionalTests.SETUP_ONCE is False: - for vm_desc in VmPluginsFunctionalTests.vm_descs: - self.create_vm_via_bash(vm_desc) - VmPluginsFunctionalTests.SETUP_ONCE = True - self.vm_descs = VmPluginsFunctionalTests.vm_descs - - @classmethod - def teardown_class(cls): - for _, _, _, _, pid in VmPluginsFunctionalTests.vm_descs: - subprocess.call(["kill", "-9", pid]) - - def _tearDown(self): - for _, _, _, _, pid in self.vm_descs: - subprocess.call(["kill", "-9", pid]) - # no need to rm qcow disk files since they get destroyed on - # container exit - -
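The boot-detection loop in create_vm_via_bash above polls the qemu serial-output file for a "Mounted root" line and never gives up if the marker is absent. A bounded variant, as a sketch only (the 120-second timeout is an assumption, not a value taken from these tests):

    # Sketch: same polling idea as the deleted helper, but with a deadline
    # so a broken guest fails the test instead of hanging it.
    import time

    def wait_for_boot(qemu_out_file, marker="Mounted root", timeout=120):
        deadline = time.time() + timeout
        while time.time() < deadline:
            with open(qemu_out_file) as fr:
                if any(marker in line for line in fr):
                    time.sleep(3)  # settle time, mirroring the original
                    return
            time.sleep(4)
        raise RuntimeError("VM did not boot within %d seconds" % timeout)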
def test_crawl_outvm_os(self): - fc = os_vm_crawler() - for _, kernel, distro, arch, pid in self.vm_descs: - for item in fc.crawl(vm_desc=(pid, kernel, distro, arch)): - assert 'Linux' in item - - def test_crawl_outvm_process(self): - fc = process_vm_crawler() - for _, kernel, distro, arch, pid in self.vm_descs: - for item in fc.crawl(vm_desc=(pid, kernel, distro, arch)): - p = ProcessFeature._make(item[1]) - if p.pid == 0: - assert 'swapper' in str(p.pname) - elif p.pname == 'psvmi_test_init': - assert 'devconsole' in str(p.openfiles) - else: - assert p.pid > 0 - - def test_crawl_outvm_mem(self): - fc = MemoryVmCrawler() - for _, kernel, distro, arch, pid in self.vm_descs: - for item in fc.crawl(vm_desc=(pid, kernel, distro, arch)): - meminfo = MemoryFeature._make(item[1]) - assert (meminfo.memory_util_percentage >= 0) - - def test_crawl_outvm_metrics(self): - fc = MetricVmCrawler() - for _, kernel, distro, arch, pid in self.vm_descs: - for item in fc.crawl(vm_desc=(pid, kernel, distro, arch)): - p = MetricFeature._make(item[1]) - if p.pname == 'psvmi_test_init': - assert p.rss > 0 - assert p.vms > 0 - assert p.mempct >= 0 - # strictly speaking > 0 but due to rounding - - # to see if 100% cpu util shows up for psvmi_test_init - # time.sleep(1) - # print list(crawler.crawl_metrics()) - - def _test_crawl_outvm_modules(self): - for crawler in self.crawlers: - output = crawler.crawl_modules() - assert len(list(output)) > 0 - - def test_crawl_outvm_interface(self): - fc = InterfaceVmCrawler() - for _, kernel, distro, arch, pid in self.vm_descs: - output = fc.crawl(vm_desc=(pid, kernel, distro, arch)) - assert any('lo' in item[0] for item in output) - - def test_crawl_outvm_connections(self): - fc = ConnectionVmCrawler() - for _, kernel, distro, arch, pid in self.vm_descs: - output = fc.crawl(vm_desc=(pid, kernel, distro, arch)) - assert len(list(output)) == 0 # There are no connections - - if __name__ == '__main__': - unittest.main() diff --git a/tests/functional/test_functional_vms_crawler.py b/tests/functional/test_functional_vms_crawler.py deleted file mode 100644 index e864a3c7..00000000 --- a/tests/functional/test_functional_vms_crawler.py +++ /dev/null @@ -1,147 +0,0 @@ -import unittest -import tempfile -import os -import subprocess -import time - -# Tests for crawlers in kraken crawlers configuration. - -from vms_crawler import VirtualMachinesCrawler - -# Tests conducted with a single container running.
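Both VM suites identify guests with comma-separated descriptors of the form 'name,kernel,distro,arch' (see the vm_list entries and --crawlVMs arguments below); the qemu pid is resolved at runtime and combined with the last three fields into the vm_desc tuple the plugins receive. A minimal sketch of that parsing, under those assumptions:

    # Sketch only: split one descriptor entry into its four fields; the
    # pid comes from the qemu process at runtime, not from the entry.
    def parse_vm_descriptor(entry):
        name, kernel, distro, arch = entry.split(',')
        return name, kernel, distro, arch

    name, kernel, distro, arch = parse_vm_descriptor(
        'vm2,4.0.3.x86_64,vanilla,x86_64')
    assert (name, distro, arch) == ('vm2', 'vanilla', 'x86_64')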
- - -class VirtualMachinesCrawlerTests(unittest.TestCase): - - SETUP_ONCE = False - - vmIDs = [['vm2', '4.0.3.x86_64', 'vanilla', 'x86_64'], - ['vm3', '3.2.0-101-generic_3.2.0-101.x86_64', 'ubuntu', 'x86_64'], - ['vm4', '3.13.0-24-generic_3.13.0-24.x86_64', 'ubuntu', 'x86_64'] - ] - - def create_vm_via_bash(self, vmID): - qemu_out_file = "/tmp/psvmi_qemu_out" - serial = "file:" + qemu_out_file - - vmlinuz = "psvmi/tests/vmlinuz/vmlinuz-" + vmID[1] - vm_name = vmID[0] - - disk_file = "psvmi/tests/" + vm_name + "disk.qow2" - subprocess.call(["cp", "psvmi/tests/disk.qcow2", disk_file]) - disk = "format=raw,file=" + disk_file - - qemu_cmd = subprocess.Popen( - ("qemu-system-x86_64", - "-kernel", - vmlinuz, - "-append", - ("init=psvmi_test_init root=/dev/sda console=ttyAMA0 " - "console=ttyS0"), - "-name", - vm_name, - "-m", - "512", - "-smp", - "1", - "-drive", - disk, - "-display", - "none", - "-serial", - serial)) - - vmID.append(str(qemu_cmd.pid)) # vmID[4]=qemu_pid - - # ugly way to figure out if a VM has booted, could not pipe output - # from qemu properly - vm_ready = False - - while True: - time.sleep(4) - - fr = open(qemu_out_file, "r") - for line in fr.readlines(): - if "Mounted root" in line: - time.sleep(3) - vm_ready = True - break - fr.close() - - if vm_ready is True: - break - - def create_vms(self): - for vmID in VirtualMachinesCrawlerTests.vmIDs: - self.create_vm_via_bash(vmID) - - @classmethod - def teardown_class(cls): - for vmID in VirtualMachinesCrawlerTests.vmIDs: - subprocess.call(["kill", "-9", vmID[4]]) - - def setUp(self): - self.tempd = tempfile.mkdtemp(prefix='crawlertest.') - if VirtualMachinesCrawlerTests.SETUP_ONCE is False: - self.create_vms() - VirtualMachinesCrawlerTests.SETUP_ONCE = True - - def testCrawlVM1(self): - vm_list = [ - 'vm2,4.0.3.x86_64,vanilla,x86_64', - 'vm3,3.2.0-101-generic_3.2.0-101.x86_64,ubuntu,x86_64', - 'vm4,3.13.0-24-generic_3.13.0-24.x86_64,ubuntu,x86_64'] - crawler = VirtualMachinesCrawler( - features=[ - 'os', - 'memory', - 'interface', - 'process'], - user_list=vm_list) - frames = list(crawler.crawl()) - output = str(frames[0]) - print output # only printed if the test fails - assert 'interface-lo' in output - assert 'if_octets_tx=' in output - assert 'memory' in output - assert 'memory_buffered=' in output - - def testCrawlVM2(self): - env = os.environ.copy() - mypath = os.path.dirname(os.path.realpath(__file__)) - os.makedirs(self.tempd + '/out') - - process = subprocess.Popen( - [ - '/usr/bin/python', mypath + '/../../crawler/crawler.py', - '--url', 'file://' + self.tempd + '/out/crawler', - '--features', 'os,memory,interface,process', - '--crawlVMs', 'vm2,4.0.3.x86_64,vanilla,x86_64', - 'vm3,3.2.0-101-generic_3.2.0-101.x86_64,ubuntu,x86_64', - 'vm4,3.13.0-24-generic_3.13.0-24.x86_64,ubuntu,x86_64', - '--crawlmode', 'OUTVM', - '--numprocesses', '1' - ], - env=env) - stdout, stderr = process.communicate() - assert process.returncode == 0 - - print stderr - print stdout - - subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) - - files = os.listdir(self.tempd + '/out') - assert len(files) == len(VirtualMachinesCrawlerTests.vmIDs) - - f = open(self.tempd + '/out/' + files[0], 'r') - output = f.read() - print output # only printed if the test fails - assert 'psvmi_test_init' in output - assert 'Linux' in output - assert 'memory_used' in output - assert 'interface-lo' in output - f.close() - - if __name__ == '__main__': - unittest.main() diff --git a/tests/functional/test_logs_in_volumes1.py
b/tests/functional/test_logs_in_volumes1.py deleted file mode 100644 index 0fbf13b1..00000000 --- a/tests/functional/test_logs_in_volumes1.py +++ /dev/null @@ -1,96 +0,0 @@ -import logging -import unittest -import tempfile -import os -import shutil -import mock - -import utils.dockerutils -import dockercontainer -import plugins_manager - -# Tests dockercontainer._get_logfiles_list -# the log file, test1.log is in a host directory -# mounted as volume - - -def get_container_log_files(path, options): - pass - - -@mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=lambda id: 'rootfs') -class DockerContainerTests(unittest.TestCase): - - def setUp(self): - - self.host_log_dir = tempfile.mkdtemp(prefix='host_log_dir.') - self.volume = tempfile.mkdtemp(prefix='volume.') - for logf in ['test1.log', 'test2.log']: - with open(os.path.join(self.volume, logf), 'w') as logp: - logp.write(logf) - - def tearDown(self): - shutil.rmtree(self.volume) - shutil.rmtree(self.host_log_dir) - - def test_get_logfiles_list(self, *args): - - inspect = { - "Id": ("1e744b5e3e11e848863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea" - "24e847"), - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186}, - "Image": ("sha256:07c86167cdc4264926fa5d2894e34a339ad27f730e8cc81a" - "16cd21b7479e8eac"), - "Name": "/pensive_rosalind", - "LogPath": ("/var/lib/docker/containers/1e744b5e3e11e848863fefe9d9" - "a8b3731070c6b0c702a04d2b8ab948ea24e847/1e744b5e3e11e8" - "48863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" - "-json.log"), - "HostnamePath": ("/var/lib/docker/containers/1e744b5e3e11e848863fe" - "fe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" - "/hostname"), - "Mounts": [ - { - "Source": self.volume, - "Destination": "/data"}], - "Config": { - "Cmd": ["bash"], - "Image": "ubuntu:trusty"}, - "docker_image_long_name": "long_name/short_name", - "docker_image_short_name": "short_name", - "docker_image_tag": "image_tag", - "docker_image_registry": "image_registry", - "owner_namespace": "owner_namespace", - "NetworkSettings": {}} - - plugins_manager.runtime_env = None - self.docker_container = \ - dockercontainer.DockerContainer(inspect['Id'], inspect) - - self.docker_container._get_container_log_files = \ - get_container_log_files - self.docker_container.log_file_list = [ - {'name': '/data/test1.log', 'type': None}] - - log_list = self.docker_container._set_logs_list() - log_list = self.docker_container.logs_list - for log in log_list: - if log.name == '/data/test1.log': - self.assertEqual( - log.dest, self.host_log_dir + '/data/test1.log') - self.assertEqual(log.source, - self.volume + '/test1.log') - -if __name__ == '__main__': - logging.basicConfig( - filename='test_dockerutils.log', - filemode='a', - format='%(asctime)s %(levelname)s : %(message)s', - level=logging.DEBUG) - - unittest.main() diff --git a/tests/functional/test_logs_in_volumes_star.py b/tests/functional/test_logs_in_volumes_star.py deleted file mode 100644 index c6460a5c..00000000 --- a/tests/functional/test_logs_in_volumes_star.py +++ /dev/null @@ -1,93 +0,0 @@ -import logging -import unittest -import tempfile -import os -import shutil -import mock - -import utils.dockerutils -import dockercontainer - -# Tests dockercontainer._get_logfiles_list -# the log file, test1.log is in a host directory -# mounted as volume - - -def get_container_log_files(path, options): - pass - - -@mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=lambda id: 'rootfs') -class 
DockerContainerTests(unittest.TestCase): - - def setUp(self): - - self.host_log_dir = tempfile.mkdtemp(prefix='host_log_dir.') - self.volume = tempfile.mkdtemp(prefix='volume.') - self.log_file_list = ['test1.log', 'test2.log'] - for logf in self.log_file_list: - with open(os.path.join(self.volume, logf), 'w') as logp: - logp.write(logf) - - def tearDown(self): - shutil.rmtree(self.volume) - shutil.rmtree(self.host_log_dir) - - def test_get_logfiles_list(self, *args): - - inspect = { - "Id": ("1e744b5e3e11e848863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea" - "24e847"), - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186}, - "Image": ("sha256:07c86167cdc4264926fa5d2894e34a339ad27f730e8cc81a" - "16cd21b7479e8eac"), - "Name": "/pensive_rosalind", - "LogPath": ("/var/lib/docker/containers/1e744b5e3e11e848863fefe9d9" - "a8b3731070c6b0c702a04d2b8ab948ea24e847/1e744b5e3e11e8" - "48863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" - "-json.log"), - "HostnamePath": ("/var/lib/docker/containers/1e744b5e3e11e848863fe" - "fe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" - "/hostname"), - "Mounts": [ - { - "Source": self.volume, - "Destination": "/data"}], - "Config": { - "Cmd": ["bash"], - "Image": "ubuntu:trusty"}, - "docker_image_long_name": "long_name/short_name", - "docker_image_short_name": "short_name", - "docker_image_tag": "image_tag", - "docker_image_registry": "image_registry", - "owner_namespace": "owner_namespace", - "NetworkSettings": {}} - self.docker_container = dockercontainer.\ - DockerContainer(inspect['Id'], inspect) - - self.docker_container.\ - _get_container_log_files = get_container_log_files - self.docker_container.log_file_list = [ - {'name': '/data/test*.log', 'type': None}] - - self.docker_container._set_logs_list() - log_list = self.docker_container.logs_list - for log in log_list: - if log.name == '/data/test*.log': - assert os.path.basename(log.dest) in self.log_file_list - assert os.path.basename( - log.source) in self.log_file_list - -if __name__ == '__main__': - logging.basicConfig( - filename='test_dockerutils.log', - filemode='a', - format='%(asctime)s %(levelname)s : %(message)s', - level=logging.DEBUG) - - unittest.main() diff --git a/tests/functional/test_logs_no_volumes.py b/tests/functional/test_logs_no_volumes.py deleted file mode 100644 index 057a7d30..00000000 --- a/tests/functional/test_logs_no_volumes.py +++ /dev/null @@ -1,90 +0,0 @@ -import logging -import unittest -import tempfile -import os -import shutil -import mock - -import utils.dockerutils -import dockercontainer - - -# Tests dockercontainer._get_logfiles_list -# for the case when no volumes are mounted - - -def get_container_log_files(path, options): - pass - - -@mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=lambda id: 'rootfs') -class DockerContainerTests(unittest.TestCase): - - def setUp(self): - - self.host_log_dir = tempfile.mkdtemp(prefix='host_log_dir.') - self.volume = tempfile.mkdtemp(prefix='volume.') - for logf in ['test1.log', 'test2.log']: - with open(os.path.join(self.volume, logf), 'w') as logp: - logp.write(logf) - - def tearDown(self): - shutil.rmtree(self.volume) - shutil.rmtree(self.host_log_dir) - - def test_get_logfiles_list(self, *args): - - inspect = { - "Id": ("1e744b5e3e11e848863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea" - "24e847"), - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186}, - "Image": 
("sha256:07c86167cdc4264926fa5d2894e34a339ad27f730e8cc81a" - "16cd21b7479e8eac"), - "Name": "/pensive_rosalind", - "LogPath": ("/var/lib/docker/containers/1e744b5e3e11e848863fefe9d9" - "a8b3731070c6b0c702a04d2b8ab948ea24e847/1e744b5e3e11e8" - "48863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" - "-json.log"), - "HostnamePath": ("/var/lib/docker/containers/1e744b5e3e11e848863fe" - "fe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" - "/hostname"), - "Mounts": [], - "Config": { - "Cmd": ["bash"], - "Image": "ubuntu:trusty"}, - "docker_image_long_name": "long_name/short_name", - "docker_image_short_name": "short_name", - "docker_image_tag": "image_tag", - "docker_image_registry": "image_registry", - "owner_namespace": "owner_namespace", - "NetworkSettings": {}} - self.docker_container = dockercontainer.\ - DockerContainer(inspect['Id'], inspect) - - self.docker_container.\ - _get_container_log_files = get_container_log_files - self.docker_container.log_file_list = [ - {'name': '/data/test1.log', 'type': None}] - - self.docker_container._set_logs_list() - log_list = self.docker_container.logs_list - for log in log_list: - if log.name == '/data/test1.log': - self.assertEqual( - log.dest, self.host_log_dir + - '/data/test1.log' - ) - -if __name__ == '__main__': - logging.basicConfig( - filename='test_dockerutils.log', - filemode='a', - format='%(asctime)s %(levelname)s : %(message)s', - level=logging.DEBUG) - - unittest.main() diff --git a/tests/unit/.gitignore b/tests/unit/.gitignore deleted file mode 100644 index 9e1ea78e..00000000 --- a/tests/unit/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.pyc -*alchemy* -*.log* diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/unit/aufs_mount_init-id b/tests/unit/aufs_mount_init-id deleted file mode 100644 index 08dbf2d4..00000000 --- a/tests/unit/aufs_mount_init-id +++ /dev/null @@ -1 +0,0 @@ -vol1/id/rootfs-a-b-c diff --git a/tests/unit/btrfs_mount_init-id b/tests/unit/btrfs_mount_init-id deleted file mode 100644 index 08dbf2d4..00000000 --- a/tests/unit/btrfs_mount_init-id +++ /dev/null @@ -1 +0,0 @@ -vol1/id/rootfs-a-b-c diff --git a/tests/unit/capturing.py b/tests/unit/capturing.py deleted file mode 100644 index 29117fe1..00000000 --- a/tests/unit/capturing.py +++ /dev/null @@ -1,16 +0,0 @@ -from cStringIO import StringIO -import sys - -# Class used to capture the stdout of a function - - -class Capturing(list): - - def __enter__(self): - self._stdout = sys.stdout - sys.stdout = self._stringio = StringIO() - return self - - def __exit__(self, *args): - self.extend(self._stringio.getvalue().splitlines()) - sys.stdout = self._stdout diff --git a/tests/unit/liberty_connection_stats b/tests/unit/liberty_connection_stats deleted file mode 100644 index 75581e60..00000000 --- a/tests/unit/liberty_connection_stats +++ /dev/null @@ -1 +0,0 @@ -{"className":"com.ibm.ws.session.monitor.SessionStats","description":"Information on the management interface of the 
MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.websphere.session.monitor.SessionStatsMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"CheckedOutCount","type":"long","description":"ActiveCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/CheckedOutCountValue"},{"name":"WaitQueueSize","type":"long","description":"LiveCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/WaitQueueSizeValue"},{"name":"MinSize","type":"long","description":"CreateCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/MinSizeValue"},{"name":"MaxSize","type":"long","description":"InvalidatedCountbyTimeout","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/MaxSizeValue"},{"name":"Size","type":"long","description":"InvalidatedCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0
wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/SizeValue"},{"name":"Host","type":"long","description":"InvalidatedCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/HostValue"},{"name":"Port","type":"long","description":"InvalidatedCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/PortValue"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes","constructors":[{"name":"com.ibm.ws.session.monitor.SessionStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[]}],"notifications":[],"operations":[]} diff --git a/tests/unit/liberty_jvm_stats b/tests/unit/liberty_jvm_stats deleted file mode 100644 index 3cec3cce..00000000 --- a/tests/unit/liberty_jvm_stats +++ /dev/null @@ -1 +0,0 @@ -{"className":"com.ibm.ws.monitors.helper.JvmStats","description":"Information on the management interface of the 
MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.websphere.monitor.meters.JvmMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"UsedMemory","type":"long","description":"UsedMemory","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/UsedMemory"},{"name":"FreeMemory","type":"long","description":"FreeMemory","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/FreeMemory"},{"name":"Heap","type":"long","description":"Heap","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/Heap"},{"name":"UpTime","type":"long","description":"UpTime","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/UpTime"},{"name":"ProcessCPU","type":"double","description":"ProcessCPU","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuRG91YmxlcQB+AARxAH4ABA=="}},{"value":"double","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/Proce
ssCPU"},{"name":"GcCount","type":"long","description":"GcCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/GcCount"},{"name":"GcTime","type":"long","description":"GcTime","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/GcTime"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes","constructors":[{"name":"com.ibm.ws.monitors.helper.JvmStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[{"name":"p1","type":"com.ibm.ws.monitors.helper.JvmMonitorHelper","description":"","descriptor":{"names":[],"values":[]}}]}],"notifications":[],"operations":[]} \ No newline at end of file diff --git a/tests/unit/liberty_mbeans b/tests/unit/liberty_mbeans deleted file mode 100644 index d80a306b..00000000 --- a/tests/unit/liberty_mbeans +++ /dev/null @@ -1,2 +0,0 @@ -[{"objectName":"WebSphere:type=ServletStats","className":"com.mongodb.management.ConnectionPoolStatistics","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DConnectionPool"},{"objectName":"WebSphere:type=ServletStats,name=com.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet","className":"com.ibm.ws.webcontainer.monitor.ServletStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats"},{"objectName":"java.lang:type=MemoryPool,name=Java heap","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DJava+heap%2Ctype%3DMemoryPool"},{"objectName":"java.lang:type=GarbageCollector,name=Copy","className":"com.ibm.lang.management.GarbageCollectorMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DCopy%2Ctype%3DGarbageCollector"},{"objectName":"WebSphere:name=com.ibm.websphere.config.mbeans.FeatureListMBean","className":"com.ibm.ws.config.featuregen.internal.FeatureListMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.websphere.config.mbeans.FeatureListMBean"},{"objectName":"java.lang:type=MemoryPool,name=class 
storage","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3Dclass+storage%2Ctype%3DMemoryPool"},{"objectName":"osgi.core:type=bundleState,version=1.7,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.framework.BundleState","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Ctype%3DbundleState%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.7"},{"objectName":"WebSphere:name=com.ibm.websphere.runtime.update.RuntimeUpdateNotificationMBean","className":"com.ibm.ws.runtime.update.internal.RuntimeUpdateNotificationMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.websphere.runtime.update.RuntimeUpdateNotificationMBean"},{"objectName":"java.lang:type=GarbageCollector,name=MarkSweepCompact","className":"com.ibm.lang.management.GarbageCollectorMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DMarkSweepCompact%2Ctype%3DGarbageCollector"},{"objectName":"java.lang:type=Memory","className":"com.ibm.lang.management.MemoryMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DMemory"},{"objectName":"java.lang:type=Compilation","className":"com.ibm.lang.management.CompilationMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DCompilation"},{"objectName":"java.util.logging:type=Logging","className":"com.ibm.lang.management.LoggingMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.util.logging%3Atype%3DLogging"},{"objectName":"java.nio:type=BufferPool,name=mapped","className":"com.ibm.lang.management.BufferPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.nio%3Aname%3Dmapped%2Ctype%3DBufferPool"},{"objectName":"WebSphere:name=com.ibm.ws.jmx.mbeans.sessionManagerMBean","className":"com.ibm.ws.session.SessionManagerMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.mbeans.sessionManagerMBean"},{"objectName":"WebSphere:name=com.ibm.ws.config.serverSchemaGenerator","className":"com.ibm.ws.config.schemagen.internal.ServerSchemaGeneratorImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.config.serverSchemaGenerator"},{"objectName":"WebSphere:feature=kernel,name=ServerInfo","className":"com.ibm.ws.kernel.server.internal.ServerInfoMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3Dkernel%2Cname%3DServerInfo"},{"objectName":"WebSphere:type=ThreadPoolStats,name=Default 
Executor","className":"com.ibm.ws.monitors.helper.ThreadPoolStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats"},{"objectName":"WebSphere:name=com.ibm.ws.jmx.mbeans.generatePluginConfig","className":"com.ibm.ws.webcontainer.osgi.mbeans.GeneratePluginConfigMBean","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.mbeans.generatePluginConfig"},{"objectName":"JMImplementation:type=MBeanServerDelegate","className":"com.ibm.ws.kernel.boot.jmx.internal.PlatformMBeanServerDelegate","URL":"/IBMJMXConnectorREST/mbeans/JMImplementation%3Atype%3DMBeanServerDelegate"},{"objectName":"osgi.core:type=packageState,version=1.5,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.framework.PackageState","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Ctype%3DpackageState%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.5"},{"objectName":"WebSphere:feature=CacheAdmin,type=DynaCache,name=DistributedMap","className":"com.ibm.ws.cache.MBeans","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3DCacheAdmin%2Cname%3DDistributedMap%2Ctype%3DDynaCache"},{"objectName":"osgi.compendium:service=cm,version=1.3,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"com.ibm.ws.jmx.internal.ReadOnlyConfigurationAdmin","URL":"/IBMJMXConnectorREST/mbeans/osgi.compendium%3Aframework%3Dorg.eclipse.osgi%2Cservice%3Dcm%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.3"},{"objectName":"WebSphere:type=SessionStats,name=default_host/IBMJMXConnectorREST","className":"com.ibm.ws.session.monitor.SessionStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats"},{"objectName":"WebSphere:feature=channelfw,type=endpoint,name=defaultHttpEndpoint-ssl","className":"com.ibm.websphere.channelfw.EndPointInfo","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3Dchannelfw%2Cname%3DdefaultHttpEndpoint-ssl%2Ctype%3Dendpoint"},{"objectName":"java.lang:type=ClassLoading","className":"com.ibm.lang.management.ClassLoadingMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DClassLoading"},{"objectName":"WebSphere:name=com.ibm.websphere.config.mbeans.ServerXMLConfigurationMBean","className":"com.ibm.ws.config.xml.internal.ServerXMLConfigurationMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.websphere.config.mbeans.ServerXMLConfigurationMBean"},{"objectName":"com.ibm.lang.management:type=JvmCpuMonitor","className":"com.ibm.lang.management.JvmCpuMonitor","URL":"/IBMJMXConnectorREST/mbeans/com.ibm.lang.management%3Atype%3DJvmCpuMonitor"},{"objectName":"osgi.core:type=serviceState,version=1.7,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.framework.ServiceState","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Ctype%3DserviceState%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.7"},{"objectName":"java.lang:type=OperatingSystem","className":"com.ibm.lang.management.UnixExtendedOperatingSystem","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DOperatingSystem"},{"objectName":"com.ibm.virtualization.management:type=Hypervisor","className":"com.ibm.virtualization.management.HypervisorMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/com.ibm.virtualization.management%3Atype%3DHypervisor"},{"objectName":"java.lang:type=MemoryPool,name=JIT data 
cache","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DJIT+data+cache%2Ctype%3DMemoryPool"},{"objectName":"osgi.core:service=permissionadmin,version=1.2,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.permissionadmin.PermissionAdmin","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Cservice%3Dpermissionadmin%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.2"},{"objectName":"java.lang:type=MemoryPool,name=JIT code cache","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DJIT+code+cache%2Ctype%3DMemoryPool"},{"objectName":"java.lang:type=Runtime","className":"com.ibm.lang.management.RuntimeMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DRuntime"},{"objectName":"java.lang:type=Threading","className":"com.ibm.lang.management.ThreadMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DThreading"},{"objectName":"WebSphere:type=JvmStats","className":"com.ibm.ws.monitors.helper.JvmStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats"},{"objectName":"java.lang:type=MemoryManager,name=J9 non-heap manager","className":"com.ibm.lang.management.MemoryManagerMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DJ9+non-heap+manager%2Ctype%3DMemoryManager"},{"objectName":"java.nio:type=BufferPool,name=direct","className":"com.ibm.lang.management.BufferPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.nio%3Aname%3Ddirect%2Ctype%3DBufferPool"},{"objectName":"osgi.core:type=framework,version=1.7,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.framework.Framework","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Ctype%3Dframework%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.7"},{"objectName":"WebSphere:service=com.ibm.ws.kernel.filemonitor.FileNotificationMBean","className":"com.ibm.ws.kernel.filemonitor.internal.FileNotificationImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aservice%3Dcom.ibm.ws.kernel.filemonitor.FileNotificationMBean"},{"objectName":"WebSphere:feature=channelfw,type=endpoint,name=defaultHttpEndpoint","className":"com.ibm.websphere.channelfw.EndPointInfo","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3Dchannelfw%2Cname%3DdefaultHttpEndpoint%2Ctype%3Dendpoint"},{"objectName":"com.ibm.virtualization.management:type=GuestOS","className":"com.ibm.virtualization.management.GuestOS","URL":"/IBMJMXConnectorREST/mbeans/com.ibm.virtualization.management%3Atype%3DGuestOS"},{"objectName":"WebSphere:feature=restConnector,type=FileService,name=FileService","className":"com.ibm.ws.filetransfer.internal.mbean.FileService","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3DrestConnector%2Cname%3DFileService%2Ctype%3DFileService"},{"objectName":"java.lang:type=MemoryPool,name=miscellaneous non-heap storage","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3Dmiscellaneous+non-heap+storage%2Ctype%3DMemoryPool"},{"objectName":"WebSphere:feature=restConnector,type=FileTransfer,name=FileTransfer","className":"com.ibm.ws.filetransfer.internal.mbean.FileTransfer","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3DrestConnector%2Cname%3DFileTransfer%2Ctype%3DFileTransfer"}] - diff --git a/tests/unit/liberty_response_time_details 
b/tests/unit/liberty_response_time_details deleted file mode 100644 index fb46a5cf..00000000 --- a/tests/unit/liberty_response_time_details +++ /dev/null @@ -1 +0,0 @@ -{"value":{"count":"292","description":"Average Response Time for servlet","maximumValue":"129746827","mean":"1646404.6780821919","minimumValue":"257689","reading":{"count":"292","maximumValue":"129746827","mean":"1646404.6780821919","minimumValue":"257689","standardDeviation":"7747033.106769906","timestamp":"1479283670331","total":"4.80750166E8","unit":"ns","variance":"6.001652195738899E13"},"standardDeviation":"7746816.577149615","total":"4.80750166E8","unit":"ns","variance":"5.980967601894751E13"},"type":{"className":"javax.management.openmbean.CompositeDataSupport","openType":"0"},"openTypes":[{"openTypeClass":"javax.management.openmbean.CompositeType","className":"javax.management.openmbean.CompositeData","typeName":"com.ibm.websphere.monitor.meters.StatisticsMeter","description":"com.ibm.websphere.monitor.meters.StatisticsMeter","items":[{"key":"count","description":"count","type":"1"},{"key":"description","description":"description","type":"2"},{"key":"maximumValue","description":"maximumValue","type":"1"},{"key":"mean","description":"mean","type":"3"},{"key":"minimumValue","description":"minimumValue","type":"1"},{"key":"reading","description":"reading","type":"4"},{"key":"standardDeviation","description":"standardDeviation","type":"3"},{"key":"total","description":"total","type":"3"},{"key":"unit","description":"unit","type":"2"},{"key":"variance","description":"variance","type":"3"}]},"java.lang.Long","java.lang.String","java.lang.Double",{"openTypeClass":"javax.management.openmbean.CompositeType","className":"javax.management.openmbean.CompositeData","typeName":"com.ibm.websphere.monitor.meters.StatisticsReading","description":"com.ibm.websphere.monitor.meters.StatisticsReading","items":[{"key":"count","description":"count","type":"1"},{"key":"maximumValue","description":"maximumValue","type":"1"},{"key":"mean","description":"mean","type":"3"},{"key":"minimumValue","description":"minimumValue","type":"1"},{"key":"standardDeviation","description":"standardDeviation","type":"3"},{"key":"timestamp","description":"timestamp","type":"1"},{"key":"total","description":"total","type":"3"},{"key":"unit","description":"unit","type":"2"},{"key":"variance","description":"variance","type":"3"}]}]} \ No newline at end of file diff --git a/tests/unit/liberty_response_time_details_mocked b/tests/unit/liberty_response_time_details_mocked deleted file mode 100644 index a78e0ba2..00000000 --- a/tests/unit/liberty_response_time_details_mocked +++ /dev/null @@ -1 +0,0 @@ -{"className":"com.ibm.ws.webcontainer.monitor.ServletStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats","value":{"count":"292","description":"Average Response Time for 
servlet","maximumValue":"129746827","mean":"1646404.6780821919","minimumValue":"257689","reading":{"count":"292","maximumValue":"129746827","mean":"1646404.6780821919","minimumValue":"257689","standardDeviation":"7747033.106769906","timestamp":"1479283670331","total":"4.80750166E8","unit":"ns","variance":"6.001652195738899E13"},"standardDeviation":"7746816.577149615","total":"4.80750166E8","unit":"ns","variance":"5.980967601894751E13"},"type":{"className":"javax.management.openmbean.CompositeDataSupport","openType":"0"},"openTypes":[{"openTypeClass":"javax.management.openmbean.CompositeType","className":"javax.management.openmbean.CompositeData","typeName":"com.ibm.websphere.monitor.meters.StatisticsMeter","description":"com.ibm.websphere.monitor.meters.StatisticsMeter","items":[{"key":"count","description":"count","type":"1"},{"key":"description","description":"description","type":"2"},{"key":"maximumValue","description":"maximumValue","type":"1"},{"key":"mean","description":"mean","type":"3"},{"key":"minimumValue","description":"minimumValue","type":"1"},{"key":"reading","description":"reading","type":"4"},{"key":"standardDeviation","description":"standardDeviation","type":"3"},{"key":"total","description":"total","type":"3"},{"key":"unit","description":"unit","type":"2"},{"key":"variance","description":"variance","type":"3"}]},"java.lang.Long","java.lang.String","java.lang.Double",{"openTypeClass":"javax.management.openmbean.CompositeType","className":"javax.management.openmbean.CompositeData","typeName":"com.ibm.websphere.monitor.meters.StatisticsReading","description":"com.ibm.websphere.monitor.meters.StatisticsReading","items":[{"key":"count","description":"count","type":"1"},{"key":"maximumValue","description":"maximumValue","type":"1"},{"key":"mean","description":"mean","type":"3"},{"key":"minimumValue","description":"minimumValue","type":"1"},{"key":"standardDeviation","description":"standardDeviation","type":"3"},{"key":"timestamp","description":"timestamp","type":"1"},{"key":"total","description":"total","type":"3"},{"key":"unit","description":"unit","type":"2"},{"key":"variance","description":"variance","type":"3"}]}]} diff --git a/tests/unit/liberty_servlet_stats b/tests/unit/liberty_servlet_stats deleted file mode 100644 index 00e843ec..00000000 --- a/tests/unit/liberty_servlet_stats +++ /dev/null @@ -1 +0,0 @@ -{"className":"com.ibm.ws.webcontainer.monitor.ServletStats","description":"Information on the management interface of the 
MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.ws.webcontainer.monitor.ServletStatsMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"RequestCountDetails","type":"javax.management.openmbean.CompositeData","description":"RequestCountDetails","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.CompositeType","value":"rO0ABXNyAChqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5Db21wb3NpdGVUeXBltYdG61oHn0ICAAJMABFuYW1lVG9EZXNjcmlwdGlvbnQAE0xqYXZhL3V0aWwvVHJlZU1hcDtMAApuYW1lVG9UeXBlcQB+AAF4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AANMAAh0eXBlTmFtZXEAfgADeHB0AChqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5Db21wb3NpdGVEYXRhdAAoY29tLmlibS53ZWJzcGhlcmUubW9uaXRvci5tZXRlcnMuQ291bnRlcnEAfgAGc3IAEWphdmEudXRpbC5UcmVlTWFwDMH2Pi0lauYDAAFMAApjb21wYXJhdG9ydAAWTGphdmEvdXRpbC9Db21wYXJhdG9yO3hwcHcEAAAABHQADGN1cnJlbnRWYWx1ZXEAfgAKdAALZGVzY3JpcHRpb25xAH4AC3QAB3JlYWRpbmdxAH4ADHQABHVuaXRxAH4ADXhzcQB+AAdwdwQAAAAEcQB+AApzcgAlamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uU2ltcGxlVHlwZR6/T/jcZXgnAgAAeHEAfgACdAAOamF2YS5sYW5nLkxvbmdxAH4AEXEAfgARcQB+AAtzcQB+AA90ABBqYXZhLmxhbmcuU3RyaW5ncQB+ABNxAH4AE3EAfgAMc3EAfgAAcQB+AAV0AC9jb20uaWJtLndlYnNwaGVyZS5tb25pdG9yLm1ldGVycy5Db3VudGVyUmVhZGluZ3EAfgAVc3EAfgAHcHcEAAAAA3QABWNvdW50cQB+ABd0AAl0aW1lc3RhbXBxAH4AGHQABHVuaXRxAH4AGXhzcQB+AAdwdwQAAAADcQB+ABdxAH4AEHEAfgAYcQB+ABBxAH4AGXEAfgASeHEAfgANcQB+ABJ4"}},{"value":"com.ibm.websphere.monitor.meters.Counter","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/RequestCountDetails"},{"name":"ResponseTimeDetails","type":"javax.management.openmbean.CompositeData","description":"ResponseTimeDetails","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.CompositeType","value":"rO0ABXNyAChqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5Db21wb3NpdGVUeXBltYdG61oHn0ICAAJMABFuYW1lVG9EZXNjcmlwdGlvbnQAE0xqYXZhL3V0aWwvVHJlZU1hcDtMAApuYW1lVG9UeXBlcQB+AAF4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AANMAAh0eXBlTmFtZXEAfgADeHB0AChqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5Db21wb3NpdGVEYXRhdAAwY29tLmlibS53ZWJzcGhlcmUubW9uaXRvci5tZXRlcnMuU3RhdGlzdGljc01ldGVycQB+AAZzcgARamF2YS51dGlsLlRyZWVNYXAMwfY+LSVq5gMAAUwACmNvbXBhcmF0b3J0ABZMamF2YS91dGlsL0NvbXBhcmF0b3I7eHBwdwQAAAAKdAAFY291bnRxAH4ACnQAC2Rlc2NyaXB0aW9ucQB+AAt0AAxtYXhpbXVtVmFsdWVxAH4ADHQABG1lYW5xAH4ADXQADG1pbmltdW1WYWx1ZXEAfgAOdAAHcmVhZGluZ3EAfgAPdAARc3RhbmRhcmREZXZpYXRpb25xAH4AEHQABXRvdGFscQB+ABF0AAR1bml0cQB+ABJ0AAh2YXJpYW5jZXEAfgATeHNxAH4AB3B3BAAAAApxAH4ACnNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cQB+AAJ0AA5qYXZhLmxhbmcuTG9uZ3EAfgAXcQB+ABdxAH4AC3NxAH4AFXQAEGphdmEubGFuZy5TdHJpbmdxAH4AGXEAfgAZcQB+AAxxAH4AFnEAfgANc3EAfgAVdAAQamF2YS5sYW5nLkRvdWJsZXEAfgAbcQB+ABtxAH4ADnEAfgAWcQB+AA9zcQB+AABxAH4ABXQAMmNvbS5pYm0ud2Vic3BoZXJlLm1vbml0b3IubWV0ZXJzLlN0YXRpc3RpY3NSZWFkaW5ncQB+AB1zcQB+AAdwdwQAAAAJdAAFY291bnRxAH4AH3QADG1heGltdW1WYWx1ZXEAfgAgdAAEbWVhbnEAfgAhdAAMbWluaW11bVZhbHVlcQB+ACJ0ABFzdGFuZGFyZERldmlhdGlvbnEAfgAjdAAJdGltZXN0YW1wcQB+ACR0AAV0b3RhbHEAfgAldAAEdW5pdHEAfgAmdAAIdmFyaWFuY2VxAH4AJ3hzcQB+AAdwdwQAAAAJcQB+
AB9xAH4AFnEAfgAgcQB+ABZxAH4AIXEAfgAacQB+ACJxAH4AFnEAfgAjcQB+ABpxAH4AJHEAfgAWcQB+ACVxAH4AGnEAfgAmcQB+ABhxAH4AJ3EAfgAaeHEAfgAQcQB+ABpxAH4AEXEAfgAacQB+ABJxAH4AGHEAfgATcQB+ABp4"}},{"value":"com.ibm.websphere.monitor.meters.StatisticsMeter","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/ResponseTimeDetails"},{"name":"Description","type":"java.lang.String","description":"Description","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuU3RyaW5ncQB+AARxAH4ABA=="}},{"value":"java.lang.String","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/Description"},{"name":"ServletName","type":"java.lang.String","description":"ServletName","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuU3RyaW5ncQB+AARxAH4ABA=="}},{"value":"java.lang.String","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/ServletName"},{"name":"RequestCount","type":"long","description":"RequestCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/RequestCount"},{"name":"ResponseTime","type":"double","description":"ResponseTime","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuRG91YmxlcQB+AARxAH4ABA=="}},{"value":"double","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/ResponseTime"},{"name":"AppName","type":"java.lang.String","description":"A
ppName","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuU3RyaW5ncQB+AARxAH4ABA=="}},{"value":"java.lang.String","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/AppName"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes","constructors":[{"name":"com.ibm.ws.webcontainer.monitor.ServletStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[{"name":"p1","type":"java.lang.String","description":"","descriptor":{"names":[],"values":[]}},{"name":"p2","type":"java.lang.String","description":"","descriptor":{"names":[],"values":[]}}]}],"notifications":[],"operations":[]} \ No newline at end of file diff --git a/tests/unit/liberty_session_stats b/tests/unit/liberty_session_stats deleted file mode 100644 index 25fd34b3..00000000 --- a/tests/unit/liberty_session_stats +++ /dev/null @@ -1 +0,0 @@ -{"className":"com.ibm.ws.session.monitor.SessionStats","description":"Information on the management interface of the MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.websphere.session.monitor.SessionStatsMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"ActiveCount","type":"long","description":"ActiveCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/ActiveCount"},{"name":"LiveCount","type":"long","description":"LiveCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/LiveCount"},{"name":"CreateCount","type":"long","description":"CreateCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbG
VUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/CreateCount"},{"name":"InvalidatedCountbyTimeout","type":"long","description":"InvalidatedCountbyTimeout","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/InvalidatedCountbyTimeout"},{"name":"InvalidatedCount","type":"long","description":"InvalidatedCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/InvalidatedCount"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes","constructors":[{"name":"com.ibm.ws.session.monitor.SessionStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[]}],"notifications":[],"operations":[]} \ No newline at end of file diff --git a/tests/unit/liberty_thread_pool_stats b/tests/unit/liberty_thread_pool_stats deleted file mode 100644 index 661bba5b..00000000 --- a/tests/unit/liberty_thread_pool_stats +++ /dev/null @@ -1 +0,0 @@ -{"className":"com.ibm.ws.monitors.helper.ThreadPoolStats","description":"Information on the management interface of the 
MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.websphere.monitor.meters.ThreadPoolMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"PoolName","type":"java.lang.String","description":"PoolName","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuU3RyaW5ncQB+AARxAH4ABA=="}},{"value":"java.lang.String","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats/attributes/PoolName"},{"name":"ActiveThreads","type":"int","description":"ActiveThreads","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABFqYXZhLmxhbmcuSW50ZWdlcnEAfgAEcQB+AAQ="}},{"value":"int","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats/attributes/ActiveThreads"},{"name":"PoolSize","type":"int","description":"PoolSize","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABFqYXZhLmxhbmcuSW50ZWdlcnEAfgAEcQB+AAQ="}},{"value":"int","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats/attributes/PoolSize"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats/attributes","constructors":[{"name":"com.ibm.ws.monitors.helper.ThreadPoolStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[{"name":"p1","type":"java.lang.String","description":"","descriptor":{"names":[],"values":[]}},{"name":"p2","type":"java.lang.Object","description":"","descriptor":{"names":[],"values":[]}}]}],"notifications":[],"operations":[]} \ No newline at end of file diff --git a/tests/unit/mock_environ_file b/tests/unit/mock_environ_file deleted file mode 100644 index 79bfbe3f..00000000 --- a/tests/unit/mock_environ_file +++ /dev/null @@ -1 +0,0 @@ -HOME=/TERM=linuxPATH=/sbin:/bin diff --git a/tests/unit/mock_pynvml.py b/tests/unit/mock_pynvml.py deleted file mode 100644 index 03f1e38d..00000000 --- a/tests/unit/mock_pynvml.py +++ /dev/null @@ -1,44 +0,0 @@ -#class pynvml() -import collections - -Memory = collections.namedtuple('Memory', 'total used free') -Utilization = collections.namedtuple('Utilization', 'gpu memory') - -NVML_TEMPERATURE_GPU = 0 - -class 
DummyProcess():
-    pid = 1234
-    usedGpuMemory = 273285120
-
-def nvmlInit():
-    pass
-
-def nvmlShutdown():
-    pass
-
-def nvmlDeviceGetCount():
-    return 1
-
-def nvmlDeviceGetHandleByIndex(arg):
-    return 0
-
-def nvmlDeviceGetTemperature(arg1, arg2):
-    return 31
-
-def nvmlDeviceGetMemoryInfo(arg):
-    retVal = 12205 * 1024 * 1024
-    return Memory(total=retVal, used=0, free=retVal)
-
-def nvmlDeviceGetPowerUsage(arg):
-    return 27000
-
-def nvmlDeviceGetEnforcedPowerLimit(arg):
-    return 149000
-
-def nvmlDeviceGetUtilizationRates(arg):
-    return Utilization(gpu=0, memory=0)
-
-def nvmlDeviceGetComputeRunningProcesses(arg):
-    p = DummyProcess()
-    return [p]
-    #return [{'pid': 1234, 'usedGpuMemory': 273285120}]
diff --git a/tests/unit/proc_mounts_aufs b/tests/unit/proc_mounts_aufs
deleted file mode 100644
index 49f40144..00000000
--- a/tests/unit/proc_mounts_aufs
+++ /dev/null
@@ -1,33 +0,0 @@
-rootfs / rootfs rw 0 0
-sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
-proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
-udev /dev devtmpfs rw,relatime,size=4008360k,nr_inodes=1002090,mode=755 0 0
-devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
-tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=804824k,mode=755 0 0
-/dev/dm-1 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
-none /sys/fs/cgroup tmpfs rw,relatime,size=4k,mode=755 0 0
-none /sys/fs/fuse/connections fusectl rw,relatime 0 0
-none /sys/kernel/debug debugfs rw,relatime 0 0
-none /sys/kernel/security securityfs rw,relatime 0 0
-cgroup /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0
-cgroup /sys/fs/cgroup/cpu cgroup rw,relatime,cpu 0 0
-cgroup /sys/fs/cgroup/cpuacct cgroup rw,relatime,cpuacct 0 0
-none /sys/firmware/efi/efivars efivarfs rw,relatime 0 0
-none /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
-none /run/shm tmpfs rw,nosuid,nodev,relatime 0 0
-cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0
-none /run/user tmpfs rw,nosuid,nodev,noexec,relatime,size=102400k,mode=755 0 0
-none /sys/fs/pstore pstore rw,relatime 0 0
-cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
-cgroup /sys/fs/cgroup/freezer cgroup rw,relatime,freezer 0 0
-cgroup /sys/fs/cgroup/blkio cgroup rw,relatime,blkio 0 0
-cgroup /sys/fs/cgroup/perf_event cgroup rw,relatime,perf_event 0 0
-cgroup /sys/fs/cgroup/hugetlb cgroup rw,relatime,hugetlb 0 0
-/dev/sda2 /boot ext2 rw,relatime 0 0
-/dev/sda1 /boot/efi vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro 0 0
-systemd /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
-/dev/dm-1 /var/lib/docker/aufs ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
-gaufsd-fuse /run/user/1000/gaufs fuse.gaufsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1001 0 0
-none /var/lib/docker/aufs/mnt/e29e2a3403c1825008a1c53c61d4bd41774df3833cc5d8a12926fc6de39a466b aufs rw,relatime,si=90ef4e398f8a3d10,dio 0 0
-shm /var/lib/docker/containers/c751d4e5a334df29466b0fff65ea721317372d2c5b56012e371923ddaaa4f95a/shm tmpfs rw,nosuid,nodev,noexec,relatime,size=65536k 0 0
-proc /run/docker/netns/41815b5eedd6 proc rw,nosuid,nodev,noexec,relatime 0 0
diff --git a/tests/unit/proc_mounts_btrfs b/tests/unit/proc_mounts_btrfs
deleted file mode 100644
index e4f52134..00000000
--- a/tests/unit/proc_mounts_btrfs
+++ /dev/null
@@ -1,33 +0,0 @@
-rootfs / rootfs rw 0 0
-sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
-proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
-udev /dev devtmpfs rw,relatime,size=4008360k,nr_inodes=1002090,mode=755 0 0
-devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
-tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=804824k,mode=755 0 0
-/dev/dm-1 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
-none /sys/fs/cgroup tmpfs rw,relatime,size=4k,mode=755 0 0
-none /sys/fs/fuse/connections fusectl rw,relatime 0 0
-none /sys/kernel/debug debugfs rw,relatime 0 0
-none /sys/kernel/security securityfs rw,relatime 0 0
-cgroup /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0
-cgroup /sys/fs/cgroup/cpu cgroup rw,relatime,cpu 0 0
-cgroup /sys/fs/cgroup/cpuacct cgroup rw,relatime,cpuacct 0 0
-none /sys/firmware/efi/efivars efivarfs rw,relatime 0 0
-none /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
-none /run/shm tmpfs rw,nosuid,nodev,relatime 0 0
-cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0
-none /run/user tmpfs rw,nosuid,nodev,noexec,relatime,size=102400k,mode=755 0 0
-none /sys/fs/pstore pstore rw,relatime 0 0
-cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
-cgroup /sys/fs/cgroup/freezer cgroup rw,relatime,freezer 0 0
-cgroup /sys/fs/cgroup/blkio cgroup rw,relatime,blkio 0 0
-cgroup /sys/fs/cgroup/perf_event cgroup rw,relatime,perf_event 0 0
-cgroup /sys/fs/cgroup/hugetlb cgroup rw,relatime,hugetlb 0 0
-/dev/sda2 /boot ext2 rw,relatime 0 0
-/dev/sda1 /boot/efi vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro 0 0
-systemd /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
-/dev/dm-1 /var/lib/docker/btrfs ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
-gbtrfsd-fuse /run/user/1000/gbtrfs fuse.gbtrfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1001 0 0
-none /var/lib/docker/btrfs/mnt/e29e2a3403c1825008a1c53c61d4bd41774df3833cc5d8a12926fc6de39a466b btrfs rw,relatime,si=90ef4e398f8a3d10,dio 0 0
-shm /var/lib/docker/containers/c751d4e5a334df29466b0fff65ea721317372d2c5b56012e371923ddaaa4f95a/shm tmpfs rw,nosuid,nodev,noexec,relatime,size=65536k 0 0
-proc /run/docker/netns/41815b5eedd6 proc rw,nosuid,nodev,noexec,relatime 0 0
diff --git a/tests/unit/proc_mounts_devicemapper b/tests/unit/proc_mounts_devicemapper
deleted file mode 100644
index 5b8cccdc..00000000
--- a/tests/unit/proc_mounts_devicemapper
+++ /dev/null
@@ -1,28 +0,0 @@
-rootfs / rootfs rw 0 0
-proc /proc proc rw,relatime 0 0
-sysfs /sys sysfs rw,seclabel,relatime 0 0
-devtmpfs /dev devtmpfs rw,seclabel,relatime,size=3916200k,nr_inodes=979050,mode=755 0 0
-devpts /dev/pts devpts rw,seclabel,relatime,gid=5,mode=620,ptmxmode=000 0 0
-tmpfs /dev/shm tmpfs rw,seclabel,relatime 0 0
-/dev/mapper/vg_oc3262877066-lv_root / ext4 rw,seclabel,relatime,data=ordered 0 0
-none /selinux selinuxfs rw,relatime 0 0
-devtmpfs /dev devtmpfs rw,seclabel,relatime,size=3916200k,nr_inodes=979050,mode=755 0 0
-/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
-none /proc/sys/fs/binfmt_misc binfmt_misc rw,relatime 0 0
-cgroup /cgroup/cpuset cgroup rw,relatime,cpuset 0 0
-cgroup /cgroup/cpu cgroup rw,relatime,cpu 0 0
-cgroup /cgroup/cpuacct cgroup rw,relatime,cpuacct 0 0
-cgroup /cgroup/memory cgroup rw,relatime,memory 0 0
-cgroup /cgroup/devices cgroup rw,relatime,devices 0 0
-cgroup /cgroup/freezer cgroup rw,relatime,freezer 0 0
-cgroup /cgroup/net_cls cgroup rw,relatime,net_cls 0 0
-cgroup /cgroup/blkio cgroup rw,relatime,blkio 0 0
-/etc/auto.misc /misc autofs rw,relatime,fd=7,pgrp=2897,timeout=300,minproto=5,maxproto=5,indirect 0 0
--hosts /net autofs rw,relatime,fd=13,pgrp=2897,timeout=300,minproto=5,maxproto=5,indirect 0 0
-/etc/auto.gsa /gsa autofs rw,relatime,fd=19,pgrp=2897,timeout=300,minproto=5,maxproto=5,indirect 0 0
-/etc/auto.gsaro /gsaro autofs rw,relatime,fd=25,pgrp=2897,timeout=300,minproto=5,maxproto=5,indirect 0 0
-/dev/mapper/vg_oc3262877066-lv_root /var/lib/docker/devicemapper ext4 rw,seclabel,relatime,data=ordered 0 0
-gvfs-fuse-daemon /root/.gvfs fuse.gvfs-fuse-daemon rw,nosuid,nodev,relatime,user_id=0,group_id=0 0 0
-sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
-/dev/mapper/docker-253:2-29492713-65fe676c24fe1faea1f06e222cc3811cc9b651c381702ca4f787ffe562a5e39b /var/lib/docker/devicemapper/mnt/65fe676c24fe1faea1f06e222cc3811cc9b651c381702ca4f787ffe562a5e39b ext4 rw,seclabel,relatime,stripe=16,data=ordered 0 0
-proc /var/run/docker/netns/65fe676c24fe proc rw,relatime 0 0
diff --git a/tests/unit/proc_mounts_vfs b/tests/unit/proc_mounts_vfs
deleted file mode 100644
index e93f154f..00000000
--- a/tests/unit/proc_mounts_vfs
+++ /dev/null
@@ -1,33 +0,0 @@
-rootfs / rootfs rw 0 0
-sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
-proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
-udev /dev devtmpfs rw,relatime,size=4008360k,nr_inodes=1002090,mode=755 0 0
-devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
-tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=804824k,mode=755 0 0
-/dev/dm-1 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
-none /sys/fs/cgroup tmpfs rw,relatime,size=4k,mode=755 0 0
-none /sys/fs/fuse/connections fusectl rw,relatime 0 0
-none /sys/kernel/debug debugfs rw,relatime 0 0
-none /sys/kernel/security securityfs rw,relatime 0 0
-cgroup /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0
-cgroup /sys/fs/cgroup/cpu cgroup rw,relatime,cpu 0 0
-cgroup /sys/fs/cgroup/cpuacct cgroup rw,relatime,cpuacct 0 0
-none /sys/firmware/efi/efivars efivarfs rw,relatime 0 0
-none /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
-none /run/shm tmpfs rw,nosuid,nodev,relatime 0 0
-cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0
-none /run/user tmpfs rw,nosuid,nodev,noexec,relatime,size=102400k,mode=755 0 0
-none /sys/fs/pstore pstore rw,relatime 0 0
-cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
-cgroup /sys/fs/cgroup/freezer cgroup rw,relatime,freezer 0 0
-cgroup /sys/fs/cgroup/blkio cgroup rw,relatime,blkio 0 0
-cgroup /sys/fs/cgroup/perf_event cgroup rw,relatime,perf_event 0 0
-cgroup /sys/fs/cgroup/hugetlb cgroup rw,relatime,hugetlb 0 0
-/dev/sda2 /boot ext2 rw,relatime 0 0
-/dev/sda1 /boot/efi vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro 0 0
-systemd /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
-/dev/dm-1 /var/lib/docker/vfs ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
-gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1001 0 0
-none /var/lib/docker/vfs/mnt/e29e2a3403c1825008a1c53c61d4bd41774df3833cc5d8a12926fc6de39a466b vfs rw,relatime,si=90ef4e398f8a3d10,dio 0 0
-shm /var/lib/docker/containers/c751d4e5a334df29466b0fff65ea721317372d2c5b56012e371923ddaaa4f95a/shm tmpfs rw,nosuid,nodev,noexec,relatime,size=65536k 0 0
-proc /run/docker/netns/41815b5eedd6 proc rw,nosuid,nodev,noexec,relatime 0 0
diff --git a/tests/unit/proc_pid_mounts_devicemapper b/tests/unit/proc_pid_mounts_devicemapper
deleted file mode 100644
index 97ab51a2..00000000
---
a/tests/unit/proc_pid_mounts_devicemapper +++ /dev/null @@ -1,20 +0,0 @@ -rootfs / rootfs rw 0 0 -/dev/mapper/docker-253:2-29492713-65fe676c24fe1faea1f06e222cc3811cc9b651c381702ca4f787ffe562a5e39b / ext4 rw,seclabel,relatime,stripe=16,data=ordered 0 0 -proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 -tmpfs /dev tmpfs rw,seclabel,nosuid,mode=755 0 0 -devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=666 0 0 -shm /dev/shm tmpfs rw,seclabel,nosuid,nodev,noexec,relatime,size=65536k 0 0 -mqueue /dev/mqueue mqueue rw,seclabel,nosuid,nodev,noexec,relatime 0 0 -sysfs /sys sysfs ro,seclabel,nosuid,nodev,noexec,relatime 0 0 -/dev/mapper/vg_oc3262877066-lv_root /etc/resolv.conf ext4 rw,seclabel,relatime,data=ordered 0 0 -/dev/mapper/vg_oc3262877066-lv_root /etc/hostname ext4 rw,seclabel,relatime,data=ordered 0 0 -/dev/mapper/vg_oc3262877066-lv_root /etc/hosts ext4 rw,seclabel,relatime,data=ordered 0 0 -devpts /dev/console devpts rw,seclabel,relatime,gid=5,mode=620,ptmxmode=000 0 0 -proc /proc/asound proc ro,nosuid,nodev,noexec,relatime 0 0 -proc /proc/bus proc ro,nosuid,nodev,noexec,relatime 0 0 -proc /proc/fs proc ro,nosuid,nodev,noexec,relatime 0 0 -proc /proc/irq proc ro,nosuid,nodev,noexec,relatime 0 0 -proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0 -proc /proc/sysrq-trigger proc ro,nosuid,nodev,noexec,relatime 0 0 -tmpfs /proc/kcore tmpfs rw,seclabel,nosuid,mode=755 0 0 -tmpfs /proc/timer_stats tmpfs rw,seclabel,nosuid,mode=755 0 0 diff --git a/tests/unit/test_app_apache.py b/tests/unit/test_app_apache.py deleted file mode 100644 index 7f1d55b2..00000000 --- a/tests/unit/test_app_apache.py +++ /dev/null @@ -1,288 +0,0 @@ -from unittest import TestCase -import mock -from plugins.applications.apache import apache_crawler -from plugins.applications.apache.feature import ApacheFeature -from plugins.applications.apache.apache_container_crawler \ - import ApacheContainerCrawler -from plugins.applications.apache.apache_host_crawler \ - import ApacheHostCrawler -from utils.crawler_exceptions import CrawlError -from requests.exceptions import ConnectionError - - -# expected format from apache status page -def mocked_wrong_status_page(host, port): - return ('No Acceptable status page format') - - -def mocked_urllib2_open(request): - return MockedURLResponse() - - -def mocked_urllib2_open_with_zero(request): - return MockedURLResponseWithZero() - - -def mocked_no_status_page(host, port): - raise Exception - - -def mocked_retrieve_status_page(host, port): - return ('Total Accesses: 172\n' - 'Total kBytes: 1182\n' - 'CPULoad: 2.34827\n' - 'Uptime: 1183\n' - 'ReqPerSec: .145393\n' - 'BytesPerSec: 1023.13\n' - 'BytesPerReq: 7037.02\n' - 'BusyWorkers: 2\n' - 'IdleWorkers: 9\n' - 'Scoreboard: __R_W______......G..C...' - 'DSKLI...............................' - '...................................................' - ) - - -class MockedURLResponse(object): - - def read(self): - return ('Total Accesses: 172\n' - 'Total kBytes: 1182\n' - 'CPULoad: 2.34827\n' - 'Uptime: 1183\n' - 'ReqPerSec: .145393\n' - 'BytesPerSec: 1023.13\n' - 'BytesPerReq: 7037.02\n' - 'BusyWorkers: 2\n' - 'IdleWorkers: 9\n' - 'Scoreboard: __R_W______......G..' - 'C...DSKLI........................' - '..........................................................' 
- ) - - -class MockedURLResponseWithZero(object): - - def read(self): - return ('Total Accesses: 172\n' - 'Total kBytes: 1182\n' - 'CPULoad: 2.34827\n' - 'ReqPerSec: .145393\n' - 'BytesPerSec: 1023.13\n' - 'BytesPerReq: 7037.02\n' - 'BusyWorkers: 2\n' - 'IdleWorkers: 9\n' - 'Scoreboard: __R_W______......G..C...' - 'DSKLI................................' - '..................................................' - ) - - -class MockedApacheContainer1(object): - - def __init__( - self, - container_id, - ): - ports = "[ {\"containerPort\" : \"80\"} ]" - self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": - {"annotation.io.kubernetes.container.ports": ports}}} - - -class MockedApacheContainer2(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["80"] - return ports - - -class MockedApacheContainer3(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["1234"] - return ports - - -class MockedNoPortContainer(object): - - def __init__( - self, - container_id, - ): - self.image_name = 'httpd-container' - - def get_container_ip(self): - return '1.2.3.4' - - def get_container_ports(self): - ports = [] - return ports - - -class MockedNoNameContainer(object): - - def __init__(self, container_id): - self.image_name = 'dummy' - - -class ApacheCrawlTests(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch('urllib2.urlopen', mocked_urllib2_open_with_zero) - def test_ok_with_zero(self): - status = apache_crawler.retrieve_metrics() - assert status == ApacheFeature( - BusyWorkers='2', - IdleWorkers='9', - waiting_for_connection='9', - starting_up='1', - reading_request='1', - sending_reply='1', - keepalive_read='1', - dns_lookup='1', - closing_connection='1', - logging='1', - graceful_finishing='1', - idle_worker_cleanup='1', - BytesPerSec='1023.13', - BytesPerReq='7037.02', - ReqPerSec='.145393', - Uptime='0', - Total_kBytes='1182', - Total_Accesses='172') - - @mock.patch('urllib2.urlopen', mocked_urllib2_open) - def test_ok(self): - status = apache_crawler.retrieve_metrics() - assert status == ApacheFeature( - BusyWorkers='2', - IdleWorkers='9', - waiting_for_connection='9', - starting_up='1', - reading_request='1', - sending_reply='1', - keepalive_read='1', - dns_lookup='1', - closing_connection='1', - logging='1', - graceful_finishing='1', - idle_worker_cleanup='1', - BytesPerSec='1023.13', - BytesPerReq='7037.02', - ReqPerSec='.145393', - Uptime='1183', - Total_kBytes='1182', - Total_Accesses='172') - - @mock.patch('plugins.applications.apache.' - 'apache_crawler.retrieve_status_page', - mocked_no_status_page) - def test_hundle_ioerror(self): - with self.assertRaises(CrawlError): - apache_crawler.retrieve_metrics() - - @mock.patch('plugins.applications.apache.' - 'apache_crawler.retrieve_status_page', - mocked_wrong_status_page) - def test_hundle_parseerror(self): - with self.assertRaises(CrawlError): - apache_crawler.retrieve_metrics() - - -class ApacheHostTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = ApacheHostCrawler() - self.assertEqual(c.get_feature(), 'apache') - - @mock.patch('plugins.applications.apache.' 
- 'apache_crawler.retrieve_status_page', - mocked_retrieve_status_page) - def test_get_metrics(self): - c = ApacheHostCrawler() - emitted = c.crawl()[0] - self.assertEqual(emitted[0], 'apache') - self.assertIsInstance(emitted[1], ApacheFeature) - self.assertEqual(emitted[2], 'application') - - -class ApacheContainerTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = ApacheContainerCrawler() - self.assertEqual(c.get_feature(), 'apache') - - @mock.patch('plugins.applications.apache.' - 'apache_crawler.retrieve_status_page', - mocked_retrieve_status_page) - @mock.patch(("plugins.applications.apache.apache_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - @mock.patch('dockercontainer.DockerContainer', - MockedApacheContainer1) - def test_apache_container_crawler_forkube(self, *args): - c = ApacheContainerCrawler() - emitted = c.crawl()[0] - self.assertEqual(emitted[0], 'apache') - self.assertIsInstance(emitted[1], ApacheFeature) - self.assertEqual(emitted[2], 'application') - - @mock.patch('plugins.applications.apache.' - 'apache_crawler.retrieve_status_page', - mocked_retrieve_status_page) - @mock.patch(("plugins.applications.apache.apache_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - @mock.patch('dockercontainer.DockerContainer', - MockedApacheContainer2) - def test_apache_container_crawler_docker(self, *args): - c = ApacheContainerCrawler() - emitted = c.crawl()[0] - self.assertEqual(emitted[0], 'apache') - self.assertIsInstance(emitted[1], ApacheFeature) - self.assertEqual(emitted[2], 'application') - - @mock.patch('dockercontainer.DockerContainer', - MockedApacheContainer3) - def test_no_available_ports(self): - c = ApacheContainerCrawler() - c.crawl() - pass - - @mock.patch('plugins.applications.apache.' - 'apache_crawler.retrieve_status_page', - mocked_no_status_page) - @mock.patch(("plugins.applications.apache.apache_container_crawler." 
- "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - @mock.patch('dockercontainer.DockerContainer', - MockedApacheContainer1) - def test_no_accessible_endpoint(self, *kwargs): - c = ApacheContainerCrawler() - with self.assertRaises(ConnectionError): - c.crawl("mockcontainer") diff --git a/tests/unit/test_app_db2.py b/tests/unit/test_app_db2.py deleted file mode 100644 index 7e8d8dfe..00000000 --- a/tests/unit/test_app_db2.py +++ /dev/null @@ -1,244 +0,0 @@ -import mock -import pip -from unittest import TestCase -from plugins.applications.db2 import db2_crawler -from plugins.applications.db2.feature import DB2Feature -from plugins.applications.db2.db2_container_crawler \ - import DB2ContainerCrawler -from plugins.applications.db2.db2_host_crawler \ - import DB2HostCrawler -from utils.crawler_exceptions import CrawlError -from requests.exceptions import ConnectionError - - -pip.main(['install', 'ibm_db']) - - -class MockedDB2Container1(object): - - def __init__(self, container_id): - ports = "[ {\"containerPort\" : \"50000\"} ]" - self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": - {"annotation.io.kubernetes.container.ports": ports}}} - - -class MockedDB2Container2(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["50000"] - return ports - - -class MockedDB2Container3(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["1234"] - return ports - - -def mocked_dbi_conn_error(ibm_db_conn): - raise Exception("error") - - -def mocked_dbi_conn(ibm_db_conn): - return - - -def mocked_db_exec_error(sql): - raise Exception("error") - - -def mocked_db_conn(req, opt1, opt2): - return - - -def mocked_ibm_db_dbi_conn(object): - conn = mocked_conn() - return conn - - -class mocked_conn(): - def cursor(obj): - return - - def execute(sql): - return - - -def mocked_retrieve_metrics(host, user, password, db): - - attribute = DB2Feature( - "dbCapacity", - "dbVersion", - "instanceName", - "productName", - "dbName", - "serviceLevel", - "instanceConn", - "instanceUsedMem", - "dbConn", - "usedLog", - "transcationInDoubt", - "xlocksEscalation", - "locksEscalation", - "locksTimeOut", - "deadLock", - "lastBackupTime", - "dbStatus", - "instanceStatus", - "bpIndexHitRatio", - "bpDatahitRatio", - "sortsInOverflow", - "agetnsWait", - "updateRows", - "insertRows", - "selectedRows", - "deleteRows", - "selects", - "selectSQLs", - "dynamicSQLs", - "rollbacks", - "commits", - "bpTempIndexHitRatio", - "bpTempDataHitRatio" - ) - - return attribute - - -def mocked_retrieve_metrics_error(host, user, password, db): - raise CrawlError - - -class DB2CrawlTests(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch('ibm_db_dbi.Connection', mocked_dbi_conn_error) - def test_conn_error(self): - with self.assertRaises(CrawlError): - db2_crawler.retrieve_metrics() - - @mock.patch('ibm_db.connect', mocked_db_conn) - @mock.patch('ibm_db_dbi.Connection', mocked_ibm_db_dbi_conn) - @mock.patch('ibm_db.execute', mocked_dbi_conn_error) - def test_exec_error(self): - with self.assertRaises(CrawlError): - db2_crawler.retrieve_metrics() - - @mock.patch('ibm_db.connect', mocked_db_conn) - @mock.patch('ibm_db_dbi.Connection') - def test_ok(self, mock_connect): - status = db2_crawler.retrieve_metrics() - self.assertIsInstance(status, 
DB2Feature) - - -class DB2HostTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = DB2HostCrawler() - self.assertEqual(c.get_feature(), 'db2') - - @mock.patch('plugins.applications.db2.' - 'db2_crawler.retrieve_metrics', - mocked_retrieve_metrics) - def test_get_metrics(self): - c = DB2HostCrawler() - options = {"password": "password", "user": "db2inst1", "db": "sample"} - emitted = c.crawl(**options)[0] - self.assertEqual(emitted[0], 'db2') - self.assertIsInstance(emitted[1], DB2Feature) - self.assertEqual(emitted[2], 'application') - - @mock.patch('plugins.applications.db2.' - 'db2_crawler.retrieve_metrics', - mocked_retrieve_metrics_error) - def test_get_metrics_error(self): - with self.assertRaises(CrawlError): - c = DB2HostCrawler() - c.crawl()[0] - - -class DB2ContainerTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = DB2ContainerCrawler() - self.assertEqual(c.get_feature(), 'db2') - - @mock.patch('plugins.applications.db2.' - 'db2_crawler.retrieve_metrics', - mocked_retrieve_metrics) - @mock.patch('dockercontainer.DockerContainer', - MockedDB2Container1) - @mock.patch(("plugins.applications.db2.db2_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_db2_container_crawler_forkube(self, *kwargs): - c = DB2ContainerCrawler() - options = {"password": "password", "user": "db2inst1", "db": "sample"} - emitted = c.crawl(1234, **options)[0] - self.assertEqual(emitted[0], 'db2') - self.assertIsInstance(emitted[1], DB2Feature) - self.assertEqual(emitted[2], 'application') - - @mock.patch('plugins.applications.db2.' - 'db2_crawler.retrieve_metrics', - mocked_retrieve_metrics) - @mock.patch('dockercontainer.DockerContainer', - MockedDB2Container2) - @mock.patch(("plugins.applications.db2.db2_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_db2_container_crawler_fordocker(self, *kwargs): - c = DB2ContainerCrawler() - options = {"password": "password", "user": "db2inst1", "db": "sample"} - emitted = c.crawl(1234, **options)[0] - self.assertEqual(emitted[0], 'db2') - self.assertIsInstance(emitted[1], DB2Feature) - self.assertEqual(emitted[2], 'application') - - @mock.patch('dockercontainer.DockerContainer', - MockedDB2Container3) - def test_no_available_port(self): - c = DB2ContainerCrawler() - c.crawl("mockcontainer") - pass - - @mock.patch('plugins.applications.db2.' - 'db2_crawler.retrieve_metrics', - mocked_retrieve_metrics_error) - @mock.patch('dockercontainer.DockerContainer', - MockedDB2Container2) - @mock.patch(("plugins.applications.db2.db2_container_crawler." 
- "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_no_accessible_endpoint(self, *args): - c = DB2ContainerCrawler() - with self.assertRaises(ConnectionError): - options = {"password": "password", - "user": "db2inst1", "db": "sample"} - c.crawl(1234, **options)[0] diff --git a/tests/unit/test_app_liberty.py b/tests/unit/test_app_liberty.py deleted file mode 100644 index a2d04575..00000000 --- a/tests/unit/test_app_liberty.py +++ /dev/null @@ -1,264 +0,0 @@ -from unittest import TestCase -import mock -from plugins.applications.liberty import liberty_crawler -from plugins.applications.liberty import feature -from plugins.applications.liberty.liberty_container_crawler \ - import LibertyContainerCrawler -from plugins.applications.liberty.liberty_host_crawler \ - import LibertyHostCrawler -from utils.crawler_exceptions import CrawlError -from requests.exceptions import ConnectionError - - -def mocked_urllib2_open(request): - return MockedURLResponse() - - -def mock_status_value(user, password, url): - raise CrawlError - - -class MockedLibertyContainer1(object): - - def __init__(self, container_id): - ports = "[ {\"containerPort\" : \"9443\"} ]" - self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": - {"annotation.io.kubernetes.container.ports": ports}}} - - -class MockedLibertyContainer2(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["9443"] - return ports - - -class MockedLibertyContainer3(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["1234"] - return ports - - -class MockedURLResponse(object): - def read(self): - return open('tests/unit/liberty_response_time_details_mocked', - 'r').read() - - -def server_status_value(user, password, url): - url_list = url.lstrip('/').split("/") - url_list = filter(lambda a: a != '', url_list) - tmp_word = url_list[len(url_list)-1] - last_word = tmp_word.split('%3D') - last_word = last_word[len(last_word)-1] - - file_value = { - "mbeans": 'tests/unit/liberty_mbeans', - "ServletStats": 'tests/unit/liberty_servlet_stats', - "ResponseTimeDetails": 'tests/unit/liberty_response_time_details', - "JvmStats": 'tests/unit/liberty_jvm_stats', - "ThreadPoolStats": 'tests/unit/liberty_thread_pool_stats', - "SessionStats": 'tests/unit/liberty_session_stats', - "ConnectionPool": 'tests/unit/liberty_connection_stats' - } - - return_value = { - "ServletName": - '{"value":"JMXRESTProxyServlet","type":"java.lang.String"}', - "AppName": '{"value":"com.ibm.ws.jmx.connector.server.rest",\ - "type":"java.lang.String"}', - "Heap": '{"value":"31588352","type":"java.lang.Long"}', - "FreeMemory": '{"value":"9104704","type":"java.lang.Long"}', - "UsedMemory": '{"value":"23213312","type":"java.lang.Long"}', - "ProcessCPU": - '{"value":"0.07857719811500322","type":"java.lang.Double"}', - "GcCount": '{"value":"1325","type":"java.lang.Long"}', - "GcTime": '{"value":"1001","type":"java.lang.Long"}', - "UpTime": '{"value":"155755366","type":"java.lang.Long"}', - "ActiveThreads": '{"value":"1","type":"java.lang.Integer"}', - "PoolSize": '{"value":"4","type":"java.lang.Integer"}', - "PoolName": '{"value":"Default Executor","type":"java.lang.String"}', - "CreateCount": '{"value":"1","type":"java.lang.Long"}', - "LiveCount": '{"value":"0","type":"java.lang.Long"}', - "ActiveCount": 
'{"value":"0","type":"java.lang.Long"}', - "InvalidatedCount": '{"value":"1","type":"java.lang.Long"}', - "InvalidatedCountbyTimeout": '{"value":"2","type":"java.lang.Long"}', - "CheckedOutCountValue": '{"value":"1","type":"java.lang.Long"}', - "WaitQueueSizeValue": '{"value":"2","type":"java.lang.Long"}', - "MinSizeValue": '{"value":"3","type":"java.lang.Long"}', - "MaxSizeValue": '{"value":"4","type":"java.lang.Long"}', - "SizeValue": '{"value":"7","type":"java.lang.Long"}', - "HostValue": '{"value":"test","type":"java.lang.Long"}', - "PortValue": '{"value":"12","type":"java.lang.Long"}' - } - - if last_word in file_value: - return open(file_value.get(last_word), 'r').read() - - if last_word in return_value: - return return_value.get(last_word) - - -class LibertyCrawlTests(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_hundle_ioerror(self): - with self.assertRaises(CrawlError): - liberty_crawler.retrieve_status_page("user", "pass", "localhost") - - @mock.patch('urllib2.urlopen', mocked_urllib2_open) - def test_read(self): - liberty_crawler.retrieve_status_page("user", "pass", "localhost") - self.assertNotIsInstance(liberty_crawler.retrieve_metrics(), - feature.LibertyServletFeature) - - @mock.patch('plugins.applications.liberty.' - 'liberty_crawler.retrieve_status_page', - side_effect=server_status_value) - def test_ok(self, server_status_value): - status = list(liberty_crawler.retrieve_metrics()) - assert status == [('liberty_servlet_status', - feature.LibertyServletFeature( - name='JMXRESTProxyServlet', - appName='com.ibm.ws.jmx.connector.server.rest', - reqCount='292', - responseMean='1646404.6780821919', - responseMax='129746827', - responseMin='257689'), - 'application'), - ('liberty_jvm_status', - feature.LibertyJVMFeature( - heap='31588352', - freeMemory='9104704', - usedMemory='23213312', - processCPU='0.07857719811500322', - gcCount='1325', - gcTime='1001', - upTime='155755366'), - 'application'), - ('liberty_thread_status', - feature.LibertyThreadFeature( - activeThreads='1', - poolSize='4', - poolName='Default Executor'), - 'application'), - ('liberty_session_status', - feature.LibertySessionFeature( - name='default_host/IBMJMXConnectorREST', - createCount='1', - liveCount='0', - activeCount='0', - invalidatedCount='1', - invalidatedCountByTimeout='2'), - 'application'), - ('liberty_mongo_connection_status', - feature.LibertyMongoConnectionFeature( - checkedOutCount='1', - waitQueueSize='2', - maxSize='4', - minSize='3', - host='test', - port='12', - size='7'), - 'application')] - - -class LibertyHostTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = LibertyHostCrawler() - self.assertEqual(c.get_feature(), 'liberty') - - @mock.patch('plugins.applications.liberty.' - 'liberty_crawler.retrieve_status_page', - server_status_value) - def test_get_metrics(self): - c = LibertyHostCrawler() - options = {"password": "password", "user": "liberty"} - emitted = list(c.crawl(**options)) - self.assertEqual(emitted[0][0], 'liberty_servlet_status') - self.assertEqual(emitted[0][2], 'application') - - -class LibertyContainerTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = LibertyContainerCrawler() - self.assertEqual(c.get_feature(), 'liberty') - - @mock.patch('plugins.applications.liberty.' 
- 'liberty_crawler.retrieve_status_page', - server_status_value) - @mock.patch('dockercontainer.DockerContainer', - MockedLibertyContainer1) - @mock.patch(("plugins.applications.liberty.liberty_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_liberty_container_forkube(self, *args): - c = LibertyContainerCrawler() - options = {"password": "password", "user": "liberty"} - emitted = list(c.crawl(**options)) - self.assertEqual(emitted[0][0], 'liberty_servlet_status') - self.assertEqual(emitted[0][2], 'application') - - @mock.patch('plugins.applications.liberty.' - 'liberty_crawler.retrieve_status_page', - server_status_value) - @mock.patch('dockercontainer.DockerContainer', - MockedLibertyContainer2) - @mock.patch(("plugins.applications.liberty.liberty_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_liberty_container_fordocker(self, *args): - c = LibertyContainerCrawler() - options = {"password": "password", "user": "liberty"} - emitted = list(c.crawl(**options)) - self.assertEqual(emitted[0][0], 'liberty_servlet_status') - self.assertEqual(emitted[0][2], 'application') - - @mock.patch('dockercontainer.DockerContainer', - MockedLibertyContainer3) - def test_liberty_container_noport(self, *args): - c = LibertyContainerCrawler() - c.crawl(1234) - pass - - @mock.patch('dockercontainer.DockerContainer', - MockedLibertyContainer1) - @mock.patch(("plugins.applications.liberty.liberty_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - @mock.patch('plugins.applications.liberty.' - 'liberty_crawler.retrieve_metrics', - mock_status_value) - def test_none_liberty_container(self, *args): - options = {"password": "password", "user": "liberty"} - c = LibertyContainerCrawler() - with self.assertRaises(ConnectionError): - c.crawl(1234, **options) diff --git a/tests/unit/test_app_nginx.py b/tests/unit/test_app_nginx.py deleted file mode 100644 index 5ef176ff..00000000 --- a/tests/unit/test_app_nginx.py +++ /dev/null @@ -1,194 +0,0 @@ -from unittest import TestCase -import mock -from plugins.applications.nginx import nginx_crawler -from plugins.applications.nginx.feature import NginxFeature -from plugins.applications.nginx.nginx_container_crawler \ - import NginxContainerCrawler -from plugins.applications.nginx.nginx_host_crawler \ - import NginxHostCrawler -from utils.crawler_exceptions import CrawlError -from requests.exceptions import ConnectionError - - -# expected format from nginx status page -def mocked_retrieve_status_page(host, port): - return ('Active connections: 2\n' - 'server accepts handled requests\n' - '2 2 1\n' - 'Reading: 0 Writing: 1 Waiting: 1' - ) - - -def mocked_no_status_page(host, port): - # raise urllib2.HTTPError(1,2,3,4,5) - raise Exception - - -def mocked_wrong_status_page(host, port): - return ('No Acceptable status page format') - - -def mocked_urllib2_open(request): - return MockedURLResponse() - - -class MockedURLResponse(object): - - def read(self): - return ('Active connections: 2\n' - 'server accepts handled requests\n' - '2 2 1\n' - 'Reading: 0 Writing: 1 Waiting: 1' - ) - - -class MockedNginxContainer1(object): - - def __init__(self, container_id): - ports = "[ {\"containerPort\" : \"80\"} ]" - self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": - {"annotation.io.kubernetes.container.ports": ports}}} - - -class MockedNginxContainer2(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 
1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["80"] - return ports - - -class MockedNginxContainer3(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["1234"] - return ports - - -class NginxCrawlTests(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch('urllib2.urlopen', mocked_urllib2_open) - def test_ok(self): - self.assertIsInstance(nginx_crawler.retrieve_metrics(), - NginxFeature) - - ''' - @mock.patch('plugins.applications.nginx.' - 'nginx_crawler.retrieve_status_page', - mocked_retrieve_status_page) - def test_successful_crawling(self): - self.assertIsInstance(nginx_crawler.retrieve_metrics(), - NginxFeature) - ''' - @mock.patch('plugins.applications.nginx.' - 'nginx_crawler.retrieve_status_page', - mocked_no_status_page) - def test_hundle_ioerror(self): - with self.assertRaises(CrawlError): - nginx_crawler.retrieve_metrics() - - @mock.patch('plugins.applications.nginx.' - 'nginx_crawler.retrieve_status_page', - mocked_wrong_status_page) - def test_hundle_parseerror(self): - with self.assertRaises(CrawlError): - nginx_crawler.retrieve_metrics() - - -class NginxHostTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = NginxHostCrawler() - self.assertEqual(c.get_feature(), 'nginx') - - @mock.patch('plugins.applications.nginx.' - 'nginx_crawler.retrieve_status_page', - mocked_retrieve_status_page) - def test_get_metrics(self): - c = NginxHostCrawler() - emitted = c.crawl()[0] - self.assertEqual(emitted[0], 'nginx') - self.assertIsInstance(emitted[1], NginxFeature) - self.assertEqual(emitted[2], 'application') - - -class NginxContainerTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = NginxContainerCrawler() - self.assertEqual(c.get_feature(), 'nginx') - - @mock.patch('plugins.applications.nginx.' - 'nginx_crawler.retrieve_status_page', - mocked_retrieve_status_page) - @mock.patch('dockercontainer.DockerContainer', - MockedNginxContainer1) - @mock.patch(("plugins.applications.nginx.nginx_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_nginx_container_forkube(self, *args): - c = NginxContainerCrawler() - emitted = c.crawl()[0] - self.assertEqual(emitted[0], 'nginx') - self.assertIsInstance(emitted[1], NginxFeature) - self.assertEqual(emitted[2], 'application') - - @mock.patch('plugins.applications.nginx.' - 'nginx_crawler.retrieve_status_page', - mocked_retrieve_status_page) - @mock.patch('dockercontainer.DockerContainer', - MockedNginxContainer2) - @mock.patch(("plugins.applications.nginx.nginx_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_nginx_container_fordocker(self, *args): - c = NginxContainerCrawler() - emitted = c.crawl()[0] - self.assertEqual(emitted[0], 'nginx') - self.assertIsInstance(emitted[1], NginxFeature) - self.assertEqual(emitted[2], 'application') - - @mock.patch('dockercontainer.DockerContainer', - MockedNginxContainer3) - def test_nginx_container_noport(self, *args): - c = NginxContainerCrawler() - c.crawl(1234) - pass - - @mock.patch('plugins.applications.nginx.' 
- 'nginx_crawler.retrieve_status_page', - mocked_no_status_page) - @mock.patch('dockercontainer.DockerContainer', - MockedNginxContainer2) - @mock.patch(("plugins.applications.nginx.nginx_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_no_accessible_endpoint(self, *arg): - c = NginxContainerCrawler() - with self.assertRaises(ConnectionError): - c.crawl("mockcontainer") diff --git a/tests/unit/test_app_redis.py b/tests/unit/test_app_redis.py deleted file mode 100644 index aaca14f9..00000000 --- a/tests/unit/test_app_redis.py +++ /dev/null @@ -1,270 +0,0 @@ -import mock -import pip -from unittest import TestCase -from plugins.applications.redis.feature import RedisFeature -from plugins.applications.redis.feature import create_feature -from plugins.applications.redis.redis_host_crawler \ - import RedisHostCrawler -from plugins.applications.redis.redis_container_crawler \ - import RedisContainerCrawler -from requests.exceptions import ConnectionError - -pip.main(['install', 'redis']) - - -class MockedRedisClient(object): - def __init__(self, host='localhost', port=6379): - self.host = host - self.port = port - - def info(self): - metrics = { - "aof_current_rewrite_time_sec": -1, - "aof_enabled": 0, - "aof_last_bgrewrite_status": "ok", - "aof_last_rewrite_time_sec": -1, - "aof_last_write_status": "ok", - "aof_rewrite_in_progress": 0, - "aof_rewrite_scheduled": 0, - "arch_bits": 64, - "blocked_clients": 0, - "client_biggest_input_buf": 0, - "client_longest_output_list": 0, - "cluster_enabled": 0, - "config_file": "", - "connected_clients": 1, - "connected_slaves": 0, - "evicted_keys": 0, - "executable": "/data/redis-server", - "expired_keys": 0, - "gcc_version": "4.9.2", - "hz": 10, - "instantaneous_input_kbps": 0.0, - "instantaneous_ops_per_sec": 0, - "instantaneous_output_kbps": 0.0, - "keyspace_hits": 0, - "keyspace_misses": 0, - "latest_fork_usec": 0, - "loading": 0, - "lru_clock": 3053805, - "master_repl_offset": 0, - "maxmemory": 0, - "maxmemory_human": "0B", - "maxmemory_policy": "noeviction", - "mem_allocator": "jemalloc-4.0.3", - "mem_fragmentation_ratio": 8.18, - "migrate_cached_sockets": 0, - "multiplexing_api": "epoll", - "os": "Linux 4.4.0-21-generic ppc64le", - "process_id": 1, - "pubsub_channels": 0, - "pubsub_patterns": 0, - "rdb_bgsave_in_progress": 0, - "rdb_changes_since_last_save": 0, - "rdb_current_bgsave_time_sec": -1, - "rdb_last_bgsave_status": "ok", - "rdb_last_bgsave_time_sec": -1, - "rdb_last_save_time": 1479217974, - "redis_build_id": "962858415ee795a5", - "redis_git_dirty": 0, - "redis_git_sha1": 0, - "redis_mode": "standalone", - "redis_version": "3.2.0", - "rejected_connections": 0, - "repl_backlog_active": 0, - "repl_backlog_first_byte_offset": 0, - "repl_backlog_histlen": 0, - "repl_backlog_size": 1048576, - "role": "master", - "run_id": "7b9a920c40761ad5750fbc8810408b69eca45c06", - "sync_full": 0, - "sync_partial_err": 0, - "sync_partial_ok": 0, - "tcp_port": 6379, - "total_commands_processed": 108, - "total_connections_received": 109, - "total_net_input_bytes": 1526, - "total_net_output_bytes": 228594, - "total_system_memory": 8557363200, - "total_system_memory_human": "7.97G", - "uptime_in_days": 2, - "uptime_in_seconds": 230839, - "used_cpu_sys": 86.48, - "used_cpu_sys_children": 0.0, - "used_cpu_user": 25.17, - "used_cpu_user_children": 0.0, - "used_memory": 856848, - "used_memory_peak": 857872, - "used_memory_peak_human": "837.77K", - "used_memory_rss": 7012352, - "used_memory_rss_human": "6.69M" - } - 
return metrics - - -class MockedRedisClient2(object): - - def __init__(self, host='localhost', port=6379): - self.host = host - self.port = port - - def info(self): - raise ConnectionError() - - -class MockedRedisClient3(object): - - def __init__(self, host='localhost', port=6379): - self.host = host - self.port = port - - def info(self): - metrics = { - "aof_current_rewrite_time_sec": -1, - "aof_enabled": 0, - "tcp_port": 6379, - "used_memory_rss_human": "6.69M" - } - return metrics - - -class MockedRedisContainer1(object): - - def __init__(self, container_id): - ports = "[ {\"containerPort\" : \"6379\"} ]" - self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": - {"annotation.io.kubernetes.container.ports": ports}}} - - -class MockedRedisContainer2(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["6379"] - return ports - - -class MockedRedisContainer3(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["1234"] - return ports - - -class RedisModuleTests(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_redis_module(self): - import redis - v = redis.VERSION - self.assertIsNotNone(v, "redis module does not exist") - - -class RedisContainerCrawlTests(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = RedisContainerCrawler() - self.assertEqual(c.get_feature(), "redis") - - @mock.patch('dockercontainer.DockerContainer', - MockedRedisContainer1) - @mock.patch(("plugins.applications.redis.redis_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - @mock.patch('redis.Redis', MockedRedisClient) - def test_redis_container_crawler_forkube(self, *args): - c = RedisContainerCrawler() - emitted_tuple = c.crawl("mockcontainerid")[0] - self.assertEqual(emitted_tuple[0], "redis", - "feature key must be equal to redis") - self.assertIsInstance(emitted_tuple[1], RedisFeature) - self.assertEqual(emitted_tuple[2], "application", - "feature type must be equal to application") - - @mock.patch('dockercontainer.DockerContainer', - MockedRedisContainer2) - @mock.patch('redis.Redis', MockedRedisClient) - @mock.patch(("plugins.applications.redis.redis_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_redis_container_crawler_fordocker(self, *args): - c = RedisContainerCrawler() - emitted_tuple = c.crawl("mockcontainerid")[0] - self.assertEqual(emitted_tuple[0], "redis", - "feature key must be equal to redis") - self.assertIsInstance(emitted_tuple[1], RedisFeature) - self.assertEqual(emitted_tuple[2], "application", - "feature type must be equal to application") - - @mock.patch('dockercontainer.DockerContainer', - MockedRedisContainer3) - @mock.patch('redis.Redis', MockedRedisClient) - def test_no_available_ports(self): - c = RedisContainerCrawler() - c.crawl(1234) - pass - - @mock.patch('dockercontainer.DockerContainer', - MockedRedisContainer2) - @mock.patch('redis.Redis', MockedRedisClient2) - @mock.patch(("plugins.applications.redis.redis_container_crawler." 
- "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_redis_container_no_connect(self, *args): - c = RedisContainerCrawler() - with self.assertRaises(ConnectionError): - c.crawl(1234) - - -class RedisHostCrawlTests(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = RedisHostCrawler() - self.assertEqual(c.get_feature(), "redis") - - @mock.patch('redis.Redis', MockedRedisClient3) - def test_redis_host_crawler_dummy(self): - import redis - client = redis.Redis() - feature_attributes = create_feature(client.info()) - self.assertEqual(feature_attributes[0], -1) - - def test_redis_host_crawler(self): - with mock.patch('redis.Redis', MockedRedisClient): - c = RedisHostCrawler() - emitted_tuple = c.crawl()[0] - self.assertEqual(emitted_tuple[0], "redis", - "feature key must be equal to redis") - self.assertIsInstance(emitted_tuple[1], RedisFeature) - self.assertEqual(emitted_tuple[2], "application", - "feature type must be equal to application") - - @mock.patch('redis.Redis', MockedRedisClient2) - def test_no_redis_connection(self): - c = RedisHostCrawler() - with self.assertRaises(ConnectionError): - c.crawl() diff --git a/tests/unit/test_app_tomcat.py b/tests/unit/test_app_tomcat.py deleted file mode 100644 index 65ae9aa7..00000000 --- a/tests/unit/test_app_tomcat.py +++ /dev/null @@ -1,295 +0,0 @@ -from unittest import TestCase -import mock -from plugins.applications.tomcat import tomcat_crawler -from plugins.applications.tomcat import feature -from plugins.applications.tomcat.tomcat_container_crawler \ - import TomcatContainerCrawler -from plugins.applications.tomcat.tomcat_host_crawler \ - import TomcatHostCrawler -from utils.crawler_exceptions import CrawlError -from requests.exceptions import ConnectionError - - -def mocked_urllib2_open(request): - return MockedURLResponse() - - -def mocked_retrieve_status_page(host, port, user, password): - return server_status_value() - - -def mock_status_value(host, user, password, url): - raise CrawlError - - -def server_status_value(): - return ('' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - ) - - -class MockedURLResponse(object): - def read(self): - return server_status_value() - - -class MockedTomcatContainer1(object): - - def __init__(self, container_id): - ports = "[ {\"containerPort\" : \"8080\"} ]" - self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": - {"annotation.io.kubernetes.container.ports": ports}}} - - -class MockedTomcatContainer2(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["8080"] - return ports - - -class MockedTomcatContainer3(object): - - def __init__(self, container_id): - self.inspect = {"State": {"Pid": 1234}, - "Config": {"Labels": {"dummy": "dummy"}}} - - def get_container_ports(self): - ports = ["1234"] - return ports - - -class TomcatCrawlTests(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_hundle_ioerror(self): - with self.assertRaises(CrawlError): - tomcat_crawler.retrieve_status_page("localhost", - "1234", "test", "test") - - @mock.patch('urllib2.urlopen', mocked_urllib2_open) - def test_ok(self): - status = list(tomcat_crawler.retrieve_metrics()) - assert status == [('tomcat_jvm', - feature.TomcatJVMFeature( - free='3846720', - total='62390272', - max='922746880'), - 'application'), - ('tomcat_memory', - 
feature.TomcatMemoryFeature( - name='PS Eden Space', - type='Heap memory', - initial='16252928', - committed='16252928', - maximum='340787200', - used='8570016'), - 'application'), - ('tomcat_memory', - feature.TomcatMemoryFeature( - name='PS Survivor Space', - type='Heap memory', - initial='2621440', - committed='2621440', - maximum='2621440', - used='2621440'), - 'application'), - ('tomcat_memory', - feature.TomcatMemoryFeature( - name='Code Cache', - type='Non-heap memory', - initial='2555904', - committed='6225920', - maximum='251658240', - used='6211200'), - 'application'), - ('tomcat_memory', - feature.TomcatMemoryFeature( - name='Compressed Class Space', - type='Non-heap memory', - initial='0', - committed='2097152', - maximum='1073741824', - used='1959616'), - 'application'), - ('tomcat_memory', - feature.TomcatMemoryFeature( - name='Metaspace', - type='Non-heap memory', - initial='0', - committed='18874368', - maximum='-1', - used='18211520'), - 'application'), - ('tomcat_connector', - feature.TomcatConnectorFeature( - connector='ajp-nio-8009', - maxThread='200', - currentThread='0', - currentThreadBusy='0', - requestMaxTime='0', - processingTime='0', - requestCount='0', - errorCount='0', - byteReceived='0', - byteSent='0'), - 'application'), - ('tomcat_connector', - feature.TomcatConnectorFeature( - connector='http-nio-8080', - maxThread='200', - currentThread='2', - currentThreadBusy='1', - requestMaxTime='60', - processingTime='60', - requestCount='1', - errorCount='1', - byteReceived='0', - byteSent='2473'), - 'application'), - ('tomcat_worker', - feature.TomcatWorkerFeature( - connector='http-nio-8080', - stage='S', - time='52', - byteSent='0', - byteReceived='0', - client='0:0:0:0:0:0:0:1', - vhost='localhost', - request='/manager/status'), - 'application')] - - -class TomcatHostTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = TomcatHostCrawler() - self.assertEqual(c.get_feature(), 'tomcat') - - @mock.patch('plugins.applications.tomcat.' - 'tomcat_crawler.retrieve_status_page', - mocked_retrieve_status_page) - def test_get_metrics(self): - c = TomcatHostCrawler() - options = {"password": "password", "user": "tomcat"} - emitted = list(c.crawl(**options)) - self.assertEqual(emitted[0][0], 'tomcat_jvm') - self.assertEqual(emitted[0][2], 'application') - - -class TomcatContainerTest(TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_feature(self): - c = TomcatContainerCrawler() - self.assertEqual(c.get_feature(), 'tomcat') - - @mock.patch('plugins.applications.tomcat.' - 'tomcat_crawler.retrieve_status_page', - mocked_retrieve_status_page) - @mock.patch('dockercontainer.DockerContainer', - MockedTomcatContainer1) - @mock.patch(("plugins.applications.tomcat.tomcat_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_tomcat_container_forkube(self, *args): - c = TomcatContainerCrawler() - options = {"password": "password", "user": "tomcat"} - emitted = list(c.crawl(**options)) - self.assertEqual(emitted[0][0], 'tomcat_jvm') - self.assertEqual(emitted[0][2], 'application') - - @mock.patch('plugins.applications.tomcat.' - 'tomcat_crawler.retrieve_status_page', - mocked_retrieve_status_page) - @mock.patch('dockercontainer.DockerContainer', - MockedTomcatContainer2) - @mock.patch(("plugins.applications.tomcat.tomcat_container_crawler." 
- "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - def test_tomcat_container_fordocker(self, *args): - c = TomcatContainerCrawler() - options = {"password": "password", "user": "tomcat"} - emitted = list(c.crawl(**options)) - self.assertEqual(emitted[0][0], 'tomcat_jvm') - self.assertEqual(emitted[0][2], 'application') - - @mock.patch('dockercontainer.DockerContainer', - MockedTomcatContainer3) - def test_tomcat_container_noport(self, *args): - c = TomcatContainerCrawler() - c.crawl(1234) - pass - - @mock.patch('dockercontainer.DockerContainer', - MockedTomcatContainer1) - @mock.patch(("plugins.applications.tomcat.tomcat_container_crawler." - "run_as_another_namespace"), - return_value=['127.0.0.1', '1.2.3.4']) - @mock.patch('plugins.applications.tomcat.' - 'tomcat_crawler.retrieve_metrics', - mock_status_value) - def test_none_tomcat_container(self, *args): - options = {"password": "password", "user": "tomcat"} - c = TomcatContainerCrawler() - with self.assertRaises(ConnectionError): - c.crawl(1234, **options) diff --git a/tests/unit/test_container.py b/tests/unit/test_container.py deleted file mode 100644 index 523a4e81..00000000 --- a/tests/unit/test_container.py +++ /dev/null @@ -1,44 +0,0 @@ -import mock -import unittest - -from container import Container - - -def mocked_exists(pid): - return True - - -class ContainerTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_non_implemented_methods(self): - c = Container(1) - with self.assertRaises(NotImplementedError): - c.get_memory_cgroup_path() - with self.assertRaises(NotImplementedError): - c.get_cpu_cgroup_path() - - @mock.patch('crawler.container.os.path.exists', side_effect=mocked_exists) - def test_is_running(self, mock_exists): - c = Container(1) - assert c.is_running() - - def test_eq_ne(self): - c1 = Container(1) - c2 = Container(2) - c3 = Container(2) - assert c1 != c2 - assert c2 == c3 - - def test_is_docker(self): - c = Container(1) - assert not c.is_docker_container() - - def test_to_str(self): - c = Container(1) - print(c) diff --git a/tests/unit/test_containers.py b/tests/unit/test_containers.py deleted file mode 100644 index 882a2d3d..00000000 --- a/tests/unit/test_containers.py +++ /dev/null @@ -1,188 +0,0 @@ -import mock -import unittest - -from containers import (list_all_containers, get_containers) - - -def mocked_exists(pid): - return True - - -class DockerContainer(): - - def __init__(self, pid): - self.pid = pid - self.short_id = pid - self.long_id = pid - self.process_namespace = pid - - def __str__(self): - return 'container %s' % self.pid - - def is_docker_container(self): - return True - -DOCKER_IDS = ['101', '102', '103', '104', '105', '106'] - - -def mocked_get_docker_containers(host_namespace='', user_list='ALL'): - for long_id in DOCKER_IDS: - - if user_list not in ['ALL', 'all', 'All']: - user_ctrs = [cid[:12] for cid in user_list.split(',')] - short_id = long_id[:12] - if short_id not in user_ctrs: - continue - - c = DockerContainer(long_id) - yield c - - -class PsUtilProcess(): - - def __init__(self, pid): - self.pid = pid - - -class ContainersTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch('containers.get_docker_containers', - side_effect=lambda host_namespace, user_list='ALL': - mocked_get_docker_containers(host_namespace, user_list)) - @mock.patch('containers.container.namespace.get_pid_namespace', - side_effect=lambda pid: pid) - 
@mock.patch('containers.container.psutil.process_iter', - side_effect=lambda: [PsUtilProcess('4'), # container - PsUtilProcess('1'), # init - PsUtilProcess('5')]) # crawler - @mock.patch('containers.container.misc.process_is_crawler', - side_effect=lambda pid: True if pid == '5' else False) - def test_list_all_containers(self, *args): - pids = [ - c.pid for c in list_all_containers( - ignore_raw_containers=False)] - # pid 1 is the init process, which is not a container - # according to the definition in container.py - assert set(pids) == set(DOCKER_IDS + ['4']) - assert '1' not in pids # init process - assert '5' not in pids # crawler process - assert args[0].call_count == 2 - assert args[1].call_count == 1 - assert args[2].call_count == 2 - assert args[3].call_count == 1 - - @mock.patch('containers.get_docker_containers', - side_effect=lambda host_namespace, user_list='ALL': - mocked_get_docker_containers(host_namespace, user_list)) - @mock.patch('containers.container.namespace.get_pid_namespace', - side_effect=lambda pid: pid) - @mock.patch('containers.container.psutil.process_iter', - side_effect=lambda: [PsUtilProcess('4'), # container - PsUtilProcess('1'), # init - PsUtilProcess('5')]) # crawler - @mock.patch('containers.container.misc.process_is_crawler', - side_effect=lambda pid: True if pid == '5' else False) - def test_list_all_containers_input_list(self, *args): - pids = [ - c.pid for c in list_all_containers( - user_list='102', - ignore_raw_containers=False)] - # pid 1 is the init process, which is not a container - # according to the definition in container.py - assert set(pids) == set(['102']) - assert '3' not in pids # filtered container - assert '4' not in pids # filtered container - assert '1' not in pids # init process - assert '5' not in pids # crawler process - - @mock.patch('containers.get_docker_containers', - side_effect=lambda host_namespace, user_list='ALL': - mocked_get_docker_containers(host_namespace, user_list)) - @mock.patch('containers.container.namespace.get_pid_namespace', - side_effect=lambda pid: pid) - @mock.patch('containers.container.psutil.process_iter', - side_effect=lambda: [PsUtilProcess('4'), # container - PsUtilProcess('1'), # init - PsUtilProcess('5')]) # crawler - @mock.patch('containers.container.misc.process_is_crawler', - side_effect=lambda pid: True if pid == '5' else False) - def test_get_filtered_list(self, *args): - pids = [c.pid for c in get_containers(ignore_raw_containers=False)] - # pid 1 is the init process, which is not a container - # according to the definition in container.py - assert set(pids) == set(DOCKER_IDS + ['4']) - assert '1' not in pids # init process - assert '5' not in pids # crawler process - - @mock.patch('containers.get_docker_containers', - side_effect=lambda host_namespace, user_list='ALL': - mocked_get_docker_containers(host_namespace, user_list)) - @mock.patch('containers.container.namespace.get_pid_namespace', - side_effect=lambda pid: pid) - @mock.patch('containers.container.psutil.process_iter', - side_effect=lambda: [PsUtilProcess('4'), # container - PsUtilProcess('1'), # init - PsUtilProcess('5')]) # crawler - @mock.patch('containers.container.misc.process_is_crawler', - side_effect=lambda pid: True if pid == '5' else False) - def test_get_filtered_list_with_input_list(self, *args): - pids = [ - c.pid for c in get_containers(ignore_raw_containers=False, - user_list='102')] - # pid 1 is the init process, which is not a container - # according to the definition in container.py - assert set(pids) == 
set(['102']) - assert '3' not in pids # filtered container - assert '4' not in pids # filtered container - assert '1' not in pids # init process - assert '5' not in pids # crawler process - - @mock.patch('containers.get_docker_containers', - side_effect=lambda host_namespace, user_list='ALL': - mocked_get_docker_containers(host_namespace, user_list)) - @mock.patch('containers.container.namespace.get_pid_namespace', - side_effect=lambda pid: pid) - @mock.patch('containers.container.psutil.process_iter', - side_effect=lambda: [PsUtilProcess('4'), # container - PsUtilProcess('1'), # init - PsUtilProcess('5')]) # crawler - @mock.patch('containers.container.misc.process_is_crawler', - side_effect=lambda pid: True if pid == '5' else False) - def test_get_filtered_list_with_input_list_ALL(self, *args): - pids = [ - c.pid for c in get_containers(ignore_raw_containers=False, - user_list='ALL')] - # pid 1 is the init process, which is not a container - # according to the definition in container.py - assert set(pids) == set(DOCKER_IDS + ['4']) - - @mock.patch('containers.get_docker_containers', - side_effect=lambda host_namespace, user_list='ALL': - mocked_get_docker_containers(host_namespace, user_list)) - @mock.patch('containers.container.namespace.get_pid_namespace', - side_effect=lambda pid: pid) - @mock.patch('containers.container.psutil.process_iter', - side_effect=lambda: [PsUtilProcess('4'), # container - PsUtilProcess('1'), # init - PsUtilProcess('5')]) # crawler - @mock.patch('containers.container.misc.process_is_crawler', - side_effect=lambda pid: True if pid == '5' else False) - def test_get_filtered_list_non_default_env(self, *args): - opts = {'environment': 'alchemy', - 'docker_containers_list': 'ALL', - 'partition_strategy': {'name': 'equally_by_pid', - 'args': {'process_id': 0, - 'num_processes': 1}}} - pids = [c.pid for c in get_containers(opts)] - # pid 1 is the init process, which is not a container - # according to the definition in container.py - assert set(pids) == set(DOCKER_IDS) - # only docker containers are returned in non-cloudsight environments - # (see the 'alchemy' above) - assert '4' not in pids diff --git a/tests/unit/test_containers_crawler.py b/tests/unit/test_containers_crawler.py deleted file mode 100644 index 7f76c72a..00000000 --- a/tests/unit/test_containers_crawler.py +++ /dev/null @@ -1,139 +0,0 @@ -import mock -import unittest -from containers_crawler import ContainersCrawler - - -class MockedOSCrawler: - - def crawl(self, **kwargs): - return [('linux', {'os': 'some_os'}, 'os')] - - -class MockedCPUCrawler: - - def crawl(self, **kwargs): - return [('cpu-0', {'used': 100}, 'cpu')] - - -class MockedOSCrawlerFailure: - - def crawl(self, container_id, **kwargs): - if container_id == 'errorid': - raise OSError('some exception') - else: - return [('linux', {'os': 'some_os'}, 'os')] - - -class MockedDockerContainer: - - def __init__(self, short_id='short_id', pid=777): - self.namespace = short_id - self.pid = pid - self.short_id = short_id - self.long_id = short_id - self.name = 'name' - self.image = 'image' - self.owner_namespace = 'owner_namespace' - self.docker_image_long_name = 'image_long_name' - self.docker_image_short_name = 'image_short_name' - self.docker_image_tag = 'image_tag' - self.docker_image_registry = 'image_registry' - - def is_docker_container(self): - return True - - def link_logfiles(self, options): - pass - - def unlink_logfiles(self, options): - pass - - def get_metadata_dict(self): - return {'namespace': self.namespace} - - def __eq__(self, 
other): - return self.pid == other.pid - - -class ContainersCrawlerTests(unittest.TestCase): - - @mock.patch( - 'containers_crawler.plugins_manager.get_container_crawl_plugins', - side_effect=lambda features: [(MockedOSCrawler(), {}), - (MockedCPUCrawler(), {})]) - @mock.patch('containers_crawler.get_containers', - side_effect=lambda host_namespace, user_list: [ - MockedDockerContainer( - short_id='aaa', - pid=101), - MockedDockerContainer( - short_id='bbb', - pid=102), - MockedDockerContainer( - short_id='ccc', - pid=103)]) - def test_containers_crawler(self, *args): - crawler = ContainersCrawler(features=['os']) - frames = list(crawler.crawl()) - namespaces = sorted([f.metadata['namespace'] for f in frames]) - assert namespaces == sorted(['aaa', 'bbb', 'ccc']) - features_count = sorted([f.num_features for f in frames]) - assert features_count == sorted([2, 2, 2]) - system_types = sorted([f.metadata['system_type'] for f in frames]) - assert system_types == sorted(['container', 'container', 'container']) - assert args[0].call_count == 1 - assert args[1].call_count == 1 - - @mock.patch( - 'containers_crawler.plugins_manager.get_container_crawl_plugins', - side_effect=lambda features: [(MockedOSCrawlerFailure(), {}), - (MockedCPUCrawler(), {})]) - @mock.patch('containers_crawler.get_containers', - side_effect=lambda host_namespace, user_list: [ - MockedDockerContainer( - short_id='aaa', - pid=101), - MockedDockerContainer( - short_id='errorid', - pid=102), - MockedDockerContainer( - short_id='ccc', - pid=103)]) - def test_failed_containers_crawler(self, *args): - crawler = ContainersCrawler(features=['os']) - with self.assertRaises(OSError): - frames = list(crawler.crawl(ignore_plugin_exception=False)) - assert args[0].call_count == 1 - assert args[1].call_count == 1 - - @mock.patch( - 'containers_crawler.plugins_manager.get_container_crawl_plugins', - side_effect=lambda features: [(MockedCPUCrawler(), {}), - (MockedOSCrawlerFailure(), {}), - (MockedCPUCrawler(), {})]) - @mock.patch('containers_crawler.get_containers', - side_effect=lambda host_namespace, user_list: [ - MockedDockerContainer( - short_id='aaa', - pid=101), - MockedDockerContainer( - short_id='errorid', - pid=102), - MockedDockerContainer( - short_id='ccc', - pid=103)]) - def test_failed_containers_crawler_with_ignore_failure(self, *args): - crawler = ContainersCrawler(features=['os']) - frames = list(crawler.crawl()) # defaults to ignore_plugin_exception - namespaces = sorted([f.metadata['namespace'] for f in frames]) - assert namespaces == sorted(['aaa', 'errorid', 'ccc']) - features_count = sorted([f.num_features for f in frames]) - assert features_count == sorted([3, 2, 3]) - system_types = [f.metadata['system_type'] for f in frames] - assert system_types == ['container', 'container', 'container'] - assert args[0].call_count == 1 - assert args[1].call_count == 1 - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/unit/test_diskio_host.py b/tests/unit/test_diskio_host.py deleted file mode 100644 index 3489e47a..00000000 --- a/tests/unit/test_diskio_host.py +++ /dev/null @@ -1,130 +0,0 @@ -''' -Unit tests for the DiskioHostCrawler plugin -''' -import unittest -import mock - -from plugins.systems.diskio_host_crawler import DiskioHostCrawler - -counters_increment = 0 -time_increment = 0 - -def mocked_time(): - ''' - Used to mock time.time(), which the crawler calls to calculate rates - ''' - global time_increment - - base_time = 1504726245 - return base_time + time_increment - -def mocked_diskio_counters(): - 
'''
- Used to mock DiskContainerCrawler._crawl_disk_io_counters()
- '''
- global counters_increment
-
- base_counters = [10, 10, 10, 10]
- counters = [ i + counters_increment for i in base_counters]
- yield ('loop', [0, 0 , 0, 0])
- yield ('sda1', counters)
-
-class TestDiskioCrawlerPlugin(unittest.TestCase):
-
- @classmethod
- def setUpClass(cls):
- cls._crawler = DiskioHostCrawler()
-
- def testGetFeature(self):
- crawler = DiskioHostCrawler()
- self.assertEqual('diskio', crawler.get_feature())
-
- def test_crawl_disk_io_counters(self):
- crawler = DiskioHostCrawler()
- diskio_data = crawler._crawl_disk_io_counters()
- for device_name, counters in diskio_data:
- self.assertIsInstance(device_name, basestring)
- self.assertEqual(4, len(counters))
- for counter in counters:
- self.assertIsInstance(counter, (int, long))
-
- @mock.patch('time.time', side_effect=mocked_time)
- @mock.patch.object(DiskioHostCrawler, '_crawl_disk_io_counters', side_effect=mocked_diskio_counters)
- def testCrawl(self, mocked_diskio_counters, mocked_time):
- global counters_increment
- global time_increment
-
- # First crawl
- diskio_feature = self._crawler.crawl()
- for device_name, feature_attributes, feature_key in diskio_feature:
- self.assertEqual('diskio', feature_key)
- self.assertEqual(4, len(feature_attributes), 'Incorrect number of attributes')
- self.assertIsInstance(device_name, basestring, 'Device name should be string')
-
- self.assertEqual(0, feature_attributes.readoprate, 'Unexpected read operations per second')
- self.assertEqual(0, feature_attributes.writeoprate, 'Unexpected write operations per second')
- self.assertEqual(0, feature_attributes.readbytesrate, 'Unexpected bytes read per second')
- self.assertEqual(0, feature_attributes.writebytesrate, 'Unexpected bytes written per second')
-
- if device_name == 'diskio-loop':
- pass
- elif device_name == 'diskio-sda1':
- pass
- else:
- raise Exception('Unexpected device name')
-
- # Make sure counters will be incremented by the function mocking the I/O counters
- counters_increment = 100.0
-
- # Make sure the time will be incremented by the mocked time.time()
- time_increment = 60
-
- # Second crawl
- diskio_feature = self._crawler.crawl()
- for device_name, feature_attributes, feature_key in diskio_feature:
- self.assertEqual('diskio', feature_key)
- self.assertEqual(4, len(feature_attributes), 'Incorrect number of attributes')
- self.assertIsInstance(device_name, basestring, 'Device name should be string')
- if device_name == 'diskio-loop':
- self.assertEqual(0, feature_attributes.readoprate, 'Unexpected read operations per second')
- self.assertEqual(0, feature_attributes.writeoprate, 'Unexpected write operations per second')
- self.assertEqual(0, feature_attributes.readbytesrate, 'Unexpected bytes read per second')
- self.assertEqual(0, feature_attributes.writebytesrate, 'Unexpected bytes written per second')
- elif device_name == 'diskio-sda1':
- expected_rate = round(counters_increment/time_increment, 2)
- self.assertEqual(feature_attributes.readoprate, expected_rate, 'Unexpected read operations per second')
- self.assertEqual(feature_attributes.writeoprate, expected_rate, 'Unexpected write operations per second')
- self.assertEqual(feature_attributes.readbytesrate, expected_rate, 'Unexpected bytes read per second')
- self.assertEqual(feature_attributes.writebytesrate, expected_rate, 'Unexpected bytes written per second')
- else:
- raise Exception('Unexpected device name')
-
- # Make sure the counter-diff as compared to the previous crawl
will be negative, - # to emulate a case where the OS counters have wrapped - # In this case, the crawler is expected to report the same measurement as before - counters_increment = -500.0 - - # Make sure the time will be incremented by the mocked time.time() - time_increment += 60 - - # Third crawl - diskio_feature = self._crawler.crawl() - for device_name, feature_attributes, feature_key in diskio_feature: - self.assertEqual('diskio', feature_key) - self.assertEqual(4, len(feature_attributes), 'Incorrect number of attributes') - self.assertIsInstance(device_name, basestring, 'Device name should be string') - if device_name == 'diskio-loop': - self.assertEqual(0, feature_attributes.readoprate, 'Unexpected read operations per second') - self.assertEqual(0, feature_attributes.writeoprate, 'Unexpected write operations per second') - self.assertEqual(0, feature_attributes.readbytesrate, 'Unexpected bytes read per second') - self.assertEqual(0, feature_attributes.writebytesrate, 'Unexpected bytes written per second') - elif device_name == 'diskio-sda1': - self.assertEqual(feature_attributes.readoprate, expected_rate, 'Unexpected read operations per second') - self.assertEqual(feature_attributes.writeoprate, expected_rate, 'Unexpected write operations per second') - self.assertEqual(feature_attributes.readbytesrate, expected_rate, 'Unexpected bytes read per second') - self.assertEqual(feature_attributes.writebytesrate, expected_rate, 'Unexpected bytes written per second') - else: - raise Exception('Unexpected device name') - -if __name__ == "__main__": - unittest.main() diff --git a/tests/unit/test_dockercontainer.py b/tests/unit/test_dockercontainer.py deleted file mode 100644 index d76119f6..00000000 --- a/tests/unit/test_dockercontainer.py +++ /dev/null @@ -1,841 +0,0 @@ -import copy -import unittest - -import mock -import requests - -from dockercontainer import DockerContainer, get_docker_containers -from utils import crawler_exceptions - - -def mocked_exists(pid): - return True - - -def mocked_docker_inspect(long_id): - if long_id == 'no_container_id': - raise requests.exceptions.HTTPError - else: - inspect = { - "Id": "good_id", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - "Mounts": [], - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - } - } - inspect['Id'] = long_id - return inspect - - -def mocked_exec_dockerps(): - inspect1 = { - "Id": "good_id", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - "Mounts": [], - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - } - } - inspect2 = { - "Id": "no_namespace", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - "Mounts": [], - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - } - } - inspect3 = { - "Id": "good_id", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - 
"Mounts": [], - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - } - } - return [inspect1, inspect2, inspect3] - - -def mocked_exec_dockerps_long(): - inspect = { - "Id": "", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - "Mounts": [], - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - } - } - for i in range(10): - _inspect = inspect - _inspect['Id'] = str(i) - yield _inspect - - -def mocked_get_rootfs(long_id): - if long_id == 'valid_rootfs_id': - return '/tmp/something/docker/' + long_id - else: - raise requests.exceptions.HTTPError - - -def mocked_symlink_oserror(a, b): - raise OSError() - - -def mocked_symlink_exception(a, b): - raise Exception() - - -def mocked_rmtree_exception(path): - raise OSError() - - -class MockedRuntimeEnv(): - - def get_environment_name(self): - return 'cloudsight' - - def get_container_namespace(self, long_id, options): - if long_id == 'good_id': - return 'random_namespace' - elif long_id == 'throw_non_handled_exception_id': - raise Exception() - elif long_id == 'throw_bad_environment_exception_id': - raise crawler_exceptions.ContainerInvalidEnvironment() - elif long_id == 'no_namespace': - return None - else: - return 'other_namespace' - - def get_container_log_file_list(self, long_id, options): - logs = copy.deepcopy(options['container_logs']) - if long_id == 'good_id': - logs.extend([{'name': '/var/log/1', 'type': None}, - {'name': '/var/log/2', 'type': None}]) - elif long_id == 'throw_value_error_id': - raise ValueError() - elif long_id == 'valid_rootfs_id': - logs.extend([{'name': '/var/log/1', 'type': None}, - {'name': '/var/log/2', 'type': None}, - {'name': '../../as', 'type': None}]) - return logs - - def get_container_log_prefix(self, long_id, options): - return 'random_prefix' - - -def mocked_get_runtime_env(): - return MockedRuntimeEnv() - - -def mocked_get_container_json_logs_path(id, inspect): - return '/var/lib/docker/abc/container/log.json' - - -class DockerDockerContainerTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_list_docker_containers(self, mock_get_rootfs, mock_inspect, - mocked_get_runtime_env, mocked_dockerps): - n = 0 - for c in get_docker_containers(): - assert c.long_id == 'good_id' - n += 1 - assert mocked_get_runtime_env.call_count == 3 - assert n == 2 - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps_long) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_list_docker_containers_with_input( - self, - mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - ids = [c.short_id for c in get_docker_containers(user_list='1,2,8')] - assert set(ids) == 
set(['1', '2', '8']) - assert mocked_get_runtime_env.call_count == 3 - ids = [c.long_id for c in get_docker_containers(user_list='5,3')] - assert set(ids) == set(['3', '5']) - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_list_docker_containers_with_opts(self, mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - n = 0 - for c in get_docker_containers(): - assert c.long_id == 'good_id' - n += 1 - assert mocked_get_runtime_env.call_count == 3 - assert n == 2 - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_init( - self, - mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - c = DockerContainer("good_id") - mock_inspect.assert_called() - assert not c.root_fs - assert mocked_get_runtime_env.call_count == 1 - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_init_from_inspect(self, mock_get_rootfs, mock_inspect, - mocked_get_runtime_env, mocked_dockerps): - inspect = { - "Id": "good_id", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - "Mounts": [], - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - } - } - c = DockerContainer("good_id", inspect) - mock_inspect.assert_not_called() - assert not c.root_fs - assert mocked_get_runtime_env.call_count == 1 - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_init_from_inspect_w_repotags(self, mock_get_rootfs, mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - inspect = { - "Id": "good_id", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - "Mounts": [], - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - }, - 'RepoTag': 'registry.com:123/ric/img:latest' - } - c = DockerContainer("good_id", inspect) - mock_inspect.assert_not_called() - assert not c.root_fs - assert mocked_get_runtime_env.call_count == 1 - assert 
c.docker_image_long_name == 'registry.com:123/ric/img:latest' - assert c.docker_image_short_name == 'img:latest' - assert c.docker_image_tag == 'latest' - assert c.docker_image_registry == 'registry.com:123' - assert c.owner_namespace == 'ric' - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_init_from_inspect_w_repotags2(self, mock_get_rootfs, mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - inspect = { - "Id": "good_id", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - "Mounts": [], - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - }, - 'RepoTag': 'registry.com:123/img:latest' - } - c = DockerContainer("good_id", inspect) - mock_inspect.assert_not_called() - assert not c.root_fs - assert mocked_get_runtime_env.call_count == 1 - assert c.docker_image_long_name == 'registry.com:123/img:latest' - assert c.docker_image_short_name == 'img:latest' - assert c.docker_image_tag == 'latest' - assert c.docker_image_registry == 'registry.com:123' - assert c.owner_namespace == '' - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_init_failed(self, mock_get_rootfs, mock_inspect, - mocked_get_runtime_env, mocked_dockerps): - with self.assertRaises(crawler_exceptions.ContainerNonExistent): - DockerContainer("no_container_id") - assert mocked_get_runtime_env.call_count == 0 - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_init_wrong_environment( - self, - mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - with self.assertRaises(crawler_exceptions.ContainerInvalidEnvironment): - DockerContainer("no_namespace") - with self.assertRaises(crawler_exceptions.ContainerInvalidEnvironment): - DockerContainer("throw_bad_environment_exception_id") - with self.assertRaises(Exception): - DockerContainer("throw_non_handled_exception_id") - with self.assertRaises(crawler_exceptions.ContainerInvalidEnvironment): - DockerContainer("throw_value_error_id") - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def test_is_docker( - self, 
- mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - c = DockerContainer("good_id") - assert c.is_docker_container() - print(c) - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - @mock.patch('dockercontainer.os.path.ismount', - side_effect=lambda x: True if x == '/cgroup/memory' else False) - def test_memory_cgroup( - self, - mocked_ismount, - mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - c = DockerContainer("good_id") - assert c.get_memory_cgroup_path( - 'abc') == '/cgroup/memory/docker/good_id/abc' - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - @mock.patch('dockercontainer.os.path.ismount', - side_effect=lambda x: - True if x == '/cgroup/cpuacct' or '/cgroup/cpu,cpuacct' else False) - def test_cpu_cgroup( - self, - mocked_ismount, - mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - c = DockerContainer("good_id") - assert c.get_cpu_cgroup_path( - 'abc') == ("/cgroup/cpuacct/docker/good_id/" - "abc") or ("cgroup/cpu,cpuacct/docker/good_id/abc") - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - @mock.patch('dockercontainer.os.makedirs') - @mock.patch('dockercontainer.os.symlink') - def test_link_logfiles( - self, - mock_symlink, - mock_makedirs, - mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - c = DockerContainer("valid_rootfs_id") - c.link_logfiles() - mock_symlink.assert_called_with( - '/tmp/something/docker/valid_rootfs_id/var/log/2', - '/var/log/crawler_container_logs/random_prefix/var/log/2') - assert mock_symlink.call_count == 4 - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - @mock.patch('dockercontainer.os.makedirs') - @mock.patch('dockercontainer.os.symlink') - @mock.patch('dockercontainer.misc.get_process_env', - side_effect=lambda x: { - 'LOG_LOCATIONS': '/var/env/1,/var/env/2'}) - def test_link_logfiles_env_variable( - self, - mock_get_env, - mock_symlink, - mock_makedirs, - mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - c = DockerContainer("valid_rootfs_id") - c.link_logfiles() - mock_symlink.assert_called_with( - '/tmp/something/docker/valid_rootfs_id/var/log/2', - 
'/var/log/crawler_container_logs/random_prefix/var/log/2')
- assert mock_symlink.call_count == 6
-
- @mock.patch('dockercontainer.exec_dockerps',
- side_effect=mocked_exec_dockerps)
- @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin',
- side_effect=mocked_get_runtime_env)
- @mock.patch('dockercontainer.exec_dockerinspect',
- side_effect=mocked_docker_inspect)
- @mock.patch('dockercontainer.get_docker_container_rootfs_path',
- side_effect=mocked_get_rootfs)
- @mock.patch('dockercontainer.os.makedirs')
- @mock.patch('dockercontainer.os.symlink',
- side_effect=mocked_symlink_oserror)
- def test_link_logfiles_symlink_oserror(
- self,
- mock_symlink,
- mock_makedirs,
- mock_get_rootfs,
- mock_inspect,
- mocked_get_runtime_env,
- mocked_dockerps):
- c = DockerContainer("valid_rootfs_id")
- c.link_logfiles()
- # no exception should be thrown
-
- @mock.patch('dockercontainer.exec_dockerps',
- side_effect=mocked_exec_dockerps)
- @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin',
- side_effect=mocked_get_runtime_env)
- @mock.patch('dockercontainer.exec_dockerinspect',
- side_effect=mocked_docker_inspect)
- @mock.patch('dockercontainer.get_docker_container_rootfs_path',
- side_effect=mocked_get_rootfs)
- @mock.patch('dockercontainer.os.makedirs')
- @mock.patch('dockercontainer.os.symlink',
- side_effect=mocked_symlink_exception)
- def test_link_logfiles_symlink_exception(
- self,
- mock_symlink,
- mock_makedirs,
- mock_get_rootfs,
- mock_inspect,
- mocked_get_runtime_env,
- mocked_dockerps):
- c = DockerContainer("valid_rootfs_id")
- c.link_logfiles()
- # no exception should be thrown
-
- @mock.patch('dockercontainer.exec_dockerps',
- side_effect=mocked_exec_dockerps)
- @mock.patch(
- 'dockercontainer.plugins_manager.get_runtime_env_plugin',
- side_effect=mocked_get_runtime_env)
- @mock.patch('dockercontainer.exec_dockerinspect',
- side_effect=mocked_docker_inspect)
- @mock.patch('dockercontainer.get_docker_container_rootfs_path',
- side_effect=mocked_get_rootfs)
- @mock.patch('dockercontainer.os.makedirs')
- @mock.patch('dockercontainer.os.symlink')
- @mock.patch('dockercontainer.shutil.rmtree')
- def test_link_and_unlink_logfiles(
- self,
- mock_rmtree,
- mock_symlink,
- mock_makedirs,
- mock_get_rootfs,
- mock_inspect,
- mocked_get_runtime_env,
- mocked_dockerps):
- c = DockerContainer("valid_rootfs_id")
- c.link_logfiles()
- mock_symlink.assert_called_with(
- '/tmp/something/docker/valid_rootfs_id/var/log/2',
- '/var/log/crawler_container_logs/random_prefix/var/log/2')
- c.unlink_logfiles()
- assert mock_symlink.call_count == 4
- assert mock_rmtree.call_count == 1
-
- @mock.patch('dockercontainer.exec_dockerps',
- side_effect=mocked_exec_dockerps)
- @mock.patch(
- 'dockercontainer.plugins_manager.get_runtime_env_plugin',
- side_effect=mocked_get_runtime_env)
- @mock.patch('dockercontainer.exec_dockerinspect',
- side_effect=mocked_docker_inspect)
- @mock.patch('dockercontainer.get_docker_container_rootfs_path',
- side_effect=mocked_get_rootfs)
- @mock.patch('dockercontainer.os.makedirs')
- @mock.patch('dockercontainer.os.symlink')
- @mock.patch('dockercontainer.shutil.rmtree')
- @mock.patch('dockercontainer.get_docker_container_json_logs_path',
- side_effect=mocked_get_container_json_logs_path)
- def test_link_and_unlink_docker_json_logfile(
- self,
- mock_json_logs,
- mock_rmtree,
- mock_symlink,
- mock_makedirs,
- mock_get_rootfs,
- mock_inspect,
- mocked_get_runtime_env,
- mocked_dockerps):
- c = DockerContainer("valid_rootfs_id")
- c.link_logfiles()
-
mock_symlink.assert_called_with( - '/var/lib/docker/abc/container/log.json', - '/var/log/crawler_container_logs/random_prefix/docker.log') - c.unlink_logfiles() - assert mock_symlink.call_count == 5 - assert mock_rmtree.call_count == 1 - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch( - 'dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - @mock.patch('dockercontainer.os.makedirs') - @mock.patch('dockercontainer.os.symlink') - @mock.patch('dockercontainer.shutil.rmtree', - side_effect=mocked_rmtree_exception) - def test_link_and_unlink_logfiles_failed_rmtree( - self, - mock_rmtree, - mock_symlink, - mock_makedirs, - mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - c = DockerContainer("valid_rootfs_id") - c.link_logfiles() - mock_symlink.assert_called_with( - '/tmp/something/docker/valid_rootfs_id/var/log/2', - '/var/log/crawler_container_logs/random_prefix/var/log/2') - c.unlink_logfiles() - assert mock_symlink.call_count == 4 - assert mock_rmtree.call_count == 1 - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch( - 'dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - @mock.patch('dockercontainer.os.makedirs') - @mock.patch('dockercontainer.os.symlink') - @mock.patch('dockercontainer.shutil.rmtree', - side_effect=mocked_rmtree_exception) - def test_links_with_mounts( - self, - mock_rmtree, - mock_symlink, - mock_makedirs, - mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - inspect = { - "Id": "valid_rootfs_id", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - # /var in the container is mapped to /mount/in/the/host - # container was started with -v /var/in/the/host:/var - "Mounts": [{'Source': '/var/in/the/host', - 'Destination': '/var'}], - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - } - } - c = DockerContainer("valid_rootfs_id", inspect) - c.link_logfiles() - mock_symlink.assert_called_with( - '/var/in/the/host/log/2', - '/var/log/crawler_container_logs/random_prefix/var/log/2') - c.unlink_logfiles() - assert mock_symlink.call_count == 4 - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch( - 'dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - @mock.patch('dockercontainer.os.makedirs') - @mock.patch('dockercontainer.os.symlink') - @mock.patch('dockercontainer.shutil.rmtree', - side_effect=mocked_rmtree_exception) - # In older docker versions, the inspect field for Mounts was called Volumes - def test_links_with_volumes( - self, - mock_rmtree, - mock_symlink, - mock_makedirs, - 
mock_get_rootfs, - mock_inspect, - mocked_get_runtime_env, - mocked_dockerps): - inspect = { - "Id": "valid_rootfs_id", - "Created": "2016-07-06T16:38:05.479090842Z", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - # /var in the container is mapped to /mount/in/the/host - # container was started with -v /var/in/the/host:/var - "Volumes": {'/var': '/var/in/the/host'}, - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - } - } - c = DockerContainer("valid_rootfs_id", inspect) - c.link_logfiles() - mock_symlink.assert_called_with( - '/var/in/the/host/log/2', - '/var/log/crawler_container_logs/random_prefix/var/log/2') - c.unlink_logfiles() - assert mock_symlink.call_count == 4 - - # TODO test _get_cgroup_dir when ismount fails - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch( - 'dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - def _test_non_implemented_methods(self): - c = DockerContainer("some_id") - with self.assertRaises(NotImplementedError): - c.get_memory_cgroup_path() - with self.assertRaises(NotImplementedError): - c.get_cpu_cgroup_path() - with self.assertRaises(NotImplementedError): - c.link_logfiles() - with self.assertRaises(NotImplementedError): - c.unlink_logfiles() - - @mock.patch('dockercontainer.exec_dockerps', - side_effect=mocked_exec_dockerps) - @mock.patch( - 'dockercontainer.plugins_manager.get_runtime_env_plugin', - side_effect=mocked_get_runtime_env) - @mock.patch('dockercontainer.exec_dockerinspect', - side_effect=mocked_docker_inspect) - @mock.patch('dockercontainer.get_docker_container_rootfs_path', - side_effect=mocked_get_rootfs) - @mock.patch('emitter.os.path.exists', side_effect=mocked_exists) - def _test_is_running(self, mock_exists): - c = DockerContainer("good_id") - assert c.is_running() - - def _test_eq_ne(self): - c1 = DockerContainer("good_id") - c2 = DockerContainer("ebcd") - c3 = DockerContainer("ebcd") - assert c1 != c2 - assert c2 == c3 - - def _test_to_str(self): - c = DockerContainer("good_id") - print(c) diff --git a/tests/unit/test_dockerutils.py b/tests/unit/test_dockerutils.py deleted file mode 100644 index 9453d49a..00000000 --- a/tests/unit/test_dockerutils.py +++ /dev/null @@ -1,381 +0,0 @@ -import unittest - -import dateutil.parser as dp -import docker -import mock - -import utils.dockerutils -from utils.crawler_exceptions import (DockerutilsNoJsonLog, DockerutilsException) - - -class MockedClient(): - - def containers(self): - return [{'Id': 'good_id'}] - - def info(self): - return {'Driver': 'btrfs', 'DockerRootDir': '/var/lib/docker'} - - def version(self): - return {'Version': '1.10.1'} - - def inspect_container(self, id): - return { - "Id": "good_id", - "Created": "2016-07-06", - "State": { - "Status": "running", - "Running": True, - "Pid": 11186 - }, - "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", - "Name": "/pensive_rosalind", - "Mounts": [], - "LogPath": "/a/b/c/log.json", - "Config": { - "Cmd": [ - "bash" - ], - "Image": "ubuntu:trusty" - }, - "NetworkSettings": { - "Ports": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "32768" - } - ]} - - }, - "HostConfig": { - 
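# A sketch of the path translation that test_links_with_mounts and
# test_links_with_volumes above pin down (helper name assumed; the real
# logic lives in dockercontainer.py): a log path inside the container is
# rewritten to its host-side location via the inspect 'Mounts' entries.
def container_path_to_host(path, mounts):
    for mount in mounts:
        dest = mount['Destination'].rstrip('/')
        if path == dest or path.startswith(dest + '/'):
            return mount['Source'] + path[len(dest):]
    return path  # not under any mount: resolved via the container rootfs

# container_path_to_host('/var/log/2', [{'Source': '/var/in/the/host',
#                                        'Destination': '/var'}])
# returns '/var/in/the/host/log/2', the path both tests assert on.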
"PortBindings": { - "809/tcp": [ - { - "HostIp": "", - "HostPort": "" - } - ] - } - - } - } - - def inspect_image(self, image_id): - return {'RepoTags': 'registry/abc/def:latest'} - - def history(self, image_id): - return [{'History': 'xxx'}] - - -def throw_runtime_error(*args, **kwargs): - raise RuntimeError() - - -def throw_io_error(*args, **kwargs): - raise IOError() - - -def throw_docker_exception(*args, **kwargs): - raise docker.errors.DockerException() - - -class DockerUtilsTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - def test_exec_dockerps(self, *args): - for c in utils.dockerutils.exec_dockerps(): - print c - break - - docker_datetime = dp.parse('2016-07-06') - epoch_seconds = docker_datetime.strftime('%s') - - assert c == {'Name': '/pensive_rosalind', - 'Created': epoch_seconds, - 'RepoTag': 'r', - 'State': {'Status': 'running', - 'Running': True, - 'Pid': '11186'}, - 'Mounts': [], - 'Config': {'Image': 'ubuntu:trusty', - 'Cmd': ['bash']}, - 'NetworkSettings': {'Ports': { - '80/tcp': [{'HostPort': '32768', - 'HostIp': '0.0.0.0'}]}}, - 'Image': 'sha256:07c86167cdc4264926fa5d2894e34a339ad27', - 'LogPath': '/a/b/c/log.json', - 'HostConfig': {'PortBindings': { - '809/tcp': [{'HostPort': '', - 'HostIp': ''}]}}, - 'Id': 'good_id'} - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.exec_dockerinspect', - side_effect=throw_docker_exception) - def test_exec_dockerps_failure(self, *args): - with self.assertRaises(DockerutilsException): - utils.dockerutils.exec_dockerps() - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - def test_exec_docker_history(self, *args): - h = utils.dockerutils.exec_docker_history('ididid') - assert h == [{'History': 'xxx'}] - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=throw_docker_exception) - def test_exec_docker_history_failure(self, *args): - with self.assertRaises(DockerutilsException): - utils.dockerutils.exec_docker_history('ididid') - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - def test_exec_docker_inspect(self, *args): - i = utils.dockerutils.exec_dockerinspect('ididid') - - docker_datetime = dp.parse('2016-07-06') - epoch_seconds = docker_datetime.strftime('%s') - - assert i == {'Name': '/pensive_rosalind', - 'Created': epoch_seconds, - 'RepoTag': 'r', - 'State': {'Status': 'running', - 'Running': True, - 'Pid': '11186'}, - 'Mounts': [], - 'Config': {'Image': 'ubuntu:trusty', - 'Cmd': ['bash']}, - 'NetworkSettings': {'Ports': { - '80/tcp': [ - {'HostPort': '32768', - 'HostIp': '0.0.0.0'}]}}, - 'Image': 'sha256:07c86167cdc4264926fa5d2894e34a339ad27', - 'LogPath': '/a/b/c/log.json', - 'HostConfig': {'PortBindings': { - '809/tcp': [{'HostPort': '', - 'HostIp': ''}]}}, - 'Id': 'good_id'} - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=throw_docker_exception) - def test_exec_docker_inspect_failure(self, *args): - with self.assertRaises(DockerutilsException): - utils.dockerutils.exec_dockerinspect('ididid') - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=throw_docker_exception) - @mock.patch('utils.dockerutils.open') - def test_get_docker_storage_driver_step1a(self, mock_open, mock_client): - - mock_open.return_value = 
open('tests/unit/proc_mounts_aufs') - assert utils.dockerutils._get_docker_storage_driver() == 'aufs' - mock_open.return_value = open('tests/unit/proc_mounts_devicemapper') - assert utils.dockerutils._get_docker_storage_driver() == 'devicemapper' - mock_open.return_value = open('tests/unit/proc_mounts_vfs') - assert utils.dockerutils._get_docker_storage_driver() == 'vfs' - mock_open.return_value = open('tests/unit/proc_mounts_btrfs') - assert utils.dockerutils._get_docker_storage_driver() == 'btrfs' - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=throw_io_error) - def test_get_docker_storage_driver_step2(self, mock_open, mock_client): - assert utils.dockerutils._get_docker_storage_driver() == 'btrfs' - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=throw_docker_exception) - @mock.patch('utils.dockerutils.open', - side_effect=throw_io_error) - def test_get_docker_storage_driver_failure(self, mock_open, mock_client): - assert utils.dockerutils._get_docker_storage_driver() == 'devicemapper' - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - def test_get_docker_server_version(self, mock_client): - assert utils.dockerutils._get_docker_server_version() == '1.10.1' - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=throw_docker_exception) - def test_get_docker_server_version_failure(self, mock_client): - with self.assertRaises(DockerutilsException): - utils.dockerutils._get_docker_server_version() - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch( - 'crawler.utils.dockerutils.os.path.isfile', - side_effect=lambda p: - True if p == ("/var/lib/docker/containers/id/id-json.log") - else False) - def test_get_json_logs_path_from_path(self, mock_isfile, mock_client): - assert utils.dockerutils.get_docker_container_json_logs_path( - 'id') == '/var/lib/docker/containers/id/id-json.log' - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.os.path.isfile', - side_effect=lambda p: - True if p == '/a/b/c/log.json' else False) - def test_get_json_logs_path_from_daemon(self, mock_isfile, mock_client): - assert utils.dockerutils.get_docker_container_json_logs_path( - 'id') == '/a/b/c/log.json' - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.os.path.isfile', - side_effect=lambda p: False) - def test_get_json_logs_path_failure(self, mock_isfile, mock_client): - with self.assertRaises(DockerutilsNoJsonLog): - utils.dockerutils.get_docker_container_json_logs_path('id') - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=throw_io_error) - def test_get_rootfs_not_supported_driver_failure( - self, mock_open, mock_client): - utils.dockerutils.driver = 'not_supported_driver' - with self.assertRaises(DockerutilsException): - utils.dockerutils.get_docker_container_rootfs_path('id') - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=[open('tests/unit/proc_pid_mounts_devicemapper'), - open('tests/unit/proc_mounts_devicemapper')]) - def 
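# The three step tests here pin down a detection fallback chain; a sketch of
# that logic (simplified, names assumed): parse /proc/mounts first, then ask
# the docker daemon, then fall back to a hard default.
def detect_storage_driver(read_proc_mounts, docker_info):
    try:
        driver = read_proc_mounts()      # step 1: scan /proc/mounts
        if driver:                       # (the proc_mounts_* fixtures)
            return driver
    except IOError:
        pass
    try:
        return docker_info()['Driver']   # step 2: daemon info(); 'btrfs'
    except Exception:                    # in the MockedClient above
        return 'devicemapper'            # default the failure test asserts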
test_get_rootfs_devicemapper(self, mock_open, mock_client): - utils.dockerutils.driver = 'devicemapper' - assert utils.dockerutils.get_docker_container_rootfs_path( - 'id') == ("/var/lib/docker/devicemapper/mnt/" - "65fe676c24fe1faea1f06e222cc3811cc" - "9b651c381702ca4f787ffe562a5e39b/rootfs") - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=throw_io_error) - def test_get_rootfs_devicemapper_failure(self, mock_open, mock_client): - utils.dockerutils.driver = 'devicemapper' - with self.assertRaises(DockerutilsException): - utils.dockerutils.get_docker_container_rootfs_path('id') - - @mock.patch('utils.dockerutils.misc.btrfs_list_subvolumes', - side_effect=lambda p: - [ - ('ID', '260', 'gen', '22', 'top', - 'level', '5', 'path', 'sub1/abcde'), - ('ID', '260', 'gen', '22', 'top', - 'level', '5', 'path', 'sub1/abcde/sub2'), - ] - ) - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - def test_get_rootfs_btrfs_v1_8(self, mock_client, mock_list): - utils.dockerutils.driver = 'btrfs' - utils.dockerutils.server_version = '1.8.0' - assert utils.dockerutils.get_docker_container_rootfs_path( - 'abcde') == '/var/lib/docker/sub1/abcde' - - @mock.patch('utils.dockerutils.misc.btrfs_list_subvolumes', - side_effect=throw_runtime_error) - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - def test_get_rootfs_btrfs_v1_8_failure(self, mock_client, mock_list): - utils.dockerutils.driver = 'btrfs' - utils.dockerutils.server_version = '1.8.0' - with self.assertRaises(DockerutilsException): - utils.dockerutils.get_docker_container_rootfs_path('abcde') - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=[open('tests/unit/btrfs_mount_init-id')]) - def test_get_rootfs_btrfs_v1_10(self, mock_open, mock_client): - utils.dockerutils.driver = 'btrfs' - utils.dockerutils.server_version = '1.10.0' - assert utils.dockerutils.get_docker_container_rootfs_path( - 'id') == '/var/lib/docker/btrfs/subvolumes/vol1/id/rootfs-a-b-c' - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=throw_io_error) - def test_get_rootfs_btrfs_v1_10_failure(self, mock_open, mock_client): - utils.dockerutils.driver = 'btrfs' - utils.dockerutils.server_version = '1.10.0' - with self.assertRaises(DockerutilsException): - utils.dockerutils.get_docker_container_rootfs_path('abcde') - - @mock.patch('utils.dockerutils.os.path.isdir', - side_effect=lambda d: True) - @mock.patch('utils.dockerutils.os.listdir', - side_effect=lambda d: ['usr', 'boot', 'var']) - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - def test_get_rootfs_aufs_v1_8(self, *args): - utils.dockerutils.driver = 'aufs' - utils.dockerutils.server_version = '1.8.0' - assert utils.dockerutils.get_docker_container_rootfs_path( - 'abcde') == '/var/lib/docker/aufs/mnt/abcde' - - @mock.patch('utils.dockerutils.os.path.isdir', - side_effect=lambda d: False) - @mock.patch('utils.dockerutils.os.listdir', - side_effect=lambda d: ['usr', 'boot', 'var']) - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - def 
test_get_rootfs_aufs_v1_8_failure(self, *args): - utils.dockerutils.driver = 'aufs' - utils.dockerutils.server_version = '1.8.0' - with self.assertRaises(DockerutilsException): - utils.dockerutils.get_docker_container_rootfs_path('abcde') - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=[open('tests/unit/aufs_mount_init-id')]) - def test_get_rootfs_aufs_v1_10(self, *args): - utils.dockerutils.driver = 'aufs' - utils.dockerutils.server_version = '1.10.0' - assert utils.dockerutils.get_docker_container_rootfs_path( - 'abcde') == '/var/lib/docker/aufs/mnt/vol1/id/rootfs-a-b-c' - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=throw_io_error) - def test_get_rootfs_aufs_v1_10_failure(self, *args): - utils.dockerutils.driver = 'aufs' - utils.dockerutils.server_version = '1.10.0' - with self.assertRaises(DockerutilsException): - utils.dockerutils.get_docker_container_rootfs_path('abcde') - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=[open('tests/unit/vfs_mount_init-id')]) - def test_get_rootfs_vfs_v1_10(self, *args): - utils.dockerutils.driver = 'vfs' - utils.dockerutils.server_version = '1.10.0' - assert utils.dockerutils.get_docker_container_rootfs_path( - 'abcde') == '/var/lib/docker/vfs/dir/vol1/id/rootfs-a-b-c' - - @mock.patch('utils.dockerutils.docker.APIClient', - side_effect=lambda base_url, version: MockedClient()) - @mock.patch('utils.dockerutils.open', - side_effect=throw_io_error) - def test_get_rootfs_vfs_v1_10_failure(self, *args): - utils.dockerutils.driver = 'vfs' - utils.dockerutils.server_version = '1.10.0' - with self.assertRaises(DockerutilsException): - utils.dockerutils.get_docker_container_rootfs_path('abcde') diff --git a/tests/unit/test_emitter.py b/tests/unit/test_emitter.py deleted file mode 100644 index c87a024a..00000000 --- a/tests/unit/test_emitter.py +++ /dev/null @@ -1,647 +0,0 @@ -import cStringIO -import gzip -import unittest -import time -import os -import json - -import mock -import requests.exceptions -import plugins_manager - -from base_crawler import BaseFrame -from capturing import Capturing -from emitters_manager import EmittersManager -from plugins.emitters.file_emitter import FileEmitter -from plugins.emitters.base_http_emitter import BaseHttpEmitter -from plugins.emitters.http_emitter import HttpEmitter -from plugins.emitters.https_emitter import HttpsEmitter -from plugins.emitters.sas_emitter import SasEmitter -from plugins.emitters.kafka_emitter import KafkaEmitter -from plugins.emitters.mtgraphite_emitter import MtGraphiteEmitter -from plugins.emitters.fluentd_emitter import FluentdEmitter -from utils import crawler_exceptions - - -def mocked_formatter(frame): - iostream = cStringIO.StringIO() - iostream.write('namespace777.dummy-feature.test2 12345 14804\r\n') - iostream.write('namespace777.dummy-feature.test2 12345 14805\r\n') - return iostream - - -def mocked_formatter1(frame): - iostream = cStringIO.StringIO() - iostream.write('abc\r\n') - iostream.write('def\r\n') - return iostream - -def mocked_formatter2(frame): - iostream = cStringIO.StringIO() - metadata = {} - metadata["timestamp"] = "current-time" - metadata["namespace"] = "my/name" - metadata["features"] = "os,cpu,memory" - metadata["source_type"] 
= "container" - - iostream.write('%s\t%s\t%s\n' % - ('metadata', json.dumps('metadata'), - json.dumps(metadata, separators=(',', ':')))) - return iostream - -def mocked_get_sas_token(): - return ('sas-token', 'cloudoe', 'access-group') - -class RandomKafkaException(Exception): - pass - -def raise_value_error(*args, **kwargs): - raise ValueError() - -def mock_call_with_retries(function, max_retries=10, - exception_type=Exception, - _args=(), _kwargs={}): - return function(*_args, **_kwargs) - - -def mocked_requests_post(*args, **kwargs): - class MockResponse: - - def __init__(self, status_code): - self.status_code = status_code - self.text = 'blablableble' - - def json(self): - return self.json_data - if args[0] == 'http://1.1.1.1/good' or args[0] == 'https://1.1.1.1/good': - return MockResponse(status_code=200) - elif args[0] == 'http://1.1.1.1/bad' or args[0] == 'https://1.1.1.1/bad': - return MockResponse(status_code=500) - elif args[0] == 'http://1.1.1.1/exception' or args[0] == 'https://1.1.1.1/exception': - raise requests.exceptions.RequestException('bla') - elif args[0] == 'http://1.1.1.1/encoding_error' or args[0] == 'https://1.1.1.1/encoding_error': - raise requests.exceptions.ChunkedEncodingError('bla') - - -class MockProducer: - - def __init__(self): - self._produced = [] - - def produce(self, msgs=[]): - self._produced.extend(msgs) - - -def MockedKafkaConnect(self, broker, topic): - self.producer = MockProducer() - - -class MockedMTGraphiteClient: - - def __init__(self, url): - pass - - def send_messages(self, messages): - return 1 - - -class MockFluentdSender: - - def __init__(self): - self._emitted = dict() - - def emit_with_time(self, tag, timestamp, item): - self._emitted.update(item) - self.last_error = None - - def clear_last_error(): - pass - - -def mocked_fluentd_connect(self, host, port): - self.fluentd_sender = MockFluentdSender() - - -class EmitterTests(unittest.TestCase): - image_name = 'alpine:latest' - - def setUp(self): - plugins_manager.emitter_plugins = [] - pass - - def tearDown(self): - pass - - def _test_emitter_csv_simple_stdout(self, compress=False): - emitter = EmittersManager(urls=['stdout://'], - compress=compress) - frame = BaseFrame(feature_types=['os']) - frame.add_features([("dummy_feature", - {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000}, - 'dummy_feature')]) - emitter.emit(frame, 0) - - def test_emitter_csv_simple_stdout(self): - with Capturing() as _output: - self._test_emitter_csv_simple_stdout() - output = "%s" % _output - print _output - assert len(_output) == 2 - assert "dummy_feature" in output - assert "metadata" in output - - def test_emitter_csv_compressed_stdout(self): - with Capturing() as _output: - self._test_emitter_csv_simple_stdout(compress=True) - output = "%s" % _output - assert 'metadata' not in output - assert len(output) > 0 - - def test_emitter_csv_simple_file(self): - emitter = EmittersManager(urls=['file:///tmp/test_emitter'], - compress=False) - frame = BaseFrame(feature_types=['os']) - frame.add_features([("dummy_feature", - {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000}, - 'dummy_feature')]) - emitter.emit(frame, 0) - with open('/tmp/test_emitter.0') as f: - _output = f.readlines() - output = "%s" % _output - print output - assert len(_output) == 2 - assert "dummy_feature" in output - assert "metadata" in output - - def test_emitter_all_features_compressed_csv(self): - emitter = EmittersManager(urls=['file:///tmp/test_emitter'], - compress=True) - frame = 
BaseFrame(feature_types=[]) - frame.add_feature("memory", {'test3': 12345}, 'memory') - frame.add_feature("memory_0", {'test3': 12345}, 'memory') - frame.add_feature("load", {'load': 12345}, 'load') - frame.add_feature("cpu", {'test3': 12345}, 'cpu') - frame.add_feature("cpu_0", {'test3': 12345}, 'cpu') - frame.add_feature("eth0", {'if_tx': 12345}, 'interface') - frame.add_feature("eth0", {'if_rx': 12345}, 'interface') - frame.add_feature("bla/bla", {'ble/ble': 12345}, 'disk') - emitter.emit(frame, 0) - with gzip.open('/tmp/test_emitter.0.gz') as f: - _output = f.readlines() - output = "%s" % _output - print output - assert len(_output) == 9 - assert "metadata" in output - - def test_emitter_all_features_csv(self): - emitter = EmittersManager(urls=['file:///tmp/test_emitter']) - frame = BaseFrame(feature_types=[]) - frame.add_feature("memory", {'test3': 12345}, 'memory') - frame.add_feature("memory_0", {'test3': 12345}, 'memory') - frame.add_feature("load", {'load': 12345}, 'load') - frame.add_feature("cpu", {'test3': 12345}, 'cpu') - frame.add_feature("cpu_0", {'test3': 12345}, 'cpu') - frame.add_feature("eth0", {'if_tx': 12345}, 'interface') - frame.add_feature("eth0", {'if_rx': 12345}, 'interface') - frame.add_feature("bla/bla", {'ble/ble': 12345}, 'disk') - emitter.emit(frame, 0) - with open('/tmp/test_emitter.0') as f: - _output = f.readlines() - output = "%s" % _output - print output - assert len(_output) == 9 - assert "metadata" in output - - def test_emitter_all_features_graphite(self): - emitter = EmittersManager(urls=['file:///tmp/test_emitter'], - format='graphite') - frame = BaseFrame(feature_types=[]) - frame.add_feature("memory", {'test3': 12345}, 'memory') - frame.add_feature("memory_0", {'test3': 12345}, 'memory') - frame.add_feature("load", {'load': 12345}, 'load') - frame.add_feature("cpu", {'test3': 12345}, 'cpu') - frame.add_feature("cpu_0", {'test3': 12345}, 'cpu') - frame.add_feature("eth0", {'if_tx': 12345}, 'interface') - frame.add_feature("eth0", {'if_rx': 12345}, 'interface') - frame.add_feature("bla/bla", {'ble/ble': 12345}, 'disk') - emitter.emit(frame, 0) - with open('/tmp/test_emitter.0') as f: - _output = f.readlines() - output = "%s" % _output - print output - assert 'memory-0.test3 12345' in output - assert len(_output) == 8 - - def _test_emitter_graphite_simple_stdout(self): - emitter = EmittersManager(urls=['stdout://'], - format='graphite') - frame = BaseFrame(feature_types=[]) - frame.metadata['namespace'] = 'namespace777' - frame.add_features([("dummy_feature", - {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000}, - 'dummy_feature')]) - emitter.emit(frame, 0) - - def test_emitter_graphite_simple_stdout(self): - with Capturing() as _output: - self._test_emitter_graphite_simple_stdout() - output = "%s" % _output - # should look like this: - # ['namespace777.dummy-feature.test3 3.000000 1449870719', - # 'namespace777.dummy-feature.test2 2.000000 1449870719', - # 'namespace777.dummy-feature.test4 4.000000 1449870719'] - assert len(_output) == 3 - assert "dummy_feature" not in output # can't have '_' - assert "dummy-feature" in output # can't have '_' - assert "metadata" not in output - assert 'namespace777.dummy-feature.test2' in output - assert 'namespace777.dummy-feature.test3' in output - assert 'namespace777.dummy-feature.test4' in output - # three fields in graphite format - assert len(_output[0].split(' ')) == 3 - # three fields in graphite format - assert len(_output[1].split(' ')) == 3 - # three fields in graphite format 
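# For reference, a sketch of the graphite wire format these assertions pin
# down (formatter name assumed; the real formatter is selected with
# format='graphite'): one '<namespace>.<feature>.<key> <value> <epoch>' line
# per numeric value, with '_' in feature names rewritten to '-' and
# non-numeric values dropped.
import time

def to_graphite_lines(namespace, feature, values, ts=None):
    ts = int(ts or time.time())
    metric = feature.replace('_', '-')
    return ['%s.%s.%s %f %d' % (namespace, metric, key, float(val), ts)
            for key, val in values.items()
            if isinstance(val, (int, float)) and not isinstance(val, bool)]

# to_graphite_lines('namespace777', 'dummy_feature', {'test2': 12345})
# -> ['namespace777.dummy-feature.test2 12345.000000 <epoch>']: three
# space-separated fields, as asserted here.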
- assert len(_output[2].split(' ')) == 3 - assert float(_output[0].split(' ')[1]) == 12345.0 - assert float(_output[1].split(' ')[1]) == 12345.0 - assert float(_output[2].split(' ')[1]) == 12345.0 - - def test_emitter_unsupported_format(self): - metadata = {} - metadata['namespace'] = 'namespace777' - with self.assertRaises( - crawler_exceptions.EmitterUnsupportedFormat): - _ = EmittersManager(urls=['file:///tmp/test_emitter'], - format='unsupported') - - @mock.patch('plugins.emitters.file_emitter.FileEmitter.emit', - side_effect=raise_value_error) - def _test_emitter_failed_emit(self, *args): - with self.assertRaises(ValueError): - emitter = EmittersManager(urls=['file:///tmp/test_emitter'], - format='csv') - frame = BaseFrame(feature_types=[]) - frame.metadata['namespace'] = 'namespace777' - frame.add_feature("memory", {'test3': 12345}, 'memory') - emitter.emit(frame) - - def test_emitter_unsuported_protocol(self): - with self.assertRaises( - crawler_exceptions.EmitterUnsupportedProtocol): - _ = EmittersManager(urls=['error:///tmp/test_emitter'], - format='graphite') - - def test_emitter_graphite_simple_file(self): - emitter = EmittersManager(urls=['file:///tmp/test_emitter'], - format='graphite') - frame = BaseFrame(feature_types=[]) - frame.metadata['namespace'] = 'namespace777' - frame.add_features([("dummy_feature", - {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000}, - 'dummy_feature')]) - emitter.emit(frame) - with open('/tmp/test_emitter.0') as f: - _output = f.readlines() - output = "%s" % _output - # should look like this: - # ['namespace777.dummy-feature.test3 3.000000 1449870719', - # 'namespace777.dummy-feature.test2 2.000000 1449870719', - # 'namespace777.dummy-feature.test4 4.000000 1449870719'] - assert len(_output) == 3 - assert "dummy_feature" not in output # can't have '_' - assert "dummy-feature" in output # can't have '_' - assert "metadata" not in output - assert 'namespace777.dummy-feature.test2' in output - assert 'namespace777.dummy-feature.test3' in output - assert 'namespace777.dummy-feature.test4' in output - # three fields in graphite format - assert len(_output[0].split(' ')) == 3 - # three fields in graphite format - assert len(_output[1].split(' ')) == 3 - # three fields in graphite format - assert len(_output[2].split(' ')) == 3 - assert float(_output[0].split(' ')[1]) == 12345.0 - assert float(_output[1].split(' ')[1]) == 12345.0 - assert float(_output[2].split(' ')[1]) == 12345.0 - - def test_emitter_json_simple_file(self): - emitter = EmittersManager(urls=['file:///tmp/test_emitter'], - format='json') - frame = BaseFrame(feature_types=[]) - frame.metadata['namespace'] = 'namespace777' - frame.add_features([("dummy_feature", - {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000}, - 'dummy_feature')]) - emitter.emit(frame) - with open('/tmp/test_emitter.0') as f: - _output = f.readlines() - output = "%s" % _output - print output - assert len(_output) == 2 - assert "metadata" not in output - assert ( - '{"test3": 12345.0, "test2": 12345, "test4": 12345.0, ' - '"namespace": "namespace777", "test": "bla", "feature_type": ' - '"dummy_feature"}') in output - - def test_emitter_graphite_simple_compressed_file(self): - emitter = EmittersManager(urls=['file:///tmp/test_emitter'], - format='graphite', - compress=True) - frame = BaseFrame(feature_types=[]) - frame.metadata['namespace'] = 'namespace777' - frame.add_features([("dummy_feature", - {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 
12345.00000}, - 'dummy_feature')]) - emitter.emit(frame) - with gzip.open('/tmp/test_emitter.0.gz') as f: - _output = f.readlines() - output = "%s" % _output - # should look like this: - # ['namespace777.dummy-feature.test3 3.000000 1449870719', - # 'namespace777.dummy-feature.test2 2.000000 1449870719', - # 'namespace777.dummy-feature.test4 4.000000 1449870719'] - assert len(_output) == 3 - assert "dummy_feature" not in output # can't have '_' - assert "dummy-feature" in output # can't have '_' - assert "metadata" not in output - assert 'namespace777.dummy-feature.test2' in output - assert 'namespace777.dummy-feature.test3' in output - assert 'namespace777.dummy-feature.test4' in output - # three fields in graphite format - assert len(_output[0].split(' ')) == 3 - # three fields in graphite format - assert len(_output[1].split(' ')) == 3 - # three fields in graphite format - assert len(_output[2].split(' ')) == 3 - assert float(_output[0].split(' ')[1]) == 12345.0 - assert float(_output[1].split(' ')[1]) == 12345.0 - assert float(_output[2].split(' ')[1]) == 12345.0 - - def test_emitter_base_http(self): - emitter = BaseHttpEmitter() - self.assertRaises(NotImplementedError, emitter.get_emitter_protocol) - - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter) - @mock.patch('plugins.emitters.base_http_emitter.requests.post', - side_effect=mocked_requests_post) - @mock.patch('plugins.emitters.base_http_emitter.time.sleep') - def test_emitter_http(self, mock_sleep, mock_post, mock_format): - emitter = HttpEmitter() - emitter.init(url='http://1.1.1.1/good') - emitter.emit('frame') - self.assertEqual(mock_post.call_count, 1) - - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter) - @mock.patch('plugins.emitters.base_http_emitter.requests.post', - side_effect=mocked_requests_post) - @mock.patch('plugins.emitters.base_http_emitter.time.sleep') - def test_emitter_http_server_error(self, mock_sleep, mock_post, mock_format): - emitter = HttpEmitter() - emitter.init(url='http://1.1.1.1/bad') - emitter.emit('frame') - self.assertEqual(mock_post.call_count, 5) - - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter) - @mock.patch('plugins.emitters.base_http_emitter.requests.post', - side_effect=mocked_requests_post) - @mock.patch('plugins.emitters.base_http_emitter.time.sleep') - def test_emitter_http_request_exception(self, mock_sleep, mock_post, mock_format): - emitter = HttpEmitter() - emitter.init(url='http://1.1.1.1/exception') - emitter.emit('frame') - self.assertEqual(mock_post.call_count, 5) - - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter) - @mock.patch('plugins.emitters.base_http_emitter.requests.post', - side_effect=mocked_requests_post) - def test_emitter_http_encoding_error(self, mock_post, mock_format): - emitter = HttpEmitter() - emitter.init(url='http://1.1.1.1/encoding_error') - emitter.emit('frame') - # there are no retries for encoding errors - self.assertEqual(mock_post.call_count, 1) - - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter) - @mock.patch('plugins.emitters.base_http_emitter.requests.post', - side_effect=mocked_requests_post) - @mock.patch('plugins.emitters.base_http_emitter.time.sleep') - def test_emitter_https(self, mock_sleep, mock_post, mock_format): - emitter = HttpsEmitter() - emitter.init(url='https://1.1.1.1/good') - emitter.emit('frame') - self.assertEqual(mock_post.call_count, 1) - - @mock.patch('iemit_plugin.IEmitter.format', - 
side_effect=mocked_formatter) - @mock.patch('plugins.emitters.base_http_emitter.requests.post', - side_effect=mocked_requests_post) - @mock.patch('plugins.emitters.base_http_emitter.time.sleep') - def test_emitter_https_server_error(self, mock_sleep, mock_post, mock_format): - emitter = HttpsEmitter() - emitter.init(url='https://1.1.1.1/bad') - emitter.emit('frame') - self.assertEqual(mock_post.call_count, 5) - - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter) - @mock.patch('plugins.emitters.base_http_emitter.requests.post', - side_effect=mocked_requests_post) - @mock.patch('plugins.emitters.base_http_emitter.time.sleep') - def test_emitter_https_request_exception(self, mock_sleep, mock_post, mock_format): - emitter = HttpsEmitter() - emitter.init(url='https://1.1.1.1/exception') - emitter.emit('frame') - self.assertEqual(mock_post.call_count, 5) - - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter) - @mock.patch('plugins.emitters.base_http_emitter.requests.post', - side_effect=mocked_requests_post) - def test_emitter_https_encoding_error(self, mock_post, mock_format): - emitter = HttpsEmitter() - emitter.init(url='https://1.1.1.1/encoding_error') - emitter.emit('frame') - # there are no retries for encoding errors - self.assertEqual(mock_post.call_count, 1) - - @mock.patch('plugins.emitters.sas_emitter.SasEmitter.get_sas_tokens', - side_effect=mocked_get_sas_token) - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter2) - @mock.patch('plugins.emitters.sas_emitter.requests.post', - side_effect=mocked_requests_post) - @mock.patch('plugins.emitters.base_http_emitter.time.sleep') - def test_emitter_sas(self, mock_sleep, mock_post, mock_format, mock_get_sas_token): - #env = SasEnvironment() - emitter = SasEmitter() - emitter.init(url='sas://1.1.1.1/good') - emitter.emit('frame') - self.assertEqual(mock_post.call_count, 1) - - @mock.patch('plugins.emitters.sas_emitter.SasEmitter.get_sas_tokens', - side_effect=mocked_get_sas_token) - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter2) - @mock.patch('plugins.emitters.sas_emitter.requests.post', - side_effect=mocked_requests_post) - @mock.patch('plugins.emitters.base_http_emitter.time.sleep') - def test_emitter_sas_server_error(self, mock_sleep, mock_post, mock_format, mock_get_sas_token): - #env = SasEnvironment() - emitter = SasEmitter() - emitter.init(url='sas://1.1.1.1/bad') - emitter.emit('frame') - self.assertEqual(mock_post.call_count, 5) - - @mock.patch('plugins.emitters.sas_emitter.SasEmitter.get_sas_tokens', - side_effect=mocked_get_sas_token) - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter2) - @mock.patch('plugins.emitters.sas_emitter.requests.post', - side_effect=mocked_requests_post) - @mock.patch('plugins.emitters.base_http_emitter.time.sleep') - def test_emitter_sas_request_exception(self, mock_sleep, mock_post, mock_format, mock_get_sas_token): - #env = SasEnvironment() - emitter = SasEmitter() - emitter.init(url='sas://1.1.1.1/exception') - emitter.emit('frame') - self.assertEqual(mock_post.call_count, 5) - - @mock.patch('plugins.emitters.sas_emitter.SasEmitter.get_sas_tokens', - side_effect=mocked_get_sas_token) - @mock.patch('iemit_plugin.IEmitter.format', - side_effect=mocked_formatter2) - @mock.patch('plugins.emitters.sas_emitter.requests.post', - side_effect=mocked_requests_post) - def test_emitter_sas_encoding_error(self, mock_post, mock_format, mocked_get_sas_token): - #env = SasEnvironment() - 
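# A minimal sketch (names assumed) of the retry policy the HTTP/HTTPS/SAS
# emitter tests pin down: up to 5 post attempts on server errors and request
# exceptions, a single attempt on success, and no retries at all for
# chunked-encoding errors.
import requests

MAX_EMIT_RETRIES = 5  # assumed constant, matching call_count == 5

def post_with_retries(url, payload):
    for _ in range(MAX_EMIT_RETRIES):
        try:
            response = requests.post(url, data=payload)
        except requests.exceptions.ChunkedEncodingError:
            return None      # encoding errors are not retried
        except requests.exceptions.RequestException:
            continue         # retried until the attempt budget runs out
        if response.status_code == 200:
            return response  # success: exactly one call
    return None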
emitter = SasEmitter() - emitter.init(url='sas://1.1.1.1/encoding_error') - emitter.emit('frame') - # there are no retries for encoding errors - self.assertEqual(mock_post.call_count, 1) - - @mock.patch('plugins.emitters.kafka_emitter.KafkaEmitter.connect_to_broker', - side_effect=MockedKafkaConnect, autospec=True) - @mock.patch('plugins.emitters.kafka_emitter.KafkaEmitter.format', - side_effect=mocked_formatter1) - def test_emitter_kafka(self, *args): - emitter = KafkaEmitter() - emitter.init(url='kafka://1.1.1.1:123/topic1') - emitter.emit('frame') - assert emitter.producer._produced == ['abc\r\ndef\r\n'] - - @mock.patch('plugins.emitters.kafka_emitter.KafkaEmitter.connect_to_broker', - side_effect=MockedKafkaConnect, autospec=True) - @mock.patch('plugins.emitters.kafka_emitter.KafkaEmitter.format', - side_effect=mocked_formatter1) - def test_emitter_kafka_one_per_line(self, *args): - emitter = KafkaEmitter() - emitter.init(url='kafka://1.1.1.1:123/topic1') - emitter.emit_per_line = True - emitter.emit('frame') - assert set(emitter.producer._produced) == set(['abc\r\n', 'def\r\n']) - - @mock.patch('plugins.emitters.mtgraphite_emitter.MTGraphiteClient', - side_effect=MockedMTGraphiteClient, autospec=True) - @mock.patch('plugins.emitters.mtgraphite_emitter.MtGraphiteEmitter.format', - side_effect=mocked_formatter) - def test_emitter_mtgraphite(self, MockMTGraphiteClient, mocked_formatter): - emitter = MtGraphiteEmitter() - emitter.init(url='mtgraphite://1.1.1.1:123/topic1', - max_retries=0) - emitter.emit('frame') - assert MockMTGraphiteClient.call_count == 1 - - @mock.patch('plugins.emitters.fluentd_emitter.FluentdEmitter.connect_to_fluentd_engine', - side_effect=mocked_fluentd_connect, autospec=True) - def test_emitter_fluentd_one_per_line(self, *args): - frame = BaseFrame(feature_types=[]) - frame.metadata['namespace'] = 'namespace777' - frame.metadata['timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S%z') - frame.add_features([("dummy_feature_key", - {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000}, - 'dummy_feature_type')]) - emitter = FluentdEmitter() - emitter.init(url='fluentd://1.1.1.1:123', emit_format='json') - emitter.emit_per_line = True - emitter.emit(frame) - emitted_json = emitter.fluentd_sender._emitted - assert emitted_json["feature_key"] == "dummy_feature_key" - assert emitted_json["feature_type"] == "dummy_feature_type" - assert emitted_json["feature_val"] == {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000} - - @mock.patch('plugins.emitters.fluentd_emitter.FluentdEmitter.connect_to_fluentd_engine', - side_effect=mocked_fluentd_connect, autospec=True) - def test_emitter_fluentd(self, *args): - frame = BaseFrame(feature_types=[]) - frame.metadata['namespace'] = 'namespace777' - frame.metadata['timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S%z') - frame.add_features([("dummy_feature_key", - {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000}, - 'dummy_feature_type')]) - emitter = FluentdEmitter() - emitter.init(url='fluentd://1.1.1.1:123', emit_format='json') - emitter.emit_per_line = False - emitter.emit(frame) - emitted_json = emitter.fluentd_sender._emitted - print emitted_json - assert emitted_json["feature1"]["feature_key"] == "dummy_feature_key" - assert emitted_json["feature1"]["feature_type"] == "dummy_feature_type" - assert emitted_json["feature1"]["feature_val"] == {'test': 'bla', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000} - - def 
test_emitter_logstash_simple_file(self): - emitter = EmittersManager(urls=['file:///tmp/test_emitter'], - format='logstash') - frame = BaseFrame(feature_types=[]) - frame.metadata['namespace'] = 'namespace777' - frame.add_features([("dummy_feature", - {'test': 'dummy', - 'test2': 12345, - 'test3': 12345.0, - 'test4': 12345.00000}, - 'dummy_feature')]) - emitter.emit(frame) - import json - with open('/tmp/test_emitter.0') as f: - output = json.load(f) - assert len(output) == 2 - assert 'metadata' in output - assert 'dummy_feature' in output - assert type(output.get('dummy_feature')) == dict diff --git a/tests/unit/test_gpu_plugin.py b/tests/unit/test_gpu_plugin.py deleted file mode 100644 index 89bf4b3f..00000000 --- a/tests/unit/test_gpu_plugin.py +++ /dev/null @@ -1,34 +0,0 @@ -import unittest -import sys -import mock -sys.path.append('tests/unit/') -sys.modules['pynvml'] = __import__('mock_pynvml') -from plugins.systems.gpu_host_crawler import GPUHostCrawler - -class GPUPluginTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch( - 'plugins.systems.gpu_host_crawler.get_host_ipaddr', - side_effect=lambda: "127.0.0.1") - @mock.patch( - 'plugins.systems.gpu_host_crawler.GPUHostCrawler._load_nvidia_lib', - side_effect=lambda: 1) - def test_os_gpu_host_crawler_plugin(self, *args): - fc = GPUHostCrawler() - for gpu_metrics in fc.crawl(): - print gpu_metrics - assert gpu_metrics == ( - '127/0/0/1.gpu0.NA', - { - "memory": {"total": 12205, "used": 0, "free": 12205}, - "temperature": 31, - "power": {"draw": 27, "limit": 149}, - "utilization": {"gpu": 0, "memory": 0} - }, - 'gpu') diff --git a/tests/unit/test_host_crawler.py b/tests/unit/test_host_crawler.py deleted file mode 100644 index 29fc1eb1..00000000 --- a/tests/unit/test_host_crawler.py +++ /dev/null @@ -1,73 +0,0 @@ -import mock -import unittest -from host_crawler import HostCrawler - - -class MockedOSCrawler: - - def crawl(self, **kwargs): - return [('linux', {'os': 'some_os'}, 'os')] - - -class MockedCPUCrawler: - - def crawl(self, **kwargs): - return [('cpu-0', {'used': 100}, 'cpu')] - - -class MockedOSCrawlerFailure: - - def crawl(self, **kwargs): - raise OSError('some exception') - - -class HostCrawlerTests(unittest.TestCase): - - @mock.patch( - 'host_crawler.plugins_manager.get_host_crawl_plugins', - side_effect=lambda features: [(MockedOSCrawler(), {}), - (MockedCPUCrawler(), {})]) - def test_host_crawler(self, *args): - crawler = HostCrawler(features=['os', 'cpu'], namespace='localhost') - frames = list(crawler.crawl()) - namespaces = [f.metadata['namespace'] for f in frames] - assert namespaces == ['localhost'] - features_count = [f.num_features for f in frames] - assert features_count == [2] - system_types = [f.metadata['system_type'] for f in frames] - assert system_types == ['host'] - assert args[0].call_count == 1 - - @mock.patch( - 'host_crawler.plugins_manager.get_host_crawl_plugins', - side_effect=lambda features: [(MockedOSCrawlerFailure(), {}), - (MockedCPUCrawler(), {})]) - def test_failed_host_crawler(self, *args): - crawler = HostCrawler(features=['os', 'cpu'], namespace='localhost') - with self.assertRaises(OSError): - frames = list(crawler.crawl(ignore_plugin_exception=False)) - assert args[0].call_count == 1 - - @mock.patch( - 'host_crawler.plugins_manager.get_host_crawl_plugins', - side_effect=lambda features: [(MockedCPUCrawler(), {}), - (MockedOSCrawlerFailure(), {}), - (MockedCPUCrawler(), {})]) - def test_failed_host_crawler_with_ignore_failure(self, *args): - 
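# A sketch of the plugin contract these host-crawler tests rely on (class
# name assumed): each plugin exposes crawl(**kwargs) returning an iterable
# of (feature_key, feature_value, feature_type) tuples, which the crawler
# folds into one frame per host; a raising plugin is skipped unless
# ignore_plugin_exception=False.
class MinimalMemoryCrawler(object):

    def crawl(self, **kwargs):
        return [('memory', {'used': 2048, 'free': 1024}, 'memory')]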
crawler = HostCrawler( - features=[ - 'cpu', - 'os', - 'cpu'], - namespace='localhost') - frames = list(crawler.crawl()) - namespaces = sorted([f.metadata['namespace'] for f in frames]) - assert namespaces == sorted(['localhost']) - features_count = [f.num_features for f in frames] - assert features_count == [2] - system_types = [f.metadata['system_type'] for f in frames] - assert system_types == ['host'] - assert args[0].call_count == 1 - -if __name__ == '__main__': - unittest.main() diff --git a/tests/unit/test_jar_plugin.py b/tests/unit/test_jar_plugin.py deleted file mode 100644 index 19c6adc8..00000000 --- a/tests/unit/test_jar_plugin.py +++ /dev/null @@ -1,56 +0,0 @@ -import unittest - -import os -import sys -import tempfile -from zipfile import ZipFile, ZipInfo - -from utils import jar_utils -from utils.features import JarFeature - -# -# https://security.openstack.org/guidelines/dg_using-temporary-files-securely.html -# - -sys.path.append('tests/unit/') -from plugins.systems.jar_host_crawler import JarHostCrawler - - -class JarHashesPluginTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_jar_host_crawler_plugin(self, *args): - tmpdir = tempfile.mkdtemp() - jar_file_name = 'myfile.jar' - - # Ensure the file is read/write by the creator only - saved_umask = os.umask(0077) - - path = os.path.join(tmpdir, jar_file_name) - try: - with ZipFile(path, "w") as myjar: - myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!") - myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!") - myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!") - - fc = JarHostCrawler() - jars = list(fc.crawl(root_dir=tmpdir)) - #jars = list(jar_utils.crawl_jar_files(root_dir=tmpdir)) - print jars - jar_feature = jars[0][1] - assert 'myfile.jar' == jar_feature.name - assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash - assert ['ddc6eff37020aa858e26b1ba8a49ee0e', - 'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes - assert 'jar' == jars[0][2] - - except IOError as e: - print 'IOError' - finally: - os.remove(path) - os.umask(saved_umask) diff --git a/tests/unit/test_jar_utils.py b/tests/unit/test_jar_utils.py deleted file mode 100644 index db121962..00000000 --- a/tests/unit/test_jar_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -import unittest - -import os -import tempfile -from zipfile import ZipFile, ZipInfo - -from utils import jar_utils -from utils.features import JarFeature - -# -# https://security.openstack.org/guidelines/dg_using-temporary-files-securely.html -# - -class JarUtilsTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_get_jar_features(self): - tmpdir = tempfile.mkdtemp() - jar_file_name = 'myfile.jar' - - # Ensure the file is read/write by the creator only - saved_umask = os.umask(0077) - - path = os.path.join(tmpdir, jar_file_name) - try: - with ZipFile(path, "w") as myjar: - myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!") - myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!") - myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!") - - jars = list(jar_utils.crawl_jar_files(root_dir=tmpdir)) - print jars - jar_feature = jars[0][1] - assert 'myfile.jar' == jar_feature.name - assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash - assert ['ddc6eff37020aa858e26b1ba8a49ee0e', - 'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes - assert 'jar' == 
jars[0][2] - - except IOError as e: - print 'IOError' - finally: - os.remove(path) - os.umask(saved_umask) - os.rmdir(tmpdir) diff --git a/tests/unit/test_mesos_url.py b/tests/unit/test_mesos_url.py deleted file mode 100644 index 4ca572c7..00000000 --- a/tests/unit/test_mesos_url.py +++ /dev/null @@ -1,18 +0,0 @@ -# test.py -from mock import patch, Mock - -from utils.mesos import fetch_stats - - -@patch('utils.mesos.urllib2.urlopen') -def mytest(mock_urlopen): - a = Mock() - a.read.side_effect = ['{}', None] - mock_urlopen.return_value = a - res = fetch_stats("0.22.0") - print res - if res is None: - assert res - - -mytest() diff --git a/tests/unit/test_misc.py b/tests/unit/test_misc.py deleted file mode 100644 index 66864ba4..00000000 --- a/tests/unit/test_misc.py +++ /dev/null @@ -1,147 +0,0 @@ -import os -import socket -import unittest - -import mock - -import utils.misc - - -class MockedSocket1(): - - def __init__(self, a, b): - print a, b - pass - - def connect(self, dest): - pass - - def getsockname(self): - return ['1.2.3.4'] - - -class MockedSocket2(): - - def __init__(self, a, b): - print a, b - pass - - def connect(self, dest): - pass - - def getsockname(self): - raise socket.error() - - def gethostname(self): - return '1.2.3.4' - - -class MiscTests(unittest.TestCase): - - def test_find_mount_point(self, tmpdir='/'): - assert utils.misc.find_mount_point(str(tmpdir)) == '/' - - def test_subprocess_run(self): - assert utils.misc.subprocess_run( - 'echo abc', shell=True).strip() == 'abc' - assert utils.misc.subprocess_run('exit 0', shell=True).strip() == '' - with self.assertRaises(RuntimeError): - utils.misc.subprocess_run('exit 1', shell=True) - with self.assertRaises(RuntimeError): - # There should not be a /a/b/c/d/e file - utils.misc.subprocess_run('/a/b/c/d/e', shell=False) - - @mock.patch('utils.misc.open') - def test_get_process_env(self, mock_open): - mock_open.return_value = open('tests/unit/mock_environ_file') - env = utils.misc.get_process_env(pid=os.getpid()) - assert 'HOME' in env - with self.assertRaises(TypeError): - utils.misc.get_process_env('asdf') - - def test_process_is_crawler(self): - assert utils.misc.process_is_crawler(os.getpid()) - assert utils.misc.process_is_crawler(1) is False - # make sure 1123... does not actually exist - assert utils.misc.process_is_crawler(1123234325123235) is False - with self.assertRaises(TypeError): - utils.misc.process_is_crawler('asdf') - - def test_get_host_ip4_addresses(self): - assert '127.0.0.1' in utils.misc.get_host_ip4_addresses() - - def test_is_process_running(self): - assert utils.misc.is_process_running(os.getpid()) - assert utils.misc.is_process_running(1) - # make sure 1123... 
does not actually exist - assert utils.misc.is_process_running(1123234325) is False - with self.assertRaises(TypeError): - utils.misc.is_process_running('asdf') - - @mock.patch('utils.misc.socket.socket', side_effect=MockedSocket1) - def test_get_host_ipaddr1(self, mock_socket): - assert utils.misc.get_host_ipaddr() == '1.2.3.4' - - @mock.patch('utils.misc.socket.socket', side_effect=MockedSocket2) - @mock.patch('utils.misc.socket.gethostname', - side_effect=lambda: '4.3.2.1') - def test_get_host_ipaddr2(self, *args): - assert utils.misc.get_host_ipaddr() == '4.3.2.1' - - def test_execution_path(self): - assert utils.misc.execution_path('abc').endswith('/abc') - - # XXX this is more of a functional test - def test_btrfs_list_subvolumes(self): - # we either have it installed and it will raise a RuntimeError because - # the path provided does not exist or it is not and it will raise a - # RuntimeError. - with self.assertRaises(RuntimeError): - for submodule in utils.misc.btrfs_list_subvolumes('asd'): - pass - - @mock.patch('utils.misc.subprocess_run') - def test_btrfs_list_subvolumes_with_list(self, mock_run): - mock_run.return_value = ( - ("ID 257 gen 7 top level 5 path btrfs/subvolumes/a60a763cbaaedd3ac" - "2b77bff939019fda876d8a187cb7e85789bb36377accbce\n" - "ID 258 gen 8 top level 5 path btrfs/subvolumes/9212798f648314583" - "9c72f06a6bc2b0e456ca2b9ec14ea70e2948f098ce51077\n" - "ID 278 gen 1908 top level 5 path btrfs/subvolumes/7cd6c219c63e02" - "82ddbd8437c9b2a0220aff40bbfd6734503bcd58e5afa28426\n")) - - assert list( - utils.misc.btrfs_list_subvolumes('asd')) == [ - [ - 'ID', - '257', - 'gen', - '7', - 'top', - 'level', - '5', - 'path', - ("btrfs/subvolumes/a60a763cbaaedd3ac2b77bff939019fda876d8a187c" - "b7e85789bb36377accbce")], - [ - 'ID', - '258', - 'gen', - '8', - 'top', - 'level', - '5', - 'path', - ("btrfs/subvolumes/9212798f6483145839c72f06a6bc2b0e456ca2b9ec1" - "4ea70e2948f098ce51077")], - [ - 'ID', - '278', - 'gen', - '1908', - 'top', - 'level', - '5', - 'path', - ("btrfs/subvolumes/7cd6c219c63e0282ddbd8437c9b2a0220aff40bbfd6" - "734503bcd58e5afa28426")]] diff --git a/tests/unit/test_mtgraphite.py b/tests/unit/test_mtgraphite.py deleted file mode 100644 index a19e54fd..00000000 --- a/tests/unit/test_mtgraphite.py +++ /dev/null @@ -1,164 +0,0 @@ -import unittest - -import mock - -from utils.crawler_exceptions import MTGraphiteInvalidTenant -from utils.mtgraphite import MTGraphiteClient - - -class MockedSocket: - - def settimeout(self, n): - pass - - def write(self, str): - return len(str) - - -class MockedConnection: - - def __init__(self): - print 'init mocked connection' - - def connect(self, *args): - pass - - def getsockname(self): - return ['host'] - - def close(self): - pass - - def write(self, str): - return len(str) - - def read(self, n): - return '1A' * n - - -class MockedConnectionBadPassword: - - def __init__(self): - print 'init mocked connection' - - def connect(self, *args): - pass - - def getsockname(self): - return ['host'] - - def close(self): - pass - - def write(self, str): - return len(str) - - def read(self, n): - return '0A' * n # bad password - - -class MTGraphiteTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch('utils.mtgraphite.time.time', side_effect=lambda: 1000) - def test_init(self, *args): - mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/crawler:password', - batch_send_every_t=1, - batch_send_every_n=10) - assert not mt.conn - assert not mt.socket - assert mt.next_timeout == 1001 - assert mt.host 
== '2.2.2.2' - assert mt.port == '123' - assert mt.tenant == 'crawler' - assert mt.password == 'password' - args[0].assert_called() - - @mock.patch('utils.mtgraphite.time.time', side_effect=lambda: 1000) - def test_init_bad_urls(self, *args): - - with self.assertRaises(ValueError): - mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/crawler') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/:password') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('mtgraphite://2.2.2.2:123') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('mtgraphite://2.2.2.2') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('mtgraphite://2.2.2.2/crawler') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('mtgraphite://2.2.2.2/crawler:password') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('mtgraphite://:234/crawler:password') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('mtgraphite://') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('http://1.2.3.4:234/crawler:password') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('host.com:234/crawler:password') - with self.assertRaises(ValueError): - mt = MTGraphiteClient('host') - mt = MTGraphiteClient('mtgraphite://host.com:234/crawler:password') - assert mt - - @mock.patch('utils.mtgraphite.time.sleep') - @mock.patch('utils.mtgraphite.time.time', side_effect=lambda: 1000) - @mock.patch('utils.mtgraphite.socket.socket', - side_effect=lambda a, b: MockedSocket()) - @mock.patch('utils.mtgraphite.ssl.wrap_socket', - side_effect=lambda s, cert_reqs: MockedConnection()) - def test_send(self, *args): - mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/crawler:password', - batch_send_every_t=1000, - batch_send_every_n=3) - assert mt.next_timeout == 2000 - - with self.assertRaises(TypeError): - mt.send_messages(1) - - m1 = mt.construct_message('space', 'group', 'cpu', 100, 1) - m2 = mt.construct_message('space', 'group', 'cpu', 100, 2) - - with self.assertRaises(TypeError): - mt.send_messages(m1) - - # we will not send anything yet as send_every_n is 3 - mt.send_messages([m1, m2]) - assert mt.msgset == [m1, m2] - - # now we should send something - m3 = mt.construct_message('space', 'group', 'cpu', 100, 3) - mt.send_messages([m3]) - assert mt.msgset == [] - - mt.close() - assert mt.conn is None - - @mock.patch('utils.mtgraphite.time.sleep') - @mock.patch('utils.mtgraphite.time.time', side_effect=lambda: 1000) - @mock.patch('utils.mtgraphite.socket.socket', - side_effect=lambda a, b: MockedSocket()) - @mock.patch('utils.mtgraphite.ssl.wrap_socket', - side_effect=lambda s, cert_reqs: MockedConnectionBadPassword()) - def test_send_bad_password(self, *args): - mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/crawler:password', - batch_send_every_t=1000, - batch_send_every_n=3) - assert mt.next_timeout == 2000 - - m1 = mt.construct_message('space', 'group', 'cpu', 100, 1) - m2 = mt.construct_message('space', 'group', 'cpu', 100, 2) - m3 = mt.construct_message('space', 'group', 'cpu', 100, 3) - - with self.assertRaises(MTGraphiteInvalidTenant): - mt.send_messages([m1, m2, m3]) - - assert mt.msgset == [m1, m2, m3] diff --git a/tests/unit/test_namespace.py b/tests/unit/test_namespace.py deleted file mode 100644 index 5fa4813b..00000000 --- a/tests/unit/test_namespace.py +++ /dev/null @@ -1,255 +0,0 @@ -import Queue -import time -import unittest -from 
collections import namedtuple - -import mock - -import utils.namespace -from utils import crawler_exceptions - -os_stat = namedtuple( - 'os_stat', - '''st_mode st_gid st_uid st_atime st_ctime st_mtime st_size st_ino''') - - -def throw_os_error(*args, **kvargs): - raise OSError() - - -def fun_add(x=0): - return x + 1 - - -def fun_not_exiting(x=0): - yield 1 - while True: - time.sleep(1) - - -def fun_failed(x=0): - assert False - - -class MockedLibc: - - def __init__(self): - pass - - def setns(self, namespaces, mode): - pass - - def open(self, path, mode): - return 1 - - def close(self, fd): - pass - - def prctl(self, *args): - print args - - -class MockedLibcNoSetns: - - def __init__(self): - pass - - def syscall(self, syscall_num, namespaces, mode): - return 1 - - def open(self, path, mode): - return 1 - - def close(self, fd): - pass - - def prctl(self, *args): - print args - - -class MockedLibcFailedOpen: - - def __init__(self): - pass - - def setns(self, namespaces, mode): - pass - - def open(self, path, mode): - return -1 - - def close(self, fd): - pass - - def prctl(self, *args): - print args - - -class MockedLibcFailedSetns: - - def __init__(self): - pass - - def setns(self, namespaces, mode): - return -1 - - def open(self, path, mode): - return 1 - - def close(self, fd): - pass - - def prctl(self, *args): - print args - - -class MockedLibcFailedClose: - - def __init__(self): - pass - - def setns(self, namespaces, mode): - pass - - def open(self, path, mode): - return 1 - - def close(self, fd): - return -1 - - def prctl(self, *args): - print args - - -class MockedQueue: - - def __init__(self, *args): - pass - - def get(self, timeout=None): - return (123, None) - - def put(self, item): - pass - - def close(self): - pass - - -class MockedQueueGetTimeout: - - def __init__(self, *args): - pass - - def get(self, timeout=None): - if timeout: - raise Queue.Empty() - - def put(self, item): - pass - - def close(self): - pass - - -class NamespaceTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch('utils.namespace.os.stat', - side_effect=lambda p: os_stat(1, 2, 3, 4, 5, 6, 7, 8)) - def test_pid_namespace(self, *args): - assert utils.namespace.get_pid_namespace(1) == 8 - - @mock.patch('utils.namespace.os.stat', - side_effect=throw_os_error) - def test_pid_namespace_no_process(self, *args): - assert utils.namespace.get_pid_namespace(1) is None - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibc()) - def test_run_as_another_namespace(self, *args): - assert utils.namespace.run_as_another_namespace( - '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) == 2 - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibcFailedOpen()) - def test_run_as_another_namespace_failed_mnt_open(self, *args): - with self.assertRaises( - crawler_exceptions.NamespaceFailedSetns): - utils.namespace.run_as_another_namespace( - '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibcFailedOpen()) - def test_run_as_another_namespace_failed_non_mnt_open(self, *args): - with self.assertRaises( - crawler_exceptions.NamespaceFailedSetns): - utils.namespace.run_as_another_namespace( - '1', ['pid', 'net'], fun_add, 1) - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibcFailedSetns()) - def test_run_as_another_namespace_failed_setns(self, *args): - with self.assertRaises(crawler_exceptions.NamespaceFailedSetns): - 
utils.namespace.run_as_another_namespace( - '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibcFailedSetns()) - def test_run_as_another_namespace_failed_non_mnt_setns(self, *args): - with self.assertRaises(crawler_exceptions.NamespaceFailedSetns): - utils.namespace.run_as_another_namespace( - '1', ['pid', 'net'], fun_add, 1) - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibcFailedClose()) - def test_run_as_another_namespace_failed_close(self, *args): - assert utils.namespace.run_as_another_namespace( - '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) == 2 - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibcNoSetns()) - def test_run_as_another_namespace_no_setns(self, *args): - assert utils.namespace.run_as_another_namespace( - '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) == 2 - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibc()) - def test_run_as_another_namespace_failed_fun(self, *args): - with self.assertRaises(AssertionError): - utils.namespace.run_as_another_namespace( - '1', utils.namespace.ALL_NAMESPACES, fun_failed, 1) - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibc()) - @mock.patch('utils.namespace.multiprocessing.Queue', - side_effect=MockedQueue) - def test_run_as_another_namespace_with_mocked_queue(self, *args): - assert utils.namespace.run_as_another_namespace( - '1', utils.namespace.ALL_NAMESPACES, fun_failed, 1) == 123 - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibc()) - @mock.patch('utils.namespace.multiprocessing.Queue', - side_effect=MockedQueueGetTimeout) - def test_run_as_another_namespace_get_timeout(self, *args): - with self.assertRaises(crawler_exceptions.CrawlTimeoutError): - utils.namespace.run_as_another_namespace( - '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) - - @mock.patch('utils.namespace.get_libc', - side_effect=lambda: MockedLibc()) - @mock.patch('utils.namespace.multiprocessing.Queue', - side_effect=MockedQueue) - def test_run_as_another_namespace_fun_not_exiting_failure(self, *args): - _old_timeout = utils.namespace.IN_PROCESS_TIMEOUT - utils.namespace.IN_PROCESS_TIMEOUT = 0 - with self.assertRaises(crawler_exceptions.CrawlTimeoutError): - utils.namespace.run_as_another_namespace( - '1', utils.namespace.ALL_NAMESPACES, fun_not_exiting, 1) - utils.namespace.IN_PROCESS_TIMEOUT = _old_timeout diff --git a/tests/unit/test_osinfo.py b/tests/unit/test_osinfo.py deleted file mode 100644 index 0eab2bb5..00000000 --- a/tests/unit/test_osinfo.py +++ /dev/null @@ -1,155 +0,0 @@ -import unittest -from unittest import TestCase - -import mock - -from utils.osinfo import (_get_file_name, - parse_lsb_release, - parse_os_release, - parse_redhat_release, - parse_centos_release, - get_osinfo_from_lsb_release, - get_osinfo_from_os_release, - get_osinfo_from_redhat_centos - ) - - -class Test_osinfo(TestCase): - - def test_get_file_name(self): - self.assertEqual(_get_file_name('/', 'xyz'), '/xyz') - self.assertEqual(_get_file_name('/abc/def', 'xyz'), '/abc/def/xyz') - - def test_parse_lsb_release(self): - data = ['DISTRIB_ID=Ubuntu', 'DISTRIB_RELEASE=15.10', - 'DISTRIB_CODENAME=wily' 'DISTRIB_DESCRIPTION="Ubuntu 15.10"'] - result = parse_lsb_release(data) - - self.assertEqual(result['os'], 'ubuntu') - self.assertEqual(result['version'], '15.10') - - def test_parse_os_release(self): - data = ['NAME="Ubuntu"', 'VERSION="14.04.4 LTS, Trusty Tahr"', - 'ID=ubuntu', 
'ID_LIKE=debian', - 'PRETTY_NAME="Ubuntu 14.04.4 LTS"', 'VERSION_ID="14.04"', - 'HOME_URL="http://www.ubuntu.com/"', - 'SUPPORT_URL="http://help.ubuntu.com/"', - 'BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"' - ] - result = parse_os_release(data) - self.assertEqual(result['os'], 'ubuntu') - self.assertEqual(result['version'], '14.04') - - def test_alpine_parse_os_release(self): - data = ['NAME="Alpine Linux"', - 'ID=alpine', - 'VERSION_ID=3.4.0', - 'PRETTY_NAME="Alpine Linux v3.4"', - 'HOME_URL="http://alpinelinux.org"', - 'BUG_REPORT_URL="http://bugs.alpinelinux.org"' - ] - - result = parse_os_release(data) - self.assertEqual(result['os'], 'alpine') - self.assertEqual(result['version'], '3.4.0') - - def test_parse_redhat_release(self): - data = ['Red Hat Enterprise Linux Server release 7.2 (Maipo)'] - - result = parse_redhat_release(data) - self.assertEqual(result['os'], 'rhel') - self.assertEqual(result['version'], '7.2') - - def test2_parse_redhat_release(self): - data = ['Red Hat Enterprise Linux Server release 7 (Maipo)'] - - result = parse_redhat_release(data) - self.assertEqual(result['os'], 'rhel') - self.assertEqual(result['version'], '7') - - def test_parse_centos_release(self): - data = ['CentOS release 6.8 (Final)'] - - result = parse_centos_release(data) - self.assertEqual(result['os'], 'centos') - self.assertEqual(result['version'], '6.8') - - def test2_parse_centos_release(self): - data = ['CentOS Linux release 6.8 (Final)'] - - result = parse_centos_release(data) - self.assertEqual(result['os'], 'centos') - self.assertEqual(result['version'], '6.8') - - def test3_parse_centos_release(self): - data = ['CentOS release 6 (Final)'] - - result = parse_centos_release(data) - self.assertEqual(result['os'], 'centos') - self.assertEqual(result['version'], '6') - - def test_get_osinfo_from_lsb_release(self): - data = ['DISTRIB_ID=Ubuntu', 'DISTRIB_RELEASE=15.10', - 'DISTRIB_CODENAME=wily' 'DISTRIB_DESCRIPTION="Ubuntu 15.10"'] - with mock.patch( - '__builtin__.open', mock.mock_open(read_data="\n".join(data)), - create=True) as m: - m.return_value.__iter__.return_value = data - - result = get_osinfo_from_lsb_release() - self.assertEqual(result['os'], 'ubuntu') - self.assertEqual(result['version'], '15.10') - - def test1_get_osinfo_from_lsb_release(self): - with mock.patch( - '__builtin__.open', mock.mock_open(), create=True) as m: - m.side_effect = IOError() - - result = get_osinfo_from_lsb_release() - self.assertFalse(result) - - def test_get_osinfo_from_os_release(self): - data = ['NAME="Ubuntu"', 'VERSION="14.04.4 LTS, Trusty Tahr"', - 'ID=ubuntu', 'ID_LIKE=debian', - 'PRETTY_NAME="Ubuntu 14.04.4 LTS"', 'VERSION_ID="14.04"', - 'HOME_URL="http://www.ubuntu.com/"', - 'SUPPORT_URL="http://help.ubuntu.com/"', - 'BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"' - ] - with mock.patch( - '__builtin__.open', mock.mock_open(read_data="\n".join(data)), - create=True) as m: - m.return_value.__iter__.return_value = data - - result = get_osinfo_from_os_release() - self.assertEqual(result['os'], 'ubuntu') - self.assertEqual(result['version'], '14.04') - - def test1_get_osinfo_from_os_release(self): - with mock.patch( - '__builtin__.open', mock.mock_open(), create=True) as m: - m.side_effect = IOError() - - result = get_osinfo_from_os_release() - self.assertFalse(result) - - def test_get_osinfo_from_redhat_centos(self): - data = ['Red Hat Enterprise Linux Server release 7.2 (Maipo)'] - with mock.patch( - '__builtin__.open', mock.mock_open(read_data="\n".join(data)), - create=True) as m: - 
m.return_value.__iter__.return_value = data - - result = get_osinfo_from_redhat_centos() - self.assertEqual(result['os'], 'rhel') - self.assertEqual(result['version'], '7.2') - - def mtest1_get_osinfo_from_redhat_centos(self): - with mock.patch( - '__builtin__.open', mock.mock_open(), create=True) as m: - m.side_effect = IOError() - - result = get_osinfo_from_redhat_centos() - self.assertFalse(result) -if __name__ == '__main__': - unittest.main() diff --git a/tests/unit/test_package_utils.py b/tests/unit/test_package_utils.py deleted file mode 100644 index 40f4ce84..00000000 --- a/tests/unit/test_package_utils.py +++ /dev/null @@ -1,87 +0,0 @@ -import unittest - -import mock - -from utils import package_utils -from utils.features import PackageFeature - - -def mocked_subprocess_run(cmd, shell=False, ignore_failure=False): - if 'dpkg-query' in cmd: - return ('pkg1|v1|x86|123\n' - 'pkg2|v2|x86|123') - elif '--queryformat' in cmd: - return ('123|pkg1|v1|x86|123\n' - '123|pkg1|v1|x86|123\n') - - -class PackageUtilsTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - @mock.patch('utils.package_utils.subprocess_run', - side_effect=mocked_subprocess_run) - def test_get_dpkg_packages(self, mock_subprocess_run): - pkgs = list(package_utils.get_dpkg_packages()) - print pkgs - assert pkgs == [ - ('pkg1', - PackageFeature( - installed=None, - pkgname='pkg1', - pkgsize='123', - pkgversion='v1', - pkgarchitecture='x86')), - ('pkg2', - PackageFeature( - installed=None, - pkgname='pkg2', - pkgsize='123', - pkgversion='v2', - pkgarchitecture='x86'))] - - @mock.patch('utils.package_utils.subprocess_run', - side_effect=mocked_subprocess_run) - def test_get_rpm_packages(self, mock_subprocess_run): - pkgs = list(package_utils.get_rpm_packages()) - print pkgs - assert pkgs == [ - ('pkg1', - PackageFeature( - installed='123', - pkgname='pkg1', - pkgsize='123', - pkgversion='v1', - pkgarchitecture='x86')), - ('pkg1', - PackageFeature( - installed='123', - pkgname='pkg1', - pkgsize='123', - pkgversion='v1', - pkgarchitecture='x86'))] - - @mock.patch('utils.package_utils.subprocess_run', - side_effect=mocked_subprocess_run) - def test_get_rpm_packages_with_db_reload(self, mock_subprocess_run): - pkgs = list(package_utils.get_rpm_packages(reload_needed=True)) - print pkgs - assert pkgs == [ - ('pkg1', - PackageFeature( - installed='123', - pkgname='pkg1', - pkgsize='123', - pkgversion='v1', - pkgarchitecture='x86')), - ('pkg1', - PackageFeature( - installed='123', - pkgname='pkg1', - pkgsize='123', - pkgversion='v1', - pkgarchitecture='x86'))] diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py deleted file mode 100644 index 224552e9..00000000 --- a/tests/unit/test_plugins.py +++ /dev/null @@ -1,1790 +0,0 @@ -import types -import unittest -from collections import namedtuple - -import os -import sys -import tempfile -from zipfile import ZipFile, ZipInfo - -from utils import jar_utils -sys.path.append('tests/unit/') - -import mock -from plugins.systems.config_container_crawler import ConfigContainerCrawler -from plugins.systems.config_host_crawler import ConfigHostCrawler -from plugins.systems.connection_container_crawler import ConnectionContainerCrawler -from plugins.systems.connection_host_crawler import ConnectionHostCrawler -from plugins.systems.connection_vm_crawler import ConnectionVmCrawler -from plugins.systems.cpu_container_crawler import CpuContainerCrawler -from plugins.systems.cpu_host_crawler import CpuHostCrawler -from 
plugins.systems.disk_container_crawler import DiskContainerCrawler -from plugins.systems.disk_host_crawler import DiskHostCrawler -from plugins.systems.dockerhistory_container_crawler import DockerhistoryContainerCrawler -from plugins.systems.dockerinspect_container_crawler import DockerinspectContainerCrawler -from plugins.systems.dockerps_host_crawler import DockerpsHostCrawler -from plugins.systems.file_container_crawler import FileContainerCrawler -from plugins.systems.file_host_crawler import FileHostCrawler -from plugins.systems.interface_container_crawler import InterfaceContainerCrawler -from plugins.systems.interface_host_crawler import InterfaceHostCrawler -from plugins.systems.interface_vm_crawler import InterfaceVmCrawler -from plugins.systems.jar_container_crawler import JarContainerCrawler -from plugins.systems.jar_host_crawler import JarHostCrawler -from plugins.systems.load_container_crawler import LoadContainerCrawler -from plugins.systems.load_host_crawler import LoadHostCrawler -from plugins.systems.memory_container_crawler import MemoryContainerCrawler -from plugins.systems.memory_host_crawler import MemoryHostCrawler -from plugins.systems.memory_vm_crawler import MemoryVmCrawler -from plugins.systems.metric_container_crawler import MetricContainerCrawler -from plugins.systems.metric_host_crawler import MetricHostCrawler -from plugins.systems.metric_vm_crawler import MetricVmCrawler -from plugins.systems.os_container_crawler import OSContainerCrawler -from plugins.systems.os_host_crawler import OSHostCrawler -from plugins.systems.os_vm_crawler import os_vm_crawler -from plugins.systems.package_container_crawler import PackageContainerCrawler -from plugins.systems.package_host_crawler import PackageHostCrawler -from plugins.systems.process_container_crawler import ProcessContainerCrawler -from plugins.systems.process_host_crawler import ProcessHostCrawler -from plugins.systems.process_vm_crawler import process_vm_crawler - -from container import Container -from utils.crawler_exceptions import CrawlError -from utils.features import ( - OSFeature, - ConfigFeature, - DiskFeature, - PackageFeature, - MemoryFeature, - CpuFeature, - InterfaceFeature, - LoadFeature, - DockerPSFeature, - JarFeature) - - -# for OUTVM psvmi - - -class DummyContainer(Container): - - def __init__(self, long_id): - self.pid = '1234' - self.long_id = long_id - - def get_memory_cgroup_path(self, node): - return '/cgroup/%s' % node - - def get_cpu_cgroup_path(self, node): - return '/cgroup/%s' % node - -# for OUTVM psvmi -psvmi_sysinfo = namedtuple('psvmi_sysinfo', - '''boottime ipaddr osdistro osname osplatform osrelease - ostype osversion memory_used memory_buffered - memory_cached memory_free''') - -psvmi_memory = namedtuple( - 'psvmi_memory', - 'memory_used memory_buffered memory_cached memory_free') - -psvmi_interface = namedtuple( - 'psvmi_interface', - 'ifname bytes_sent bytes_recv packets_sent packets_recv errout errin') - -os_stat = namedtuple( - 'os_stat', - '''st_mode st_gid st_uid st_atime st_ctime st_mtime st_size''') - - -def mocked_os_walk(root_dir): - files = ['file1', 'file2', 'file3'] - dirs = ['dir'] - yield ('/', dirs, files) - - # simulate the os_walk behavior (if a dir is deleted, we don't walk it) - if '/dir' in dirs: - files = ['file4'] - dirs = [] - yield ('/dir', dirs, files) - - -def mocked_os_walk_for_avoidsetns(root_dir): - files = ['file1', 'file2', 'file3'] - dirs = ['dir'] - yield ('/1/2/3', dirs, files) - - # simulate the os_walk behavior (if a dir is deleted, we don't 
walk it) - if '/1/2/3/dir' in dirs: - files = ['file4'] - dirs = [] - yield ('/dir', dirs, files) - -# XXX can't do self.count = for some reason -mcount = 0 - - -class MockedMemCgroupFile(mock.Mock): - - def __init__(self): - pass - - def readline(self): - return '2' - - def __iter__(self): - return self - - def next(self): - global mcount - mcount += 1 - if mcount == 1: - return 'total_cache 100' - if mcount == 2: - return 'total_active_file 200' - else: - raise StopIteration() - -# XXX can't do self.count = for some reason -ccount = 0 -ccount2 = 0 - - -class MockedCpuCgroupFile(mock.Mock): - - def __init__(self): - pass - - def readline(self): - global ccount2 - ccount2 += 1 - if ccount2 == 1: - return '1e7' - else: - return '2e7' - - def __iter__(self): - return self - - def next(self): - global ccount - ccount += 1 - if ccount == 1: - return 'system 20' - if ccount == 2: - return 'user 20' - else: - raise StopIteration() - - -class MockedFile(mock.Mock): - - def __init__(self): - pass - - def read(self): - return 'content' - - -def mocked_codecs_open(filename, mode, encoding, errors): - m = mock.Mock() - m.__enter__ = mock.Mock(return_value=MockedFile()) - m.__exit__ = mock.Mock(return_value=False) - return m - - -def mocked_cpu_cgroup_open(filename, mode): - m = mock.Mock() - m.__enter__ = mock.Mock(return_value=MockedCpuCgroupFile()) - m.__exit__ = mock.Mock(return_value=False) - print filename - return m - - -def mocked_memory_cgroup_open(filename, mode): - m = mock.Mock() - m.__enter__ = mock.Mock(return_value=MockedMemCgroupFile()) - m.__exit__ = mock.Mock(return_value=False) - print filename - return m - -partition = namedtuple('partition', 'device fstype mountpoint opts') -pdiskusage = namedtuple('pdiskusage', 'percent total') -meminfo = namedtuple('meminfo', 'rss vms') -ioinfo = namedtuple('ioinfo', 'read_bytes write_bytes') -psutils_memory = namedtuple('psutils_memory', 'used free buffers cached') -psutils_cpu = namedtuple( - 'psutils_cpu', - 'idle nice user iowait system irq steal') -psutils_net = namedtuple( - 'psutils_net', - 'bytes_sent bytes_recv packets_sent packets_recv errout errin') - - -def mocked_disk_partitions(all): - return [partition('/dev/a', 'type', '/a', 'opts'), - partition('/dev/b', 'type', '/b', 'opts')] - - -class Connection(): - - def __init__(self): - self.laddr = ['1.1.1.1', '22'] - self.raddr = ['2.2.2.2', '22'] - self.status = 'Established' - - -class Process(): - - def __init__(self, name): - self.name = name - self.cmdline = ['cmd'] - self.pid = 123 - self.status = 'Running' - self.cwd = '/bin' - self.ppid = 1 - self.create_time = 1000 - - def num_threads(self): - return 1 - - def username(self): - return 'don quijote' - - def get_open_files(self): - return [] - - def get_connections(self): - return [Connection()] - - def get_memory_info(self): - return meminfo(10, 20) - - def get_io_counters(self): - return ioinfo(10, 20) - - def get_cpu_percent(self, interval): - return 30 - - def get_memory_percent(self): - return 30 - -STAT_DIR_MODE = 16749 - - -def mocked_os_lstat(path): - print path - if path == '/': - return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7) - elif path == '/file1': - return os_stat(1, 2, 3, 4, 5, 6, 7) - elif path == '/file2': - return os_stat(1, 2, 3, 4, 5, 6, 7) - elif path == '/file3': - return os_stat(1, 2, 3, 4, 5, 6, 7) - elif path == '/dir': - return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7) - else: - return os_stat(1, 2, 3, 4, 5, 6, 7) - - -def mocked_run_as_another_namespace(pid, ns, function, *args, **kwargs): - result = 
function(*args) - # if res is a generator (i.e. function uses yield) - if isinstance(result, types.GeneratorType): - result = list(result) - return result - - -def throw_os_error(*args, **kvargs): - raise OSError() - - -class PluginTests(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_init(self, *args): - pass - - @mock.patch('utils.os_utils.time.time', - side_effect=lambda: 1001) - @mock.patch('utils.os_utils.platform.platform', - side_effect=lambda: 'platform') - @mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses', - side_effect=lambda: ['1.1.1.1']) - @mock.patch('utils.os_utils.psutil.boot_time', - side_effect=lambda: 1000) - @mock.patch('utils.os_utils.platform.system', - side_effect=lambda: 'linux') - @mock.patch('utils.os_utils.platform.machine', - side_effect=lambda: 'machine') - @mock.patch( - 'utils.os_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'os', - 'version': 'os_version'}) - def test_os_host_cawler_plugin(self, *args): - fc = OSHostCrawler() - for os in fc.crawl(): - print os - assert os == ( - 'linux', - OSFeature( - boottime=1000, - uptime=1, - ipaddr=['1.1.1.1'], - os='os', - os_version='os_version', - os_kernel='platform', - architecture='machine'), - 'os') - - for i, arg in enumerate(args): - if i > 0: # time.time is called more than once - continue - assert arg.call_count == 1 - - @mock.patch('utils.os_utils.platform.system', - side_effect=throw_os_error) - def test_os_host_crawler_plugin_failure(self, *args): - fc = OSHostCrawler() - with self.assertRaises(OSError): - for os in fc.crawl(): - pass - - @mock.patch( - 'utils.os_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'os', - 'version': 'os_version'}) - def test_os_host_crawler_plugin_mountpoint_mode(self, *args): - fc = OSHostCrawler() - for os in fc.crawl(root_dir='/a'): - print os - assert os == ( - 'linux', - OSFeature( - boottime='unsupported', - uptime='unsupported', - ipaddr='0.0.0.0', - os='os', - os_version='os_version', - os_kernel='unknown', - architecture='unknown'), - 'os') - for i, arg in enumerate(args): - assert arg.call_count == 1 - - @mock.patch('utils.os_utils.osinfo.get_osinfo', - side_effect=throw_os_error) - def test_os_host_crawler_plugin_mountpoint_mode_failure(self, *args): - fc = OSHostCrawler() - with self.assertRaises(OSError): - for os in fc.crawl(root_dir='/a'): - pass - - @mock.patch('utils.os_utils.time.time', - side_effect=lambda: 1001) - @mock.patch('utils.os_utils.platform.platform', - side_effect=lambda: 'platform') - @mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses', - side_effect=lambda: ['1.1.1.1']) - @mock.patch('utils.os_utils.psutil.boot_time', - side_effect=lambda: 1000) - @mock.patch('utils.os_utils.platform.system', - side_effect=lambda: 'linux') - @mock.patch('utils.os_utils.platform.machine', - side_effect=lambda: 'machine') - @mock.patch( - ("plugins.systems.os_container_crawler." - "run_as_another_namespace"), - side_effect=mocked_run_as_another_namespace) - @mock.patch( - ("plugins.systems.os_container_crawler." 
- "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - 'utils.os_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'os', - 'version': 'os_version'}) - def test_os_container_crawler_plugin(self, *args): - fc = OSContainerCrawler() - for os in fc.crawl(container_id=123): - print os - assert os == ( - 'linux', - OSFeature( - boottime=1000, - uptime=1, - ipaddr=['1.1.1.1'], - os='os', - os_version='os_version', - os_kernel='platform', - architecture='machine'), - 'os') - for i, arg in enumerate(args): - if i > 0: # time.time is called more than once - continue - assert arg.call_count == 1 - - @mock.patch( - ("plugins.systems.os_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.os_container_crawler.utils.dockerutils." - "get_docker_container_rootfs_path"), - side_effect=lambda long_id: '/a/b/c') - @mock.patch( - 'utils.os_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'os', - 'version': 'os_version'}) - def test_os_container_crawler_plugin_avoidsetns(self, *args): - fc = OSContainerCrawler() - for os in fc.crawl(container_id=123, avoid_setns=True): - print os - assert os == ( - 'linux', - OSFeature( - boottime='unsupported', - uptime='unsupported', - ipaddr='0.0.0.0', - os='os', - os_version='os_version', - os_kernel='unknown', - architecture='unknown'), - 'os') - for i, arg in enumerate(args): - print i, arg - if i == 0: - # get_osinfo() - assert arg.call_count == 1 - arg.assert_called_with(mount_point='/a/b/c') - elif i == 1: - # get_docker_container_rootfs_path - assert arg.call_count == 1 - arg.assert_called_with(123) - else: - # exec_dockerinspect - assert arg.call_count == 1 - arg.assert_called_with(123) - - @mock.patch( - ("plugins.systems.os_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.os_container_crawler.utils.dockerutils." 
- "get_docker_container_rootfs_path"), - side_effect=throw_os_error) - def test_os_container_crawler_plugin_avoidsetns_failure(self, *args): - fc = OSContainerCrawler() - with self.assertRaises(OSError): - for os in fc.crawl(container_id=123, avoid_setns=True): - pass - - @mock.patch('plugins.systems.os_vm_crawler.psvmi.context_init', - side_effect=lambda dn1, dn2, kv, d, a: 1000) - @mock.patch('plugins.systems.os_vm_crawler.psvmi.system_info', - side_effect=lambda vmc: psvmi_sysinfo(1000, - '1.1.1.1', - 'osdistro', - 'osname', - 'osplatform', - 'osrelease', - 'ostype', - 'osversion', - 1000000, - 100000, - 100000, - 100000)) - @mock.patch('plugins.systems.os_vm_crawler.psvmi') - def test_os_vm_crawler_plugin_without_vm(self, *args): - fc = os_vm_crawler() - for os in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): - assert os == ( - 'ostype', - OSFeature( - boottime=1000, - uptime='unknown', - ipaddr='1.1.1.1', - os='ostype', - os_version='osversion', - os_kernel='osrelease', - architecture='osplatform'), - 'os') - pass - assert args[1].call_count == 1 - - @mock.patch('utils.file_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.file_utils.os.walk', - side_effect=mocked_os_walk) - @mock.patch('utils.file_utils.os.lstat', - side_effect=mocked_os_lstat) - def test_file_host_crawler(self, *args): - fc = FileHostCrawler() - for (k, f, fname) in fc.crawl(): - print f - assert fname == "file" - assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 - assert f.atime == 4 and f.ctime == 5 - assert f.mtime == 6 and f.size == 7 - assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4'] - assert f.path in ['/', '/file1', '/file2', '/file3', - '/dir', '/dir/file4'] - assert f.type in ['file', 'dir'] - assert f.linksto is None - assert args[0].call_count == 6 - assert args[1].call_count == 1 # oswalk - args[1].assert_called_with('/') - assert args[2].call_count == 2 # isdir - args[2].assert_called_with('/') - - @mock.patch('utils.file_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.file_utils.os.walk', - side_effect=mocked_os_walk) - @mock.patch('utils.file_utils.os.lstat', - side_effect=mocked_os_lstat) - def test_file_host_crawler_with_exclude_dirs(self, *args): - fc = FileHostCrawler() - for (k, f, fname) in fc.crawl(exclude_dirs=['dir']): - print f - assert fname == "file" - assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 - assert f.atime == 4 and f.ctime == 5 - assert f.mtime == 6 and f.size == 7 - assert f.name in ['', 'file1', 'file2', 'file3', 'file4'] - assert f.path in ['/', '/file1', '/file2', '/file3'] - assert f.path not in ['/dir', '/dir/file4'] - assert f.type in ['file', 'dir'] - assert f.linksto is None - assert args[0].call_count == 4 - assert args[1].call_count == 1 # oswalk - args[1].assert_called_with('/') - assert args[2].call_count == 2 # isdir - args[2].assert_called_with('/') - - @mock.patch('utils.file_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.file_utils.os.walk', - side_effect=throw_os_error) - @mock.patch('utils.file_utils.os.lstat', - side_effect=mocked_os_lstat) - def test_file_host_crawler_failure(self, *args): - fc = FileHostCrawler() - with self.assertRaises(OSError): - for (k, f, fname) in fc.crawl(root_dir='/a/b/c'): - pass - - @mock.patch( - ("plugins.systems.file_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.file_container_crawler." 
- "run_as_another_namespace"), - side_effect=mocked_run_as_another_namespace) - @mock.patch('utils.file_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.file_utils.os.walk', - side_effect=mocked_os_walk) - @mock.patch('utils.file_utils.os.lstat', - side_effect=mocked_os_lstat) - def test_file_container_crawler(self, *args): - fc = FileContainerCrawler() - for (k, f, fname) in fc.crawl(root_dir='/'): - assert fname == "file" - assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 - assert f.atime == 4 and f.ctime == 5 - assert f.mtime == 6 and f.size == 7 - assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4'] - assert f.path in ['/', '/file1', '/file2', '/file3', - '/dir', '/dir/file4'] - assert f.type in ['file', 'dir'] - assert f.linksto is None - assert args[0].call_count == 6 - assert args[1].call_count == 1 # oswalk - args[1].assert_called_with('/') - assert args[2].call_count == 2 # isdir - args[2].assert_called_with('/') - - @mock.patch( - ("plugins.systems.jar_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.jar_container_crawler." - "run_as_another_namespace"), - side_effect=mocked_run_as_another_namespace) - def test_jar_container_crawler_plugin(self, *args): - tmpdir = tempfile.mkdtemp() - jar_file_name = 'myfile.jar' - - # Ensure the file is read/write by the creator only - saved_umask = os.umask(0077) - - path = os.path.join(tmpdir, jar_file_name) - try: - with ZipFile(path, "w") as myjar: - myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!") - myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!") - myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!") - - fc = JarContainerCrawler() - jars = list(fc.crawl(root_dir=tmpdir)) - #jars = list(jar_utils.crawl_jar_files(root_dir=tmpdir)) - print jars - jar_feature = jars[0][1] - assert 'myfile.jar' == jar_feature.name - assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash - assert ['ddc6eff37020aa858e26b1ba8a49ee0e', - 'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes - assert 'jar' == jars[0][2] - - except IOError as e: - print 'IOError' - finally: - os.remove(path) - - - @mock.patch( - ("plugins.systems.jar_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.jar_container_crawler.utils.dockerutils." 
- "get_docker_container_rootfs_path"), - side_effect=lambda long_id: '/tmp') - def test_jar_container_crawler_avoidsetns(self, *args): - tmpdir = tempfile.mkdtemp() - jar_file_name = 'myfile.jar' - - # Ensure the file is read/write by the creator only - saved_umask = os.umask(0077) - - path = os.path.join(tmpdir, jar_file_name) - try: - with ZipFile(path, "w") as myjar: - myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!") - myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!") - myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!") - - fc = JarContainerCrawler() - jars = list(fc.crawl(root_dir=os.path.basename(tmpdir), avoid_setns=True)) - print jars - jar_feature = jars[0][1] - assert 'myfile.jar' == jar_feature.name - assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash - assert ['ddc6eff37020aa858e26b1ba8a49ee0e', - 'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes - assert 'jar' == jars[0][2] - - except IOError as e: - print 'IOError' - finally: - os.remove(path) - - @mock.patch( - ("plugins.systems.file_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch('utils.file_utils.os.walk', - side_effect=throw_os_error) - @mock.patch( - ("plugins.systems.file_container_crawler." - "run_as_another_namespace"), - side_effect=mocked_run_as_another_namespace) - @mock.patch('utils.file_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.file_utils.os.lstat', - side_effect=mocked_os_lstat) - def test_file_container_crawler_failure(self, *args): - fc = FileContainerCrawler() - with self.assertRaises(OSError): - for (k, f, fname) in fc.crawl(root_dir='/a/b/c'): - pass - - @mock.patch( - ("plugins.systems.file_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.file_container_crawler.utils.dockerutils." - "get_docker_container_rootfs_path"), - side_effect=lambda long_id: '/1/2/3') - @mock.patch('utils.file_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.file_utils.os.walk', - side_effect=mocked_os_walk_for_avoidsetns) - @mock.patch('utils.file_utils.os.lstat', - side_effect=mocked_os_lstat) - def test_file_container_crawler_avoidsetns(self, *args): - fc = FileContainerCrawler() - for (k, f, fname) in fc.crawl(root_dir='/', avoid_setns=True): - print f - assert fname == "file" - assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 - assert f.atime == 4 and f.ctime == 5 - assert f.mtime == 6 and f.size == 7 - assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4'] - assert f.path in ['/', '/file1', '/file2', '/file3', - '/dir', '/dir/file4'] - assert f.type in ['file', 'dir'] - assert f.linksto is None - assert args[0].call_count == 6 - assert args[1].call_count == 1 # oswalk - args[1].assert_called_with('/1/2/3') - assert args[2].call_count == 2 # isdir - args[2].assert_called_with('/1/2/3') - - @mock.patch( - ("plugins.systems.file_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.file_container_crawler." 
- "run_as_another_namespace"), - side_effect=mocked_run_as_another_namespace) - @mock.patch('utils.file_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.file_utils.os.walk', - side_effect=mocked_os_walk) - @mock.patch('utils.file_utils.os.lstat', - side_effect=mocked_os_lstat) - def test_file_container_crawler_with_exclude_dirs(self, *args): - fc = FileContainerCrawler() - for (k, f, fname) in fc.crawl(root_dir='/', - exclude_dirs=['dir']): - assert fname == "file" - assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 - assert f.atime == 4 and f.ctime == 5 - assert f.mtime == 6 and f.size == 7 - assert f.name in ['', 'file1', 'file2', 'file3', 'file4'] - assert f.path in ['/', '/file1', '/file2', '/file3'] - assert f.path not in ['/dir', '/dir/file4'] - assert f.type in ['file', 'dir'] - assert f.linksto is None - assert args[0].call_count == 4 - assert args[1].call_count == 1 # oswalk - args[1].assert_called_with('/') - assert args[2].call_count == 2 # isdir - args[2].assert_called_with('/') - - @mock.patch( - ("plugins.systems.file_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.file_container_crawler.utils.dockerutils." - "get_docker_container_rootfs_path"), - side_effect=lambda long_id: '/1/2/3') - @mock.patch('utils.file_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.file_utils.os.walk', - side_effect=mocked_os_walk_for_avoidsetns) - @mock.patch('utils.file_utils.os.lstat', - side_effect=mocked_os_lstat) - def test_file_container_crawler_avoidsetns_with_exclude_dirs( - self, - * - args): - fc = FileContainerCrawler() - for (k, f, fname) in fc.crawl(root_dir='/', - avoid_setns=True, - exclude_dirs=['/dir']): - assert fname == "file" - assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 - assert f.atime == 4 and f.ctime == 5 - assert f.mtime == 6 and f.size == 7 - assert f.name in ['', 'file1', 'file2', 'file3', 'file4'] - assert f.path in ['/', '/file1', '/file2', '/file3'] - assert f.path not in ['/dir', '/dir/file4'] - assert f.type in ['file', 'dir'] - assert f.linksto is None - assert args[0].call_count == 4 - assert args[1].call_count == 1 # oswalk - args[1].assert_called_with('/1/2/3') - assert args[2].call_count == 2 # isdir - args[2].assert_called_with('/1/2/3') - - @mock.patch('utils.config_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.lstat', - side_effect=mocked_os_lstat) - @mock.patch('utils.config_utils.codecs.open', - side_effect=mocked_codecs_open) - def test_config_host_crawler(self, *args): - fc = ConfigHostCrawler() - for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'], - discover_config_files=False): - assert fname == "config" - assert f == ConfigFeature(name='file1', content='content', - path='/etc/file1') - assert args[0].call_count == 1 # lstat - - @mock.patch('utils.config_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.walk', - side_effect=lambda p: [ - ('/', [], ['file1', 'file2', 'file3.conf'])]) - @mock.patch('utils.config_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.path.isfile', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.path.getsize', - side_effect=lambda p: 1000) - @mock.patch('utils.config_utils.os.lstat', - 
side_effect=mocked_os_lstat) - @mock.patch('utils.config_utils.codecs.open', - side_effect=mocked_codecs_open) - def test_config_host_crawler_with_discover(self, *args): - fc = ConfigHostCrawler() - - configs = fc.crawl(known_config_files=['/etc/file1'], - discover_config_files=True) - print configs - assert set(configs) == set([('/file3.conf', - ConfigFeature(name='file3.conf', - content='content', - path='/file3.conf'), - 'config'), - ('/etc/file1', - ConfigFeature(name='file1', - content='content', - path='/etc/file1'), - 'config')]) - - @mock.patch( - ("plugins.systems.config_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - 'plugins.systems.config_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch('utils.config_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.lstat', - side_effect=mocked_os_lstat) - @mock.patch('utils.config_utils.codecs.open', - side_effect=mocked_codecs_open) - def test_config_container_crawler(self, *args): - fc = ConfigContainerCrawler() - for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'], - discover_config_files=False): - assert fname == "config" - assert f == ConfigFeature(name='file1', content='content', - path='/etc/file1') - assert args[0].call_count == 1 # codecs open - - @mock.patch('utils.config_utils.codecs.open', - side_effect=mocked_codecs_open) - @mock.patch('utils.config_utils.os.lstat', - side_effect=mocked_os_lstat) - @mock.patch( - ("plugins.systems.config_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - 'plugins.systems.config_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch('utils.config_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.walk', - side_effect=lambda p: [ - ('/', [], ['file1', 'file2', 'file3.conf'])]) - @mock.patch('utils.config_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.path.isfile', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.path.getsize', - side_effect=lambda p: 1000) - def test_config_container_crawler_discover(self, *args): - fc = ConfigContainerCrawler() - - configs = fc.crawl(known_config_files=['/etc/file1'], - discover_config_files=True) - assert set(configs) == set([('/file3.conf', - ConfigFeature(name='file3.conf', - content='content', - path='/file3.conf'), - 'config'), - ('/etc/file1', - ConfigFeature(name='file1', - content='content', - path='/etc/file1'), - 'config')]) - - @mock.patch( - ("plugins.systems.config_container_crawler." - "run_as_another_namespace"), - side_effect=mocked_run_as_another_namespace) - @mock.patch( - ("plugins.systems.config_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.config_container_crawler.utils.dockerutils." 
- "get_docker_container_rootfs_path"), - side_effect=lambda long_id: '/1/2/3') - @mock.patch('utils.config_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.lstat', - side_effect=mocked_os_lstat) - @mock.patch('utils.config_utils.codecs.open', - side_effect=mocked_codecs_open) - def test_config_container_crawler_avoidsetns(self, *args): - fc = ConfigContainerCrawler() - for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'], - discover_config_files=False, - avoid_setns=True): - assert fname == "config" - assert f == ConfigFeature(name='file1', content='content', - path='/etc/file1') - assert args[0].call_count == 1 # lstat - - @mock.patch( - ("plugins.systems.config_container_crawler." - "run_as_another_namespace"), - side_effect=mocked_run_as_another_namespace) - @mock.patch( - ("plugins.systems.config_container_crawler." - "utils.dockerutils.exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.config_container_crawler.utils.dockerutils." - "get_docker_container_rootfs_path"), - side_effect=lambda long_id: '/1/2/3') - @mock.patch('utils.config_utils.os.path.isdir', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.walk', - side_effect=lambda p: [ - ('/', [], ['file1', 'file2', 'file3.conf'])]) - @mock.patch('utils.config_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.path.isfile', - side_effect=lambda p: True) - @mock.patch('utils.config_utils.os.path.getsize', - side_effect=lambda p: 1000) - @mock.patch('utils.config_utils.os.lstat', - side_effect=mocked_os_lstat) - @mock.patch('utils.config_utils.codecs.open', - side_effect=mocked_codecs_open) - def test_config_container_crawler_avoidsetns_discover(self, *args): - fc = ConfigContainerCrawler() - configs = fc.crawl(known_config_files=['/etc/file1'], - avoid_setns=True, - discover_config_files=True) - assert set(configs) == set([('/file3.conf', - ConfigFeature(name='file3.conf', - content='content', - path='/file3.conf'), - 'config'), - ('/etc/file1', - ConfigFeature(name='file1', - content='content', - path='/etc/file1'), - 'config')]) - - @mock.patch( - 'utils.package_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'ubuntu', - 'version': '123'}) - @mock.patch('utils.package_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.package_utils.get_dpkg_packages', - side_effect=lambda a, b, c: [('pkg1', - PackageFeature(None, 'pkg1', - 123, 'v1', - 'x86'))]) - def test_package_host_crawler_dpkg(self, *args): - fc = PackageHostCrawler() - for (k, f, fname) in fc.crawl(): - assert fname == "package" - assert f == PackageFeature( - installed=None, - pkgname='pkg1', - pkgsize=123, - pkgversion='v1', - pkgarchitecture='x86') - assert args[0].call_count == 1 - args[0].assert_called_with('/', 'var/lib/dpkg', 0) - - @mock.patch( - 'utils.package_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'ubuntu', - 'version': '123'}) - @mock.patch('utils.package_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.package_utils.get_dpkg_packages', - side_effect=throw_os_error) - def test_package_host_crawler_dpkg_failure(self, *args): - fc = PackageHostCrawler() - with self.assertRaises(CrawlError): - for (k, f, fname) in fc.crawl(): - pass - assert args[0].call_count == 1 - args[0].assert_called_with('/', 'var/lib/dpkg', 
0) - - @mock.patch( - 'utils.package_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'redhat', - 'version': '123'}) - @mock.patch('utils.package_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.package_utils.get_rpm_packages', - side_effect=lambda a, b, c, d: [('pkg1', - PackageFeature(None, 'pkg1', - 123, 'v1', - 'x86'))]) - def test_package_host_crawler_rpm(self, *args): - fc = PackageHostCrawler() - for (k, f, fname) in fc.crawl(): - assert fname == "package" - assert f == PackageFeature( - installed=None, - pkgname='pkg1', - pkgsize=123, - pkgversion='v1', - pkgarchitecture='x86') - assert args[0].call_count == 1 - args[0].assert_called_with('/', 'var/lib/rpm', 0, False) - - @mock.patch( - ("plugins.systems.package_container_crawler." - "exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - 'utils.package_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'ubuntu', - 'version': '123'}) - @mock.patch( - 'plugins.systems.package_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch('utils.package_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.package_utils.get_dpkg_packages', - side_effect=lambda a, b, c: [('pkg1', - PackageFeature(None, 'pkg1', - 123, 'v1', - 'x86'))]) - def test_package_container_crawler_dpkg(self, *args): - fc = PackageContainerCrawler() - for (k, f, fname) in fc.crawl(): - assert fname == "package" - assert f == PackageFeature( - installed=None, - pkgname='pkg1', - pkgsize=123, - pkgversion='v1', - pkgarchitecture='x86') - assert args[0].call_count == 1 - args[0].assert_called_with('/', 'var/lib/dpkg', 0) - - @mock.patch( - ("plugins.systems.package_container_crawler." - "exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - 'plugins.systems.package_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch( - ("plugins.systems.package_container_crawler." - "get_docker_container_rootfs_path"), - side_effect=lambda long_id: '/a/b/c') - @mock.patch( - 'utils.package_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'ubuntu', - 'version': '123'}) - @mock.patch('utils.package_utils.os.path.exists', - side_effect=lambda p: True if 'dpkg' in p else False) - @mock.patch('utils.package_utils.get_dpkg_packages', - side_effect=throw_os_error) - def test_package_container_crawler_dpkg_failure(self, *args): - fc = PackageContainerCrawler() - with self.assertRaises(CrawlError): - for (k, f, fname) in fc.crawl(): - pass - # get_dpkg_packages is called a second time after the first failure. - # first time is OUTCONTAINER mode with setns - # second time is OUTCONTAINER mode with avoid_setns - assert args[0].call_count == 2 - args[0].assert_called_with('/a/b/c', 'var/lib/dpkg', 0) - args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo() - - @mock.patch( - ("plugins.systems.package_container_crawler." - "exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - 'plugins.systems.package_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch( - ("plugins.systems.package_container_crawler." 
- "get_docker_container_rootfs_path"), - side_effect=lambda long_id: '/a/b/c') - @mock.patch( - 'utils.package_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'redhat', - 'version': '123'}) - @mock.patch('utils.package_utils.os.path.exists', - side_effect=lambda p: True if 'rpm' in p else False) - @mock.patch('utils.package_utils.get_rpm_packages', - side_effect=throw_os_error) - def test_package_container_crawler_rpm_failure(self, *args): - fc = PackageContainerCrawler() - with self.assertRaises(CrawlError): - for (k, f, fname) in fc.crawl(): - pass - # get_dpkg_packages is called a second time after the first failure. - # first time is OUTCONTAINER mode with setns - # second time is OUTCONTAINER mode with avoid_setns - assert args[0].call_count == 2 - args[0].assert_called_with('/a/b/c', 'var/lib/rpm', 0, True) - args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo() - - @mock.patch( - ("plugins.systems.package_container_crawler." - "exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - ("plugins.systems.package_container_crawler." - "get_docker_container_rootfs_path"), - side_effect=lambda long_id: '/a/b/c') - @mock.patch( - 'utils.package_utils.osinfo.get_osinfo', - side_effect=lambda mount_point=None: { - 'os': 'ubuntu', - 'version': '123'}) - @mock.patch('utils.package_utils.os.path.exists', - side_effect=lambda p: True) - @mock.patch('utils.package_utils.get_dpkg_packages', - side_effect=lambda a, b, c: [('pkg1', - PackageFeature(None, 'pkg1', - 123, 'v1', - 'x86'))]) - def test_package_container_crawler_avoidsetns(self, *args): - fc = PackageContainerCrawler() - for (k, f, fname) in fc.crawl(avoid_setns=True): - assert fname == "package" - assert f == PackageFeature( - installed=None, - pkgname='pkg1', - pkgsize=123, - pkgversion='v1', - pkgarchitecture='x86') - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.process_host_crawler.psutil.process_iter', - side_effect=lambda: [Process('init')]) - def test_process_host_crawler(self, *args): - fc = ProcessHostCrawler() - for (k, f, fname) in fc.crawl(): - print f - assert fname == "process" - assert f.pname == 'init' - assert f.cmd == 'cmd' - assert f.pid == 123 - assert args[0].call_count == 1 - - @mock.patch( - ("plugins.systems.process_container_crawler.utils.dockerutils." 
- "exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - @mock.patch( - 'plugins.systems.process_container_crawler.psutil.process_iter', - side_effect=lambda: [Process('init')]) - @mock.patch( - 'plugins.systems.process_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - def test_process_container_crawler(self, *args): - fc = ProcessContainerCrawler() - for (k, f, fname) in fc.crawl('123'): - print f - assert fname == "process" - assert f.pname == 'init' - assert f.cmd == 'cmd' - assert f.pid == 123 - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.process_vm_crawler.psvmi.context_init', - side_effect=lambda dn1, dn2, kv, d, a: 1000) - @mock.patch('plugins.systems.process_vm_crawler.psvmi.process_iter', - side_effect=lambda vmc: [Process('init')]) - @mock.patch('plugins.systems.process_vm_crawler.psvmi') - def test_process_vm_crawler(self, *args): - fc = process_vm_crawler() - for (k, f, fname) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): - print f - assert fname == "process" - assert f.pname == 'init' - assert f.cmd == 'cmd' - assert f.pid == 123 - assert args[1].call_count == 1 # process_iter - - @mock.patch('utils.disk_utils.psutil.disk_partitions', - side_effect=mocked_disk_partitions) - @mock.patch('utils.disk_utils.psutil.disk_usage', - side_effect=lambda x: pdiskusage(10, 100)) - def test_crawl_disk_partitions_invm_mode(self, *args): - fc = DiskHostCrawler() - disks = fc.crawl() - assert set(disks) == set([('/a', - DiskFeature(partitionname='/dev/a', - freepct=90.0, - fstype='type', - mountpt='/a', - mountopts='opts', - partitionsize=100), - 'disk'), - ('/b', - DiskFeature(partitionname='/dev/b', - freepct=90.0, - fstype='type', - mountpt='/b', - mountopts='opts', - partitionsize=100), - 'disk')]) - - @mock.patch( - 'plugins.systems.disk_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch('utils.disk_utils.psutil.disk_partitions', - side_effect=mocked_disk_partitions) - @mock.patch('utils.disk_utils.psutil.disk_usage', - side_effect=lambda x: pdiskusage(10, 100)) - @mock.patch( - ("plugins.systems.disk_container_crawler.utils.dockerutils." 
- "exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - def test_crawl_disk_partitions_outcontainer_mode(self, *args): - fc = DiskContainerCrawler() - disks = fc.crawl('123') - assert set(disks) == set([('/a', - DiskFeature(partitionname='/dev/a', - freepct=90.0, - fstype='type', - mountpt='/a', - mountopts='opts', - partitionsize=100), - 'disk'), - ('/b', - DiskFeature(partitionname='/dev/b', - freepct=90.0, - fstype='type', - mountpt='/b', - mountopts='opts', - partitionsize=100), - 'disk')]) - - @mock.patch('utils.metric_utils.psutil.process_iter', - side_effect=lambda: [Process('init')]) - def test_crawl_metrics_invm_mode(self, *args): - fc = MetricHostCrawler() - for (k, f, t) in fc.crawl(): - assert f.cpupct == 30.0 - assert f.mempct == 30.0 - assert f.pname == 'init' - assert f.pid == 123 - assert f.rss == 10 - assert f.status == 'Running' - assert f.vms == 20 - assert f.read == 10 - assert f.write == 20 - assert args[0].call_count == 1 - - @mock.patch('utils.metric_utils.psutil.process_iter', - side_effect=lambda: [Process('init')]) - @mock.patch('utils.metric_utils.round', - side_effect=throw_os_error) - def test_crawl_metrics_invm_mode_failure(self, *args): - with self.assertRaises(OSError): - fc = MetricHostCrawler() - for ff in fc.crawl(): - pass - assert args[0].call_count == 1 - - @mock.patch('utils.metric_utils.psutil.process_iter', - side_effect=lambda: [Process('init')]) - @mock.patch( - 'plugins.systems.metric_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch( - ("plugins.systems.disk_container_crawler.utils.dockerutils." - "exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - def test_crawl_metrics_outcontainer_mode(self, *args): - fc = MetricContainerCrawler() - for (k, f, t) in fc.crawl('123'): - assert f.cpupct == 30.0 - assert f.mempct == 30.0 - assert f.pname == 'init' - assert f.pid == 123 - assert f.rss == 10 - assert f.status == 'Running' - assert f.vms == 20 - assert f.read == 10 - assert f.write == 20 - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.metric_vm_crawler.psvmi.context_init', - side_effect=lambda dn1, dn2, kv, d, a: 1000) - @mock.patch('plugins.systems.metric_vm_crawler.psvmi.process_iter', - side_effect=lambda vmc: [Process('init')]) - @mock.patch( - ("plugins.systems.metric_vm_crawler." 
- "MetricVmCrawler._crawl_metrics_cpu_percent"), - side_effect=lambda proc: 30.0) - @mock.patch('plugins.systems.metric_vm_crawler.psvmi') - def test_crawl_metrics_vm_mode(self, *args): - fc = MetricVmCrawler() - for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): - assert f.cpupct == 30.0 - assert f.mempct == 30.0 - assert f.pname == 'init' - assert f.pid == 123 - assert f.rss == 10 - assert f.status == 'Running' - assert f.vms == 20 - assert f.read == 10 - assert f.write == 20 - assert args[1].call_count == 1 # process_iter - - @mock.patch('utils.connection_utils.psutil.process_iter', - side_effect=lambda: [Process('init')]) - def test_crawl_connections_invm_mode(self, *args): - fc = ConnectionHostCrawler() - for (k, f, t) in fc.crawl(): - assert f.localipaddr == '1.1.1.1' - assert f.remoteipaddr == '2.2.2.2' - assert f.localport == '22' - assert f.remoteport == '22' - assert args[0].call_count == 1 - - @mock.patch('utils.connection_utils.psutil.process_iter', - side_effect=lambda: [Process('init')]) - @mock.patch( - 'plugins.systems.connection_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch( - ("plugins.systems.connection_container_crawler.utils.dockerutils." - "exec_dockerinspect"), - side_effect=lambda long_id: {'State': {'Pid': 123}}) - def test_crawl_connections_outcontainer_mode(self, *args): - fc = ConnectionContainerCrawler() - for (k, f, t) in fc.crawl('123'): - assert f.localipaddr == '1.1.1.1' - assert f.remoteipaddr == '2.2.2.2' - assert f.localport == '22' - assert f.remoteport == '22' - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.connection_vm_crawler.psvmi.context_init', - side_effect=lambda dn1, dn2, kv, d, a: 1000) - @mock.patch('plugins.systems.connection_vm_crawler.psvmi.process_iter', - side_effect=lambda vmc: [Process('init')]) - @mock.patch('plugins.systems.connection_vm_crawler.psvmi') - def test_crawl_connections_outvm_mode(self, *args): - fc = ConnectionVmCrawler() - for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): - assert f.localipaddr == '1.1.1.1' - assert f.remoteipaddr == '2.2.2.2' - assert f.localport == '22' - assert f.remoteport == '22' - assert args[1].call_count == 1 - - @mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory', - side_effect=lambda: psutils_memory(2, 2, 3, 4)) - def test_crawl_memory_invm_mode(self, *args): - fc = MemoryHostCrawler() - for (k, f, t) in fc.crawl(): - assert f == MemoryFeature( - memory_used=2, - memory_buffered=3, - memory_cached=4, - memory_free=2, - memory_util_percentage=50) - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory', - side_effect=throw_os_error) - def test_crawl_memory_invm_mode_failure(self, *args): - fc = MemoryHostCrawler() - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl(): - pass - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.memory_vm_crawler.psvmi.context_init', - side_effect=lambda dn1, dn2, kv, d, a: 1000) - @mock.patch('plugins.systems.memory_vm_crawler.psvmi.system_memory_info', - side_effect=lambda vmc: psvmi_memory(10, 20, 30, 40)) - @mock.patch('plugins.systems.memory_vm_crawler.psvmi') - def test_crawl_memory_outvm_mode(self, *args): - fc = MemoryVmCrawler() - for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): - assert f == MemoryFeature( - memory_used=10, - memory_buffered=20, - memory_cached=30, - memory_free=40, - memory_util_percentage=20) - assert 
args[1].call_count == 1 - - @mock.patch( - 'plugins.systems.memory_container_crawler.psutil.virtual_memory', - side_effect=lambda: psutils_memory( - 10, - 10, - 3, - 10)) - @mock.patch('plugins.systems.memory_container_crawler.open', - side_effect=mocked_memory_cgroup_open) - @mock.patch('plugins.systems.memory_container_crawler.DockerContainer', - side_effect=lambda container_id: DummyContainer(container_id)) - def test_crawl_memory_outcontainer_mode(self, *args): - fc = MemoryContainerCrawler() - for (k, f, t) in fc.crawl('123'): - assert f == MemoryFeature( - memory_used=2, - memory_buffered=200, - memory_cached=100, - memory_free=0, - memory_util_percentage=100) - assert args[1].call_count == 3 # 3 cgroup files - - @mock.patch( - 'plugins.systems.memory_container_crawler.psutil.virtual_memory', - side_effect=lambda: psutils_memory( - 10, - 10, - 3, - 10)) - @mock.patch('plugins.systems.memory_container_crawler.open', - side_effect=throw_os_error) - @mock.patch('plugins.systems.memory_container_crawler.DockerContainer', - side_effect=lambda container_id: DummyContainer(container_id)) - def test_crawl_memory_outcontainer_mode_failure(self, *args): - fc = MemoryContainerCrawler() - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl('123'): - pass - assert args[1].call_count == 1 # 1 cgroup files - - @mock.patch( - 'plugins.systems.cpu_host_crawler.psutil.cpu_times_percent', - side_effect=lambda percpu: [ - psutils_cpu( - 10, - 20, - 30, - 40, - 50, - 60, - 70)]) - def test_crawl_cpu_invm_mode(self, *args): - fc = CpuHostCrawler() - for (k, f, t) in fc.crawl(): - assert f == CpuFeature( - cpu_idle=10, - cpu_nice=20, - cpu_user=30, - cpu_wait=40, - cpu_system=50, - cpu_interrupt=60, - cpu_steal=70, - cpu_util=90) - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.cpu_host_crawler.psutil.cpu_times_percent', - side_effect=throw_os_error) - def test_crawl_cpu_invm_mode_failure(self, *args): - fc = CpuHostCrawler() - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl(): - pass - assert args[0].call_count == 1 - - @mock.patch( - 'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent', - side_effect=lambda percpu: [ - psutils_cpu( - 10, - 20, - 30, - 40, - 50, - 60, - 70)]) - @mock.patch('plugins.systems.cpu_container_crawler.time.sleep') - @mock.patch('plugins.systems.cpu_container_crawler.open', - side_effect=mocked_cpu_cgroup_open) - @mock.patch('plugins.systems.cpu_container_crawler.DockerContainer', - side_effect=lambda container_id: DummyContainer(container_id)) - def test_crawl_cpu_outcontainer_mode(self, *args): - fc = CpuContainerCrawler() - for (k, f, t) in fc.crawl('123'): - assert f == CpuFeature( - cpu_idle=90.0, - cpu_nice=20, - cpu_user=5.0, - cpu_wait=40, - cpu_system=5.0, - cpu_interrupt=60, - cpu_steal=70, - cpu_util=10.0) - assert args[1].call_count == 3 # open for 3 cgroup files - - @mock.patch( - 'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent', - side_effect=lambda percpu: [ - psutils_cpu( - 10, - 20, - 30, - 40, - 50, - 60, - 70)]) - @mock.patch('plugins.systems.cpu_container_crawler.time.sleep') - @mock.patch('plugins.systems.cpu_container_crawler.open', - side_effect=throw_os_error) - @mock.patch('plugins.systems.cpu_container_crawler.DockerContainer', - side_effect=lambda container_id: DummyContainer(container_id)) - def test_crawl_cpu_outcontainer_mode_failure(self, *args): - fc = CpuContainerCrawler() - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl('123'): - pass - assert args[0].call_count == 1 
- - @mock.patch( - 'plugins.systems.interface_host_crawler.psutil.net_io_counters', - side_effect=lambda pernic: {'interface1-unit-tests': - psutils_net( - 10, - 20, - 30, - 40, - 50, - 60)}) - def test_crawl_interface_invm_mode(self, *args): - fc = InterfaceHostCrawler() - for (k, f, t) in fc.crawl(): - assert f == InterfaceFeature( - if_octets_tx=0, - if_octets_rx=0, - if_packets_tx=0, - if_packets_rx=0, - if_errors_tx=0, - if_errors_rx=0) - - for (k, f, t) in fc.crawl(): - assert f == InterfaceFeature( - if_octets_tx=0, - if_octets_rx=0, - if_packets_tx=0, - if_packets_rx=0, - if_errors_tx=0, - if_errors_rx=0) - assert args[0].call_count == 2 - - @mock.patch( - 'plugins.systems.interface_host_crawler.psutil.net_io_counters', - side_effect=throw_os_error) - def test_crawl_interface_invm_mode_failure(self, *args): - fc = InterfaceHostCrawler() - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl(): - pass - - # Each crawl in crawlutils.py instantiates a FeaturesCrawler object - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl(): - pass - assert args[0].call_count == 2 - - @mock.patch('plugins.systems.interface_container_crawler.DockerContainer', - side_effect=lambda container_id: DummyContainer(container_id)) - @mock.patch( - 'plugins.systems.interface_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch( - 'plugins.systems.interface_container_crawler.psutil.net_io_counters', - side_effect=lambda pernic: {'eth0': - psutils_net( - 10, - 20, - 30, - 40, - 50, - 60)}) - def test_crawl_interface_outcontainer_mode(self, *args): - fc = InterfaceContainerCrawler() - for (k, f, t) in fc.crawl('123'): - assert f == InterfaceFeature( - if_octets_tx=0, - if_octets_rx=0, - if_packets_tx=0, - if_packets_rx=0, - if_errors_tx=0, - if_errors_rx=0) - - for (k, f, t) in fc.crawl('123'): - assert f == InterfaceFeature( - if_octets_tx=0, - if_octets_rx=0, - if_packets_tx=0, - if_packets_rx=0, - if_errors_tx=0, - if_errors_rx=0) - assert args[0].call_count == 2 - assert args[1].call_count == 2 - - @mock.patch('plugins.systems.interface_vm_crawler.psvmi.context_init', - side_effect=lambda dn1, dn2, kv, d, a: 1000) - @mock.patch('plugins.systems.interface_vm_crawler.psvmi.interface_iter', - side_effect=lambda vmc: [psvmi_interface( - 'eth1', 10, 20, 30, 40, 50, 60)]) - @mock.patch('plugins.systems.interface_vm_crawler.psvmi') - def test_crawl_interface_outvm_mode(self, *args): - fc = InterfaceVmCrawler() - for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): - assert f == InterfaceFeature( - if_octets_tx=0, - if_octets_rx=0, - if_packets_tx=0, - if_packets_rx=0, - if_errors_tx=0, - if_errors_rx=0) - - for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): - assert f == InterfaceFeature( - if_octets_tx=0, - if_octets_rx=0, - if_packets_tx=0, - if_packets_rx=0, - if_errors_tx=0, - if_errors_rx=0) - assert args[1].call_count == 2 - assert args[2].call_count == 2 - - @mock.patch('plugins.systems.load_host_crawler.os.getloadavg', - side_effect=lambda: [1, 2, 3]) - def test_crawl_load_invm_mode(self, *args): - fc = LoadHostCrawler() - for (k, f, t) in fc.crawl(): - assert f == LoadFeature(shortterm=1, midterm=2, longterm=2) - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.load_host_crawler.os.getloadavg', - side_effect=throw_os_error) - def test_crawl_load_invm_mode_failure(self, *args): - fc = LoadHostCrawler() - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl(): - pass - assert 
args[0].call_count == 1 - - @mock.patch( - 'plugins.systems.load_container_crawler.run_as_another_namespace', - side_effect=mocked_run_as_another_namespace) - @mock.patch('plugins.systems.load_container_crawler.os.getloadavg', - side_effect=lambda: [1, 2, 3]) - @mock.patch('plugins.systems.load_container_crawler.DockerContainer', - side_effect=lambda container_id: DummyContainer(container_id)) - def test_crawl_load_outcontainer_mode(self, *args): - fc = LoadContainerCrawler() - for (k, f, t) in fc.crawl('123'): - assert f == LoadFeature(shortterm=1, midterm=2, longterm=2) - assert args[1].call_count == 1 - assert args[2].call_count == 1 - - @mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps', - side_effect=lambda: [{'State': {'Running': True}, - 'Image': 'reg/image:latest', - 'Config': {'Cmd': 'command'}, - 'Name': 'name', - 'Id': 'id'}]) - def test_crawl_dockerps_invm_mode(self, *args): - fc = DockerpsHostCrawler() - for (k, f, t) in fc.crawl(): - assert f == DockerPSFeature( - Status=True, - Created=0, - Image='reg/image:latest', - Ports=[], - Command='command', - Names='name', - Id='id') - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps', - side_effect=throw_os_error) - def test_crawl_dockerps_invm_mode_failure(self, *args): - fc = DockerpsHostCrawler() - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl(): - pass - assert args[0].call_count == 1 - - @mock.patch('plugins.systems.dockerhistory_container_crawler.exec_docker_history', - side_effect=lambda long_id: [ - {'Id': 'image1', 'random': 'abc'}, - {'Id': 'image2', 'random': 'abc'}]) - def test_crawl_dockerhistory_outcontainer_mode(self, *args): - fc = DockerhistoryContainerCrawler() - for (k, f, t) in fc.crawl('123'): - assert f == {'history': [{'Id': 'image1', 'random': 'abc'}, - {'Id': 'image2', 'random': 'abc'}]} - assert args[0].call_count == 1 - - @mock.patch( - 'plugins.systems.dockerhistory_container_crawler.exec_docker_history', - side_effect=throw_os_error) - def test_crawl_dockerhistory_outcontainer_mode_failure(self, *args): - fc = DockerhistoryContainerCrawler() - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl('123'): - pass - assert args[0].call_count == 1 - - @mock.patch( - 'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect', - side_effect=lambda long_id: { - 'Id': 'image1', - 'random': 'abc'}) - def test_crawl_dockerinspect_outcontainer_mode(self, *args): - fc = DockerinspectContainerCrawler() - for (k, f, t) in fc.crawl('123'): - assert f == {'Id': 'image1', 'random': 'abc'} - assert args[0].call_count == 1 - - @mock.patch( - 'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect', - side_effect=throw_os_error) - def test_crawl_dockerinspect_outcontainer_mode_failure(self, *args): - fc = DockerinspectContainerCrawler() - with self.assertRaises(OSError): - for (k, f, t) in fc.crawl('123'): - pass - assert args[0].call_count == 1 diff --git a/tests/unit/test_vms_crawler.py b/tests/unit/test_vms_crawler.py deleted file mode 100644 index 096f3a25..00000000 --- a/tests/unit/test_vms_crawler.py +++ /dev/null @@ -1,126 +0,0 @@ -import mock -import unittest -from vms_crawler import VirtualMachinesCrawler - - -class MockedOSCrawler: - - def crawl(self, vm_desc, **kwargs): - return [('linux', {'os': 'some_os'}, 'os')] - - -class MockedCPUCrawler: - - def crawl(self, vm_desc, **kwargs): - return [('cpu-0', {'used': 100}, 'cpu')] - - -class MockedOSCrawlerFailure: - - def crawl(self, vm_desc, **kwargs): - 
print vm_desc - if vm_desc[0] == 'errorpid': - raise OSError('some exception') - else: - return [('linux', {'os': 'some_os'}, 'os')] - - -class MockedQemuVirtualMachine: - - def __init__(self, name='name', pid=777): - self.namespace = name - self.name = name - self.kernel = '2.6' - self.distro = 'ubuntu' - self.arch = 'x86' - self.pid = pid - - def get_metadata_dict(self): - return {'namespace': self.namespace} - - def get_vm_desc(self): - return str(self.pid), self.kernel, self.distro, self.arch - - -class VirtualMachinesCrawlerTests(unittest.TestCase): - - @mock.patch( - 'vms_crawler.plugins_manager.get_vm_crawl_plugins', - side_effect=lambda features: [(MockedOSCrawler(), {}), - (MockedCPUCrawler(), {})]) - @mock.patch('vms_crawler.get_virtual_machines', - side_effect=lambda user_list, host_namespace: [ - MockedQemuVirtualMachine( - name='aaa', - pid=101), - MockedQemuVirtualMachine( - name='bbb', - pid=102), - MockedQemuVirtualMachine( - name='ccc', - pid=103)]) - def test_vms_crawler(self, *args): - crawler = VirtualMachinesCrawler(features=['os'], user_list=['abcd']) - frames = list(crawler.crawl()) - namespaces = sorted([f.metadata['namespace'] for f in frames]) - assert namespaces == sorted(['aaa', 'bbb', 'ccc']) - features_count = sorted([f.num_features for f in frames]) - assert features_count == sorted([2, 2, 2]) - system_types = sorted([f.metadata['system_type'] for f in frames]) - assert system_types == sorted(['vm', 'vm', 'vm']) - assert args[0].call_count == 1 - assert args[1].call_count == 1 - - @mock.patch( - 'vms_crawler.plugins_manager.get_vm_crawl_plugins', - side_effect=lambda features: [(MockedOSCrawlerFailure(), {}), - (MockedCPUCrawler(), {})]) - @mock.patch('vms_crawler.get_virtual_machines', - side_effect=lambda user_list, host_namespace: [ - MockedQemuVirtualMachine( - name='aaa', - pid=101), - MockedQemuVirtualMachine( - name='errorid', - pid='errorpid'), - MockedQemuVirtualMachine( - name='ccc', - pid=103)]) - def test_failed_vms_crawler(self, *args): - crawler = VirtualMachinesCrawler(features=['os']) - with self.assertRaises(OSError): - frames = list(crawler.crawl(ignore_plugin_exception=False)) - assert args[0].call_count == 1 - assert args[1].call_count == 1 - - @mock.patch( - 'vms_crawler.plugins_manager.get_vm_crawl_plugins', - side_effect=lambda features: [(MockedCPUCrawler(), {}), - (MockedOSCrawlerFailure(), {}), - (MockedCPUCrawler(), {})]) - @mock.patch('vms_crawler.get_virtual_machines', - side_effect=lambda user_list, host_namespace: [ - MockedQemuVirtualMachine( - name='aaa', - pid=101), - MockedQemuVirtualMachine( - name='errorid', - pid='errorpid'), - MockedQemuVirtualMachine( - name='ccc', - pid=103)]) - def test_failed_vms_crawler_with_ignore_failure(self, *args): - crawler = VirtualMachinesCrawler(features=['cpu', 'os', 'cpu']) - frames = list(crawler.crawl()) # defaults to ignore_plugin_exception - namespaces = sorted([f.metadata['namespace'] for f in frames]) - assert namespaces == sorted(['aaa', 'errorid', 'ccc']) - features_count = sorted([f.num_features for f in frames]) - assert features_count == sorted([3, 2, 3]) - system_types = [f.metadata['system_type'] for f in frames] - assert system_types == ['vm', 'vm', 'vm'] - assert args[0].call_count == 1 - assert args[1].call_count == 1 - - -if __name__ == '__main__': - unittest.main() diff --git a/tests/unit/vfs_mount_init-id b/tests/unit/vfs_mount_init-id deleted file mode 100644 index 08dbf2d4..00000000 --- a/tests/unit/vfs_mount_init-id +++ /dev/null @@ -1 +0,0 @@ -vol1/id/rootfs-a-b-c 
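Each unit test deleted above (patch 39 below recreates these files) follows a single pattern: mock.patch swaps the crawler's data source, whether psutil, psvmi, os.getloadavg, or a docker helper such as exec_dockerinspect, for a side_effect lambda that returns canned objects, and the assertions then cover both the emitted (key, feature, type) tuples and the mock's call_count. A self-contained sketch of that pattern, with invented class and test names, illustrative only and not part of the patch series:

    import os
    import unittest

    import mock


    class LoadavgPatchSketch(unittest.TestCase):
        # Invented example: patch a real but simple data source the same
        # way the deleted tests patch psutil/psvmi/docker helpers.

        @mock.patch('os.getloadavg', side_effect=lambda: [1, 2, 3])
        def test_patched_source_is_consulted(self, mocked_getloadavg):
            # The crawler under test would call the patched source
            # internally; calling it directly keeps the sketch runnable.
            assert os.getloadavg() == [1, 2, 3]
            # call_count proves the canned source was actually consulted,
            # which is what the deleted tests assert via args[N].call_count.
            assert mocked_getloadavg.call_count == 1


    if __name__ == '__main__':
        unittest.main()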
From 5ead8f578b71660eb238d4cfe8761d2a2a37ca4a Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 17:17:38 -0500 Subject: [PATCH 29/47] plugincont wip Signed-off-by: Sahil Suneja --- tests/functional/test_functional_safecontainers_crawler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index af33f32d..4599b5a0 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -215,6 +215,7 @@ def testCrawlContainerKafka(self): env=env) time.sleep(30) + print self.docker.containers() stdout, stderr = process.communicate() assert process.returncode == 0 @@ -284,6 +285,7 @@ def testCrawlContainerEvilPlugin(self): stdout, stderr = process.communicate() assert process.returncode == 0 + print self.docker.containers() print stderr print stdout From 6919e9e33c5c044dc30e45154fa22a58b9859380 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 17:51:37 -0500 Subject: [PATCH 30/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/.gitignore | 1 - .../utils/plugincont/seccomp-no-ptrace.json | 1593 +++++++++++++++++ 2 files changed, 1593 insertions(+), 1 deletion(-) create mode 100644 crawler/utils/plugincont/seccomp-no-ptrace.json diff --git a/crawler/.gitignore b/crawler/.gitignore index cff5a5e1..8902896e 100644 --- a/crawler/.gitignore +++ b/crawler/.gitignore @@ -4,5 +4,4 @@ binaries/ kafka-producer.py timeout.py alchemy.py -*.json *.sh diff --git a/crawler/utils/plugincont/seccomp-no-ptrace.json b/crawler/utils/plugincont/seccomp-no-ptrace.json new file mode 100644 index 00000000..17cd3623 --- /dev/null +++ b/crawler/utils/plugincont/seccomp-no-ptrace.json @@ -0,0 +1,1593 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "architectures": [ + "SCMP_ARCH_X86_64", + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ], + "syscalls": [ + { + "name": "accept", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "accept4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "access", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "alarm", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "bind", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "brk", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "capget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "capset", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chmod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_getres", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_nanosleep", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "close", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "connect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "copy_file_range", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "creat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup3", + "action": "SCMP_ACT_ALLOW", + 
"args": [] + }, + { + "name": "epoll_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_create1", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_ctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_ctl_old", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_pwait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_wait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_wait_old", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "eventfd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "eventfd2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "execve", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "execveat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "exit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "exit_group", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "faccessat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fadvise64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fadvise64_64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fallocate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fanotify_mark", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchmod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchmodat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchownat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fcntl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fcntl64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fdatasync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fgetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "flistxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "flock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fork", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fremovexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fsetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatfs64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fsync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ftruncate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ftruncate64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "futex", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "futimesat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getcpu", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getcwd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getdents", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getdents64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getegid", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getegid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "geteuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "geteuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgroups", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgroups32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getitimer", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpeername", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpgrp", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getppid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpriority", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrandom", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "get_robust_list", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrusage", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsockname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsockopt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "get_thread_area", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "gettid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "gettimeofday", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_add_watch", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_init", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_init1", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_rm_watch", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_cancel", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_destroy", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_getevents", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioprio_get", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioprio_set", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_setup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_submit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ipc", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "kill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lchown32", + "action": "SCMP_ACT_ALLOW", + 
"args": [] + }, + { + "name": "lgetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "link", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "linkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "listen", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "listxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "llistxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "_llseek", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lremovexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lseek", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lsetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lstat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lstat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "madvise", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "memfd_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mincore", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mkdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mkdirat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mknod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mknodat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlock2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlockall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mmap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mmap2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mprotect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_getsetattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_notify", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_timedreceive", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_timedsend", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_unlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mremap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgrcv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgsnd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munlock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munlockall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munmap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "nanosleep", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "newfstatat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "_newselect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "openat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pause", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + 
"name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "pipe", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pipe2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "poll", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ppoll", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "prctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pread64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "preadv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "prlimit64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pselect6", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pwrite64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pwritev", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "read", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readahead", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvfrom", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvmmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "remap_file_pages", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "removexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rename", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "renameat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "renameat2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "restart_syscall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rmdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigaction", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigpending", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigprocmask", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigqueueinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigreturn", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigsuspend", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigtimedwait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_tgsigqueueinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getaffinity", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getparam", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_get_priority_max", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_get_priority_min", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getscheduler", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_rr_get_interval", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": 
"sched_setaffinity", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setparam", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setscheduler", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_yield", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "seccomp", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "select", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semop", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semtimedop", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "send", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendfile", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendfile64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendmmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendto", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgroups", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgroups32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setitimer", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setpgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setpriority", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setregid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setregid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setreuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setreuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_robust_list", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setsid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setsockopt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_thread_area", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_tid_address", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmdt", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shutdown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sigaltstack", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "signalfd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "signalfd4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sigreturn", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socket", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socketcall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socketpair", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "splice", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "stat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "stat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "statfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "statfs64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "symlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "symlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sync_file_range", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "syncfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sysinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "syslog", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tee", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tgkill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "time", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_delete", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_settime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_getoverrun", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_settime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "times", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tkill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "truncate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "truncate64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ugetrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "umask", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "uname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "unlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "unlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utimensat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utimes", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vfork", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vmsplice", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "wait4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "waitid", + 
"action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "waitpid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "write", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "writev", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "arch_prctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "modify_ldt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chroot", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + } + ] +} From 49e9485d2b24300e0e53bd5f991fff897e0e129a Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 18:12:49 -0500 Subject: [PATCH 31/47] plugincont wip Signed-off-by: Sahil Suneja --- .travis.yml | 3 ++- tests/functional/test_functional_safecontainers_crawler.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1b7e2f84..b0fa61c3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,7 @@ sudo: required language: python - +distribution: ubuntu +version: 16.04 services: - docker diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index 4599b5a0..dd77edd1 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -87,9 +87,9 @@ def start_crawled_container(self): fd.close() def tearDown(self): + selg.fix_test_artifacts() self.remove_crawled_container() self.remove_kafka_container() - shutil.rmtree(self.tempd) def remove_kafka_container(self): @@ -306,7 +306,7 @@ def testCrawlContainerEvilPlugin(self): assert 'expected_failed' in output f.close() - def testFixArtifacts(self): + def fix_test_artifacts(self): plugincont_image_path = os.getcwd() + \ '/crawler/utils/plugincont/plugincont_img' shutil.copyfile(plugincont_image_path + '/requirements.txt.template', From 05971e9606f8169970617ab35e215d25dcea1540 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 20:55:36 -0500 Subject: [PATCH 32/47] plugincont wip Signed-off-by: Sahil Suneja --- .../test_functional_safecontainers_crawler.py | 26 ++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index dd77edd1..3479aa04 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -8,13 +8,15 @@ import subprocess import sys import pykafka - +import semantic_version # Tests for crawlers in kraken crawlers configuration. from safe_containers_crawler import SafeContainersCrawler from worker import Worker from emitters_manager import EmittersManager from utils.dockerutils import get_docker_container_rootfs_path +from utils.dockerutils import _fix_version +from utils.dockerutils import _get_docker_server_version import logging @@ -44,12 +46,20 @@ def setUp(self): print ("Error connecting to docker daemon, are you in the docker" "group? 
You need to be in the docker group.") + self.version_check() self.start_crawled_container() - # start a kakfa+zookeeper container to send data to (to test our # kafka emitter) self.start_kafka_container() + def version_check(self): + self.version_ok = False + VERSION_SPEC = semantic_version.Spec('>=1.12.1') + server_version = _get_docker_server_version() + if VERSION_SPEC.match(semantic_version.Version(_fix_version( + server_version))): + self.version_ok = True + def start_kafka_container(self): self.docker.pull(repository='spotify/kafka', tag='latest') self.kafka_container = self.docker.create_container( @@ -87,7 +97,7 @@ def start_crawled_container(self): fd.close() def tearDown(self): - selg.fix_test_artifacts() + self.fix_test_artifacts() self.remove_crawled_container() self.remove_kafka_container() shutil.rmtree(self.tempd) @@ -101,6 +111,8 @@ def remove_crawled_container(self): self.docker.remove_container(container=self.container['Id']) def _testCrawlContainer1(self): + if self.version_ok is False: + pass crawler = SafeContainersCrawler( features=[], user_list=self.container['Id']) frames = list(crawler.crawl()) @@ -121,6 +133,8 @@ def _testCrawlContainer1(self): assert 'rake' in output def _testCrawlContainer2(self): + if self.version_ok is False: + pass env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) os.makedirs(self.tempd + '/out') @@ -157,6 +171,8 @@ def _testCrawlContainer2(self): f.close() def testCrawlContainerNoPlugins(self): + if self.version_ok is False: + pass rootfs = get_docker_container_rootfs_path(self.container['Id']) fd = open(rootfs + '/crawlplugins', 'w') fd.write('noplugin\n') @@ -198,6 +214,8 @@ def testCrawlContainerNoPlugins(self): f.close() def testCrawlContainerKafka(self): + if self.version_ok is False: + pass env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) os.makedirs(self.tempd + '/out') @@ -260,6 +278,8 @@ def _setup_plugincont_testing2(self): plugincont_image_path + '/requirements.txt') def testCrawlContainerEvilPlugin(self): + if self.version_ok is False: + pass rootfs = get_docker_container_rootfs_path(self.container['Id']) fd = open(rootfs + '/crawlplugins', 'w') fd.write('evil\n') From f645e3742301ccca6797c8bdccabd361ebeee206 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 21:53:37 -0500 Subject: [PATCH 33/47] plugincont wip Signed-off-by: Sahil Suneja --- .travis.yml | 2 - .../test_functional_safecontainers_crawler.py | 66 +++++++++++++------ 2 files changed, 47 insertions(+), 21 deletions(-) diff --git a/.travis.yml b/.travis.yml index b0fa61c3..84211915 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,5 @@ sudo: required language: python -distribution: ubuntu -version: 16.04 services: - docker diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index 3479aa04..fcfe210e 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -9,6 +9,7 @@ import sys import pykafka import semantic_version +import platform # Tests for crawlers in kraken crawlers configuration. from safe_containers_crawler import SafeContainersCrawler @@ -47,18 +48,49 @@ def setUp(self): "group? 
You need to be in the docker group.") self.version_check() + self.setup_plugincont_testing2() self.start_crawled_container() # start a kakfa+zookeeper container to send data to (to test our # kafka emitter) self.start_kafka_container() + def setup_plugincont_testing2(self): + _platform = platform.linux_distribution() + if _platform[0] == 'Ubuntu' or _platform[1] >= '16.04': + self.seccomp = True + plugincont_image_path = os.getcwd() + \ + '/crawler/utils/plugincont/plugincont_img' + shutil.copyfile( + plugincont_image_path + '/requirements.txt.testing', + plugincont_image_path + '/requirements.txt') + else: + self.seccomp = False + src_file = os.getcwd() + \ + '/crawler/plugin_containers_manager.py' + os.system('sed -i.bak /security_opt=/d ' + src_file) + + def fix_test_artifacts(self): + if self.seccomp is True: + plugincont_image_path = os.getcwd() + \ + '/crawler/utils/plugincont/plugincont_img' + shutil.copyfile( + plugincont_image_path + '/requirements.txt.template', + plugincont_image_path + '/requirements.txt') + else: + src_file = os.getcwd() + \ + '/crawler/plugin_containers_manager.py.bak' + dst_file = os.getcwd() + \ + '/crawler/plugin_containers_manager.py' + shutil.move(src_file, dst_file) + pass + def version_check(self): self.version_ok = False VERSION_SPEC = semantic_version.Spec('>=1.12.1') server_version = _get_docker_server_version() if VERSION_SPEC.match(semantic_version.Version(_fix_version( server_version))): - self.version_ok = True + self.version_ok = True def start_kafka_container(self): self.docker.pull(repository='spotify/kafka', tag='latest') @@ -113,6 +145,7 @@ def remove_crawled_container(self): def _testCrawlContainer1(self): if self.version_ok is False: pass + return crawler = SafeContainersCrawler( features=[], user_list=self.container['Id']) frames = list(crawler.crawl()) @@ -135,6 +168,7 @@ def _testCrawlContainer1(self): def _testCrawlContainer2(self): if self.version_ok is False: pass + return env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) os.makedirs(self.tempd + '/out') @@ -173,6 +207,7 @@ def _testCrawlContainer2(self): def testCrawlContainerNoPlugins(self): if self.version_ok is False: pass + return rootfs = get_docker_container_rootfs_path(self.container['Id']) fd = open(rootfs + '/crawlplugins', 'w') fd.write('noplugin\n') @@ -214,8 +249,11 @@ def testCrawlContainerNoPlugins(self): f.close() def testCrawlContainerKafka(self): + # import pdb + # pdb.set_trace() if self.version_ok is False: pass + return env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) os.makedirs(self.tempd + '/out') @@ -271,22 +309,15 @@ def _setup_plugincont_testing1(self): cmd='pip install python-ptrace') self.docker.exec_start(exec_instance.get("Id")) - def _setup_plugincont_testing2(self): - plugincont_image_path = os.getcwd() + \ - '/crawler/utils/plugincont/plugincont_img' - shutil.copyfile(plugincont_image_path + '/requirements.txt.testing', - plugincont_image_path + '/requirements.txt') - def testCrawlContainerEvilPlugin(self): if self.version_ok is False: pass + return rootfs = get_docker_container_rootfs_path(self.container['Id']) fd = open(rootfs + '/crawlplugins', 'w') fd.write('evil\n') fd.close() - self._setup_plugincont_testing2() - env = os.environ.copy() mypath = os.path.dirname(os.path.realpath(__file__)) os.makedirs(self.tempd + '/out') @@ -301,7 +332,7 @@ def testCrawlContainerEvilPlugin(self): '--crawlmode', 'OUTCONTAINERSAFE', ], env=env) - time.sleep(30) + time.sleep(30) stdout, stderr = 
process.communicate() assert process.returncode == 0 @@ -316,22 +347,19 @@ def testCrawlContainerEvilPlugin(self): f = open(self.tempd + '/out/' + files[0], 'r') output = f.read() + f.close() print output # only printed if the test fails assert 'kill_status' in output assert 'trace_status' in output assert 'write_status' in output assert 'rm_status' in output assert 'nw_status' in output - assert 'unexpected_succeeded' not in output assert 'expected_failed' in output - f.close() - - def fix_test_artifacts(self): - plugincont_image_path = os.getcwd() + \ - '/crawler/utils/plugincont/plugincont_img' - shutil.copyfile(plugincont_image_path + '/requirements.txt.template', - plugincont_image_path + '/requirements.txt') - pass + ctr = output.count('unexpected_succeeded') + if self.seccomp is True: + assert ctr == 0 + else: + assert ctr == 1 if __name__ == '__main__': From 0007e6bbafb69ed29697903a72c57a9a7f80ab97 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 21:59:53 -0500 Subject: [PATCH 34/47] plugincont wip Signed-off-by: Sahil Suneja --- tests/functional/test_functional_safecontainers_crawler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index fcfe210e..4866be5e 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -56,7 +56,7 @@ def setUp(self): def setup_plugincont_testing2(self): _platform = platform.linux_distribution() - if _platform[0] == 'Ubuntu' or _platform[1] >= '16.04': + if _platform[0] == 'Ubuntu' and _platform[1] >= '16.04': self.seccomp = True plugincont_image_path = os.getcwd() + \ '/crawler/utils/plugincont/plugincont_img' From 4a441edbbac7dfcdddca4886a94b6c9efd8e1fec Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 22:16:08 -0500 Subject: [PATCH 35/47] plugincont wip Signed-off-by: Sahil Suneja --- .travis.yml | 2 +- Dockerfile | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 84211915..6c27977a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,7 +33,7 @@ before_install: - cp -r psvmi/offsets offsets - cp psvmi/header.h . # for safe plugin mode - - sudo apt-get install libcap-dev + - sudo apt-get install libcap-dev iptables libxtables11 # command to install dependencies # XXX: Now mock complains if we don't `sudo pip install`. diff --git a/Dockerfile b/Dockerfile index 265ac7d4..0124517e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,6 +15,8 @@ RUN dpkg -i /tmp/python-socket-datacollector_*_all.deb && \ apt-get -y update && \ apt-get -y install libpcap0.8 && \ apt-get -y install libcap-dev && \ + apt-get -y install iptables && \ + apt-get -y install libxtables11 && \ dpkg -i /tmp/softflowd_0.9.*_amd64.deb && \ pip install pyroute2 py-radix requests-unixsocket json-rpc && \ dpkg -i /tmp/python-conntrackprobe_*_all.deb && \ From 1bfedd38650efc6ee2fe2dd44893fddd9f0335a1 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 22:23:44 -0500 Subject: [PATCH 36/47] plugincont wip Signed-off-by: Sahil Suneja --- .travis.yml | 2 +- Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6c27977a..89c3c280 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,7 +33,7 @@ before_install: - cp -r psvmi/offsets offsets - cp psvmi/header.h . 
# for safe plugin mode - - sudo apt-get install libcap-dev iptables libxtables11 + - sudo apt-get install libcap-dev iptables iptables-dev # command to install dependencies # XXX: Now mock complains if we don't `sudo pip install`. diff --git a/Dockerfile b/Dockerfile index 0124517e..221a4934 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,7 +16,7 @@ RUN dpkg -i /tmp/python-socket-datacollector_*_all.deb && \ apt-get -y install libpcap0.8 && \ apt-get -y install libcap-dev && \ apt-get -y install iptables && \ - apt-get -y install libxtables11 && \ + apt-get -y install iptables-dev && \ dpkg -i /tmp/softflowd_0.9.*_amd64.deb && \ pip install pyroute2 py-radix requests-unixsocket json-rpc && \ dpkg -i /tmp/python-conntrackprobe_*_all.deb && \ From 60164290afcd0edbc7845db37bb0a500a23d673c Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Tue, 5 Dec 2017 23:22:33 -0500 Subject: [PATCH 37/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 34 +++++++++++++------ .../plugincont_img/requirements.txt | 14 -------- .../test_functional_safecontainers_crawler.py | 3 +- 3 files changed, 26 insertions(+), 25 deletions(-) delete mode 100644 crawler/utils/plugincont/plugincont_img/requirements.txt diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index ff1c5f61..7e7120ca 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -224,18 +224,9 @@ def create_plugincont(self, guestcont): self.pluginconts[str(guestcont_id)] = plugincont guestcont.plugincont = plugincont - def _add_iptable_rules(self): + def _add_iptable_rules_in(self): retVal = 0 try: - rule = iptc.Rule() - match = iptc.Match(rule, "owner") - match.uid_owner = str(self.plugincont_host_uid) - rule.add_match(match) - rule.dst = "!127.0.0.1" - rule.target = iptc.Target(rule, "DROP") - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT") - chain.insert_rule(rule) - rule = iptc.Rule() match = iptc.Match(rule, "cgroup") match.cgroup = str(self.plugincont_cgroup_netclsid) @@ -248,6 +239,29 @@ def _add_iptable_rules(self): print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal + + def _add_iptable_rules_out(self): + retVal = 0 + try: + rule = iptc.Rule() + match = iptc.Match(rule, "owner") + match.uid_owner = str(self.plugincont_host_uid) + rule.add_match(match) + rule.dst = "!127.0.0.1" + rule.target = iptc.Target(rule, "DROP") + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "OUTPUT") + chain.insert_rule(rule) + except Exception as exc: + print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno + retVal = -1 + return retVal + + def _add_iptable_rules(self): + retVal1 = 0 + retVal2 = 0 + retVal1 = self._add_iptable_rules_in() + retVal2 = self._add_iptable_rules_out() + return (retVal1 + retVal2)/2 def _get_cgroup_dir(self, devlist=[]): for dev in devlist: diff --git a/crawler/utils/plugincont/plugincont_img/requirements.txt b/crawler/utils/plugincont/plugincont_img/requirements.txt deleted file mode 100644 index 7fe20159..00000000 --- a/crawler/utils/plugincont/plugincont_img/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -psutil==2.1.3 -requests>=2.7.13 -netifaces==0.10.4 -kafka-python==1.3.1 -pykafka==1.1.0 -kafka==1.3.3 -docker-py==1.10.6 -python-dateutil==2.4.2 -semantic_version==2.5.0 -Yapsy==1.11.223 -configobj==4.7.0 -morph==0.1.2 -fluent-logger==0.4.6 -requests_unixsocket==0.1.5 diff --git a/tests/functional/test_functional_safecontainers_crawler.py 
b/tests/functional/test_functional_safecontainers_crawler.py index 4866be5e..67d9be5f 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -67,7 +67,8 @@ def setup_plugincont_testing2(self): self.seccomp = False src_file = os.getcwd() + \ '/crawler/plugin_containers_manager.py' - os.system('sed -i.bak /security_opt=/d ' + src_file) + os.system("sed -i.bak '/security_opt=/d; " + "/self._add_iptable_rules_out/d' " + src_file) def fix_test_artifacts(self): if self.seccomp is True: From 3072498ff9bbc7665520a248b842a0caabbb8402 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Wed, 6 Dec 2017 00:04:14 -0500 Subject: [PATCH 38/47] plugincont wip Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 6 ++-- .../test_functional_safecontainers_crawler.py | 31 +++++++++---------- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 7e7120ca..627cde8d 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -239,7 +239,7 @@ def _add_iptable_rules_in(self): print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal - + def _add_iptable_rules_out(self): retVal = 0 try: @@ -255,13 +255,13 @@ def _add_iptable_rules_out(self): print sys.exc_info()[0], exc, sys.exc_info()[-1].tb_lineno retVal = -1 return retVal - + def _add_iptable_rules(self): retVal1 = 0 retVal2 = 0 retVal1 = self._add_iptable_rules_in() retVal2 = self._add_iptable_rules_out() - return (retVal1 + retVal2)/2 + return (retVal1 + retVal2) / 2 def _get_cgroup_dir(self, devlist=[]): for dev in devlist: diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index 67d9be5f..ac7e3e6e 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -55,29 +55,28 @@ def setUp(self): self.start_kafka_container() def setup_plugincont_testing2(self): + plugincont_image_path = os.getcwd() + \ + '/crawler/utils/plugincont/plugincont_img' + shutil.copyfile( + plugincont_image_path + '/requirements.txt.testing', + plugincont_image_path + '/requirements.txt') _platform = platform.linux_distribution() - if _platform[0] == 'Ubuntu' and _platform[1] >= '16.04': - self.seccomp = True - plugincont_image_path = os.getcwd() + \ - '/crawler/utils/plugincont/plugincont_img' - shutil.copyfile( - plugincont_image_path + '/requirements.txt.testing', - plugincont_image_path + '/requirements.txt') - else: + if _platform[0] != 'Ubuntu1' or _platform[1] < '16.04': self.seccomp = False src_file = os.getcwd() + \ '/crawler/plugin_containers_manager.py' os.system("sed -i.bak '/security_opt=/d; " - "/self._add_iptable_rules_out/d' " + src_file) + "/self._add_iptable_rules_in/d' " + src_file) + else: + self.seccomp = True def fix_test_artifacts(self): - if self.seccomp is True: - plugincont_image_path = os.getcwd() + \ - '/crawler/utils/plugincont/plugincont_img' - shutil.copyfile( - plugincont_image_path + '/requirements.txt.template', - plugincont_image_path + '/requirements.txt') - else: + plugincont_image_path = os.getcwd() + \ + '/crawler/utils/plugincont/plugincont_img' + shutil.copyfile( + plugincont_image_path + '/requirements.txt.template', + plugincont_image_path + '/requirements.txt') + if self.seccomp is False: src_file = os.getcwd() + \ 
'/crawler/plugin_containers_manager.py.bak' dst_file = os.getcwd() + \ From 5f67f7a0f6114f5f7b220e5ceebbe92aef30f274 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Wed, 6 Dec 2017 00:12:46 -0500 Subject: [PATCH 39/47] plugincont wipush Signed-off-by: Sahil Suneja --- .../test_functional_containers_crawler.py | 242 +++ tests/functional/test_functional_ctprobe.py | 328 +++ .../test_functional_dockerevents.py | 222 ++ .../functional/test_functional_dockerutils.py | 91 + tests/functional/test_functional_fprobe.py | 281 +++ .../test_functional_k8s_environment.py | 246 +++ .../functional/test_functional_logs_linker.py | 137 ++ tests/functional/test_functional_namespace.py | 117 ++ tests/functional/test_functional_plugins.py | 84 + .../test_functional_safecontainers_crawler.py | 2 +- .../functional/test_functional_vm_plugins.py | 159 ++ .../functional/test_functional_vms_crawler.py | 147 ++ tests/functional/test_logs_in_volumes1.py | 96 + tests/functional/test_logs_in_volumes_star.py | 93 + tests/functional/test_logs_no_volumes.py | 90 + tests/unit/__init__.py | 0 tests/unit/aufs_mount_init-id | 1 + tests/unit/btrfs_mount_init-id | 1 + tests/unit/capturing.py | 16 + tests/unit/liberty_connection_stats | 1 + tests/unit/liberty_jvm_stats | 1 + tests/unit/liberty_mbeans | 2 + tests/unit/liberty_response_time_details | 1 + .../unit/liberty_response_time_details_mocked | 1 + tests/unit/liberty_servlet_stats | 1 + tests/unit/liberty_session_stats | 1 + tests/unit/liberty_thread_pool_stats | 1 + tests/unit/mock_environ_file | 1 + tests/unit/mock_pynvml.py | 44 + tests/unit/proc_mounts_aufs | 33 + tests/unit/proc_mounts_btrfs | 33 + tests/unit/proc_mounts_devicemapper | 28 + tests/unit/proc_mounts_vfs | 33 + tests/unit/proc_pid_mounts_devicemapper | 20 + tests/unit/test_app_apache.py | 288 +++ tests/unit/test_app_db2.py | 244 +++ tests/unit/test_app_liberty.py | 264 +++ tests/unit/test_app_nginx.py | 194 ++ tests/unit/test_app_redis.py | 270 +++ tests/unit/test_app_tomcat.py | 295 +++ tests/unit/test_container.py | 44 + tests/unit/test_containers.py | 188 ++ tests/unit/test_containers_crawler.py | 139 ++ tests/unit/test_diskio_host.py | 130 ++ tests/unit/test_dockercontainer.py | 841 ++++++++ tests/unit/test_dockerutils.py | 381 ++++ tests/unit/test_emitter.py | 647 ++++++ tests/unit/test_gpu_plugin.py | 34 + tests/unit/test_host_crawler.py | 73 + tests/unit/test_jar_plugin.py | 56 + tests/unit/test_jar_utils.py | 50 + tests/unit/test_mesos_url.py | 18 + tests/unit/test_misc.py | 147 ++ tests/unit/test_mtgraphite.py | 164 ++ tests/unit/test_namespace.py | 255 +++ tests/unit/test_osinfo.py | 155 ++ tests/unit/test_package_utils.py | 87 + tests/unit/test_plugins.py | 1790 +++++++++++++++++ tests/unit/test_vms_crawler.py | 126 ++ tests/unit/vfs_mount_init-id | 1 + 60 files changed, 9434 insertions(+), 1 deletion(-) create mode 100644 tests/functional/test_functional_containers_crawler.py create mode 100644 tests/functional/test_functional_ctprobe.py create mode 100644 tests/functional/test_functional_dockerevents.py create mode 100644 tests/functional/test_functional_dockerutils.py create mode 100644 tests/functional/test_functional_fprobe.py create mode 100644 tests/functional/test_functional_k8s_environment.py create mode 100644 tests/functional/test_functional_logs_linker.py create mode 100644 tests/functional/test_functional_namespace.py create mode 100644 tests/functional/test_functional_plugins.py create mode 100644 tests/functional/test_functional_vm_plugins.py create mode 100644 
tests/functional/test_functional_vms_crawler.py create mode 100644 tests/functional/test_logs_in_volumes1.py create mode 100644 tests/functional/test_logs_in_volumes_star.py create mode 100644 tests/functional/test_logs_no_volumes.py create mode 100644 tests/unit/__init__.py create mode 100644 tests/unit/aufs_mount_init-id create mode 100644 tests/unit/btrfs_mount_init-id create mode 100644 tests/unit/capturing.py create mode 100644 tests/unit/liberty_connection_stats create mode 100644 tests/unit/liberty_jvm_stats create mode 100644 tests/unit/liberty_mbeans create mode 100644 tests/unit/liberty_response_time_details create mode 100644 tests/unit/liberty_response_time_details_mocked create mode 100644 tests/unit/liberty_servlet_stats create mode 100644 tests/unit/liberty_session_stats create mode 100644 tests/unit/liberty_thread_pool_stats create mode 100644 tests/unit/mock_environ_file create mode 100644 tests/unit/mock_pynvml.py create mode 100644 tests/unit/proc_mounts_aufs create mode 100644 tests/unit/proc_mounts_btrfs create mode 100644 tests/unit/proc_mounts_devicemapper create mode 100644 tests/unit/proc_mounts_vfs create mode 100644 tests/unit/proc_pid_mounts_devicemapper create mode 100644 tests/unit/test_app_apache.py create mode 100644 tests/unit/test_app_db2.py create mode 100644 tests/unit/test_app_liberty.py create mode 100644 tests/unit/test_app_nginx.py create mode 100644 tests/unit/test_app_redis.py create mode 100644 tests/unit/test_app_tomcat.py create mode 100644 tests/unit/test_container.py create mode 100644 tests/unit/test_containers.py create mode 100644 tests/unit/test_containers_crawler.py create mode 100644 tests/unit/test_diskio_host.py create mode 100644 tests/unit/test_dockercontainer.py create mode 100644 tests/unit/test_dockerutils.py create mode 100644 tests/unit/test_emitter.py create mode 100644 tests/unit/test_gpu_plugin.py create mode 100644 tests/unit/test_host_crawler.py create mode 100644 tests/unit/test_jar_plugin.py create mode 100644 tests/unit/test_jar_utils.py create mode 100644 tests/unit/test_mesos_url.py create mode 100644 tests/unit/test_misc.py create mode 100644 tests/unit/test_mtgraphite.py create mode 100644 tests/unit/test_namespace.py create mode 100644 tests/unit/test_osinfo.py create mode 100644 tests/unit/test_package_utils.py create mode 100644 tests/unit/test_plugins.py create mode 100644 tests/unit/test_vms_crawler.py create mode 100644 tests/unit/vfs_mount_init-id diff --git a/tests/functional/test_functional_containers_crawler.py b/tests/functional/test_functional_containers_crawler.py new file mode 100644 index 00000000..d03b658b --- /dev/null +++ b/tests/functional/test_functional_containers_crawler.py @@ -0,0 +1,242 @@ +import unittest +import docker +import requests.exceptions +import tempfile +import os +import shutil +import subprocess +import sys +import pykafka + +# Tests for crawlers in kraken crawlers configuration. + +from containers_crawler import ContainersCrawler +from worker import Worker +from emitters_manager import EmittersManager + +import logging + +# Tests conducted with a single container running. 
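(For orientation, the pipeline these tests exercise can be sketched as below. This is an illustrative aside rather than part of the patch: the file:// output path is an assumed placeholder, and the constructor and worker signatures simply mirror the test code that follows.)

from containers_crawler import ContainersCrawler
from emitters_manager import EmittersManager
from worker import Worker

# one crawl pass: the crawler emits one frame per running container and
# the emitters manager routes each frame to its configured sink URL(s)
emitters = EmittersManager(urls=['file:///tmp/crawler-out/frames'])
crawler = ContainersCrawler(features=['cpu', 'memory'], user_list='ALL')
worker = Worker(emitters=emitters, frequency=-1, crawler=crawler)
worker.iterate()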
+
+
+class ContainersCrawlerTests(unittest.TestCase):
+
+    def setUp(self):
+        root = logging.getLogger()
+        root.setLevel(logging.INFO)
+        ch = logging.StreamHandler(sys.stdout)
+        ch.setLevel(logging.INFO)
+        formatter = logging.Formatter(
+            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+        ch.setFormatter(formatter)
+        root.addHandler(ch)
+
+        self.docker = docker.APIClient(base_url='unix://var/run/docker.sock',
+                                       version='auto')
+        try:
+            if len(self.docker.containers()) != 0:
+                raise Exception(
+                    "Sorry, this test requires a machine with no docker"
+                    "containers running.")
+        except requests.exceptions.ConnectionError:
+            print ("Error connecting to docker daemon, are you in the docker"
+                   "group? You need to be in the docker group.")
+
+        self.start_crawled_container()
+
+        # start a kafka+zookeeper container to send data to (to test our
+        # kafka emitter)
+        self.start_kafka_container()
+
+    def start_kafka_container(self):
+        self.docker.pull(repository='spotify/kafka', tag='latest')
+        self.kafka_container = self.docker.create_container(
+            image='spotify/kafka', ports=[9092, 2181],
+            host_config=self.docker.create_host_config(port_bindings={
+                9092: 9092,
+                2181: 2181
+            }),
+            environment={'ADVERTISED_HOST': 'localhost',
+                         'ADVERTISED_PORT': '9092'})
+        self.docker.start(container=self.kafka_container['Id'])
+
+    def start_crawled_container(self):
+        # start a container to be crawled
+        self.docker.pull(repository='ubuntu', tag='latest')
+        self.container = self.docker.create_container(
+            image='ubuntu:latest', command='/bin/sleep 60')
+        self.tempd = tempfile.mkdtemp(prefix='crawlertest.')
+        self.docker.start(container=self.container['Id'])
+
+    def tearDown(self):
+        self.remove_crawled_container()
+        self.remove_kafka_container()
+
+        shutil.rmtree(self.tempd)
+
+    def remove_kafka_container(self):
+        self.docker.stop(container=self.kafka_container['Id'])
+        self.docker.remove_container(container=self.kafka_container['Id'])
+
+    def remove_crawled_container(self):
+        self.docker.stop(container=self.container['Id'])
+        self.docker.remove_container(container=self.container['Id'])
+
+    def testCrawlContainer1(self):
+        crawler = ContainersCrawler(
+            features=[
+                'cpu',
+                'memory',
+                'interface',
+                'package'])
+        frames = list(crawler.crawl())
+        output = str(frames[0])
+        print output  # only printed if the test fails
+        assert 'interface-lo' in output
+        assert 'if_octets_tx=' in output
+        assert 'cpu-0' in output
+        assert 'cpu_nice=' in output
+        assert 'memory' in output
+        assert 'memory_buffered=' in output
+        assert 'apt' in output
+        assert 'pkgarchitecture=' in output
+
+    def testCrawlContainer2(self):
+        env = os.environ.copy()
+        mypath = os.path.dirname(os.path.realpath(__file__))
+        os.makedirs(self.tempd + '/out')
+
+        # crawler itself needs to be root
+        process = subprocess.Popen(
+            [
+                '/usr/bin/python', mypath + '/../../crawler/crawler.py',
+                '--url', 'file://' + self.tempd + '/out/crawler',
+                '--features', 'cpu,memory,interface,package',
+                '--crawlContainers', self.container['Id'],
+                '--format', 'graphite',
+                '--crawlmode', 'OUTCONTAINER',
+                '--numprocesses', '1'
+            ],
+            env=env)
+        stdout, stderr = process.communicate()
+        assert process.returncode == 0
+
+        print stderr
+        print stdout
+
+        subprocess.call(['/bin/chmod', '-R', '777', self.tempd])
+
+        files = os.listdir(self.tempd + '/out')
+        assert len(files) == 1
+
+        f = open(self.tempd + '/out/' + files[0], 'r')
+        output = f.read()
+        print output  # only printed if the test fails
+        assert 'interface-lo.if_octets.tx' in output
+        assert 'cpu-0.cpu-idle' in output
+        assert 'memory.memory-used' in output
+        assert 'apt.pkgsize' in output
+        f.close()
+
+    def testCrawlContainerKafka(self):
+        env = os.environ.copy()
+        mypath = os.path.dirname(os.path.realpath(__file__))
+        os.makedirs(self.tempd + '/out')
+
+        # crawler itself needs to be root
+        process = subprocess.Popen(
+            [
+                '/usr/bin/python', mypath + '/../../crawler/crawler.py',
+                '--url', 'kafka://localhost:9092/test',
+                '--features', 'os,process',
+                '--crawlContainers', self.container['Id'],
+                '--crawlmode', 'OUTCONTAINER',
+                '--numprocesses', '1'
+            ],
+            env=env)
+        stdout, stderr = process.communicate()
+        assert process.returncode == 0
+
+        print stderr
+        print stdout
+
+        kafka = pykafka.KafkaClient(hosts='localhost:9092')
+        topic = kafka.topics['test']
+        consumer = topic.get_simple_consumer()
+        message = consumer.consume()
+        assert '"cmd":"/bin/sleep 60"' in message.value
+
+    def testCrawlContainerKafka2(self):
+        emitters = EmittersManager(urls=['kafka://localhost:9092/test'])
+        crawler = ContainersCrawler(
+            features=['os', 'process'],
+            user_list=self.container['Id'])
+        worker = Worker(emitters=emitters, frequency=-1,
+                        crawler=crawler)
+        worker.iterate()
+        kafka = pykafka.KafkaClient(hosts='localhost:9092')
+        topic = kafka.topics['test']
+        consumer = topic.get_simple_consumer()
+        message = consumer.consume()
+        assert '"cmd":"/bin/sleep 60"' in message.value
+
+        for i in range(1, 5):
+            worker.iterate()
+            message = consumer.consume()
+            assert '"cmd":"/bin/sleep 60"' in message.value
+
+    def testCrawlContainer3(self):
+        env = os.environ.copy()
+        mypath = os.path.dirname(os.path.realpath(__file__))
+        os.makedirs(self.tempd + '/out')
+
+        # crawler itself needs to be root
+        process = subprocess.Popen(
+            [
+                '/usr/bin/python', mypath + '/../../crawler/crawler.py',
+                '--url', 'file://' + self.tempd + '/out/crawler',
+                '--features', 'os,process',
+                '--crawlContainers', self.container['Id'],
+                '--crawlmode', 'OUTCONTAINER',
+                '--numprocesses', '1'
+            ],
+            env=env)
+        stdout, stderr = process.communicate()
+        assert process.returncode == 0
+
+        print stderr
+        print stdout
+
+        subprocess.call(['/bin/chmod', '-R', '777', self.tempd])
+
+        files = os.listdir(self.tempd + '/out')
+        assert len(files) == 1
+
+        f = open(self.tempd + '/out/' + files[0], 'r')
+        output = f.read()
+        print output  # only printed if the test fails
+        assert 'sleep' in output
+        assert 'linux' in output or 'Linux' in output
+        f.close()
+
+    def testCrawlContainerAvoidSetns(self):
+        options = {'avoid_setns': True}
+        crawler = ContainersCrawler(
+            user_list=self.container['Id'],
+            features=['cpu', 'memory', 'interface', 'package'],
+            options=options)
+        frames = list(crawler.crawl())
+        output = str(frames[0])
+        print output  # only printed if the test fails
+        # interface in avoid_setns mode is not supported
+        # assert 'interface-lo' in output
+        # assert 'if_octets_tx=' in output
+        assert 'cpu-0' in output
+        assert 'cpu_nice=' in output
+        assert 'memory' in output
+        assert 'memory_buffered=' in output
+        assert 'apt' in output
+        assert 'pkgarchitecture=' in output
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/functional/test_functional_ctprobe.py b/tests/functional/test_functional_ctprobe.py
new file mode 100644
index 00000000..79192602
--- /dev/null
+++ b/tests/functional/test_functional_ctprobe.py
@@ -0,0 +1,328 @@
+import json
+import logging
+import mock
+import os
+import shutil
+import sys
+import time
+import tempfile
+import unittest
+
+import docker
+import requests.exceptions
+from plugins.systems.ctprobe_container_crawler \
import CTProbeContainerCrawler +from utils.process_utils import start_child + + +# Tests the FprobeContainerCrawler class +# Throws an AssertionError if any test fails + +CTPROBE_FRAME = \ + '[{"data":"xAAAAAABAAYAAAAAAAAAAAIAAAA0AAGAFAABgAgAAQCsEQABCAACAKwRAA4cA' \ + 'AKABQABAAYAAAAGAAIAiEYAAAYAAwARWwAANAACgBQAAYAIAAEArBEADggAAgCsEQABHAAC' \ + 'gAUAAQAGAAAABgACABFbAAAGAAMAiEYAAAgADADhBU3ACAADAAAAAYgIAAcAAAAAeDAABIA' \ + 'sAAGABQABAAEAAAAFAAIABwAAAAUAAwAAAAAABgAEAAMAAAAGAAUAAAAAAA==","metadat' \ + 'a":{"ip-addresses":["172.17.0.14"]}},{"data":"jAAAAAIBAAAAAAAAAAAAAAIAA' \ + 'AA0AAGAFAABgAgAAQCsEQABCAACAKwRAA4cAAKABQABAAYAAAAGAAIAiDYAAAYAAwARWwAA' \ + 'NAACgBQAAYAIAAEArBEADggAAgCsEQABHAACgAUAAQAGAAAABgACABFbAAAGAAMAiDYAAAg' \ + 'ADAAM3QUACAADAAAAAY4=","metadata":{"ip-addresses":["172.17.0.14"]}}]' + + +def simulate_ctprobe(url): + """ simulate writing by ctprobe """ + filename = url.split('://')[1] + with open(filename, 'w') as f: + f.write(CTPROBE_FRAME) + with open(filename + ".tmp", 'w') as f: + f.write(CTPROBE_FRAME) + + +def mocked_add_collector(self, url, ipaddresses, ifname): + code, content = self.send_request('add_collector', + [url, ipaddresses, ifname]) + if code == 200: + # in this case we simulate a file being written... + simulate_ctprobe(url) + return True + else: + raise Exception('HTTP Error %d: %s' % (code, content['error'])) + + +def mocked_start_child(params, pass_fds, null_fds, ign_sigs, setsid=False, + **kwargs): + return start_child(['sleep', '1'], pass_fds, null_fds, ign_sigs, setsid) + + +def mocked_start_child_ctprobe_except(params, pass_fds, null_fds, ign_sigs, + setsid=False, **kwargs): + if params[0] == 'conntrackprobe': + raise Exception('Refusing to start %s' % params[0]) + + +def mocked_session_get(self, path, data=''): + class Session(object): + def __init__(self, status_code, content): + self.status_code = status_code + self.content = json.dumps(content) + + return Session(200, {'error': ''}) + + +def mocked_session_get_fail(self, path, data=''): + class Session(object): + def __init__(self, status_code, content): + self.status_code = status_code + self.content = json.dumps(content) + + return Session(400, {'error': 'Bad request'}) + + +def mocked_ethtool_get_peer_ifindex(ifname): + raise Exception('ethtool exception') + + +def mocked_check_ctprobe_alive(self, pid): + return True + + +# Tests conducted with a single container running. +class CtprobeFunctionalTests(unittest.TestCase): + image_name = 'alpine:latest' + + def setUp(self): + self.docker = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') + try: + if len(self.docker.containers()) != 0: + raise Exception( + "Sorry, this test requires a machine with no docker" + "containers running.") + except requests.exceptions.ConnectionError: + print ("Error connecting to docker daemon, are you in the docker" + "group? 
You need to be in the docker group.") + + self.docker.pull(repository='alpine', tag='latest') + self.container = self.docker.create_container( + image=self.image_name, command='ping -w 30 8.8.8.8') + self.tempd = tempfile.mkdtemp(prefix='crawlertest.') + self.docker.start(container=self.container['Id']) + + self.output_dir = os.path.join(self.tempd, 'crawler-ctprobe') + + self.params = { + 'ctprobe_user': 'nobody', + 'ctprobe_output_dir': self.output_dir, + 'output_filepattern': 'testfile', + } + + logging.basicConfig(stream=sys.stderr) + self.logger = logging.getLogger("crawlutils").setLevel(logging.INFO) + + def tearDown(self): + self.docker.stop(container=self.container['Id']) + self.docker.remove_container(container=self.container['Id']) + + shutil.rmtree(self.tempd) + CTProbeContainerCrawler.ctprobe_pid = 0 + CTProbeContainerCrawler.ifaces_monitored = [] + + @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', + mocked_start_child) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'requests_unixsocket.Session.get', mocked_session_get) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'ConntrackProbeClient.add_collector', mocked_add_collector) + def test_crawl_outcontainer_ctprobe(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: expecting collector output') + + num = len(CTProbeContainerCrawler.ifaces_monitored) + + ctc = CTProbeContainerCrawler() + assert ctc.get_feature() == 'ctprobe' + + # the fake collector writes the single frame immediately + res = [] + for data in ctc.crawl(self.container['Id'], avoid_setns=False, + **self.params): + res.append(data) + assert len(res) == 1 + assert len(CTProbeContainerCrawler.ifaces_monitored) == num + 1 + + @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', + mocked_start_child) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'requests_unixsocket.Session.get', mocked_session_get_fail) + def test_start_netlink_collection_fault1(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: collector cannot be configured') + + ctc = CTProbeContainerCrawler() + assert ctc.get_feature() == 'ctprobe' + + # with ctprobe failing to start, we won't get data + res = [] + for data in ctc.crawl(self.container['Id'], avoid_setns=False, + **self.params): + res.append(data) + assert len(res) == 0 + assert len(CTProbeContainerCrawler.ifaces_monitored) == 0 + + @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', + mocked_start_child) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'requests_unixsocket.Session.get', mocked_session_get) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'ConntrackProbeClient.add_collector', mocked_add_collector) + def test_start_netlink_collection_fault4(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: collector cannot be configured') + + ctprobe_user = self.params['ctprobe_user'] + self.params['ctprobe_user'] = 'user-does-not-exist' + + ctc = CTProbeContainerCrawler() + assert ctc.get_feature() == 'ctprobe' + + # with ctprobe failing to start, we won't get data + assert not ctc.crawl(self.container['Id'], avoid_setns=False, + **self.params) + assert len(CTProbeContainerCrawler.ifaces_monitored) == 0 + + self.params['ctprobe_user'] = ctprobe_user + + @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', + mocked_start_child_ctprobe_except) + @mock.patch('plugins.systems.ctprobe_container_crawler.' 
+ 'requests_unixsocket.Session.get', mocked_session_get) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'ConntrackProbeClient.add_collector', mocked_add_collector) + def test_start_netlink_collection_fault5(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: conntrackprobe fails to start') + + ctc = CTProbeContainerCrawler() + assert ctc.get_feature() == 'ctprobe' + + assert not ctc.crawl(self.container['Id'], avoid_setns=False, + **self.params) + assert len(CTProbeContainerCrawler.ifaces_monitored) == 0 + + assert not ctc.check_ctprobe_alive(CTProbeContainerCrawler.ctprobe_pid) + # this should always fail + assert not ctc.check_ctprobe_alive(1) + + @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', + mocked_start_child) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'requests_unixsocket.Session.get', mocked_session_get) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'ethtool_get_peer_ifindex', mocked_ethtool_get_peer_ifindex) + def test_start_netlink_collection_fault6(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: ethtool throws an error') + + ctc = CTProbeContainerCrawler() + assert ctc.get_feature() == 'ctprobe' + + # with ctprobe failing to start, we won't get data + res = [] + for data in ctc.crawl(self.container['Id'], avoid_setns=False, + **self.params): + res.append(data) + assert len(res) == 0 + assert len(CTProbeContainerCrawler.ifaces_monitored) == 0 + + @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', + mocked_start_child) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'requests_unixsocket.Session.get', mocked_session_get) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'ConntrackProbeClient.add_collector', mocked_add_collector) + @mock.patch('plugins.systems.ctprobe_container_crawler.' + 'CTProbeContainerCrawler.check_ctprobe_alive', + mocked_check_ctprobe_alive) + def test_remove_datafiles(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: datafiles of disappeared interface ' + 'being removed') + + ctc = CTProbeContainerCrawler() + assert ctc.get_feature() == 'ctprobe' + + # we pretend that an interface test.eth0 existed + ifname = 'test.eth0' + CTProbeContainerCrawler.ifaces_monitored.append(ifname) + + self.params['output_filepattern'] = 'ctprobe-{ifname}-{timestamp}' + + # create a datafile for this fake interface + timestamp = int(time.time()) + filepattern = 'ctprobe-{ifname}-{timestamp}' \ + .format(ifname=ifname, timestamp=timestamp) + # have the ctprobe write a file with the ifname in + # the filename + ctc.setup_outputdir(self.output_dir, os.getuid(), os.getgid()) + simulate_ctprobe('file+json://%s/%s' % (self.output_dir, filepattern)) + written_file = os.path.join(self.output_dir, filepattern) + assert os.path.isfile(written_file) + + CTProbeContainerCrawler.next_cleanup = 0 + # calling ctc.crawl() will trigger a cleanup of that file + # since our fake interface never existed + ctc.crawl(self.container['Id'], avoid_setns=False, **self.params) + + # file should be gone now + assert not os.path.isfile(written_file) + + @mock.patch('plugins.systems.ctprobe_container_crawler.start_child', + mocked_start_child) + @mock.patch('plugins.systems.ctprobe_container_crawler.' 
+                'requests_unixsocket.Session.get', mocked_session_get)
+    def test_remove_stale_files(self):
+        logger = logging.getLogger("crawlutils")
+        logger.info('>>> Testcase: stale file being removed')
+
+        ctc = CTProbeContainerCrawler()
+        assert ctc.get_feature() == 'ctprobe'
+
+        # we pretend that an interface test.eth0 existed
+        ifname = 'test.eth0'
+        CTProbeContainerCrawler.ifaces_monitored.append(ifname)
+
+        self.params['output_filepattern'] = 'ctprobe-{ifname}-{timestamp}'
+
+        # have the fake socket-datacollector write a file with the ifname in
+        # the filename
+        ctc.setup_outputdir(self.output_dir, os.getuid(), os.getgid())
+
+        written_file = os.path.join(self.output_dir, 'test.output')
+        with open(written_file, 'a') as f:
+            f.write('hello')
+
+        assert os.path.isfile(written_file)
+
+        # mock the stale file timeout so that our file will get removed
+        # within reasonable time
+        CTProbeContainerCrawler.STALE_FILE_TIMEOUT = 5
+
+        # calling ctc.crawl() will not trigger a cleanup of that file
+        # the first time
+        logger.info('1st crawl')
+        ctc.crawl(self.container['Id'], avoid_setns=False, **self.params)
+
+        # file should still be here
+        assert os.path.isfile(written_file)
+
+        # the next time we will crawl, the file will be removed
+        CTProbeContainerCrawler.next_cleanup = time.time()
+        time.sleep(CTProbeContainerCrawler.STALE_FILE_TIMEOUT + 1)
+
+        logger.info('2nd crawl')
+        ctc.crawl(self.container['Id'], avoid_setns=False, **self.params)
+
+        # file should be gone now
+        assert not os.path.isfile(written_file)
+
+    if __name__ == '__main__':
+        unittest.main()
diff --git a/tests/functional/test_functional_dockerevents.py b/tests/functional/test_functional_dockerevents.py
new file mode 100644
index 00000000..32dc853f
--- /dev/null
+++ b/tests/functional/test_functional_dockerevents.py
@@ -0,0 +1,222 @@
+import unittest
+import docker
+import requests.exceptions
+import tempfile
+import os
+import shutil
+import subprocess
+import commands
+import time
+import multiprocessing
+import signal
+import psutil
+import semantic_version
+from utils.dockerutils import _fix_version
+
+# Tests conducted with a single container running.
+# docker events are supported above docker version 1.8.0
+VERSION_SPEC = semantic_version.Spec('>=1.8.1')
+
+class CrawlerDockerEventTests(unittest.TestCase):
+
+    def setUp(self):
+        self.docker = docker.APIClient(
+            base_url='unix://var/run/docker.sock', version='auto')
+        try:
+            if len(self.docker.containers()) != 0:
+                raise Exception(
+                    "Sorry, this test requires a machine with no docker containers running.")
+        except requests.exceptions.ConnectionError as e:
+            print "Error connecting to docker daemon, are you in the docker group? You need to be in the docker group."
+
+        self.docker.pull(repository='alpine', tag='latest')
+        self.tempd = tempfile.mkdtemp(prefix='crawlertest-events.')
+
+    def tearDown(self):
+        containers = self.docker.containers()
+        for container in containers:
+            self.docker.stop(container=container['Id'])
+            self.docker.remove_container(container=container['Id'])
+
+        shutil.rmtree(self.tempd)
+        #self.__exec_kill_crawlers()
+
+    def __exec_crawler(self, cmd):
+        status, output = commands.getstatusoutput(cmd)
+        assert status == 0
+
+    def __exec_create_container(self):
+        container = self.docker.create_container(
+            image='alpine:latest', command='/bin/sleep 60')
+        self.docker.start(container=container['Id'])
+        return container['Id']
+
+    def __exec_delete_container(self, containerId):
+        self.docker.stop(container=containerId)
+        self.docker.remove_container(container=containerId)
+
+    '''
+    def __exec_kill_crawlers(self):
+        procname = "python"
+        for proc in psutil.process_iter():
+            if proc.name() == procname:
+                #cmdline = proc.cmdline()
+                pid = proc.pid
+                #if 'crawler.py' in cmdline[1]:
+                os.kill(pid, signal.SIGTERM)
+    '''
+
+    '''
+    This is a basic sanity test. It first creates a container and then starts
+    the crawler. In this case, the crawler would miss the create event, but it
+    should still be able to discover already running containers and snapshot
+    them.
+    '''
+    def testCrawlContainer0(self):
+        env = os.environ.copy()
+        mypath = os.path.dirname(os.path.realpath(__file__))
+        os.makedirs(self.tempd + '/out')
+
+        self.__exec_create_container()
+
+        # crawler itself needs to be root
+        process = subprocess.Popen(
+            [
+                '/usr/bin/python', mypath + '/../../crawler/crawler.py',
+                '--url', 'file://' + self.tempd + '/out/crawler',
+                '--features', 'cpu,memory,interface',
+                '--crawlContainers', 'ALL',
+                '--format', 'graphite',
+                '--crawlmode', 'OUTCONTAINER',
+                '--numprocesses', '1'
+            ],
+            env=env)
+        stdout, stderr = process.communicate()
+        assert process.returncode == 0
+
+        subprocess.call(['/bin/chmod', '-R', '777', self.tempd])
+
+        files = os.listdir(self.tempd + '/out')
+        assert len(files) == 1
+
+        f = open(self.tempd + '/out/' + files[0], 'r')
+        output = f.read()
+        assert 'interface-lo.if_octets.tx' in output
+        assert 'cpu-0.cpu-idle' in output
+        assert 'memory.memory-used' in output
+        f.close()
+
+        # clear the output directory
+        shutil.rmtree(os.path.join(self.tempd, 'out'))
+
+    '''
+    In this test, the crawler is started with a high snapshot frequency
+    (60 sec), and a container is created immediately. The expected behaviour
+    is that the crawler gets interrupted and starts snapshotting the
+    container immediately.
+    '''
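(Aside: the early-snapshot behaviour described in the docstring above relies on Docker's event stream. A minimal sketch of that mechanism with docker-py's APIClient, as already used in setUp(), is shown below; the event filtering and the break are illustrative assumptions, not the crawler's actual implementation.)

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock',
                          version='auto')
# block on the event stream; a 'create' or 'start' event means a new
# container can be snapshotted without waiting out the sleep interval
for event in client.events(decode=True):
    if event.get('status') in ('create', 'start'):
        break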
+    def testCrawlContainer1(self):
+        env = os.environ.copy()
+        mypath = os.path.dirname(os.path.realpath(__file__))
+        os.makedirs(self.tempd + '/out')
+
+        # crawler itself needs to be root
+        cmd = ''.join([
+            '/usr/bin/python ', mypath + '/../../crawler/crawler.py ',
+            '--url ', 'file://' + self.tempd + '/out/crawler ',
+            '--features ', 'cpu,memory,interface ',
+            '--crawlContainers ', 'ALL ',
+            '--format ', 'graphite ',
+            '--crawlmode ', 'OUTCONTAINER ',
+            '--frequency ', '60 ',
+            '--numprocesses ', '1 '
+        ])
+
+        crawlerProc = multiprocessing.Process(
+            name='crawler', target=self.__exec_crawler,
+            args=(cmd,))
+
+        createContainerProc = multiprocessing.Process(
+            name='createContainer', target=self.__exec_create_container
+        )
+
+        crawlerProc.start()
+        createContainerProc.start()
+
+        time.sleep(5)
+
+        subprocess.call(['/bin/chmod', '-R', '777', self.tempd])
+
+        files = os.listdir(self.tempd + '/out')
+        assert len(files) == 1
+
+        f = open(self.tempd + '/out/' + files[0], 'r')
+        output = f.read()
+        #print output  # only printed if the test fails
+        assert 'interface-lo.if_octets.tx' in output
+        assert 'cpu-0.cpu-idle' in output
+        assert 'memory.memory-used' in output
+        f.close()
+        # clear the output directory
+        shutil.rmtree(os.path.join(self.tempd, 'out'))
+        crawlerProc.terminate()
+        crawlerProc.join()
+
+    '''
+    In this test, the crawler is started with a shorter snapshot frequency
+    (20 sec), and a container is created immediately. The expected behaviour
+    is that the crawler gets interrupted and starts snapshotting the
+    container immediately.
+
+    We then wait for the crawler's next iteration to ensure that, without a
+    docker event, the crawler will time out and snapshot the container
+    periodically.
+    '''
+    def testCrawlContainer2(self):
+        env = os.environ.copy()
+        mypath = os.path.dirname(os.path.realpath(__file__))
+        os.makedirs(self.tempd + '/out')
+
+        # crawler itself needs to be root
+        cmd = ''.join([
+            '/usr/bin/python ', mypath + '/../../crawler/crawler.py ',
+            '--url ', 'file://' + self.tempd + '/out/crawler ',
+            '--features ', 'cpu,memory,interface ',
+            '--crawlContainers ', 'ALL ',
+            '--format ', 'graphite ',
+            '--crawlmode ', 'OUTCONTAINER ',
+            '--frequency ', '20 ',
+            '--numprocesses ', '1 '
+        ])
+
+        crawlerProc = multiprocessing.Process(
+            name='crawler', target=self.__exec_crawler,
+            args=(cmd,))
+
+        createContainerProc = multiprocessing.Process(
+            name='createContainer', target=self.__exec_create_container
+        )
+
+        crawlerProc.start()
+        createContainerProc.start()
+
+        time.sleep(30)
+
+        subprocess.call(['/bin/chmod', '-R', '777', self.tempd])
+
+        files = os.listdir(self.tempd + '/out')
+        docker_server_version = self.docker.version()['Version']
+        if VERSION_SPEC.match(semantic_version.Version(_fix_version(docker_server_version))):
+            assert len(files) == 2
+
+        f = open(self.tempd + '/out/' + files[0], 'r')
+        output = f.read()
+        #print output  # only printed if the test fails
+        assert 'interface-lo.if_octets.tx' in output
+        assert 'cpu-0.cpu-idle' in output
+        assert 'memory.memory-used' in output
+        f.close()
+        # clear the output directory
+        shutil.rmtree(os.path.join(self.tempd, 'out'))
+        crawlerProc.terminate()
+        crawlerProc.join()
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/functional/test_functional_dockerutils.py b/tests/functional/test_functional_dockerutils.py
new file mode 100644
index 00000000..576f0ef6
--- /dev/null
+++ b/tests/functional/test_functional_dockerutils.py
@@ -0,0 +1,91 @@
+import logging
+import unittest
+import docker
+import requests.exceptions
+import tempfile
+import shutil + +from utils.dockerutils import ( + exec_dockerps, + exec_docker_history, + exec_dockerinspect, + _get_docker_server_version, + _fix_version, + get_docker_container_rootfs_path +) + +# Tests conducted with a single container running. + + +class DockerUtilsTests(unittest.TestCase): + image_name = 'alpine:latest' + long_image_name = 'docker.io/alpine:latest' + + def setUp(self): + self.docker = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') + try: + if len(self.docker.containers()) != 0: + raise Exception( + "Sorry, this test requires a machine with no docker" + "containers running.") + except requests.exceptions.ConnectionError: + print ("Error connecting to docker daemon, are you in the docker" + "group? You need to be in the docker group.") + + self.docker.pull(repository='alpine', tag='latest') + self.container = self.docker.create_container( + image=self.image_name, command='/bin/sleep 60') + self.tempd = tempfile.mkdtemp(prefix='crawlertest.') + self.docker.start(container=self.container['Id']) + + def tearDown(self): + self.docker.stop(container=self.container['Id']) + self.docker.remove_container(container=self.container['Id']) + + shutil.rmtree(self.tempd) + + def test_fix_version(self): + import semantic_version + ver = u'17.03.01-ce' + fixed_ver = _fix_version(ver) + assert fixed_ver == u'17.3.1' + VERSION_SPEC = semantic_version.Spec('>=1.10.0') + assert VERSION_SPEC.match(semantic_version.Version(fixed_ver)) is True + + def test_docker_version(self): + ver = _get_docker_server_version() + import re + pattern = re.compile("^[0-9]+\.[0-9]+\.[0-9]+") + assert pattern.match(ver) + + def test_dockerps(self): + for inspect in exec_dockerps(): + c_long_id = inspect['Id'] + break # there should only be one container anyway + assert self.container['Id'] == c_long_id + + def test_docker_history(self): + history = exec_docker_history(self.container['Id']) + print history[0] + assert self.image_name in history[0][ + 'Tags'] or self.long_image_name in history[0]['Tags'] + + def test_dockerinspect(self): + inspect = exec_dockerinspect(self.container['Id']) + print inspect + assert self.container['Id'] == inspect['Id'] + + def test_get_container_rootfs(self): + root = get_docker_container_rootfs_path(self.container['Id']) + print root + assert root.startswith('/var/lib/docker') + + if __name__ == '__main__': + logging.basicConfig( + filename='test_dockerutils.log', + filemode='a', + format='%(asctime)s %(levelname)s : %(message)s', + level=logging.DEBUG) + + unittest.main() diff --git a/tests/functional/test_functional_fprobe.py b/tests/functional/test_functional_fprobe.py new file mode 100644 index 00000000..0584ead5 --- /dev/null +++ b/tests/functional/test_functional_fprobe.py @@ -0,0 +1,281 @@ +import logging +import mock +import os +import shutil +import sys +import time +import tempfile +import unittest + +import docker +import requests.exceptions +from plugins.systems.fprobe_container_crawler import FprobeContainerCrawler +from utils.process_utils import start_child + + +# Tests the FprobeContainerCrawler class +# Throws an AssertionError if any test fails + +FPROBE_FRAME = \ + '[{"data": "AAUACD6AE4dYsG5IAAGSWAAABngAAAAArBA3AqwQNwEAAAAAAAAAAAAAAAQAA'\ + 'AFiPn/cGT5/3Bsfkez+ABsGAAAAAAAAAAAArBA3AawQNwIAAAAAAAAAAAAAAAYAAAHDPn/cF'\ + 'j5/3BfcUh+QABsGAAAAAAAAAAAArBA3AgoKCgEAAAAAAAAAAAAAAAYAAAFiPn/dmj5//Q2TJ'\ + 'gG7ABgGAAAAAAAAAAAArBA3AawQNwIAAAAAAAAAAAAAAAYAAAHDPn/cGT5/3BvcUx+QABsGA'\ + 
'AAAAAAAAAAArBA3AqwQNwEAAAAAAAAAAAAAAAQAAAFhPn/cGT5/3BsfkNxTABsGAAAAAAAAA'\ + 'AAArBA3AawQNwIAAAAAAAAAAAAAAAYAAAG9Pn/cGT5/3Bvs/h+RABsGAAAAAAAAAAAArBA3A'\ + 'qwQNwEAAAAAAAAAAAAAAAQAAAFhPn/cFj5/3BgfkNxSABsGAAAAAAAAAAAACgoKAawQNwIAA'\ + 'AAAAAAAAAAAAAsAABn8Pn/dfj5//Q0Bu5MmABgGAAAAAAAAAAAA", "metadata": {"send'\ + 'er": "127.0.0.1", "timestamp": 1487957576.000248, "ifname": "vethcfd6842'\ + '", "sport": 46246, "ip-addresses": ["172.16.55.2"], "container-id": "5f2'\ + 'e9fb6168da249e1ef215c41c1454e921a7e4ee722d85191d3027703ea613e"}}]' + + +def simulate_socket_datacollector(params): + """ simulate writing by the socket-datacollector """ + dir_idx = params.index('--dir') + assert dir_idx > 0 + output_dir = params[dir_idx + 1] + + filepattern_idx = params.index('--filepattern') + assert filepattern_idx > 0 + filepattern = params[filepattern_idx + 1] + + filename = os.path.join(output_dir, filepattern) + with open(filename, 'w') as f: + f.write(FPROBE_FRAME) + print 'Write file %s' % filename + with open(filename + ".tmp", 'w') as f: + f.write(FPROBE_FRAME) + + +def mocked_start_child(params, pass_fds, null_fds, ign_sigs, setsid=False, + **kwargs): + if params[0] == 'socket-datacollector': + # in case the socket-datacollector is started, we just write + # the frame without actually starting that program. + simulate_socket_datacollector(params) + + # return appropriate values + return start_child(['sleep', '1'], pass_fds, null_fds, ign_sigs, setsid) + + +def mocked_start_child_fprobe_fail(params, pass_fds, null_fds, ign_sigs, + setsid=False, **kwargs): + if params[0] == 'softflowd': + return start_child(['___no_such_file'], pass_fds, null_fds, ign_sigs, + setsid, **kwargs) + return start_child(['sleep', '1'], pass_fds, null_fds, ign_sigs, setsid, + **kwargs) + + +def mocked_start_child_collector_fail(params, pass_fds, null_fds, ign_sigs, + setsid=False, **kwargs): + if params[0] == 'socket-datacollector': + return start_child(['___no_such_file'], pass_fds, null_fds, ign_sigs, + setsid, **kwargs) + return start_child(['sleep', '1'], pass_fds, null_fds, ign_sigs, + setsid, **kwargs) + + +def mocked_psutil_process_iter(): + class MyProcess(object): + def __init__(self, _name, _cmdline, _pid): + self._name = _name + self._cmdline = _cmdline + self.pid = _pid + + def name(self): + return self._name + + def cmdline(self): + return self._cmdline + yield MyProcess('softflowd', ['-i', 'test.eth0', '127.0.0.1:1234'], 11111) + + +# Tests conducted with a single container running. +class FprobeFunctionalTests(unittest.TestCase): + image_name = 'alpine:latest' + + def setUp(self): + self.docker = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') + try: + if len(self.docker.containers()) != 0: + raise Exception( + "Sorry, this test requires a machine with no docker" + "containers running.") + except requests.exceptions.ConnectionError: + print ("Error connecting to docker daemon, are you in the docker" + "group? 
You need to be in the docker group.") + + self.docker.pull(repository='alpine', tag='latest') + self.container = self.docker.create_container( + image=self.image_name, command='ping -w 30 8.8.8.8') + self.tempd = tempfile.mkdtemp(prefix='crawlertest.') + self.docker.start(container=self.container['Id']) + + self.output_dir = os.path.join(self.tempd, 'crawler-fprobe') + + self.params = { + 'fprobe_user': 'nobody', + 'fprobe_output_dir': self.output_dir, + 'output_filepattern': 'testfile', + 'netflow_version': 10, + } + + logging.basicConfig(stream=sys.stderr) + self.logger = logging.getLogger("crawlutils").setLevel(logging.INFO) + + def tearDown(self): + self.docker.stop(container=self.container['Id']) + self.docker.remove_container(container=self.container['Id']) + + shutil.rmtree(self.tempd) + + @mock.patch('plugins.systems.fprobe_container_crawler.start_child', + mocked_start_child) + def test_crawl_outcontainer_fprobe(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: expecting collector output') + + fc = FprobeContainerCrawler() + assert fc.get_feature() == 'fprobe' + + # the fake collector writes the single frame immediately + res = [] + for data in fc.crawl(self.container['Id'], avoid_setns=False, + **self.params): + res.append(data) + assert len(res) == 1 + + @mock.patch('plugins.systems.fprobe_container_crawler.start_child', + mocked_start_child_fprobe_fail) + def test_start_netflow_collection_fault1(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: fprobe fails to start') + + fc = FprobeContainerCrawler() + assert fc.get_feature() == 'fprobe' + + # with fprobe failing to start, we won't get data + res = [] + for data in fc.crawl(self.container['Id'], avoid_setns=False, + **self.params): + res.append(data) + assert len(res) == 0 + + @mock.patch('plugins.systems.fprobe_container_crawler.start_child', + mocked_start_child_collector_fail) + def test_start_netflow_collection_fault2(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: collector fails to start') + + fc = FprobeContainerCrawler() + assert fc.get_feature() == 'fprobe' + + # with fprobe failing to start, we won't get data + res = [] + for data in fc.crawl(self.container['Id'], avoid_setns=False, + **self.params): + res.append(data) + assert len(res) == 0 + + @mock.patch('plugins.systems.fprobe_container_crawler.start_child', + mocked_start_child) + def test_remove_datafiles(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: datafiles of disappeared interface ' + 'being removed') + + fc = FprobeContainerCrawler() + assert fc.get_feature() == 'fprobe' + + # we pretend that an interface test.eth0 existed + ifname = 'test.eth0' + FprobeContainerCrawler.fprobes_started[ifname] = 1234 + + self.params['output_filepattern'] = 'fprobe-{ifname}-{timestamp}' + + # create a datafile for this fake interface + timestamp = int(time.time()) + filepattern = 'fprobe-{ifname}-{timestamp}'.format(ifname=ifname, + timestamp=timestamp) + params = [ + 'socket-datacollector', + '--dir', self.output_dir, + '--filepattern', filepattern, + ] + + # have the fake socket-datacollector write a file with the ifname in + # the filename + fc.setup_outputdir(self.output_dir, os.getuid(), os.getgid()) + simulate_socket_datacollector(params) + written_file = os.path.join(self.output_dir, filepattern) + assert os.path.isfile(written_file) + + FprobeContainerCrawler.next_cleanup = 0 + # calling fc.crawl() will trigger a cleanup of that file + # since 
our fake interface never existed + fc.crawl(self.container['Id'], avoid_setns=False, **self.params) + + # file should be gone now + assert not os.path.isfile(written_file) + + @mock.patch('plugins.systems.fprobe_container_crawler.psutil.process_iter', + mocked_psutil_process_iter) + def test_interfaces_with_fprobes(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: determine interfaces on which flow probes ' + 'are running') + s = FprobeContainerCrawler.interfaces_with_fprobes() + assert 'test.eth0' in s.keys() + + @mock.patch('plugins.systems.fprobe_container_crawler.start_child', + mocked_start_child) + def test_remove_stale_files(self): + logger = logging.getLogger("crawlutils") + logger.info('>>> Testcase: stale file being removed') + + fc = FprobeContainerCrawler() + assert fc.get_feature() == 'fprobe' + + # we pretend that an interface test.eth0 existed + ifname = 'test.eth0' + FprobeContainerCrawler.fprobes_started[ifname] = 1234 + + self.params['output_filepattern'] = 'fprobe-{ifname}-{timestamp}' + + # have the fake socket-datacollector write a file with the ifname in + # the filename + fc.setup_outputdir(self.output_dir, os.getuid(), os.getgid()) + + written_file = os.path.join(self.output_dir, 'test.output') + with open(written_file, 'a') as f: + f.write('hello') + + assert os.path.isfile(written_file) + + # mock the stale file timeout so that our file will get removed + # with in reasonable time + FprobeContainerCrawler.STALE_FILE_TIMEOUT = 5 + + # calling fc.crawl() will not trigger a cleanup of that file + # the first time + logger.info('1st crawl') + fc.crawl(self.container['Id'], avoid_setns=False, **self.params) + + # file should still be here + assert os.path.isfile(written_file) + + # the next time we will crawl, the file will be removed + FprobeContainerCrawler.next_cleanup = time.time() + time.sleep(FprobeContainerCrawler.STALE_FILE_TIMEOUT + 1) + + logger.info('2nd crawl') + fc.crawl(self.container['Id'], avoid_setns=False, **self.params) + + # file should be gone now + assert not os.path.isfile(written_file) + + if __name__ == '__main__': + unittest.main() diff --git a/tests/functional/test_functional_k8s_environment.py b/tests/functional/test_functional_k8s_environment.py new file mode 100644 index 00000000..707cff51 --- /dev/null +++ b/tests/functional/test_functional_k8s_environment.py @@ -0,0 +1,246 @@ +import unittest +import docker +import requests.exceptions +import tempfile +import os +import shutil +import subprocess +import sys +import json + +# Tests for crawlers in kubernetes crawlers configuration. + +from containers_crawler import ContainersCrawler +from worker import Worker +from emitters_manager import EmittersManager + +import logging + +# Tests conducted with a single container running. 
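(Aside: a sketch of how a Kubernetes-style frame namespace can be assembled from the pod labels defined just below. The tests in this file only assert on the first three of the four components, so the fourth component here is an assumption.)

def k8s_namespace(labels, container_id):
    # e.g. devtest/pod-test/simson/<suffix> for the labels used in setUp()
    return "/".join([
        labels["io.kubernetes.pod.namespace"],
        labels["io.kubernetes.pod.name"],
        labels["io.kubernetes.container.name"],
        container_id[:12],  # assumed unique suffix; not checked by the tests
    ])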
+ +CONT_NAME = "io.kubernetes.container.name" +POD_NAME = "io.kubernetes.pod.name" +POD_UID = "io.kubernetes.pod.uid" +POD_NS = "io.kubernetes.pod.namespace" +K8S_DELIMITER = "/" + + +class ContainersCrawlerTests(unittest.TestCase): + + def setUp(self): + root = logging.getLogger() + root.setLevel(logging.INFO) + ch = logging.StreamHandler(sys.stdout) + ch.setLevel(logging.INFO) + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + ch.setFormatter(formatter) + root.addHandler(ch) + + self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', + version='auto') + self.k8s_labels = dict() + self.k8s_labels[CONT_NAME] = "simson" + self.k8s_labels[POD_NAME] = "pod-test" + self.k8s_labels[POD_UID] = "pod-123" + self.k8s_labels[POD_NS] = "devtest" + try: + if len(self.docker.containers()) != 0: + raise Exception( + "Sorry, this test requires a machine with no docker" + "containers running.") + except requests.exceptions.ConnectionError: + print ("Error connecting to docker daemon, are you in the docker" + "group? You need to be in the docker group.") + + self.start_crawled_container() + + def start_crawled_container(self): + # start a container to be crawled + self.docker.pull(repository='ubuntu', tag='latest') + self.container = self.docker.create_container( + image='ubuntu:latest', labels=self.k8s_labels, command='/bin/sleep 60') + self.tempd = tempfile.mkdtemp(prefix='crawlertest.') + self.docker.start(container=self.container['Id']) + + def tearDown(self): + self.remove_crawled_container() + + shutil.rmtree(self.tempd) + + def remove_crawled_container(self): + self.docker.stop(container=self.container['Id']) + self.docker.remove_container(container=self.container['Id']) + + def testCrawlContainer1(self): + crawler = ContainersCrawler( + features=[ + 'cpu', + 'memory', + 'interface', + 'package'], + environment='kubernetes') + frames = list(crawler.crawl()) + output = str(frames[0]) + print output # only printed if the test fails + assert 'interface-lo' in output + assert 'if_octets_tx=' in output + assert 'cpu-0' in output + assert 'cpu_nice=' in output + assert 'memory' in output + assert 'memory_buffered=' in output + assert 'apt' in output + assert 'pkgarchitecture=' in output + + ''' + Test for graphite o/p format. 
+ ''' + + def testCrawlContainer2(self): + env = os.environ.copy() + mypath = os.path.dirname(os.path.realpath(__file__)) + os.makedirs(self.tempd + '/out') + + # crawler itself needs to be root + process = subprocess.Popen( + [ + '/usr/bin/python', mypath + '/../../crawler/crawler.py', + '--url', 'file://' + self.tempd + '/out/crawler', + '--features', 'cpu,memory,interface', + '--crawlContainers', self.container['Id'], + '--format', 'graphite', + '--crawlmode', 'OUTCONTAINER', + '--environment', 'kubernetes', + '--numprocesses', '1' + ], + env=env) + stdout, stderr = process.communicate() + assert process.returncode == 0 + + print stderr + print stdout + + subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) + + files = os.listdir(self.tempd + '/out') + assert len(files) == 1 + + f = open(self.tempd + '/out/' + files[0], 'r') + output = f.read() + print output # only printed if the test fails + sample_out = output.split('\n')[0] + print sample_out + namespace_parts = sample_out.split(".")[:4] + assert len(namespace_parts) == 4 + assert namespace_parts[0] == self.k8s_labels[POD_NS] + assert namespace_parts[1] == self.k8s_labels[POD_NAME] + assert namespace_parts[2] == self.k8s_labels[CONT_NAME] + assert 'interface-lo.if_octets.tx' in output + assert 'cpu-0.cpu-idle' in output + assert 'memory.memory-used' in output + f.close() + + ''' + Test for csv o/p format + ''' + + def testCrawlContainer3(self): + env = os.environ.copy() + mypath = os.path.dirname(os.path.realpath(__file__)) + os.makedirs(self.tempd + '/out') + + # crawler itself needs to be root + process = subprocess.Popen( + [ + '/usr/bin/python', mypath + '/../../crawler/crawler.py', + '--url', 'file://' + self.tempd + '/out/crawler', + '--features', 'cpu,memory,interface', + '--crawlContainers', self.container['Id'], + '--format', 'csv', + '--crawlmode', 'OUTCONTAINER', + '--environment', 'kubernetes', + '--numprocesses', '1' + ], + env=env) + stdout, stderr = process.communicate() + assert process.returncode == 0 + + print stderr + print stdout + + subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) + + files = os.listdir(self.tempd + '/out') + assert len(files) == 1 + + f = open(self.tempd + '/out/' + files[0], 'r') + output = f.read() + print output # only printed if the test fails + metadata_frame = output.split('\n')[0] + metadata_str = metadata_frame.split()[2] + metadata_json = json.loads(metadata_str) + namespace_str = metadata_json['namespace'] + assert namespace_str + namespace_parts = namespace_str.split(K8S_DELIMITER) + assert len(namespace_parts) == 4 + assert namespace_parts[0] == self.k8s_labels[POD_NS] + assert namespace_parts[1] == self.k8s_labels[POD_NAME] + assert namespace_parts[2] == self.k8s_labels[CONT_NAME] + assert 'interface-lo' in output + assert 'cpu-0' in output + assert 'memory' in output + f.close() + + ''' + Test for json o/p format + ''' + + def testCrawlContainer4(self): + env = os.environ.copy() + mypath = os.path.dirname(os.path.realpath(__file__)) + os.makedirs(self.tempd + '/out') + + # crawler itself needs to be root + process = subprocess.Popen( + [ + '/usr/bin/python', mypath + '/../../crawler/crawler.py', + '--url', 'file://' + self.tempd + '/out/crawler', + '--features', 'cpu,memory,interface', + '--crawlContainers', self.container['Id'], + '--format', 'json', + '--crawlmode', 'OUTCONTAINER', + '--environment', 'kubernetes', + '--numprocesses', '1' + ], + env=env) + stdout, stderr = process.communicate() + assert process.returncode == 0 + + print stderr + print stdout + + 
subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) + + files = os.listdir(self.tempd + '/out') + assert len(files) == 1 + + f = open(self.tempd + '/out/' + files[0], 'r') + output = f.read() + print output # only printed if the test fails + sample_out = output.split('\n')[0] + metadata_json = json.loads(sample_out) + namespace_str = metadata_json['namespace'] + assert namespace_str + namespace_parts = namespace_str.split(K8S_DELIMITER) + assert len(namespace_parts) == 4 + assert namespace_parts[0] == self.k8s_labels[POD_NS] + assert namespace_parts[1] == self.k8s_labels[POD_NAME] + assert namespace_parts[2] == self.k8s_labels[CONT_NAME] + assert 'memory_used' in output + assert 'if_octets_tx' in output + assert 'cpu_idle' in output + f.close() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/functional/test_functional_logs_linker.py b/tests/functional/test_functional_logs_linker.py new file mode 100644 index 00000000..90e23fab --- /dev/null +++ b/tests/functional/test_functional_logs_linker.py @@ -0,0 +1,137 @@ +import unittest +import docker +import os +import shutil +import sys +import subprocess +import plugins_manager + +from containers_logs_linker import DockerContainersLogsLinker +from worker import Worker +from dockercontainer import HOST_LOG_BASEDIR +from utils.misc import get_host_ipaddr + +import logging + + +class LogsLinkerTests(unittest.TestCase): + + def setUp(self): + root = logging.getLogger() + root.setLevel(logging.INFO) + ch = logging.StreamHandler(sys.stdout) + ch.setLevel(logging.INFO) + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + ch.setFormatter(formatter) + root.addHandler(ch) + plugins_manager.runtime_env = None + self.container = {} + self.container_name = 'LogLinkerContainer' + self.host_namespace = get_host_ipaddr() + try: + shutil.rmtree(os.path.join(HOST_LOG_BASEDIR, self.host_namespace, + self.container_name)) + except OSError: + pass + + def startContainer(self): + self.docker = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') + self.docker.pull(repository='ubuntu', tag='latest') + self.container = self.docker.create_container( + image='ubuntu:latest', + command='bash -c "echo hi ; echo hi > /var/log/messages; /bin/sleep 120"', + name=self.container_name) + self.docker.start(container=self.container['Id']) + + def tearDown(self): + try: + self.removeContainer() + shutil.rmtree(os.path.join(HOST_LOG_BASEDIR, self.host_namespace, + self.container_name)) + except Exception: + pass + + def removeContainer(self): + self.docker.stop(container=self.container['Id']) + self.docker.remove_container(container=self.container['Id']) + + def testLinkUnlinkContainer(self): + docker_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace, + self.container_name, 'docker.log') + messages_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace, + self.container_name, 'var/log/messages') + crawler = DockerContainersLogsLinker( + environment='cloudsight', + user_list='ALL', + host_namespace=self.host_namespace) + worker = Worker(crawler=crawler) + + self.startContainer() + worker.iterate() + with open(docker_log, 'r') as log: + assert 'hi' in log.read() + with open(messages_log, 'r') as log: + assert 'hi' in log.read() + assert os.path.exists(docker_log) + assert os.path.exists(messages_log) + assert os.path.islink(docker_log) + assert os.path.islink(messages_log) + + self.removeContainer() + worker.iterate() + assert not os.path.exists(docker_log) + assert not 
os.path.exists(messages_log) + assert not os.path.islink(docker_log) + assert not os.path.islink(messages_log) + + self.startContainer() + worker.iterate() + assert os.path.exists(docker_log) + with open(docker_log, 'r') as log: + assert 'hi' in log.read() + with open(messages_log, 'r') as log: + assert 'hi' in log.read() + assert os.path.exists(messages_log) + assert os.path.islink(docker_log) + assert os.path.islink(messages_log) + + self.removeContainer() + + def testLinkUnlinkContainerCli(self): + docker_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace, + self.container_name, 'docker.log') + messages_log = os.path.join(HOST_LOG_BASEDIR, self.host_namespace, + self.container_name, 'var/log/messages') + + self.startContainer() + + # crawler itself needs to be root + env = os.environ.copy() + mypath = os.path.dirname(os.path.realpath(__file__)) + process = subprocess.Popen( + [ + '/usr/bin/python', mypath + '/../../crawler/containers_logs_linker.py' + ], + env=env) + stdout, stderr = process.communicate() + assert process.returncode == 0 + + print stderr + print stdout + + with open(docker_log, 'r') as log: + assert 'hi' in log.read() + with open(messages_log, 'r') as log: + assert 'hi' in log.read() + assert os.path.exists(docker_log) + assert os.path.exists(messages_log) + assert os.path.islink(docker_log) + assert os.path.islink(messages_log) + + self.removeContainer() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/functional/test_functional_namespace.py b/tests/functional/test_functional_namespace.py new file mode 100644 index 00000000..95514a88 --- /dev/null +++ b/tests/functional/test_functional_namespace.py @@ -0,0 +1,117 @@ +import logging +import shutil +import sys +import tempfile +import time +import unittest + +import docker +import requests.exceptions + +from utils.crawler_exceptions import CrawlTimeoutError +from utils.namespace import run_as_another_namespace + +all_namespaces = ["user", "pid", "uts", "ipc", "net", "mnt"] + + +# Functions used to test the library +def func_args(arg1, arg2): + return "test %s %s" % (arg1, arg2) + +def func_kwargs(arg1='a', arg2='b'): + return "test %s %s" % (arg1, arg2) + +def func_mixed_args(arg1, arg2='b'): + return "test %s %s" % (arg1, arg2) + +def func_no_args(arg="default"): + return "test %s" % (arg) + + +class FooError(Exception): + pass + + +def func_crash(arg, *args, **kwargs): + print locals() + raise FooError("oops") + + +def func_infinite_loop(arg): + while True: + time.sleep(1) + +# Tests conducted with a single container running. + + +class NamespaceLibTests(unittest.TestCase): + image_name = 'alpine:latest' + + def setUp(self): + self.docker = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') + try: + if len(self.docker.containers()) != 0: + raise Exception( + "Sorry, this test requires a machine with no docker " + "containers running.") + except requests.exceptions.ConnectionError: + print ("Error connecting to docker daemon, are you in the docker " + "group? You need to be in the docker group.") + + self.docker.pull(repository='alpine', tag='latest') + self.container = self.docker.create_container( + image=self.image_name, command='/bin/sleep 300') + self.tempd = tempfile.mkdtemp(prefix='crawlertest.') + self.docker.start(container=self.container['Id']) + inspect = self.docker.inspect_container(self.container['Id']) + print inspect + self.pid = str(inspect['State']['Pid']) + + def tearDown(self): + self.docker.stop(container=self.container['Id']) + self.docker.remove_container(container=self.container['Id']) + + shutil.rmtree(self.tempd) + + def test_run_as_another_namespace_function_args(self): + res = run_as_another_namespace( + self.pid, all_namespaces, func_args, "arg1", "arg2") + assert res == "test arg1 arg2" + print sys._getframe().f_code.co_name, 1 + + def test_run_as_another_namespace_function_kwargs(self): + res = run_as_another_namespace( + self.pid, all_namespaces, func_kwargs, arg1="arg1", arg2="arg2") + assert res == "test arg1 arg2" + print sys._getframe().f_code.co_name, 1 + + def test_run_as_another_namespace_function_mixed_args(self): + res = run_as_another_namespace( + self.pid, all_namespaces, func_mixed_args, "arg1", arg2="arg2") + assert res == "test arg1 arg2" + print sys._getframe().f_code.co_name, 1 + + def test_run_as_another_namespace_simple_function_no_args(self): + res = run_as_another_namespace(self.pid, all_namespaces, func_no_args) + assert res == "test default" + print sys._getframe().f_code.co_name, 1 + + def test_run_as_another_namespace_crashing_function(self): + with self.assertRaises(FooError): + run_as_another_namespace( + self.pid, all_namespaces, func_crash, "arg") + + def test_run_as_another_namespace_infinite_loop_function(self): + with self.assertRaises(CrawlTimeoutError): + run_as_another_namespace( + self.pid, all_namespaces, func_infinite_loop, "arg") + + if __name__ == '__main__': + logging.basicConfig( + filename='test_namespace.log', + filemode='a', + format='%(asctime)s %(levelname)s : %(message)s', + level=logging.DEBUG) + + unittest.main() diff --git a/tests/functional/test_functional_plugins.py b/tests/functional/test_functional_plugins.py new file mode 100644 index 00000000..59bdd269 --- /dev/null +++ b/tests/functional/test_functional_plugins.py @@ -0,0 +1,84 @@ +import shutil +import tempfile +import unittest + +import docker +import requests.exceptions +from plugins.systems.cpu_container_crawler import CpuContainerCrawler +from plugins.systems.cpu_host_crawler import CpuHostCrawler +from plugins.systems.memory_container_crawler import MemoryContainerCrawler +from plugins.systems.memory_host_crawler import MemoryHostCrawler +from plugins.systems.os_container_crawler import OSContainerCrawler +from plugins.systems.process_container_crawler import ProcessContainerCrawler + + +# Tests the host and container crawler plugins +# Throws an AssertionError if any test fails + + +# Tests conducted with a single container running. +class HostAndContainerPluginsFunctionalTests(unittest.TestCase): + image_name = 'alpine:latest' + + def setUp(self): + self.docker = docker.APIClient( + base_url='unix://var/run/docker.sock', version='auto') + try: + if len(self.docker.containers()) != 0: + raise Exception( + "Sorry, this test requires a machine with no docker " + "containers running.") + except requests.exceptions.ConnectionError: + print ("Error connecting to docker daemon, are you in the docker " + "group? You need to be in the docker group.") + + self.docker.pull(repository='alpine', tag='latest') + self.container = self.docker.create_container( + image=self.image_name, command='/bin/sleep 60') + self.tempd = tempfile.mkdtemp(prefix='crawlertest.') + self.docker.start(container=self.container['Id']) + + def tearDown(self): + self.docker.stop(container=self.container['Id']) + self.docker.remove_container(container=self.container['Id']) + + shutil.rmtree(self.tempd) + + def test_crawl_invm_cpu(self): + fc = CpuHostCrawler() + cores = len(list(fc.crawl())) + assert cores > 0 + + def test_crawl_invm_mem(self): + fc = MemoryHostCrawler() + cores = len(list(fc.crawl())) + assert cores > 0 + + def test_crawl_outcontainer_cpu(self): + fc = CpuContainerCrawler() + for key, feature, t in fc.crawl(self.container['Id']): + print key, feature + cores = len(list(fc.crawl(self.container['Id']))) + assert cores > 0 + + def test_crawl_outcontainer_os(self): + fc = OSContainerCrawler() + assert len(list(fc.crawl(self.container['Id']))) == 1 + + def test_crawl_outcontainer_processes(self): + fc = ProcessContainerCrawler() + # sleep + crawler + assert len(list(fc.crawl(self.container['Id']))) == 2 + + def test_crawl_outcontainer_processes_mmapfiles(self): + fc = ProcessContainerCrawler() + output = "%s" % list(fc.crawl(self.container['Id'], get_mmap_files='True')) + assert '/bin/busybox' in output + + def test_crawl_outcontainer_mem(self): + fc = MemoryContainerCrawler() + output = "%s" % list(fc.crawl(self.container['Id'])) + assert 'memory_used' in output + + if __name__ == '__main__': + unittest.main() diff --git a/tests/functional/test_functional_safecontainers_crawler.py b/tests/functional/test_functional_safecontainers_crawler.py index ac7e3e6e..8311f261 100644 --- a/tests/functional/test_functional_safecontainers_crawler.py +++ b/tests/functional/test_functional_safecontainers_crawler.py @@ -61,7 +61,7 @@ def setup_plugincont_testing2(self): plugincont_image_path + '/requirements.txt.testing', plugincont_image_path + '/requirements.txt') _platform = platform.linux_distribution() - if _platform[0] != 'Ubuntu1' or _platform[1] < '16.04': + if _platform[0] != 'Ubuntu' or _platform[1] < '16.04': self.seccomp = False src_file = os.getcwd() + \ '/crawler/plugin_containers_manager.py' diff --git a/tests/functional/test_functional_vm_plugins.py b/tests/functional/test_functional_vm_plugins.py new file mode 100644 index 00000000..3f049342 --- /dev/null +++ b/tests/functional/test_functional_vm_plugins.py @@ -0,0 +1,159 @@ +import subprocess +import time +import unittest + +from plugins.systems.connection_vm_crawler import ConnectionVmCrawler +from plugins.systems.interface_vm_crawler import InterfaceVmCrawler +from plugins.systems.memory_vm_crawler import MemoryVmCrawler +from plugins.systems.metric_vm_crawler import MetricVmCrawler +from plugins.systems.os_vm_crawler import os_vm_crawler + +from plugins.systems.process_vm_crawler import process_vm_crawler +from utils.features import ( + ProcessFeature, + MetricFeature, + MemoryFeature, +) + + +# Tests the VM crawler plugins +# Throws an AssertionError if any test fails + +class VmPluginsFunctionalTests(unittest.TestCase): + + SETUP_ONCE = False + vm_descs = [['vm2', '4.0.3.x86_64', 'vanilla', 'x86_64'], + ['vm3', '3.2.0-101-generic_3.2.0-101.x86_64', + 'ubuntu', 'x86_64'], + ['vm4', '3.13.0-24-generic_3.13.0-24.x86_64', + 'ubuntu', 'x86_64'] + ] + + def create_vm_via_bash(self, vmID): + qemu_out_file = "/tmp/psvmi_qemu_out" + serial = "file:" + qemu_out_file
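+ # qemu's "-serial file:..." option mirrors the guest console into + # qemu_out_file; the boot-wait loop below scans that file for the + # "Mounted root" message to decide that the VM has finished booting.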
+ + vmlinuz = "psvmi/tests/vmlinuz/vmlinuz-" + vmID[1] + vm_name = vmID[0] + + disk_file = "psvmi/tests/" + vm_name + "disk.qcow2" + subprocess.call(["cp", "psvmi/tests/disk.qcow2", disk_file]) + disk = "format=raw,file=" + disk_file + + qemu_cmd = subprocess.Popen( + ("qemu-system-x86_64", + "-kernel", + vmlinuz, + "-append", + ("init=psvmi_test_init root=/dev/sda console=ttyAMA0 " + "console=ttyS0"), + "-name", + vm_name, + "-m", + "512", + "-smp", + "1", + "-drive", + disk, + "-display", + "none", + "-serial", + serial)) + + vmID.append(str(qemu_cmd.pid)) # vmID[4]=qemu_pid + + # ugly way to figure out if a VM has booted, could not pipe output + # from qemu properly + vm_ready = False + + while True: + time.sleep(4) + + fr = open(qemu_out_file, "r") + for line in fr.readlines(): + if "Mounted root" in line: + time.sleep(3) + vm_ready = True + break + fr.close() + + if vm_ready is True: + break + + def setUp(self): + if VmPluginsFunctionalTests.SETUP_ONCE is False: + for vm_desc in VmPluginsFunctionalTests.vm_descs: + self.create_vm_via_bash(vm_desc) + VmPluginsFunctionalTests.SETUP_ONCE = True + self.vm_descs = VmPluginsFunctionalTests.vm_descs + + @classmethod + def teardown_class(cls): + for _, _, _, _, pid in VmPluginsFunctionalTests.vm_descs: + subprocess.call(["kill", "-9", pid]) + + def _tearDown(self): + for _, _, _, _, pid in self.vm_descs: + subprocess.call(["kill", "-9", pid]) + # no need to rm qcow disk files since they get destroyed on + # container exit + + def test_crawl_outvm_os(self): + fc = os_vm_crawler() + for _, kernel, distro, arch, pid in self.vm_descs: + for item in fc.crawl(vm_desc=(pid, kernel, distro, arch)): + assert 'Linux' in item + + def test_crawl_outvm_process(self): + fc = process_vm_crawler() + for _, kernel, distro, arch, pid in self.vm_descs: + for item in fc.crawl(vm_desc=(pid, kernel, distro, arch)): + p = ProcessFeature._make(item[1]) + if p.pid == 0: + assert 'swapper' in str(p.pname) + elif p.pname == 'psvmi_test_init': + assert 'devconsole' in str(p.openfiles) + else: + assert p.pid > 0 + + def test_crawl_outvm_mem(self): + fc = MemoryVmCrawler() + for _, kernel, distro, arch, pid in self.vm_descs: + for item in fc.crawl(vm_desc=(pid, kernel, distro, arch)): + meminfo = MemoryFeature._make(item[1]) + assert (meminfo.memory_util_percentage >= 0) + + def test_crawl_outvm_metrics(self): + fc = MetricVmCrawler() + for _, kernel, distro, arch, pid in self.vm_descs: + for item in fc.crawl(vm_desc=(pid, kernel, distro, arch)): + p = MetricFeature._make(item[1]) + if p.pname == 'psvmi_test_init': + assert p.rss > 0 + assert p.vms > 0 + assert p.mempct >= 0 + # strictly speaking > 0, but rounding can bring it down to 0 + + # to see if 100% cpu util shows up for psvmi_test_init + # time.sleep(1) + # print list(crawler.crawl_metrics()) + + # disabled (leading underscore): depends on a self.crawlers fixture + # that this class never defines + def _test_crawl_outvm_modules(self): + for crawler in self.crawlers: + output = crawler.crawl_modules() + assert len(list(output)) > 0 + + def test_crawl_outvm_interface(self): + fc = InterfaceVmCrawler() + for _, kernel, distro, arch, pid in self.vm_descs: + output = fc.crawl(vm_desc=(pid, kernel, distro, arch)) + assert any('lo' in item[0] for item in output) + + def test_crawl_outvm_connections(self): + fc = ConnectionVmCrawler() + for _, kernel, distro, arch, pid in self.vm_descs: + output = fc.crawl(vm_desc=(pid, kernel, distro, arch)) + assert len(list(output)) == 0 # There are no connections + + if __name__ == '__main__': + unittest.main() diff --git a/tests/functional/test_functional_vms_crawler.py b/tests/functional/test_functional_vms_crawler.py new file mode 100644 index 00000000..e864a3c7 --- /dev/null +++ b/tests/functional/test_functional_vms_crawler.py @@ -0,0 +1,147 @@ +import unittest +import tempfile +import os +import subprocess +import time + +# Tests for crawlers in kraken crawlers configuration. + +from vms_crawler import VirtualMachinesCrawler + +# Tests conducted with multiple QEMU test VMs running. + + +class VirtualMachinesCrawlerTests(unittest.TestCase): + + SETUP_ONCE = False + + vmIDs = [['vm2', '4.0.3.x86_64', 'vanilla', 'x86_64'], + ['vm3', '3.2.0-101-generic_3.2.0-101.x86_64', 'ubuntu', 'x86_64'], + ['vm4', '3.13.0-24-generic_3.13.0-24.x86_64', 'ubuntu', 'x86_64'] + ] + + def create_vm_via_bash(self, vmID): + qemu_out_file = "/tmp/psvmi_qemu_out" + serial = "file:" + qemu_out_file + # qemu's "-serial file:..." option mirrors the guest console into + # qemu_out_file; the boot-wait loop below scans it for "Mounted root". + + vmlinuz = "psvmi/tests/vmlinuz/vmlinuz-" + vmID[1] + vm_name = vmID[0] + + disk_file = "psvmi/tests/" + vm_name + "disk.qcow2" + subprocess.call(["cp", "psvmi/tests/disk.qcow2", disk_file]) + disk = "format=raw,file=" + disk_file + + qemu_cmd = subprocess.Popen( + ("qemu-system-x86_64", + "-kernel", + vmlinuz, + "-append", + ("init=psvmi_test_init root=/dev/sda console=ttyAMA0 " + "console=ttyS0"), + "-name", + vm_name, + "-m", + "512", + "-smp", + "1", + "-drive", + disk, + "-display", + "none", + "-serial", + serial)) + + vmID.append(str(qemu_cmd.pid)) # vmID[4]=qemu_pid + + # ugly way to figure out if a VM has booted, could not pipe output + # from qemu properly + vm_ready = False + + while True: + time.sleep(4) + + fr = open(qemu_out_file, "r") + for line in fr.readlines(): + if "Mounted root" in line: + time.sleep(3) + vm_ready = True + break + fr.close() + + if vm_ready is True: + break + + def create_vms(self): + for vmID in VirtualMachinesCrawlerTests.vmIDs: + self.create_vm_via_bash(vmID) + + @classmethod + def teardown_class(cls): + for vmID in VirtualMachinesCrawlerTests.vmIDs: + subprocess.call(["kill", "-9", vmID[4]]) + + def setUp(self): + self.tempd = tempfile.mkdtemp(prefix='crawlertest.') + if VirtualMachinesCrawlerTests.SETUP_ONCE is False: + self.create_vms() + VirtualMachinesCrawlerTests.SETUP_ONCE = True + + def testCrawlVM1(self): + vm_list = [ + 'vm2,4.0.3.x86_64,vanilla,x86_64', + 'vm3,3.2.0-101-generic_3.2.0-101.x86_64,ubuntu,x86_64', + 'vm4,3.13.0-24-generic_3.13.0-24.x86_64,ubuntu,x86_64'] + crawler = VirtualMachinesCrawler( + features=[ + 'os', + 'memory', + 'interface', + 'process'], + user_list=vm_list) + frames = list(crawler.crawl()) + output = str(frames[0]) + print output # only printed if the test fails + assert 'interface-lo' in output + assert 'if_octets_tx=' in output + assert 'memory' in output + assert 'memory_buffered=' in output + + def testCrawlVM2(self): + env = os.environ.copy() + mypath = os.path.dirname(os.path.realpath(__file__)) + os.makedirs(self.tempd + '/out') + + process = subprocess.Popen( + [ + '/usr/bin/python', mypath + '/../../crawler/crawler.py', + '--url', 'file://' + self.tempd + '/out/crawler', + '--features', 'os,memory,interface,process', + '--crawlVMs', 'vm2,4.0.3.x86_64,vanilla,x86_64', + 'vm3,3.2.0-101-generic_3.2.0-101.x86_64,ubuntu,x86_64', + 'vm4,3.13.0-24-generic_3.13.0-24.x86_64,ubuntu,x86_64', + '--crawlmode', 'OUTVM', + '--numprocesses', '1' + ], + env=env) + stdout, stderr = process.communicate() + assert process.returncode == 0 + + print stderr + print stdout + + subprocess.call(['/bin/chmod', '-R', '777', self.tempd]) + + files = os.listdir(self.tempd + '/out') + assert len(files) == 
len(VirtualMachinesCrawlerTests.vmIDs) + + f = open(self.tempd + '/out/' + files[0], 'r') + output = f.read() + print output # only printed if the test fails + assert 'psvmi_test_init' in output + assert 'Linux' in output + assert 'memory_used' in output + assert 'interface-lo' in output + f.close() + + if __name__ == '__main__': + unittest.main() diff --git a/tests/functional/test_logs_in_volumes1.py b/tests/functional/test_logs_in_volumes1.py new file mode 100644 index 00000000..0fbf13b1 --- /dev/null +++ b/tests/functional/test_logs_in_volumes1.py @@ -0,0 +1,96 @@ +import logging +import unittest +import tempfile +import os +import shutil +import mock + +import utils.dockerutils +import dockercontainer +import plugins_manager + +# Tests dockercontainer._get_logfiles_list +# the log file, test1.log is in a host directory +# mounted as volume + + +def get_container_log_files(path, options): + pass + + +@mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=lambda id: 'rootfs') +class DockerContainerTests(unittest.TestCase): + + def setUp(self): + + self.host_log_dir = tempfile.mkdtemp(prefix='host_log_dir.') + self.volume = tempfile.mkdtemp(prefix='volume.') + for logf in ['test1.log', 'test2.log']: + with open(os.path.join(self.volume, logf), 'w') as logp: + logp.write(logf) + + def tearDown(self): + shutil.rmtree(self.volume) + shutil.rmtree(self.host_log_dir) + + def test_get_logfiles_list(self, *args): + + inspect = { + "Id": ("1e744b5e3e11e848863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea" + "24e847"), + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186}, + "Image": ("sha256:07c86167cdc4264926fa5d2894e34a339ad27f730e8cc81a" + "16cd21b7479e8eac"), + "Name": "/pensive_rosalind", + "LogPath": ("/var/lib/docker/containers/1e744b5e3e11e848863fefe9d9" + "a8b3731070c6b0c702a04d2b8ab948ea24e847/1e744b5e3e11e8" + "48863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" + "-json.log"), + "HostnamePath": ("/var/lib/docker/containers/1e744b5e3e11e848863fe" + "fe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" + "/hostname"), + "Mounts": [ + { + "Source": self.volume, + "Destination": "/data"}], + "Config": { + "Cmd": ["bash"], + "Image": "ubuntu:trusty"}, + "docker_image_long_name": "long_name/short_name", + "docker_image_short_name": "short_name", + "docker_image_tag": "image_tag", + "docker_image_registry": "image_registry", + "owner_namespace": "owner_namespace", + "NetworkSettings": {}} + + plugins_manager.runtime_env = None + self.docker_container = \ + dockercontainer.DockerContainer(inspect['Id'], inspect) + + self.docker_container._get_container_log_files = \ + get_container_log_files + self.docker_container.log_file_list = [ + {'name': '/data/test1.log', 'type': None}] + + log_list = self.docker_container._set_logs_list() + log_list = self.docker_container.logs_list + for log in log_list: + if log.name == '/data/test1.log': + self.assertEqual( + log.dest, self.host_log_dir + '/data/test1.log') + self.assertEqual(log.source, + self.volume + '/test1.log') + +if __name__ == '__main__': + logging.basicConfig( + filename='test_dockerutils.log', + filemode='a', + format='%(asctime)s %(levelname)s : %(message)s', + level=logging.DEBUG) + + unittest.main() diff --git a/tests/functional/test_logs_in_volumes_star.py b/tests/functional/test_logs_in_volumes_star.py new file mode 100644 index 00000000..c6460a5c --- /dev/null +++ b/tests/functional/test_logs_in_volumes_star.py @@ -0,0 +1,93 @@ +import logging +import unittest 
+import tempfile +import os +import shutil +import mock + +import utils.dockerutils +import dockercontainer + +# Tests dockercontainer._get_logfiles_list +# the log file, test1.log is in a host directory +# mounted as volume + + +def get_container_log_files(path, options): + pass + + +@mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=lambda id: 'rootfs') +class DockerContainerTests(unittest.TestCase): + + def setUp(self): + + self.host_log_dir = tempfile.mkdtemp(prefix='host_log_dir.') + self.volume = tempfile.mkdtemp(prefix='volume.') + self.log_file_list = ['test1.log', 'test2.log'] + for logf in self.log_file_list: + with open(os.path.join(self.volume, logf), 'w') as logp: + logp.write(logf) + + def tearDown(self): + shutil.rmtree(self.volume) + shutil.rmtree(self.host_log_dir) + + def test_get_logfiles_list(self, *args): + + inspect = { + "Id": ("1e744b5e3e11e848863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea" + "24e847"), + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186}, + "Image": ("sha256:07c86167cdc4264926fa5d2894e34a339ad27f730e8cc81a" + "16cd21b7479e8eac"), + "Name": "/pensive_rosalind", + "LogPath": ("/var/lib/docker/containers/1e744b5e3e11e848863fefe9d9" + "a8b3731070c6b0c702a04d2b8ab948ea24e847/1e744b5e3e11e8" + "48863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" + "-json.log"), + "HostnamePath": ("/var/lib/docker/containers/1e744b5e3e11e848863fe" + "fe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" + "/hostname"), + "Mounts": [ + { + "Source": self.volume, + "Destination": "/data"}], + "Config": { + "Cmd": ["bash"], + "Image": "ubuntu:trusty"}, + "docker_image_long_name": "long_name/short_name", + "docker_image_short_name": "short_name", + "docker_image_tag": "image_tag", + "docker_image_registry": "image_registry", + "owner_namespace": "owner_namespace", + "NetworkSettings": {}} + self.docker_container = dockercontainer.\ + DockerContainer(inspect['Id'], inspect) + + self.docker_container.\ + _get_container_log_files = get_container_log_files + self.docker_container.log_file_list = [ + {'name': '/data/test*.log', 'type': None}] + + self.docker_container._set_logs_list() + log_list = self.docker_container.logs_list + for log in log_list: + if log.name == '/data/test*.log': + assert os.path.basename(log.dest) in self.log_file_list + assert os.path.basename( + log.source) in self.log_file_list + +if __name__ == '__main__': + logging.basicConfig( + filename='test_dockerutils.log', + filemode='a', + format='%(asctime)s %(levelname)s : %(message)s', + level=logging.DEBUG) + + unittest.main() diff --git a/tests/functional/test_logs_no_volumes.py b/tests/functional/test_logs_no_volumes.py new file mode 100644 index 00000000..057a7d30 --- /dev/null +++ b/tests/functional/test_logs_no_volumes.py @@ -0,0 +1,90 @@ +import logging +import unittest +import tempfile +import os +import shutil +import mock + +import utils.dockerutils +import dockercontainer + + +# Tests dockercontainer._get_logfiles_list +# for the case when no volumes are mounted + + +def get_container_log_files(path, options): + pass + + +@mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=lambda id: 'rootfs') +class DockerContainerTests(unittest.TestCase): + + def setUp(self): + + self.host_log_dir = tempfile.mkdtemp(prefix='host_log_dir.') + self.volume = tempfile.mkdtemp(prefix='volume.') + for logf in ['test1.log', 'test2.log']: + with open(os.path.join(self.volume, logf), 'w') as logp: + logp.write(logf) + 
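+ # The files created above are deliberately never mounted into the + # container: the inspect data below declares an empty "Mounts" list, + # exercising the no-volumes path where log paths resolve against the + # (mocked) container rootfs. + 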
+ def tearDown(self): + shutil.rmtree(self.volume) + shutil.rmtree(self.host_log_dir) + + def test_get_logfiles_list(self, *args): + + inspect = { + "Id": ("1e744b5e3e11e848863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea" + "24e847"), + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186}, + "Image": ("sha256:07c86167cdc4264926fa5d2894e34a339ad27f730e8cc81a" + "16cd21b7479e8eac"), + "Name": "/pensive_rosalind", + "LogPath": ("/var/lib/docker/containers/1e744b5e3e11e848863fefe9d9" + "a8b3731070c6b0c702a04d2b8ab948ea24e847/1e744b5e3e11e8" + "48863fefe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" + "-json.log"), + "HostnamePath": ("/var/lib/docker/containers/1e744b5e3e11e848863fe" + "fe9d9a8b3731070c6b0c702a04d2b8ab948ea24e847" + "/hostname"), + "Mounts": [], + "Config": { + "Cmd": ["bash"], + "Image": "ubuntu:trusty"}, + "docker_image_long_name": "long_name/short_name", + "docker_image_short_name": "short_name", + "docker_image_tag": "image_tag", + "docker_image_registry": "image_registry", + "owner_namespace": "owner_namespace", + "NetworkSettings": {}} + self.docker_container = dockercontainer.\ + DockerContainer(inspect['Id'], inspect) + + self.docker_container.\ + _get_container_log_files = get_container_log_files + self.docker_container.log_file_list = [ + {'name': '/data/test1.log', 'type': None}] + + self.docker_container._set_logs_list() + log_list = self.docker_container.logs_list + for log in log_list: + if log.name == '/data/test1.log': + self.assertEqual( + log.dest, self.host_log_dir + + '/data/test1.log' + ) + +if __name__ == '__main__': + logging.basicConfig( + filename='test_dockerutils.log', + filemode='a', + format='%(asctime)s %(levelname)s : %(message)s', + level=logging.DEBUG) + + unittest.main() diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/aufs_mount_init-id b/tests/unit/aufs_mount_init-id new file mode 100644 index 00000000..08dbf2d4 --- /dev/null +++ b/tests/unit/aufs_mount_init-id @@ -0,0 +1 @@ +vol1/id/rootfs-a-b-c diff --git a/tests/unit/btrfs_mount_init-id b/tests/unit/btrfs_mount_init-id new file mode 100644 index 00000000..08dbf2d4 --- /dev/null +++ b/tests/unit/btrfs_mount_init-id @@ -0,0 +1 @@ +vol1/id/rootfs-a-b-c diff --git a/tests/unit/capturing.py b/tests/unit/capturing.py new file mode 100644 index 00000000..29117fe1 --- /dev/null +++ b/tests/unit/capturing.py @@ -0,0 +1,16 @@ +from cStringIO import StringIO +import sys + +# Class used to capture the stdout of a function + + +class Capturing(list): + + def __enter__(self): + self._stdout = sys.stdout + sys.stdout = self._stringio = StringIO() + return self + + def __exit__(self, *args): + self.extend(self._stringio.getvalue().splitlines()) + sys.stdout = self._stdout diff --git a/tests/unit/liberty_connection_stats b/tests/unit/liberty_connection_stats new file mode 100644 index 00000000..75581e60 --- /dev/null +++ b/tests/unit/liberty_connection_stats @@ -0,0 +1 @@ +{"className":"com.ibm.ws.session.monitor.SessionStats","description":"Information on the management interface of the 
MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.websphere.session.monitor.SessionStatsMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"CheckedOutCount","type":"long","description":"ActiveCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/CheckedOutCountValue"},{"name":"WaitQueueSize","type":"long","description":"LiveCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/WaitQueueSizeValue"},{"name":"MinSize","type":"long","description":"CreateCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/MinSizeValue"},{"name":"MaxSize","type":"long","description":"InvalidatedCountbyTimeout","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/MaxSizeValue"},{"name":"Size","type":"long","description":"InvalidatedCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0
wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/SizeValue"},{"name":"Host","type":"long","description":"InvalidatedCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/HostValue"},{"name":"Port","type":"long","description":"InvalidatedCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/PortValue"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes","constructors":[{"name":"com.ibm.ws.session.monitor.SessionStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[]}],"notifications":[],"operations":[]} diff --git a/tests/unit/liberty_jvm_stats b/tests/unit/liberty_jvm_stats new file mode 100644 index 00000000..3cec3cce --- /dev/null +++ b/tests/unit/liberty_jvm_stats @@ -0,0 +1 @@ +{"className":"com.ibm.ws.monitors.helper.JvmStats","description":"Information on the management interface of the 
MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.websphere.monitor.meters.JvmMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"UsedMemory","type":"long","description":"UsedMemory","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/UsedMemory"},{"name":"FreeMemory","type":"long","description":"FreeMemory","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/FreeMemory"},{"name":"Heap","type":"long","description":"Heap","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/Heap"},{"name":"UpTime","type":"long","description":"UpTime","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/UpTime"},{"name":"ProcessCPU","type":"double","description":"ProcessCPU","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuRG91YmxlcQB+AARxAH4ABA=="}},{"value":"double","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/Proce
ssCPU"},{"name":"GcCount","type":"long","description":"GcCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/GcCount"},{"name":"GcTime","type":"long","description":"GcTime","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes/GcTime"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats/attributes","constructors":[{"name":"com.ibm.ws.monitors.helper.JvmStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[{"name":"p1","type":"com.ibm.ws.monitors.helper.JvmMonitorHelper","description":"","descriptor":{"names":[],"values":[]}}]}],"notifications":[],"operations":[]} \ No newline at end of file diff --git a/tests/unit/liberty_mbeans b/tests/unit/liberty_mbeans new file mode 100644 index 00000000..d80a306b --- /dev/null +++ b/tests/unit/liberty_mbeans @@ -0,0 +1,2 @@ +[{"objectName":"WebSphere:type=ServletStats","className":"com.mongodb.management.ConnectionPoolStatistics","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DConnectionPool"},{"objectName":"WebSphere:type=ServletStats,name=com.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet","className":"com.ibm.ws.webcontainer.monitor.ServletStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats"},{"objectName":"java.lang:type=MemoryPool,name=Java heap","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DJava+heap%2Ctype%3DMemoryPool"},{"objectName":"java.lang:type=GarbageCollector,name=Copy","className":"com.ibm.lang.management.GarbageCollectorMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DCopy%2Ctype%3DGarbageCollector"},{"objectName":"WebSphere:name=com.ibm.websphere.config.mbeans.FeatureListMBean","className":"com.ibm.ws.config.featuregen.internal.FeatureListMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.websphere.config.mbeans.FeatureListMBean"},{"objectName":"java.lang:type=MemoryPool,name=class 
storage","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3Dclass+storage%2Ctype%3DMemoryPool"},{"objectName":"osgi.core:type=bundleState,version=1.7,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.framework.BundleState","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Ctype%3DbundleState%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.7"},{"objectName":"WebSphere:name=com.ibm.websphere.runtime.update.RuntimeUpdateNotificationMBean","className":"com.ibm.ws.runtime.update.internal.RuntimeUpdateNotificationMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.websphere.runtime.update.RuntimeUpdateNotificationMBean"},{"objectName":"java.lang:type=GarbageCollector,name=MarkSweepCompact","className":"com.ibm.lang.management.GarbageCollectorMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DMarkSweepCompact%2Ctype%3DGarbageCollector"},{"objectName":"java.lang:type=Memory","className":"com.ibm.lang.management.MemoryMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DMemory"},{"objectName":"java.lang:type=Compilation","className":"com.ibm.lang.management.CompilationMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DCompilation"},{"objectName":"java.util.logging:type=Logging","className":"com.ibm.lang.management.LoggingMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.util.logging%3Atype%3DLogging"},{"objectName":"java.nio:type=BufferPool,name=mapped","className":"com.ibm.lang.management.BufferPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.nio%3Aname%3Dmapped%2Ctype%3DBufferPool"},{"objectName":"WebSphere:name=com.ibm.ws.jmx.mbeans.sessionManagerMBean","className":"com.ibm.ws.session.SessionManagerMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.mbeans.sessionManagerMBean"},{"objectName":"WebSphere:name=com.ibm.ws.config.serverSchemaGenerator","className":"com.ibm.ws.config.schemagen.internal.ServerSchemaGeneratorImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.config.serverSchemaGenerator"},{"objectName":"WebSphere:feature=kernel,name=ServerInfo","className":"com.ibm.ws.kernel.server.internal.ServerInfoMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3Dkernel%2Cname%3DServerInfo"},{"objectName":"WebSphere:type=ThreadPoolStats,name=Default 
Executor","className":"com.ibm.ws.monitors.helper.ThreadPoolStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats"},{"objectName":"WebSphere:name=com.ibm.ws.jmx.mbeans.generatePluginConfig","className":"com.ibm.ws.webcontainer.osgi.mbeans.GeneratePluginConfigMBean","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.mbeans.generatePluginConfig"},{"objectName":"JMImplementation:type=MBeanServerDelegate","className":"com.ibm.ws.kernel.boot.jmx.internal.PlatformMBeanServerDelegate","URL":"/IBMJMXConnectorREST/mbeans/JMImplementation%3Atype%3DMBeanServerDelegate"},{"objectName":"osgi.core:type=packageState,version=1.5,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.framework.PackageState","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Ctype%3DpackageState%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.5"},{"objectName":"WebSphere:feature=CacheAdmin,type=DynaCache,name=DistributedMap","className":"com.ibm.ws.cache.MBeans","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3DCacheAdmin%2Cname%3DDistributedMap%2Ctype%3DDynaCache"},{"objectName":"osgi.compendium:service=cm,version=1.3,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"com.ibm.ws.jmx.internal.ReadOnlyConfigurationAdmin","URL":"/IBMJMXConnectorREST/mbeans/osgi.compendium%3Aframework%3Dorg.eclipse.osgi%2Cservice%3Dcm%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.3"},{"objectName":"WebSphere:type=SessionStats,name=default_host/IBMJMXConnectorREST","className":"com.ibm.ws.session.monitor.SessionStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats"},{"objectName":"WebSphere:feature=channelfw,type=endpoint,name=defaultHttpEndpoint-ssl","className":"com.ibm.websphere.channelfw.EndPointInfo","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3Dchannelfw%2Cname%3DdefaultHttpEndpoint-ssl%2Ctype%3Dendpoint"},{"objectName":"java.lang:type=ClassLoading","className":"com.ibm.lang.management.ClassLoadingMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DClassLoading"},{"objectName":"WebSphere:name=com.ibm.websphere.config.mbeans.ServerXMLConfigurationMBean","className":"com.ibm.ws.config.xml.internal.ServerXMLConfigurationMBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.websphere.config.mbeans.ServerXMLConfigurationMBean"},{"objectName":"com.ibm.lang.management:type=JvmCpuMonitor","className":"com.ibm.lang.management.JvmCpuMonitor","URL":"/IBMJMXConnectorREST/mbeans/com.ibm.lang.management%3Atype%3DJvmCpuMonitor"},{"objectName":"osgi.core:type=serviceState,version=1.7,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.framework.ServiceState","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Ctype%3DserviceState%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.7"},{"objectName":"java.lang:type=OperatingSystem","className":"com.ibm.lang.management.UnixExtendedOperatingSystem","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DOperatingSystem"},{"objectName":"com.ibm.virtualization.management:type=Hypervisor","className":"com.ibm.virtualization.management.HypervisorMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/com.ibm.virtualization.management%3Atype%3DHypervisor"},{"objectName":"java.lang:type=MemoryPool,name=JIT data 
cache","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DJIT+data+cache%2Ctype%3DMemoryPool"},{"objectName":"osgi.core:service=permissionadmin,version=1.2,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.permissionadmin.PermissionAdmin","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Cservice%3Dpermissionadmin%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.2"},{"objectName":"java.lang:type=MemoryPool,name=JIT code cache","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DJIT+code+cache%2Ctype%3DMemoryPool"},{"objectName":"java.lang:type=Runtime","className":"com.ibm.lang.management.RuntimeMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DRuntime"},{"objectName":"java.lang:type=Threading","className":"com.ibm.lang.management.ThreadMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Atype%3DThreading"},{"objectName":"WebSphere:type=JvmStats","className":"com.ibm.ws.monitors.helper.JvmStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Atype%3DJvmStats"},{"objectName":"java.lang:type=MemoryManager,name=J9 non-heap manager","className":"com.ibm.lang.management.MemoryManagerMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3DJ9+non-heap+manager%2Ctype%3DMemoryManager"},{"objectName":"java.nio:type=BufferPool,name=direct","className":"com.ibm.lang.management.BufferPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.nio%3Aname%3Ddirect%2Ctype%3DBufferPool"},{"objectName":"osgi.core:type=framework,version=1.7,framework=org.eclipse.osgi,uuid=e0241740-6baa-0016-1a35-810c8b1d730a","className":"org.apache.aries.jmx.framework.Framework","URL":"/IBMJMXConnectorREST/mbeans/osgi.core%3Aframework%3Dorg.eclipse.osgi%2Ctype%3Dframework%2Cuuid%3De0241740-6baa-0016-1a35-810c8b1d730a%2Cversion%3D1.7"},{"objectName":"WebSphere:service=com.ibm.ws.kernel.filemonitor.FileNotificationMBean","className":"com.ibm.ws.kernel.filemonitor.internal.FileNotificationImpl","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aservice%3Dcom.ibm.ws.kernel.filemonitor.FileNotificationMBean"},{"objectName":"WebSphere:feature=channelfw,type=endpoint,name=defaultHttpEndpoint","className":"com.ibm.websphere.channelfw.EndPointInfo","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3Dchannelfw%2Cname%3DdefaultHttpEndpoint%2Ctype%3Dendpoint"},{"objectName":"com.ibm.virtualization.management:type=GuestOS","className":"com.ibm.virtualization.management.GuestOS","URL":"/IBMJMXConnectorREST/mbeans/com.ibm.virtualization.management%3Atype%3DGuestOS"},{"objectName":"WebSphere:feature=restConnector,type=FileService,name=FileService","className":"com.ibm.ws.filetransfer.internal.mbean.FileService","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3DrestConnector%2Cname%3DFileService%2Ctype%3DFileService"},{"objectName":"java.lang:type=MemoryPool,name=miscellaneous non-heap storage","className":"com.ibm.lang.management.MemoryPoolMXBeanImpl","URL":"/IBMJMXConnectorREST/mbeans/java.lang%3Aname%3Dmiscellaneous+non-heap+storage%2Ctype%3DMemoryPool"},{"objectName":"WebSphere:feature=restConnector,type=FileTransfer,name=FileTransfer","className":"com.ibm.ws.filetransfer.internal.mbean.FileTransfer","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Afeature%3DrestConnector%2Cname%3DFileTransfer%2Ctype%3DFileTransfer"}] + diff --git a/tests/unit/liberty_response_time_details 
b/tests/unit/liberty_response_time_details new file mode 100644 index 00000000..fb46a5cf --- /dev/null +++ b/tests/unit/liberty_response_time_details @@ -0,0 +1 @@ +{"value":{"count":"292","description":"Average Response Time for servlet","maximumValue":"129746827","mean":"1646404.6780821919","minimumValue":"257689","reading":{"count":"292","maximumValue":"129746827","mean":"1646404.6780821919","minimumValue":"257689","standardDeviation":"7747033.106769906","timestamp":"1479283670331","total":"4.80750166E8","unit":"ns","variance":"6.001652195738899E13"},"standardDeviation":"7746816.577149615","total":"4.80750166E8","unit":"ns","variance":"5.980967601894751E13"},"type":{"className":"javax.management.openmbean.CompositeDataSupport","openType":"0"},"openTypes":[{"openTypeClass":"javax.management.openmbean.CompositeType","className":"javax.management.openmbean.CompositeData","typeName":"com.ibm.websphere.monitor.meters.StatisticsMeter","description":"com.ibm.websphere.monitor.meters.StatisticsMeter","items":[{"key":"count","description":"count","type":"1"},{"key":"description","description":"description","type":"2"},{"key":"maximumValue","description":"maximumValue","type":"1"},{"key":"mean","description":"mean","type":"3"},{"key":"minimumValue","description":"minimumValue","type":"1"},{"key":"reading","description":"reading","type":"4"},{"key":"standardDeviation","description":"standardDeviation","type":"3"},{"key":"total","description":"total","type":"3"},{"key":"unit","description":"unit","type":"2"},{"key":"variance","description":"variance","type":"3"}]},"java.lang.Long","java.lang.String","java.lang.Double",{"openTypeClass":"javax.management.openmbean.CompositeType","className":"javax.management.openmbean.CompositeData","typeName":"com.ibm.websphere.monitor.meters.StatisticsReading","description":"com.ibm.websphere.monitor.meters.StatisticsReading","items":[{"key":"count","description":"count","type":"1"},{"key":"maximumValue","description":"maximumValue","type":"1"},{"key":"mean","description":"mean","type":"3"},{"key":"minimumValue","description":"minimumValue","type":"1"},{"key":"standardDeviation","description":"standardDeviation","type":"3"},{"key":"timestamp","description":"timestamp","type":"1"},{"key":"total","description":"total","type":"3"},{"key":"unit","description":"unit","type":"2"},{"key":"variance","description":"variance","type":"3"}]}]} \ No newline at end of file diff --git a/tests/unit/liberty_response_time_details_mocked b/tests/unit/liberty_response_time_details_mocked new file mode 100644 index 00000000..a78e0ba2 --- /dev/null +++ b/tests/unit/liberty_response_time_details_mocked @@ -0,0 +1 @@ +{"className":"com.ibm.ws.webcontainer.monitor.ServletStats","URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats","value":{"count":"292","description":"Average Response Time for 
servlet","maximumValue":"129746827","mean":"1646404.6780821919","minimumValue":"257689","reading":{"count":"292","maximumValue":"129746827","mean":"1646404.6780821919","minimumValue":"257689","standardDeviation":"7747033.106769906","timestamp":"1479283670331","total":"4.80750166E8","unit":"ns","variance":"6.001652195738899E13"},"standardDeviation":"7746816.577149615","total":"4.80750166E8","unit":"ns","variance":"5.980967601894751E13"},"type":{"className":"javax.management.openmbean.CompositeDataSupport","openType":"0"},"openTypes":[{"openTypeClass":"javax.management.openmbean.CompositeType","className":"javax.management.openmbean.CompositeData","typeName":"com.ibm.websphere.monitor.meters.StatisticsMeter","description":"com.ibm.websphere.monitor.meters.StatisticsMeter","items":[{"key":"count","description":"count","type":"1"},{"key":"description","description":"description","type":"2"},{"key":"maximumValue","description":"maximumValue","type":"1"},{"key":"mean","description":"mean","type":"3"},{"key":"minimumValue","description":"minimumValue","type":"1"},{"key":"reading","description":"reading","type":"4"},{"key":"standardDeviation","description":"standardDeviation","type":"3"},{"key":"total","description":"total","type":"3"},{"key":"unit","description":"unit","type":"2"},{"key":"variance","description":"variance","type":"3"}]},"java.lang.Long","java.lang.String","java.lang.Double",{"openTypeClass":"javax.management.openmbean.CompositeType","className":"javax.management.openmbean.CompositeData","typeName":"com.ibm.websphere.monitor.meters.StatisticsReading","description":"com.ibm.websphere.monitor.meters.StatisticsReading","items":[{"key":"count","description":"count","type":"1"},{"key":"maximumValue","description":"maximumValue","type":"1"},{"key":"mean","description":"mean","type":"3"},{"key":"minimumValue","description":"minimumValue","type":"1"},{"key":"standardDeviation","description":"standardDeviation","type":"3"},{"key":"timestamp","description":"timestamp","type":"1"},{"key":"total","description":"total","type":"3"},{"key":"unit","description":"unit","type":"2"},{"key":"variance","description":"variance","type":"3"}]}]} diff --git a/tests/unit/liberty_servlet_stats b/tests/unit/liberty_servlet_stats new file mode 100644 index 00000000..00e843ec --- /dev/null +++ b/tests/unit/liberty_servlet_stats @@ -0,0 +1 @@ +{"className":"com.ibm.ws.webcontainer.monitor.ServletStats","description":"Information on the management interface of the 
MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.ws.webcontainer.monitor.ServletStatsMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"RequestCountDetails","type":"javax.management.openmbean.CompositeData","description":"RequestCountDetails","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.CompositeType","value":"rO0ABXNyAChqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5Db21wb3NpdGVUeXBltYdG61oHn0ICAAJMABFuYW1lVG9EZXNjcmlwdGlvbnQAE0xqYXZhL3V0aWwvVHJlZU1hcDtMAApuYW1lVG9UeXBlcQB+AAF4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AANMAAh0eXBlTmFtZXEAfgADeHB0AChqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5Db21wb3NpdGVEYXRhdAAoY29tLmlibS53ZWJzcGhlcmUubW9uaXRvci5tZXRlcnMuQ291bnRlcnEAfgAGc3IAEWphdmEudXRpbC5UcmVlTWFwDMH2Pi0lauYDAAFMAApjb21wYXJhdG9ydAAWTGphdmEvdXRpbC9Db21wYXJhdG9yO3hwcHcEAAAABHQADGN1cnJlbnRWYWx1ZXEAfgAKdAALZGVzY3JpcHRpb25xAH4AC3QAB3JlYWRpbmdxAH4ADHQABHVuaXRxAH4ADXhzcQB+AAdwdwQAAAAEcQB+AApzcgAlamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uU2ltcGxlVHlwZR6/T/jcZXgnAgAAeHEAfgACdAAOamF2YS5sYW5nLkxvbmdxAH4AEXEAfgARcQB+AAtzcQB+AA90ABBqYXZhLmxhbmcuU3RyaW5ncQB+ABNxAH4AE3EAfgAMc3EAfgAAcQB+AAV0AC9jb20uaWJtLndlYnNwaGVyZS5tb25pdG9yLm1ldGVycy5Db3VudGVyUmVhZGluZ3EAfgAVc3EAfgAHcHcEAAAAA3QABWNvdW50cQB+ABd0AAl0aW1lc3RhbXBxAH4AGHQABHVuaXRxAH4AGXhzcQB+AAdwdwQAAAADcQB+ABdxAH4AEHEAfgAYcQB+ABBxAH4AGXEAfgASeHEAfgANcQB+ABJ4"}},{"value":"com.ibm.websphere.monitor.meters.Counter","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/RequestCountDetails"},{"name":"ResponseTimeDetails","type":"javax.management.openmbean.CompositeData","description":"ResponseTimeDetails","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.CompositeType","value":"rO0ABXNyAChqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5Db21wb3NpdGVUeXBltYdG61oHn0ICAAJMABFuYW1lVG9EZXNjcmlwdGlvbnQAE0xqYXZhL3V0aWwvVHJlZU1hcDtMAApuYW1lVG9UeXBlcQB+AAF4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AANMAAh0eXBlTmFtZXEAfgADeHB0AChqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5Db21wb3NpdGVEYXRhdAAwY29tLmlibS53ZWJzcGhlcmUubW9uaXRvci5tZXRlcnMuU3RhdGlzdGljc01ldGVycQB+AAZzcgARamF2YS51dGlsLlRyZWVNYXAMwfY+LSVq5gMAAUwACmNvbXBhcmF0b3J0ABZMamF2YS91dGlsL0NvbXBhcmF0b3I7eHBwdwQAAAAKdAAFY291bnRxAH4ACnQAC2Rlc2NyaXB0aW9ucQB+AAt0AAxtYXhpbXVtVmFsdWVxAH4ADHQABG1lYW5xAH4ADXQADG1pbmltdW1WYWx1ZXEAfgAOdAAHcmVhZGluZ3EAfgAPdAARc3RhbmRhcmREZXZpYXRpb25xAH4AEHQABXRvdGFscQB+ABF0AAR1bml0cQB+ABJ0AAh2YXJpYW5jZXEAfgATeHNxAH4AB3B3BAAAAApxAH4ACnNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cQB+AAJ0AA5qYXZhLmxhbmcuTG9uZ3EAfgAXcQB+ABdxAH4AC3NxAH4AFXQAEGphdmEubGFuZy5TdHJpbmdxAH4AGXEAfgAZcQB+AAxxAH4AFnEAfgANc3EAfgAVdAAQamF2YS5sYW5nLkRvdWJsZXEAfgAbcQB+ABtxAH4ADnEAfgAWcQB+AA9zcQB+AABxAH4ABXQAMmNvbS5pYm0ud2Vic3BoZXJlLm1vbml0b3IubWV0ZXJzLlN0YXRpc3RpY3NSZWFkaW5ncQB+AB1zcQB+AAdwdwQAAAAJdAAFY291bnRxAH4AH3QADG1heGltdW1WYWx1ZXEAfgAgdAAEbWVhbnEAfgAhdAAMbWluaW11bVZhbHVlcQB+ACJ0ABFzdGFuZGFyZERldmlhdGlvbnEAfgAjdAAJdGltZXN0YW1wcQB+ACR0AAV0b3RhbHEAfgAldAAEdW5pdHEAfgAmdAAIdmFyaWFuY2VxAH4AJ3hzcQB+AAdwdwQAAAAJcQB+
AB9xAH4AFnEAfgAgcQB+ABZxAH4AIXEAfgAacQB+ACJxAH4AFnEAfgAjcQB+ABpxAH4AJHEAfgAWcQB+ACVxAH4AGnEAfgAmcQB+ABhxAH4AJ3EAfgAaeHEAfgAQcQB+ABpxAH4AEXEAfgAacQB+ABJxAH4AGHEAfgATcQB+ABp4"}},{"value":"com.ibm.websphere.monitor.meters.StatisticsMeter","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/ResponseTimeDetails"},{"name":"Description","type":"java.lang.String","description":"Description","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuU3RyaW5ncQB+AARxAH4ABA=="}},{"value":"java.lang.String","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/Description"},{"name":"ServletName","type":"java.lang.String","description":"ServletName","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuU3RyaW5ncQB+AARxAH4ABA=="}},{"value":"java.lang.String","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/ServletName"},{"name":"RequestCount","type":"long","description":"RequestCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/RequestCount"},{"name":"ResponseTime","type":"double","description":"ResponseTime","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuRG91YmxlcQB+AARxAH4ABA=="}},{"value":"double","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/ResponseTime"},{"name":"AppName","type":"java.lang.String","description":"A
ppName","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuU3RyaW5ncQB+AARxAH4ABA=="}},{"value":"java.lang.String","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes/AppName"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Dcom.ibm.ws.jmx.connector.server.rest.JMXRESTProxyServlet%2Ctype%3DServletStats/attributes","constructors":[{"name":"com.ibm.ws.webcontainer.monitor.ServletStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[{"name":"p1","type":"java.lang.String","description":"","descriptor":{"names":[],"values":[]}},{"name":"p2","type":"java.lang.String","description":"","descriptor":{"names":[],"values":[]}}]}],"notifications":[],"operations":[]} \ No newline at end of file diff --git a/tests/unit/liberty_session_stats b/tests/unit/liberty_session_stats new file mode 100644 index 00000000..25fd34b3 --- /dev/null +++ b/tests/unit/liberty_session_stats @@ -0,0 +1 @@ +{"className":"com.ibm.ws.session.monitor.SessionStats","description":"Information on the management interface of the MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.websphere.session.monitor.SessionStatsMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"ActiveCount","type":"long","description":"ActiveCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/ActiveCount"},{"name":"LiveCount","type":"long","description":"LiveCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/LiveCount"},{"name":"CreateCount","type":"long","description":"CreateCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeX
BlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/CreateCount"},{"name":"InvalidatedCountbyTimeout","type":"long","description":"InvalidatedCountbyTimeout","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/InvalidatedCountbyTimeout"},{"name":"InvalidatedCount","type":"long","description":"InvalidatedCount","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0AA5qYXZhLmxhbmcuTG9uZ3EAfgAEcQB+AAQ="}},{"value":"long","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes/InvalidatedCount"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3Ddefault_host%2FIBMJMXConnectorREST%2Ctype%3DSessionStats/attributes","constructors":[{"name":"com.ibm.ws.session.monitor.SessionStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[]}],"notifications":[],"operations":[]} \ No newline at end of file diff --git a/tests/unit/liberty_thread_pool_stats b/tests/unit/liberty_thread_pool_stats new file mode 100644 index 00000000..661bba5b --- /dev/null +++ b/tests/unit/liberty_thread_pool_stats @@ -0,0 +1 @@ +{"className":"com.ibm.ws.monitors.helper.ThreadPoolStats","description":"Information on the management interface of the 
MBean","descriptor":{"names":["immutableInfo","interfaceClassName","mxbean"],"values":[{"value":"true","type":"java.lang.String"},{"value":"com.ibm.websphere.monitor.meters.ThreadPoolMXBean","type":"java.lang.String"},{"value":"true","type":"java.lang.String"}]},"attributes":[{"name":"PoolName","type":"java.lang.String","description":"PoolName","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABBqYXZhLmxhbmcuU3RyaW5ncQB+AARxAH4ABA=="}},{"value":"java.lang.String","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats/attributes/PoolName"},{"name":"ActiveThreads","type":"int","description":"ActiveThreads","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABFqYXZhLmxhbmcuSW50ZWdlcnEAfgAEcQB+AAQ="}},{"value":"int","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats/attributes/ActiveThreads"},{"name":"PoolSize","type":"int","description":"PoolSize","descriptor":{"names":["openType","originalType"],"values":[{"value":null,"type":{"className":"javax.management.openmbean.SimpleType","value":"rO0ABXNyACVqYXZheC5tYW5hZ2VtZW50Lm9wZW5tYmVhbi5TaW1wbGVUeXBlHr9P+NxleCcCAAB4cgAjamF2YXgubWFuYWdlbWVudC5vcGVubWJlYW4uT3BlblR5cGWAZBqR6erePAIAA0wACWNsYXNzTmFtZXQAEkxqYXZhL2xhbmcvU3RyaW5nO0wAC2Rlc2NyaXB0aW9ucQB+AAJMAAh0eXBlTmFtZXEAfgACeHB0ABFqYXZhLmxhbmcuSW50ZWdlcnEAfgAEcQB+AAQ="}},{"value":"int","type":"java.lang.String"}]},"isIs":false,"isReadable":true,"isWritable":false,"URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats/attributes/PoolSize"}],"attributes_URL":"/IBMJMXConnectorREST/mbeans/WebSphere%3Aname%3DDefault+Executor%2Ctype%3DThreadPoolStats/attributes","constructors":[{"name":"com.ibm.ws.monitors.helper.ThreadPoolStats","description":"Public constructor of the MBean","descriptor":{"names":[],"values":[]},"signature":[{"name":"p1","type":"java.lang.String","description":"","descriptor":{"names":[],"values":[]}},{"name":"p2","type":"java.lang.Object","description":"","descriptor":{"names":[],"values":[]}}]}],"notifications":[],"operations":[]} \ No newline at end of file diff --git a/tests/unit/mock_environ_file b/tests/unit/mock_environ_file new file mode 100644 index 00000000..79bfbe3f --- /dev/null +++ b/tests/unit/mock_environ_file @@ -0,0 +1 @@ +HOME=/TERM=linuxPATH=/sbin:/bin diff --git a/tests/unit/mock_pynvml.py b/tests/unit/mock_pynvml.py new file mode 100644 index 00000000..03f1e38d --- /dev/null +++ b/tests/unit/mock_pynvml.py @@ -0,0 +1,44 @@ +#class pynvml() +import collections + +Memory = collections.namedtuple('Memory', 'total used free') +Utilization = collections.namedtuple('Utilization', 'gpu memory') + +NVML_TEMPERATURE_GPU = 0 + +class DummyProcess(): + 
pid = 1234 + usedGpuMemory = 273285120 + +def nvmlInit(): + pass + +def nvmlShutdown(): + pass + +def nvmlDeviceGetCount(): + return 1 + +def nvmlDeviceGetHandleByIndex(arg): + return 0 + +def nvmlDeviceGetTemperature(arg1, arg2): + return 31 + +def nvmlDeviceGetMemoryInfo(arg): + retVal = 12205 * 1024 * 1024 + return Memory(total=retVal, used=0, free=retVal) + +def nvmlDeviceGetPowerUsage(arg): + return 27000 + +def nvmlDeviceGetEnforcedPowerLimit(arg): + return 149000 + +def nvmlDeviceGetUtilizationRates(arg): + return Utilization(gpu=0, memory=0) + +def nvmlDeviceGetComputeRunningProcesses(arg): + p = DummyProcess() + return [p] + #return [{'pid': 1234, 'usedGpuMemory': 273285120}] diff --git a/tests/unit/proc_mounts_aufs b/tests/unit/proc_mounts_aufs new file mode 100644 index 00000000..49f40144 --- /dev/null +++ b/tests/unit/proc_mounts_aufs @@ -0,0 +1,33 @@ +rootfs / rootfs rw 0 0 +sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 +proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 +udev /dev devtmpfs rw,relatime,size=4008360k,nr_inodes=1002090,mode=755 0 0 +devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 +tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=804824k,mode=755 0 0 +/dev/dm-1 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 +none /sys/fs/cgroup tmpfs rw,relatime,size=4k,mode=755 0 0 +none /sys/fs/fuse/connections fusectl rw,relatime 0 0 +none /sys/kernel/debug debugfs rw,relatime 0 0 +none /sys/kernel/security securityfs rw,relatime 0 0 +cgroup /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0 +cgroup /sys/fs/cgroup/cpu cgroup rw,relatime,cpu 0 0 +cgroup /sys/fs/cgroup/cpuacct cgroup rw,relatime,cpuacct 0 0 +none /sys/firmware/efi/efivars efivarfs rw,relatime 0 0 +none /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0 +none /run/shm tmpfs rw,nosuid,nodev,relatime 0 0 +cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0 +none /run/user tmpfs rw,nosuid,nodev,noexec,relatime,size=102400k,mode=755 0 0 +none /sys/fs/pstore pstore rw,relatime 0 0 +cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 +cgroup /sys/fs/cgroup/freezer cgroup rw,relatime,freezer 0 0 +cgroup /sys/fs/cgroup/blkio cgroup rw,relatime,blkio 0 0 +cgroup /sys/fs/cgroup/perf_event cgroup rw,relatime,perf_event 0 0 +cgroup /sys/fs/cgroup/hugetlb cgroup rw,relatime,hugetlb 0 0 +/dev/sda2 /boot ext2 rw,relatime 0 0 +/dev/sda1 /boot/efi vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro 0 0 +systemd /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0 +/dev/dm-1 /var/lib/docker/aufs ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 +gaufsd-fuse /run/user/1000/gaufs fuse.gaufsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1001 0 0 +none /var/lib/docker/aufs/mnt/e29e2a3403c1825008a1c53c61d4bd41774df3833cc5d8a12926fc6de39a466b aufs rw,relatime,si=90ef4e398f8a3d10,dio 0 0 +shm /var/lib/docker/containers/c751d4e5a334df29466b0fff65ea721317372d2c5b56012e371923ddaaa4f95a/shm tmpfs rw,nosuid,nodev,noexec,relatime,size=65536k 0 0 +proc /run/docker/netns/41815b5eedd6 proc rw,nosuid,nodev,noexec,relatime 0 0 diff --git a/tests/unit/proc_mounts_btrfs b/tests/unit/proc_mounts_btrfs new file mode 100644 index 00000000..e4f52134 --- /dev/null +++ b/tests/unit/proc_mounts_btrfs @@ -0,0 +1,33 @@ +rootfs / rootfs rw 0 0 +sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 +proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 +udev /dev devtmpfs 
rw,relatime,size=4008360k,nr_inodes=1002090,mode=755 0 0 +devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 +tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=804824k,mode=755 0 0 +/dev/dm-1 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 +none /sys/fs/cgroup tmpfs rw,relatime,size=4k,mode=755 0 0 +none /sys/fs/fuse/connections fusectl rw,relatime 0 0 +none /sys/kernel/debug debugfs rw,relatime 0 0 +none /sys/kernel/security securityfs rw,relatime 0 0 +cgroup /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0 +cgroup /sys/fs/cgroup/cpu cgroup rw,relatime,cpu 0 0 +cgroup /sys/fs/cgroup/cpuacct cgroup rw,relatime,cpuacct 0 0 +none /sys/firmware/efi/efivars efivarfs rw,relatime 0 0 +none /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0 +none /run/shm tmpfs rw,nosuid,nodev,relatime 0 0 +cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0 +none /run/user tmpfs rw,nosuid,nodev,noexec,relatime,size=102400k,mode=755 0 0 +none /sys/fs/pstore pstore rw,relatime 0 0 +cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 +cgroup /sys/fs/cgroup/freezer cgroup rw,relatime,freezer 0 0 +cgroup /sys/fs/cgroup/blkio cgroup rw,relatime,blkio 0 0 +cgroup /sys/fs/cgroup/perf_event cgroup rw,relatime,perf_event 0 0 +cgroup /sys/fs/cgroup/hugetlb cgroup rw,relatime,hugetlb 0 0 +/dev/sda2 /boot ext2 rw,relatime 0 0 +/dev/sda1 /boot/efi vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro 0 0 +systemd /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0 +/dev/dm-1 /var/lib/docker/btrfs ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 +gbtrfsd-fuse /run/user/1000/gbtrfs fuse.gbtrfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1001 0 0 +none /var/lib/docker/btrfs/mnt/e29e2a3403c1825008a1c53c61d4bd41774df3833cc5d8a12926fc6de39a466b btrfs rw,relatime,si=90ef4e398f8a3d10,dio 0 0 +shm /var/lib/docker/containers/c751d4e5a334df29466b0fff65ea721317372d2c5b56012e371923ddaaa4f95a/shm tmpfs rw,nosuid,nodev,noexec,relatime,size=65536k 0 0 +proc /run/docker/netns/41815b5eedd6 proc rw,nosuid,nodev,noexec,relatime 0 0 diff --git a/tests/unit/proc_mounts_devicemapper b/tests/unit/proc_mounts_devicemapper new file mode 100644 index 00000000..5b8cccdc --- /dev/null +++ b/tests/unit/proc_mounts_devicemapper @@ -0,0 +1,28 @@ +rootfs / rootfs rw 0 0 +proc /proc proc rw,relatime 0 0 +sysfs /sys sysfs rw,seclabel,relatime 0 0 +devtmpfs /dev devtmpfs rw,seclabel,relatime,size=3916200k,nr_inodes=979050,mode=755 0 0 +devpts /dev/pts devpts rw,seclabel,relatime,gid=5,mode=620,ptmxmode=000 0 0 +tmpfs /dev/shm tmpfs rw,seclabel,relatime 0 0 +/dev/mapper/vg_oc3262877066-lv_root / ext4 rw,seclabel,relatime,data=ordered 0 0 +none /selinux selinuxfs rw,relatime 0 0 +devtmpfs /dev devtmpfs rw,seclabel,relatime,size=3916200k,nr_inodes=979050,mode=755 0 0 +/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0 +none /proc/sys/fs/binfmt_misc binfmt_misc rw,relatime 0 0 +cgroup /cgroup/cpuset cgroup rw,relatime,cpuset 0 0 +cgroup /cgroup/cpu cgroup rw,relatime,cpu 0 0 +cgroup /cgroup/cpuacct cgroup rw,relatime,cpuacct 0 0 +cgroup /cgroup/memory cgroup rw,relatime,memory 0 0 +cgroup /cgroup/devices cgroup rw,relatime,devices 0 0 +cgroup /cgroup/freezer cgroup rw,relatime,freezer 0 0 +cgroup /cgroup/net_cls cgroup rw,relatime,net_cls 0 0 +cgroup /cgroup/blkio cgroup rw,relatime,blkio 0 0 +/etc/auto.misc /misc autofs rw,relatime,fd=7,pgrp=2897,timeout=300,minproto=5,maxproto=5,indirect 0 0 +-hosts 
/net autofs rw,relatime,fd=13,pgrp=2897,timeout=300,minproto=5,maxproto=5,indirect 0 0 +/etc/auto.gsa /gsa autofs rw,relatime,fd=19,pgrp=2897,timeout=300,minproto=5,maxproto=5,indirect 0 0 +/etc/auto.gsaro /gsaro autofs rw,relatime,fd=25,pgrp=2897,timeout=300,minproto=5,maxproto=5,indirect 0 0 +/dev/mapper/vg_oc3262877066-lv_root /var/lib/docker/devicemapper ext4 rw,seclabel,relatime,data=ordered 0 0 +gvfs-fuse-daemon /root/.gvfs fuse.gvfs-fuse-daemon rw,nosuid,nodev,relatime,user_id=0,group_id=0 0 0 +sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0 +/dev/mapper/docker-253:2-29492713-65fe676c24fe1faea1f06e222cc3811cc9b651c381702ca4f787ffe562a5e39b /var/lib/docker/devicemapper/mnt/65fe676c24fe1faea1f06e222cc3811cc9b651c381702ca4f787ffe562a5e39b ext4 rw,seclabel,relatime,stripe=16,data=ordered 0 0 +proc /var/run/docker/netns/65fe676c24fe proc rw,relatime 0 0 diff --git a/tests/unit/proc_mounts_vfs b/tests/unit/proc_mounts_vfs new file mode 100644 index 00000000..e93f154f --- /dev/null +++ b/tests/unit/proc_mounts_vfs @@ -0,0 +1,33 @@ +rootfs / rootfs rw 0 0 +sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 +proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 +udev /dev devtmpfs rw,relatime,size=4008360k,nr_inodes=1002090,mode=755 0 0 +devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 +tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=804824k,mode=755 0 0 +/dev/dm-1 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 +none /sys/fs/cgroup tmpfs rw,relatime,size=4k,mode=755 0 0 +none /sys/fs/fuse/connections fusectl rw,relatime 0 0 +none /sys/kernel/debug debugfs rw,relatime 0 0 +none /sys/kernel/security securityfs rw,relatime 0 0 +cgroup /sys/fs/cgroup/cpuset cgroup rw,relatime,cpuset 0 0 +cgroup /sys/fs/cgroup/cpu cgroup rw,relatime,cpu 0 0 +cgroup /sys/fs/cgroup/cpuacct cgroup rw,relatime,cpuacct 0 0 +none /sys/firmware/efi/efivars efivarfs rw,relatime 0 0 +none /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0 +none /run/shm tmpfs rw,nosuid,nodev,relatime 0 0 +cgroup /sys/fs/cgroup/memory cgroup rw,relatime,memory 0 0 +none /run/user tmpfs rw,nosuid,nodev,noexec,relatime,size=102400k,mode=755 0 0 +none /sys/fs/pstore pstore rw,relatime 0 0 +cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 +cgroup /sys/fs/cgroup/freezer cgroup rw,relatime,freezer 0 0 +cgroup /sys/fs/cgroup/blkio cgroup rw,relatime,blkio 0 0 +cgroup /sys/fs/cgroup/perf_event cgroup rw,relatime,perf_event 0 0 +cgroup /sys/fs/cgroup/hugetlb cgroup rw,relatime,hugetlb 0 0 +/dev/sda2 /boot ext2 rw,relatime 0 0 +/dev/sda1 /boot/efi vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro 0 0 +systemd /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0 +/dev/dm-1 /var/lib/docker/vfs ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 +gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1001 0 0 +none /var/lib/docker/vfs/mnt/e29e2a3403c1825008a1c53c61d4bd41774df3833cc5d8a12926fc6de39a466b vfs rw,relatime,si=90ef4e398f8a3d10,dio 0 0 +shm /var/lib/docker/containers/c751d4e5a334df29466b0fff65ea721317372d2c5b56012e371923ddaaa4f95a/shm tmpfs rw,nosuid,nodev,noexec,relatime,size=65536k 0 0 +proc /run/docker/netns/41815b5eedd6 proc rw,nosuid,nodev,noexec,relatime 0 0 diff --git a/tests/unit/proc_pid_mounts_devicemapper b/tests/unit/proc_pid_mounts_devicemapper new file mode 100644 index 00000000..97ab51a2 --- /dev/null +++ 
b/tests/unit/proc_pid_mounts_devicemapper @@ -0,0 +1,20 @@ +rootfs / rootfs rw 0 0 +/dev/mapper/docker-253:2-29492713-65fe676c24fe1faea1f06e222cc3811cc9b651c381702ca4f787ffe562a5e39b / ext4 rw,seclabel,relatime,stripe=16,data=ordered 0 0 +proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 +tmpfs /dev tmpfs rw,seclabel,nosuid,mode=755 0 0 +devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=666 0 0 +shm /dev/shm tmpfs rw,seclabel,nosuid,nodev,noexec,relatime,size=65536k 0 0 +mqueue /dev/mqueue mqueue rw,seclabel,nosuid,nodev,noexec,relatime 0 0 +sysfs /sys sysfs ro,seclabel,nosuid,nodev,noexec,relatime 0 0 +/dev/mapper/vg_oc3262877066-lv_root /etc/resolv.conf ext4 rw,seclabel,relatime,data=ordered 0 0 +/dev/mapper/vg_oc3262877066-lv_root /etc/hostname ext4 rw,seclabel,relatime,data=ordered 0 0 +/dev/mapper/vg_oc3262877066-lv_root /etc/hosts ext4 rw,seclabel,relatime,data=ordered 0 0 +devpts /dev/console devpts rw,seclabel,relatime,gid=5,mode=620,ptmxmode=000 0 0 +proc /proc/asound proc ro,nosuid,nodev,noexec,relatime 0 0 +proc /proc/bus proc ro,nosuid,nodev,noexec,relatime 0 0 +proc /proc/fs proc ro,nosuid,nodev,noexec,relatime 0 0 +proc /proc/irq proc ro,nosuid,nodev,noexec,relatime 0 0 +proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0 +proc /proc/sysrq-trigger proc ro,nosuid,nodev,noexec,relatime 0 0 +tmpfs /proc/kcore tmpfs rw,seclabel,nosuid,mode=755 0 0 +tmpfs /proc/timer_stats tmpfs rw,seclabel,nosuid,mode=755 0 0 diff --git a/tests/unit/test_app_apache.py b/tests/unit/test_app_apache.py new file mode 100644 index 00000000..7f1d55b2 --- /dev/null +++ b/tests/unit/test_app_apache.py @@ -0,0 +1,288 @@ +from unittest import TestCase +import mock +from plugins.applications.apache import apache_crawler +from plugins.applications.apache.feature import ApacheFeature +from plugins.applications.apache.apache_container_crawler \ + import ApacheContainerCrawler +from plugins.applications.apache.apache_host_crawler \ + import ApacheHostCrawler +from utils.crawler_exceptions import CrawlError +from requests.exceptions import ConnectionError + + +# expected format from apache status page +def mocked_wrong_status_page(host, port): + return ('No Acceptable status page format') + + +def mocked_urllib2_open(request): + return MockedURLResponse() + + +def mocked_urllib2_open_with_zero(request): + return MockedURLResponseWithZero() + + +def mocked_no_status_page(host, port): + raise Exception + + +def mocked_retrieve_status_page(host, port): + return ('Total Accesses: 172\n' + 'Total kBytes: 1182\n' + 'CPULoad: 2.34827\n' + 'Uptime: 1183\n' + 'ReqPerSec: .145393\n' + 'BytesPerSec: 1023.13\n' + 'BytesPerReq: 7037.02\n' + 'BusyWorkers: 2\n' + 'IdleWorkers: 9\n' + 'Scoreboard: __R_W______......G..C...' + 'DSKLI...............................' + '...................................................' + ) + + +class MockedURLResponse(object): + + def read(self): + return ('Total Accesses: 172\n' + 'Total kBytes: 1182\n' + 'CPULoad: 2.34827\n' + 'Uptime: 1183\n' + 'ReqPerSec: .145393\n' + 'BytesPerSec: 1023.13\n' + 'BytesPerReq: 7037.02\n' + 'BusyWorkers: 2\n' + 'IdleWorkers: 9\n' + 'Scoreboard: __R_W______......G..' + 'C...DSKLI........................' + '..........................................................' 
+ ) + + +class MockedURLResponseWithZero(object): + + def read(self): + return ('Total Accesses: 172\n' + 'Total kBytes: 1182\n' + 'CPULoad: 2.34827\n' + 'ReqPerSec: .145393\n' + 'BytesPerSec: 1023.13\n' + 'BytesPerReq: 7037.02\n' + 'BusyWorkers: 2\n' + 'IdleWorkers: 9\n' + 'Scoreboard: __R_W______......G..C...' + 'DSKLI................................' + '..................................................' + ) + + +class MockedApacheContainer1(object): + + def __init__( + self, + container_id, + ): + ports = "[ {\"containerPort\" : \"80\"} ]" + self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": + {"annotation.io.kubernetes.container.ports": ports}}} + + +class MockedApacheContainer2(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["80"] + return ports + + +class MockedApacheContainer3(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["1234"] + return ports + + +class MockedNoPortContainer(object): + + def __init__( + self, + container_id, + ): + self.image_name = 'httpd-container' + + def get_container_ip(self): + return '1.2.3.4' + + def get_container_ports(self): + ports = [] + return ports + + +class MockedNoNameContainer(object): + + def __init__(self, container_id): + self.image_name = 'dummy' + + +class ApacheCrawlTests(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch('urllib2.urlopen', mocked_urllib2_open_with_zero) + def test_ok_with_zero(self): + status = apache_crawler.retrieve_metrics() + assert status == ApacheFeature( + BusyWorkers='2', + IdleWorkers='9', + waiting_for_connection='9', + starting_up='1', + reading_request='1', + sending_reply='1', + keepalive_read='1', + dns_lookup='1', + closing_connection='1', + logging='1', + graceful_finishing='1', + idle_worker_cleanup='1', + BytesPerSec='1023.13', + BytesPerReq='7037.02', + ReqPerSec='.145393', + Uptime='0', + Total_kBytes='1182', + Total_Accesses='172') + + @mock.patch('urllib2.urlopen', mocked_urllib2_open) + def test_ok(self): + status = apache_crawler.retrieve_metrics() + assert status == ApacheFeature( + BusyWorkers='2', + IdleWorkers='9', + waiting_for_connection='9', + starting_up='1', + reading_request='1', + sending_reply='1', + keepalive_read='1', + dns_lookup='1', + closing_connection='1', + logging='1', + graceful_finishing='1', + idle_worker_cleanup='1', + BytesPerSec='1023.13', + BytesPerReq='7037.02', + ReqPerSec='.145393', + Uptime='1183', + Total_kBytes='1182', + Total_Accesses='172') + + @mock.patch('plugins.applications.apache.' + 'apache_crawler.retrieve_status_page', + mocked_no_status_page) + def test_hundle_ioerror(self): + with self.assertRaises(CrawlError): + apache_crawler.retrieve_metrics() + + @mock.patch('plugins.applications.apache.' + 'apache_crawler.retrieve_status_page', + mocked_wrong_status_page) + def test_hundle_parseerror(self): + with self.assertRaises(CrawlError): + apache_crawler.retrieve_metrics() + + +class ApacheHostTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = ApacheHostCrawler() + self.assertEqual(c.get_feature(), 'apache') + + @mock.patch('plugins.applications.apache.' 
+ 'apache_crawler.retrieve_status_page', + mocked_retrieve_status_page) + def test_get_metrics(self): + c = ApacheHostCrawler() + emitted = c.crawl()[0] + self.assertEqual(emitted[0], 'apache') + self.assertIsInstance(emitted[1], ApacheFeature) + self.assertEqual(emitted[2], 'application') + + +class ApacheContainerTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = ApacheContainerCrawler() + self.assertEqual(c.get_feature(), 'apache') + + @mock.patch('plugins.applications.apache.' + 'apache_crawler.retrieve_status_page', + mocked_retrieve_status_page) + @mock.patch(("plugins.applications.apache.apache_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + @mock.patch('dockercontainer.DockerContainer', + MockedApacheContainer1) + def test_apache_container_crawler_forkube(self, *args): + c = ApacheContainerCrawler() + emitted = c.crawl()[0] + self.assertEqual(emitted[0], 'apache') + self.assertIsInstance(emitted[1], ApacheFeature) + self.assertEqual(emitted[2], 'application') + + @mock.patch('plugins.applications.apache.' + 'apache_crawler.retrieve_status_page', + mocked_retrieve_status_page) + @mock.patch(("plugins.applications.apache.apache_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + @mock.patch('dockercontainer.DockerContainer', + MockedApacheContainer2) + def test_apache_container_crawler_docker(self, *args): + c = ApacheContainerCrawler() + emitted = c.crawl()[0] + self.assertEqual(emitted[0], 'apache') + self.assertIsInstance(emitted[1], ApacheFeature) + self.assertEqual(emitted[2], 'application') + + @mock.patch('dockercontainer.DockerContainer', + MockedApacheContainer3) + def test_no_available_ports(self): + c = ApacheContainerCrawler() + c.crawl() + pass + + @mock.patch('plugins.applications.apache.' + 'apache_crawler.retrieve_status_page', + mocked_no_status_page) + @mock.patch(("plugins.applications.apache.apache_container_crawler." 
+ "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + @mock.patch('dockercontainer.DockerContainer', + MockedApacheContainer1) + def test_no_accessible_endpoint(self, *kwargs): + c = ApacheContainerCrawler() + with self.assertRaises(ConnectionError): + c.crawl("mockcontainer") diff --git a/tests/unit/test_app_db2.py b/tests/unit/test_app_db2.py new file mode 100644 index 00000000..7e8d8dfe --- /dev/null +++ b/tests/unit/test_app_db2.py @@ -0,0 +1,244 @@ +import mock +import pip +from unittest import TestCase +from plugins.applications.db2 import db2_crawler +from plugins.applications.db2.feature import DB2Feature +from plugins.applications.db2.db2_container_crawler \ + import DB2ContainerCrawler +from plugins.applications.db2.db2_host_crawler \ + import DB2HostCrawler +from utils.crawler_exceptions import CrawlError +from requests.exceptions import ConnectionError + + +pip.main(['install', 'ibm_db']) + + +class MockedDB2Container1(object): + + def __init__(self, container_id): + ports = "[ {\"containerPort\" : \"50000\"} ]" + self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": + {"annotation.io.kubernetes.container.ports": ports}}} + + +class MockedDB2Container2(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["50000"] + return ports + + +class MockedDB2Container3(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["1234"] + return ports + + +def mocked_dbi_conn_error(ibm_db_conn): + raise Exception("error") + + +def mocked_dbi_conn(ibm_db_conn): + return + + +def mocked_db_exec_error(sql): + raise Exception("error") + + +def mocked_db_conn(req, opt1, opt2): + return + + +def mocked_ibm_db_dbi_conn(object): + conn = mocked_conn() + return conn + + +class mocked_conn(): + def cursor(obj): + return + + def execute(sql): + return + + +def mocked_retrieve_metrics(host, user, password, db): + + attribute = DB2Feature( + "dbCapacity", + "dbVersion", + "instanceName", + "productName", + "dbName", + "serviceLevel", + "instanceConn", + "instanceUsedMem", + "dbConn", + "usedLog", + "transcationInDoubt", + "xlocksEscalation", + "locksEscalation", + "locksTimeOut", + "deadLock", + "lastBackupTime", + "dbStatus", + "instanceStatus", + "bpIndexHitRatio", + "bpDatahitRatio", + "sortsInOverflow", + "agetnsWait", + "updateRows", + "insertRows", + "selectedRows", + "deleteRows", + "selects", + "selectSQLs", + "dynamicSQLs", + "rollbacks", + "commits", + "bpTempIndexHitRatio", + "bpTempDataHitRatio" + ) + + return attribute + + +def mocked_retrieve_metrics_error(host, user, password, db): + raise CrawlError + + +class DB2CrawlTests(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch('ibm_db_dbi.Connection', mocked_dbi_conn_error) + def test_conn_error(self): + with self.assertRaises(CrawlError): + db2_crawler.retrieve_metrics() + + @mock.patch('ibm_db.connect', mocked_db_conn) + @mock.patch('ibm_db_dbi.Connection', mocked_ibm_db_dbi_conn) + @mock.patch('ibm_db.execute', mocked_dbi_conn_error) + def test_exec_error(self): + with self.assertRaises(CrawlError): + db2_crawler.retrieve_metrics() + + @mock.patch('ibm_db.connect', mocked_db_conn) + @mock.patch('ibm_db_dbi.Connection') + def test_ok(self, mock_connect): + status = db2_crawler.retrieve_metrics() + self.assertIsInstance(status, 
DB2Feature) + + +class DB2HostTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = DB2HostCrawler() + self.assertEqual(c.get_feature(), 'db2') + + @mock.patch('plugins.applications.db2.' + 'db2_crawler.retrieve_metrics', + mocked_retrieve_metrics) + def test_get_metrics(self): + c = DB2HostCrawler() + options = {"password": "password", "user": "db2inst1", "db": "sample"} + emitted = c.crawl(**options)[0] + self.assertEqual(emitted[0], 'db2') + self.assertIsInstance(emitted[1], DB2Feature) + self.assertEqual(emitted[2], 'application') + + @mock.patch('plugins.applications.db2.' + 'db2_crawler.retrieve_metrics', + mocked_retrieve_metrics_error) + def test_get_metrics_error(self): + with self.assertRaises(CrawlError): + c = DB2HostCrawler() + c.crawl()[0] + + +class DB2ContainerTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = DB2ContainerCrawler() + self.assertEqual(c.get_feature(), 'db2') + + @mock.patch('plugins.applications.db2.' + 'db2_crawler.retrieve_metrics', + mocked_retrieve_metrics) + @mock.patch('dockercontainer.DockerContainer', + MockedDB2Container1) + @mock.patch(("plugins.applications.db2.db2_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_db2_container_crawler_forkube(self, *kwargs): + c = DB2ContainerCrawler() + options = {"password": "password", "user": "db2inst1", "db": "sample"} + emitted = c.crawl(1234, **options)[0] + self.assertEqual(emitted[0], 'db2') + self.assertIsInstance(emitted[1], DB2Feature) + self.assertEqual(emitted[2], 'application') + + @mock.patch('plugins.applications.db2.' + 'db2_crawler.retrieve_metrics', + mocked_retrieve_metrics) + @mock.patch('dockercontainer.DockerContainer', + MockedDB2Container2) + @mock.patch(("plugins.applications.db2.db2_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_db2_container_crawler_fordocker(self, *kwargs): + c = DB2ContainerCrawler() + options = {"password": "password", "user": "db2inst1", "db": "sample"} + emitted = c.crawl(1234, **options)[0] + self.assertEqual(emitted[0], 'db2') + self.assertIsInstance(emitted[1], DB2Feature) + self.assertEqual(emitted[2], 'application') + + @mock.patch('dockercontainer.DockerContainer', + MockedDB2Container3) + def test_no_available_port(self): + c = DB2ContainerCrawler() + c.crawl("mockcontainer") + pass + + @mock.patch('plugins.applications.db2.' + 'db2_crawler.retrieve_metrics', + mocked_retrieve_metrics_error) + @mock.patch('dockercontainer.DockerContainer', + MockedDB2Container2) + @mock.patch(("plugins.applications.db2.db2_container_crawler." 
+ "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_no_accessible_endpoint(self, *args): + c = DB2ContainerCrawler() + with self.assertRaises(ConnectionError): + options = {"password": "password", + "user": "db2inst1", "db": "sample"} + c.crawl(1234, **options)[0] diff --git a/tests/unit/test_app_liberty.py b/tests/unit/test_app_liberty.py new file mode 100644 index 00000000..a2d04575 --- /dev/null +++ b/tests/unit/test_app_liberty.py @@ -0,0 +1,264 @@ +from unittest import TestCase +import mock +from plugins.applications.liberty import liberty_crawler +from plugins.applications.liberty import feature +from plugins.applications.liberty.liberty_container_crawler \ + import LibertyContainerCrawler +from plugins.applications.liberty.liberty_host_crawler \ + import LibertyHostCrawler +from utils.crawler_exceptions import CrawlError +from requests.exceptions import ConnectionError + + +def mocked_urllib2_open(request): + return MockedURLResponse() + + +def mock_status_value(user, password, url): + raise CrawlError + + +class MockedLibertyContainer1(object): + + def __init__(self, container_id): + ports = "[ {\"containerPort\" : \"9443\"} ]" + self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": + {"annotation.io.kubernetes.container.ports": ports}}} + + +class MockedLibertyContainer2(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["9443"] + return ports + + +class MockedLibertyContainer3(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["1234"] + return ports + + +class MockedURLResponse(object): + def read(self): + return open('tests/unit/liberty_response_time_details_mocked', + 'r').read() + + +def server_status_value(user, password, url): + url_list = url.lstrip('/').split("/") + url_list = filter(lambda a: a != '', url_list) + tmp_word = url_list[len(url_list)-1] + last_word = tmp_word.split('%3D') + last_word = last_word[len(last_word)-1] + + file_value = { + "mbeans": 'tests/unit/liberty_mbeans', + "ServletStats": 'tests/unit/liberty_servlet_stats', + "ResponseTimeDetails": 'tests/unit/liberty_response_time_details', + "JvmStats": 'tests/unit/liberty_jvm_stats', + "ThreadPoolStats": 'tests/unit/liberty_thread_pool_stats', + "SessionStats": 'tests/unit/liberty_session_stats', + "ConnectionPool": 'tests/unit/liberty_connection_stats' + } + + return_value = { + "ServletName": + '{"value":"JMXRESTProxyServlet","type":"java.lang.String"}', + "AppName": '{"value":"com.ibm.ws.jmx.connector.server.rest",\ + "type":"java.lang.String"}', + "Heap": '{"value":"31588352","type":"java.lang.Long"}', + "FreeMemory": '{"value":"9104704","type":"java.lang.Long"}', + "UsedMemory": '{"value":"23213312","type":"java.lang.Long"}', + "ProcessCPU": + '{"value":"0.07857719811500322","type":"java.lang.Double"}', + "GcCount": '{"value":"1325","type":"java.lang.Long"}', + "GcTime": '{"value":"1001","type":"java.lang.Long"}', + "UpTime": '{"value":"155755366","type":"java.lang.Long"}', + "ActiveThreads": '{"value":"1","type":"java.lang.Integer"}', + "PoolSize": '{"value":"4","type":"java.lang.Integer"}', + "PoolName": '{"value":"Default Executor","type":"java.lang.String"}', + "CreateCount": '{"value":"1","type":"java.lang.Long"}', + "LiveCount": '{"value":"0","type":"java.lang.Long"}', + "ActiveCount": 
'{"value":"0","type":"java.lang.Long"}', + "InvalidatedCount": '{"value":"1","type":"java.lang.Long"}', + "InvalidatedCountbyTimeout": '{"value":"2","type":"java.lang.Long"}', + "CheckedOutCountValue": '{"value":"1","type":"java.lang.Long"}', + "WaitQueueSizeValue": '{"value":"2","type":"java.lang.Long"}', + "MinSizeValue": '{"value":"3","type":"java.lang.Long"}', + "MaxSizeValue": '{"value":"4","type":"java.lang.Long"}', + "SizeValue": '{"value":"7","type":"java.lang.Long"}', + "HostValue": '{"value":"test","type":"java.lang.Long"}', + "PortValue": '{"value":"12","type":"java.lang.Long"}' + } + + if last_word in file_value: + return open(file_value.get(last_word), 'r').read() + + if last_word in return_value: + return return_value.get(last_word) + + +class LibertyCrawlTests(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_hundle_ioerror(self): + with self.assertRaises(CrawlError): + liberty_crawler.retrieve_status_page("user", "pass", "localhost") + + @mock.patch('urllib2.urlopen', mocked_urllib2_open) + def test_read(self): + liberty_crawler.retrieve_status_page("user", "pass", "localhost") + self.assertNotIsInstance(liberty_crawler.retrieve_metrics(), + feature.LibertyServletFeature) + + @mock.patch('plugins.applications.liberty.' + 'liberty_crawler.retrieve_status_page', + side_effect=server_status_value) + def test_ok(self, server_status_value): + status = list(liberty_crawler.retrieve_metrics()) + assert status == [('liberty_servlet_status', + feature.LibertyServletFeature( + name='JMXRESTProxyServlet', + appName='com.ibm.ws.jmx.connector.server.rest', + reqCount='292', + responseMean='1646404.6780821919', + responseMax='129746827', + responseMin='257689'), + 'application'), + ('liberty_jvm_status', + feature.LibertyJVMFeature( + heap='31588352', + freeMemory='9104704', + usedMemory='23213312', + processCPU='0.07857719811500322', + gcCount='1325', + gcTime='1001', + upTime='155755366'), + 'application'), + ('liberty_thread_status', + feature.LibertyThreadFeature( + activeThreads='1', + poolSize='4', + poolName='Default Executor'), + 'application'), + ('liberty_session_status', + feature.LibertySessionFeature( + name='default_host/IBMJMXConnectorREST', + createCount='1', + liveCount='0', + activeCount='0', + invalidatedCount='1', + invalidatedCountByTimeout='2'), + 'application'), + ('liberty_mongo_connection_status', + feature.LibertyMongoConnectionFeature( + checkedOutCount='1', + waitQueueSize='2', + maxSize='4', + minSize='3', + host='test', + port='12', + size='7'), + 'application')] + + +class LibertyHostTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = LibertyHostCrawler() + self.assertEqual(c.get_feature(), 'liberty') + + @mock.patch('plugins.applications.liberty.' + 'liberty_crawler.retrieve_status_page', + server_status_value) + def test_get_metrics(self): + c = LibertyHostCrawler() + options = {"password": "password", "user": "liberty"} + emitted = list(c.crawl(**options)) + self.assertEqual(emitted[0][0], 'liberty_servlet_status') + self.assertEqual(emitted[0][2], 'application') + + +class LibertyContainerTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = LibertyContainerCrawler() + self.assertEqual(c.get_feature(), 'liberty') + + @mock.patch('plugins.applications.liberty.' 
+ 'liberty_crawler.retrieve_status_page', + server_status_value) + @mock.patch('dockercontainer.DockerContainer', + MockedLibertyContainer1) + @mock.patch(("plugins.applications.liberty.liberty_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_liberty_container_forkube(self, *args): + c = LibertyContainerCrawler() + options = {"password": "password", "user": "liberty"} + emitted = list(c.crawl(**options)) + self.assertEqual(emitted[0][0], 'liberty_servlet_status') + self.assertEqual(emitted[0][2], 'application') + + @mock.patch('plugins.applications.liberty.' + 'liberty_crawler.retrieve_status_page', + server_status_value) + @mock.patch('dockercontainer.DockerContainer', + MockedLibertyContainer2) + @mock.patch(("plugins.applications.liberty.liberty_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_liberty_container_fordocker(self, *args): + c = LibertyContainerCrawler() + options = {"password": "password", "user": "liberty"} + emitted = list(c.crawl(**options)) + self.assertEqual(emitted[0][0], 'liberty_servlet_status') + self.assertEqual(emitted[0][2], 'application') + + @mock.patch('dockercontainer.DockerContainer', + MockedLibertyContainer3) + def test_liberty_container_noport(self, *args): + c = LibertyContainerCrawler() + c.crawl(1234) + pass + + @mock.patch('dockercontainer.DockerContainer', + MockedLibertyContainer1) + @mock.patch(("plugins.applications.liberty.liberty_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + @mock.patch('plugins.applications.liberty.' + 'liberty_crawler.retrieve_metrics', + mock_status_value) + def test_none_liberty_container(self, *args): + options = {"password": "password", "user": "liberty"} + c = LibertyContainerCrawler() + with self.assertRaises(ConnectionError): + c.crawl(1234, **options) diff --git a/tests/unit/test_app_nginx.py b/tests/unit/test_app_nginx.py new file mode 100644 index 00000000..5ef176ff --- /dev/null +++ b/tests/unit/test_app_nginx.py @@ -0,0 +1,194 @@ +from unittest import TestCase +import mock +from plugins.applications.nginx import nginx_crawler +from plugins.applications.nginx.feature import NginxFeature +from plugins.applications.nginx.nginx_container_crawler \ + import NginxContainerCrawler +from plugins.applications.nginx.nginx_host_crawler \ + import NginxHostCrawler +from utils.crawler_exceptions import CrawlError +from requests.exceptions import ConnectionError + + +# expected format from nginx status page +def mocked_retrieve_status_page(host, port): + return ('Active connections: 2\n' + 'server accepts handled requests\n' + '2 2 1\n' + 'Reading: 0 Writing: 1 Waiting: 1' + ) + + +def mocked_no_status_page(host, port): + # raise urllib2.HTTPError(1,2,3,4,5) + raise Exception + + +def mocked_wrong_status_page(host, port): + return ('No Acceptable status page format') + + +def mocked_urllib2_open(request): + return MockedURLResponse() + + +class MockedURLResponse(object): + + def read(self): + return ('Active connections: 2\n' + 'server accepts handled requests\n' + '2 2 1\n' + 'Reading: 0 Writing: 1 Waiting: 1' + ) + + +class MockedNginxContainer1(object): + + def __init__(self, container_id): + ports = "[ {\"containerPort\" : \"80\"} ]" + self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": + {"annotation.io.kubernetes.container.ports": ports}}} + + +class MockedNginxContainer2(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, 
+ "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["80"] + return ports + + +class MockedNginxContainer3(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["1234"] + return ports + + +class NginxCrawlTests(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch('urllib2.urlopen', mocked_urllib2_open) + def test_ok(self): + self.assertIsInstance(nginx_crawler.retrieve_metrics(), + NginxFeature) + + ''' + @mock.patch('plugins.applications.nginx.' + 'nginx_crawler.retrieve_status_page', + mocked_retrieve_status_page) + def test_successful_crawling(self): + self.assertIsInstance(nginx_crawler.retrieve_metrics(), + NginxFeature) + ''' + @mock.patch('plugins.applications.nginx.' + 'nginx_crawler.retrieve_status_page', + mocked_no_status_page) + def test_hundle_ioerror(self): + with self.assertRaises(CrawlError): + nginx_crawler.retrieve_metrics() + + @mock.patch('plugins.applications.nginx.' + 'nginx_crawler.retrieve_status_page', + mocked_wrong_status_page) + def test_hundle_parseerror(self): + with self.assertRaises(CrawlError): + nginx_crawler.retrieve_metrics() + + +class NginxHostTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = NginxHostCrawler() + self.assertEqual(c.get_feature(), 'nginx') + + @mock.patch('plugins.applications.nginx.' + 'nginx_crawler.retrieve_status_page', + mocked_retrieve_status_page) + def test_get_metrics(self): + c = NginxHostCrawler() + emitted = c.crawl()[0] + self.assertEqual(emitted[0], 'nginx') + self.assertIsInstance(emitted[1], NginxFeature) + self.assertEqual(emitted[2], 'application') + + +class NginxContainerTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = NginxContainerCrawler() + self.assertEqual(c.get_feature(), 'nginx') + + @mock.patch('plugins.applications.nginx.' + 'nginx_crawler.retrieve_status_page', + mocked_retrieve_status_page) + @mock.patch('dockercontainer.DockerContainer', + MockedNginxContainer1) + @mock.patch(("plugins.applications.nginx.nginx_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_nginx_container_forkube(self, *args): + c = NginxContainerCrawler() + emitted = c.crawl()[0] + self.assertEqual(emitted[0], 'nginx') + self.assertIsInstance(emitted[1], NginxFeature) + self.assertEqual(emitted[2], 'application') + + @mock.patch('plugins.applications.nginx.' + 'nginx_crawler.retrieve_status_page', + mocked_retrieve_status_page) + @mock.patch('dockercontainer.DockerContainer', + MockedNginxContainer2) + @mock.patch(("plugins.applications.nginx.nginx_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_nginx_container_fordocker(self, *args): + c = NginxContainerCrawler() + emitted = c.crawl()[0] + self.assertEqual(emitted[0], 'nginx') + self.assertIsInstance(emitted[1], NginxFeature) + self.assertEqual(emitted[2], 'application') + + @mock.patch('dockercontainer.DockerContainer', + MockedNginxContainer3) + def test_nginx_container_noport(self, *args): + c = NginxContainerCrawler() + c.crawl(1234) + pass + + @mock.patch('plugins.applications.nginx.' 
+ 'nginx_crawler.retrieve_status_page', + mocked_no_status_page) + @mock.patch('dockercontainer.DockerContainer', + MockedNginxContainer2) + @mock.patch(("plugins.applications.nginx.nginx_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_no_accessible_endpoint(self, *arg): + c = NginxContainerCrawler() + with self.assertRaises(ConnectionError): + c.crawl("mockcontainer") diff --git a/tests/unit/test_app_redis.py b/tests/unit/test_app_redis.py new file mode 100644 index 00000000..aaca14f9 --- /dev/null +++ b/tests/unit/test_app_redis.py @@ -0,0 +1,270 @@ +import mock +import pip +from unittest import TestCase +from plugins.applications.redis.feature import RedisFeature +from plugins.applications.redis.feature import create_feature +from plugins.applications.redis.redis_host_crawler \ + import RedisHostCrawler +from plugins.applications.redis.redis_container_crawler \ + import RedisContainerCrawler +from requests.exceptions import ConnectionError + +pip.main(['install', 'redis']) + + +class MockedRedisClient(object): + def __init__(self, host='localhost', port=6379): + self.host = host + self.port = port + + def info(self): + metrics = { + "aof_current_rewrite_time_sec": -1, + "aof_enabled": 0, + "aof_last_bgrewrite_status": "ok", + "aof_last_rewrite_time_sec": -1, + "aof_last_write_status": "ok", + "aof_rewrite_in_progress": 0, + "aof_rewrite_scheduled": 0, + "arch_bits": 64, + "blocked_clients": 0, + "client_biggest_input_buf": 0, + "client_longest_output_list": 0, + "cluster_enabled": 0, + "config_file": "", + "connected_clients": 1, + "connected_slaves": 0, + "evicted_keys": 0, + "executable": "/data/redis-server", + "expired_keys": 0, + "gcc_version": "4.9.2", + "hz": 10, + "instantaneous_input_kbps": 0.0, + "instantaneous_ops_per_sec": 0, + "instantaneous_output_kbps": 0.0, + "keyspace_hits": 0, + "keyspace_misses": 0, + "latest_fork_usec": 0, + "loading": 0, + "lru_clock": 3053805, + "master_repl_offset": 0, + "maxmemory": 0, + "maxmemory_human": "0B", + "maxmemory_policy": "noeviction", + "mem_allocator": "jemalloc-4.0.3", + "mem_fragmentation_ratio": 8.18, + "migrate_cached_sockets": 0, + "multiplexing_api": "epoll", + "os": "Linux 4.4.0-21-generic ppc64le", + "process_id": 1, + "pubsub_channels": 0, + "pubsub_patterns": 0, + "rdb_bgsave_in_progress": 0, + "rdb_changes_since_last_save": 0, + "rdb_current_bgsave_time_sec": -1, + "rdb_last_bgsave_status": "ok", + "rdb_last_bgsave_time_sec": -1, + "rdb_last_save_time": 1479217974, + "redis_build_id": "962858415ee795a5", + "redis_git_dirty": 0, + "redis_git_sha1": 0, + "redis_mode": "standalone", + "redis_version": "3.2.0", + "rejected_connections": 0, + "repl_backlog_active": 0, + "repl_backlog_first_byte_offset": 0, + "repl_backlog_histlen": 0, + "repl_backlog_size": 1048576, + "role": "master", + "run_id": "7b9a920c40761ad5750fbc8810408b69eca45c06", + "sync_full": 0, + "sync_partial_err": 0, + "sync_partial_ok": 0, + "tcp_port": 6379, + "total_commands_processed": 108, + "total_connections_received": 109, + "total_net_input_bytes": 1526, + "total_net_output_bytes": 228594, + "total_system_memory": 8557363200, + "total_system_memory_human": "7.97G", + "uptime_in_days": 2, + "uptime_in_seconds": 230839, + "used_cpu_sys": 86.48, + "used_cpu_sys_children": 0.0, + "used_cpu_user": 25.17, + "used_cpu_user_children": 0.0, + "used_memory": 856848, + "used_memory_peak": 857872, + "used_memory_peak_human": "837.77K", + "used_memory_rss": 7012352, + "used_memory_rss_human": "6.69M" + } + 
return metrics + + +class MockedRedisClient2(object): + + def __init__(self, host='localhost', port=6379): + self.host = host + self.port = port + + def info(self): + raise ConnectionError() + + +class MockedRedisClient3(object): + + def __init__(self, host='localhost', port=6379): + self.host = host + self.port = port + + def info(self): + metrics = { + "aof_current_rewrite_time_sec": -1, + "aof_enabled": 0, + "tcp_port": 6379, + "used_memory_rss_human": "6.69M" + } + return metrics + + +class MockedRedisContainer1(object): + + def __init__(self, container_id): + ports = "[ {\"containerPort\" : \"6379\"} ]" + self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": + {"annotation.io.kubernetes.container.ports": ports}}} + + +class MockedRedisContainer2(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["6379"] + return ports + + +class MockedRedisContainer3(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["1234"] + return ports + + +class RedisModuleTests(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_redis_module(self): + import redis + v = redis.VERSION + self.assertIsNotNone(v, "redis module does not exist") + + +class RedisContainerCrawlTests(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = RedisContainerCrawler() + self.assertEqual(c.get_feature(), "redis") + + @mock.patch('dockercontainer.DockerContainer', + MockedRedisContainer1) + @mock.patch(("plugins.applications.redis.redis_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + @mock.patch('redis.Redis', MockedRedisClient) + def test_redis_container_crawler_forkube(self, *args): + c = RedisContainerCrawler() + emitted_tuple = c.crawl("mockcontainerid")[0] + self.assertEqual(emitted_tuple[0], "redis", + "feature key must be equal to redis") + self.assertIsInstance(emitted_tuple[1], RedisFeature) + self.assertEqual(emitted_tuple[2], "application", + "feature type must be equal to application") + + @mock.patch('dockercontainer.DockerContainer', + MockedRedisContainer2) + @mock.patch('redis.Redis', MockedRedisClient) + @mock.patch(("plugins.applications.redis.redis_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_redis_container_crawler_fordocker(self, *args): + c = RedisContainerCrawler() + emitted_tuple = c.crawl("mockcontainerid")[0] + self.assertEqual(emitted_tuple[0], "redis", + "feature key must be equal to redis") + self.assertIsInstance(emitted_tuple[1], RedisFeature) + self.assertEqual(emitted_tuple[2], "application", + "feature type must be equal to application") + + @mock.patch('dockercontainer.DockerContainer', + MockedRedisContainer3) + @mock.patch('redis.Redis', MockedRedisClient) + def test_no_available_ports(self): + c = RedisContainerCrawler() + c.crawl(1234) + pass + + @mock.patch('dockercontainer.DockerContainer', + MockedRedisContainer2) + @mock.patch('redis.Redis', MockedRedisClient2) + @mock.patch(("plugins.applications.redis.redis_container_crawler." 
+ "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_redis_container_no_connect(self, *args): + c = RedisContainerCrawler() + with self.assertRaises(ConnectionError): + c.crawl(1234) + + +class RedisHostCrawlTests(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = RedisHostCrawler() + self.assertEqual(c.get_feature(), "redis") + + @mock.patch('redis.Redis', MockedRedisClient3) + def test_redis_host_crawler_dummy(self): + import redis + client = redis.Redis() + feature_attributes = create_feature(client.info()) + self.assertEqual(feature_attributes[0], -1) + + def test_redis_host_crawler(self): + with mock.patch('redis.Redis', MockedRedisClient): + c = RedisHostCrawler() + emitted_tuple = c.crawl()[0] + self.assertEqual(emitted_tuple[0], "redis", + "feature key must be equal to redis") + self.assertIsInstance(emitted_tuple[1], RedisFeature) + self.assertEqual(emitted_tuple[2], "application", + "feature type must be equal to application") + + @mock.patch('redis.Redis', MockedRedisClient2) + def test_no_redis_connection(self): + c = RedisHostCrawler() + with self.assertRaises(ConnectionError): + c.crawl() diff --git a/tests/unit/test_app_tomcat.py b/tests/unit/test_app_tomcat.py new file mode 100644 index 00000000..65ae9aa7 --- /dev/null +++ b/tests/unit/test_app_tomcat.py @@ -0,0 +1,295 @@ +from unittest import TestCase +import mock +from plugins.applications.tomcat import tomcat_crawler +from plugins.applications.tomcat import feature +from plugins.applications.tomcat.tomcat_container_crawler \ + import TomcatContainerCrawler +from plugins.applications.tomcat.tomcat_host_crawler \ + import TomcatHostCrawler +from utils.crawler_exceptions import CrawlError +from requests.exceptions import ConnectionError + + +def mocked_urllib2_open(request): + return MockedURLResponse() + + +def mocked_retrieve_status_page(host, port, user, password): + return server_status_value() + + +def mock_status_value(host, user, password, url): + raise CrawlError + + +def server_status_value(): + return ('' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + '' + ) + + +class MockedURLResponse(object): + def read(self): + return server_status_value() + + +class MockedTomcatContainer1(object): + + def __init__(self, container_id): + ports = "[ {\"containerPort\" : \"8080\"} ]" + self.inspect = {"State": {"Pid": 1234}, "Config": {"Labels": + {"annotation.io.kubernetes.container.ports": ports}}} + + +class MockedTomcatContainer2(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["8080"] + return ports + + +class MockedTomcatContainer3(object): + + def __init__(self, container_id): + self.inspect = {"State": {"Pid": 1234}, + "Config": {"Labels": {"dummy": "dummy"}}} + + def get_container_ports(self): + ports = ["1234"] + return ports + + +class TomcatCrawlTests(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_hundle_ioerror(self): + with self.assertRaises(CrawlError): + tomcat_crawler.retrieve_status_page("localhost", + "1234", "test", "test") + + @mock.patch('urllib2.urlopen', mocked_urllib2_open) + def test_ok(self): + status = list(tomcat_crawler.retrieve_metrics()) + assert status == [('tomcat_jvm', + feature.TomcatJVMFeature( + free='3846720', + total='62390272', + max='922746880'), + 'application'), + ('tomcat_memory', + 
feature.TomcatMemoryFeature( + name='PS Eden Space', + type='Heap memory', + initial='16252928', + committed='16252928', + maximum='340787200', + used='8570016'), + 'application'), + ('tomcat_memory', + feature.TomcatMemoryFeature( + name='PS Survivor Space', + type='Heap memory', + initial='2621440', + committed='2621440', + maximum='2621440', + used='2621440'), + 'application'), + ('tomcat_memory', + feature.TomcatMemoryFeature( + name='Code Cache', + type='Non-heap memory', + initial='2555904', + committed='6225920', + maximum='251658240', + used='6211200'), + 'application'), + ('tomcat_memory', + feature.TomcatMemoryFeature( + name='Compressed Class Space', + type='Non-heap memory', + initial='0', + committed='2097152', + maximum='1073741824', + used='1959616'), + 'application'), + ('tomcat_memory', + feature.TomcatMemoryFeature( + name='Metaspace', + type='Non-heap memory', + initial='0', + committed='18874368', + maximum='-1', + used='18211520'), + 'application'), + ('tomcat_connector', + feature.TomcatConnectorFeature( + connector='ajp-nio-8009', + maxThread='200', + currentThread='0', + currentThreadBusy='0', + requestMaxTime='0', + processingTime='0', + requestCount='0', + errorCount='0', + byteReceived='0', + byteSent='0'), + 'application'), + ('tomcat_connector', + feature.TomcatConnectorFeature( + connector='http-nio-8080', + maxThread='200', + currentThread='2', + currentThreadBusy='1', + requestMaxTime='60', + processingTime='60', + requestCount='1', + errorCount='1', + byteReceived='0', + byteSent='2473'), + 'application'), + ('tomcat_worker', + feature.TomcatWorkerFeature( + connector='http-nio-8080', + stage='S', + time='52', + byteSent='0', + byteReceived='0', + client='0:0:0:0:0:0:0:1', + vhost='localhost', + request='/manager/status'), + 'application')] + + +class TomcatHostTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = TomcatHostCrawler() + self.assertEqual(c.get_feature(), 'tomcat') + + @mock.patch('plugins.applications.tomcat.' + 'tomcat_crawler.retrieve_status_page', + mocked_retrieve_status_page) + def test_get_metrics(self): + c = TomcatHostCrawler() + options = {"password": "password", "user": "tomcat"} + emitted = list(c.crawl(**options)) + self.assertEqual(emitted[0][0], 'tomcat_jvm') + self.assertEqual(emitted[0][2], 'application') + + +class TomcatContainerTest(TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_feature(self): + c = TomcatContainerCrawler() + self.assertEqual(c.get_feature(), 'tomcat') + + @mock.patch('plugins.applications.tomcat.' + 'tomcat_crawler.retrieve_status_page', + mocked_retrieve_status_page) + @mock.patch('dockercontainer.DockerContainer', + MockedTomcatContainer1) + @mock.patch(("plugins.applications.tomcat.tomcat_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_tomcat_container_forkube(self, *args): + c = TomcatContainerCrawler() + options = {"password": "password", "user": "tomcat"} + emitted = list(c.crawl(**options)) + self.assertEqual(emitted[0][0], 'tomcat_jvm') + self.assertEqual(emitted[0][2], 'application') + + @mock.patch('plugins.applications.tomcat.' + 'tomcat_crawler.retrieve_status_page', + mocked_retrieve_status_page) + @mock.patch('dockercontainer.DockerContainer', + MockedTomcatContainer2) + @mock.patch(("plugins.applications.tomcat.tomcat_container_crawler." 
+ "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + def test_tomcat_container_fordocker(self, *args): + c = TomcatContainerCrawler() + options = {"password": "password", "user": "tomcat"} + emitted = list(c.crawl(**options)) + self.assertEqual(emitted[0][0], 'tomcat_jvm') + self.assertEqual(emitted[0][2], 'application') + + @mock.patch('dockercontainer.DockerContainer', + MockedTomcatContainer3) + def test_tomcat_container_noport(self, *args): + c = TomcatContainerCrawler() + c.crawl(1234) + pass + + @mock.patch('dockercontainer.DockerContainer', + MockedTomcatContainer1) + @mock.patch(("plugins.applications.tomcat.tomcat_container_crawler." + "run_as_another_namespace"), + return_value=['127.0.0.1', '1.2.3.4']) + @mock.patch('plugins.applications.tomcat.' + 'tomcat_crawler.retrieve_metrics', + mock_status_value) + def test_none_tomcat_container(self, *args): + options = {"password": "password", "user": "tomcat"} + c = TomcatContainerCrawler() + with self.assertRaises(ConnectionError): + c.crawl(1234, **options) diff --git a/tests/unit/test_container.py b/tests/unit/test_container.py new file mode 100644 index 00000000..523a4e81 --- /dev/null +++ b/tests/unit/test_container.py @@ -0,0 +1,44 @@ +import mock +import unittest + +from container import Container + + +def mocked_exists(pid): + return True + + +class ContainerTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_non_implemented_methods(self): + c = Container(1) + with self.assertRaises(NotImplementedError): + c.get_memory_cgroup_path() + with self.assertRaises(NotImplementedError): + c.get_cpu_cgroup_path() + + @mock.patch('crawler.container.os.path.exists', side_effect=mocked_exists) + def test_is_running(self, mock_exists): + c = Container(1) + assert c.is_running() + + def test_eq_ne(self): + c1 = Container(1) + c2 = Container(2) + c3 = Container(2) + assert c1 != c2 + assert c2 == c3 + + def test_is_docker(self): + c = Container(1) + assert not c.is_docker_container() + + def test_to_str(self): + c = Container(1) + print(c) diff --git a/tests/unit/test_containers.py b/tests/unit/test_containers.py new file mode 100644 index 00000000..882a2d3d --- /dev/null +++ b/tests/unit/test_containers.py @@ -0,0 +1,188 @@ +import mock +import unittest + +from containers import (list_all_containers, get_containers) + + +def mocked_exists(pid): + return True + + +class DockerContainer(): + + def __init__(self, pid): + self.pid = pid + self.short_id = pid + self.long_id = pid + self.process_namespace = pid + + def __str__(self): + return 'container %s' % self.pid + + def is_docker_container(self): + return True + +DOCKER_IDS = ['101', '102', '103', '104', '105', '106'] + + +def mocked_get_docker_containers(host_namespace='', user_list='ALL'): + for long_id in DOCKER_IDS: + + if user_list not in ['ALL', 'all', 'All']: + user_ctrs = [cid[:12] for cid in user_list.split(',')] + short_id = long_id[:12] + if short_id not in user_ctrs: + continue + + c = DockerContainer(long_id) + yield c + + +class PsUtilProcess(): + + def __init__(self, pid): + self.pid = pid + + +class ContainersTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch('containers.get_docker_containers', + side_effect=lambda host_namespace, user_list='ALL': + mocked_get_docker_containers(host_namespace, user_list)) + @mock.patch('containers.container.namespace.get_pid_namespace', + side_effect=lambda pid: pid) + @mock.patch('containers.container.psutil.process_iter', + 
side_effect=lambda: [PsUtilProcess('4'), # container + PsUtilProcess('1'), # init + PsUtilProcess('5')]) # crawler + @mock.patch('containers.container.misc.process_is_crawler', + side_effect=lambda pid: True if pid == '5' else False) + def test_list_all_containers(self, *args): + pids = [ + c.pid for c in list_all_containers( + ignore_raw_containers=False)] + # pid 1 is the init process, which is not a container + # according to the definition in container.py + assert set(pids) == set(DOCKER_IDS + ['4']) + assert '1' not in pids # init process + assert '5' not in pids # crawler process + assert args[0].call_count == 2 + assert args[1].call_count == 1 + assert args[2].call_count == 2 + assert args[3].call_count == 1 + + @mock.patch('containers.get_docker_containers', + side_effect=lambda host_namespace, user_list='ALL': + mocked_get_docker_containers(host_namespace, user_list)) + @mock.patch('containers.container.namespace.get_pid_namespace', + side_effect=lambda pid: pid) + @mock.patch('containers.container.psutil.process_iter', + side_effect=lambda: [PsUtilProcess('4'), # container + PsUtilProcess('1'), # init + PsUtilProcess('5')]) # crawler + @mock.patch('containers.container.misc.process_is_crawler', + side_effect=lambda pid: True if pid == '5' else False) + def test_list_all_containers_input_list(self, *args): + pids = [ + c.pid for c in list_all_containers( + user_list='102', + ignore_raw_containers=False)] + # pid 1 is the init process, which is not a container + # according to the definition in container.py + assert set(pids) == set(['102']) + assert '3' not in pids # filtered container + assert '4' not in pids # filtered container + assert '1' not in pids # init process + assert '5' not in pids # crawler process + + @mock.patch('containers.get_docker_containers', + side_effect=lambda host_namespace, user_list='ALL': + mocked_get_docker_containers(host_namespace, user_list)) + @mock.patch('containers.container.namespace.get_pid_namespace', + side_effect=lambda pid: pid) + @mock.patch('containers.container.psutil.process_iter', + side_effect=lambda: [PsUtilProcess('4'), # container + PsUtilProcess('1'), # init + PsUtilProcess('5')]) # crawler + @mock.patch('containers.container.misc.process_is_crawler', + side_effect=lambda pid: True if pid == '5' else False) + def test_get_filtered_list(self, *args): + pids = [c.pid for c in get_containers(ignore_raw_containers=False)] + # pid 1 is the init process, which is not a container + # according to the definition in container.py + assert set(pids) == set(DOCKER_IDS + ['4']) + assert '1' not in pids # init process + assert '5' not in pids # crawler process + + @mock.patch('containers.get_docker_containers', + side_effect=lambda host_namespace, user_list='ALL': + mocked_get_docker_containers(host_namespace, user_list)) + @mock.patch('containers.container.namespace.get_pid_namespace', + side_effect=lambda pid: pid) + @mock.patch('containers.container.psutil.process_iter', + side_effect=lambda: [PsUtilProcess('4'), # container + PsUtilProcess('1'), # init + PsUtilProcess('5')]) # crawler + @mock.patch('containers.container.misc.process_is_crawler', + side_effect=lambda pid: True if pid == '5' else False) + def test_get_filtered_list_with_input_list(self, *args): + pids = [ + c.pid for c in get_containers(ignore_raw_containers=False, + user_list='102')] + # pid 1 is the init process, which is not a container + # according to the definition in container.py + assert set(pids) == set(['102']) + assert '3' not in pids # filtered container + assert 
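The `args[N].call_count` assertions above rely on how stacked `@mock.patch` decorators inject their mocks: bottom-up, so `args[0]` is the mock for `process_is_crawler` (the bottom decorator) and `args[3]` the one for `get_docker_containers` (the top decorator). A two-patch demonstration:

import mock

@mock.patch('os.getcwd', return_value='/top')  # top decorator -> args[1]
@mock.patch('os.getpid', return_value=42)      # bottom decorator -> args[0]
def demo(*args):
    assert args[0]() == 42       # the bottom patch's mock comes first
    assert args[1]() == '/top'

demo()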
'4' not in pids # filtered container + assert '1' not in pids # init process + assert '5' not in pids # crawler process + + @mock.patch('containers.get_docker_containers', + side_effect=lambda host_namespace, user_list='ALL': + mocked_get_docker_containers(host_namespace, user_list)) + @mock.patch('containers.container.namespace.get_pid_namespace', + side_effect=lambda pid: pid) + @mock.patch('containers.container.psutil.process_iter', + side_effect=lambda: [PsUtilProcess('4'), # container + PsUtilProcess('1'), # init + PsUtilProcess('5')]) # crawler + @mock.patch('containers.container.misc.process_is_crawler', + side_effect=lambda pid: True if pid == '5' else False) + def test_get_filtered_list_with_input_list_ALL(self, *args): + pids = [ + c.pid for c in get_containers(ignore_raw_containers=False, + user_list='ALL')] + # pid 1 is the init process, which is not a container + # according to the definition in container.py + assert set(pids) == set(DOCKER_IDS + ['4']) + + @mock.patch('containers.get_docker_containers', + side_effect=lambda host_namespace, user_list='ALL': + mocked_get_docker_containers(host_namespace, user_list)) + @mock.patch('containers.container.namespace.get_pid_namespace', + side_effect=lambda pid: pid) + @mock.patch('containers.container.psutil.process_iter', + side_effect=lambda: [PsUtilProcess('4'), # container + PsUtilProcess('1'), # init + PsUtilProcess('5')]) # crawler + @mock.patch('containers.container.misc.process_is_crawler', + side_effect=lambda pid: True if pid == '5' else False) + def test_get_filtered_list_non_default_env(self, *args): + opts = {'environment': 'alchemy', + 'docker_containers_list': 'ALL', + 'partition_strategy': {'name': 'equally_by_pid', + 'args': {'process_id': 0, + 'num_processes': 1}}} + pids = [c.pid for c in get_containers(opts)] + # pid 1 is the init process, which is not a container + # according to the definition in container.py + assert set(pids) == set(DOCKER_IDS) + # only docker containers are returned in non-cloudsight environments + # (see the 'alchemy' above) + assert '4' not in pids diff --git a/tests/unit/test_containers_crawler.py b/tests/unit/test_containers_crawler.py new file mode 100644 index 00000000..7f76c72a --- /dev/null +++ b/tests/unit/test_containers_crawler.py @@ -0,0 +1,139 @@ +import mock +import unittest +from containers_crawler import ContainersCrawler + + +class MockedOSCrawler: + + def crawl(self, **kwargs): + return [('linux', {'os': 'some_os'}, 'os')] + + +class MockedCPUCrawler: + + def crawl(self, **kwargs): + return [('cpu-0', {'used': 100}, 'cpu')] + + +class MockedOSCrawlerFailure: + + def crawl(self, container_id, **kwargs): + if container_id == 'errorid': + raise OSError('some exception') + else: + return [('linux', {'os': 'some_os'}, 'os')] + + +class MockedDockerContainer: + + def __init__(self, short_id='short_id', pid=777): + self.namespace = short_id + self.pid = pid + self.short_id = short_id + self.long_id = short_id + self.name = 'name' + self.image = 'image' + self.owner_namespace = 'owner_namespace' + self.docker_image_long_name = 'image_long_name' + self.docker_image_short_name = 'image_short_name' + self.docker_image_tag = 'image_tag' + self.docker_image_registry = 'image_registry' + + def is_docker_container(self): + return True + + def link_logfiles(self, options): + pass + + def unlink_logfiles(self, options): + pass + + def get_metadata_dict(self): + return {'namespace': self.namespace} + + def __eq__(self, other): + return self.pid == other.pid + + +class 
ContainersCrawlerTests(unittest.TestCase): + + @mock.patch( + 'containers_crawler.plugins_manager.get_container_crawl_plugins', + side_effect=lambda features: [(MockedOSCrawler(), {}), + (MockedCPUCrawler(), {})]) + @mock.patch('containers_crawler.get_containers', + side_effect=lambda host_namespace, user_list: [ + MockedDockerContainer( + short_id='aaa', + pid=101), + MockedDockerContainer( + short_id='bbb', + pid=102), + MockedDockerContainer( + short_id='ccc', + pid=103)]) + def test_containers_crawler(self, *args): + crawler = ContainersCrawler(features=['os']) + frames = list(crawler.crawl()) + namespaces = sorted([f.metadata['namespace'] for f in frames]) + assert namespaces == sorted(['aaa', 'bbb', 'ccc']) + features_count = sorted([f.num_features for f in frames]) + assert features_count == sorted([2, 2, 2]) + system_types = sorted([f.metadata['system_type'] for f in frames]) + assert system_types == sorted(['container', 'container', 'container']) + assert args[0].call_count == 1 + assert args[1].call_count == 1 + + @mock.patch( + 'containers_crawler.plugins_manager.get_container_crawl_plugins', + side_effect=lambda features: [(MockedOSCrawlerFailure(), {}), + (MockedCPUCrawler(), {})]) + @mock.patch('containers_crawler.get_containers', + side_effect=lambda host_namespace, user_list: [ + MockedDockerContainer( + short_id='aaa', + pid=101), + MockedDockerContainer( + short_id='errorid', + pid=102), + MockedDockerContainer( + short_id='ccc', + pid=103)]) + def test_failed_containers_crawler(self, *args): + crawler = ContainersCrawler(features=['os']) + with self.assertRaises(OSError): + frames = list(crawler.crawl(ignore_plugin_exception=False)) + assert args[0].call_count == 1 + assert args[1].call_count == 1 + + @mock.patch( + 'containers_crawler.plugins_manager.get_container_crawl_plugins', + side_effect=lambda features: [(MockedCPUCrawler(), {}), + (MockedOSCrawlerFailure(), {}), + (MockedCPUCrawler(), {})]) + @mock.patch('containers_crawler.get_containers', + side_effect=lambda host_namespace, user_list: [ + MockedDockerContainer( + short_id='aaa', + pid=101), + MockedDockerContainer( + short_id='errorid', + pid=102), + MockedDockerContainer( + short_id='ccc', + pid=103)]) + def test_failed_containers_crawler_with_ignore_failure(self, *args): + crawler = ContainersCrawler(features=['os']) + frames = list(crawler.crawl()) # defaults to ignore_plugin_exception + namespaces = sorted([f.metadata['namespace'] for f in frames]) + assert namespaces == sorted(['aaa', 'errorid', 'ccc']) + features_count = sorted([f.num_features for f in frames]) + assert features_count == sorted([3, 2, 3]) + system_types = [f.metadata['system_type'] for f in frames] + assert system_types == ['container', 'container', 'container'] + assert args[0].call_count == 1 + assert args[1].call_count == 1 + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/unit/test_diskio_host.py b/tests/unit/test_diskio_host.py new file mode 100644 index 00000000..3489e47a --- /dev/null +++ b/tests/unit/test_diskio_host.py @@ -0,0 +1,130 @@ +''' +Unit tests for the DiskioHostCrawler plugin +''' +import unittest +import mock + +from plugins.systems.diskio_host_crawler import DiskioHostCrawler + +counters_increment = 0 +time_increment = 0 + +def mocked_time(): + ''' + Used to mock time.time(), which the crawler calls to calculate rates + ''' + global time_increment + + base_time = 1504726245 + return base_time + time_increment + +def mocked_diskio_counters(): + ''' + Used to mock 
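The diskio tests that follow lean on a simple pattern: module-level globals act as knobs that the `side_effect` functions read on every call, so the test can move time and counters forward between crawls. A self-contained sketch of the idea:

# One mutable knob shared between the test and the fake time source.
_tick = {'now': 1504726245}

def fake_time():
    return _tick['now']

assert fake_time() == 1504726245
_tick['now'] += 60               # the test advances time between crawls
assert fake_time() == 1504726305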
DiskioHostCrawler._crawl_disk_io_counters()
+    '''
+    global counters_increment
+
+    base_counters = [10, 10, 10, 10]
+    counters = [i + counters_increment for i in base_counters]
+    yield ('loop', [0, 0, 0, 0])
+    yield ('sda1', counters)
+
+
+class TestDiskioCrawlerPlugin(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls._crawler = DiskioHostCrawler()
+
+    def testGetFeature(self):
+        crawler = DiskioHostCrawler()
+        self.assertEqual('diskio', crawler.get_feature())
+
+    def test_crawl_disk_io_counters(self):
+        crawler = DiskioHostCrawler()
+        diskio_data = crawler._crawl_disk_io_counters()
+        for device_name, counters in diskio_data:
+            self.assertIsInstance(device_name, basestring)
+            self.assertEqual(4, len(counters))
+            for counter in counters:
+                self.assertIsInstance(counter, (int, long))
+
+    @mock.patch('time.time', side_effect=mocked_time)
+    @mock.patch.object(DiskioHostCrawler, '_crawl_disk_io_counters',
+                       side_effect=mocked_diskio_counters)
+    def testCrawl(self, mocked_diskio_counters, mocked_time):
+        global counters_increment
+        global time_increment
+
+        # First crawl
+        diskio_feature = self._crawler.crawl()
+        for device_name, feature_attributes, feature_key in diskio_feature:
+            self.assertEqual('diskio', feature_key)
+            self.assertEqual(4, len(feature_attributes), 'Incorrect number of attributes')
+            self.assertIsInstance(device_name, basestring, 'Device name should be string')
+
+            self.assertEqual(0, feature_attributes.readoprate, 'Unexpected read operations per second')
+            self.assertEqual(0, feature_attributes.writeoprate, 'Unexpected write operations per second')
+            self.assertEqual(0, feature_attributes.readbytesrate, 'Unexpected bytes read per second')
+            self.assertEqual(0, feature_attributes.writebytesrate, 'Unexpected bytes written per second')
+
+            if device_name == 'diskio-loop':
+                pass
+            elif device_name == 'diskio-sda1':
+                pass
+            else:
+                raise Exception('Unexpected device name')
+
+        # Make sure the counters will be incremented by the function
+        # mocking the I/O counters
+        counters_increment = 100.0
+
+        # Make sure the time will be incremented by the mocked time.time()
+        time_increment = 60
+
+        # Second crawl
+        diskio_feature = self._crawler.crawl()
+        for device_name, feature_attributes, feature_key in diskio_feature:
+            self.assertEqual('diskio', feature_key)
+            self.assertEqual(4, len(feature_attributes), 'Incorrect number of attributes')
+            self.assertIsInstance(device_name, basestring, 'Device name should be string')
+            if device_name == 'diskio-loop':
+                self.assertEqual(0, feature_attributes.readoprate, 'Unexpected read operations per second')
+                self.assertEqual(0, feature_attributes.writeoprate, 'Unexpected write operations per second')
+                self.assertEqual(0, feature_attributes.readbytesrate, 'Unexpected bytes read per second')
+                self.assertEqual(0, feature_attributes.writebytesrate, 'Unexpected bytes written per second')
+            elif device_name == 'diskio-sda1':
+                expected_rate = round(counters_increment / time_increment, 2)
+                self.assertEqual(feature_attributes.readoprate, expected_rate, 'Unexpected read operations per second')
+                self.assertEqual(feature_attributes.writeoprate, expected_rate, 'Unexpected write operations per second')
+                self.assertEqual(feature_attributes.readbytesrate, expected_rate, 'Unexpected bytes read per second')
+                self.assertEqual(feature_attributes.writebytesrate, expected_rate, 'Unexpected bytes written per second')
+            else:
+                raise Exception('Unexpected device name')
+
+        # Make sure the counter-diff as compared to the previous crawl will be negative,
+        #
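The expected values in the second crawl above come from a simple rate computation: the delta of the cumulative counter divided by elapsed seconds, rounded to two decimals; the wrap case handled next checks that a negative delta repeats the previous measurement instead. In sketch form:

def io_rate(prev_count, curr_count, elapsed_sec, prev_rate):
    delta = curr_count - prev_count
    if delta < 0:
        # counter wrapped: report the previous rate again
        return prev_rate
    return round(delta / float(elapsed_sec), 2)

assert io_rate(10, 110.0, 60, None) == 1.67   # matches the second crawl
assert io_rate(110.0, 10, 60, 1.67) == 1.67   # wrap: rate carried over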
to emulate a case where the OS counters have wrapped + # In this case, the crawler is expected to report the same measurement as before + counters_increment = -500.0 + + # Make sure the time will be incremented by the mocked time.time() + time_increment += 60 + + # Third crawl + diskio_feature = self._crawler.crawl() + for device_name, feature_attributes, feature_key in diskio_feature: + self.assertEqual('diskio', feature_key) + self.assertEqual(4, len(feature_attributes), 'Incorrect number of attributes') + self.assertIsInstance(device_name, basestring, 'Device name should be string') + if device_name == 'diskio-loop': + self.assertEqual(0, feature_attributes.readoprate, 'Unexpected read operations per second') + self.assertEqual(0, feature_attributes.writeoprate, 'Unexpected write operations per second') + self.assertEqual(0, feature_attributes.readbytesrate, 'Unexpected bytes read per second') + self.assertEqual(0, feature_attributes.writebytesrate, 'Unexpected bytes written per second') + elif device_name == 'diskio-sda1': + self.assertEqual(feature_attributes.readoprate, expected_rate, 'Unexpected read operations per second') + self.assertEqual(feature_attributes.writeoprate, expected_rate, 'Unexpected write operations per second') + self.assertEqual(feature_attributes.readbytesrate, expected_rate, 'Unexpected bytes read per second') + self.assertEqual(feature_attributes.writebytesrate, expected_rate, 'Unexpected bytes written per second') + else: + raise Exception('Unexpected device name') + +if __name__ == "__main__": + unittest.main() diff --git a/tests/unit/test_dockercontainer.py b/tests/unit/test_dockercontainer.py new file mode 100644 index 00000000..d76119f6 --- /dev/null +++ b/tests/unit/test_dockercontainer.py @@ -0,0 +1,841 @@ +import copy +import unittest + +import mock +import requests + +from dockercontainer import DockerContainer, get_docker_containers +from utils import crawler_exceptions + + +def mocked_exists(pid): + return True + + +def mocked_docker_inspect(long_id): + if long_id == 'no_container_id': + raise requests.exceptions.HTTPError + else: + inspect = { + "Id": "good_id", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + "Mounts": [], + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + } + } + inspect['Id'] = long_id + return inspect + + +def mocked_exec_dockerps(): + inspect1 = { + "Id": "good_id", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + "Mounts": [], + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + } + } + inspect2 = { + "Id": "no_namespace", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + "Mounts": [], + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + } + } + inspect3 = { + "Id": "good_id", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + "Mounts": [], + "Config": 
{ + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + } + } + return [inspect1, inspect2, inspect3] + + +def mocked_exec_dockerps_long(): + inspect = { + "Id": "", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + "Mounts": [], + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + } + } + for i in range(10): + _inspect = inspect + _inspect['Id'] = str(i) + yield _inspect + + +def mocked_get_rootfs(long_id): + if long_id == 'valid_rootfs_id': + return '/tmp/something/docker/' + long_id + else: + raise requests.exceptions.HTTPError + + +def mocked_symlink_oserror(a, b): + raise OSError() + + +def mocked_symlink_exception(a, b): + raise Exception() + + +def mocked_rmtree_exception(path): + raise OSError() + + +class MockedRuntimeEnv(): + + def get_environment_name(self): + return 'cloudsight' + + def get_container_namespace(self, long_id, options): + if long_id == 'good_id': + return 'random_namespace' + elif long_id == 'throw_non_handled_exception_id': + raise Exception() + elif long_id == 'throw_bad_environment_exception_id': + raise crawler_exceptions.ContainerInvalidEnvironment() + elif long_id == 'no_namespace': + return None + else: + return 'other_namespace' + + def get_container_log_file_list(self, long_id, options): + logs = copy.deepcopy(options['container_logs']) + if long_id == 'good_id': + logs.extend([{'name': '/var/log/1', 'type': None}, + {'name': '/var/log/2', 'type': None}]) + elif long_id == 'throw_value_error_id': + raise ValueError() + elif long_id == 'valid_rootfs_id': + logs.extend([{'name': '/var/log/1', 'type': None}, + {'name': '/var/log/2', 'type': None}, + {'name': '../../as', 'type': None}]) + return logs + + def get_container_log_prefix(self, long_id, options): + return 'random_prefix' + + +def mocked_get_runtime_env(): + return MockedRuntimeEnv() + + +def mocked_get_container_json_logs_path(id, inspect): + return '/var/lib/docker/abc/container/log.json' + + +class DockerDockerContainerTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_list_docker_containers(self, mock_get_rootfs, mock_inspect, + mocked_get_runtime_env, mocked_dockerps): + n = 0 + for c in get_docker_containers(): + assert c.long_id == 'good_id' + n += 1 + assert mocked_get_runtime_env.call_count == 3 + assert n == 2 + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps_long) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_list_docker_containers_with_input( + self, + mock_get_rootfs, + mock_inspect, + mocked_get_runtime_env, + mocked_dockerps): + ids = [c.short_id for c in get_docker_containers(user_list='1,2,8')] + assert set(ids) == set(['1', '2', '8']) + 
assert mocked_get_runtime_env.call_count == 3 + ids = [c.long_id for c in get_docker_containers(user_list='5,3')] + assert set(ids) == set(['3', '5']) + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_list_docker_containers_with_opts(self, mock_get_rootfs, + mock_inspect, + mocked_get_runtime_env, + mocked_dockerps): + n = 0 + for c in get_docker_containers(): + assert c.long_id == 'good_id' + n += 1 + assert mocked_get_runtime_env.call_count == 3 + assert n == 2 + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_init( + self, + mock_get_rootfs, + mock_inspect, + mocked_get_runtime_env, + mocked_dockerps): + c = DockerContainer("good_id") + mock_inspect.assert_called() + assert not c.root_fs + assert mocked_get_runtime_env.call_count == 1 + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_init_from_inspect(self, mock_get_rootfs, mock_inspect, + mocked_get_runtime_env, mocked_dockerps): + inspect = { + "Id": "good_id", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + "Mounts": [], + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + } + } + c = DockerContainer("good_id", inspect) + mock_inspect.assert_not_called() + assert not c.root_fs + assert mocked_get_runtime_env.call_count == 1 + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_init_from_inspect_w_repotags(self, mock_get_rootfs, mock_inspect, + mocked_get_runtime_env, + mocked_dockerps): + inspect = { + "Id": "good_id", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + "Mounts": [], + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + }, + 'RepoTag': 'registry.com:123/ric/img:latest' + } + c = DockerContainer("good_id", inspect) + mock_inspect.assert_not_called() + assert not c.root_fs + assert mocked_get_runtime_env.call_count == 1 + assert c.docker_image_long_name == 
'registry.com:123/ric/img:latest' + assert c.docker_image_short_name == 'img:latest' + assert c.docker_image_tag == 'latest' + assert c.docker_image_registry == 'registry.com:123' + assert c.owner_namespace == 'ric' + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_init_from_inspect_w_repotags2(self, mock_get_rootfs, mock_inspect, + mocked_get_runtime_env, + mocked_dockerps): + inspect = { + "Id": "good_id", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + "Mounts": [], + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + }, + 'RepoTag': 'registry.com:123/img:latest' + } + c = DockerContainer("good_id", inspect) + mock_inspect.assert_not_called() + assert not c.root_fs + assert mocked_get_runtime_env.call_count == 1 + assert c.docker_image_long_name == 'registry.com:123/img:latest' + assert c.docker_image_short_name == 'img:latest' + assert c.docker_image_tag == 'latest' + assert c.docker_image_registry == 'registry.com:123' + assert c.owner_namespace == '' + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_init_failed(self, mock_get_rootfs, mock_inspect, + mocked_get_runtime_env, mocked_dockerps): + with self.assertRaises(crawler_exceptions.ContainerNonExistent): + DockerContainer("no_container_id") + assert mocked_get_runtime_env.call_count == 0 + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_init_wrong_environment( + self, + mock_get_rootfs, + mock_inspect, + mocked_get_runtime_env, + mocked_dockerps): + with self.assertRaises(crawler_exceptions.ContainerInvalidEnvironment): + DockerContainer("no_namespace") + with self.assertRaises(crawler_exceptions.ContainerInvalidEnvironment): + DockerContainer("throw_bad_environment_exception_id") + with self.assertRaises(Exception): + DockerContainer("throw_non_handled_exception_id") + with self.assertRaises(crawler_exceptions.ContainerInvalidEnvironment): + DockerContainer("throw_value_error_id") + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def test_is_docker( + self, + mock_get_rootfs, + 
+            mock_inspect,
+            mocked_get_runtime_env,
+            mocked_dockerps):
+        c = DockerContainer("good_id")
+        assert c.is_docker_container()
+        print(c)
+
+    @mock.patch('dockercontainer.exec_dockerps',
+                side_effect=mocked_exec_dockerps)
+    @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin',
+                side_effect=mocked_get_runtime_env)
+    @mock.patch('dockercontainer.exec_dockerinspect',
+                side_effect=mocked_docker_inspect)
+    @mock.patch('dockercontainer.get_docker_container_rootfs_path',
+                side_effect=mocked_get_rootfs)
+    @mock.patch('dockercontainer.os.path.ismount',
+                side_effect=lambda x: True if x == '/cgroup/memory' else False)
+    def test_memory_cgroup(
+            self,
+            mocked_ismount,
+            mock_get_rootfs,
+            mock_inspect,
+            mocked_get_runtime_env,
+            mocked_dockerps):
+        c = DockerContainer("good_id")
+        assert c.get_memory_cgroup_path(
+            'abc') == '/cgroup/memory/docker/good_id/abc'
+
+    @mock.patch('dockercontainer.exec_dockerps',
+                side_effect=mocked_exec_dockerps)
+    @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin',
+                side_effect=mocked_get_runtime_env)
+    @mock.patch('dockercontainer.exec_dockerinspect',
+                side_effect=mocked_docker_inspect)
+    @mock.patch('dockercontainer.get_docker_container_rootfs_path',
+                side_effect=mocked_get_rootfs)
+    @mock.patch('dockercontainer.os.path.ismount',
+                side_effect=lambda x:
+                x in ('/cgroup/cpuacct', '/cgroup/cpu,cpuacct'))
+    def test_cpu_cgroup(
+            self,
+            mocked_ismount,
+            mock_get_rootfs,
+            mock_inspect,
+            mocked_get_runtime_env,
+            mocked_dockerps):
+        c = DockerContainer("good_id")
+        # The mounted hierarchy may be either cpuacct or the combined
+        # cpu,cpuacct one; accept both resulting paths.
+        assert c.get_cpu_cgroup_path('abc') in (
+            '/cgroup/cpuacct/docker/good_id/abc',
+            '/cgroup/cpu,cpuacct/docker/good_id/abc')
+
+    @mock.patch('dockercontainer.exec_dockerps',
+                side_effect=mocked_exec_dockerps)
+    @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin',
+                side_effect=mocked_get_runtime_env)
+    @mock.patch('dockercontainer.exec_dockerinspect',
+                side_effect=mocked_docker_inspect)
+    @mock.patch('dockercontainer.get_docker_container_rootfs_path',
+                side_effect=mocked_get_rootfs)
+    @mock.patch('dockercontainer.os.makedirs')
+    @mock.patch('dockercontainer.os.symlink')
+    def test_link_logfiles(
+            self,
+            mock_symlink,
+            mock_makedirs,
+            mock_get_rootfs,
+            mock_inspect,
+            mocked_get_runtime_env,
+            mocked_dockerps):
+        c = DockerContainer("valid_rootfs_id")
+        c.link_logfiles()
+        mock_symlink.assert_called_with(
+            '/tmp/something/docker/valid_rootfs_id/var/log/2',
+            '/var/log/crawler_container_logs/random_prefix/var/log/2')
+        assert mock_symlink.call_count == 4
+
+    @mock.patch('dockercontainer.exec_dockerps',
+                side_effect=mocked_exec_dockerps)
+    @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin',
+                side_effect=mocked_get_runtime_env)
+    @mock.patch('dockercontainer.exec_dockerinspect',
+                side_effect=mocked_docker_inspect)
+    @mock.patch('dockercontainer.get_docker_container_rootfs_path',
+                side_effect=mocked_get_rootfs)
+    @mock.patch('dockercontainer.os.makedirs')
+    @mock.patch('dockercontainer.os.symlink')
+    @mock.patch('dockercontainer.misc.get_process_env',
+                side_effect=lambda x: {
+                    'LOG_LOCATIONS': '/var/env/1,/var/env/2'})
+    def test_link_logfiles_env_variable(
+            self,
+            mock_get_env,
+            mock_symlink,
+            mock_makedirs,
+            mock_get_rootfs,
+            mock_inspect,
+            mocked_get_runtime_env,
+            mocked_dockerps):
+        c = DockerContainer("valid_rootfs_id")
+        c.link_logfiles()
+        mock_symlink.assert_called_with(
+            '/tmp/something/docker/valid_rootfs_id/var/log/2',
+            '/var/log/crawler_container_logs/random_prefix/var/log/2')
+        assert mock_symlink.call_count == 6
+
+    @mock.patch('dockercontainer.exec_dockerps',
+                side_effect=mocked_exec_dockerps)
+    @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin',
+                side_effect=mocked_get_runtime_env)
+    @mock.patch('dockercontainer.exec_dockerinspect',
+                side_effect=mocked_docker_inspect)
+    @mock.patch('dockercontainer.get_docker_container_rootfs_path',
+                side_effect=mocked_get_rootfs)
+    @mock.patch('dockercontainer.os.makedirs')
+    @mock.patch('dockercontainer.os.symlink',
+                side_effect=mocked_symlink_oserror)
+    def test_link_logfiles_symlink_oserror(
+            self,
+            mock_symlink,
+            mock_makedirs,
+            mock_get_rootfs,
+            mock_inspect,
+            mocked_get_runtime_env,
+            mocked_dockerps):
+        c = DockerContainer("valid_rootfs_id")
+        c.link_logfiles()
+        # no exception should be thrown
+
+    @mock.patch('dockercontainer.exec_dockerps',
+                side_effect=mocked_exec_dockerps)
+    @mock.patch('dockercontainer.plugins_manager.get_runtime_env_plugin',
+                side_effect=mocked_get_runtime_env)
+    @mock.patch('dockercontainer.exec_dockerinspect',
+                side_effect=mocked_docker_inspect)
+    @mock.patch('dockercontainer.get_docker_container_rootfs_path',
+                side_effect=mocked_get_rootfs)
+    @mock.patch('dockercontainer.os.makedirs')
+    @mock.patch('dockercontainer.os.symlink',
+                side_effect=mocked_symlink_exception)
+    def test_link_logfiles_symlink_exception(
+            self,
+            mock_symlink,
+            mock_makedirs,
+            mock_get_rootfs,
+            mock_inspect,
+            mocked_get_runtime_env,
+            mocked_dockerps):
+        c = DockerContainer("valid_rootfs_id")
+        c.link_logfiles()
+        # no exception should be thrown
+
+    @mock.patch('dockercontainer.exec_dockerps',
+                side_effect=mocked_exec_dockerps)
+    @mock.patch(
+        'dockercontainer.plugins_manager.get_runtime_env_plugin',
+        side_effect=mocked_get_runtime_env)
+    @mock.patch('dockercontainer.exec_dockerinspect',
+                side_effect=mocked_docker_inspect)
+    @mock.patch('dockercontainer.get_docker_container_rootfs_path',
+                side_effect=mocked_get_rootfs)
+    @mock.patch('dockercontainer.os.makedirs')
+    @mock.patch('dockercontainer.os.symlink')
+    @mock.patch('dockercontainer.shutil.rmtree')
+    def test_link_and_unlink_logfiles(
+            self,
+            mock_rmtree,
+            mock_symlink,
+            mock_makedirs,
+            mock_get_rootfs,
+            mock_inspect,
+            mocked_get_runtime_env,
+            mocked_dockerps):
+        c = DockerContainer("valid_rootfs_id")
+        c.link_logfiles()
+        mock_symlink.assert_called_with(
+            '/tmp/something/docker/valid_rootfs_id/var/log/2',
+            '/var/log/crawler_container_logs/random_prefix/var/log/2')
+        c.unlink_logfiles()
+        assert mock_symlink.call_count == 4
+        assert mock_rmtree.call_count == 1
+
+    @mock.patch('dockercontainer.exec_dockerps',
+                side_effect=mocked_exec_dockerps)
+    @mock.patch(
+        'dockercontainer.plugins_manager.get_runtime_env_plugin',
+        side_effect=mocked_get_runtime_env)
+    @mock.patch('dockercontainer.exec_dockerinspect',
+                side_effect=mocked_docker_inspect)
+    @mock.patch('dockercontainer.get_docker_container_rootfs_path',
+                side_effect=mocked_get_rootfs)
+    @mock.patch('dockercontainer.os.makedirs')
+    @mock.patch('dockercontainer.os.symlink')
+    @mock.patch('dockercontainer.shutil.rmtree')
+    @mock.patch('dockercontainer.get_docker_container_json_logs_path',
+                side_effect=mocked_get_container_json_logs_path)
+    def test_link_and_unlink_docker_json_logfile(
+            self,
+            mock_json_logs,
+            mock_rmtree,
+            mock_symlink,
+            mock_makedirs,
+            mock_get_rootfs,
+            mock_inspect,
+            mocked_get_runtime_env,
+            mocked_dockerps):
+        c = DockerContainer("valid_rootfs_id")
+        c.link_logfiles()
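What these link/unlink tests pin down is a symlink-based scheme: each log file inside the container rootfs gets a host-side symlink under a per-container prefix, and link failures are swallowed rather than raised. A simplified sketch (paths and names are illustrative, not the actual `link_logfiles` implementation):

import os

def link_one_log(rootfs, log_path, prefix,
                 host_base='/var/log/crawler_container_logs'):
    src = rootfs + log_path                           # inside container rootfs
    dst = os.path.join(host_base, prefix) + log_path  # host-side view
    try:
        os.makedirs(os.path.dirname(dst))
        os.symlink(src, dst)
    except Exception:
        pass  # the symlink oserror/exception tests above expect no raise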
mock_symlink.assert_called_with( + '/var/lib/docker/abc/container/log.json', + '/var/log/crawler_container_logs/random_prefix/docker.log') + c.unlink_logfiles() + assert mock_symlink.call_count == 5 + assert mock_rmtree.call_count == 1 + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch( + 'dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + @mock.patch('dockercontainer.os.makedirs') + @mock.patch('dockercontainer.os.symlink') + @mock.patch('dockercontainer.shutil.rmtree', + side_effect=mocked_rmtree_exception) + def test_link_and_unlink_logfiles_failed_rmtree( + self, + mock_rmtree, + mock_symlink, + mock_makedirs, + mock_get_rootfs, + mock_inspect, + mocked_get_runtime_env, + mocked_dockerps): + c = DockerContainer("valid_rootfs_id") + c.link_logfiles() + mock_symlink.assert_called_with( + '/tmp/something/docker/valid_rootfs_id/var/log/2', + '/var/log/crawler_container_logs/random_prefix/var/log/2') + c.unlink_logfiles() + assert mock_symlink.call_count == 4 + assert mock_rmtree.call_count == 1 + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch( + 'dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + @mock.patch('dockercontainer.os.makedirs') + @mock.patch('dockercontainer.os.symlink') + @mock.patch('dockercontainer.shutil.rmtree', + side_effect=mocked_rmtree_exception) + def test_links_with_mounts( + self, + mock_rmtree, + mock_symlink, + mock_makedirs, + mock_get_rootfs, + mock_inspect, + mocked_get_runtime_env, + mocked_dockerps): + inspect = { + "Id": "valid_rootfs_id", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + # /var in the container is mapped to /mount/in/the/host + # container was started with -v /var/in/the/host:/var + "Mounts": [{'Source': '/var/in/the/host', + 'Destination': '/var'}], + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + } + } + c = DockerContainer("valid_rootfs_id", inspect) + c.link_logfiles() + mock_symlink.assert_called_with( + '/var/in/the/host/log/2', + '/var/log/crawler_container_logs/random_prefix/var/log/2') + c.unlink_logfiles() + assert mock_symlink.call_count == 4 + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch( + 'dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + @mock.patch('dockercontainer.os.makedirs') + @mock.patch('dockercontainer.os.symlink') + @mock.patch('dockercontainer.shutil.rmtree', + side_effect=mocked_rmtree_exception) + # In older docker versions, the inspect field for Mounts was called Volumes + def test_links_with_volumes( + self, + mock_rmtree, + mock_symlink, + mock_makedirs, + 
mock_get_rootfs, + mock_inspect, + mocked_get_runtime_env, + mocked_dockerps): + inspect = { + "Id": "valid_rootfs_id", + "Created": "2016-07-06T16:38:05.479090842Z", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + # /var in the container is mapped to /mount/in/the/host + # container was started with -v /var/in/the/host:/var + "Volumes": {'/var': '/var/in/the/host'}, + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + } + } + c = DockerContainer("valid_rootfs_id", inspect) + c.link_logfiles() + mock_symlink.assert_called_with( + '/var/in/the/host/log/2', + '/var/log/crawler_container_logs/random_prefix/var/log/2') + c.unlink_logfiles() + assert mock_symlink.call_count == 4 + + # TODO test _get_cgroup_dir when ismount fails + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch( + 'dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + def _test_non_implemented_methods(self): + c = DockerContainer("some_id") + with self.assertRaises(NotImplementedError): + c.get_memory_cgroup_path() + with self.assertRaises(NotImplementedError): + c.get_cpu_cgroup_path() + with self.assertRaises(NotImplementedError): + c.link_logfiles() + with self.assertRaises(NotImplementedError): + c.unlink_logfiles() + + @mock.patch('dockercontainer.exec_dockerps', + side_effect=mocked_exec_dockerps) + @mock.patch( + 'dockercontainer.plugins_manager.get_runtime_env_plugin', + side_effect=mocked_get_runtime_env) + @mock.patch('dockercontainer.exec_dockerinspect', + side_effect=mocked_docker_inspect) + @mock.patch('dockercontainer.get_docker_container_rootfs_path', + side_effect=mocked_get_rootfs) + @mock.patch('emitter.os.path.exists', side_effect=mocked_exists) + def _test_is_running(self, mock_exists): + c = DockerContainer("good_id") + assert c.is_running() + + def _test_eq_ne(self): + c1 = DockerContainer("good_id") + c2 = DockerContainer("ebcd") + c3 = DockerContainer("ebcd") + assert c1 != c2 + assert c2 == c3 + + def _test_to_str(self): + c = DockerContainer("good_id") + print(c) diff --git a/tests/unit/test_dockerutils.py b/tests/unit/test_dockerutils.py new file mode 100644 index 00000000..9453d49a --- /dev/null +++ b/tests/unit/test_dockerutils.py @@ -0,0 +1,381 @@ +import unittest + +import dateutil.parser as dp +import docker +import mock + +import utils.dockerutils +from utils.crawler_exceptions import (DockerutilsNoJsonLog, DockerutilsException) + + +class MockedClient(): + + def containers(self): + return [{'Id': 'good_id'}] + + def info(self): + return {'Driver': 'btrfs', 'DockerRootDir': '/var/lib/docker'} + + def version(self): + return {'Version': '1.10.1'} + + def inspect_container(self, id): + return { + "Id": "good_id", + "Created": "2016-07-06", + "State": { + "Status": "running", + "Running": True, + "Pid": 11186 + }, + "Image": "sha256:07c86167cdc4264926fa5d2894e34a339ad27", + "Name": "/pensive_rosalind", + "Mounts": [], + "LogPath": "/a/b/c/log.json", + "Config": { + "Cmd": [ + "bash" + ], + "Image": "ubuntu:trusty" + }, + "NetworkSettings": { + "Ports": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "32768" + } + ]} + + }, + "HostConfig": { + 
"PortBindings": { + "809/tcp": [ + { + "HostIp": "", + "HostPort": "" + } + ] + } + + } + } + + def inspect_image(self, image_id): + return {'RepoTags': 'registry/abc/def:latest'} + + def history(self, image_id): + return [{'History': 'xxx'}] + + +def throw_runtime_error(*args, **kwargs): + raise RuntimeError() + + +def throw_io_error(*args, **kwargs): + raise IOError() + + +def throw_docker_exception(*args, **kwargs): + raise docker.errors.DockerException() + + +class DockerUtilsTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + def test_exec_dockerps(self, *args): + for c in utils.dockerutils.exec_dockerps(): + print c + break + + docker_datetime = dp.parse('2016-07-06') + epoch_seconds = docker_datetime.strftime('%s') + + assert c == {'Name': '/pensive_rosalind', + 'Created': epoch_seconds, + 'RepoTag': 'r', + 'State': {'Status': 'running', + 'Running': True, + 'Pid': '11186'}, + 'Mounts': [], + 'Config': {'Image': 'ubuntu:trusty', + 'Cmd': ['bash']}, + 'NetworkSettings': {'Ports': { + '80/tcp': [{'HostPort': '32768', + 'HostIp': '0.0.0.0'}]}}, + 'Image': 'sha256:07c86167cdc4264926fa5d2894e34a339ad27', + 'LogPath': '/a/b/c/log.json', + 'HostConfig': {'PortBindings': { + '809/tcp': [{'HostPort': '', + 'HostIp': ''}]}}, + 'Id': 'good_id'} + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.exec_dockerinspect', + side_effect=throw_docker_exception) + def test_exec_dockerps_failure(self, *args): + with self.assertRaises(DockerutilsException): + utils.dockerutils.exec_dockerps() + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + def test_exec_docker_history(self, *args): + h = utils.dockerutils.exec_docker_history('ididid') + assert h == [{'History': 'xxx'}] + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=throw_docker_exception) + def test_exec_docker_history_failure(self, *args): + with self.assertRaises(DockerutilsException): + utils.dockerutils.exec_docker_history('ididid') + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + def test_exec_docker_inspect(self, *args): + i = utils.dockerutils.exec_dockerinspect('ididid') + + docker_datetime = dp.parse('2016-07-06') + epoch_seconds = docker_datetime.strftime('%s') + + assert i == {'Name': '/pensive_rosalind', + 'Created': epoch_seconds, + 'RepoTag': 'r', + 'State': {'Status': 'running', + 'Running': True, + 'Pid': '11186'}, + 'Mounts': [], + 'Config': {'Image': 'ubuntu:trusty', + 'Cmd': ['bash']}, + 'NetworkSettings': {'Ports': { + '80/tcp': [ + {'HostPort': '32768', + 'HostIp': '0.0.0.0'}]}}, + 'Image': 'sha256:07c86167cdc4264926fa5d2894e34a339ad27', + 'LogPath': '/a/b/c/log.json', + 'HostConfig': {'PortBindings': { + '809/tcp': [{'HostPort': '', + 'HostIp': ''}]}}, + 'Id': 'good_id'} + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=throw_docker_exception) + def test_exec_docker_inspect_failure(self, *args): + with self.assertRaises(DockerutilsException): + utils.dockerutils.exec_dockerinspect('ididid') + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=throw_docker_exception) + @mock.patch('utils.dockerutils.open') + def test_get_docker_storage_driver_step1a(self, mock_open, mock_client): + + mock_open.return_value = 
open('tests/unit/proc_mounts_aufs') + assert utils.dockerutils._get_docker_storage_driver() == 'aufs' + mock_open.return_value = open('tests/unit/proc_mounts_devicemapper') + assert utils.dockerutils._get_docker_storage_driver() == 'devicemapper' + mock_open.return_value = open('tests/unit/proc_mounts_vfs') + assert utils.dockerutils._get_docker_storage_driver() == 'vfs' + mock_open.return_value = open('tests/unit/proc_mounts_btrfs') + assert utils.dockerutils._get_docker_storage_driver() == 'btrfs' + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=throw_io_error) + def test_get_docker_storage_driver_step2(self, mock_open, mock_client): + assert utils.dockerutils._get_docker_storage_driver() == 'btrfs' + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=throw_docker_exception) + @mock.patch('utils.dockerutils.open', + side_effect=throw_io_error) + def test_get_docker_storage_driver_failure(self, mock_open, mock_client): + assert utils.dockerutils._get_docker_storage_driver() == 'devicemapper' + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + def test_get_docker_server_version(self, mock_client): + assert utils.dockerutils._get_docker_server_version() == '1.10.1' + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=throw_docker_exception) + def test_get_docker_server_version_failure(self, mock_client): + with self.assertRaises(DockerutilsException): + utils.dockerutils._get_docker_server_version() + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch( + 'crawler.utils.dockerutils.os.path.isfile', + side_effect=lambda p: + True if p == ("/var/lib/docker/containers/id/id-json.log") + else False) + def test_get_json_logs_path_from_path(self, mock_isfile, mock_client): + assert utils.dockerutils.get_docker_container_json_logs_path( + 'id') == '/var/lib/docker/containers/id/id-json.log' + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.os.path.isfile', + side_effect=lambda p: + True if p == '/a/b/c/log.json' else False) + def test_get_json_logs_path_from_daemon(self, mock_isfile, mock_client): + assert utils.dockerutils.get_docker_container_json_logs_path( + 'id') == '/a/b/c/log.json' + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.os.path.isfile', + side_effect=lambda p: False) + def test_get_json_logs_path_failure(self, mock_isfile, mock_client): + with self.assertRaises(DockerutilsNoJsonLog): + utils.dockerutils.get_docker_container_json_logs_path('id') + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=throw_io_error) + def test_get_rootfs_not_supported_driver_failure( + self, mock_open, mock_client): + utils.dockerutils.driver = 'not_supported_driver' + with self.assertRaises(DockerutilsException): + utils.dockerutils.get_docker_container_rootfs_path('id') + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=[open('tests/unit/proc_pid_mounts_devicemapper'), + open('tests/unit/proc_mounts_devicemapper')]) + def 
test_get_rootfs_devicemapper(self, mock_open, mock_client): + utils.dockerutils.driver = 'devicemapper' + assert utils.dockerutils.get_docker_container_rootfs_path( + 'id') == ("/var/lib/docker/devicemapper/mnt/" + "65fe676c24fe1faea1f06e222cc3811cc" + "9b651c381702ca4f787ffe562a5e39b/rootfs") + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=throw_io_error) + def test_get_rootfs_devicemapper_failure(self, mock_open, mock_client): + utils.dockerutils.driver = 'devicemapper' + with self.assertRaises(DockerutilsException): + utils.dockerutils.get_docker_container_rootfs_path('id') + + @mock.patch('utils.dockerutils.misc.btrfs_list_subvolumes', + side_effect=lambda p: + [ + ('ID', '260', 'gen', '22', 'top', + 'level', '5', 'path', 'sub1/abcde'), + ('ID', '260', 'gen', '22', 'top', + 'level', '5', 'path', 'sub1/abcde/sub2'), + ] + ) + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + def test_get_rootfs_btrfs_v1_8(self, mock_client, mock_list): + utils.dockerutils.driver = 'btrfs' + utils.dockerutils.server_version = '1.8.0' + assert utils.dockerutils.get_docker_container_rootfs_path( + 'abcde') == '/var/lib/docker/sub1/abcde' + + @mock.patch('utils.dockerutils.misc.btrfs_list_subvolumes', + side_effect=throw_runtime_error) + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + def test_get_rootfs_btrfs_v1_8_failure(self, mock_client, mock_list): + utils.dockerutils.driver = 'btrfs' + utils.dockerutils.server_version = '1.8.0' + with self.assertRaises(DockerutilsException): + utils.dockerutils.get_docker_container_rootfs_path('abcde') + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=[open('tests/unit/btrfs_mount_init-id')]) + def test_get_rootfs_btrfs_v1_10(self, mock_open, mock_client): + utils.dockerutils.driver = 'btrfs' + utils.dockerutils.server_version = '1.10.0' + assert utils.dockerutils.get_docker_container_rootfs_path( + 'id') == '/var/lib/docker/btrfs/subvolumes/vol1/id/rootfs-a-b-c' + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=throw_io_error) + def test_get_rootfs_btrfs_v1_10_failure(self, mock_open, mock_client): + utils.dockerutils.driver = 'btrfs' + utils.dockerutils.server_version = '1.10.0' + with self.assertRaises(DockerutilsException): + utils.dockerutils.get_docker_container_rootfs_path('abcde') + + @mock.patch('utils.dockerutils.os.path.isdir', + side_effect=lambda d: True) + @mock.patch('utils.dockerutils.os.listdir', + side_effect=lambda d: ['usr', 'boot', 'var']) + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + def test_get_rootfs_aufs_v1_8(self, *args): + utils.dockerutils.driver = 'aufs' + utils.dockerutils.server_version = '1.8.0' + assert utils.dockerutils.get_docker_container_rootfs_path( + 'abcde') == '/var/lib/docker/aufs/mnt/abcde' + + @mock.patch('utils.dockerutils.os.path.isdir', + side_effect=lambda d: False) + @mock.patch('utils.dockerutils.os.listdir', + side_effect=lambda d: ['usr', 'boot', 'var']) + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + def 
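Taken together, the rootfs tests above and below pin down one path pattern per (storage driver, docker version) pair. Summarized as data; the <...> placeholders stand for per-driver lookups against the fixture files, and "any" is an assumption since only one devicemapper case is exercised:

ROOTFS_PATTERNS = {
    ('devicemapper', 'any'): '/var/lib/docker/devicemapper/mnt/<device-id>/rootfs',
    ('btrfs', '1.8'): '/var/lib/docker/<subvolume-path-containing-id>',
    ('btrfs', '1.10'): '/var/lib/docker/btrfs/subvolumes/<mount-id>',
    ('aufs', '1.8'): '/var/lib/docker/aufs/mnt/<container-id>',
    ('aufs', '1.10'): '/var/lib/docker/aufs/mnt/<layer-id>',
    ('vfs', '1.10'): '/var/lib/docker/vfs/dir/<layer-id>',
}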
test_get_rootfs_aufs_v1_8_failure(self, *args): + utils.dockerutils.driver = 'aufs' + utils.dockerutils.server_version = '1.8.0' + with self.assertRaises(DockerutilsException): + utils.dockerutils.get_docker_container_rootfs_path('abcde') + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=[open('tests/unit/aufs_mount_init-id')]) + def test_get_rootfs_aufs_v1_10(self, *args): + utils.dockerutils.driver = 'aufs' + utils.dockerutils.server_version = '1.10.0' + assert utils.dockerutils.get_docker_container_rootfs_path( + 'abcde') == '/var/lib/docker/aufs/mnt/vol1/id/rootfs-a-b-c' + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=throw_io_error) + def test_get_rootfs_aufs_v1_10_failure(self, *args): + utils.dockerutils.driver = 'aufs' + utils.dockerutils.server_version = '1.10.0' + with self.assertRaises(DockerutilsException): + utils.dockerutils.get_docker_container_rootfs_path('abcde') + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=[open('tests/unit/vfs_mount_init-id')]) + def test_get_rootfs_vfs_v1_10(self, *args): + utils.dockerutils.driver = 'vfs' + utils.dockerutils.server_version = '1.10.0' + assert utils.dockerutils.get_docker_container_rootfs_path( + 'abcde') == '/var/lib/docker/vfs/dir/vol1/id/rootfs-a-b-c' + + @mock.patch('utils.dockerutils.docker.APIClient', + side_effect=lambda base_url, version: MockedClient()) + @mock.patch('utils.dockerutils.open', + side_effect=throw_io_error) + def test_get_rootfs_vfs_v1_10_failure(self, *args): + utils.dockerutils.driver = 'vfs' + utils.dockerutils.server_version = '1.10.0' + with self.assertRaises(DockerutilsException): + utils.dockerutils.get_docker_container_rootfs_path('abcde') diff --git a/tests/unit/test_emitter.py b/tests/unit/test_emitter.py new file mode 100644 index 00000000..c87a024a --- /dev/null +++ b/tests/unit/test_emitter.py @@ -0,0 +1,647 @@ +import cStringIO +import gzip +import unittest +import time +import os +import json + +import mock +import requests.exceptions +import plugins_manager + +from base_crawler import BaseFrame +from capturing import Capturing +from emitters_manager import EmittersManager +from plugins.emitters.file_emitter import FileEmitter +from plugins.emitters.base_http_emitter import BaseHttpEmitter +from plugins.emitters.http_emitter import HttpEmitter +from plugins.emitters.https_emitter import HttpsEmitter +from plugins.emitters.sas_emitter import SasEmitter +from plugins.emitters.kafka_emitter import KafkaEmitter +from plugins.emitters.mtgraphite_emitter import MtGraphiteEmitter +from plugins.emitters.fluentd_emitter import FluentdEmitter +from utils import crawler_exceptions + + +def mocked_formatter(frame): + iostream = cStringIO.StringIO() + iostream.write('namespace777.dummy-feature.test2 12345 14804\r\n') + iostream.write('namespace777.dummy-feature.test2 12345 14805\r\n') + return iostream + + +def mocked_formatter1(frame): + iostream = cStringIO.StringIO() + iostream.write('abc\r\n') + iostream.write('def\r\n') + return iostream + +def mocked_formatter2(frame): + iostream = cStringIO.StringIO() + metadata = {} + metadata["timestamp"] = "current-time" + metadata["namespace"] = "my/name" + metadata["features"] = "os,cpu,memory" + metadata["source_type"] = 
"container" + + iostream.write('%s\t%s\t%s\n' % + ('metadata', json.dumps('metadata'), + json.dumps(metadata, separators=(',', ':')))) + return iostream + +def mocked_get_sas_token(): + return ('sas-token', 'cloudoe', 'access-group') + +class RandomKafkaException(Exception): + pass + +def raise_value_error(*args, **kwargs): + raise ValueError() + +def mock_call_with_retries(function, max_retries=10, + exception_type=Exception, + _args=(), _kwargs={}): + return function(*_args, **_kwargs) + + +def mocked_requests_post(*args, **kwargs): + class MockResponse: + + def __init__(self, status_code): + self.status_code = status_code + self.text = 'blablableble' + + def json(self): + return self.json_data + if args[0] == 'http://1.1.1.1/good' or args[0] == 'https://1.1.1.1/good': + return MockResponse(status_code=200) + elif args[0] == 'http://1.1.1.1/bad' or args[0] == 'https://1.1.1.1/bad': + return MockResponse(status_code=500) + elif args[0] == 'http://1.1.1.1/exception' or args[0] == 'https://1.1.1.1/exception': + raise requests.exceptions.RequestException('bla') + elif args[0] == 'http://1.1.1.1/encoding_error' or args[0] == 'https://1.1.1.1/encoding_error': + raise requests.exceptions.ChunkedEncodingError('bla') + + +class MockProducer: + + def __init__(self): + self._produced = [] + + def produce(self, msgs=[]): + self._produced.extend(msgs) + + +def MockedKafkaConnect(self, broker, topic): + self.producer = MockProducer() + + +class MockedMTGraphiteClient: + + def __init__(self, url): + pass + + def send_messages(self, messages): + return 1 + + +class MockFluentdSender: + + def __init__(self): + self._emitted = dict() + + def emit_with_time(self, tag, timestamp, item): + self._emitted.update(item) + self.last_error = None + + def clear_last_error(): + pass + + +def mocked_fluentd_connect(self, host, port): + self.fluentd_sender = MockFluentdSender() + + +class EmitterTests(unittest.TestCase): + image_name = 'alpine:latest' + + def setUp(self): + plugins_manager.emitter_plugins = [] + pass + + def tearDown(self): + pass + + def _test_emitter_csv_simple_stdout(self, compress=False): + emitter = EmittersManager(urls=['stdout://'], + compress=compress) + frame = BaseFrame(feature_types=['os']) + frame.add_features([("dummy_feature", + {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000}, + 'dummy_feature')]) + emitter.emit(frame, 0) + + def test_emitter_csv_simple_stdout(self): + with Capturing() as _output: + self._test_emitter_csv_simple_stdout() + output = "%s" % _output + print _output + assert len(_output) == 2 + assert "dummy_feature" in output + assert "metadata" in output + + def test_emitter_csv_compressed_stdout(self): + with Capturing() as _output: + self._test_emitter_csv_simple_stdout(compress=True) + output = "%s" % _output + assert 'metadata' not in output + assert len(output) > 0 + + def test_emitter_csv_simple_file(self): + emitter = EmittersManager(urls=['file:///tmp/test_emitter'], + compress=False) + frame = BaseFrame(feature_types=['os']) + frame.add_features([("dummy_feature", + {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000}, + 'dummy_feature')]) + emitter.emit(frame, 0) + with open('/tmp/test_emitter.0') as f: + _output = f.readlines() + output = "%s" % _output + print output + assert len(_output) == 2 + assert "dummy_feature" in output + assert "metadata" in output + + def test_emitter_all_features_compressed_csv(self): + emitter = EmittersManager(urls=['file:///tmp/test_emitter'], + compress=True) + frame = 
BaseFrame(feature_types=[]) + frame.add_feature("memory", {'test3': 12345}, 'memory') + frame.add_feature("memory_0", {'test3': 12345}, 'memory') + frame.add_feature("load", {'load': 12345}, 'load') + frame.add_feature("cpu", {'test3': 12345}, 'cpu') + frame.add_feature("cpu_0", {'test3': 12345}, 'cpu') + frame.add_feature("eth0", {'if_tx': 12345}, 'interface') + frame.add_feature("eth0", {'if_rx': 12345}, 'interface') + frame.add_feature("bla/bla", {'ble/ble': 12345}, 'disk') + emitter.emit(frame, 0) + with gzip.open('/tmp/test_emitter.0.gz') as f: + _output = f.readlines() + output = "%s" % _output + print output + assert len(_output) == 9 + assert "metadata" in output + + def test_emitter_all_features_csv(self): + emitter = EmittersManager(urls=['file:///tmp/test_emitter']) + frame = BaseFrame(feature_types=[]) + frame.add_feature("memory", {'test3': 12345}, 'memory') + frame.add_feature("memory_0", {'test3': 12345}, 'memory') + frame.add_feature("load", {'load': 12345}, 'load') + frame.add_feature("cpu", {'test3': 12345}, 'cpu') + frame.add_feature("cpu_0", {'test3': 12345}, 'cpu') + frame.add_feature("eth0", {'if_tx': 12345}, 'interface') + frame.add_feature("eth0", {'if_rx': 12345}, 'interface') + frame.add_feature("bla/bla", {'ble/ble': 12345}, 'disk') + emitter.emit(frame, 0) + with open('/tmp/test_emitter.0') as f: + _output = f.readlines() + output = "%s" % _output + print output + assert len(_output) == 9 + assert "metadata" in output + + def test_emitter_all_features_graphite(self): + emitter = EmittersManager(urls=['file:///tmp/test_emitter'], + format='graphite') + frame = BaseFrame(feature_types=[]) + frame.add_feature("memory", {'test3': 12345}, 'memory') + frame.add_feature("memory_0", {'test3': 12345}, 'memory') + frame.add_feature("load", {'load': 12345}, 'load') + frame.add_feature("cpu", {'test3': 12345}, 'cpu') + frame.add_feature("cpu_0", {'test3': 12345}, 'cpu') + frame.add_feature("eth0", {'if_tx': 12345}, 'interface') + frame.add_feature("eth0", {'if_rx': 12345}, 'interface') + frame.add_feature("bla/bla", {'ble/ble': 12345}, 'disk') + emitter.emit(frame, 0) + with open('/tmp/test_emitter.0') as f: + _output = f.readlines() + output = "%s" % _output + print output + assert 'memory-0.test3 12345' in output + assert len(_output) == 8 + + def _test_emitter_graphite_simple_stdout(self): + emitter = EmittersManager(urls=['stdout://'], + format='graphite') + frame = BaseFrame(feature_types=[]) + frame.metadata['namespace'] = 'namespace777' + frame.add_features([("dummy_feature", + {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000}, + 'dummy_feature')]) + emitter.emit(frame, 0) + + def test_emitter_graphite_simple_stdout(self): + with Capturing() as _output: + self._test_emitter_graphite_simple_stdout() + output = "%s" % _output + # should look like this: + # ['namespace777.dummy-feature.test3 3.000000 1449870719', + # 'namespace777.dummy-feature.test2 2.000000 1449870719', + # 'namespace777.dummy-feature.test4 4.000000 1449870719'] + assert len(_output) == 3 + assert "dummy_feature" not in output # can't have '_' + assert "dummy-feature" in output # can't have '_' + assert "metadata" not in output + assert 'namespace777.dummy-feature.test2' in output + assert 'namespace777.dummy-feature.test3' in output + assert 'namespace777.dummy-feature.test4' in output + # three fields in graphite format + assert len(_output[0].split(' ')) == 3 + # three fields in graphite format + assert len(_output[1].split(' ')) == 3 + # three fields in graphite format 
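The graphite assertions here pin down the wire format: one 'namespace.feature.key value timestamp' line per metric, with '_' in feature names rewritten to '-'. A toy formatter that produces the same three-field shape (an illustration, not the emitter's actual code):

    import time

    def to_graphite_line(namespace, feature_name, key, value, timestamp=None):
        # graphite metric names cannot contain '_', hence the substitution
        metric = '%s.%s.%s' % (namespace, feature_name.replace('_', '-'), key)
        return '%s %f %d' % (metric, value, timestamp or time.time())

    line = to_graphite_line('namespace777', 'dummy_feature', 'test2', 12345)
    assert line.startswith('namespace777.dummy-feature.test2 ')
    assert len(line.split(' ')) == 3  # metric, value, timestamp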
+ assert len(_output[2].split(' ')) == 3 + assert float(_output[0].split(' ')[1]) == 12345.0 + assert float(_output[1].split(' ')[1]) == 12345.0 + assert float(_output[2].split(' ')[1]) == 12345.0 + + def test_emitter_unsupported_format(self): + metadata = {} + metadata['namespace'] = 'namespace777' + with self.assertRaises( + crawler_exceptions.EmitterUnsupportedFormat): + _ = EmittersManager(urls=['file:///tmp/test_emitter'], + format='unsupported') + + @mock.patch('plugins.emitters.file_emitter.FileEmitter.emit', + side_effect=raise_value_error) + def _test_emitter_failed_emit(self, *args): + with self.assertRaises(ValueError): + emitter = EmittersManager(urls=['file:///tmp/test_emitter'], + format='csv') + frame = BaseFrame(feature_types=[]) + frame.metadata['namespace'] = 'namespace777' + frame.add_feature("memory", {'test3': 12345}, 'memory') + emitter.emit(frame) + + def test_emitter_unsuported_protocol(self): + with self.assertRaises( + crawler_exceptions.EmitterUnsupportedProtocol): + _ = EmittersManager(urls=['error:///tmp/test_emitter'], + format='graphite') + + def test_emitter_graphite_simple_file(self): + emitter = EmittersManager(urls=['file:///tmp/test_emitter'], + format='graphite') + frame = BaseFrame(feature_types=[]) + frame.metadata['namespace'] = 'namespace777' + frame.add_features([("dummy_feature", + {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000}, + 'dummy_feature')]) + emitter.emit(frame) + with open('/tmp/test_emitter.0') as f: + _output = f.readlines() + output = "%s" % _output + # should look like this: + # ['namespace777.dummy-feature.test3 3.000000 1449870719', + # 'namespace777.dummy-feature.test2 2.000000 1449870719', + # 'namespace777.dummy-feature.test4 4.000000 1449870719'] + assert len(_output) == 3 + assert "dummy_feature" not in output # can't have '_' + assert "dummy-feature" in output # can't have '_' + assert "metadata" not in output + assert 'namespace777.dummy-feature.test2' in output + assert 'namespace777.dummy-feature.test3' in output + assert 'namespace777.dummy-feature.test4' in output + # three fields in graphite format + assert len(_output[0].split(' ')) == 3 + # three fields in graphite format + assert len(_output[1].split(' ')) == 3 + # three fields in graphite format + assert len(_output[2].split(' ')) == 3 + assert float(_output[0].split(' ')[1]) == 12345.0 + assert float(_output[1].split(' ')[1]) == 12345.0 + assert float(_output[2].split(' ')[1]) == 12345.0 + + def test_emitter_json_simple_file(self): + emitter = EmittersManager(urls=['file:///tmp/test_emitter'], + format='json') + frame = BaseFrame(feature_types=[]) + frame.metadata['namespace'] = 'namespace777' + frame.add_features([("dummy_feature", + {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000}, + 'dummy_feature')]) + emitter.emit(frame) + with open('/tmp/test_emitter.0') as f: + _output = f.readlines() + output = "%s" % _output + print output + assert len(_output) == 2 + assert "metadata" not in output + assert ( + '{"test3": 12345.0, "test2": 12345, "test4": 12345.0, ' + '"namespace": "namespace777", "test": "bla", "feature_type": ' + '"dummy_feature"}') in output + + def test_emitter_graphite_simple_compressed_file(self): + emitter = EmittersManager(urls=['file:///tmp/test_emitter'], + format='graphite', + compress=True) + frame = BaseFrame(feature_types=[]) + frame.metadata['namespace'] = 'namespace777' + frame.add_features([("dummy_feature", + {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 
12345.00000}, + 'dummy_feature')]) + emitter.emit(frame) + with gzip.open('/tmp/test_emitter.0.gz') as f: + _output = f.readlines() + output = "%s" % _output + # should look like this: + # ['namespace777.dummy-feature.test3 3.000000 1449870719', + # 'namespace777.dummy-feature.test2 2.000000 1449870719', + # 'namespace777.dummy-feature.test4 4.000000 1449870719'] + assert len(_output) == 3 + assert "dummy_feature" not in output # can't have '_' + assert "dummy-feature" in output # can't have '_' + assert "metadata" not in output + assert 'namespace777.dummy-feature.test2' in output + assert 'namespace777.dummy-feature.test3' in output + assert 'namespace777.dummy-feature.test4' in output + # three fields in graphite format + assert len(_output[0].split(' ')) == 3 + # three fields in graphite format + assert len(_output[1].split(' ')) == 3 + # three fields in graphite format + assert len(_output[2].split(' ')) == 3 + assert float(_output[0].split(' ')[1]) == 12345.0 + assert float(_output[1].split(' ')[1]) == 12345.0 + assert float(_output[2].split(' ')[1]) == 12345.0 + + def test_emitter_base_http(self): + emitter = BaseHttpEmitter() + self.assertRaises(NotImplementedError, emitter.get_emitter_protocol) + + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter) + @mock.patch('plugins.emitters.base_http_emitter.requests.post', + side_effect=mocked_requests_post) + @mock.patch('plugins.emitters.base_http_emitter.time.sleep') + def test_emitter_http(self, mock_sleep, mock_post, mock_format): + emitter = HttpEmitter() + emitter.init(url='http://1.1.1.1/good') + emitter.emit('frame') + self.assertEqual(mock_post.call_count, 1) + + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter) + @mock.patch('plugins.emitters.base_http_emitter.requests.post', + side_effect=mocked_requests_post) + @mock.patch('plugins.emitters.base_http_emitter.time.sleep') + def test_emitter_http_server_error(self, mock_sleep, mock_post, mock_format): + emitter = HttpEmitter() + emitter.init(url='http://1.1.1.1/bad') + emitter.emit('frame') + self.assertEqual(mock_post.call_count, 5) + + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter) + @mock.patch('plugins.emitters.base_http_emitter.requests.post', + side_effect=mocked_requests_post) + @mock.patch('plugins.emitters.base_http_emitter.time.sleep') + def test_emitter_http_request_exception(self, mock_sleep, mock_post, mock_format): + emitter = HttpEmitter() + emitter.init(url='http://1.1.1.1/exception') + emitter.emit('frame') + self.assertEqual(mock_post.call_count, 5) + + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter) + @mock.patch('plugins.emitters.base_http_emitter.requests.post', + side_effect=mocked_requests_post) + def test_emitter_http_encoding_error(self, mock_post, mock_format): + emitter = HttpEmitter() + emitter.init(url='http://1.1.1.1/encoding_error') + emitter.emit('frame') + # there are no retries for encoding errors + self.assertEqual(mock_post.call_count, 1) + + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter) + @mock.patch('plugins.emitters.base_http_emitter.requests.post', + side_effect=mocked_requests_post) + @mock.patch('plugins.emitters.base_http_emitter.time.sleep') + def test_emitter_https(self, mock_sleep, mock_post, mock_format): + emitter = HttpsEmitter() + emitter.init(url='https://1.1.1.1/good') + emitter.emit('frame') + self.assertEqual(mock_post.call_count, 1) + + @mock.patch('iemit_plugin.IEmitter.format', + 
side_effect=mocked_formatter) + @mock.patch('plugins.emitters.base_http_emitter.requests.post', + side_effect=mocked_requests_post) + @mock.patch('plugins.emitters.base_http_emitter.time.sleep') + def test_emitter_https_server_error(self, mock_sleep, mock_post, mock_format): + emitter = HttpsEmitter() + emitter.init(url='https://1.1.1.1/bad') + emitter.emit('frame') + self.assertEqual(mock_post.call_count, 5) + + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter) + @mock.patch('plugins.emitters.base_http_emitter.requests.post', + side_effect=mocked_requests_post) + @mock.patch('plugins.emitters.base_http_emitter.time.sleep') + def test_emitter_https_request_exception(self, mock_sleep, mock_post, mock_format): + emitter = HttpsEmitter() + emitter.init(url='https://1.1.1.1/exception') + emitter.emit('frame') + self.assertEqual(mock_post.call_count, 5) + + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter) + @mock.patch('plugins.emitters.base_http_emitter.requests.post', + side_effect=mocked_requests_post) + def test_emitter_https_encoding_error(self, mock_post, mock_format): + emitter = HttpsEmitter() + emitter.init(url='https://1.1.1.1/encoding_error') + emitter.emit('frame') + # there are no retries for encoding errors + self.assertEqual(mock_post.call_count, 1) + + @mock.patch('plugins.emitters.sas_emitter.SasEmitter.get_sas_tokens', + side_effect=mocked_get_sas_token) + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter2) + @mock.patch('plugins.emitters.sas_emitter.requests.post', + side_effect=mocked_requests_post) + @mock.patch('plugins.emitters.base_http_emitter.time.sleep') + def test_emitter_sas(self, mock_sleep, mock_post, mock_format, mock_get_sas_token): + #env = SasEnvironment() + emitter = SasEmitter() + emitter.init(url='sas://1.1.1.1/good') + emitter.emit('frame') + self.assertEqual(mock_post.call_count, 1) + + @mock.patch('plugins.emitters.sas_emitter.SasEmitter.get_sas_tokens', + side_effect=mocked_get_sas_token) + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter2) + @mock.patch('plugins.emitters.sas_emitter.requests.post', + side_effect=mocked_requests_post) + @mock.patch('plugins.emitters.base_http_emitter.time.sleep') + def test_emitter_sas_server_error(self, mock_sleep, mock_post, mock_format, mock_get_sas_token): + #env = SasEnvironment() + emitter = SasEmitter() + emitter.init(url='sas://1.1.1.1/bad') + emitter.emit('frame') + self.assertEqual(mock_post.call_count, 5) + + @mock.patch('plugins.emitters.sas_emitter.SasEmitter.get_sas_tokens', + side_effect=mocked_get_sas_token) + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter2) + @mock.patch('plugins.emitters.sas_emitter.requests.post', + side_effect=mocked_requests_post) + @mock.patch('plugins.emitters.base_http_emitter.time.sleep') + def test_emitter_sas_request_exception(self, mock_sleep, mock_post, mock_format, mock_get_sas_token): + #env = SasEnvironment() + emitter = SasEmitter() + emitter.init(url='sas://1.1.1.1/exception') + emitter.emit('frame') + self.assertEqual(mock_post.call_count, 5) + + @mock.patch('plugins.emitters.sas_emitter.SasEmitter.get_sas_tokens', + side_effect=mocked_get_sas_token) + @mock.patch('iemit_plugin.IEmitter.format', + side_effect=mocked_formatter2) + @mock.patch('plugins.emitters.sas_emitter.requests.post', + side_effect=mocked_requests_post) + def test_emitter_sas_encoding_error(self, mock_post, mock_format, mocked_get_sas_token): + #env = SasEnvironment() + 
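Taken together, the call-count assertions encode the HTTP emitters' retry policy: a 5xx response or a RequestException is retried (five attempts in these tests), while a ChunkedEncodingError aborts immediately. A sketch of a loop with that behavior; the retry count, backoff, and structure here are assumptions, not the plugin's actual code:

    import time
    import requests

    def post_with_retries(url, payload, max_retries=5):
        for attempt in range(max_retries):
            try:
                response = requests.post(url, data=payload)
                if response.status_code == requests.codes.ok:
                    return response
            except requests.exceptions.ChunkedEncodingError:
                break  # the payload itself is broken; retrying cannot help
            except requests.exceptions.RequestException:
                pass  # transient transport error: fall through and retry
            time.sleep(2 ** attempt)  # simple exponential backoff
        return None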
emitter = SasEmitter() + emitter.init(url='sas://1.1.1.1/encoding_error') + emitter.emit('frame') + # there are no retries for encoding errors + self.assertEqual(mock_post.call_count, 1) + + @mock.patch('plugins.emitters.kafka_emitter.KafkaEmitter.connect_to_broker', + side_effect=MockedKafkaConnect, autospec=True) + @mock.patch('plugins.emitters.kafka_emitter.KafkaEmitter.format', + side_effect=mocked_formatter1) + def test_emitter_kafka(self, *args): + emitter = KafkaEmitter() + emitter.init(url='kafka://1.1.1.1:123/topic1') + emitter.emit('frame') + assert emitter.producer._produced == ['abc\r\ndef\r\n'] + + @mock.patch('plugins.emitters.kafka_emitter.KafkaEmitter.connect_to_broker', + side_effect=MockedKafkaConnect, autospec=True) + @mock.patch('plugins.emitters.kafka_emitter.KafkaEmitter.format', + side_effect=mocked_formatter1) + def test_emitter_kafka_one_per_line(self, *args): + emitter = KafkaEmitter() + emitter.init(url='kafka://1.1.1.1:123/topic1') + emitter.emit_per_line = True + emitter.emit('frame') + assert set(emitter.producer._produced) == set(['abc\r\n', 'def\r\n']) + + @mock.patch('plugins.emitters.mtgraphite_emitter.MTGraphiteClient', + side_effect=MockedMTGraphiteClient, autospec=True) + @mock.patch('plugins.emitters.mtgraphite_emitter.MtGraphiteEmitter.format', + side_effect=mocked_formatter) + def test_emitter_mtgraphite(self, MockMTGraphiteClient, mocked_formatter): + emitter = MtGraphiteEmitter() + emitter.init(url='mtgraphite://1.1.1.1:123/topic1', + max_retries=0) + emitter.emit('frame') + assert MockMTGraphiteClient.call_count == 1 + + @mock.patch('plugins.emitters.fluentd_emitter.FluentdEmitter.connect_to_fluentd_engine', + side_effect=mocked_fluentd_connect, autospec=True) + def test_emitter_fluentd_one_per_line(self, *args): + frame = BaseFrame(feature_types=[]) + frame.metadata['namespace'] = 'namespace777' + frame.metadata['timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S%z') + frame.add_features([("dummy_feature_key", + {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000}, + 'dummy_feature_type')]) + emitter = FluentdEmitter() + emitter.init(url='fluentd://1.1.1.1:123', emit_format='json') + emitter.emit_per_line = True + emitter.emit(frame) + emitted_json = emitter.fluentd_sender._emitted + assert emitted_json["feature_key"] == "dummy_feature_key" + assert emitted_json["feature_type"] == "dummy_feature_type" + assert emitted_json["feature_val"] == {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000} + + @mock.patch('plugins.emitters.fluentd_emitter.FluentdEmitter.connect_to_fluentd_engine', + side_effect=mocked_fluentd_connect, autospec=True) + def test_emitter_fluentd(self, *args): + frame = BaseFrame(feature_types=[]) + frame.metadata['namespace'] = 'namespace777' + frame.metadata['timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S%z') + frame.add_features([("dummy_feature_key", + {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000}, + 'dummy_feature_type')]) + emitter = FluentdEmitter() + emitter.init(url='fluentd://1.1.1.1:123', emit_format='json') + emitter.emit_per_line = False + emitter.emit(frame) + emitted_json = emitter.fluentd_sender._emitted + print emitted_json + assert emitted_json["feature1"]["feature_key"] == "dummy_feature_key" + assert emitted_json["feature1"]["feature_type"] == "dummy_feature_type" + assert emitted_json["feature1"]["feature_val"] == {'test': 'bla', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000} + + def 
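The two kafka tests capture what emit_per_line means: unset, the whole formatted frame goes out as one message; set, every formatted line becomes its own message. The split can be as simple as the following (a sketch; the plugin's internals may differ):

    import cStringIO

    def messages_from_stream(iostream, emit_per_line=False):
        # keepends=True so the per-line payloads concatenate back
        # to the exact single-message payload
        if emit_per_line:
            return iostream.getvalue().splitlines(True)
        return [iostream.getvalue()]

    stream = cStringIO.StringIO()
    stream.write('abc\r\n')
    stream.write('def\r\n')
    assert messages_from_stream(stream) == ['abc\r\ndef\r\n']
    assert messages_from_stream(stream, True) == ['abc\r\n', 'def\r\n']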
test_emitter_logstash_simple_file(self): + emitter = EmittersManager(urls=['file:///tmp/test_emitter'], + format='logstash') + frame = BaseFrame(feature_types=[]) + frame.metadata['namespace'] = 'namespace777' + frame.add_features([("dummy_feature", + {'test': 'dummy', + 'test2': 12345, + 'test3': 12345.0, + 'test4': 12345.00000}, + 'dummy_feature')]) + emitter.emit(frame) + import json + with open('/tmp/test_emitter.0') as f: + output = json.load(f) + assert len(output) == 2 + assert 'metadata' in output + assert 'dummy_feature' in output + assert type(output.get('dummy_feature')) == dict diff --git a/tests/unit/test_gpu_plugin.py b/tests/unit/test_gpu_plugin.py new file mode 100644 index 00000000..89bf4b3f --- /dev/null +++ b/tests/unit/test_gpu_plugin.py @@ -0,0 +1,34 @@ +import unittest +import sys +import mock +sys.path.append('tests/unit/') +sys.modules['pynvml'] = __import__('mock_pynvml') +from plugins.systems.gpu_host_crawler import GPUHostCrawler + +class GPUPluginTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch( + 'plugins.systems.gpu_host_crawler.get_host_ipaddr', + side_effect=lambda: "127.0.0.1") + @mock.patch( + 'plugins.systems.gpu_host_crawler.GPUHostCrawler._load_nvidia_lib', + side_effect=lambda: 1) + def test_os_gpu_host_crawler_plugin(self, *args): + fc = GPUHostCrawler() + for gpu_metrics in fc.crawl(): + print gpu_metrics + assert gpu_metrics == ( + '127/0/0/1.gpu0.NA', + { + "memory": {"total": 12205, "used": 0, "free": 12205}, + "temperature": 31, + "power": {"draw": 27, "limit": 149}, + "utilization": {"gpu": 0, "memory": 0} + }, + 'gpu') diff --git a/tests/unit/test_host_crawler.py b/tests/unit/test_host_crawler.py new file mode 100644 index 00000000..29fc1eb1 --- /dev/null +++ b/tests/unit/test_host_crawler.py @@ -0,0 +1,73 @@ +import mock +import unittest +from host_crawler import HostCrawler + + +class MockedOSCrawler: + + def crawl(self, **kwargs): + return [('linux', {'os': 'some_os'}, 'os')] + + +class MockedCPUCrawler: + + def crawl(self, **kwargs): + return [('cpu-0', {'used': 100}, 'cpu')] + + +class MockedOSCrawlerFailure: + + def crawl(self, **kwargs): + raise OSError('some exception') + + +class HostCrawlerTests(unittest.TestCase): + + @mock.patch( + 'host_crawler.plugins_manager.get_host_crawl_plugins', + side_effect=lambda features: [(MockedOSCrawler(), {}), + (MockedCPUCrawler(), {})]) + def test_host_crawler(self, *args): + crawler = HostCrawler(features=['os', 'cpu'], namespace='localhost') + frames = list(crawler.crawl()) + namespaces = [f.metadata['namespace'] for f in frames] + assert namespaces == ['localhost'] + features_count = [f.num_features for f in frames] + assert features_count == [2] + system_types = [f.metadata['system_type'] for f in frames] + assert system_types == ['host'] + assert args[0].call_count == 1 + + @mock.patch( + 'host_crawler.plugins_manager.get_host_crawl_plugins', + side_effect=lambda features: [(MockedOSCrawlerFailure(), {}), + (MockedCPUCrawler(), {})]) + def test_failed_host_crawler(self, *args): + crawler = HostCrawler(features=['os', 'cpu'], namespace='localhost') + with self.assertRaises(OSError): + frames = list(crawler.crawl(ignore_plugin_exception=False)) + assert args[0].call_count == 1 + + @mock.patch( + 'host_crawler.plugins_manager.get_host_crawl_plugins', + side_effect=lambda features: [(MockedCPUCrawler(), {}), + (MockedOSCrawlerFailure(), {}), + (MockedCPUCrawler(), {})]) + def test_failed_host_crawler_with_ignore_failure(self, *args): + crawler = 
HostCrawler( + features=[ + 'cpu', + 'os', + 'cpu'], + namespace='localhost') + frames = list(crawler.crawl()) + namespaces = sorted([f.metadata['namespace'] for f in frames]) + assert namespaces == sorted(['localhost']) + features_count = [f.num_features for f in frames] + assert features_count == [2] + system_types = [f.metadata['system_type'] for f in frames] + assert system_types == ['host'] + assert args[0].call_count == 1 + +if __name__ == '__main__': + unittest.main() diff --git a/tests/unit/test_jar_plugin.py b/tests/unit/test_jar_plugin.py new file mode 100644 index 00000000..19c6adc8 --- /dev/null +++ b/tests/unit/test_jar_plugin.py @@ -0,0 +1,56 @@ +import unittest + +import os +import sys +import tempfile +from zipfile import ZipFile, ZipInfo + +from utils import jar_utils +from utils.features import JarFeature + +# +# https://security.openstack.org/guidelines/dg_using-temporary-files-securely.html +# + +sys.path.append('tests/unit/') +from plugins.systems.jar_host_crawler import JarHostCrawler + + +class JarHashesPluginTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_jar_host_crawler_plugin(self, *args): + tmpdir = tempfile.mkdtemp() + jar_file_name = 'myfile.jar' + + # Ensure the file is read/write by the creator only + saved_umask = os.umask(0077) + + path = os.path.join(tmpdir, jar_file_name) + try: + with ZipFile(path, "w") as myjar: + myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!") + myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!") + myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!") + + fc = JarHostCrawler() + jars = list(fc.crawl(root_dir=tmpdir)) + #jars = list(jar_utils.crawl_jar_files(root_dir=tmpdir)) + print jars + jar_feature = jars[0][1] + assert 'myfile.jar' == jar_feature.name + assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash + assert ['ddc6eff37020aa858e26b1ba8a49ee0e', + 'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes + assert 'jar' == jars[0][2] + + except IOError as e: + print 'IOError' + finally: + os.remove(path) + os.umask(saved_umask) diff --git a/tests/unit/test_jar_utils.py b/tests/unit/test_jar_utils.py new file mode 100644 index 00000000..db121962 --- /dev/null +++ b/tests/unit/test_jar_utils.py @@ -0,0 +1,50 @@ +import unittest + +import os +import tempfile +from zipfile import ZipFile, ZipInfo + +from utils import jar_utils +from utils.features import JarFeature + +# +# https://security.openstack.org/guidelines/dg_using-temporary-files-securely.html +# + +class JarUtilsTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_get_jar_features(self): + tmpdir = tempfile.mkdtemp() + jar_file_name = 'myfile.jar' + + # Ensure the file is read/write by the creator only + saved_umask = os.umask(0077) + + path = os.path.join(tmpdir, jar_file_name) + try: + with ZipFile(path, "w") as myjar: + myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!") + myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!") + myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!") + + jars = list(jar_utils.crawl_jar_files(root_dir=tmpdir)) + print jars + jar_feature = jars[0][1] + assert 'myfile.jar' == jar_feature.name + assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash + assert ['ddc6eff37020aa858e26b1ba8a49ee0e', + 'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes + assert 'jar' == jars[0][2] + + except 
IOError as e:
+            print 'IOError', e
+        finally:
+            os.remove(path)
+            os.umask(saved_umask)
+            os.rmdir(tmpdir)
diff --git a/tests/unit/test_mesos_url.py b/tests/unit/test_mesos_url.py
new file mode 100644
index 00000000..4ca572c7
--- /dev/null
+++ b/tests/unit/test_mesos_url.py
@@ -0,0 +1,18 @@
+# test.py
+from mock import patch, Mock
+
+from utils.mesos import fetch_stats
+
+
+@patch('utils.mesos.urllib2.urlopen')
+def mytest(mock_urlopen):
+    a = Mock()
+    a.read.side_effect = ['{}', None]
+    mock_urlopen.return_value = a
+    res = fetch_stats("0.22.0")
+    print res
+    # fetch_stats should return the parsed stats dict, never None
+    assert res is not None
+
+
+mytest()
diff --git a/tests/unit/test_misc.py b/tests/unit/test_misc.py
new file mode 100644
index 00000000..66864ba4
--- /dev/null
+++ b/tests/unit/test_misc.py
@@ -0,0 +1,147 @@
+import os
+import socket
+import unittest
+
+import mock
+
+import utils.misc
+
+
+class MockedSocket1():
+
+    def __init__(self, a, b):
+        print a, b
+        pass
+
+    def connect(self, dest):
+        pass
+
+    def getsockname(self):
+        return ['1.2.3.4']
+
+
+class MockedSocket2():
+
+    def __init__(self, a, b):
+        print a, b
+        pass
+
+    def connect(self, dest):
+        pass
+
+    def getsockname(self):
+        raise socket.error()
+
+    def gethostname(self):
+        return '1.2.3.4'
+
+
+class MiscTests(unittest.TestCase):
+
+    def test_find_mount_point(self, tmpdir='/'):
+        assert utils.misc.find_mount_point(str(tmpdir)) == '/'
+
+    def test_subprocess_run(self):
+        assert utils.misc.subprocess_run(
+            'echo abc', shell=True).strip() == 'abc'
+        assert utils.misc.subprocess_run('exit 0', shell=True).strip() == ''
+        with self.assertRaises(RuntimeError):
+            utils.misc.subprocess_run('exit 1', shell=True)
+        with self.assertRaises(RuntimeError):
+            # There should not be a /a/b/c/d/e file
+            utils.misc.subprocess_run('/a/b/c/d/e', shell=False)
+
+    @mock.patch('utils.misc.open')
+    def test_get_process_env(self, mock_open):
+        mock_open.return_value = open('tests/unit/mock_environ_file')
+        env = utils.misc.get_process_env(pid=os.getpid())
+        assert 'HOME' in env
+        with self.assertRaises(TypeError):
+            utils.misc.get_process_env('asdf')
+
+    def test_process_is_crawler(self):
+        assert utils.misc.process_is_crawler(os.getpid())
+        assert utils.misc.process_is_crawler(1) is False
+        # make sure 1123... does not actually exist
+        assert utils.misc.process_is_crawler(1123234325123235) is False
+        with self.assertRaises(TypeError):
+            utils.misc.process_is_crawler('asdf')
+
+    def test_get_host_ip4_addresses(self):
+        assert '127.0.0.1' in utils.misc.get_host_ip4_addresses()
+
+    def test_is_process_running(self):
+        assert utils.misc.is_process_running(os.getpid())
+        assert utils.misc.is_process_running(1)
+        # make sure 1123...
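The jar expectations a few hunks up are stable across runs only because every entry is written with a fixed ZipInfo timestamp; zip archives embed mtimes, so without that the jar-level hash would change on each build. Conceptually the per-class hashes are just md5 over each .class member, roughly as follows (an approximation of jar_utils, not its verbatim code):

    import hashlib
    from zipfile import ZipFile

    def jar_class_hashes(path):
        # md5 of each .class member's contents; non-class members
        # (like second.txt in the test jar) are skipped
        hashes = []
        jar = ZipFile(path)
        try:
            for info in jar.infolist():
                if info.filename.endswith('.class'):
                    hashes.append(hashlib.md5(jar.read(info)).hexdigest())
        finally:
            jar.close()
        return hashes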
does not actually exist + assert utils.misc.is_process_running(1123234325) is False + with self.assertRaises(TypeError): + utils.misc.is_process_running('asdf') + + @mock.patch('utils.misc.socket.socket', side_effect=MockedSocket1) + def test_get_host_ipaddr1(self, mock_socket): + assert utils.misc.get_host_ipaddr() == '1.2.3.4' + + @mock.patch('utils.misc.socket.socket', side_effect=MockedSocket2) + @mock.patch('utils.misc.socket.gethostname', + side_effect=lambda: '4.3.2.1') + def test_get_host_ipaddr2(self, *args): + assert utils.misc.get_host_ipaddr() == '4.3.2.1' + + def test_execution_path(self): + assert utils.misc.execution_path('abc').endswith('/abc') + + # XXX this is more of a functional test + def test_btrfs_list_subvolumes(self): + # we either have it installed and it will raise a RuntimeError because + # the path provided does not exist or it is not and it will raise a + # RuntimeError. + with self.assertRaises(RuntimeError): + for submodule in utils.misc.btrfs_list_subvolumes('asd'): + pass + + @mock.patch('utils.misc.subprocess_run') + def test_btrfs_list_subvolumes_with_list(self, mock_run): + mock_run.return_value = ( + ("ID 257 gen 7 top level 5 path btrfs/subvolumes/a60a763cbaaedd3ac" + "2b77bff939019fda876d8a187cb7e85789bb36377accbce\n" + "ID 258 gen 8 top level 5 path btrfs/subvolumes/9212798f648314583" + "9c72f06a6bc2b0e456ca2b9ec14ea70e2948f098ce51077\n" + "ID 278 gen 1908 top level 5 path btrfs/subvolumes/7cd6c219c63e02" + "82ddbd8437c9b2a0220aff40bbfd6734503bcd58e5afa28426\n")) + + assert list( + utils.misc.btrfs_list_subvolumes('asd')) == [ + [ + 'ID', + '257', + 'gen', + '7', + 'top', + 'level', + '5', + 'path', + ("btrfs/subvolumes/a60a763cbaaedd3ac2b77bff939019fda876d8a187c" + "b7e85789bb36377accbce")], + [ + 'ID', + '258', + 'gen', + '8', + 'top', + 'level', + '5', + 'path', + ("btrfs/subvolumes/9212798f6483145839c72f06a6bc2b0e456ca2b9ec1" + "4ea70e2948f098ce51077")], + [ + 'ID', + '278', + 'gen', + '1908', + 'top', + 'level', + '5', + 'path', + ("btrfs/subvolumes/7cd6c219c63e0282ddbd8437c9b2a0220aff40bbfd6" + "734503bcd58e5afa28426")]] diff --git a/tests/unit/test_mtgraphite.py b/tests/unit/test_mtgraphite.py new file mode 100644 index 00000000..a19e54fd --- /dev/null +++ b/tests/unit/test_mtgraphite.py @@ -0,0 +1,164 @@ +import unittest + +import mock + +from utils.crawler_exceptions import MTGraphiteInvalidTenant +from utils.mtgraphite import MTGraphiteClient + + +class MockedSocket: + + def settimeout(self, n): + pass + + def write(self, str): + return len(str) + + +class MockedConnection: + + def __init__(self): + print 'init mocked connection' + + def connect(self, *args): + pass + + def getsockname(self): + return ['host'] + + def close(self): + pass + + def write(self, str): + return len(str) + + def read(self, n): + return '1A' * n + + +class MockedConnectionBadPassword: + + def __init__(self): + print 'init mocked connection' + + def connect(self, *args): + pass + + def getsockname(self): + return ['host'] + + def close(self): + pass + + def write(self, str): + return len(str) + + def read(self, n): + return '0A' * n # bad password + + +class MTGraphiteTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch('utils.mtgraphite.time.time', side_effect=lambda: 1000) + def test_init(self, *args): + mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/crawler:password', + batch_send_every_t=1, + batch_send_every_n=10) + assert not mt.conn + assert not mt.socket + assert mt.next_timeout == 1001 + assert mt.host == 
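These init tests pin the accepted URL shape: mtgraphite://host:port/tenant:password, with every component mandatory. A parser with the same accept/reject behavior (a sketch, not the client's actual parsing code):

    from urlparse import urlparse

    def parse_mtgraphite_url(url):
        parsed = urlparse(url)
        if parsed.scheme != 'mtgraphite':
            raise ValueError('unsupported scheme: %s' % url)
        host, _, port = parsed.netloc.partition(':')
        tenant, _, password = parsed.path.lstrip('/').partition(':')
        if not (host and port and tenant and password):
            raise ValueError('malformed mtgraphite url: %s' % url)
        return host, port, tenant, password

    assert parse_mtgraphite_url(
        'mtgraphite://2.2.2.2:123/crawler:password') == (
            '2.2.2.2', '123', 'crawler', 'password')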
'2.2.2.2' + assert mt.port == '123' + assert mt.tenant == 'crawler' + assert mt.password == 'password' + args[0].assert_called() + + @mock.patch('utils.mtgraphite.time.time', side_effect=lambda: 1000) + def test_init_bad_urls(self, *args): + + with self.assertRaises(ValueError): + mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/crawler') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/:password') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('mtgraphite://2.2.2.2:123') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('mtgraphite://2.2.2.2') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('mtgraphite://2.2.2.2/crawler') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('mtgraphite://2.2.2.2/crawler:password') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('mtgraphite://:234/crawler:password') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('mtgraphite://') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('http://1.2.3.4:234/crawler:password') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('host.com:234/crawler:password') + with self.assertRaises(ValueError): + mt = MTGraphiteClient('host') + mt = MTGraphiteClient('mtgraphite://host.com:234/crawler:password') + assert mt + + @mock.patch('utils.mtgraphite.time.sleep') + @mock.patch('utils.mtgraphite.time.time', side_effect=lambda: 1000) + @mock.patch('utils.mtgraphite.socket.socket', + side_effect=lambda a, b: MockedSocket()) + @mock.patch('utils.mtgraphite.ssl.wrap_socket', + side_effect=lambda s, cert_reqs: MockedConnection()) + def test_send(self, *args): + mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/crawler:password', + batch_send_every_t=1000, + batch_send_every_n=3) + assert mt.next_timeout == 2000 + + with self.assertRaises(TypeError): + mt.send_messages(1) + + m1 = mt.construct_message('space', 'group', 'cpu', 100, 1) + m2 = mt.construct_message('space', 'group', 'cpu', 100, 2) + + with self.assertRaises(TypeError): + mt.send_messages(m1) + + # we will not send anything yet as send_every_n is 3 + mt.send_messages([m1, m2]) + assert mt.msgset == [m1, m2] + + # now we should send something + m3 = mt.construct_message('space', 'group', 'cpu', 100, 3) + mt.send_messages([m3]) + assert mt.msgset == [] + + mt.close() + assert mt.conn is None + + @mock.patch('utils.mtgraphite.time.sleep') + @mock.patch('utils.mtgraphite.time.time', side_effect=lambda: 1000) + @mock.patch('utils.mtgraphite.socket.socket', + side_effect=lambda a, b: MockedSocket()) + @mock.patch('utils.mtgraphite.ssl.wrap_socket', + side_effect=lambda s, cert_reqs: MockedConnectionBadPassword()) + def test_send_bad_password(self, *args): + mt = MTGraphiteClient('mtgraphite://2.2.2.2:123/crawler:password', + batch_send_every_t=1000, + batch_send_every_n=3) + assert mt.next_timeout == 2000 + + m1 = mt.construct_message('space', 'group', 'cpu', 100, 1) + m2 = mt.construct_message('space', 'group', 'cpu', 100, 2) + m3 = mt.construct_message('space', 'group', 'cpu', 100, 3) + + with self.assertRaises(MTGraphiteInvalidTenant): + mt.send_messages([m1, m2, m3]) + + assert mt.msgset == [m1, m2, m3] diff --git a/tests/unit/test_namespace.py b/tests/unit/test_namespace.py new file mode 100644 index 00000000..5fa4813b --- /dev/null +++ b/tests/unit/test_namespace.py @@ -0,0 +1,255 @@ +import Queue +import time +import unittest +from collections 
import namedtuple + +import mock + +import utils.namespace +from utils import crawler_exceptions + +os_stat = namedtuple( + 'os_stat', + '''st_mode st_gid st_uid st_atime st_ctime st_mtime st_size st_ino''') + + +def throw_os_error(*args, **kvargs): + raise OSError() + + +def fun_add(x=0): + return x + 1 + + +def fun_not_exiting(x=0): + yield 1 + while True: + time.sleep(1) + + +def fun_failed(x=0): + assert False + + +class MockedLibc: + + def __init__(self): + pass + + def setns(self, namespaces, mode): + pass + + def open(self, path, mode): + return 1 + + def close(self, fd): + pass + + def prctl(self, *args): + print args + + +class MockedLibcNoSetns: + + def __init__(self): + pass + + def syscall(self, syscall_num, namespaces, mode): + return 1 + + def open(self, path, mode): + return 1 + + def close(self, fd): + pass + + def prctl(self, *args): + print args + + +class MockedLibcFailedOpen: + + def __init__(self): + pass + + def setns(self, namespaces, mode): + pass + + def open(self, path, mode): + return -1 + + def close(self, fd): + pass + + def prctl(self, *args): + print args + + +class MockedLibcFailedSetns: + + def __init__(self): + pass + + def setns(self, namespaces, mode): + return -1 + + def open(self, path, mode): + return 1 + + def close(self, fd): + pass + + def prctl(self, *args): + print args + + +class MockedLibcFailedClose: + + def __init__(self): + pass + + def setns(self, namespaces, mode): + pass + + def open(self, path, mode): + return 1 + + def close(self, fd): + return -1 + + def prctl(self, *args): + print args + + +class MockedQueue: + + def __init__(self, *args): + pass + + def get(self, timeout=None): + return (123, None) + + def put(self, item): + pass + + def close(self): + pass + + +class MockedQueueGetTimeout: + + def __init__(self, *args): + pass + + def get(self, timeout=None): + if timeout: + raise Queue.Empty() + + def put(self, item): + pass + + def close(self): + pass + + +class NamespaceTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch('utils.namespace.os.stat', + side_effect=lambda p: os_stat(1, 2, 3, 4, 5, 6, 7, 8)) + def test_pid_namespace(self, *args): + assert utils.namespace.get_pid_namespace(1) == 8 + + @mock.patch('utils.namespace.os.stat', + side_effect=throw_os_error) + def test_pid_namespace_no_process(self, *args): + assert utils.namespace.get_pid_namespace(1) is None + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibc()) + def test_run_as_another_namespace(self, *args): + assert utils.namespace.run_as_another_namespace( + '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) == 2 + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibcFailedOpen()) + def test_run_as_another_namespace_failed_mnt_open(self, *args): + with self.assertRaises( + crawler_exceptions.NamespaceFailedSetns): + utils.namespace.run_as_another_namespace( + '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibcFailedOpen()) + def test_run_as_another_namespace_failed_non_mnt_open(self, *args): + with self.assertRaises( + crawler_exceptions.NamespaceFailedSetns): + utils.namespace.run_as_another_namespace( + '1', ['pid', 'net'], fun_add, 1) + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibcFailedSetns()) + def test_run_as_another_namespace_failed_setns(self, *args): + with self.assertRaises(crawler_exceptions.NamespaceFailedSetns): + utils.namespace.run_as_another_namespace( + '1', 
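The libc and queue mocks above model the moving parts of run_as_another_namespace: open the target's /proc/<pid>/ns/* files, switch into them with setns(2), run the function in a child process, and ship the result (or failure) back over a multiprocessing queue. In outline, with the multi-namespace handling and error paths stripped away (a simplified sketch, not the real implementation):

    import ctypes
    import multiprocessing

    def run_in_namespace(pid, ns, function, *args):
        queue = multiprocessing.Queue()

        def child():
            libc = ctypes.CDLL('libc.so.6', use_errno=True)
            fd = libc.open('/proc/%s/ns/%s' % (pid, ns), 0)  # O_RDONLY
            if fd < 0 or libc.setns(fd, 0) != 0:
                queue.put((1, 'setns failed'))
                return
            queue.put((0, function(*args)))

        p = multiprocessing.Process(target=child)
        p.start()
        status, result = queue.get(timeout=30)
        p.join()
        if status != 0:
            raise RuntimeError(result)
        return result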
utils.namespace.ALL_NAMESPACES, fun_add, 1) + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibcFailedSetns()) + def test_run_as_another_namespace_failed_non_mnt_setns(self, *args): + with self.assertRaises(crawler_exceptions.NamespaceFailedSetns): + utils.namespace.run_as_another_namespace( + '1', ['pid', 'net'], fun_add, 1) + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibcFailedClose()) + def test_run_as_another_namespace_failed_close(self, *args): + assert utils.namespace.run_as_another_namespace( + '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) == 2 + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibcNoSetns()) + def test_run_as_another_namespace_no_setns(self, *args): + assert utils.namespace.run_as_another_namespace( + '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) == 2 + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibc()) + def test_run_as_another_namespace_failed_fun(self, *args): + with self.assertRaises(AssertionError): + utils.namespace.run_as_another_namespace( + '1', utils.namespace.ALL_NAMESPACES, fun_failed, 1) + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibc()) + @mock.patch('utils.namespace.multiprocessing.Queue', + side_effect=MockedQueue) + def test_run_as_another_namespace_with_mocked_queue(self, *args): + assert utils.namespace.run_as_another_namespace( + '1', utils.namespace.ALL_NAMESPACES, fun_failed, 1) == 123 + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibc()) + @mock.patch('utils.namespace.multiprocessing.Queue', + side_effect=MockedQueueGetTimeout) + def test_run_as_another_namespace_get_timeout(self, *args): + with self.assertRaises(crawler_exceptions.CrawlTimeoutError): + utils.namespace.run_as_another_namespace( + '1', utils.namespace.ALL_NAMESPACES, fun_add, 1) + + @mock.patch('utils.namespace.get_libc', + side_effect=lambda: MockedLibc()) + @mock.patch('utils.namespace.multiprocessing.Queue', + side_effect=MockedQueue) + def test_run_as_another_namespace_fun_not_exiting_failure(self, *args): + _old_timeout = utils.namespace.IN_PROCESS_TIMEOUT + utils.namespace.IN_PROCESS_TIMEOUT = 0 + with self.assertRaises(crawler_exceptions.CrawlTimeoutError): + utils.namespace.run_as_another_namespace( + '1', utils.namespace.ALL_NAMESPACES, fun_not_exiting, 1) + utils.namespace.IN_PROCESS_TIMEOUT = _old_timeout diff --git a/tests/unit/test_osinfo.py b/tests/unit/test_osinfo.py new file mode 100644 index 00000000..0eab2bb5 --- /dev/null +++ b/tests/unit/test_osinfo.py @@ -0,0 +1,155 @@ +import unittest +from unittest import TestCase + +import mock + +from utils.osinfo import (_get_file_name, + parse_lsb_release, + parse_os_release, + parse_redhat_release, + parse_centos_release, + get_osinfo_from_lsb_release, + get_osinfo_from_os_release, + get_osinfo_from_redhat_centos + ) + + +class Test_osinfo(TestCase): + + def test_get_file_name(self): + self.assertEqual(_get_file_name('/', 'xyz'), '/xyz') + self.assertEqual(_get_file_name('/abc/def', 'xyz'), '/abc/def/xyz') + + def test_parse_lsb_release(self): + data = ['DISTRIB_ID=Ubuntu', 'DISTRIB_RELEASE=15.10', + 'DISTRIB_CODENAME=wily' 'DISTRIB_DESCRIPTION="Ubuntu 15.10"'] + result = parse_lsb_release(data) + + self.assertEqual(result['os'], 'ubuntu') + self.assertEqual(result['version'], '15.10') + + def test_parse_os_release(self): + data = ['NAME="Ubuntu"', 'VERSION="14.04.4 LTS, Trusty Tahr"', + 'ID=ubuntu', 'ID_LIKE=debian', + 'PRETTY_NAME="Ubuntu 14.04.4 LTS"', 
'VERSION_ID="14.04"', + 'HOME_URL="http://www.ubuntu.com/"', + 'SUPPORT_URL="http://help.ubuntu.com/"', + 'BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"' + ] + result = parse_os_release(data) + self.assertEqual(result['os'], 'ubuntu') + self.assertEqual(result['version'], '14.04') + + def test_alpine_parse_os_release(self): + data = ['NAME="Alpine Linux"', + 'ID=alpine', + 'VERSION_ID=3.4.0', + 'PRETTY_NAME="Alpine Linux v3.4"', + 'HOME_URL="http://alpinelinux.org"', + 'BUG_REPORT_URL="http://bugs.alpinelinux.org"' + ] + + result = parse_os_release(data) + self.assertEqual(result['os'], 'alpine') + self.assertEqual(result['version'], '3.4.0') + + def test_parse_redhat_release(self): + data = ['Red Hat Enterprise Linux Server release 7.2 (Maipo)'] + + result = parse_redhat_release(data) + self.assertEqual(result['os'], 'rhel') + self.assertEqual(result['version'], '7.2') + + def test2_parse_redhat_release(self): + data = ['Red Hat Enterprise Linux Server release 7 (Maipo)'] + + result = parse_redhat_release(data) + self.assertEqual(result['os'], 'rhel') + self.assertEqual(result['version'], '7') + + def test_parse_centos_release(self): + data = ['CentOS release 6.8 (Final)'] + + result = parse_centos_release(data) + self.assertEqual(result['os'], 'centos') + self.assertEqual(result['version'], '6.8') + + def test2_parse_centos_release(self): + data = ['CentOS Linux release 6.8 (Final)'] + + result = parse_centos_release(data) + self.assertEqual(result['os'], 'centos') + self.assertEqual(result['version'], '6.8') + + def test3_parse_centos_release(self): + data = ['CentOS release 6 (Final)'] + + result = parse_centos_release(data) + self.assertEqual(result['os'], 'centos') + self.assertEqual(result['version'], '6') + + def test_get_osinfo_from_lsb_release(self): + data = ['DISTRIB_ID=Ubuntu', 'DISTRIB_RELEASE=15.10', + 'DISTRIB_CODENAME=wily' 'DISTRIB_DESCRIPTION="Ubuntu 15.10"'] + with mock.patch( + '__builtin__.open', mock.mock_open(read_data="\n".join(data)), + create=True) as m: + m.return_value.__iter__.return_value = data + + result = get_osinfo_from_lsb_release() + self.assertEqual(result['os'], 'ubuntu') + self.assertEqual(result['version'], '15.10') + + def test1_get_osinfo_from_lsb_release(self): + with mock.patch( + '__builtin__.open', mock.mock_open(), create=True) as m: + m.side_effect = IOError() + + result = get_osinfo_from_lsb_release() + self.assertFalse(result) + + def test_get_osinfo_from_os_release(self): + data = ['NAME="Ubuntu"', 'VERSION="14.04.4 LTS, Trusty Tahr"', + 'ID=ubuntu', 'ID_LIKE=debian', + 'PRETTY_NAME="Ubuntu 14.04.4 LTS"', 'VERSION_ID="14.04"', + 'HOME_URL="http://www.ubuntu.com/"', + 'SUPPORT_URL="http://help.ubuntu.com/"', + 'BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"' + ] + with mock.patch( + '__builtin__.open', mock.mock_open(read_data="\n".join(data)), + create=True) as m: + m.return_value.__iter__.return_value = data + + result = get_osinfo_from_os_release() + self.assertEqual(result['os'], 'ubuntu') + self.assertEqual(result['version'], '14.04') + + def test1_get_osinfo_from_os_release(self): + with mock.patch( + '__builtin__.open', mock.mock_open(), create=True) as m: + m.side_effect = IOError() + + result = get_osinfo_from_os_release() + self.assertFalse(result) + + def test_get_osinfo_from_redhat_centos(self): + data = ['Red Hat Enterprise Linux Server release 7.2 (Maipo)'] + with mock.patch( + '__builtin__.open', mock.mock_open(read_data="\n".join(data)), + create=True) as m: + m.return_value.__iter__.return_value = data + + result 
= get_osinfo_from_redhat_centos() + self.assertEqual(result['os'], 'rhel') + self.assertEqual(result['version'], '7.2') + + def mtest1_get_osinfo_from_redhat_centos(self): + with mock.patch( + '__builtin__.open', mock.mock_open(), create=True) as m: + m.side_effect = IOError() + + result = get_osinfo_from_redhat_centos() + self.assertFalse(result) +if __name__ == '__main__': + unittest.main() diff --git a/tests/unit/test_package_utils.py b/tests/unit/test_package_utils.py new file mode 100644 index 00000000..40f4ce84 --- /dev/null +++ b/tests/unit/test_package_utils.py @@ -0,0 +1,87 @@ +import unittest + +import mock + +from utils import package_utils +from utils.features import PackageFeature + + +def mocked_subprocess_run(cmd, shell=False, ignore_failure=False): + if 'dpkg-query' in cmd: + return ('pkg1|v1|x86|123\n' + 'pkg2|v2|x86|123') + elif '--queryformat' in cmd: + return ('123|pkg1|v1|x86|123\n' + '123|pkg1|v1|x86|123\n') + + +class PackageUtilsTests(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + @mock.patch('utils.package_utils.subprocess_run', + side_effect=mocked_subprocess_run) + def test_get_dpkg_packages(self, mock_subprocess_run): + pkgs = list(package_utils.get_dpkg_packages()) + print pkgs + assert pkgs == [ + ('pkg1', + PackageFeature( + installed=None, + pkgname='pkg1', + pkgsize='123', + pkgversion='v1', + pkgarchitecture='x86')), + ('pkg2', + PackageFeature( + installed=None, + pkgname='pkg2', + pkgsize='123', + pkgversion='v2', + pkgarchitecture='x86'))] + + @mock.patch('utils.package_utils.subprocess_run', + side_effect=mocked_subprocess_run) + def test_get_rpm_packages(self, mock_subprocess_run): + pkgs = list(package_utils.get_rpm_packages()) + print pkgs + assert pkgs == [ + ('pkg1', + PackageFeature( + installed='123', + pkgname='pkg1', + pkgsize='123', + pkgversion='v1', + pkgarchitecture='x86')), + ('pkg1', + PackageFeature( + installed='123', + pkgname='pkg1', + pkgsize='123', + pkgversion='v1', + pkgarchitecture='x86'))] + + @mock.patch('utils.package_utils.subprocess_run', + side_effect=mocked_subprocess_run) + def test_get_rpm_packages_with_db_reload(self, mock_subprocess_run): + pkgs = list(package_utils.get_rpm_packages(reload_needed=True)) + print pkgs + assert pkgs == [ + ('pkg1', + PackageFeature( + installed='123', + pkgname='pkg1', + pkgsize='123', + pkgversion='v1', + pkgarchitecture='x86')), + ('pkg1', + PackageFeature( + installed='123', + pkgname='pkg1', + pkgsize='123', + pkgversion='v1', + pkgarchitecture='x86'))] diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py new file mode 100644 index 00000000..224552e9 --- /dev/null +++ b/tests/unit/test_plugins.py @@ -0,0 +1,1790 @@ +import types +import unittest +from collections import namedtuple + +import os +import sys +import tempfile +from zipfile import ZipFile, ZipInfo + +from utils import jar_utils +sys.path.append('tests/unit/') + +import mock +from plugins.systems.config_container_crawler import ConfigContainerCrawler +from plugins.systems.config_host_crawler import ConfigHostCrawler +from plugins.systems.connection_container_crawler import ConnectionContainerCrawler +from plugins.systems.connection_host_crawler import ConnectionHostCrawler +from plugins.systems.connection_vm_crawler import ConnectionVmCrawler +from plugins.systems.cpu_container_crawler import CpuContainerCrawler +from plugins.systems.cpu_host_crawler import CpuHostCrawler +from plugins.systems.disk_container_crawler import DiskContainerCrawler +from 
plugins.systems.disk_host_crawler import DiskHostCrawler +from plugins.systems.dockerhistory_container_crawler import DockerhistoryContainerCrawler +from plugins.systems.dockerinspect_container_crawler import DockerinspectContainerCrawler +from plugins.systems.dockerps_host_crawler import DockerpsHostCrawler +from plugins.systems.file_container_crawler import FileContainerCrawler +from plugins.systems.file_host_crawler import FileHostCrawler +from plugins.systems.interface_container_crawler import InterfaceContainerCrawler +from plugins.systems.interface_host_crawler import InterfaceHostCrawler +from plugins.systems.interface_vm_crawler import InterfaceVmCrawler +from plugins.systems.jar_container_crawler import JarContainerCrawler +from plugins.systems.jar_host_crawler import JarHostCrawler +from plugins.systems.load_container_crawler import LoadContainerCrawler +from plugins.systems.load_host_crawler import LoadHostCrawler +from plugins.systems.memory_container_crawler import MemoryContainerCrawler +from plugins.systems.memory_host_crawler import MemoryHostCrawler +from plugins.systems.memory_vm_crawler import MemoryVmCrawler +from plugins.systems.metric_container_crawler import MetricContainerCrawler +from plugins.systems.metric_host_crawler import MetricHostCrawler +from plugins.systems.metric_vm_crawler import MetricVmCrawler +from plugins.systems.os_container_crawler import OSContainerCrawler +from plugins.systems.os_host_crawler import OSHostCrawler +from plugins.systems.os_vm_crawler import os_vm_crawler +from plugins.systems.package_container_crawler import PackageContainerCrawler +from plugins.systems.package_host_crawler import PackageHostCrawler +from plugins.systems.process_container_crawler import ProcessContainerCrawler +from plugins.systems.process_host_crawler import ProcessHostCrawler +from plugins.systems.process_vm_crawler import process_vm_crawler + +from container import Container +from utils.crawler_exceptions import CrawlError +from utils.features import ( + OSFeature, + ConfigFeature, + DiskFeature, + PackageFeature, + MemoryFeature, + CpuFeature, + InterfaceFeature, + LoadFeature, + DockerPSFeature, + JarFeature) + + +# for OUTVM psvmi + + +class DummyContainer(Container): + + def __init__(self, long_id): + self.pid = '1234' + self.long_id = long_id + + def get_memory_cgroup_path(self, node): + return '/cgroup/%s' % node + + def get_cpu_cgroup_path(self, node): + return '/cgroup/%s' % node + +# for OUTVM psvmi +psvmi_sysinfo = namedtuple('psvmi_sysinfo', + '''boottime ipaddr osdistro osname osplatform osrelease + ostype osversion memory_used memory_buffered + memory_cached memory_free''') + +psvmi_memory = namedtuple( + 'psvmi_memory', + 'memory_used memory_buffered memory_cached memory_free') + +psvmi_interface = namedtuple( + 'psvmi_interface', + 'ifname bytes_sent bytes_recv packets_sent packets_recv errout errin') + +os_stat = namedtuple( + 'os_stat', + '''st_mode st_gid st_uid st_atime st_ctime st_mtime st_size''') + + +def mocked_os_walk(root_dir): + files = ['file1', 'file2', 'file3'] + dirs = ['dir'] + yield ('/', dirs, files) + + # simulate the os_walk behavior (if a dir is deleted, we don't walk it) + if '/dir' in dirs: + files = ['file4'] + dirs = [] + yield ('/dir', dirs, files) + + +def mocked_os_walk_for_avoidsetns(root_dir): + files = ['file1', 'file2', 'file3'] + dirs = ['dir'] + yield ('/1/2/3', dirs, files) + + # simulate the os_walk behavior (if a dir is deleted, we don't walk it) + if '/1/2/3/dir' in dirs: + files = ['file4'] + dirs = [] + 
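mocked_os_walk above imitates a real os.walk subtlety the file crawlers depend on: mutating the dirnames list in place prunes the traversal, so a directory that is filtered out (or deleted mid-crawl) is never descended into. The same effect with the real os.walk:

    import os

    def walk_skipping(root, skip_name):
        # assigning to dirnames[:] (not rebinding the name) is what
        # actually stops os.walk from descending into those dirs
        visited = []
        for dirpath, dirnames, filenames in os.walk(root):
            dirnames[:] = [d for d in dirnames if d != skip_name]
            visited.append(dirpath)
        return visited

    # e.g. walk_skipping('/etc', 'ssl') yields no path under /etc/ssl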
yield ('/dir', dirs, files) + +# XXX can't do self.count = for some reason +mcount = 0 + + +class MockedMemCgroupFile(mock.Mock): + + def __init__(self): + pass + + def readline(self): + return '2' + + def __iter__(self): + return self + + def next(self): + global mcount + mcount += 1 + if mcount == 1: + return 'total_cache 100' + if mcount == 2: + return 'total_active_file 200' + else: + raise StopIteration() + +# XXX can't do self.count = for some reason +ccount = 0 +ccount2 = 0 + + +class MockedCpuCgroupFile(mock.Mock): + + def __init__(self): + pass + + def readline(self): + global ccount2 + ccount2 += 1 + if ccount2 == 1: + return '1e7' + else: + return '2e7' + + def __iter__(self): + return self + + def next(self): + global ccount + ccount += 1 + if ccount == 1: + return 'system 20' + if ccount == 2: + return 'user 20' + else: + raise StopIteration() + + +class MockedFile(mock.Mock): + + def __init__(self): + pass + + def read(self): + return 'content' + + +def mocked_codecs_open(filename, mode, encoding, errors): + m = mock.Mock() + m.__enter__ = mock.Mock(return_value=MockedFile()) + m.__exit__ = mock.Mock(return_value=False) + return m + + +def mocked_cpu_cgroup_open(filename, mode): + m = mock.Mock() + m.__enter__ = mock.Mock(return_value=MockedCpuCgroupFile()) + m.__exit__ = mock.Mock(return_value=False) + print filename + return m + + +def mocked_memory_cgroup_open(filename, mode): + m = mock.Mock() + m.__enter__ = mock.Mock(return_value=MockedMemCgroupFile()) + m.__exit__ = mock.Mock(return_value=False) + print filename + return m + +partition = namedtuple('partition', 'device fstype mountpoint opts') +pdiskusage = namedtuple('pdiskusage', 'percent total') +meminfo = namedtuple('meminfo', 'rss vms') +ioinfo = namedtuple('ioinfo', 'read_bytes write_bytes') +psutils_memory = namedtuple('psutils_memory', 'used free buffers cached') +psutils_cpu = namedtuple( + 'psutils_cpu', + 'idle nice user iowait system irq steal') +psutils_net = namedtuple( + 'psutils_net', + 'bytes_sent bytes_recv packets_sent packets_recv errout errin') + + +def mocked_disk_partitions(all): + return [partition('/dev/a', 'type', '/a', 'opts'), + partition('/dev/b', 'type', '/b', 'opts')] + + +class Connection(): + + def __init__(self): + self.laddr = ['1.1.1.1', '22'] + self.raddr = ['2.2.2.2', '22'] + self.status = 'Established' + + +class Process(): + + def __init__(self, name): + self.name = name + self.cmdline = ['cmd'] + self.pid = 123 + self.status = 'Running' + self.cwd = '/bin' + self.ppid = 1 + self.create_time = 1000 + + def num_threads(self): + return 1 + + def username(self): + return 'don quijote' + + def get_open_files(self): + return [] + + def get_connections(self): + return [Connection()] + + def get_memory_info(self): + return meminfo(10, 20) + + def get_io_counters(self): + return ioinfo(10, 20) + + def get_cpu_percent(self, interval): + return 30 + + def get_memory_percent(self): + return 30 + +STAT_DIR_MODE = 16749 + + +def mocked_os_lstat(path): + print path + if path == '/': + return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7) + elif path == '/file1': + return os_stat(1, 2, 3, 4, 5, 6, 7) + elif path == '/file2': + return os_stat(1, 2, 3, 4, 5, 6, 7) + elif path == '/file3': + return os_stat(1, 2, 3, 4, 5, 6, 7) + elif path == '/dir': + return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7) + else: + return os_stat(1, 2, 3, 4, 5, 6, 7) + + +def mocked_run_as_another_namespace(pid, ns, function, *args, **kwargs): + result = function(*args) + # if res is a generator (i.e. 
function uses yield)
+    if isinstance(result, types.GeneratorType):
+        result = list(result)
+    return result
+
+
+def throw_os_error(*args, **kvargs):
+    raise OSError()
+
+
+class PluginTests(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_init(self, *args):
+        pass
+
+    @mock.patch('utils.os_utils.time.time',
+                side_effect=lambda: 1001)
+    @mock.patch('utils.os_utils.platform.platform',
+                side_effect=lambda: 'platform')
+    @mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses',
+                side_effect=lambda: ['1.1.1.1'])
+    @mock.patch('utils.os_utils.psutil.boot_time',
+                side_effect=lambda: 1000)
+    @mock.patch('utils.os_utils.platform.system',
+                side_effect=lambda: 'linux')
+    @mock.patch('utils.os_utils.platform.machine',
+                side_effect=lambda: 'machine')
+    @mock.patch(
+        'utils.os_utils.osinfo.get_osinfo',
+        side_effect=lambda mount_point=None: {
+            'os': 'os',
+            'version': 'os_version'})
+    def test_os_host_crawler_plugin(self, *args):
+        fc = OSHostCrawler()
+        for os in fc.crawl():
+            print os
+            assert os == (
+                'linux',
+                OSFeature(
+                    boottime=1000,
+                    uptime=1,
+                    ipaddr=['1.1.1.1'],
+                    os='os',
+                    os_version='os_version',
+                    os_kernel='platform',
+                    architecture='machine'),
+                'os')
+
+        for i, arg in enumerate(args):
+            if i > 0:  # time.time is called more than once
+                continue
+            assert arg.call_count == 1
+
+    @mock.patch('utils.os_utils.platform.system',
+                side_effect=throw_os_error)
+    def test_os_host_crawler_plugin_failure(self, *args):
+        fc = OSHostCrawler()
+        with self.assertRaises(OSError):
+            for os in fc.crawl():
+                pass
+
+    @mock.patch(
+        'utils.os_utils.osinfo.get_osinfo',
+        side_effect=lambda mount_point=None: {
+            'os': 'os',
+            'version': 'os_version'})
+    def test_os_host_crawler_plugin_mountpoint_mode(self, *args):
+        fc = OSHostCrawler()
+        for os in fc.crawl(root_dir='/a'):
+            print os
+            assert os == (
+                'linux',
+                OSFeature(
+                    boottime='unsupported',
+                    uptime='unsupported',
+                    ipaddr='0.0.0.0',
+                    os='os',
+                    os_version='os_version',
+                    os_kernel='unknown',
+                    architecture='unknown'),
+                'os')
+        for i, arg in enumerate(args):
+            assert arg.call_count == 1
+
+    @mock.patch('utils.os_utils.osinfo.get_osinfo',
+                side_effect=throw_os_error)
+    def test_os_host_crawler_plugin_mountpoint_mode_failure(self, *args):
+        fc = OSHostCrawler()
+        with self.assertRaises(OSError):
+            for os in fc.crawl(root_dir='/a'):
+                pass
+
+    @mock.patch('utils.os_utils.time.time',
+                side_effect=lambda: 1001)
+    @mock.patch('utils.os_utils.platform.platform',
+                side_effect=lambda: 'platform')
+    @mock.patch('utils.os_utils.utils.misc.get_host_ip4_addresses',
+                side_effect=lambda: ['1.1.1.1'])
+    @mock.patch('utils.os_utils.psutil.boot_time',
+                side_effect=lambda: 1000)
+    @mock.patch('utils.os_utils.platform.system',
+                side_effect=lambda: 'linux')
+    @mock.patch('utils.os_utils.platform.machine',
+                side_effect=lambda: 'machine')
+    @mock.patch(
+        ("plugins.systems.os_container_crawler."
+         "run_as_another_namespace"),
+        side_effect=mocked_run_as_another_namespace)
+    @mock.patch(
+        ("plugins.systems.os_container_crawler."
+ "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + 'utils.os_utils.osinfo.get_osinfo', + side_effect=lambda mount_point=None: { + 'os': 'os', + 'version': 'os_version'}) + def test_os_container_crawler_plugin(self, *args): + fc = OSContainerCrawler() + for os in fc.crawl(container_id=123): + print os + assert os == ( + 'linux', + OSFeature( + boottime=1000, + uptime=1, + ipaddr=['1.1.1.1'], + os='os', + os_version='os_version', + os_kernel='platform', + architecture='machine'), + 'os') + for i, arg in enumerate(args): + if i > 0: # time.time is called more than once + continue + assert arg.call_count == 1 + + @mock.patch( + ("plugins.systems.os_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.os_container_crawler.utils.dockerutils." + "get_docker_container_rootfs_path"), + side_effect=lambda long_id: '/a/b/c') + @mock.patch( + 'utils.os_utils.osinfo.get_osinfo', + side_effect=lambda mount_point=None: { + 'os': 'os', + 'version': 'os_version'}) + def test_os_container_crawler_plugin_avoidsetns(self, *args): + fc = OSContainerCrawler() + for os in fc.crawl(container_id=123, avoid_setns=True): + print os + assert os == ( + 'linux', + OSFeature( + boottime='unsupported', + uptime='unsupported', + ipaddr='0.0.0.0', + os='os', + os_version='os_version', + os_kernel='unknown', + architecture='unknown'), + 'os') + for i, arg in enumerate(args): + print i, arg + if i == 0: + # get_osinfo() + assert arg.call_count == 1 + arg.assert_called_with(mount_point='/a/b/c') + elif i == 1: + # get_docker_container_rootfs_path + assert arg.call_count == 1 + arg.assert_called_with(123) + else: + # exec_dockerinspect + assert arg.call_count == 1 + arg.assert_called_with(123) + + @mock.patch( + ("plugins.systems.os_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.os_container_crawler.utils.dockerutils." 
+ "get_docker_container_rootfs_path"), + side_effect=throw_os_error) + def test_os_container_crawler_plugin_avoidsetns_failure(self, *args): + fc = OSContainerCrawler() + with self.assertRaises(OSError): + for os in fc.crawl(container_id=123, avoid_setns=True): + pass + + @mock.patch('plugins.systems.os_vm_crawler.psvmi.context_init', + side_effect=lambda dn1, dn2, kv, d, a: 1000) + @mock.patch('plugins.systems.os_vm_crawler.psvmi.system_info', + side_effect=lambda vmc: psvmi_sysinfo(1000, + '1.1.1.1', + 'osdistro', + 'osname', + 'osplatform', + 'osrelease', + 'ostype', + 'osversion', + 1000000, + 100000, + 100000, + 100000)) + @mock.patch('plugins.systems.os_vm_crawler.psvmi') + def test_os_vm_crawler_plugin_without_vm(self, *args): + fc = os_vm_crawler() + for os in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): + assert os == ( + 'ostype', + OSFeature( + boottime=1000, + uptime='unknown', + ipaddr='1.1.1.1', + os='ostype', + os_version='osversion', + os_kernel='osrelease', + architecture='osplatform'), + 'os') + pass + assert args[1].call_count == 1 + + @mock.patch('utils.file_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.file_utils.os.walk', + side_effect=mocked_os_walk) + @mock.patch('utils.file_utils.os.lstat', + side_effect=mocked_os_lstat) + def test_file_host_crawler(self, *args): + fc = FileHostCrawler() + for (k, f, fname) in fc.crawl(): + print f + assert fname == "file" + assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 + assert f.atime == 4 and f.ctime == 5 + assert f.mtime == 6 and f.size == 7 + assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4'] + assert f.path in ['/', '/file1', '/file2', '/file3', + '/dir', '/dir/file4'] + assert f.type in ['file', 'dir'] + assert f.linksto is None + assert args[0].call_count == 6 + assert args[1].call_count == 1 # oswalk + args[1].assert_called_with('/') + assert args[2].call_count == 2 # isdir + args[2].assert_called_with('/') + + @mock.patch('utils.file_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.file_utils.os.walk', + side_effect=mocked_os_walk) + @mock.patch('utils.file_utils.os.lstat', + side_effect=mocked_os_lstat) + def test_file_host_crawler_with_exclude_dirs(self, *args): + fc = FileHostCrawler() + for (k, f, fname) in fc.crawl(exclude_dirs=['dir']): + print f + assert fname == "file" + assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 + assert f.atime == 4 and f.ctime == 5 + assert f.mtime == 6 and f.size == 7 + assert f.name in ['', 'file1', 'file2', 'file3', 'file4'] + assert f.path in ['/', '/file1', '/file2', '/file3'] + assert f.path not in ['/dir', '/dir/file4'] + assert f.type in ['file', 'dir'] + assert f.linksto is None + assert args[0].call_count == 4 + assert args[1].call_count == 1 # oswalk + args[1].assert_called_with('/') + assert args[2].call_count == 2 # isdir + args[2].assert_called_with('/') + + @mock.patch('utils.file_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.file_utils.os.walk', + side_effect=throw_os_error) + @mock.patch('utils.file_utils.os.lstat', + side_effect=mocked_os_lstat) + def test_file_host_crawler_failure(self, *args): + fc = FileHostCrawler() + with self.assertRaises(OSError): + for (k, f, fname) in fc.crawl(root_dir='/a/b/c'): + pass + + @mock.patch( + ("plugins.systems.file_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.file_container_crawler." 
+ "run_as_another_namespace"), + side_effect=mocked_run_as_another_namespace) + @mock.patch('utils.file_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.file_utils.os.walk', + side_effect=mocked_os_walk) + @mock.patch('utils.file_utils.os.lstat', + side_effect=mocked_os_lstat) + def test_file_container_crawler(self, *args): + fc = FileContainerCrawler() + for (k, f, fname) in fc.crawl(root_dir='/'): + assert fname == "file" + assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 + assert f.atime == 4 and f.ctime == 5 + assert f.mtime == 6 and f.size == 7 + assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4'] + assert f.path in ['/', '/file1', '/file2', '/file3', + '/dir', '/dir/file4'] + assert f.type in ['file', 'dir'] + assert f.linksto is None + assert args[0].call_count == 6 + assert args[1].call_count == 1 # oswalk + args[1].assert_called_with('/') + assert args[2].call_count == 2 # isdir + args[2].assert_called_with('/') + + @mock.patch( + ("plugins.systems.jar_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.jar_container_crawler." + "run_as_another_namespace"), + side_effect=mocked_run_as_another_namespace) + def test_jar_container_crawler_plugin(self, *args): + tmpdir = tempfile.mkdtemp() + jar_file_name = 'myfile.jar' + + # Ensure the file is read/write by the creator only + saved_umask = os.umask(0077) + + path = os.path.join(tmpdir, jar_file_name) + try: + with ZipFile(path, "w") as myjar: + myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!") + myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!") + myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!") + + fc = JarContainerCrawler() + jars = list(fc.crawl(root_dir=tmpdir)) + #jars = list(jar_utils.crawl_jar_files(root_dir=tmpdir)) + print jars + jar_feature = jars[0][1] + assert 'myfile.jar' == jar_feature.name + assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash + assert ['ddc6eff37020aa858e26b1ba8a49ee0e', + 'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes + assert 'jar' == jars[0][2] + + except IOError as e: + print 'IOError' + finally: + os.remove(path) + + + @mock.patch( + ("plugins.systems.jar_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.jar_container_crawler.utils.dockerutils." 
+ "get_docker_container_rootfs_path"), + side_effect=lambda long_id: '/tmp') + def test_jar_container_crawler_avoidsetns(self, *args): + tmpdir = tempfile.mkdtemp() + jar_file_name = 'myfile.jar' + + # Ensure the file is read/write by the creator only + saved_umask = os.umask(0077) + + path = os.path.join(tmpdir, jar_file_name) + try: + with ZipFile(path, "w") as myjar: + myjar.writestr(ZipInfo('first.class',(1980,1,1,1,1,1)), "first secrets!") + myjar.writestr(ZipInfo('second.class',(1980,1,1,1,1,1)), "second secrets!") + myjar.writestr(ZipInfo('second.txt',(1980,1,1,1,1,1)), "second secrets!") + + fc = JarContainerCrawler() + jars = list(fc.crawl(root_dir=os.path.basename(tmpdir), avoid_setns=True)) + print jars + jar_feature = jars[0][1] + assert 'myfile.jar' == jar_feature.name + assert '48ac85a26ffa7ff5cefdd5c73a9fb888' == jar_feature.jarhash + assert ['ddc6eff37020aa858e26b1ba8a49ee0e', + 'cbe2a13eb99c1c8ac5f30d0a04f8c492'] == jar_feature.hashes + assert 'jar' == jars[0][2] + + except IOError as e: + print 'IOError' + finally: + os.remove(path) + + @mock.patch( + ("plugins.systems.file_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch('utils.file_utils.os.walk', + side_effect=throw_os_error) + @mock.patch( + ("plugins.systems.file_container_crawler." + "run_as_another_namespace"), + side_effect=mocked_run_as_another_namespace) + @mock.patch('utils.file_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.file_utils.os.lstat', + side_effect=mocked_os_lstat) + def test_file_container_crawler_failure(self, *args): + fc = FileContainerCrawler() + with self.assertRaises(OSError): + for (k, f, fname) in fc.crawl(root_dir='/a/b/c'): + pass + + @mock.patch( + ("plugins.systems.file_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.file_container_crawler.utils.dockerutils." + "get_docker_container_rootfs_path"), + side_effect=lambda long_id: '/1/2/3') + @mock.patch('utils.file_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.file_utils.os.walk', + side_effect=mocked_os_walk_for_avoidsetns) + @mock.patch('utils.file_utils.os.lstat', + side_effect=mocked_os_lstat) + def test_file_container_crawler_avoidsetns(self, *args): + fc = FileContainerCrawler() + for (k, f, fname) in fc.crawl(root_dir='/', avoid_setns=True): + print f + assert fname == "file" + assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 + assert f.atime == 4 and f.ctime == 5 + assert f.mtime == 6 and f.size == 7 + assert f.name in ['', 'dir', 'file1', 'file2', 'file3', 'file4'] + assert f.path in ['/', '/file1', '/file2', '/file3', + '/dir', '/dir/file4'] + assert f.type in ['file', 'dir'] + assert f.linksto is None + assert args[0].call_count == 6 + assert args[1].call_count == 1 # oswalk + args[1].assert_called_with('/1/2/3') + assert args[2].call_count == 2 # isdir + args[2].assert_called_with('/1/2/3') + + @mock.patch( + ("plugins.systems.file_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.file_container_crawler." 
+ "run_as_another_namespace"), + side_effect=mocked_run_as_another_namespace) + @mock.patch('utils.file_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.file_utils.os.walk', + side_effect=mocked_os_walk) + @mock.patch('utils.file_utils.os.lstat', + side_effect=mocked_os_lstat) + def test_file_container_crawler_with_exclude_dirs(self, *args): + fc = FileContainerCrawler() + for (k, f, fname) in fc.crawl(root_dir='/', + exclude_dirs=['dir']): + assert fname == "file" + assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 + assert f.atime == 4 and f.ctime == 5 + assert f.mtime == 6 and f.size == 7 + assert f.name in ['', 'file1', 'file2', 'file3', 'file4'] + assert f.path in ['/', '/file1', '/file2', '/file3'] + assert f.path not in ['/dir', '/dir/file4'] + assert f.type in ['file', 'dir'] + assert f.linksto is None + assert args[0].call_count == 4 + assert args[1].call_count == 1 # oswalk + args[1].assert_called_with('/') + assert args[2].call_count == 2 # isdir + args[2].assert_called_with('/') + + @mock.patch( + ("plugins.systems.file_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.file_container_crawler.utils.dockerutils." + "get_docker_container_rootfs_path"), + side_effect=lambda long_id: '/1/2/3') + @mock.patch('utils.file_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.file_utils.os.walk', + side_effect=mocked_os_walk_for_avoidsetns) + @mock.patch('utils.file_utils.os.lstat', + side_effect=mocked_os_lstat) + def test_file_container_crawler_avoidsetns_with_exclude_dirs( + self, + * + args): + fc = FileContainerCrawler() + for (k, f, fname) in fc.crawl(root_dir='/', + avoid_setns=True, + exclude_dirs=['/dir']): + assert fname == "file" + assert f.mode in [1, STAT_DIR_MODE] and f.gid == 2 and f.uid == 3 + assert f.atime == 4 and f.ctime == 5 + assert f.mtime == 6 and f.size == 7 + assert f.name in ['', 'file1', 'file2', 'file3', 'file4'] + assert f.path in ['/', '/file1', '/file2', '/file3'] + assert f.path not in ['/dir', '/dir/file4'] + assert f.type in ['file', 'dir'] + assert f.linksto is None + assert args[0].call_count == 4 + assert args[1].call_count == 1 # oswalk + args[1].assert_called_with('/1/2/3') + assert args[2].call_count == 2 # isdir + args[2].assert_called_with('/1/2/3') + + @mock.patch('utils.config_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.lstat', + side_effect=mocked_os_lstat) + @mock.patch('utils.config_utils.codecs.open', + side_effect=mocked_codecs_open) + def test_config_host_crawler(self, *args): + fc = ConfigHostCrawler() + for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'], + discover_config_files=False): + assert fname == "config" + assert f == ConfigFeature(name='file1', content='content', + path='/etc/file1') + assert args[0].call_count == 1 # lstat + + @mock.patch('utils.config_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.walk', + side_effect=lambda p: [ + ('/', [], ['file1', 'file2', 'file3.conf'])]) + @mock.patch('utils.config_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.path.isfile', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.path.getsize', + side_effect=lambda p: 1000) + @mock.patch('utils.config_utils.os.lstat', + 
side_effect=mocked_os_lstat) + @mock.patch('utils.config_utils.codecs.open', + side_effect=mocked_codecs_open) + def test_config_host_crawler_with_discover(self, *args): + fc = ConfigHostCrawler() + + configs = fc.crawl(known_config_files=['/etc/file1'], + discover_config_files=True) + print configs + assert set(configs) == set([('/file3.conf', + ConfigFeature(name='file3.conf', + content='content', + path='/file3.conf'), + 'config'), + ('/etc/file1', + ConfigFeature(name='file1', + content='content', + path='/etc/file1'), + 'config')]) + + @mock.patch( + ("plugins.systems.config_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + 'plugins.systems.config_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch('utils.config_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.lstat', + side_effect=mocked_os_lstat) + @mock.patch('utils.config_utils.codecs.open', + side_effect=mocked_codecs_open) + def test_config_container_crawler(self, *args): + fc = ConfigContainerCrawler() + for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'], + discover_config_files=False): + assert fname == "config" + assert f == ConfigFeature(name='file1', content='content', + path='/etc/file1') + assert args[0].call_count == 1 # codecs open + + @mock.patch('utils.config_utils.codecs.open', + side_effect=mocked_codecs_open) + @mock.patch('utils.config_utils.os.lstat', + side_effect=mocked_os_lstat) + @mock.patch( + ("plugins.systems.config_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + 'plugins.systems.config_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch('utils.config_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.walk', + side_effect=lambda p: [ + ('/', [], ['file1', 'file2', 'file3.conf'])]) + @mock.patch('utils.config_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.path.isfile', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.path.getsize', + side_effect=lambda p: 1000) + def test_config_container_crawler_discover(self, *args): + fc = ConfigContainerCrawler() + + configs = fc.crawl(known_config_files=['/etc/file1'], + discover_config_files=True) + assert set(configs) == set([('/file3.conf', + ConfigFeature(name='file3.conf', + content='content', + path='/file3.conf'), + 'config'), + ('/etc/file1', + ConfigFeature(name='file1', + content='content', + path='/etc/file1'), + 'config')]) + + @mock.patch( + ("plugins.systems.config_container_crawler." + "run_as_another_namespace"), + side_effect=mocked_run_as_another_namespace) + @mock.patch( + ("plugins.systems.config_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.config_container_crawler.utils.dockerutils." 
+ "get_docker_container_rootfs_path"), + side_effect=lambda long_id: '/1/2/3') + @mock.patch('utils.config_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.lstat', + side_effect=mocked_os_lstat) + @mock.patch('utils.config_utils.codecs.open', + side_effect=mocked_codecs_open) + def test_config_container_crawler_avoidsetns(self, *args): + fc = ConfigContainerCrawler() + for (k, f, fname) in fc.crawl(known_config_files=['/etc/file1'], + discover_config_files=False, + avoid_setns=True): + assert fname == "config" + assert f == ConfigFeature(name='file1', content='content', + path='/etc/file1') + assert args[0].call_count == 1 # lstat + + @mock.patch( + ("plugins.systems.config_container_crawler." + "run_as_another_namespace"), + side_effect=mocked_run_as_another_namespace) + @mock.patch( + ("plugins.systems.config_container_crawler." + "utils.dockerutils.exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.config_container_crawler.utils.dockerutils." + "get_docker_container_rootfs_path"), + side_effect=lambda long_id: '/1/2/3') + @mock.patch('utils.config_utils.os.path.isdir', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.walk', + side_effect=lambda p: [ + ('/', [], ['file1', 'file2', 'file3.conf'])]) + @mock.patch('utils.config_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.path.isfile', + side_effect=lambda p: True) + @mock.patch('utils.config_utils.os.path.getsize', + side_effect=lambda p: 1000) + @mock.patch('utils.config_utils.os.lstat', + side_effect=mocked_os_lstat) + @mock.patch('utils.config_utils.codecs.open', + side_effect=mocked_codecs_open) + def test_config_container_crawler_avoidsetns_discover(self, *args): + fc = ConfigContainerCrawler() + configs = fc.crawl(known_config_files=['/etc/file1'], + avoid_setns=True, + discover_config_files=True) + assert set(configs) == set([('/file3.conf', + ConfigFeature(name='file3.conf', + content='content', + path='/file3.conf'), + 'config'), + ('/etc/file1', + ConfigFeature(name='file1', + content='content', + path='/etc/file1'), + 'config')]) + + @mock.patch( + 'utils.package_utils.osinfo.get_osinfo', + side_effect=lambda mount_point=None: { + 'os': 'ubuntu', + 'version': '123'}) + @mock.patch('utils.package_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.package_utils.get_dpkg_packages', + side_effect=lambda a, b, c: [('pkg1', + PackageFeature(None, 'pkg1', + 123, 'v1', + 'x86'))]) + def test_package_host_crawler_dpkg(self, *args): + fc = PackageHostCrawler() + for (k, f, fname) in fc.crawl(): + assert fname == "package" + assert f == PackageFeature( + installed=None, + pkgname='pkg1', + pkgsize=123, + pkgversion='v1', + pkgarchitecture='x86') + assert args[0].call_count == 1 + args[0].assert_called_with('/', 'var/lib/dpkg', 0) + + @mock.patch( + 'utils.package_utils.osinfo.get_osinfo', + side_effect=lambda mount_point=None: { + 'os': 'ubuntu', + 'version': '123'}) + @mock.patch('utils.package_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.package_utils.get_dpkg_packages', + side_effect=throw_os_error) + def test_package_host_crawler_dpkg_failure(self, *args): + fc = PackageHostCrawler() + with self.assertRaises(CrawlError): + for (k, f, fname) in fc.crawl(): + pass + assert args[0].call_count == 1 + args[0].assert_called_with('/', 'var/lib/dpkg', 
0) + + @mock.patch( + 'utils.package_utils.osinfo.get_osinfo', + side_effect=lambda mount_point=None: { + 'os': 'redhat', + 'version': '123'}) + @mock.patch('utils.package_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.package_utils.get_rpm_packages', + side_effect=lambda a, b, c, d: [('pkg1', + PackageFeature(None, 'pkg1', + 123, 'v1', + 'x86'))]) + def test_package_host_crawler_rpm(self, *args): + fc = PackageHostCrawler() + for (k, f, fname) in fc.crawl(): + assert fname == "package" + assert f == PackageFeature( + installed=None, + pkgname='pkg1', + pkgsize=123, + pkgversion='v1', + pkgarchitecture='x86') + assert args[0].call_count == 1 + args[0].assert_called_with('/', 'var/lib/rpm', 0, False) + + @mock.patch( + ("plugins.systems.package_container_crawler." + "exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + 'utils.package_utils.osinfo.get_osinfo', + side_effect=lambda mount_point=None: { + 'os': 'ubuntu', + 'version': '123'}) + @mock.patch( + 'plugins.systems.package_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch('utils.package_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.package_utils.get_dpkg_packages', + side_effect=lambda a, b, c: [('pkg1', + PackageFeature(None, 'pkg1', + 123, 'v1', + 'x86'))]) + def test_package_container_crawler_dpkg(self, *args): + fc = PackageContainerCrawler() + for (k, f, fname) in fc.crawl(): + assert fname == "package" + assert f == PackageFeature( + installed=None, + pkgname='pkg1', + pkgsize=123, + pkgversion='v1', + pkgarchitecture='x86') + assert args[0].call_count == 1 + args[0].assert_called_with('/', 'var/lib/dpkg', 0) + + @mock.patch( + ("plugins.systems.package_container_crawler." + "exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + 'plugins.systems.package_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch( + ("plugins.systems.package_container_crawler." + "get_docker_container_rootfs_path"), + side_effect=lambda long_id: '/a/b/c') + @mock.patch( + 'utils.package_utils.osinfo.get_osinfo', + side_effect=lambda mount_point=None: { + 'os': 'ubuntu', + 'version': '123'}) + @mock.patch('utils.package_utils.os.path.exists', + side_effect=lambda p: True if 'dpkg' in p else False) + @mock.patch('utils.package_utils.get_dpkg_packages', + side_effect=throw_os_error) + def test_package_container_crawler_dpkg_failure(self, *args): + fc = PackageContainerCrawler() + with self.assertRaises(CrawlError): + for (k, f, fname) in fc.crawl(): + pass + # get_dpkg_packages is called a second time after the first failure. + # first time is OUTCONTAINER mode with setns + # second time is OUTCONTAINER mode with avoid_setns + assert args[0].call_count == 2 + args[0].assert_called_with('/a/b/c', 'var/lib/dpkg', 0) + args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo() + + @mock.patch( + ("plugins.systems.package_container_crawler." + "exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + 'plugins.systems.package_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch( + ("plugins.systems.package_container_crawler." 
+ "get_docker_container_rootfs_path"), + side_effect=lambda long_id: '/a/b/c') + @mock.patch( + 'utils.package_utils.osinfo.get_osinfo', + side_effect=lambda mount_point=None: { + 'os': 'redhat', + 'version': '123'}) + @mock.patch('utils.package_utils.os.path.exists', + side_effect=lambda p: True if 'rpm' in p else False) + @mock.patch('utils.package_utils.get_rpm_packages', + side_effect=throw_os_error) + def test_package_container_crawler_rpm_failure(self, *args): + fc = PackageContainerCrawler() + with self.assertRaises(CrawlError): + for (k, f, fname) in fc.crawl(): + pass + # get_dpkg_packages is called a second time after the first failure. + # first time is OUTCONTAINER mode with setns + # second time is OUTCONTAINER mode with avoid_setns + assert args[0].call_count == 2 + args[0].assert_called_with('/a/b/c', 'var/lib/rpm', 0, True) + args[2].assert_called_with(mount_point='/a/b/c') # get_osinfo() + + @mock.patch( + ("plugins.systems.package_container_crawler." + "exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + ("plugins.systems.package_container_crawler." + "get_docker_container_rootfs_path"), + side_effect=lambda long_id: '/a/b/c') + @mock.patch( + 'utils.package_utils.osinfo.get_osinfo', + side_effect=lambda mount_point=None: { + 'os': 'ubuntu', + 'version': '123'}) + @mock.patch('utils.package_utils.os.path.exists', + side_effect=lambda p: True) + @mock.patch('utils.package_utils.get_dpkg_packages', + side_effect=lambda a, b, c: [('pkg1', + PackageFeature(None, 'pkg1', + 123, 'v1', + 'x86'))]) + def test_package_container_crawler_avoidsetns(self, *args): + fc = PackageContainerCrawler() + for (k, f, fname) in fc.crawl(avoid_setns=True): + assert fname == "package" + assert f == PackageFeature( + installed=None, + pkgname='pkg1', + pkgsize=123, + pkgversion='v1', + pkgarchitecture='x86') + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.process_host_crawler.psutil.process_iter', + side_effect=lambda: [Process('init')]) + def test_process_host_crawler(self, *args): + fc = ProcessHostCrawler() + for (k, f, fname) in fc.crawl(): + print f + assert fname == "process" + assert f.pname == 'init' + assert f.cmd == 'cmd' + assert f.pid == 123 + assert args[0].call_count == 1 + + @mock.patch( + ("plugins.systems.process_container_crawler.utils.dockerutils." 
+ "exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + @mock.patch( + 'plugins.systems.process_container_crawler.psutil.process_iter', + side_effect=lambda: [Process('init')]) + @mock.patch( + 'plugins.systems.process_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + def test_process_container_crawler(self, *args): + fc = ProcessContainerCrawler() + for (k, f, fname) in fc.crawl('123'): + print f + assert fname == "process" + assert f.pname == 'init' + assert f.cmd == 'cmd' + assert f.pid == 123 + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.process_vm_crawler.psvmi.context_init', + side_effect=lambda dn1, dn2, kv, d, a: 1000) + @mock.patch('plugins.systems.process_vm_crawler.psvmi.process_iter', + side_effect=lambda vmc: [Process('init')]) + @mock.patch('plugins.systems.process_vm_crawler.psvmi') + def test_process_vm_crawler(self, *args): + fc = process_vm_crawler() + for (k, f, fname) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): + print f + assert fname == "process" + assert f.pname == 'init' + assert f.cmd == 'cmd' + assert f.pid == 123 + assert args[1].call_count == 1 # process_iter + + @mock.patch('utils.disk_utils.psutil.disk_partitions', + side_effect=mocked_disk_partitions) + @mock.patch('utils.disk_utils.psutil.disk_usage', + side_effect=lambda x: pdiskusage(10, 100)) + def test_crawl_disk_partitions_invm_mode(self, *args): + fc = DiskHostCrawler() + disks = fc.crawl() + assert set(disks) == set([('/a', + DiskFeature(partitionname='/dev/a', + freepct=90.0, + fstype='type', + mountpt='/a', + mountopts='opts', + partitionsize=100), + 'disk'), + ('/b', + DiskFeature(partitionname='/dev/b', + freepct=90.0, + fstype='type', + mountpt='/b', + mountopts='opts', + partitionsize=100), + 'disk')]) + + @mock.patch( + 'plugins.systems.disk_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch('utils.disk_utils.psutil.disk_partitions', + side_effect=mocked_disk_partitions) + @mock.patch('utils.disk_utils.psutil.disk_usage', + side_effect=lambda x: pdiskusage(10, 100)) + @mock.patch( + ("plugins.systems.disk_container_crawler.utils.dockerutils." 
+ "exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + def test_crawl_disk_partitions_outcontainer_mode(self, *args): + fc = DiskContainerCrawler() + disks = fc.crawl('123') + assert set(disks) == set([('/a', + DiskFeature(partitionname='/dev/a', + freepct=90.0, + fstype='type', + mountpt='/a', + mountopts='opts', + partitionsize=100), + 'disk'), + ('/b', + DiskFeature(partitionname='/dev/b', + freepct=90.0, + fstype='type', + mountpt='/b', + mountopts='opts', + partitionsize=100), + 'disk')]) + + @mock.patch('utils.metric_utils.psutil.process_iter', + side_effect=lambda: [Process('init')]) + def test_crawl_metrics_invm_mode(self, *args): + fc = MetricHostCrawler() + for (k, f, t) in fc.crawl(): + assert f.cpupct == 30.0 + assert f.mempct == 30.0 + assert f.pname == 'init' + assert f.pid == 123 + assert f.rss == 10 + assert f.status == 'Running' + assert f.vms == 20 + assert f.read == 10 + assert f.write == 20 + assert args[0].call_count == 1 + + @mock.patch('utils.metric_utils.psutil.process_iter', + side_effect=lambda: [Process('init')]) + @mock.patch('utils.metric_utils.round', + side_effect=throw_os_error) + def test_crawl_metrics_invm_mode_failure(self, *args): + with self.assertRaises(OSError): + fc = MetricHostCrawler() + for ff in fc.crawl(): + pass + assert args[0].call_count == 1 + + @mock.patch('utils.metric_utils.psutil.process_iter', + side_effect=lambda: [Process('init')]) + @mock.patch( + 'plugins.systems.metric_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch( + ("plugins.systems.disk_container_crawler.utils.dockerutils." + "exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + def test_crawl_metrics_outcontainer_mode(self, *args): + fc = MetricContainerCrawler() + for (k, f, t) in fc.crawl('123'): + assert f.cpupct == 30.0 + assert f.mempct == 30.0 + assert f.pname == 'init' + assert f.pid == 123 + assert f.rss == 10 + assert f.status == 'Running' + assert f.vms == 20 + assert f.read == 10 + assert f.write == 20 + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.metric_vm_crawler.psvmi.context_init', + side_effect=lambda dn1, dn2, kv, d, a: 1000) + @mock.patch('plugins.systems.metric_vm_crawler.psvmi.process_iter', + side_effect=lambda vmc: [Process('init')]) + @mock.patch( + ("plugins.systems.metric_vm_crawler." 
+ "MetricVmCrawler._crawl_metrics_cpu_percent"), + side_effect=lambda proc: 30.0) + @mock.patch('plugins.systems.metric_vm_crawler.psvmi') + def test_crawl_metrics_vm_mode(self, *args): + fc = MetricVmCrawler() + for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): + assert f.cpupct == 30.0 + assert f.mempct == 30.0 + assert f.pname == 'init' + assert f.pid == 123 + assert f.rss == 10 + assert f.status == 'Running' + assert f.vms == 20 + assert f.read == 10 + assert f.write == 20 + assert args[1].call_count == 1 # process_iter + + @mock.patch('utils.connection_utils.psutil.process_iter', + side_effect=lambda: [Process('init')]) + def test_crawl_connections_invm_mode(self, *args): + fc = ConnectionHostCrawler() + for (k, f, t) in fc.crawl(): + assert f.localipaddr == '1.1.1.1' + assert f.remoteipaddr == '2.2.2.2' + assert f.localport == '22' + assert f.remoteport == '22' + assert args[0].call_count == 1 + + @mock.patch('utils.connection_utils.psutil.process_iter', + side_effect=lambda: [Process('init')]) + @mock.patch( + 'plugins.systems.connection_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch( + ("plugins.systems.connection_container_crawler.utils.dockerutils." + "exec_dockerinspect"), + side_effect=lambda long_id: {'State': {'Pid': 123}}) + def test_crawl_connections_outcontainer_mode(self, *args): + fc = ConnectionContainerCrawler() + for (k, f, t) in fc.crawl('123'): + assert f.localipaddr == '1.1.1.1' + assert f.remoteipaddr == '2.2.2.2' + assert f.localport == '22' + assert f.remoteport == '22' + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.connection_vm_crawler.psvmi.context_init', + side_effect=lambda dn1, dn2, kv, d, a: 1000) + @mock.patch('plugins.systems.connection_vm_crawler.psvmi.process_iter', + side_effect=lambda vmc: [Process('init')]) + @mock.patch('plugins.systems.connection_vm_crawler.psvmi') + def test_crawl_connections_outvm_mode(self, *args): + fc = ConnectionVmCrawler() + for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): + assert f.localipaddr == '1.1.1.1' + assert f.remoteipaddr == '2.2.2.2' + assert f.localport == '22' + assert f.remoteport == '22' + assert args[1].call_count == 1 + + @mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory', + side_effect=lambda: psutils_memory(2, 2, 3, 4)) + def test_crawl_memory_invm_mode(self, *args): + fc = MemoryHostCrawler() + for (k, f, t) in fc.crawl(): + assert f == MemoryFeature( + memory_used=2, + memory_buffered=3, + memory_cached=4, + memory_free=2, + memory_util_percentage=50) + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.memory_host_crawler.psutil.virtual_memory', + side_effect=throw_os_error) + def test_crawl_memory_invm_mode_failure(self, *args): + fc = MemoryHostCrawler() + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl(): + pass + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.memory_vm_crawler.psvmi.context_init', + side_effect=lambda dn1, dn2, kv, d, a: 1000) + @mock.patch('plugins.systems.memory_vm_crawler.psvmi.system_memory_info', + side_effect=lambda vmc: psvmi_memory(10, 20, 30, 40)) + @mock.patch('plugins.systems.memory_vm_crawler.psvmi') + def test_crawl_memory_outvm_mode(self, *args): + fc = MemoryVmCrawler() + for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): + assert f == MemoryFeature( + memory_used=10, + memory_buffered=20, + memory_cached=30, + memory_free=40, + memory_util_percentage=20) + assert 
args[1].call_count == 1 + + @mock.patch( + 'plugins.systems.memory_container_crawler.psutil.virtual_memory', + side_effect=lambda: psutils_memory( + 10, + 10, + 3, + 10)) + @mock.patch('plugins.systems.memory_container_crawler.open', + side_effect=mocked_memory_cgroup_open) + @mock.patch('plugins.systems.memory_container_crawler.DockerContainer', + side_effect=lambda container_id: DummyContainer(container_id)) + def test_crawl_memory_outcontainer_mode(self, *args): + fc = MemoryContainerCrawler() + for (k, f, t) in fc.crawl('123'): + assert f == MemoryFeature( + memory_used=2, + memory_buffered=200, + memory_cached=100, + memory_free=0, + memory_util_percentage=100) + assert args[1].call_count == 3 # 3 cgroup files + + @mock.patch( + 'plugins.systems.memory_container_crawler.psutil.virtual_memory', + side_effect=lambda: psutils_memory( + 10, + 10, + 3, + 10)) + @mock.patch('plugins.systems.memory_container_crawler.open', + side_effect=throw_os_error) + @mock.patch('plugins.systems.memory_container_crawler.DockerContainer', + side_effect=lambda container_id: DummyContainer(container_id)) + def test_crawl_memory_outcontainer_mode_failure(self, *args): + fc = MemoryContainerCrawler() + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl('123'): + pass + assert args[1].call_count == 1 # 1 cgroup files + + @mock.patch( + 'plugins.systems.cpu_host_crawler.psutil.cpu_times_percent', + side_effect=lambda percpu: [ + psutils_cpu( + 10, + 20, + 30, + 40, + 50, + 60, + 70)]) + def test_crawl_cpu_invm_mode(self, *args): + fc = CpuHostCrawler() + for (k, f, t) in fc.crawl(): + assert f == CpuFeature( + cpu_idle=10, + cpu_nice=20, + cpu_user=30, + cpu_wait=40, + cpu_system=50, + cpu_interrupt=60, + cpu_steal=70, + cpu_util=90) + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.cpu_host_crawler.psutil.cpu_times_percent', + side_effect=throw_os_error) + def test_crawl_cpu_invm_mode_failure(self, *args): + fc = CpuHostCrawler() + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl(): + pass + assert args[0].call_count == 1 + + @mock.patch( + 'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent', + side_effect=lambda percpu: [ + psutils_cpu( + 10, + 20, + 30, + 40, + 50, + 60, + 70)]) + @mock.patch('plugins.systems.cpu_container_crawler.time.sleep') + @mock.patch('plugins.systems.cpu_container_crawler.open', + side_effect=mocked_cpu_cgroup_open) + @mock.patch('plugins.systems.cpu_container_crawler.DockerContainer', + side_effect=lambda container_id: DummyContainer(container_id)) + def test_crawl_cpu_outcontainer_mode(self, *args): + fc = CpuContainerCrawler() + for (k, f, t) in fc.crawl('123'): + assert f == CpuFeature( + cpu_idle=90.0, + cpu_nice=20, + cpu_user=5.0, + cpu_wait=40, + cpu_system=5.0, + cpu_interrupt=60, + cpu_steal=70, + cpu_util=10.0) + assert args[1].call_count == 3 # open for 3 cgroup files + + @mock.patch( + 'plugins.systems.cpu_container_crawler.psutil.cpu_times_percent', + side_effect=lambda percpu: [ + psutils_cpu( + 10, + 20, + 30, + 40, + 50, + 60, + 70)]) + @mock.patch('plugins.systems.cpu_container_crawler.time.sleep') + @mock.patch('plugins.systems.cpu_container_crawler.open', + side_effect=throw_os_error) + @mock.patch('plugins.systems.cpu_container_crawler.DockerContainer', + side_effect=lambda container_id: DummyContainer(container_id)) + def test_crawl_cpu_outcontainer_mode_failure(self, *args): + fc = CpuContainerCrawler() + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl('123'): + pass + assert args[0].call_count == 1 
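
A note on the args[0], args[1] indexing used throughout these tests: mock.patch decorators are applied bottom-up, so the mock objects arrive in *args in the reverse of their written order (the patch listed last is args[0]). Below is a minimal, self-contained sketch of that convention; it is standalone illustration, not part of this patch, and the patched targets (os.getpid, os.getcwd) are arbitrary stand-ins:

    import os
    import unittest

    import mock


    class DecoratorOrderExample(unittest.TestCase):

        @mock.patch('os.getpid', side_effect=lambda: 111)     # top patch
        @mock.patch('os.getcwd', side_effect=lambda: '/tmp')  # bottom patch
        def test_patch_order(self, *args):
            # The bottom-most decorator is applied first, so its mock
            # becomes the first positional argument, i.e. args[0].
            assert os.getcwd() == '/tmp'
            assert os.getpid() == 111
            assert args[0].call_count == 1  # os.getcwd mock (bottom)
            assert args[1].call_count == 1  # os.getpid mock (top)

    if __name__ == '__main__':
        unittest.main()
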
+ + @mock.patch( + 'plugins.systems.interface_host_crawler.psutil.net_io_counters', + side_effect=lambda pernic: {'interface1-unit-tests': + psutils_net( + 10, + 20, + 30, + 40, + 50, + 60)}) + def test_crawl_interface_invm_mode(self, *args): + fc = InterfaceHostCrawler() + for (k, f, t) in fc.crawl(): + assert f == InterfaceFeature( + if_octets_tx=0, + if_octets_rx=0, + if_packets_tx=0, + if_packets_rx=0, + if_errors_tx=0, + if_errors_rx=0) + + for (k, f, t) in fc.crawl(): + assert f == InterfaceFeature( + if_octets_tx=0, + if_octets_rx=0, + if_packets_tx=0, + if_packets_rx=0, + if_errors_tx=0, + if_errors_rx=0) + assert args[0].call_count == 2 + + @mock.patch( + 'plugins.systems.interface_host_crawler.psutil.net_io_counters', + side_effect=throw_os_error) + def test_crawl_interface_invm_mode_failure(self, *args): + fc = InterfaceHostCrawler() + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl(): + pass + + # Each crawl in crawlutils.py instantiates a FeaturesCrawler object + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl(): + pass + assert args[0].call_count == 2 + + @mock.patch('plugins.systems.interface_container_crawler.DockerContainer', + side_effect=lambda container_id: DummyContainer(container_id)) + @mock.patch( + 'plugins.systems.interface_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch( + 'plugins.systems.interface_container_crawler.psutil.net_io_counters', + side_effect=lambda pernic: {'eth0': + psutils_net( + 10, + 20, + 30, + 40, + 50, + 60)}) + def test_crawl_interface_outcontainer_mode(self, *args): + fc = InterfaceContainerCrawler() + for (k, f, t) in fc.crawl('123'): + assert f == InterfaceFeature( + if_octets_tx=0, + if_octets_rx=0, + if_packets_tx=0, + if_packets_rx=0, + if_errors_tx=0, + if_errors_rx=0) + + for (k, f, t) in fc.crawl('123'): + assert f == InterfaceFeature( + if_octets_tx=0, + if_octets_rx=0, + if_packets_tx=0, + if_packets_rx=0, + if_errors_tx=0, + if_errors_rx=0) + assert args[0].call_count == 2 + assert args[1].call_count == 2 + + @mock.patch('plugins.systems.interface_vm_crawler.psvmi.context_init', + side_effect=lambda dn1, dn2, kv, d, a: 1000) + @mock.patch('plugins.systems.interface_vm_crawler.psvmi.interface_iter', + side_effect=lambda vmc: [psvmi_interface( + 'eth1', 10, 20, 30, 40, 50, 60)]) + @mock.patch('plugins.systems.interface_vm_crawler.psvmi') + def test_crawl_interface_outvm_mode(self, *args): + fc = InterfaceVmCrawler() + for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): + assert f == InterfaceFeature( + if_octets_tx=0, + if_octets_rx=0, + if_packets_tx=0, + if_packets_rx=0, + if_errors_tx=0, + if_errors_rx=0) + + for (k, f, t) in fc.crawl(vm_desc=('dn', '2.6', 'ubuntu', 'x86')): + assert f == InterfaceFeature( + if_octets_tx=0, + if_octets_rx=0, + if_packets_tx=0, + if_packets_rx=0, + if_errors_tx=0, + if_errors_rx=0) + assert args[1].call_count == 2 + assert args[2].call_count == 2 + + @mock.patch('plugins.systems.load_host_crawler.os.getloadavg', + side_effect=lambda: [1, 2, 3]) + def test_crawl_load_invm_mode(self, *args): + fc = LoadHostCrawler() + for (k, f, t) in fc.crawl(): + assert f == LoadFeature(shortterm=1, midterm=2, longterm=2) + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.load_host_crawler.os.getloadavg', + side_effect=throw_os_error) + def test_crawl_load_invm_mode_failure(self, *args): + fc = LoadHostCrawler() + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl(): + pass + assert 
args[0].call_count == 1 + + @mock.patch( + 'plugins.systems.load_container_crawler.run_as_another_namespace', + side_effect=mocked_run_as_another_namespace) + @mock.patch('plugins.systems.load_container_crawler.os.getloadavg', + side_effect=lambda: [1, 2, 3]) + @mock.patch('plugins.systems.load_container_crawler.DockerContainer', + side_effect=lambda container_id: DummyContainer(container_id)) + def test_crawl_load_outcontainer_mode(self, *args): + fc = LoadContainerCrawler() + for (k, f, t) in fc.crawl('123'): + assert f == LoadFeature(shortterm=1, midterm=2, longterm=2) + assert args[1].call_count == 1 + assert args[2].call_count == 1 + + @mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps', + side_effect=lambda: [{'State': {'Running': True}, + 'Image': 'reg/image:latest', + 'Config': {'Cmd': 'command'}, + 'Name': 'name', + 'Id': 'id'}]) + def test_crawl_dockerps_invm_mode(self, *args): + fc = DockerpsHostCrawler() + for (k, f, t) in fc.crawl(): + assert f == DockerPSFeature( + Status=True, + Created=0, + Image='reg/image:latest', + Ports=[], + Command='command', + Names='name', + Id='id') + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.dockerps_host_crawler.exec_dockerps', + side_effect=throw_os_error) + def test_crawl_dockerps_invm_mode_failure(self, *args): + fc = DockerpsHostCrawler() + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl(): + pass + assert args[0].call_count == 1 + + @mock.patch('plugins.systems.dockerhistory_container_crawler.exec_docker_history', + side_effect=lambda long_id: [ + {'Id': 'image1', 'random': 'abc'}, + {'Id': 'image2', 'random': 'abc'}]) + def test_crawl_dockerhistory_outcontainer_mode(self, *args): + fc = DockerhistoryContainerCrawler() + for (k, f, t) in fc.crawl('123'): + assert f == {'history': [{'Id': 'image1', 'random': 'abc'}, + {'Id': 'image2', 'random': 'abc'}]} + assert args[0].call_count == 1 + + @mock.patch( + 'plugins.systems.dockerhistory_container_crawler.exec_docker_history', + side_effect=throw_os_error) + def test_crawl_dockerhistory_outcontainer_mode_failure(self, *args): + fc = DockerhistoryContainerCrawler() + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl('123'): + pass + assert args[0].call_count == 1 + + @mock.patch( + 'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect', + side_effect=lambda long_id: { + 'Id': 'image1', + 'random': 'abc'}) + def test_crawl_dockerinspect_outcontainer_mode(self, *args): + fc = DockerinspectContainerCrawler() + for (k, f, t) in fc.crawl('123'): + assert f == {'Id': 'image1', 'random': 'abc'} + assert args[0].call_count == 1 + + @mock.patch( + 'plugins.systems.dockerinspect_container_crawler.exec_dockerinspect', + side_effect=throw_os_error) + def test_crawl_dockerinspect_outcontainer_mode_failure(self, *args): + fc = DockerinspectContainerCrawler() + with self.assertRaises(OSError): + for (k, f, t) in fc.crawl('123'): + pass + assert args[0].call_count == 1 diff --git a/tests/unit/test_vms_crawler.py b/tests/unit/test_vms_crawler.py new file mode 100644 index 00000000..096f3a25 --- /dev/null +++ b/tests/unit/test_vms_crawler.py @@ -0,0 +1,126 @@ +import mock +import unittest +from vms_crawler import VirtualMachinesCrawler + + +class MockedOSCrawler: + + def crawl(self, vm_desc, **kwargs): + return [('linux', {'os': 'some_os'}, 'os')] + + +class MockedCPUCrawler: + + def crawl(self, vm_desc, **kwargs): + return [('cpu-0', {'used': 100}, 'cpu')] + + +class MockedOSCrawlerFailure: + + def crawl(self, vm_desc, **kwargs): + print 
vm_desc + if vm_desc[0] == 'errorpid': + raise OSError('some exception') + else: + return [('linux', {'os': 'some_os'}, 'os')] + + +class MockedQemuVirtualMachine: + + def __init__(self, name='name', pid=777): + self.namespace = name + self.name = name + self.kernel = '2.6' + self.distro = 'ubuntu' + self.arch = 'x86' + self.pid = pid + + def get_metadata_dict(self): + return {'namespace': self.namespace} + + def get_vm_desc(self): + return str(self.pid), self.kernel, self.distro, self.arch + + +class VirtualMachinesCrawlerTests(unittest.TestCase): + + @mock.patch( + 'vms_crawler.plugins_manager.get_vm_crawl_plugins', + side_effect=lambda features: [(MockedOSCrawler(), {}), + (MockedCPUCrawler(), {})]) + @mock.patch('vms_crawler.get_virtual_machines', + side_effect=lambda user_list, host_namespace: [ + MockedQemuVirtualMachine( + name='aaa', + pid=101), + MockedQemuVirtualMachine( + name='bbb', + pid=102), + MockedQemuVirtualMachine( + name='ccc', + pid=103)]) + def test_vms_crawler(self, *args): + crawler = VirtualMachinesCrawler(features=['os'], user_list=['abcd']) + frames = list(crawler.crawl()) + namespaces = sorted([f.metadata['namespace'] for f in frames]) + assert namespaces == sorted(['aaa', 'bbb', 'ccc']) + features_count = sorted([f.num_features for f in frames]) + assert features_count == sorted([2, 2, 2]) + system_types = sorted([f.metadata['system_type'] for f in frames]) + assert system_types == sorted(['vm', 'vm', 'vm']) + assert args[0].call_count == 1 + assert args[1].call_count == 1 + + @mock.patch( + 'vms_crawler.plugins_manager.get_vm_crawl_plugins', + side_effect=lambda features: [(MockedOSCrawlerFailure(), {}), + (MockedCPUCrawler(), {})]) + @mock.patch('vms_crawler.get_virtual_machines', + side_effect=lambda user_list, host_namespace: [ + MockedQemuVirtualMachine( + name='aaa', + pid=101), + MockedQemuVirtualMachine( + name='errorid', + pid='errorpid'), + MockedQemuVirtualMachine( + name='ccc', + pid=103)]) + def test_failed_vms_crawler(self, *args): + crawler = VirtualMachinesCrawler(features=['os']) + with self.assertRaises(OSError): + frames = list(crawler.crawl(ignore_plugin_exception=False)) + assert args[0].call_count == 1 + assert args[1].call_count == 1 + + @mock.patch( + 'vms_crawler.plugins_manager.get_vm_crawl_plugins', + side_effect=lambda features: [(MockedCPUCrawler(), {}), + (MockedOSCrawlerFailure(), {}), + (MockedCPUCrawler(), {})]) + @mock.patch('vms_crawler.get_virtual_machines', + side_effect=lambda user_list, host_namespace: [ + MockedQemuVirtualMachine( + name='aaa', + pid=101), + MockedQemuVirtualMachine( + name='errorid', + pid='errorpid'), + MockedQemuVirtualMachine( + name='ccc', + pid=103)]) + def test_failed_vms_crawler_with_ignore_failure(self, *args): + crawler = VirtualMachinesCrawler(features=['cpu', 'os', 'cpu']) + frames = list(crawler.crawl()) # defaults to ignore_plugin_exception + namespaces = sorted([f.metadata['namespace'] for f in frames]) + assert namespaces == sorted(['aaa', 'errorid', 'ccc']) + features_count = sorted([f.num_features for f in frames]) + assert features_count == sorted([3, 2, 3]) + system_types = [f.metadata['system_type'] for f in frames] + assert system_types == ['vm', 'vm', 'vm'] + assert args[0].call_count == 1 + assert args[1].call_count == 1 + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/unit/vfs_mount_init-id b/tests/unit/vfs_mount_init-id new file mode 100644 index 00000000..08dbf2d4 --- /dev/null +++ b/tests/unit/vfs_mount_init-id @@ -0,0 +1 @@ +vol1/id/rootfs-a-b-c From 
0e1d95a2fc8017ffedc31f63cc64b836813a8e84 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 11 Jan 2018 13:03:04 -0500 Subject: [PATCH 40/47] adding pids limit to plugin cont Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 1 + .../crawler/plugins/systems/config_container_crawler.py | 1 + .../crawler/plugins/systems/os_container_crawler.py | 1 + .../crawler/plugins/systems/package_container_crawler.py | 1 + .../crawler/plugins/systems/pythonpackage_container_crawler.py | 1 + .../crawler/plugins/systems/rubypackage_container_crawler.py | 1 + 6 files changed, 6 insertions(+) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 627cde8d..4bec999a 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -208,6 +208,7 @@ def create_plugincont(self, guestcont): user=self.plugincont_username, command="/usr/bin/python2.7 /crawler/crawler_lite.py " "--frequency=" + str(self.frequency), + pids_limit=10, pid_mode='container:' + guestcont_id, network_mode='container:' + guestcont_id, cap_add=["SYS_PTRACE", "DAC_READ_SEARCH"], diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py index ea1697f3..c1622bd4 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py @@ -56,6 +56,7 @@ def crawl( else: # in all other cases, including wrong mode set real_root = os.open('/', os.O_RDONLY) os.chroot('/rootfs_local') + os.chdir('/') config_list = list(crawl_config_files(root_dir, exclude_dirs, None, diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py index 903e0ec7..141ae051 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py @@ -21,6 +21,7 @@ def crawl(self, container_id, avoid_setns=False, **kwargs): else: # in all other cases, including wrong mode set real_root = os.open('/', os.O_RDONLY) os.chroot('/rootfs_local') + os.chdir('/') os_info = crawl_os() os.fchdir(real_root) os.chroot('.') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py index 5562ec47..ba018a18 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py @@ -31,6 +31,7 @@ def crawl(self, container_id=None, avoid_setns=False, print "in package plugin" real_root = os.open('/', os.O_RDONLY) os.chroot('/rootfs_local') + os.chdir('/') pkg_list = list(crawl_packages(None, root_dir, 0, False)) os.fchdir(real_root) os.chroot('.') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py index 32a466e8..8d721195 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py +++ 
b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py @@ -94,6 +94,7 @@ def _crawl_without_setns(self, container_id): def _crawl_in_system(self): real_root = os.open('/', os.O_RDONLY) os.chroot('/rootfs_local') + os.chdir('/') if self.get_packages_generic is True: mountpoint = '/' diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py index 3984dc8a..ff1b8453 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/rubypackage_container_crawler.py @@ -70,6 +70,7 @@ def _crawl_without_setns(self, container_id): def _crawl_in_system(self): real_root = os.open('/', os.O_RDONLY) os.chroot('/rootfs_local') + os.chdir('/') if self.get_packages_generic is True: mountpoint = '/' From 260d6596dac19d70f7e39788436ecb5faaff633e Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 22 Feb 2018 10:52:46 -0500 Subject: [PATCH 41/47] removing unused imports Signed-off-by: Sahil Suneja --- .../utils/plugincont/plugincont_img/crawler/crawler_lite.py | 2 ++ .../crawler/plugins/systems/config_container_crawler.py | 2 -- .../crawler/plugins/systems/connection_container_crawler.py | 2 -- .../crawler/plugins/systems/cpu_container_crawler.py | 1 - .../crawler/plugins/systems/disk_container_crawler.py | 2 -- .../crawler/plugins/systems/interface_container_crawler.py | 2 -- .../crawler/plugins/systems/load_container_crawler.py | 2 -- .../crawler/plugins/systems/memory_container_crawler.py | 1 - .../crawler/plugins/systems/metric_container_crawler.py | 2 -- .../crawler/plugins/systems/os_container_crawler.py | 2 -- .../crawler/plugins/systems/package_container_crawler.py | 3 --- .../crawler/plugins/systems/pythonpackage_container_crawler.py | 1 - 12 files changed, 2 insertions(+), 20 deletions(-) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py index 2a9823b7..fe3ce557 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/crawler_lite.py @@ -7,6 +7,8 @@ import shutil import cStringIO import json +import logging +logging.basicConfig() from icrawl_plugin import IContainerCrawler plugins_dir = '/crawler/plugins/systems/' # might eventually become /home/user1/crawler/plugins/...
diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py index c1622bd4..0decb84b 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/config_container_crawler.py @@ -1,10 +1,8 @@ import logging import os -import utils.dockerutils import utils.misc from icrawl_plugin import IContainerCrawler from utils.config_utils import crawl_config_files -from utils.namespace import run_as_another_namespace logger = logging.getLogger('crawlutils') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_container_crawler.py index f31b0a84..77810e2d 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/connection_container_crawler.py @@ -1,9 +1,7 @@ import logging -import utils.dockerutils from icrawl_plugin import IContainerCrawler from utils.connection_utils import crawl_connections -from utils.namespace import run_as_another_namespace, ALL_NAMESPACES logger = logging.getLogger('crawlutils') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.py index 9c555b0c..5e7e93b9 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.py @@ -4,7 +4,6 @@ import os import psutil -from dockercontainer import DockerContainer from icrawl_plugin import IContainerCrawler from utils.features import CpuFeature diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.py index e6d1fa4e..8d75d988 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/disk_container_crawler.py @@ -1,9 +1,7 @@ import logging -import utils.dockerutils from icrawl_plugin import IContainerCrawler from utils.disk_utils import crawl_disk_partitions -from utils.namespace import run_as_another_namespace, ALL_NAMESPACES logger = logging.getLogger('crawlutils') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.py index a0b42022..6b79b160 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/interface_container_crawler.py @@ -3,10 +3,8 @@ import psutil -from dockercontainer import DockerContainer from icrawl_plugin import IContainerCrawler from utils.features import InterfaceFeature -from utils.namespace import run_as_another_namespace logger = logging.getLogger('crawlutils') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.py 
b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.py index bcb10111..f641a35a 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/load_container_crawler.py @@ -1,10 +1,8 @@ import logging import os -from dockercontainer import DockerContainer from icrawl_plugin import IContainerCrawler from utils.features import LoadFeature -from utils.namespace import run_as_another_namespace, ALL_NAMESPACES logger = logging.getLogger('crawlutils') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py index de59f52c..bda49a88 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py @@ -2,7 +2,6 @@ import os import psutil -from dockercontainer import DockerContainer from icrawl_plugin import IContainerCrawler from utils.features import MemoryFeature diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.py index 96495fcf..8a949202 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/metric_container_crawler.py @@ -1,9 +1,7 @@ import logging -import utils.dockerutils from icrawl_plugin import IContainerCrawler from utils.metric_utils import crawl_metrics -from utils.namespace import run_as_another_namespace, ALL_NAMESPACES logger = logging.getLogger('crawlutils') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py index 141ae051..cb2d9437 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/os_container_crawler.py @@ -1,8 +1,6 @@ import logging import os -import utils.dockerutils from icrawl_plugin import IContainerCrawler -from utils.namespace import run_as_another_namespace, ALL_NAMESPACES from utils.os_utils import crawl_os, crawl_os_mountpoint logger = logging.getLogger('crawlutils') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py index ba018a18..d86a9785 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/package_container_crawler.py @@ -3,10 +3,7 @@ from icrawl_plugin import IContainerCrawler from utils.crawler_exceptions import CrawlError -from utils.dockerutils import (exec_dockerinspect, - get_docker_container_rootfs_path) from utils.misc import join_abs_paths -from utils.namespace import run_as_another_namespace, ALL_NAMESPACES from utils.package_utils import crawl_packages logger = logging.getLogger('crawlutils') diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py 
b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py index 8d721195..d852c42c 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/pythonpackage_container_crawler.py @@ -3,7 +3,6 @@ import re import subprocess -import utils.dockerutils from icrawl_plugin import IContainerCrawler From 92e53541befbfbfa57285d6cc0d0852cddea83bf Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 22 Feb 2018 11:08:40 -0500 Subject: [PATCH 42/47] more cgroup limits on plugin cont Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 4bec999a..66217e73 100644 --- a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -209,6 +209,9 @@ def create_plugincont(self, guestcont): command="/usr/bin/python2.7 /crawler/crawler_lite.py " "--frequency=" + str(self.frequency), pids_limit=10, + mem_limit='256m', + cpu_period=100000, + cpu_quota=25000, pid_mode='container:' + guestcont_id, network_mode='container:' + guestcont_id, cap_add=["SYS_PTRACE", "DAC_READ_SEARCH"], From 9f6713865c183d2450594da8339dde21a69c9726 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 22 Feb 2018 11:17:46 -0500 Subject: [PATCH 43/47] merge fix for new docker version Signed-off-by: Sahil Suneja --- tests/functional/test_functional_apk_package_crawler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/test_functional_apk_package_crawler.py b/tests/functional/test_functional_apk_package_crawler.py index 8e4bdaaf..72724f34 100644 --- a/tests/functional/test_functional_apk_package_crawler.py +++ b/tests/functional/test_functional_apk_package_crawler.py @@ -31,7 +31,7 @@ def setUp(self): ch.setFormatter(formatter) root.addHandler(ch) - self.docker = docker.Client(base_url='unix://var/run/docker.sock', + self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto') try: if len(self.docker.containers()) != 0: From 164d25cea4e94ca925d3396c42f7c433fe1eb652 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 22 Feb 2018 11:27:01 -0500 Subject: [PATCH 44/47] flake8 Signed-off-by: Sahil Suneja --- tests/functional/test_functional_apk_package_crawler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/test_functional_apk_package_crawler.py b/tests/functional/test_functional_apk_package_crawler.py index 72724f34..0d269ba8 100644 --- a/tests/functional/test_functional_apk_package_crawler.py +++ b/tests/functional/test_functional_apk_package_crawler.py @@ -32,7 +32,7 @@ def setUp(self): root.addHandler(ch) self.docker = docker.APIClient(base_url='unix://var/run/docker.sock', - version='auto') + version='auto') try: if len(self.docker.containers()) != 0: raise Exception( From bf1ff67dfccd9ad8716bfcd9fa1a52a74bef0e9a Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 22 Feb 2018 19:14:49 -0500 Subject: [PATCH 45/47] cgroupfs shared Signed-off-by: Sahil Suneja --- crawler/plugin_containers_manager.py | 14 +++++++++++++- .../plugins/systems/cpu_container_crawler.py | 9 ++++++++- .../plugins/systems/memory_container_crawler.py | 6 ++++++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/crawler/plugin_containers_manager.py b/crawler/plugin_containers_manager.py index 66217e73..5d48c45c 100644 --- 
a/crawler/plugin_containers_manager.py +++ b/crawler/plugin_containers_manager.py @@ -27,6 +27,7 @@ def __init__(self, frequency=-1): self.plugincont_image_path = os.getcwd() + \ '/crawler/utils/plugincont/plugincont_img' self.plugincont_guestcont_mountpoint = '/rootfs_local' + self.plugincont_guestcont_sysfs_mountpoint = '/sysfs_local' self.docker_client = docker.DockerClient( base_url='unix://var/run/docker.sock', version='auto') self.docker_APIclient = docker.APIClient( @@ -195,6 +196,11 @@ def create_plugincont(self, guestcont): guestcont_id = guestcont.long_id guestcont_rootfs = utils.dockerutils.get_docker_container_rootfs_path( guestcont_id) + guestcont_sysfs_mem = os.path.join( + self._get_cgroup_dir(['memory']), 'docker', guestcont_id) + guestcont_sysfs_cpu = os.path.join( + self._get_cgroup_dir(['cpuacct', 'cpu,cpuacct']), + 'docker', guestcont_id) plugincont = None plugincont_name = self.plugincont_name_prefix + '_' + guestcont_id seccomp_attr = json.dumps( @@ -219,7 +225,13 @@ def create_plugincont(self, guestcont): volumes={ guestcont_rootfs: { 'bind': self.plugincont_guestcont_mountpoint, - 'mode': 'ro'}}, + 'mode': 'ro'}, + guestcont_sysfs_mem: { + 'bind': self.plugincont_guestcont_sysfs_mountpoint + '' + '/sys/fs/cgroup/memory'}, + guestcont_sysfs_cpu: { + 'bind': self.plugincont_guestcont_sysfs_mountpoint + '' + '/sys/fs/cgroup/cpu,cpuacct'}}, detach=True) time.sleep(5) except Exception as exc: diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.py index 5e7e93b9..4ce5b2c8 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/cpu_container_crawler.py @@ -45,6 +45,7 @@ def _get_cgroup_dir(self, devlist=[]): if os.path.ismount(path): return path + for dev in devlist: # Try getting the mount point from /proc/mounts for l in open('/proc/mounts', 'r'): _type, mnt, _, _, _, _ = l.split(' ') @@ -53,7 +54,6 @@ def _get_cgroup_dir(self, devlist=[]): raise ValueError('Can not find the cgroup dir') - def get_cpu_cgroup_path(self, node='cpuacct.usage'): # In kernels 4.x, the node is actually called 'cpu,cpuacct' cgroup_dir = self._get_cgroup_dir(['cpuacct', 'cpu,cpuacct']) @@ -80,6 +80,10 @@ def crawl(self, container_id, avoid_setns=False, per_cpu=False, **kwargs): 100 - int(cpu.idle), ) + real_root = os.open('/', os.O_RDONLY) + os.chroot('/sysfs_local') + os.chdir('/') + if per_cpu: stat_file_name = 'cpuacct.usage_percpu' else: @@ -122,6 +126,9 @@ def crawl(self, container_id, avoid_setns=False, per_cpu=False, **kwargs): cpu_user_system[m.group(1)] = \ float(m.group(2)) + os.fchdir(real_root) + os.chroot('.') + for (index, cpu_usage_ns) in enumerate(cpu_usage_t1): usage_secs = (float(cpu_usage_t2[index]) - float(cpu_usage_ns)) / float(1e9) diff --git a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py index bda49a88..2257feda 100644 --- a/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py +++ b/crawler/utils/plugincont/plugincont_img/crawler/plugins/systems/memory_container_crawler.py @@ -33,6 +33,9 @@ def get_memory_cgroup_path(self, node='memory.stat'): return os.path.join(self._get_cgroup_dir(['memory']), node) def crawl(self, container_id, 
avoid_setns=False, **kwargs): + real_root = os.open('/', os.O_RDONLY) + os.chroot('/sysfs_local') + os.chdir('/') used = buffered = cached = free = 'unknown' with open(self.get_memory_cgroup_path('memory.stat' @@ -52,6 +55,9 @@ def crawl(self, container_id, avoid_setns=False, **kwargs): 'memory.usage_in_bytes'), 'r') as f: used = int(f.readline().strip()) + os.fchdir(real_root) + os.chroot('.') + host_free = psutil.virtual_memory().free container_total = used + min(host_free, limit - used) free = container_total - used From 4193706a8b09fb4c200a6a6f1c14b6e04cd2bdb2 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 22 Feb 2018 19:20:07 -0500 Subject: [PATCH 46/47] docker py version in requirements Signed-off-by: Sahil Suneja --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 14f4f8c5..4705764d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ netifaces==0.10.4 kafka-python==1.3.1 pykafka==1.1.0 kafka==1.3.3 -docker==2.0.0 +docker==3.0.1 python-dateutil==2.4.2 semantic_version==2.5.0 Yapsy==1.11.223 From 7853404868c1e74329fbe6ee176892a0e7212f40 Mon Sep 17 00:00:00 2001 From: Sahil Suneja Date: Thu, 22 Feb 2018 19:33:09 -0500 Subject: [PATCH 47/47] docker py version reduction for travis Signed-off-by: Sahil Suneja --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4705764d..286b10e6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ netifaces==0.10.4 kafka-python==1.3.1 pykafka==1.1.0 kafka==1.3.3 -docker==3.0.1 +docker==2.7.0 python-dateutil==2.4.2 semantic_version==2.5.0 Yapsy==1.11.223
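
A note on the os.chdir('/') calls that patch 40 adds after every os.chroot('/rootfs_local'): chroot(2) changes the root directory but not the current working directory, so without the chdir any relative path lookup would still resolve in the tree the process came from. The plugins rely on the converse to get back out again: they hold an open file descriptor to the old root, fchdir() to it, and chroot('.'). A minimal sketch of that round trip, using a hypothetical run_in_rootfs helper name (requires CAP_SYS_CHROOT):

    import os

    def run_in_rootfs(fn, new_root='/rootfs_local'):
        # Hypothetical helper mirroring the enter/escape pattern used by
        # the container plugins.
        real_root = os.open('/', os.O_RDONLY)  # fd into the old root tree
        try:
            os.chroot(new_root)
            os.chdir('/')  # pin cwd inside new_root; this is the patch 40 fix
            return fn()
        finally:
            os.fchdir(real_root)  # cwd back to the old tree via the saved fd
            os.chroot('.')        # the old root becomes '/' again
            os.close(real_root)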
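
Patches 40 and 42 together fence in the plugin sidecar with cgroup limits: at most 10 tasks (pids_limit), 256 MiB of memory (mem_limit), and a quarter of one CPU, since the CFS scheduler grants cpu_quota microseconds of runtime per cpu_period, here 25000/100000 = 0.25. A standalone sketch of the same knobs with docker-py 2.x, assuming a throwaway busybox container for illustration:

    import docker

    client = docker.from_env()
    cont = client.containers.run(
        'busybox', 'sleep 60',  # assumed image and command
        pids_limit=10,          # a fork bomb stalls at 10 tasks
        mem_limit='256m',       # OOM-killed past 256 MiB
        cpu_period=100000,      # CFS accounting period, in microseconds
        cpu_quota=25000,        # runnable for 25 ms out of every 100 ms
        detach=True)
    print(cont.id)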
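
Patch 45 gives the sidecar read access to the guest's resource accounting without exposing the whole host cgroupfs: create_plugincont bind-mounts the guest container's memory and cpu,cpuacct cgroup directories under /sysfs_local inside the plugin container, and the cpu/memory plugins chroot into /sysfs_local so their existing /sys/fs/cgroup/... probing finds the guest's files at the usual paths. A hedged sketch of the equivalent read without the chroot, assuming that mount layout:

    import os

    def read_guest_mem_usage(sysfs_root='/sysfs_local'):
        # Same file the memory plugin reads after chrooting into sysfs_root.
        path = os.path.join(sysfs_root, 'sys/fs/cgroup/memory',
                            'memory.usage_in_bytes')
        with open(path) as f:
            return int(f.readline().strip())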
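
The docker fixes in patches 43, 46, and 47 track the docker-py 2.x split: the old low-level docker.Client was renamed docker.APIClient, and docker.from_env() returns the high-level DockerClient that wraps it, which is why the functional test's constructor changes while the rest of its calls stay the same. For illustration:

    import docker

    # Low-level client, formerly docker.Client in docker-py 1.x:
    api = docker.APIClient(base_url='unix://var/run/docker.sock',
                           version='auto')
    print(api.version()['ApiVersion'])

    # High-level client; its .api attribute is the same low-level type:
    client = docker.from_env()
    assert isinstance(client.api, docker.APIClient)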