diff --git a/.gitignore b/.gitignore index ca4c07dd6..7dbc14b41 100644 --- a/.gitignore +++ b/.gitignore @@ -44,6 +44,7 @@ coverage.xml # python files *.egg-info *.pyc +*.pyi # ignore package files *.rpm diff --git a/CHANGELOG.md b/CHANGELOG.md index 425f2ae09..35f1789d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,31 @@ +## 2024-07-04 CORE 9.1.0 + +* Installation + * most install files are now consolidated under /opt/core + * \#830 / \#831 - fixed issues with systemd service support +* Documentation + * added notes about MaxSession configuration for SSH when running distributed + * \#826 - updated tutorial 3 documentation +* core-daemon + * updated Docker nodes to use docker exec by default for commands + * added Podman nodes + * Docker/Podman compose support for nodes to allow greater configuration + * Docker/Podman node images no longer require network tools for orchestration + * EMANE configuration parsing updated to treat regexes as provided + * executables are now symlinks under $prefix/bin to avoid PATH issues + * new flag added to quickly enable debug logging + * code refactoring for handling internal nodes like control nets and ptp + * \#789 - wlan nodes can now configure the subnet for connected nodes + * \#801 - fixed issue running EMANE within Docker/Podman + * \#816 - fixed split link configuration saving to XML + * \#823 - config services are now the default and only services available + * \#842 - gRPC WLAN add node during runtime fixed + * \#875 - fix dataclass creation for python 3.11+ +* core-gui + * fixed copy/paste nodes to include image name + * adds root to xhost by default when running + * \#811 - added convenience shortcut to run arbitrary commands from nodes, like wireshark + ## 2023-08-01 CORE 9.0.3 * Installation diff --git a/Makefile.am b/Makefile.am index 2b5f29e2a..6fadc2048 100644 --- a/Makefile.am +++ b/Makefile.am @@ -94,9 +94,8 @@ fpm -s dir -t rpm -n core \ --vendor "$(PACKAGE_VENDOR)" \ -p core_VERSION_ARCH.rpm \ -v $(PACKAGE_VERSION) \ - --rpm-init package/core-daemon \ - --after-install package/after-install.sh \ - --after-remove package/after-remove.sh \ + --after-install package/after-install-rpm.sh \ + --after-remove package/after-remove-rpm.sh \ -d "ethtool" \ -d "tk" \ -d "procps-ng" \ @@ -108,9 +107,10 @@ fpm -s dir -t rpm -n core \ -d "nftables" \ netns/vnoded=/usr/bin/ \ netns/vcmd=/usr/bin/ \ - package/etc/core.conf=/etc/core/ \ - package/etc/logging.conf=/etc/core/ \ - package/examples=/opt/core/ \ + package/core-daemon.service=/usr/lib/systemd/system/ \ + package/etc/core.conf=/opt/core/etc/ \ + package/etc/logging.conf=/opt/core/etc/ \ + package/share=/opt/core/ \ daemon/dist/core-$(PACKAGE_VERSION)-py3-none-any.whl=/opt/core/ endef @@ -125,8 +125,8 @@ fpm -s dir -t deb -n core \ -v $(PACKAGE_VERSION) \ --deb-systemd package/core-daemon.service \ --deb-no-default-config-files \ - --after-install package/after-install.sh \ - --after-remove package/after-remove.sh \ + --after-install package/after-install-deb.sh \ + --after-remove package/after-remove-deb.sh \ -d "ethtool" \ -d "tk" \ -d "libtk-img" \ @@ -139,9 +139,9 @@ fpm -s dir -t deb -n core \ -d "nftables" \ netns/vnoded=/usr/bin/ \ netns/vcmd=/usr/bin/ \ - package/etc/core.conf=/etc/core/ \ - package/etc/logging.conf=/etc/core/ \ - package/examples=/opt/core/ \ + package/etc/core.conf=/opt/core/etc/ \ + package/etc/logging.conf=/opt/core/etc/ \ + package/share=/opt/core/ \ daemon/dist/core-$(PACKAGE_VERSION)-py3-none-any.whl=/opt/core/ endef @@ -172,8 +172,6 @@ $(info 
creating file $1 from $1.in) -e 's,[@]bindir[@],$(bindir),g' \ -e 's,[@]PACKAGE_VERSION[@],$(PACKAGE_VERSION),g' \ -e 's,[@]PACKAGE_DATE[@],$(PACKAGE_DATE),g' \ - -e 's,[@]CORE_LIB_DIR[@],$(CORE_LIB_DIR),g' \ - -e 's,[@]CORE_STATE_DIR[@],$(CORE_STATE_DIR),g' \ -e 's,[@]CORE_DATA_DIR[@],$(CORE_DATA_DIR),g' \ -e 's,[@]CORE_CONF_DIR[@],$(CORE_CONF_DIR),g' \ < $1.in > $1 diff --git a/README.md b/README.md index b0aa133f7..7df0fa76e 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,46 @@ # CORE + CORE: Common Open Research Emulator -Copyright (c)2005-2022 the Boeing Company. +Copyright (c)2005-2023 the Boeing Company. See the LICENSE file included in this distribution. ## About + The Common Open Research Emulator (CORE) is a tool for emulating networks on one or more machines. You can connect these emulated networks to live networks. CORE consists of a GUI for drawing topologies of lightweight virtual machines, and Python modules for scripting network emulation. +## Documentation & Support + +We are leveraging GitHub hosted documentation and Discord for persistent +chat rooms. This allows for more dynamic conversations and the +capability to respond faster. Feel free to join us at the link below. + +* [Documentation](https://coreemu.github.io/core/) +* [Discord Channel](https://discord.gg/AKd7kmP) + ## Quick Start + Requires Python 3.9+. More detailed instructions and install options can be found [here](https://coreemu.github.io/core/install.html). ### Package Install + Grab the latest deb/rpm from [releases](https://github.com/coreemu/core/releases). This will install vnoded/vcmd, system dependencies, and CORE within a python virtual environment at `/opt/core/venv`. + ```shell sudo install -y ./ ``` Then install OSPF MDR from source: + ```shell git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git cd ospf-mdr @@ -38,6 +53,7 @@ sudo make install ``` ### Script Install + The following should get you up and running on Ubuntu 22.04. This would install CORE into a python3 virtual environment and install [OSPF MDR](https://github.com/USNavalResearchLaboratory/ospf-mdr) from source. @@ -54,11 +70,3 @@ inv install # CentOS inv install -p /usr ``` - -## Documentation & Support -We are leveraging GitHub hosted documentation and Discord for persistent -chat rooms. This allows for more dynamic conversations and the -capability to respond faster. Feel free to join us at the link below. - -* [Documentation](https://coreemu.github.io/core/) -* [Discord Channel](https://discord.gg/AKd7kmP) diff --git a/configure.ac b/configure.ac index 4e56507a8..9d8b3eea2 100644 --- a/configure.ac +++ b/configure.ac @@ -2,7 +2,7 @@ # Process this file with autoconf to produce a configure script. 
# this defines the CORE version number, must be static for AC_INIT -AC_INIT(core, 9.0.3) +AC_INIT(core, 9.1.0) # autoconf and automake initialization AC_CONFIG_SRCDIR([netns/version.h.in]) @@ -17,18 +17,14 @@ PACKAGE_VENDOR="CORE Developers" PACKAGE_MAINTAINERS="$PACKAGE_VENDOR" # core specific variables -CORE_LIB_DIR="\${prefix}/lib/core" -CORE_CONF_DIR="/etc/core" -CORE_DATA_DIR="\${datadir}/core" -CORE_STATE_DIR="/var" +CORE_CONF_DIR="/opt/core/etc" +CORE_DATA_DIR="/opt/core/share" AC_SUBST(PACKAGE_DATE) AC_SUBST(PACKAGE_MAINTAINERS) AC_SUBST(PACKAGE_VENDOR) -AC_SUBST(CORE_LIB_DIR) AC_SUBST(CORE_CONF_DIR) AC_SUBST(CORE_DATA_DIR) -AC_SUBST(CORE_STATE_DIR) # documentation option AC_ARG_ENABLE([docs], diff --git a/daemon/core/api/grpc/client.py b/daemon/core/api/grpc/client.py index 2a5a1d446..2d8adae48 100644 --- a/daemon/core/api/grpc/client.py +++ b/daemon/core/api/grpc/client.py @@ -13,11 +13,6 @@ import grpc from core.api.grpc import core_pb2, core_pb2_grpc, emane_pb2, wrappers -from core.api.grpc.configservices_pb2 import ( - GetConfigServiceDefaultsRequest, - GetConfigServiceRenderedRequest, - GetNodeConfigServiceRequest, -) from core.api.grpc.core_pb2 import ( ExecuteScriptRequest, GetConfigRequest, @@ -39,12 +34,11 @@ SetMobilityConfigRequest, ) from core.api.grpc.services_pb2 import ( - GetNodeServiceFileRequest, + CreateServiceRequest, GetNodeServiceRequest, GetServiceDefaultsRequest, + GetServiceRenderedRequest, ServiceActionRequest, - ServiceDefaults, - SetServiceDefaultsRequest, ) from core.api.grpc.wlan_pb2 import ( GetWlanConfigRequest, @@ -118,6 +112,24 @@ def iter(self): return iter(self.next, None) +class EmaneEventsStreamer: + def __init__(self) -> None: + self.queue: Queue = Queue() + + def send(self, request: Optional[wrappers.EmaneEventsRequest]) -> None: + self.queue.put(request) + + def next(self) -> Optional[emane_pb2.EmaneEventsRequest]: + request: Optional[wrappers.EmaneEventsRequest] = self.queue.get() + if request: + return request.to_proto() + else: + return request + + def iter(self): + return iter(self.next, None) + + class InterfaceHelper: """ Convenience class to help generate IP4 and IP6 addresses for gRPC clients. @@ -326,7 +338,7 @@ def get_session(self, session_id: int) -> wrappers.Session: def alert( self, session_id: int, - level: wrappers.ExceptionLevel, + level: wrappers.AlertLevel, source: str, text: str, node_id: int = None, @@ -707,79 +719,6 @@ def get_config(self) -> wrappers.CoreConfig: response = self.stub.GetConfig(request) return wrappers.CoreConfig.from_proto(response) - def get_service_defaults(self, session_id: int) -> list[wrappers.ServiceDefault]: - """ - Get default services for different default node models. - - :param session_id: session id - :return: list of service defaults - :raises grpc.RpcError: when session doesn't exist - """ - request = GetServiceDefaultsRequest(session_id=session_id) - response = self.stub.GetServiceDefaults(request) - defaults = [] - for default_proto in response.defaults: - default = wrappers.ServiceDefault.from_proto(default_proto) - defaults.append(default) - return defaults - - def set_service_defaults( - self, session_id: int, service_defaults: dict[str, list[str]] - ) -> bool: - """ - Set default services for node models. 
- - :param session_id: session id - :param service_defaults: node models to lists of services - :return: True for success, False otherwise - :raises grpc.RpcError: when session doesn't exist - """ - defaults = [] - for model in service_defaults: - services = service_defaults[model] - default = ServiceDefaults(model=model, services=services) - defaults.append(default) - request = SetServiceDefaultsRequest(session_id=session_id, defaults=defaults) - response = self.stub.SetServiceDefaults(request) - return response.result - - def get_node_service( - self, session_id: int, node_id: int, service: str - ) -> wrappers.NodeServiceData: - """ - Get service data for a node. - - :param session_id: session id - :param node_id: node id - :param service: service name - :return: node service data - :raises grpc.RpcError: when session or node doesn't exist - """ - request = GetNodeServiceRequest( - session_id=session_id, node_id=node_id, service=service - ) - response = self.stub.GetNodeService(request) - return wrappers.NodeServiceData.from_proto(response.service) - - def get_node_service_file( - self, session_id: int, node_id: int, service: str, file_name: str - ) -> str: - """ - Get a service file for a node. - - :param session_id: session id - :param node_id: node id - :param service: service name - :param file_name: file name to get data for - :return: file data - :raises grpc.RpcError: when session or node doesn't exist - """ - request = GetNodeServiceFileRequest( - session_id=session_id, node_id=node_id, service=service, file=file_name - ) - response = self.stub.GetNodeServiceFile(request) - return response.data - def service_action( self, session_id: int, @@ -804,30 +743,6 @@ def service_action( response = self.stub.ServiceAction(request) return response.result - def config_service_action( - self, - session_id: int, - node_id: int, - service: str, - action: wrappers.ServiceAction, - ) -> bool: - """ - Send an action to a config service for a node. - - :param session_id: session id - :param node_id: node id - :param service: config service name - :param action: action for service (start, stop, restart, - validate) - :return: True for success, False otherwise - :raises grpc.RpcError: when session or node doesn't exist - """ - request = ServiceActionRequest( - session_id=session_id, node_id=node_id, service=service, action=action.value - ) - response = self.stub.ConfigServiceAction(request) - return response.result - def get_wlan_config( self, session_id: int, node_id: int ) -> dict[str, wrappers.ConfigOption]: @@ -907,7 +822,7 @@ def save_xml(self, session_id: int, file_path: str) -> None: """ request = core_pb2.SaveXmlRequest(session_id=session_id) response = self.stub.SaveXml(request) - with open(file_path, "w") as xml_file: + with open(file_path, "wb") as xml_file: xml_file.write(response.data) def open_xml(self, file_path: Path, start: bool = False) -> tuple[bool, int]: @@ -952,28 +867,28 @@ def get_ifaces(self) -> list[str]: response = self.stub.GetInterfaces(request) return list(response.ifaces) - def get_config_service_defaults( + def get_service_defaults( self, session_id: int, node_id: int, name: str - ) -> wrappers.ConfigServiceDefaults: + ) -> wrappers.ServiceDefaults: """ - Retrieves config service default values. + Retrieves service default values. 
:param session_id: session id to get node from :param node_id: node id to get service data from :param name: name of service to get defaults for - :return: config service defaults + :return: service defaults """ - request = GetConfigServiceDefaultsRequest( + request = GetServiceDefaultsRequest( name=name, session_id=session_id, node_id=node_id ) - response = self.stub.GetConfigServiceDefaults(request) - return wrappers.ConfigServiceDefaults.from_proto(response) + response = self.stub.GetServiceDefaults(request) + return wrappers.ServiceDefaults.from_proto(response) - def get_node_config_service( + def get_node_service( self, session_id: int, node_id: int, name: str ) -> dict[str, str]: """ - Retrieves information for a specific config service on a node. + Retrieves information for a specific service on a node. :param session_id: session node belongs to :param node_id: id of node to get service information from @@ -981,27 +896,27 @@ def get_node_config_service( :return: config dict of names to values :raises grpc.RpcError: when session or node doesn't exist """ - request = GetNodeConfigServiceRequest( + request = GetNodeServiceRequest( session_id=session_id, node_id=node_id, name=name ) - response = self.stub.GetNodeConfigService(request) + response = self.stub.GetNodeService(request) return dict(response.config) - def get_config_service_rendered( + def get_service_rendered( self, session_id: int, node_id: int, name: str ) -> dict[str, str]: """ - Retrieve the rendered config service files for a node. + Retrieve the rendered service files for a node. :param session_id: id of session :param node_id: id of node :param name: name of service :return: dict mapping names of files to rendered data """ - request = GetConfigServiceRenderedRequest( + request = GetServiceRenderedRequest( session_id=session_id, node_id=node_id, name=name ) - response = self.stub.GetConfigServiceRendered(request) + response = self.stub.GetServiceRendered(request) return dict(response.rendered) def get_emane_event_channel( @@ -1066,6 +981,16 @@ def emane_pathlosses(self, streamer: EmanePathlossesStreamer) -> None: """ self.stub.EmanePathlosses(streamer.iter()) + def emane_events(self, streamer: EmaneEventsStreamer) -> None: + """ + Stream EMANE events. + + :param streamer: emane events streamer + :return: nothing + :raises grpc.RpcError: when an event session, node, iface, or nem does not exist + """ + self.stub.EmaneEvents(streamer.iter()) + def linked( self, session_id: int, @@ -1141,6 +1066,20 @@ def get_wireless_config( response = self.stub.GetWirelessConfig(request) return wrappers.ConfigOption.from_dict(response.config) + def create_service( + self, + service: wrappers.Service, + templates: dict[str, str], + recreate: bool = False, + ) -> bool: + request = CreateServiceRequest( + service=service.to_proto(), + templates=templates, + recreate=recreate, + ) + response = self.stub.CreateService(request) + return response.result + def connect(self) -> None: """ Open connection to server, must be closed manually. 
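For reference, a minimal sketch of the renamed client service API in use (not part of the patch). It assumes a running core-daemon, an existing session and node with the hypothetical ids below, and that the client class is CoreGrpcClient with a close() counterpart to the connect() shown above; the service name and templates are illustrative only:

```python
# Minimal sketch of the renamed service API (assumptions: a running core-daemon,
# an existing session/node with the hypothetical ids below, and that
# CoreGrpcClient exposes close() to match connect()).
from core.api.grpc import wrappers
from core.api.grpc.client import CoreGrpcClient

session_id, node_id = 1, 1  # hypothetical ids for an existing session and node
client = CoreGrpcClient()
client.connect()
try:
    # register a custom service through the new CreateService RPC
    service = wrappers.Service(
        group="Custom",
        name="HelloService",
        files=["hello.sh"],
        startup=["bash hello.sh"],
    )
    client.create_service(service, templates={"hello.sh": "echo hello"}, recreate=True)
    # lookups formerly named get_config_service_defaults / get_node_config_service /
    # get_config_service_rendered
    defaults = client.get_service_defaults(session_id, node_id, "HelloService")
    config = client.get_node_service(session_id, node_id, "HelloService")
    rendered = client.get_service_rendered(session_id, node_id, "HelloService")
finally:
    client.close()
```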
diff --git a/daemon/core/api/grpc/events.py b/daemon/core/api/grpc/events.py index 65a202960..9029fe988 100644 --- a/daemon/core/api/grpc/events.py +++ b/daemon/core/api/grpc/events.py @@ -5,14 +5,7 @@ from core.api.grpc import core_pb2, grpcutils from core.api.grpc.grpcutils import convert_link_data -from core.emulator.data import ( - ConfigData, - EventData, - ExceptionData, - FileData, - LinkData, - NodeData, -) +from core.emulator.data import AlertData, EventData, LinkData, NodeData from core.emulator.session import Session logger = logging.getLogger(__name__) @@ -68,68 +61,22 @@ def handle_session_event(event_data: EventData) -> core_pb2.Event: return core_pb2.Event(session_event=session_event) -def handle_config_event(config_data: ConfigData) -> core_pb2.Event: +def handle_alert_event(alert_data: AlertData) -> core_pb2.Event: """ - Handle configuration event when there is configuration event + Handle alert data, when there is an alert event. - :param config_data: configuration data - :return: configuration event + :param alert_data: alert data + :return: alert event """ - config_event = core_pb2.ConfigEvent( - message_type=config_data.message_type, - node_id=config_data.node, - object=config_data.object, - type=config_data.type, - captions=config_data.captions, - bitmap=config_data.bitmap, - data_values=config_data.data_values, - possible_values=config_data.possible_values, - groups=config_data.groups, - iface_id=config_data.iface_id, - network_id=config_data.network_id, - opaque=config_data.opaque, - data_types=config_data.data_types, + alert_event = core_pb2.AlertEvent( + node_id=alert_data.node, + level=alert_data.level.value, + source=alert_data.source, + date=alert_data.date, + text=alert_data.text, + opaque=alert_data.opaque, ) - return core_pb2.Event(config_event=config_event) - - -def handle_exception_event(exception_data: ExceptionData) -> core_pb2.Event: - """ - Handle exception event when there is exception event - - :param exception_data: exception data - :return: exception event - """ - exception_event = core_pb2.ExceptionEvent( - node_id=exception_data.node, - level=exception_data.level.value, - source=exception_data.source, - date=exception_data.date, - text=exception_data.text, - opaque=exception_data.opaque, - ) - return core_pb2.Event(exception_event=exception_event) - - -def handle_file_event(file_data: FileData) -> core_pb2.Event: - """ - Handle file event - - :param file_data: file data - :return: file event - """ - file_event = core_pb2.FileEvent( - message_type=file_data.message_type.value, - node_id=file_data.node, - name=file_data.name, - mode=file_data.mode, - number=file_data.number, - type=file_data.type, - source=file_data.source, - data=file_data.data, - compressed_data=file_data.compressed_data, - ) - return core_pb2.Event(file_event=file_event) + return core_pb2.Event(alert_event=alert_event) class EventStreamer: @@ -158,17 +105,13 @@ def add_handlers(self) -> None: :return: nothing """ if core_pb2.EventType.NODE in self.event_types: - self.session.node_handlers.append(self.queue.put) + self.session.broadcast_manager.add_handler(NodeData, self.queue.put) if core_pb2.EventType.LINK in self.event_types: - self.session.link_handlers.append(self.queue.put) - if core_pb2.EventType.CONFIG in self.event_types: - self.session.config_handlers.append(self.queue.put) - if core_pb2.EventType.FILE in self.event_types: - self.session.file_handlers.append(self.queue.put) + self.session.broadcast_manager.add_handler(LinkData, self.queue.put) if 
core_pb2.EventType.EXCEPTION in self.event_types: - self.session.exception_handlers.append(self.queue.put) + self.session.broadcast_manager.add_handler(AlertData, self.queue.put) if core_pb2.EventType.SESSION in self.event_types: - self.session.event_handlers.append(self.queue.put) + self.session.broadcast_manager.add_handler(EventData, self.queue.put) def process(self) -> Optional[core_pb2.Event]: """ @@ -185,12 +128,8 @@ def process(self) -> Optional[core_pb2.Event]: event = handle_link_event(data) elif isinstance(data, EventData): event = handle_session_event(data) - elif isinstance(data, ConfigData): - event = handle_config_event(data) - elif isinstance(data, ExceptionData): - event = handle_exception_event(data) - elif isinstance(data, FileData): - event = handle_file_event(data) + elif isinstance(data, AlertData): + event = handle_alert_event(data) else: logger.error("unknown event: %s", data) except Empty: @@ -206,14 +145,10 @@ def remove_handlers(self) -> None: :return: nothing """ if core_pb2.EventType.NODE in self.event_types: - self.session.node_handlers.remove(self.queue.put) + self.session.broadcast_manager.remove_handler(NodeData, self.queue.put) if core_pb2.EventType.LINK in self.event_types: - self.session.link_handlers.remove(self.queue.put) - if core_pb2.EventType.CONFIG in self.event_types: - self.session.config_handlers.remove(self.queue.put) - if core_pb2.EventType.FILE in self.event_types: - self.session.file_handlers.remove(self.queue.put) + self.session.broadcast_manager.remove_handler(LinkData, self.queue.put) if core_pb2.EventType.EXCEPTION in self.event_types: - self.session.exception_handlers.remove(self.queue.put) + self.session.broadcast_manager.remove_handler(AlertData, self.queue.put) if core_pb2.EventType.SESSION in self.event_types: - self.session.event_handlers.remove(self.queue.put) + self.session.broadcast_manager.remove_handler(EventData, self.queue.put) diff --git a/daemon/core/api/grpc/grpcutils.py b/daemon/core/api/grpc/grpcutils.py index f89144e4b..14eb2fe96 100644 --- a/daemon/core/api/grpc/grpcutils.py +++ b/daemon/core/api/grpc/grpcutils.py @@ -4,18 +4,13 @@ from typing import Any, Optional, Union import grpc +from google.protobuf.internal.containers import RepeatedCompositeFieldContainer +from google.protobuf.message import Message from grpc import ServicerContext from core import utils -from core.api.grpc import common_pb2, core_pb2, wrappers -from core.api.grpc.configservices_pb2 import ConfigServiceConfig +from core.api.grpc import common_pb2, core_pb2, services_pb2, wrappers from core.api.grpc.emane_pb2 import NodeEmaneConfig -from core.api.grpc.services_pb2 import ( - NodeServiceConfig, - NodeServiceData, - ServiceConfig, - ServiceDefaults, -) from core.config import ConfigurableOptions from core.emane.nodes import EmaneNet, EmaneOptions from core.emulator.data import InterfaceData, LinkData, LinkOptions @@ -34,11 +29,9 @@ ) from core.nodes.docker import DockerNode, DockerOptions from core.nodes.interface import CoreInterface -from core.nodes.lxd import LxcNode, LxcOptions -from core.nodes.network import CoreNetwork, CtrlNet, PtpNet, WlanNode +from core.nodes.network import CoreNetwork, WlanNode from core.nodes.podman import PodmanNode, PodmanOptions from core.nodes.wireless import WirelessNode -from core.services.coreservices import CoreService logger = logging.getLogger(__name__) WORKERS = 10 @@ -79,11 +72,12 @@ def add_node_data( if isinstance(options, CoreNodeOptions): options.model = node_proto.model options.services = 
node_proto.services - options.config_services = node_proto.config_services if isinstance(options, EmaneOptions): options.emane_model = node_proto.emane - if isinstance(options, (DockerOptions, LxcOptions, PodmanOptions)): + if isinstance(options, (DockerOptions, PodmanOptions)): options.image = node_proto.image + options.compose = node_proto.compose + options.compose_name = node_proto.compose_name position = Position() position.set(node_proto.position.x, node_proto.position.y) if node_proto.HasField("geo"): @@ -146,7 +140,7 @@ def add_link_data( def create_nodes( - session: Session, node_protos: list[core_pb2.Node] + session: Session, node_protos: RepeatedCompositeFieldContainer[core_pb2.Node] ) -> tuple[list[NodeBase], list[Exception]]: """ Create nodes using a thread pool and wait for completion. @@ -272,6 +266,7 @@ def get_config_options( value=value, type=configuration.type.value, select=configuration.options, + regex=configuration.regex, ) results[configuration.id] = config_option for config_group in configurable_options.config_groups(): @@ -301,12 +296,11 @@ def get_node_proto( geo = core_pb2.Geo( lat=node.position.lat, lon=node.position.lon, alt=node.position.alt ) - services = [x.name for x in node.services] node_dir = None - config_services = [] + services = [] if isinstance(node, CoreNodeBase): node_dir = str(node.directory) - config_services = [x for x in node.config_services] + services = [x for x in node.services] channel = None if isinstance(node, CoreNode): channel = str(node.ctrlchnlname) @@ -314,8 +308,12 @@ def get_node_proto( if isinstance(node, EmaneNet): emane_model = node.wireless_model.name image = None - if isinstance(node, (DockerNode, LxcNode, PodmanNode)): + compose = None + compose_name = None + if isinstance(node, (DockerNode, PodmanNode)): image = node.image + compose = node.compose + compose_name = node.compose_name # check for wlan config wlan_config = session.mobility.get_configs( node.id, config_type=BasicRangeModel.name @@ -344,26 +342,12 @@ def get_node_proto( if mobility_config: mobility_config = get_config_options(mobility_config, Ns2ScriptedMobility) # check for service configs - custom_services = session.services.custom_services.get(node.id) service_configs = {} - if custom_services: - for service in custom_services.values(): - service_proto = get_service_configuration(service) - service_configs[service.name] = NodeServiceConfig( - node_id=node.id, - service=service.name, - data=service_proto, - files=service.config_data, - ) - # check for config service configs - config_service_configs = {} if isinstance(node, CoreNode): - for service in node.config_services.values(): + for service in node.services.values(): if not service.custom_templates and not service.custom_config: continue - config_service_configs[service.name] = ConfigServiceConfig( - node_id=node.id, - name=service.name, + service_configs[service.name] = services_pb2.ServiceConfig( templates=service.custom_templates, config=service.custom_config, ) @@ -375,10 +359,11 @@ def get_node_proto( type=node_type.value, position=position, geo=geo, - services=services, icon=node.icon, image=image, - config_services=config_services, + compose=compose, + compose_name=compose_name, + services=services, dir=node_dir, channel=channel, canvas=node.canvas, @@ -386,7 +371,6 @@ def get_node_proto( wireless_config=wireless_config, mobility_config=mobility_config, service_configs=service_configs, - config_service_configs=config_service_configs, emane_configs=emane_configs, ) @@ -624,49 +608,6 @@ def 
session_location(session: Session, location: core_pb2.SessionLocation) -> No session.location.refscale = location.scale -def service_configuration(session: Session, config: ServiceConfig) -> None: - """ - Convenience method for setting a node service configuration. - - :param session: session for service configuration - :param config: service configuration - :return: - """ - session.services.set_service(config.node_id, config.service) - service = session.services.get_service(config.node_id, config.service) - if config.files: - service.configs = tuple(config.files) - if config.directories: - service.dirs = tuple(config.directories) - if config.startup: - service.startup = tuple(config.startup) - if config.validate: - service.validate = tuple(config.validate) - if config.shutdown: - service.shutdown = tuple(config.shutdown) - - -def get_service_configuration(service: CoreService) -> NodeServiceData: - """ - Convenience for converting a service to service data proto. - - :param service: service to get proto data for - :return: service proto data - """ - return NodeServiceData( - executables=service.executables, - dependencies=service.dependencies, - dirs=service.dirs, - configs=service.configs, - startup=service.startup, - validate=service.validate, - validation_mode=service.validation_mode.value, - validation_timer=service.validation_timer, - shutdown=service.shutdown, - meta=service.meta, - ) - - def iface_to_proto(session: Session, iface: CoreInterface) -> core_pb2.Interface: """ Convenience for converting a core interface to the protobuf representation. @@ -760,28 +701,13 @@ def get_hooks(session: Session) -> list[core_pb2.Hook]: :return: list of hook protobufs """ hooks = [] - for state in session.hooks: - state_hooks = session.hooks[state] - for file_name, file_data in state_hooks: + for state, state_hooks in session.hook_manager.script_hooks.items(): + for file_name, file_data in state_hooks.items(): hook = core_pb2.Hook(state=state.value, file=file_name, data=file_data) hooks.append(hook) return hooks -def get_default_services(session: Session) -> list[ServiceDefaults]: - """ - Retrieve the default service sets for a given session. 
- - :param session: session to get default service sets for - :return: list of default service sets - """ - default_services = [] - for model, services in session.services.default_services.items(): - default_service = ServiceDefaults(model=model, services=services) - default_services.append(default_service) - return default_services - - def get_mobility_node( session: Session, node_id: int, context: ServicerContext ) -> Union[WlanNode, EmaneNet]: @@ -814,16 +740,14 @@ def convert_session(session: Session) -> wrappers.Session: links = [] for _id in session.nodes: node = session.nodes[_id] - if not isinstance(node, (PtpNet, CtrlNet)): - node_emane_configs = emane_configs.get(node.id, []) - node_proto = get_node_proto(session, node, node_emane_configs) - nodes.append(node_proto) + node_emane_configs = emane_configs.get(node.id, []) + node_proto = get_node_proto(session, node, node_emane_configs) + nodes.append(node_proto) if isinstance(node, (WlanNode, EmaneNet)): for link_data in node.links(): links.append(convert_link_data(link_data)) for core_link in session.link_manager.links(): links.extend(convert_core_link(core_link)) - default_services = get_default_services(session) x, y, z = session.location.refxyz lat, lon, alt = session.location.refgeo location = core_pb2.SessionLocation( @@ -836,6 +760,10 @@ def convert_session(session: Session) -> wrappers.Session: core_pb2.Server(name=x.name, host=x.host) for x in session.distributed.servers.values() ] + default_services = [] + for group, services in session.service_manager.defaults.items(): + defaults = services_pb2.ServiceDefaults(model=group, services=services) + default_services.append(defaults) return core_pb2.Session( id=session.id, state=session.state.value, @@ -878,31 +806,19 @@ def configure_node( if isinstance(core_node, WirelessNode) and node.wireless_config: config = {k: v.value for k, v in node.wireless_config.items()} core_node.set_config(config) - for service_name, service_config in node.service_configs.items(): - data = service_config.data - config = ServiceConfig( - node_id=node.id, - service=service_name, - startup=data.startup, - validate=data.validate, - shutdown=data.shutdown, - files=data.configs, - directories=data.dirs, - ) - service_configuration(session, config) - for file_name, file_data in service_config.files.items(): - session.services.set_service_file( - node.id, service_name, file_name, file_data - ) - if node.config_service_configs: + if node.service_configs: if not isinstance(core_node, CoreNode): context.abort( grpc.StatusCode.INVALID_ARGUMENT, - "invalid node type with config service configs", + "invalid node type with service configs", ) - for service_name, service_config in node.config_service_configs.items(): - service = core_node.config_services[service_name] + for service_name, service_config in node.service_configs.items(): + service = core_node.services[service_name] if service_config.config: - service.set_config(service_config.config) + service.set_config(dict(service_config.config)) for name, template in service_config.templates.items(): service.set_template(name, template) + + +def get_optional(message: Message, name: str) -> Optional[Any]: + return getattr(message, name) if message.HasField(name) else None diff --git a/daemon/core/api/grpc/server.py b/daemon/core/api/grpc/server.py index 6a86ab0ae..60f183c89 100644 --- a/daemon/core/api/grpc/server.py +++ b/daemon/core/api/grpc/server.py @@ -15,22 +15,7 @@ from grpc import ServicerContext from core import utils -from core.api.grpc import ( - 
common_pb2, - configservices_pb2, - core_pb2, - core_pb2_grpc, - grpcutils, -) -from core.api.grpc.configservices_pb2 import ( - ConfigService, - GetConfigServiceDefaultsRequest, - GetConfigServiceDefaultsResponse, - GetConfigServiceRenderedRequest, - GetConfigServiceRenderedResponse, - GetNodeConfigServiceRequest, - GetNodeConfigServiceResponse, -) +from core.api.grpc import common_pb2, core_pb2, core_pb2_grpc, grpcutils, services_pb2 from core.api.grpc.core_pb2 import ( ExecuteScriptResponse, GetWirelessConfigRequest, @@ -43,6 +28,8 @@ WirelessLinkedResponse, ) from core.api.grpc.emane_pb2 import ( + EmaneEventsRequest, + EmaneEventsResponse, EmaneLinkRequest, EmaneLinkResponse, EmanePathlossesRequest, @@ -66,18 +53,17 @@ SetMobilityConfigResponse, ) from core.api.grpc.services_pb2 import ( - GetNodeServiceFileRequest, - GetNodeServiceFileResponse, + CreateServiceRequest, + CreateServiceResponse, GetNodeServiceRequest, GetNodeServiceResponse, GetServiceDefaultsRequest, GetServiceDefaultsResponse, - Service, + GetServiceRenderedRequest, + GetServiceRenderedResponse, ServiceAction, ServiceActionRequest, ServiceActionResponse, - SetServiceDefaultsRequest, - SetServiceDefaultsResponse, ) from core.api.grpc.wlan_pb2 import ( GetWlanConfigRequest, @@ -87,26 +73,25 @@ WlanLinkRequest, WlanLinkResponse, ) -from core.configservice.base import ConfigServiceBootError from core.emane.modelmanager import EmaneModelManager from core.emulator.coreemu import CoreEmu from core.emulator.data import InterfaceData, LinkData, LinkOptions -from core.emulator.enumerations import ( - EventTypes, - ExceptionLevels, - MessageFlags, - NodeTypes, -) +from core.emulator.enumerations import AlertLevels, EventTypes, MessageFlags, NodeTypes from core.emulator.session import NT, Session from core.errors import CoreCommandError, CoreError from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility from core.nodes.base import CoreNode, NodeBase from core.nodes.network import CoreNetwork, WlanNode from core.nodes.wireless import WirelessNode -from core.services.coreservices import ServiceManager +from core.services.base import ( + CoreService, + CustomCoreService, + ServiceBootError, + ServiceMode, +) +from core.xml.corexml import CoreXmlWriter logger = logging.getLogger(__name__) -_ONE_DAY_IN_SECONDS: int = 60 * 60 * 24 _INTERFACE_REGEX: Pattern[str] = re.compile(r"beth(?P[0-9a-fA-F]+)") _MAX_WORKERS = 1000 @@ -150,10 +135,8 @@ def listen(self, address: str) -> None: core_pb2_grpc.add_CoreApiServicer_to_server(self, self.server) self.server.add_insecure_port(address) self.server.start() - try: - while True: - time.sleep(_ONE_DAY_IN_SECONDS) + self.server.wait_for_termination() except KeyboardInterrupt: self.server.stop(None) @@ -212,7 +195,7 @@ def move_node( def validate_service( self, name: str, context: ServicerContext - ) -> type[ConfigService]: + ) -> type[CoreService]: """ Validates a configuration service is a valid known service. 
@@ -230,13 +213,8 @@ def GetConfig( self, request: core_pb2.GetConfigRequest, context: ServicerContext ) -> core_pb2.GetConfigResponse: services = [] - for name in ServiceManager.services: - service = ServiceManager.services[name] - service_proto = Service(group=service.group, name=service.name) - services.append(service_proto) - config_services = [] for service in self.coreemu.service_manager.services.values(): - service_proto = ConfigService( + service_proto = services_pb2.Service( name=service.name, group=service.group, executables=service.executables, @@ -250,11 +228,10 @@ def GetConfig( validation_timer=service.validation_timer, validation_period=service.validation_period, ) - config_services.append(service_proto) + services.append(service_proto) emane_models = [x.name for x in EmaneModelManager.models.values()] return core_pb2.GetConfigResponse( services=services, - config_services=config_services, emane_models=emane_models, ) @@ -287,6 +264,7 @@ def StartSession( if option.value: session.options.set(option.name, option.value) session.metadata = dict(request.session.metadata) + session.parse_options() # add servers for server in request.session.servers: @@ -457,9 +435,9 @@ def SessionAlert( self, request: core_pb2.SessionAlertRequest, context: ServicerContext ) -> core_pb2.SessionAlertResponse: session = self.get_session(request.session_id, context) - level = ExceptionLevels(request.level) + level = AlertLevels(request.level) node_id = request.node_id if request.node_id else None - session.exception(level, request.source, request.text, node_id) + session.broadcast_alert(level, request.source, request.text, node_id) return core_pb2.SessionAlertResponse(result=True) def Events(self, request: core_pb2.EventsRequest, context: ServicerContext) -> None: @@ -885,7 +863,9 @@ def SetMobilityConfig( session = self.get_session(request.session_id, context) mobility_config = request.mobility_config session.mobility.set_model_config( - mobility_config.node_id, Ns2ScriptedMobility.name, mobility_config.config + mobility_config.node_id, + Ns2ScriptedMobility.name, + dict(mobility_config.config), ) return SetMobilityConfigResponse(result=True) @@ -918,124 +898,11 @@ def MobilityAction( result = False return MobilityActionResponse(result=result) - def GetServiceDefaults( - self, request: GetServiceDefaultsRequest, context: ServicerContext - ) -> GetServiceDefaultsResponse: - """ - Retrieve all the default services of all node types in a session - - :param request: get-default-service request - :param context: context object - :return: get-service-defaults response about all the available default services - """ - logger.debug("get service defaults: %s", request) - session = self.get_session(request.session_id, context) - defaults = grpcutils.get_default_services(session) - return GetServiceDefaultsResponse(defaults=defaults) - - def SetServiceDefaults( - self, request: SetServiceDefaultsRequest, context: ServicerContext - ) -> SetServiceDefaultsResponse: - """ - Set new default services to the session after whipping out the old ones - - :param request: set-service-defaults request - :param context: context object - :return: set-service-defaults response - """ - logger.debug("set service defaults: %s", request) - session = self.get_session(request.session_id, context) - session.services.default_services.clear() - for service_defaults in request.defaults: - session.services.default_services[ - service_defaults.model - ] = service_defaults.services - return SetServiceDefaultsResponse(result=True) - - def 
GetNodeService( - self, request: GetNodeServiceRequest, context: ServicerContext - ) -> GetNodeServiceResponse: - """ - Retrieve a requested service from a node - - :param request: get-node-service - request - :param context: context object - :return: get-node-service response about the requested service - """ - logger.debug("get node service: %s", request) - session = self.get_session(request.session_id, context) - service = session.services.get_service( - request.node_id, request.service, default_service=True - ) - service_proto = grpcutils.get_service_configuration(service) - return GetNodeServiceResponse(service=service_proto) - - def GetNodeServiceFile( - self, request: GetNodeServiceFileRequest, context: ServicerContext - ) -> GetNodeServiceFileResponse: - """ - Retrieve a requested service file from a node - - :param request: - get-node-service request - :param context: context object - :return: get-node-service response about the requested service - """ - logger.debug("get node service file: %s", request) - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - file_data = session.services.get_service_file( - node, request.service, request.file - ) - return GetNodeServiceFileResponse(data=file_data.data) - def ServiceAction( self, request: ServiceActionRequest, context: ServicerContext ) -> ServiceActionResponse: """ - Take action whether to start, stop, restart, validate the service or none of - the above. - - :param request: service-action request - :param context: context object - :return: service-action response about status of action - """ - logger.debug("service action: %s", request) - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - service = None - for current_service in node.services: - if current_service.name == request.service: - service = current_service - break - - if not service: - context.abort(grpc.StatusCode.NOT_FOUND, "service not found") - - status = -1 - if request.action == ServiceAction.START: - status = session.services.startup_service(node, service, wait=True) - elif request.action == ServiceAction.STOP: - status = session.services.stop_service(node, service) - elif request.action == ServiceAction.RESTART: - status = session.services.stop_service(node, service) - if not status: - status = session.services.startup_service(node, service, wait=True) - elif request.action == ServiceAction.VALIDATE: - status = session.services.validate_service(node, service) - - result = False - if not status: - result = True - - return ServiceActionResponse(result=result) - - def ConfigServiceAction( - self, request: ServiceActionRequest, context: ServicerContext - ) -> ServiceActionResponse: - """ - Take action whether to start, stop, restart, validate the config service or + Take action whether to start, stop, restart, validate the service or none of the above. 
:param request: service action request @@ -1045,15 +912,17 @@ def ConfigServiceAction( logger.debug("service action: %s", request) session = self.get_session(request.session_id, context) node = self.get_node(session, request.node_id, context, CoreNode) - service = node.config_services.get(request.service) + service = node.services.get(request.service) if not service: - context.abort(grpc.StatusCode.NOT_FOUND, "config service not found") + context.abort( + grpc.StatusCode.NOT_FOUND, f"service({request.service}) not found" + ) result = False if request.action == ServiceAction.START: try: service.start() result = True - except ConfigServiceBootError: + except ServiceBootError: pass elif request.action == ServiceAction.STOP: service.stop() @@ -1063,13 +932,13 @@ def ConfigServiceAction( try: service.start() result = True - except ConfigServiceBootError: + except ServiceBootError: pass elif request.action == ServiceAction.VALIDATE: try: service.run_validation() result = True - except ConfigServiceBootError: + except ServiceBootError: pass return ServiceActionResponse(result=result) @@ -1104,7 +973,7 @@ def SetWlanConfig( logger.debug("set wlan config: %s", request) session = self.get_session(request.session_id, context) node_id = request.wlan_config.node_id - config = request.wlan_config.config + config = dict(request.wlan_config.config) session.mobility.set_model_config(node_id, BasicRangeModel.name, config) if session.is_running(): node = self.get_node(session, node_id, context, WlanNode) @@ -1145,7 +1014,7 @@ def SetEmaneModelConfig( session = self.get_session(request.session_id, context) model_config = request.emane_model_config _id = utils.iface_config_id(model_config.node_id, model_config.iface_id) - session.emane.set_config(_id, model_config.model, model_config.config) + session.emane.set_config(_id, model_config.model, dict(model_config.config)) return SetEmaneModelConfigResponse(result=True) def SaveXml( @@ -1160,10 +1029,8 @@ def SaveXml( """ logger.debug("save xml: %s", request) session = self.get_session(request.session_id, context) - _, temp_path = tempfile.mkstemp() - session.save_xml(temp_path) - with open(temp_path, "r") as xml_file: - data = xml_file.read() + xml_writer = CoreXmlWriter(session) + data = xml_writer.get_data() return core_pb2.SaveXmlResponse(data=data) def OpenXml( @@ -1233,57 +1100,57 @@ def EmaneLink( else: return EmaneLinkResponse(result=False) - def GetNodeConfigService( - self, request: GetNodeConfigServiceRequest, context: ServicerContext - ) -> GetNodeConfigServiceResponse: + def GetNodeService( + self, request: GetNodeServiceRequest, context: ServicerContext + ) -> GetNodeServiceResponse: """ - Gets configuration, for a given configuration service, for a given node. + Gets configuration, for a given service, for a given node. 
- :param request: get node config service request + :param request: get node service request :param context: grpc context - :return: get node config service response + :return: get node service response """ session = self.get_session(request.session_id, context) node = self.get_node(session, request.node_id, context, CoreNode) self.validate_service(request.name, context) - service = node.config_services.get(request.name) + service = node.services.get(request.name) if service: config = service.render_config() else: service = self.coreemu.service_manager.get_service(request.name) config = {x.id: x.default for x in service.default_configs} - return GetNodeConfigServiceResponse(config=config) + return GetNodeServiceResponse(config=config) - def GetConfigServiceRendered( - self, request: GetConfigServiceRenderedRequest, context: ServicerContext - ) -> GetConfigServiceRenderedResponse: + def GetServiceRendered( + self, request: GetServiceRenderedRequest, context: ServicerContext + ) -> GetServiceRenderedResponse: """ - Retrieves the rendered file data for a given config service on a node. + Retrieves the rendered file data for a given service on a node. - :param request: config service render request + :param request: service render request :param context: grpc context - :return: rendered config service files + :return: rendered service files """ session = self.get_session(request.session_id, context) node = self.get_node(session, request.node_id, context, CoreNode) self.validate_service(request.name, context) - service = node.config_services.get(request.name) + service = node.services.get(request.name) if not service: context.abort( grpc.StatusCode.NOT_FOUND, f"unknown node service {request.name}" ) rendered = service.get_rendered_templates() - return GetConfigServiceRenderedResponse(rendered=rendered) + return GetServiceRenderedResponse(rendered=rendered) - def GetConfigServiceDefaults( - self, request: GetConfigServiceDefaultsRequest, context: ServicerContext - ) -> GetConfigServiceDefaultsResponse: + def GetServiceDefaults( + self, request: GetServiceDefaultsRequest, context: ServicerContext + ) -> GetServiceDefaultsResponse: """ - Get default values for a given configuration service. + Get default values for a given service. 
- :param request: get config service defaults request + :param request: get service defaults request :param context: grpc context - :return: get config service defaults response + :return: get service defaults response """ session = self.get_session(request.session_id, context) node = self.get_node(session, request.node_id, context, CoreNode) @@ -1303,9 +1170,9 @@ def GetConfigServiceDefaults( config[configuration.id] = config_option modes = [] for name, mode_config in service.modes.items(): - mode = configservices_pb2.ConfigMode(name=name, config=mode_config) + mode = services_pb2.ConfigMode(name=name, config=mode_config) modes.append(mode) - return GetConfigServiceDefaultsResponse( + return GetServiceDefaultsResponse( templates=templates, config=config, modes=modes ) @@ -1313,7 +1180,7 @@ def GetEmaneEventChannel( self, request: GetEmaneEventChannelRequest, context: ServicerContext ) -> GetEmaneEventChannelResponse: session = self.get_session(request.session_id, context) - service = session.emane.nem_service.get(request.nem_id) + service = session.emane.event_manager.get_service(request.nem_id) if not service: context.abort(grpc.StatusCode.NOT_FOUND, f"unknown nem id {request.nem_id}") return GetEmaneEventChannelResponse( @@ -1375,7 +1242,9 @@ def EmanePathlosses( nem1 = grpcutils.get_nem_id(session, node1, request.iface1_id, context) node2 = self.get_node(session, request.node2_id, context, CoreNode) nem2 = grpcutils.get_nem_id(session, node2, request.iface2_id, context) - session.emane.publish_pathloss(nem1, nem2, request.rx1, request.rx2) + session.emane.event_manager.publish_pathloss( + nem1, nem2, forward1=request.rx1, forward2=request.rx2 + ) return EmanePathlossesResponse() def Linked( @@ -1434,3 +1303,148 @@ def GetWirelessConfig( ) config_options[config.id] = config_option return GetWirelessConfigResponse(config=config_options) + + def EmaneEvents( + self, + request_iterator: Iterable[EmaneEventsRequest], + context: ServicerContext, + ) -> EmaneEventsResponse: + for request in request_iterator: + session = self.get_session(request.session_id, context) + if request.HasField("location"): + location = request.location + if location.HasField("nem_id"): + nem_id = location.nem_id + else: + node = self.get_node(session, location.node_id, context, CoreNode) + nem_id = grpcutils.get_nem_id( + session, node, location.iface_id, context + ) + session.emane.event_manager.publish_location( + nem_id, + location.lon, + location.lat, + location.alt, + grpcutils.get_optional(location, "azimuth"), + grpcutils.get_optional(location, "elevation"), + grpcutils.get_optional(location, "magnitude"), + grpcutils.get_optional(location, "roll"), + grpcutils.get_optional(location, "pitch"), + grpcutils.get_optional(location, "yaw"), + ) + elif request.HasField("comm_effect"): + comm_effect = request.comm_effect + if comm_effect.HasField("nem1_id"): + nem1_id = comm_effect.nem1_id + else: + node1 = self.get_node( + session, comm_effect.node1_id, context, CoreNode + ) + nem1_id = grpcutils.get_nem_id( + session, node1, comm_effect.iface1_id, context + ) + if comm_effect.HasField("nem2_id"): + nem2_id = comm_effect.nem2_id + else: + node2 = self.get_node( + session, comm_effect.node2_id, context, CoreNode + ) + nem2_id = grpcutils.get_nem_id( + session, node2, comm_effect.iface2_id, context + ) + session.emane.event_manager.publish_comm_effect( + nem1_id, + nem2_id, + comm_effect.delay, + comm_effect.jitter, + comm_effect.loss, + comm_effect.dup, + comm_effect.unicast, + comm_effect.broadcast, + ) + elif 
request.HasField("pathloss"): + pathloss = request.pathloss + if pathloss.HasField("nem1_id"): + nem1_id = pathloss.nem1_id + else: + node1 = self.get_node(session, pathloss.node1_id, context, CoreNode) + nem1_id = grpcutils.get_nem_id( + session, node1, pathloss.iface1_id, context + ) + if pathloss.HasField("nem2_id"): + nem2_id = pathloss.nem2_id + else: + node2 = self.get_node(session, pathloss.node2_id, context, CoreNode) + nem2_id = grpcutils.get_nem_id( + session, node2, pathloss.iface2_id, context + ) + session.emane.event_manager.publish_pathloss( + nem1_id, + nem2_id, + grpcutils.get_optional(pathloss, "forward1"), + grpcutils.get_optional(pathloss, "reverse1"), + grpcutils.get_optional(pathloss, "forward2"), + grpcutils.get_optional(pathloss, "reverse2"), + ) + elif request.HasField("antenna"): + antenna = request.antenna + if antenna.HasField("nem_id"): + nem_id = antenna.nem_id + else: + node = self.get_node(session, antenna.node_id, context, CoreNode) + nem_id = grpcutils.get_nem_id( + session, node, antenna.iface_id, context + ) + session.emane.event_manager.publish_antenna_profile( + nem_id, + antenna.profile, + antenna.azimuth, + antenna.elevation, + ) + elif request.HasField("fading"): + fading = request.fading + if fading.HasField("nem_id"): + nem_id = fading.nem_id + else: + node = self.get_node(session, fading.node_id, context, CoreNode) + nem_id = grpcutils.get_nem_id( + session, node, fading.iface_id, context + ) + session.emane.event_manager.publish_fading_selection( + nem_id, fading.model + ) + return EmaneEventsResponse() + + def CreateService( + self, + request: CreateServiceRequest, + context: ServicerContext, + ) -> CreateServiceResponse: + service = request.service + class_name = f"{service.name.capitalize()}Class" + custom_class = type( + class_name, (CustomCoreService,), dict(__init__=CustomCoreService.__init__) + ) + custom_class.name = service.name + custom_class.group = service.group + custom_class.executables = list(service.executables) + custom_class.dependencies = list(service.dependencies) + custom_class.directories = list(service.directories) + custom_class.files = list(service.files) + custom_class.startup = list(service.startup) + custom_class.validate = list(service.validate) + custom_class.shutdown = list(service.shutdown) + custom_class.validation_mode = ServiceMode(service.validation_mode) + custom_class.validation_timer = service.validation_timer + custom_class.validation_period = service.validation_period + custom_class.defined_templates = dict(request.templates) + result = False + if request.recreate: + self.coreemu.service_manager.services.pop(custom_class.name, None) + try: + if issubclass(custom_class, CustomCoreService): + self.coreemu.service_manager.add(custom_class) + result = True + except CoreError as e: + logger.error("error creating custom service: %s", e) + return CreateServiceResponse(result=result) diff --git a/daemon/core/api/grpc/wrappers.py b/daemon/core/api/grpc/wrappers.py index f84e6a082..9627fa679 100644 --- a/daemon/core/api/grpc/wrappers.py +++ b/daemon/core/api/grpc/wrappers.py @@ -1,21 +1,11 @@ from dataclasses import dataclass, field from enum import Enum from pathlib import Path -from typing import Any, Optional +from typing import Optional -from core.api.grpc import ( - common_pb2, - configservices_pb2, - core_pb2, - emane_pb2, - services_pb2, -) +from google.protobuf.internal.containers import MessageMap - -class ConfigServiceValidationMode(Enum): - BLOCKING = 0 - NON_BLOCKING = 1 - TIMER = 2 +from core.api.grpc 
import common_pb2, core_pb2, emane_pb2, services_pb2 class ServiceValidationMode(Enum): @@ -66,7 +56,6 @@ class NodeType(Enum): PEER_TO_PEER = 12 CONTROL_NET = 13 DOCKER = 15 - LXC = 16 WIRELESS = 17 PODMAN = 18 @@ -76,7 +65,7 @@ class LinkType(Enum): WIRED = 1 -class ExceptionLevel(Enum): +class AlertLevel(Enum): DEFAULT = 0 FATAL = 1 ERROR = 2 @@ -106,82 +95,77 @@ class EventType: SESSION = 0 NODE = 1 LINK = 2 - CONFIG = 3 EXCEPTION = 4 - FILE = 5 @dataclass -class ConfigService: +class Service: group: str name: str - executables: list[str] - dependencies: list[str] - directories: list[str] - files: list[str] - startup: list[str] - validate: list[str] - shutdown: list[str] - validation_mode: ConfigServiceValidationMode - validation_timer: int - validation_period: float + executables: list[str] = field(default_factory=list) + dependencies: list[str] = field(default_factory=list) + directories: list[str] = field(default_factory=list) + files: list[str] = field(default_factory=list) + startup: list[str] = field(default_factory=list) + validate: list[str] = field(default_factory=list) + shutdown: list[str] = field(default_factory=list) + validation_mode: ServiceValidationMode = ServiceValidationMode.NON_BLOCKING + validation_timer: int = 0 + validation_period: float = 0.0 @classmethod - def from_proto(cls, proto: configservices_pb2.ConfigService) -> "ConfigService": - return ConfigService( + def from_proto(cls, proto: services_pb2.Service) -> "Service": + return Service( group=proto.group, name=proto.name, - executables=proto.executables, - dependencies=proto.dependencies, - directories=proto.directories, - files=proto.files, - startup=proto.startup, - validate=proto.validate, - shutdown=proto.shutdown, - validation_mode=ConfigServiceValidationMode(proto.validation_mode), + executables=list(proto.executables), + dependencies=list(proto.dependencies), + directories=list(proto.directories), + files=list(proto.files), + startup=list(proto.startup), + validate=list(proto.validate), + shutdown=list(proto.shutdown), + validation_mode=ServiceValidationMode(proto.validation_mode), validation_timer=proto.validation_timer, validation_period=proto.validation_period, ) - -@dataclass -class ConfigServiceConfig: - node_id: int - name: str - templates: dict[str, str] - config: dict[str, str] - - @classmethod - def from_proto( - cls, proto: configservices_pb2.ConfigServiceConfig - ) -> "ConfigServiceConfig": - return ConfigServiceConfig( - node_id=proto.node_id, - name=proto.name, - templates=dict(proto.templates), - config=dict(proto.config), + def to_proto(self) -> services_pb2.Service: + return services_pb2.Service( + group=self.group, + name=self.name, + executables=self.executables, + dependencies=self.dependencies, + directories=self.directories, + files=self.files, + startup=self.startup, + validate=self.validate, + shutdown=self.shutdown, + validation_mode=self.validation_mode.value, + validation_timer=self.validation_timer, + validation_period=self.validation_period, ) @dataclass -class ConfigServiceData: +class ServiceData: templates: dict[str, str] = field(default_factory=dict) config: dict[str, str] = field(default_factory=dict) @dataclass -class ConfigServiceDefaults: +class ServiceDefaults: templates: dict[str, str] config: dict[str, "ConfigOption"] modes: dict[str, dict[str, str]] @classmethod def from_proto( - cls, proto: configservices_pb2.GetConfigServiceDefaultsResponse - ) -> "ConfigServiceDefaults": + cls, proto: services_pb2.GetServiceDefaultsResponse + ) -> "ServiceDefaults": config = 
ConfigOption.from_dict(proto.config) modes = {x.name: dict(x.config) for x in proto.modes} - return ConfigServiceDefaults( + return ServiceDefaults( templates=dict(proto.templates), config=config, modes=modes ) @@ -199,16 +183,6 @@ def to_proto(self) -> core_pb2.Server: return core_pb2.Server(name=self.name, host=self.host) -@dataclass -class Service: - group: str - name: str - - @classmethod - def from_proto(cls, proto: services_pb2.Service) -> "Service": - return Service(group=proto.group, name=proto.name) - - @dataclass class ServiceDefault: model: str @@ -219,101 +193,6 @@ def from_proto(cls, proto: services_pb2.ServiceDefaults) -> "ServiceDefault": return ServiceDefault(model=proto.model, services=list(proto.services)) -@dataclass -class NodeServiceData: - executables: list[str] = field(default_factory=list) - dependencies: list[str] = field(default_factory=list) - dirs: list[str] = field(default_factory=list) - configs: list[str] = field(default_factory=list) - startup: list[str] = field(default_factory=list) - validate: list[str] = field(default_factory=list) - validation_mode: ServiceValidationMode = ServiceValidationMode.NON_BLOCKING - validation_timer: int = 5 - shutdown: list[str] = field(default_factory=list) - meta: str = None - - @classmethod - def from_proto(cls, proto: services_pb2.NodeServiceData) -> "NodeServiceData": - return NodeServiceData( - executables=proto.executables, - dependencies=proto.dependencies, - dirs=proto.dirs, - configs=proto.configs, - startup=proto.startup, - validate=proto.validate, - validation_mode=ServiceValidationMode(proto.validation_mode), - validation_timer=proto.validation_timer, - shutdown=proto.shutdown, - meta=proto.meta, - ) - - def to_proto(self) -> services_pb2.NodeServiceData: - return services_pb2.NodeServiceData( - executables=self.executables, - dependencies=self.dependencies, - dirs=self.dirs, - configs=self.configs, - startup=self.startup, - validate=self.validate, - validation_mode=self.validation_mode.value, - validation_timer=self.validation_timer, - shutdown=self.shutdown, - meta=self.meta, - ) - - -@dataclass -class NodeServiceConfig: - node_id: int - service: str - data: NodeServiceData - files: dict[str, str] = field(default_factory=dict) - - @classmethod - def from_proto(cls, proto: services_pb2.NodeServiceConfig) -> "NodeServiceConfig": - return NodeServiceConfig( - node_id=proto.node_id, - service=proto.service, - data=NodeServiceData.from_proto(proto.data), - files=dict(proto.files), - ) - - -@dataclass -class ServiceConfig: - node_id: int - service: str - files: list[str] = None - directories: list[str] = None - startup: list[str] = None - validate: list[str] = None - shutdown: list[str] = None - - def to_proto(self) -> services_pb2.ServiceConfig: - return services_pb2.ServiceConfig( - node_id=self.node_id, - service=self.service, - files=self.files, - directories=self.directories, - startup=self.startup, - validate=self.validate, - shutdown=self.shutdown, - ) - - -@dataclass -class ServiceFileConfig: - node_id: int - service: str - file: str - data: str = field(repr=False) - - def to_proto(self) -> services_pb2.ServiceFileConfig: - return services_pb2.ServiceFileConfig( - node_id=self.node_id, service=self.service, file=self.file, data=self.data - ) - - @dataclass class BridgeThroughput: node_id: int @@ -398,23 +277,21 @@ def to_proto(self) -> core_pb2.SessionLocation: @dataclass -class ExceptionEvent: +class AlertEvent: session_id: int node_id: int - level: ExceptionLevel + level: AlertLevel source: str date: str 
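For reference, a minimal sketch of round-tripping the reworked `Service` wrapper through its proto conversion, assuming the compiled gRPC protos are available and the wrappers module is importable as `core.api.grpc.wrappers`; the service name and startup command are illustrative:

```python
from core.api.grpc.wrappers import Service, ServiceValidationMode

service = Service(
    group="Utility",
    name="ExampleService",  # illustrative name, not a bundled service
    startup=["bash start.sh"],
    validation_mode=ServiceValidationMode.NON_BLOCKING,
)
proto = service.to_proto()            # services_pb2.Service
restored = Service.from_proto(proto)  # unset fields fall back to the new defaults
assert restored.name == service.name
```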
text: str opaque: str @classmethod - def from_proto( - cls, session_id: int, proto: core_pb2.ExceptionEvent - ) -> "ExceptionEvent": - return ExceptionEvent( + def from_proto(cls, session_id: int, proto: core_pb2.AlertEvent) -> "AlertEvent": + return AlertEvent( session_id=session_id, node_id=proto.node_id, - level=ExceptionLevel(proto.level), + level=AlertLevel(proto.level), source=proto.source, date=proto.date, text=proto.text, @@ -430,15 +307,13 @@ class ConfigOption: type: ConfigOptionType = None group: str = None select: list[str] = None + regex: str = None @classmethod def from_dict( - cls, config: dict[str, common_pb2.ConfigOption] + cls, config: MessageMap[str, common_pb2.ConfigOption] ) -> dict[str, "ConfigOption"]: - d = {} - for key, value in config.items(): - d[key] = ConfigOption.from_proto(value) - return d + return {k: ConfigOption.from_proto(v) for k, v in config.items()} @classmethod def to_dict(cls, config: dict[str, "ConfigOption"]) -> dict[str, str]: @@ -453,7 +328,8 @@ def from_proto(cls, proto: common_pb2.ConfigOption) -> "ConfigOption": value=proto.value, type=config_type, group=proto.group, - select=proto.select, + select=list(proto.select), + regex=proto.regex, ) def to_proto(self) -> common_pb2.ConfigOption: @@ -725,12 +601,13 @@ class Node: name: str = None type: NodeType = NodeType.DEFAULT model: str = None - position: Position = Position(x=0, y=0) + position: Position = field(default_factory=lambda: Position(x=0, y=0)) services: set[str] = field(default_factory=set) - config_services: set[str] = field(default_factory=set) emane: str = None icon: str = None image: str = None + compose: str = None + compose_name: str = None server: str = None geo: Geo = None dir: str = None @@ -744,32 +621,19 @@ class Node: wlan_config: dict[str, ConfigOption] = field(default_factory=dict, repr=False) wireless_config: dict[str, ConfigOption] = field(default_factory=dict, repr=False) mobility_config: dict[str, ConfigOption] = field(default_factory=dict, repr=False) - service_configs: dict[str, NodeServiceData] = field( - default_factory=dict, repr=False - ) - service_file_configs: dict[str, dict[str, str]] = field( - default_factory=dict, repr=False - ) - config_service_configs: dict[str, ConfigServiceData] = field( - default_factory=dict, repr=False - ) + service_configs: dict[str, ServiceData] = field(default_factory=dict, repr=False) @classmethod def from_proto(cls, proto: core_pb2.Node) -> "Node": - service_configs = {} - service_file_configs = {} - for service, node_config in proto.service_configs.items(): - service_configs[service] = NodeServiceData.from_proto(node_config.data) - service_file_configs[service] = dict(node_config.files) emane_configs = {} for emane_config in proto.emane_configs: iface_id = None if emane_config.iface_id == -1 else emane_config.iface_id model = emane_config.model key = (model, iface_id) emane_configs[key] = ConfigOption.from_dict(emane_config.config) - config_service_configs = {} - for service, service_config in proto.config_service_configs.items(): - config_service_configs[service] = ConfigServiceData( + service_configs = {} + for service, service_config in proto.service_configs.items(): + service_configs[service] = ServiceData( templates=dict(service_config.templates), config=dict(service_config.config), ) @@ -780,10 +644,11 @@ def from_proto(cls, proto: core_pb2.Node) -> "Node": model=proto.model or None, position=Position.from_proto(proto.position), services=set(proto.services), - config_services=set(proto.config_services), 
emane=proto.emane, icon=proto.icon, image=proto.image, + compose=proto.compose, + compose_name=proto.compose_name, server=proto.server, geo=Geo.from_proto(proto.geo), dir=proto.dir, @@ -792,8 +657,6 @@ def from_proto(cls, proto: core_pb2.Node) -> "Node": wlan_config=ConfigOption.from_dict(proto.wlan_config), mobility_config=ConfigOption.from_dict(proto.mobility_config), service_configs=service_configs, - service_file_configs=service_file_configs, - config_service_configs=config_service_configs, emane_model_configs=emane_configs, wireless_config=ConfigOption.from_dict(proto.wireless_config), ) @@ -810,21 +673,8 @@ def to_proto(self) -> core_pb2.Node: ) emane_configs.append(emane_config) service_configs = {} - for service, service_data in self.service_configs.items(): - service_configs[service] = services_pb2.NodeServiceConfig( - service=service, data=service_data.to_proto() - ) - for service, file_configs in self.service_file_configs.items(): - service_config = service_configs.get(service) - if service_config: - service_config.files.update(file_configs) - else: - service_configs[service] = services_pb2.NodeServiceConfig( - service=service, files=file_configs - ) - config_service_configs = {} - for service, service_config in self.config_service_configs.items(): - config_service_configs[service] = configservices_pb2.ConfigServiceConfig( + for service, service_config in self.service_configs.items(): + service_configs[service] = services_pb2.ServiceConfig( templates=service_config.templates, config=service_config.config ) return core_pb2.Node( @@ -834,10 +684,11 @@ def to_proto(self) -> core_pb2.Node: model=self.model, position=self.position.to_proto(), services=self.services, - config_services=self.config_services, emane=self.emane, icon=self.icon, image=self.image, + compose=self.compose, + compose_name=self.compose_name, server=self.server, dir=self.dir, channel=self.channel, @@ -845,7 +696,6 @@ def to_proto(self) -> core_pb2.Node: wlan_config={k: v.to_proto() for k, v in self.wlan_config.items()}, mobility_config={k: v.to_proto() for k, v in self.mobility_config.items()}, service_configs=service_configs, - config_service_configs=config_service_configs, emane_configs=emane_configs, wireless_config={k: v.to_proto() for k, v in self.wireless_config.items()}, ) @@ -879,8 +729,10 @@ class Session: dir: str = None user: str = None default_services: dict[str, set[str]] = field(default_factory=dict) - location: SessionLocation = SessionLocation( - x=0.0, y=0.0, z=0.0, lat=47.57917, lon=-122.13232, alt=2.0, scale=150.0 + location: SessionLocation = field( + default_factory=lambda: SessionLocation( + x=0.0, y=0.0, z=0.0, lat=47.57917, lon=-122.13232, alt=2.0, scale=150.0 + ) ) hooks: dict[str, Hook] = field(default_factory=dict) metadata: dict[str, str] = field(default_factory=dict) @@ -997,16 +849,13 @@ def set_options(self, config: dict[str, str]) -> None: @dataclass class CoreConfig: services: list[Service] = field(default_factory=list) - config_services: list[ConfigService] = field(default_factory=list) emane_models: list[str] = field(default_factory=list) @classmethod def from_proto(cls, proto: core_pb2.GetConfigResponse) -> "CoreConfig": services = [Service.from_proto(x) for x in proto.services] - config_services = [ConfigService.from_proto(x) for x in proto.config_services] return CoreConfig( services=services, - config_services=config_services, emane_models=list(proto.emane_models), ) @@ -1056,67 +905,6 @@ def from_proto(cls, proto: core_pb2.SessionEvent) -> "SessionEvent": ) -@dataclass 
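The switch from class-level defaults such as `Position(x=0, y=0)` to `field(default_factory=...)` matters because a plain instance default is shared by every dataclass instance, and Python 3.11+ rejects such unhashable defaults outright. A standalone sketch with stand-in types showing the behavior the change preserves:

```python
from dataclasses import dataclass, field


@dataclass
class Point:  # stand-in for the wrapped Position type
    x: float = 0.0
    y: float = 0.0


@dataclass
class Item:  # stand-in for Node/Session in these wrappers
    position: Point = field(default_factory=lambda: Point(x=0, y=0))


a, b = Item(), Item()
a.position.x = 10.0
assert b.position.x == 0.0  # each instance gets its own default Point
```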
-class FileEvent: - message_type: MessageType - node_id: int - name: str - mode: str - number: int - type: str - source: str - data: str - compressed_data: str - - @classmethod - def from_proto(cls, proto: core_pb2.FileEvent) -> "FileEvent": - return FileEvent( - message_type=MessageType(proto.message_type), - node_id=proto.node_id, - name=proto.name, - mode=proto.mode, - number=proto.number, - type=proto.type, - source=proto.source, - data=proto.data, - compressed_data=proto.compressed_data, - ) - - -@dataclass -class ConfigEvent: - message_type: MessageType - node_id: int - object: str - type: int - data_types: list[int] - data_values: str - captions: str - bitmap: str - possible_values: str - groups: str - iface_id: int - network_id: int - opaque: str - - @classmethod - def from_proto(cls, proto: core_pb2.ConfigEvent) -> "ConfigEvent": - return ConfigEvent( - message_type=MessageType(proto.message_type), - node_id=proto.node_id, - object=proto.object, - type=proto.type, - data_types=list(proto.data_types), - data_values=proto.data_values, - captions=proto.captions, - possible_values=proto.possible_values, - groups=proto.groups, - iface_id=proto.iface_id, - network_id=proto.network_id, - opaque=proto.opaque, - ) - - @dataclass class Event: session_id: int @@ -1124,42 +912,30 @@ class Event: session_event: SessionEvent = None node_event: NodeEvent = None link_event: LinkEvent = None - config_event: Any = None - exception_event: ExceptionEvent = None - file_event: FileEvent = None + alert_event: AlertEvent = None @classmethod def from_proto(cls, proto: core_pb2.Event) -> "Event": source = proto.source if proto.source else None node_event = None link_event = None - exception_event = None + alert_event = None session_event = None - file_event = None - config_event = None if proto.HasField("node_event"): node_event = NodeEvent.from_proto(proto.node_event) elif proto.HasField("link_event"): link_event = LinkEvent.from_proto(proto.link_event) - elif proto.HasField("exception_event"): - exception_event = ExceptionEvent.from_proto( - proto.session_id, proto.exception_event - ) + elif proto.HasField("alert_event"): + alert_event = AlertEvent.from_proto(proto.session_id, proto.alert_event) elif proto.HasField("session_event"): session_event = SessionEvent.from_proto(proto.session_event) - elif proto.HasField("file_event"): - file_event = FileEvent.from_proto(proto.file_event) - elif proto.HasField("config_event"): - config_event = ConfigEvent.from_proto(proto.config_event) return Event( session_id=proto.session_id, source=source, node_event=node_event, link_event=link_event, - exception_event=exception_event, + alert_event=alert_event, session_event=session_event, - file_event=file_event, - config_event=config_event, ) @@ -1200,6 +976,154 @@ def to_proto(self) -> emane_pb2.EmanePathlossesRequest: ) +@dataclass +class LocationEvent: + lon: float + lat: float + alt: float + nem_id: int = None + node_id: int = None + iface_id: int = None + azimuth: float = None + elevation: float = None + magnitude: float = None + roll: float = None + pitch: float = None + yaw: float = None + + def to_proto(self) -> emane_pb2.LocationEvent: + return LocationEvent( + nem_id=self.nem_id, + node_id=self.node_id, + iface_id=self.iface_id, + lon=self.lon, + lat=self.lat, + alt=self.alt, + azimuth=self.azimuth, + elevation=self.elevation, + magnitude=self.magnitude, + roll=self.roll, + pitch=self.pitch, + yaw=self.yaw, + ) + + +@dataclass +class CommEffectEvent: + delay: int + jitter: int + loss: float + dup: int + unicast: 
int + broadcast: int + nem1_id: int = None + node1_id: int = None + iface1_id: int = None + nem2_id: int = None + node2_id: int = None + iface2_id: int = None + + def to_proto(self) -> emane_pb2.CommEffectEvent: + return emane_pb2.CommEffectEvent( + nem1_id=self.nem1_id, + node1_id=self.node1_id, + iface1_id=self.iface1_id, + nem2_id=self.nem2_id, + node2_id=self.node2_id, + iface2_id=self.iface2_id, + delay=self.delay, + jitter=self.jitter, + loss=self.loss, + dup=self.dup, + unicast=self.unicast, + broadcast=self.broadcast, + ) + + +@dataclass +class PathlossEvent: + nem1_id: int = None + node1_id: int = None + iface1_id: int = None + nem2_id: int = None + node2_id: int = None + iface2_id: int = None + forward1: float = None + reverse1: float = None + forward2: float = None + reverse2: float = None + + def to_proto(self) -> emane_pb2.PathlossEvent: + return emane_pb2.PathlossEvent( + nem1_id=self.nem1_id, + node1_id=self.node1_id, + iface1_id=self.iface1_id, + nem2_id=self.nem2_id, + node2_id=self.node2_id, + iface2_id=self.iface2_id, + forward1=self.forward1, + reverse1=self.reverse1, + forward2=self.forward2, + reverse2=self.reverse2, + ) + + +@dataclass +class AntennaProfileEvent: + profile: int + azimuth: float + elevation: float + nem_id: int = None + node_id: int = None + iface_id: int = None + + def to_proto(self) -> emane_pb2.AntennaProfileEvent: + return emane_pb2.AntennaProfileEvent( + nem_id=self.nem_id, + node_id=self.node_id, + iface_id=self.iface_id, + profile=self.profile, + azimuth=self.azimuth, + elevation=self.elevation, + ) + + +@dataclass +class FadingSelectionEvent: + model: str + nem_id: int = None + node_id: int = None + iface_id: int = None + + def to_proto(self) -> emane_pb2.FadingSelectionEvent: + return emane_pb2.FadingSelectionEvent( + nem_id=self.nem_id, + node_id=self.node_id, + iface_id=self.iface_id, + model=self.model, + ) + + +@dataclass +class EmaneEventsRequest: + session_id: int + location: LocationEvent = None + comm_effect: CommEffectEvent = None + pathloss: PathlossEvent = None + antenna: AntennaProfileEvent = None + fading: FadingSelectionEvent = None + + def to_proto(self) -> emane_pb2.EmaneEventsRequest: + return emane_pb2.EmaneEventsRequest( + session_id=self.session_id, + location=self.location.to_proto() if self.location else None, + comm_effect=self.comm_effect.to_proto() if self.comm_effect else None, + pathloss=self.pathloss.to_proto() if self.pathloss else None, + antenna=self.antenna.to_proto() if self.antenna else None, + fading=self.fading.to_proto() if self.fading else None, + ) + + @dataclass(frozen=True) class MoveNodesRequest: session_id: int diff --git a/daemon/core/config.py b/daemon/core/config.py index 7a6ffa49f..b1d9f2d1a 100644 --- a/daemon/core/config.py +++ b/daemon/core/config.py @@ -45,6 +45,7 @@ class Configuration: default: str = "" options: list[str] = field(default_factory=list) group: str = "Configuration" + regex: str = None def __post_init__(self) -> None: self.label = self.label if self.label else self.id @@ -80,6 +81,7 @@ class ConfigBool(Configuration): type: ConfigDataTypes = ConfigDataTypes.BOOL value: bool = False + default: str = "0" @dataclass diff --git a/daemon/core/emane/emanemanager.py b/daemon/core/emane/emanemanager.py index c02570c98..f7ecbeaf0 100644 --- a/daemon/core/emane/emanemanager.py +++ b/daemon/core/emane/emanemanager.py @@ -3,18 +3,18 @@ """ import logging -import os import threading from enum import Enum -from typing import TYPE_CHECKING, Optional, Union +from typing import TYPE_CHECKING, 
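A minimal sketch of building one of the new EMANE event request wrappers, assuming the same wrappers import path as above; the session id, NEM ids, and pathloss values are illustrative:

```python
from core.api.grpc.wrappers import EmaneEventsRequest, PathlossEvent

request = EmaneEventsRequest(
    session_id=1,  # illustrative session id
    pathloss=PathlossEvent(nem1_id=1, nem2_id=2, forward1=90.0, forward2=95.0),
)
# only the pathloss field is populated; the other event fields stay unset
proto = request.to_proto()
```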
Optional from core import utils from core.emane.emanemodel import EmaneModel +from core.emane.eventmanager import EmaneEventManager from core.emane.linkmonitor import EmaneLinkMonitor from core.emane.modelmanager import EmaneModelManager from core.emane.nodes import EmaneNet, TunTap from core.emulator.data import LinkData -from core.emulator.enumerations import LinkTypes, MessageFlags, RegisterTlvs +from core.emulator.enumerations import LinkTypes, MessageFlags from core.errors import CoreCommandError, CoreError from core.nodes.base import CoreNode, NodeBase from core.nodes.interface import CoreInterface @@ -26,23 +26,12 @@ from core.emulator.session import Session try: - from emane.events import EventService, PathlossEvent, CommEffectEvent, LocationEvent - from emane.events.eventserviceexception import EventServiceException + from emane.events import LocationEvent except ImportError: try: - from emanesh.events import ( - EventService, - PathlossEvent, - CommEffectEvent, - LocationEvent, - ) - from emanesh.events.eventserviceexception import EventServiceException + from emanesh.events import LocationEvent except ImportError: - CommEffectEvent = None - EventService = None LocationEvent = None - PathlossEvent = None - EventServiceException = None logger.debug("compatible emane python bindings not installed") DEFAULT_LOG_LEVEL: int = 3 @@ -54,59 +43,6 @@ class EmaneState(Enum): NOT_READY = 2 -class EmaneEventService: - def __init__( - self, manager: "EmaneManager", device: str, group: str, port: int - ) -> None: - self.manager: "EmaneManager" = manager - self.device: str = device - self.group: str = group - self.port: int = port - self.running: bool = False - self.thread: Optional[threading.Thread] = None - logger.info("starting emane event service %s %s:%s", device, group, port) - self.events: EventService = EventService( - eventchannel=(group, port, device), otachannel=None - ) - - def start(self) -> None: - self.running = True - self.thread = threading.Thread(target=self.run, daemon=True) - self.thread.start() - - def run(self) -> None: - """ - Run and monitor events. - """ - logger.info("subscribing to emane location events") - while self.running: - _uuid, _seq, events = self.events.nextEvent() - # this occurs with 0.9.1 event service - if not self.running: - break - for event in events: - nem, eid, data = event - if eid == LocationEvent.IDENTIFIER: - self.manager.handlelocationevent(nem, eid, data) - logger.info("unsubscribing from emane location events") - - def stop(self) -> None: - """ - Stop service and monitoring events. - """ - self.events.breakloop() - self.running = False - if self.thread: - self.thread.join() - self.thread = None - for fd in self.events._readFd, self.events._writeFd: - if fd >= 0: - os.close(fd) - for f in self.events._socket, self.events._socketOTA: - if f: - f.close() - - class EmaneManager: """ EMANE controller object. 
Lives in a Session instance and is used for @@ -115,7 +51,6 @@ class EmaneManager: """ name: str = "emane" - config_type: RegisterTlvs = RegisterTlvs.EMULATION_SERVER def __init__(self, session: "Session") -> None: """ @@ -124,7 +59,6 @@ def __init__(self, session: "Session") -> None: :param session: session this manager is tied to :return: nothing """ - super().__init__() self.session: "Session" = session self.nems_to_ifaces: dict[int, CoreInterface] = {} self.ifaces_to_nems: dict[CoreInterface, int] = {} @@ -146,9 +80,10 @@ def __init__(self, session: "Session") -> None: # link monitor self.link_monitor: EmaneLinkMonitor = EmaneLinkMonitor(self) - # emane event monitoring - self.services: dict[str, EmaneEventService] = {} - self.nem_service: dict[int, EmaneEventService] = {} + # emane event handling + self.event_manager: EmaneEventManager = EmaneEventManager( + self.handlelocationevent + ) def next_nem_id(self, iface: CoreInterface) -> int: nem_id = self.session.options.get_int("nem_id_start") @@ -292,7 +227,7 @@ def setup(self) -> EmaneState: logger.debug("no emane nodes in session") return EmaneState.NOT_NEEDED # check if bindings were installed - if EventService is None: + if LocationEvent is None: raise CoreError("EMANE python bindings are not installed") self.check_node_models() return EmaneState.SUCCESS @@ -360,39 +295,23 @@ def setup_control_channels( # setup ota device otagroup, _otaport = config["otamanagergroup"].split(":") otadev = config["otamanagerdevice"] - ota_index = self.session.get_control_net_index(otadev) - self.session.add_remove_control_net(ota_index, conf_required=False) + ota_index = self.session.control_net_manager.get_net_id(otadev) + self.session.control_net_manager.add_net(ota_index, conf_required=False) if isinstance(node, CoreNode): - self.session.add_remove_control_iface(node, ota_index, conf_required=False) + self.session.control_net_manager.add_iface(node, ota_index) # setup event device eventgroup, eventport = config["eventservicegroup"].split(":") eventdev = config["eventservicedevice"] - event_index = self.session.get_control_net_index(eventdev) - event_net = self.session.add_remove_control_net( + event_index = self.session.control_net_manager.get_net_id(eventdev) + event_net = self.session.control_net_manager.add_net( event_index, conf_required=False ) if isinstance(node, CoreNode): - self.session.add_remove_control_iface( - node, event_index, conf_required=False - ) + self.session.control_net_manager.add_iface(node, event_index) # initialize emane event services - service = self.services.get(event_net.brname) - if not service: - try: - service = EmaneEventService( - self, event_net.brname, eventgroup, int(eventport) - ) - if self.doeventmonitor(): - service.start() - self.services[event_net.brname] = service - self.nem_service[nem_id] = service - except EventServiceException: - raise CoreError( - "failed to start emane event services " - f"{event_net.brname} {eventgroup}:{eventport}" - ) - else: - self.nem_service[nem_id] = service + self.event_manager.create_service( + nem_id, event_net.brname, eventgroup, int(eventport), self.doeventmonitor() + ) # setup multicast routes as needed logger.info( "node(%s) interface(%s) ota(%s:%s) event(%s:%s)", @@ -448,10 +367,8 @@ def set_nem_position(self, iface: CoreInterface) -> None: """ position = self.get_nem_position(iface) if position: - nemid, lon, lat, alt = position - event = LocationEvent() - event.append(nemid, latitude=lat, longitude=lon, altitude=alt) - self.publish_event(nemid, event, send_all=True) 
+ nem_id, lon, lat, alt = position + self.event_manager.publish_location(nem_id, lon, lat, alt) def set_nem_positions(self, moved_ifaces: list[CoreInterface]) -> None: """ @@ -461,19 +378,14 @@ def set_nem_positions(self, moved_ifaces: list[CoreInterface]) -> None: """ if not moved_ifaces: return - services = {} + positions = [] for iface in moved_ifaces: position = self.get_nem_position(iface) if not position: continue nem_id, lon, lat, alt = position - service = self.nem_service.get(nem_id) - if not service: - continue - event = services.setdefault(service, LocationEvent()) - event.append(nem_id, latitude=lat, longitude=lon, altitude=alt) - for service, event in services.items(): - service.events.publish(0, event) + positions.append((nem_id, lon, lat, alt)) + self.event_manager.publish_locations(positions) def write_nem(self, iface: CoreInterface, nem_id: int) -> None: path = self.session.directory / "emane_nems" @@ -512,7 +424,7 @@ def reset(self) -> None: self.nems_to_ifaces.clear() self.ifaces_to_nems.clear() self.nems_to_ifaces.clear() - self.services.clear() + self.event_manager.reset() def shutdown(self) -> None: """ @@ -537,10 +449,7 @@ def shutdown(self) -> None: node.host_cmd(kill_cmd, wait=False) iface.poshook = None # stop emane event services - while self.services: - _, service = self.services.popitem() - service.stop() - self.nem_service.clear() + self.event_manager.shutdown() def check_node_models(self) -> None: """ @@ -612,10 +521,10 @@ def start_daemon(self, iface: CoreInterface) -> None: emanecmd = f"emane -d -l {loglevel}" if realtime: emanecmd += " -r" + # start emane if isinstance(node, CoreNode): - # start emane - log_file = node.directory / f"{iface.name}-emane.log" - platform_xml = node.directory / emanexml.platform_file_name(iface) + log_file = f"{iface.name}-emane.log" + platform_xml = emanexml.platform_file_name(iface) args = f"{emanecmd} -f {log_file} {platform_xml}" node.cmd(args) else: @@ -642,16 +551,14 @@ def doeventmonitor(self) -> bool: def genlocationevents(self) -> bool: """ - Returns boolean whether or not EMANE events will be generated. + Returns boolean whether EMANE events will be generated. """ return self.session.options.get_bool("emane_event_generate", True) - def handlelocationevent(self, rxnemid: int, eid: int, data: str) -> None: + def handlelocationevent(self, events: LocationEvent) -> None: """ Handle an EMANE location event. """ - events = LocationEvent() - events.restore(data) for event in events: txnemid, attrs = event if ( @@ -739,32 +646,3 @@ def emanerunning(self, node: CoreNode) -> bool: except CoreCommandError: result = False return result - - def publish_pathloss(self, nem1: int, nem2: int, rx1: float, rx2: float) -> None: - """ - Publish pathloss events between provided nems, using provided rx power. 
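A sketch of what the manager now delegates to, assuming `session` is a running `Session` with EMANE nodes configured; NEM ids and coordinates are illustrative:

```python
# batch update: one LocationEvent per event service, as publish_locations does
positions = [
    (1, -122.132, 47.579, 2.0),  # (nem_id, lon, lat, alt)
    (2, -122.131, 47.580, 2.0),
]
session.emane.event_manager.publish_locations(positions)

# single NEM update; orientation attributes may also be passed when needed
session.emane.event_manager.publish_location(3, lon=-122.130, lat=47.581, alt=2.0)
```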
- :param nem1: interface one for pathloss - :param nem2: interface two for pathloss - :param rx1: received power from nem2 to nem1 - :param rx2: received power from nem1 to nem2 - :return: nothing - """ - event = PathlossEvent() - event.append(nem1, forward=rx1) - event.append(nem2, forward=rx2) - self.publish_event(nem1, event) - self.publish_event(nem2, event) - - def publish_event( - self, - nem_id: int, - event: Union[PathlossEvent, CommEffectEvent, LocationEvent], - send_all: bool = False, - ) -> None: - service = self.nem_service.get(nem_id) - if not service: - logger.error("no service to publish event nem(%s)", nem_id) - return - if send_all: - nem_id = 0 - service.events.publish(nem_id, event) diff --git a/daemon/core/emane/emanemanifest.py b/daemon/core/emane/emanemanifest.py index ea2b05fd7..2c6b1f617 100644 --- a/daemon/core/emane/emanemanifest.py +++ b/daemon/core/emane/emanemanifest.py @@ -32,24 +32,6 @@ def _type_value(config_type: str) -> ConfigDataTypes: return ConfigDataTypes[config_type] -def _get_possible(config_type: str, config_regex: str) -> list[str]: - """ - Retrieve possible config value options based on emane regexes. - - :param config_type: emane configuration type - :param config_regex: emane configuration regex - :return: a string listing comma delimited values, if needed, empty string otherwise - """ - if config_type == "bool": - return ["On", "Off"] - - if config_type == "string" and config_regex: - possible = config_regex[2:-2] - return possible.split("|") - - return [] - - def _get_default(config_type_name: str, config_value: list[str]) -> str: """ Convert default configuration values to one used by core. @@ -111,7 +93,9 @@ def parse(manifest_path: Path, defaults: dict[str, str]) -> list[Configuration]: # map to possible values used as options within the gui config_regex = config_info.get("regex") - possible = _get_possible(config_type_name, config_regex) + options = None + if config_type == "bool": + options = ["On", "Off"] # define description and account for gui quirks config_descriptions = config_name @@ -122,8 +106,9 @@ def parse(manifest_path: Path, defaults: dict[str, str]) -> list[Configuration]: id=config_name, type=config_type_value, default=config_default, - options=possible, + options=options, label=config_descriptions, + regex=config_regex, ) configurations.append(configuration) diff --git a/daemon/core/emane/eventmanager.py b/daemon/core/emane/eventmanager.py new file mode 100644 index 000000000..606fa011e --- /dev/null +++ b/daemon/core/emane/eventmanager.py @@ -0,0 +1,261 @@ +import logging +import threading +from typing import Callable, Optional, Union + +from core.errors import CoreError + +logger = logging.getLogger(__name__) + +try: + from emane.events import ( + EventService, + AntennaProfileEvent, + CommEffectEvent, + FadingSelectionEvent, + LocationEvent, + PathlossEvent, + ) + from emane.events.eventserviceexception import EventServiceException +except ImportError: + try: + from emanesh.events import ( + EventService, + AntennaProfileEvent, + CommEffectEvent, + FadingSelectionEvent, + LocationEvent, + PathlossEvent, + ) + from emanesh.events.eventserviceexception import EventServiceException + except ImportError: + EventService = None + AntennaProfileEvent = None + CommEffectEvent = None + FadingSelectionEvent = None + LocationEvent = None + PathlossEvent = None + EventServiceException = None + logger.debug("compatible emane python bindings not installed") + + +class EmaneEventService: + def __init__( + self, + device: str, + group: 
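With the manifest parser above passing the EMANE regex through untouched, a parsed option looks roughly like the following sketch; the option name, default, and regex are illustrative, and `ConfigDataTypes.STRING` is assumed to be one of the enum members not shown in this hunk:

```python
from core.config import Configuration
from core.emulator.enumerations import ConfigDataTypes

option = Configuration(
    id="txpower",            # illustrative EMANE option name
    type=ConfigDataTypes.STRING,
    default="0.0",
    regex=r"^\d+(\.\d+)?$",  # kept exactly as provided by the manifest
)
assert option.label == "txpower"  # __post_init__ falls back to the id
```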
str, + port: int, + location_handler: Callable[[LocationEvent], None], + ) -> None: + self.device: str = device + self.group: str = group + self.port: int = port + self.location_handler: Callable[[LocationEvent], None] = location_handler + self.running: bool = False + self.thread: Optional[threading.Thread] = None + logger.info("starting emane event service %s %s:%s", device, group, port) + self.events: EventService = EventService( + eventchannel=(group, port, device), otachannel=None + ) + + def start(self) -> None: + self.running = True + self.thread = threading.Thread(target=self.run, daemon=True) + self.thread.start() + + def run(self) -> None: + """ + Run and monitor events. + """ + logger.info("subscribing to emane location events") + while self.running: + _uuid, _seq, events = self.events.nextEvent() + # this occurs with 0.9.1 event service + if not self.running: + break + for _nem_id, event_id, data in events: + if event_id == LocationEvent.IDENTIFIER: + events = LocationEvent() + events.restore(data) + self.location_handler(events) + logger.info("unsubscribing from emane location events") + + def stop(self) -> None: + """ + Stop service and monitoring events. + """ + self.events.breakloop() + self.running = False + if self.thread: + self.thread.join() + self.thread = None + + +class EmaneEventManager: + def __init__(self, location_handler: Callable[[LocationEvent], None]): + self.location_handler: Callable[[LocationEvent], None] = location_handler + self.services: dict[str, EmaneEventService] = {} + self.nem_service: dict[int, EmaneEventService] = {} + + def reset(self) -> None: + self.services.clear() + self.nem_service.clear() + + def shutdown(self) -> None: + while self.services: + _, service = self.services.popitem() + service.stop() + self.nem_service.clear() + + def create_service( + self, + nem_id: int, + device: str, + group: str, + port: int, + should_start: bool, + ) -> None: + # initialize emane event services + service = self.services.get(device) + if not service: + try: + service = EmaneEventService(device, group, port, self.location_handler) + if should_start: + service.start() + self.services[device] = service + self.nem_service[nem_id] = service + except EventServiceException: + raise CoreError( + "failed to start emane event services {name} {group}:{port}" + ) + else: + self.nem_service[nem_id] = service + + def get_service(self, nem_id: int) -> Optional[EmaneEventService]: + service = self.nem_service.get(nem_id) + if not service: + logger.error("failure to find event service for nem(%s)", nem_id) + return service + + def publish_location( + self, + nem_id: int, + lon: float, + lat: float, + alt: float, + azimuth: float = None, + elevation: float = None, + magnitude: float = None, + roll: float = None, + pitch: float = None, + yaw: float = None, + ) -> None: + args = dict( + azimuth=azimuth, + elevation=elevation, + magnitude=magnitude, + roll=roll, + pitch=pitch, + yaw=yaw, + ) + args = {k: v for k, v in args.items() if v is not None} + event = LocationEvent() + event.append( + nem_id, + latitude=lat, + longitude=lon, + altitude=alt, + **args, + ) + self._publish_event(nem_id, event, 0) + + def publish_locations( + self, positions: list[tuple[int, float, float, float]] + ) -> None: + services = {} + for nem_id, lon, lat, alt in positions: + service = self.get_service(nem_id) + if not service: + continue + event = services.setdefault(service, LocationEvent()) + event.append(nem_id, latitude=lat, longitude=lon, altitude=alt) + for service, event in 
services.items(): + service.events.publish(0, event) + + def publish_comm_effect( + self, + nem1_id: int, + nem2_id: int, + delay: int, + jitter: int, + loss: float, + dup: int, + unicast: int, + broadcast: int, + ) -> None: + # TODO: batch these into multiple events per transmission + # TODO: may want to split out seconds portion of delay and jitter + event = CommEffectEvent() + event.append( + nem1_id, + latency=delay, + jitter=jitter, + loss=loss, + duplicate=dup, + unicast=unicast, + broadcast=broadcast, + ) + self._publish_event(nem2_id, event) + + def publish_pathloss( + self, + nem1_id: int, + nem2_id: int, + forward1: float = None, + reverse1: float = None, + forward2: float = None, + reverse2: float = None, + ) -> None: + args1 = dict(forward=forward1, reverse=reverse1) + args1 = {k: v for k, v in args1.items() if v is not None} + args2 = dict(forward=forward2, reverse=reverse2) + args2 = {k: v for k, v in args2.items() if v is not None} + event = PathlossEvent() + event.append(nem1_id, **args1) + event.append(nem2_id, **args2) + self._publish_event(nem1_id, event) + self._publish_event(nem2_id, event) + + def publish_antenna_profile( + self, + nem_id: int, + profile: int, + azimuth: float, + elevation: float, + ) -> None: + event = AntennaProfileEvent() + event.append(nem_id, profile=profile, azimuth=azimuth, elevation=elevation) + self._publish_event(nem_id, event, 0) + + def publish_fading_selection(self, nem_id: int, model: str) -> None: + event = FadingSelectionEvent() + event.append(nem_id, model=model) + self._publish_event(nem_id, event) + + def _publish_event( + self, + nem_id: int, + event: Union[ + AntennaProfileEvent, + CommEffectEvent, + FadingSelectionEvent, + LocationEvent, + PathlossEvent, + ], + publish_id: int = None, + ) -> None: + service = self.get_service(nem_id) + if not service: + return + if publish_id is None: + publish_id = nem_id + service.events.publish(publish_id, event) diff --git a/daemon/core/emane/models/commeffect.py b/daemon/core/emane/models/commeffect.py index aa093a93e..85ed65393 100644 --- a/daemon/core/emane/models/commeffect.py +++ b/daemon/core/emane/models/commeffect.py @@ -124,19 +124,17 @@ def linkconfig( if iface is None or iface2 is None: logger.warning("%s: missing NEM information", self.name) return - # TODO: batch these into multiple events per transmission - # TODO: may want to split out seconds portion of delay and jitter - event = CommEffectEvent() nem1 = self.session.emane.get_nem_id(iface) nem2 = self.session.emane.get_nem_id(iface2) logger.info("sending comm effect event") - event.append( + bandwidth = int(convert_none(options.bandwidth)) + self.session.emane.event_manager.publish_comm_effect( nem1, - latency=convert_none(options.delay), + nem2, + delay=convert_none(options.delay), jitter=convert_none(options.jitter), loss=convert_none(options.loss), - duplicate=convert_none(options.dup), - unicast=int(convert_none(options.bandwidth)), - broadcast=int(convert_none(options.bandwidth)), + dup=convert_none(options.dup), + unicast=bandwidth, + broadcast=bandwidth, ) - self.session.emane.publish_event(nem2, event) diff --git a/daemon/core/emulator/broadcast.py b/daemon/core/emulator/broadcast.py index bf56f99de..666a9521c 100644 --- a/daemon/core/emulator/broadcast.py +++ b/daemon/core/emulator/broadcast.py @@ -1,19 +1,10 @@ from collections.abc import Callable from typing import TypeVar, Union -from core.emulator.data import ( - ConfigData, - EventData, - ExceptionData, - FileData, - LinkData, - NodeData, -) +from 
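A sketch of the remaining publish helpers on `EmaneEventManager`, again assuming `session` is a running session whose event services are up; the NEM ids, rates, and fading model name are illustrative:

```python
events = session.emane.event_manager
events.publish_pathloss(nem1_id=1, nem2_id=2, forward1=90.0, forward2=95.0)
events.publish_comm_effect(
    nem1_id=1,
    nem2_id=2,
    delay=5000,
    jitter=0,
    loss=0.0,
    dup=0,
    unicast=54_000_000,
    broadcast=54_000_000,
)
events.publish_antenna_profile(nem_id=1, profile=1, azimuth=180.0, elevation=0.0)
events.publish_fading_selection(nem_id=1, model="nakagami")  # illustrative model
```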
core.emulator.data import AlertData, EventData, LinkData, NodeData from core.errors import CoreError -T = TypeVar( - "T", bound=Union[EventData, ExceptionData, NodeData, LinkData, FileData, ConfigData] -) +T = TypeVar("T", bound=Union[EventData, AlertData, NodeData, LinkData]) class BroadcastManager: diff --git a/daemon/core/emulator/controlnets.py b/daemon/core/emulator/controlnets.py index 27b003672..450324a76 100644 --- a/daemon/core/emulator/controlnets.py +++ b/daemon/core/emulator/controlnets.py @@ -3,6 +3,7 @@ from core import utils from core.emulator.data import InterfaceData +from core.emulator.sessionconfig import SessionConfig from core.errors import CoreError from core.nodes.base import CoreNode from core.nodes.interface import DEFAULT_MTU @@ -14,54 +15,48 @@ from core.emulator.session import Session CTRL_NET_ID: int = 9001 +CTRL_NET_IFACE_ID: int = 99 ETC_HOSTS_PATH: str = "/etc/hosts" +DEFAULT_PREFIX_LIST: dict[int, str] = { + 0: "172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24", + 1: "172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24", + 2: "172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24", + 3: "172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24", +} class ControlNetManager: def __init__(self, session: "Session") -> None: self.session: "Session" = session self.etc_hosts_header: str = f"CORE session {self.session.id} host entries" + self.etc_hosted_enabled: bool = False + self.net_prefixes: dict[int, Optional[str]] = {} + self.net_ifaces: dict[int, Optional[str]] = {} + self.updown_script: Optional[str] = None + self.parse_options(session.options) - def _etc_hosts_enabled(self) -> bool: + def parse_options(self, options: SessionConfig) -> None: """ - Determines if /etc/hosts should be configured. + Parse session options for current settings to use. - :return: True if /etc/hosts should be configured, False otherwise - """ - return self.session.options.get_bool("update_etc_hosts", False) - - def _get_server_ifaces( - self, - ) -> tuple[None, Optional[str], Optional[str], Optional[str]]: - """ - Retrieve control net server interfaces. - - :return: control net server interfaces - """ - d0 = self.session.options.get("controlnetif0") - if d0: - logger.error("controlnet0 cannot be assigned with a host interface") - d1 = self.session.options.get("controlnetif1") - d2 = self.session.options.get("controlnetif2") - d3 = self.session.options.get("controlnetif3") - return None, d1, d2, d3 - - def _get_prefixes( - self, - ) -> tuple[Optional[str], Optional[str], Optional[str], Optional[str]]: - """ - Retrieve control net prefixes. 
- - :return: control net prefixes + :param options: options to parse + :return: nothing """ - p = self.session.options.get("controlnet") - p0 = self.session.options.get("controlnet0") - p1 = self.session.options.get("controlnet1") - p2 = self.session.options.get("controlnet2") - p3 = self.session.options.get("controlnet3") - if not p0 and p: - p0 = p - return p0, p1, p2, p3 + self.etc_hosted_enabled: bool = options.get_bool("update_etc_hosts", False) + default_net = options.get("controlnet") or None + self.net_prefixes = { + 0: (options.get("controlnet0") or None) or default_net, + 1: options.get("controlnet1") or None, + 2: options.get("controlnet2") or None, + 3: options.get("controlnet3") or None, + } + self.net_ifaces = { + 0: None, + 1: options.get("controlnetif1") or None, + 2: options.get("controlnetif2") or None, + 3: options.get("controlnetif3") or None, + } + self.updown_script = options.get("controlnet_updown_script") or None def update_etc_hosts(self) -> None: """ @@ -69,9 +64,9 @@ def update_etc_hosts(self) -> None: :return: nothing """ - if not self._etc_hosts_enabled(): + if not self.etc_hosted_enabled: return - control_net = self.get_control_net(0) + control_net = self.get_net(0) entries = "" for iface in control_net.get_ifaces(): name = iface.node.name @@ -86,72 +81,77 @@ def clear_etc_hosts(self) -> None: :return: nothing """ - if not self._etc_hosts_enabled(): + if not self.etc_hosted_enabled: return logger.info("removing /etc/hosts file entries") utils.file_demunge(ETC_HOSTS_PATH, self.etc_hosts_header) - def get_control_net_index(self, dev: str) -> int: + def get_net_id(self, dev: str) -> int: """ - Retrieve control net index. + Retrieve control net id. - :param dev: device to get control net index for - :return: control net index, -1 otherwise + :param dev: device to get control net id for + :return: control net id, -1 otherwise """ if dev[0:4] == "ctrl" and int(dev[4]) in (0, 1, 2, 3): - index = int(dev[4]) - if index == 0: - return index - if index < 4 and self._get_prefixes()[index] is not None: - return index + _id = int(dev[4]) + if _id == 0: + return _id + if _id < 4 and self.net_prefixes[_id] is not None: + return _id return -1 - def get_control_net(self, index: int) -> Optional[CtrlNet]: + def get_net(self, _id: int) -> Optional[CtrlNet]: """ - Retrieve a control net based on index. + Retrieve a control net based on id. - :param index: control net index + :param _id: id of control net to retrieve :return: control net when available, None otherwise """ - try: - return self.session.get_node(CTRL_NET_ID + index, CtrlNet) - except CoreError: - return None + return self.session.control_nodes.get(_id) + + def setup_nets(self) -> None: + """ + Setup all configured control nets. + + :return: nothing + """ + for _id, prefix in self.net_prefixes.items(): + if prefix: + self.add_net(_id) - def add_control_net( - self, index: int, conf_required: bool = True - ) -> Optional[CtrlNet]: + def add_net(self, _id: int, conf_required: bool = True) -> Optional[CtrlNet]: """ Create a control network bridge as necessary. The conf_reqd flag, when False, causes a control network bridge to be added even if one has not been configured. 
- :param index: network index to add + :param _id: id of control net to add :param conf_required: flag to check if conf is required :return: control net node """ logger.info( - "checking to add control net index(%s) conf_required(%s)", - index, + "checking to add control net(%s) conf_required(%s)", + _id, conf_required, ) - # check for valid index - if not (0 <= index <= 3): - raise CoreError(f"invalid control net index({index})") + # check for valid id + if not (0 <= _id <= 3): + raise CoreError(f"invalid control net id({_id})") # return any existing control net bridge - control_net = self.get_control_net(index) + control_net = self.get_net(_id) if control_net: - logger.info("control net index(%s) already exists", index) + logger.info("control net(%s) already exists", _id) return control_net - # retrieve prefix for current index - index_prefix = self._get_prefixes()[index] - if not index_prefix: + # retrieve prefix for current id + id_prefix = self.net_prefixes[_id] + if not id_prefix: if conf_required: return None else: - index_prefix = CtrlNet.DEFAULT_PREFIX_LIST[index] + id_prefix = DEFAULT_PREFIX_LIST[_id] # retrieve valid prefix from old style values - prefixes = index_prefix.split() + prefixes = id_prefix.split() if len(prefixes) > 1: # a list of per-host prefixes is provided try: @@ -162,67 +162,63 @@ def add_control_net( prefix = prefixes[0] # use the updown script for control net 0 only updown_script = None - if index == 0: - updown_script = self.session.options.get("controlnet_updown_script") + if _id == 0: + updown_script = self.updown_script # build a new controlnet bridge - _id = CTRL_NET_ID + index - server_iface = self._get_server_ifaces()[index] - logger.info( - "adding controlnet(%s) prefix(%s) updown(%s) server interface(%s)", - _id, - prefix, - updown_script, - server_iface, - ) - options = CtrlNet.create_options() - options.prefix = prefix - options.updown_script = updown_script - options.serverintf = server_iface - control_net = self.session.create_node(CtrlNet, False, _id, options=options) - control_net.brname = f"ctrl{index}.{self.session.short_session_id()}" - control_net.startup() - return control_net + server_iface = self.net_ifaces[_id] + return self.session.create_control_net(_id, prefix, updown_script, server_iface) - def remove_control_net(self, index: int) -> None: + def remove_nets(self) -> None: """ - Removes control net. + Removes control nets. - :param index: index of control net to remove :return: nothing """ - control_net = self.get_control_net(index) - if control_net: - logger.info("removing control net index(%s)", index) - self.session.delete_node(control_net.id) + for _id in self.net_prefixes: + control_net = self.session.control_nodes.pop(_id, None) + if control_net: + logger.info("shutting down control net(%s)", _id) + control_net.shutdown() - def add_control_iface(self, node: CoreNode, index: int) -> None: + def setup_ifaces(self, node: CoreNode) -> None: + """ + Setup all configured control net interfaces for node. + + :param node: node to configure control net interfaces for + :return: nothing + """ + for _id in self.net_prefixes: + if self.get_net(_id): + self.add_iface(node, _id) + + def add_iface(self, node: CoreNode, _id: int) -> None: """ Adds a control net interface to a node. 
:param node: node to add control net interface to - :param index: index of control net to add interface to + :param _id: id of control net to add interface to :return: nothing :raises CoreError: if control net doesn't exist, interface already exists, or there is an error creating the interface """ - control_net = self.get_control_net(index) + control_net = self.get_net(_id) if not control_net: - raise CoreError(f"control net index({index}) does not exist") - iface_id = control_net.CTRLIF_IDX_BASE + index + raise CoreError(f"control net id({_id}) does not exist") + iface_id = CTRL_NET_IFACE_ID + _id if node.ifaces.get(iface_id): - raise CoreError(f"control iface({iface_id}) already exists") + return try: logger.info( - "node(%s) adding control net index(%s) interface(%s)", + "node(%s) adding control net id(%s) interface(%s)", node.name, - index, + _id, iface_id, ) ip4 = control_net.prefix[node.id] ip4_mask = control_net.prefix.prefixlen iface_data = InterfaceData( id=iface_id, - name=f"ctrl{index}", + name=f"ctrl{_id}", mac=utils.random_mac(), ip4=ip4, ip4_mask=ip4_mask, diff --git a/daemon/core/emulator/coreemu.py b/daemon/core/emulator/coreemu.py index 574002e6f..3b31089c4 100644 --- a/daemon/core/emulator/coreemu.py +++ b/daemon/core/emulator/coreemu.py @@ -3,11 +3,10 @@ from pathlib import Path from core import utils -from core.configservice.manager import ConfigServiceManager from core.emane.modelmanager import EmaneModelManager from core.emulator.session import Session from core.executables import get_requirements -from core.services.coreservices import ServiceManager +from core.services.manager import ServiceManager logger = logging.getLogger(__name__) @@ -37,7 +36,7 @@ def __init__(self, config: dict[str, str] = None) -> None: # load services self.service_errors: list[str] = [] - self.service_manager: ConfigServiceManager = ConfigServiceManager() + self.service_manager: ServiceManager = ServiceManager() self._load_services() # check and load emane @@ -65,19 +64,9 @@ def _load_services(self) -> None: :return: nothing """ # load default services - self.service_errors = ServiceManager.load_locals() - # load custom services - service_paths = self.config.get("custom_services_dir") - logger.debug("custom service paths: %s", service_paths) - if service_paths is not None: - for service_path in service_paths.split(","): - service_path = Path(service_path.strip()) - custom_service_errors = ServiceManager.add_services(service_path) - self.service_errors.extend(custom_service_errors) - # load default config services self.service_manager.load_locals() - # load custom config services - custom_dir = self.config.get("custom_config_services_dir") + # load custom services + custom_dir = self.config.get("custom_services_dir") if custom_dir is not None: custom_dir = Path(custom_dir) self.service_manager.load(custom_dir) diff --git a/daemon/core/emulator/data.py b/daemon/core/emulator/data.py index 7d3dc8dcc..910bb8e90 100644 --- a/daemon/core/emulator/data.py +++ b/daemon/core/emulator/data.py @@ -7,35 +7,12 @@ import netaddr from core import utils -from core.emulator.enumerations import ( - EventTypes, - ExceptionLevels, - LinkTypes, - MessageFlags, -) +from core.emulator.enumerations import AlertLevels, EventTypes, LinkTypes, MessageFlags if TYPE_CHECKING: from core.nodes.base import CoreNode, NodeBase -@dataclass -class ConfigData: - message_type: int = None - node: int = None - object: str = None - type: int = None - data_types: tuple[int] = None - data_values: str = None - captions: str = None - 
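Taken together, the renamed control-net helpers are used elsewhere in this diff roughly as sketched below, assuming `session` is an active `Session` and `node` is a `CoreNode` in it:

```python
manager = session.control_net_manager
net_id = manager.get_net_id("ctrl0")  # -> 0; unknown devices return -1
control_net = manager.add_net(net_id, conf_required=False)
if control_net is not None:
    manager.add_iface(node, net_id)  # attaches the ctrl0 interface to the node
```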
bitmap: str = None - possible_values: str = None - groups: str = None - session: int = None - iface_id: int = None - network_id: int = None - opaque: str = None - - @dataclass class EventData: node: int = None @@ -47,10 +24,10 @@ class EventData: @dataclass -class ExceptionData: +class AlertData: node: int = None session: int = None - level: ExceptionLevels = None + level: AlertLevels = None source: str = None date: str = None text: str = None @@ -82,7 +59,6 @@ class NodeOptions: canvas: int = None icon: str = None services: list[str] = field(default_factory=list) - config_services: list[str] = field(default_factory=list) x: float = None y: float = None lat: float = None @@ -91,7 +67,6 @@ class NodeOptions: server: str = None image: str = None emane: str = None - legacy: bool = False # src, dst binds: list[tuple[str, str]] = field(default_factory=list) # src, dst, unique, delete @@ -176,7 +151,7 @@ class LinkOptions: mer: int = None burst: int = None mburst: int = None - unidirectional: int = None + unidirectional: bool = None key: int = None buffer: int = None @@ -256,7 +231,7 @@ class LinkData: network_id: int = None iface1: InterfaceData = None iface2: InterfaceData = None - options: LinkOptions = LinkOptions() + options: LinkOptions = field(default_factory=LinkOptions) color: str = None source: str = None diff --git a/daemon/core/emulator/distributed.py b/daemon/core/emulator/distributed.py index 1c0d3c922..225e79a70 100644 --- a/daemon/core/emulator/distributed.py +++ b/daemon/core/emulator/distributed.py @@ -19,7 +19,7 @@ from core.errors import CoreCommandError, CoreError from core.executables import get_requirements from core.nodes.interface import GreTap -from core.nodes.network import CoreNetwork, CtrlNet +from core.nodes.network import CoreNetwork logger = logging.getLogger(__name__) @@ -187,8 +187,8 @@ def start(self) -> None: :return: nothing """ mtu = self.session.options.get_int("mtu") - for node in self.session.nodes.values(): - if not isinstance(node, CtrlNet) or node.serverintf is not None: + for node in self.session.control_nodes.values(): + if node.serverintf is not None: continue for name in self.servers: server = self.servers[name] @@ -250,7 +250,7 @@ def create_gre_tunnel( def tunnel_key(self, node1_id: int, node2_id: int) -> int: """ Compute a 32-bit key used to uniquely identify a GRE tunnel. - The hash(n1num), hash(n2num) values are used, so node numbers may be + The hash(node1_id), hash(node2_id) values are used, so node numbers may be None or string values (used for e.g. "ctrlnet"). :param node1_id: node one id diff --git a/daemon/core/emulator/enumerations.py b/daemon/core/emulator/enumerations.py index 96fb919b0..2c431e76e 100644 --- a/daemon/core/emulator/enumerations.py +++ b/daemon/core/emulator/enumerations.py @@ -20,17 +20,6 @@ class MessageFlags(Enum): TTY = 0x40 -class ConfigFlags(Enum): - """ - Configuration flags. - """ - - NONE = 0x00 - REQUEST = 0x01 - UPDATE = 0x02 - RESET = 0x03 - - class NodeTypes(Enum): """ Node types. 
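A sketch of raising one of the renamed alerts, assuming `session` is an active `Session`; `AlertLevels.ERROR` and the `BroadcastManager.send` entry point are assumptions not shown in these hunks, and the source/text values are illustrative:

```python
from core.emulator.data import AlertData
from core.emulator.enumerations import AlertLevels

alert = AlertData(
    node=1,
    session=session.id,
    level=AlertLevels.ERROR,     # assumed member beyond the NONE shown here
    source="example-component",  # illustrative source label
    text="something went wrong",
)
session.broadcast_manager.send(alert)  # assumed BroadcastManager entry point
```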
@@ -45,10 +34,7 @@ class NodeTypes(Enum): TUNNEL = 8 EMANE = 10 TAP_BRIDGE = 11 - PEER_TO_PEER = 12 - CONTROL_NET = 13 DOCKER = 15 - LXC = 16 WIRELESS = 17 PODMAN = 18 @@ -69,11 +55,6 @@ class RegisterTlvs(Enum): WIRELESS = 0x01 MOBILITY = 0x02 - UTILITY = 0x03 - EXECUTE_SERVER = 0x04 - GUI = 0x05 - EMULATION_SERVER = 0x06 - SESSION = 0x0A class ConfigDataTypes(Enum): @@ -123,9 +104,9 @@ def already_collected(self) -> bool: return self.value >= self.DATACOLLECT_STATE.value -class ExceptionLevels(Enum): +class AlertLevels(Enum): """ - Exception levels. + Alert levels. """ NONE = 0 diff --git a/daemon/core/emulator/hooks.py b/daemon/core/emulator/hooks.py index ffeeafeb9..ea0cc0b89 100644 --- a/daemon/core/emulator/hooks.py +++ b/daemon/core/emulator/hooks.py @@ -9,6 +9,62 @@ logger = logging.getLogger(__name__) +def _run_callback(state: EventTypes, hook: Callable[[EventTypes], None]) -> None: + """ + Run a callback hook. + + :param state: state hook is being run in + :param hook: hook to run + :return: nothing + """ + try: + hook(state) + except Exception as e: + name = getattr(callable, "__name__", repr(hook)) + raise CoreError( + f"failure running state({state.name}) " f"hook callback({name}): {e}" + ) + + +def _run_script( + state: EventTypes, + directory: Path, + file_name: str, + data: str, + env: dict[str, str], +) -> None: + """ + Run a script hook. + + :param state: state script is being run in + :param directory: directory to run script in + :param file_name: name of script to run + :param data: script content + :param env: environment to run script with + :return: nothing + """ + logger.info("running hook %s", file_name) + file_path = directory / file_name + log_path = directory / f"{file_name}.log" + try: + with file_path.open("w") as f: + f.write(data) + with log_path.open("w") as f: + args = ["/bin/sh", file_name] + subprocess.check_call( + args, + stdout=f, + stderr=subprocess.STDOUT, + close_fds=True, + cwd=directory, + env=env, + ) + except (OSError, subprocess.CalledProcessError) as e: + raise CoreError( + f"failure running state({state.name}) " f"hook script({file_name}): {e}" + ) + + class HookManager: """ Provides functionality for managing and running script/callback hooks. @@ -19,24 +75,34 @@ def __init__(self) -> None: Create a HookManager instance. """ self.script_hooks: dict[EventTypes, dict[str, str]] = {} - self.callback_hooks: dict[EventTypes, list[Callable[[], None]]] = {} + self.callback_hooks: dict[EventTypes, list[Callable[[EventTypes], None]]] = {} def reset(self) -> None: """ - Clear all current hooks. + Clear all current script hooks only. :return: nothing """ self.script_hooks.clear() - self.callback_hooks.clear() - def add_script_hook(self, state: EventTypes, file_name: str, data: str) -> None: + def add_script_hook( + self, + state: EventTypes, + file_name: str, + data: str, + directory: Path, + env: dict[str, str], + should_run: bool = False, + ) -> None: """ Add a hook script to run for a given state. 
:param state: state to run hook on :param file_name: hook file name :param data: file data + :param directory: directory to run script within + :param env: environment to run script with + :param should_run: True if should run script now, False otherwise :return: nothing """ logger.info("setting state hook: %s - %s", state, file_name) @@ -46,6 +112,8 @@ def add_script_hook(self, state: EventTypes, file_name: str, data: str) -> None: f"adding duplicate state({state.name}) hook script({file_name})" ) state_hooks[file_name] = data + if should_run: + _run_script(state, directory, file_name, data, env) def delete_script_hook(self, state: EventTypes, file_name: str) -> None: """ @@ -64,13 +132,17 @@ def delete_script_hook(self, state: EventTypes, file_name: str) -> None: del state_hooks[file_name] def add_callback_hook( - self, state: EventTypes, hook: Callable[[EventTypes], None] + self, + state: EventTypes, + hook: Callable[[EventTypes], None], + should_run: bool = False, ) -> None: """ Add a hook callback to run for a state. :param state: state to add hook for :param hook: callback to run + :param should_run: True if should run callback now, False otherwise :return: nothing """ hooks = self.callback_hooks.setdefault(state, []) @@ -80,6 +152,8 @@ def add_callback_hook( f"adding duplicate state({state.name}) hook callback({name})" ) hooks.append(hook) + if should_run: + _run_callback(state, hook) def delete_callback_hook( self, state: EventTypes, hook: Callable[[EventTypes], None] @@ -111,35 +185,9 @@ def run_hooks( :param env: environment to run script hooks with :return: nothing """ - for state_hooks in self.script_hooks.get(state, {}): - for file_name, data in state_hooks.items(): - logger.info("running hook %s", file_name) - file_path = directory / file_name - log_path = directory / f"{file_name}.log" - try: - with file_path.open("w") as f: - f.write(data) - with log_path.open("w") as f: - args = ["/bin/sh", file_name] - subprocess.check_call( - args, - stdout=f, - stderr=subprocess.STDOUT, - close_fds=True, - cwd=directory, - env=env, - ) - except (OSError, subprocess.CalledProcessError) as e: - raise CoreError( - f"failure running state({state.name}) " - f"hook script({file_name}): {e}" - ) - for hook in self.callback_hooks.get(state, []): - try: - hook() - except Exception as e: - name = getattr(callable, "__name__", repr(hook)) - raise CoreError( - f"failure running state({state.name}) " - f"hook callback({name}): {e}" - ) + state_hooks = self.script_hooks.get(state, {}) + for file_name, data in state_hooks.items(): + _run_script(state, directory, file_name, data, env) + callback_hooks = self.callback_hooks.get(state, []) + for hook in callback_hooks: + _run_callback(state, hook) diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index 5a6557eec..1f959c418 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -8,7 +8,6 @@ import os import pwd import shutil -import subprocess import sys import tempfile import threading @@ -17,26 +16,21 @@ from typing import Callable, Optional, TypeVar, Union from core import constants, utils -from core.configservice.manager import ConfigServiceManager from core.emane.emanemanager import EmaneManager, EmaneState from core.emane.nodes import EmaneNet +from core.emulator.broadcast import BroadcastManager +from core.emulator.controlnets import ControlNetManager from core.emulator.data import ( - ConfigData, + AlertData, EventData, - ExceptionData, - FileData, InterfaceData, LinkData, LinkOptions, 
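A sketch of registering hooks against the reworked `HookManager`, assuming `session` is an active `Session`; the script name, contents, and environment are illustrative:

```python
from core.emulator.enumerations import EventTypes


def on_runtime(state: EventTypes) -> None:
    print(f"session {session.id} reached {state.name}")


session.hook_manager.add_callback_hook(EventTypes.RUNTIME_STATE, on_runtime)
session.hook_manager.add_script_hook(
    EventTypes.RUNTIME_STATE,
    file_name="runtime-hook.sh",          # illustrative hook script name
    data="#!/bin/sh\necho hook ran\n",
    directory=session.directory,
    env={"SESSION_ID": str(session.id)},  # illustrative environment
    should_run=False,
)
```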
NodeData, ) from core.emulator.distributed import DistributedController -from core.emulator.enumerations import ( - EventTypes, - ExceptionLevels, - MessageFlags, - NodeTypes, -) +from core.emulator.enumerations import AlertLevels, EventTypes, MessageFlags, NodeTypes +from core.emulator.hooks import HookManager from core.emulator.links import CoreLink, LinkManager from core.emulator.sessionconfig import SessionConfig from core.errors import CoreError @@ -46,7 +40,6 @@ from core.nodes.base import CoreNode, CoreNodeBase, NodeBase, NodeOptions, Position from core.nodes.docker import DockerNode from core.nodes.interface import DEFAULT_MTU, CoreInterface -from core.nodes.lxd import LxcNode from core.nodes.network import ( CtrlNet, GreTapBridge, @@ -60,7 +53,7 @@ from core.nodes.podman import PodmanNode from core.nodes.wireless import WirelessNode from core.plugins.sdt import Sdt -from core.services.coreservices import CoreServices +from core.services.manager import ServiceManager from core.xml import corexml, corexmldeployment from core.xml.corexml import CoreXmlReader, CoreXmlWriter @@ -77,10 +70,7 @@ NodeTypes.TUNNEL: TunnelNode, NodeTypes.EMANE: EmaneNet, NodeTypes.TAP_BRIDGE: GreTapBridge, - NodeTypes.PEER_TO_PEER: PtpNet, - NodeTypes.CONTROL_NET: CtrlNet, NodeTypes.DOCKER: DockerNode, - NodeTypes.LXC: LxcNode, NodeTypes.WIRELESS: WirelessNode, NodeTypes.PODMAN: PodmanNode, } @@ -126,25 +116,14 @@ def __init__( # dict of nodes: all nodes and nets self.nodes: dict[int, NodeBase] = {} + self.ptp_nodes: dict[int, PtpNet] = {} + self.control_nodes: dict[int, CtrlNet] = {} self.nodes_lock: threading.Lock = threading.Lock() self.link_manager: LinkManager = LinkManager() # states and hooks handlers self.state: EventTypes = EventTypes.DEFINITION_STATE self.state_time: float = time.monotonic() - self.hooks: dict[EventTypes, list[tuple[str, str]]] = {} - self.state_hooks: dict[EventTypes, list[Callable[[EventTypes], None]]] = {} - self.add_state_hook( - state=EventTypes.RUNTIME_STATE, hook=self.runtime_state_hook - ) - - # handlers for broadcasting information - self.event_handlers: list[Callable[[EventData], None]] = [] - self.exception_handlers: list[Callable[[ExceptionData], None]] = [] - self.node_handlers: list[Callable[[NodeData], None]] = [] - self.link_handlers: list[Callable[[LinkData], None]] = [] - self.file_handlers: list[Callable[[FileData], None]] = [] - self.config_handlers: list[Callable[[ConfigData], None]] = [] # session options/metadata self.options: SessionConfig = SessionConfig(config) @@ -154,15 +133,18 @@ def __init__( self.distributed: DistributedController = DistributedController(self) # initialize session feature helpers + self.control_net_manager: ControlNetManager = ControlNetManager(self) + self.broadcast_manager: BroadcastManager = BroadcastManager() + self.hook_manager: HookManager = HookManager() + self.hook_manager.add_callback_hook( + EventTypes.RUNTIME_STATE, self.runtime_state_hook + ) self.location: GeoLocation = GeoLocation() self.mobility: MobilityManager = MobilityManager(self) - self.services: CoreServices = CoreServices(self) self.emane: EmaneManager = EmaneManager(self) + self.service_manager: Optional[ServiceManager] = None self.sdt: Sdt = Sdt(self) - # config services - self.service_manager: Optional[ConfigServiceManager] = None - @classmethod def get_node_class(cls, _type: NodeTypes) -> type[NodeBase]: """ @@ -361,7 +343,7 @@ def _add_wired_link( iface1 = node1.create_iface(iface1_data, options) iface2 = node2.create_iface(iface2_data, options) # join and 
attach to ptp bridge - ptp = self.create_node(PtpNet, self.state.should_start()) + ptp = self.create_ptp() ptp.attach(iface1) ptp.attach(iface2) # track link @@ -455,25 +437,22 @@ def update_link( f"there is no link for node({node1.name}):interface({iface1_id}) " f"node({node2.name}):interface({iface2_id})" ) - if iface1: - iface1.options.update(options) - iface1.set_config() - if iface2 and not options.unidirectional: - iface2.options.update(options) - iface2.set_config() + if iface1 and options: + iface1.update_options(options) + if iface2 and options and not options.unidirectional: + iface2.update_options(options) - def next_node_id(self) -> int: + def next_node_id(self, start_id: int = 1) -> int: """ Find the next valid node id, starting from 1. :return: next node id """ - _id = 1 while True: - if _id not in self.nodes: + if start_id not in self.nodes: break - _id += 1 - return _id + start_id += 1 + return start_id def add_node( self, @@ -517,12 +496,13 @@ def add_node( self.set_node_geo(node, position.lon, position.lat, position.alt) else: self.set_node_pos(node, position.x, position.y) - # setup default wlan + # setup default wlan and startup if already running if isinstance(node, WlanNode): self.mobility.set_model_config(node.id, BasicRangeModel.name) + if self.is_running(): + self.mobility.startup([node.id]) # boot core nodes after runtime if self.is_running() and isinstance(node, CoreNode): - self.add_remove_control_iface(node, remove=False) self.boot_node(node) self.sdt.add_node(node) return node @@ -574,29 +554,19 @@ def save_xml(self, file_path: Path) -> None: """ CoreXmlWriter(self).write(file_path) - def add_hook( - self, state: EventTypes, file_name: str, data: str, src_name: str = None - ) -> None: + def add_hook(self, state: EventTypes, file_name: str, data: str) -> None: """ Store a hook from a received file message. :param state: when to run hook :param file_name: file name for hook - :param data: hook data - :param src_name: source name + :param data: file data :return: nothing """ - logger.info( - "setting state hook: %s - %s source(%s)", state, file_name, src_name + should_run = self.state == state + self.hook_manager.add_script_hook( + state, file_name, data, self.directory, self.get_environment(), should_run ) - hook = file_name, data - state_hooks = self.hooks.setdefault(state, []) - state_hooks.append(hook) - - # immediately run a hook if it is in the current state - if self.state == state: - logger.info("immediately running new state hook") - self.run_hook(hook) def clear(self) -> None: """ @@ -608,13 +578,13 @@ def clear(self) -> None: self.delete_nodes() self.link_manager.reset() self.distributed.shutdown() - self.hooks.clear() + self.hook_manager.reset() self.emane.reset() self.emane.config_reset() self.location.reset() - self.services.reset() self.mobility.config_reset() self.link_colors.clear() + self.control_net_manager.remove_nets() def set_location(self, lat: float, lon: float, alt: float, scale: float) -> None: """ @@ -647,25 +617,54 @@ def shutdown(self) -> None: if not preserve: shutil.rmtree(self.directory, ignore_errors=True) - def broadcast_event(self, event_data: EventData) -> None: + def broadcast_event( + self, + event_type: EventTypes, + *, + node_id: int = None, + name: str = None, + data: str = None, + ) -> None: """ Handle event data that should be provided to event handler. 
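A short sketch of the reworked event broadcast, assuming `session` is an existing `Session`; the keyword values shown are hypothetical:

```python
from core.emulator.enumerations import EventTypes

# the session now builds the EventData itself and hands it to the
# BroadcastManager, instead of callers constructing EventData directly
session.broadcast_event(
    EventTypes.RUNTIME_STATE,
    node_id=5,             # hypothetical node id
    name="example-event",  # hypothetical event name
    data="example data",
)
```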
- :param event_data: event data to send out + :param event_type: type of event to send + :param node_id: associated node id, default is None + :param name: name of event, default is None + :param data: data for event, default is None :return: nothing """ - for handler in self.event_handlers: - handler(event_data) + event_data = EventData( + node=node_id, + event_type=event_type, + name=name, + data=data, + time=str(time.monotonic()), + session=self.id, + ) + self.broadcast_manager.send(event_data) - def broadcast_exception(self, exception_data: ExceptionData) -> None: + def broadcast_alert( + self, level: AlertLevels, source: str, text: str, node_id: int = None + ) -> None: """ - Handle exception data that should be provided to exception handlers. + Generate and broadcast an alert event. - :param exception_data: exception data to send out + :param level: alert level + :param source: source name + :param text: alert message + :param node_id: node related to alert, defaults to None :return: nothing """ - for handler in self.exception_handlers: - handler(exception_data) + alert_data = AlertData( + node=node_id, + session=self.id, + level=level, + source=source, + date=time.ctime(), + text=text, + ) + self.broadcast_manager.send(alert_data) def broadcast_node( self, @@ -682,28 +681,7 @@ def broadcast_node( :return: nothing """ node_data = NodeData(node=node, message_type=message_type, source=source) - for handler in self.node_handlers: - handler(node_data) - - def broadcast_file(self, file_data: FileData) -> None: - """ - Handle file data that should be provided to file handlers. - - :param file_data: file data to send out - :return: nothing - """ - for handler in self.file_handlers: - handler(file_data) - - def broadcast_config(self, config_data: ConfigData) -> None: - """ - Handle config data that should be provided to config handlers. - - :param config_data: config data to send out - :return: nothing - """ - for handler in self.config_handlers: - handler(config_data) + self.broadcast_manager.send(node_data) def broadcast_link(self, link_data: LinkData) -> None: """ @@ -712,8 +690,7 @@ def broadcast_link(self, link_data: LinkData) -> None: :param link_data: link data to send out :return: nothing """ - for handler in self.link_handlers: - handler(link_data) + self.broadcast_manager.send(link_data) def set_state(self, state: EventTypes, send_event: bool = False) -> None: """ @@ -728,68 +705,9 @@ def set_state(self, state: EventTypes, send_event: bool = False) -> None: self.state = state self.state_time = time.monotonic() logger.info("changing session(%s) to state %s", self.id, state.name) - self.run_hooks(state) - self.run_state_hooks(state) + self.hook_manager.run_hooks(state, self.directory, self.get_environment()) if send_event: - event_data = EventData(event_type=state, time=str(time.monotonic())) - self.broadcast_event(event_data) - - def run_hooks(self, state: EventTypes) -> None: - """ - Run hook scripts upon changing states. If hooks is not specified, run all hooks - in the given state. - - :param state: state to run hooks for - :return: nothing - """ - hooks = self.hooks.get(state, []) - for hook in hooks: - self.run_hook(hook) - - def run_hook(self, hook: tuple[str, str]) -> None: - """ - Run a hook. 
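What was previously `Session.exception(...)` is now `broadcast_alert(...)`; a minimal sketch, assuming `session` is an existing `Session` and using illustrative values:

```python
from core.emulator.enumerations import AlertLevels

# builds an AlertData internally and dispatches it via BroadcastManager
session.broadcast_alert(
    AlertLevels.ERROR,
    source="Session.example",     # hypothetical source label
    text="example alert message",
    node_id=3,                    # hypothetical node id
)
```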
- - :param hook: hook to run - :return: nothing - """ - file_name, data = hook - logger.info("running hook %s", file_name) - file_path = self.directory / file_name - log_path = self.directory / f"{file_name}.log" - try: - with file_path.open("w") as f: - f.write(data) - with log_path.open("w") as f: - args = ["/bin/sh", file_name] - subprocess.check_call( - args, - stdout=f, - stderr=subprocess.STDOUT, - close_fds=True, - cwd=self.directory, - env=self.get_environment(), - ) - except (OSError, subprocess.CalledProcessError): - logger.exception("error running hook: %s", file_path) - - def run_state_hooks(self, state: EventTypes) -> None: - """ - Run state hooks. - - :param state: state to run hooks for - :return: nothing - """ - for hook in self.state_hooks.get(state, []): - self.run_state_hook(state, hook) - - def run_state_hook(self, state: EventTypes, hook: Callable[[EventTypes], None]): - try: - hook(state) - except Exception: - message = f"exception occurred when running {state.name} state hook: {hook}" - logger.exception(message) - self.exception(ExceptionLevels.ERROR, "Session.run_state_hooks", message) + self.broadcast_event(state) def add_state_hook( self, state: EventTypes, hook: Callable[[EventTypes], None] @@ -801,12 +719,8 @@ def add_state_hook( :param hook: hook callback for the state :return: nothing """ - hooks = self.state_hooks.setdefault(state, []) - if hook in hooks: - raise CoreError("attempting to add duplicate state hook") - hooks.append(hook) - if self.state == state: - self.run_state_hook(state, hook) + should_run = self.state == state + self.hook_manager.add_callback_hook(state, hook, should_run) def del_state_hook( self, state: EventTypes, hook: Callable[[EventTypes], None] @@ -818,9 +732,7 @@ def del_state_hook( :param hook: hook to delete :return: nothing """ - hooks = self.state_hooks.get(state, []) - if hook in hooks: - hooks.remove(hook) + self.hook_manager.delete_callback_hook(state, hook) def runtime_state_hook(self, _state: EventTypes) -> None: """ @@ -856,7 +768,7 @@ def get_environment(self, state: bool = True) -> dict[str, str]: if state: env["SESSION_STATE"] = str(self.state) # try reading and merging optional environments from: - # /etc/core/environment + # /opt/core/environment # /home/user/.coregui/environment # /tmp/pycore./environment core_env_path = constants.CORE_CONF_DIR / "environment" @@ -891,6 +803,66 @@ def set_user(self, user: str) -> None: except OSError: logger.exception("failed to set permission on %s", self.directory) + def create_ptp(self) -> PtpNet: + """ + Create node used to link wired nodes together. + + :return: created node + """ + with self.nodes_lock: + # get next ptp node id for creation + _id = 1 + while _id in self.ptp_nodes: + _id += 1 + node = PtpNet(self, _id=_id) + self.ptp_nodes[node.id] = node + logger.debug( + "created ptp node(%s) name(%s) start(%s)", + node.id, + node.name, + self.state.should_start(), + ) + if self.state.should_start(): + node.startup() + return node + + def create_control_net( + self, + _id: int, + prefix: str, + updown_script: Optional[str], + server_iface: Optional[str], + ) -> CtrlNet: + """ + Create a control net node, used to provide a common network between + the host running CORE and created nodes. 
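Control nets are now created explicitly through `create_control_net` (normally driven by the new `ControlNetManager`); a simplified sketch with a hypothetical prefix, again assuming an existing `session`:

```python
# id 0 is the primary control net; the prefix, updown script, and server
# interface values here are illustrative, not defaults taken from the diff
control_net = session.create_control_net(0, "172.16.0.0/24", None, None)
```

When the session state calls for it (`state.should_start()`), the bridge is started immediately after creation.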
+ + :param _id: id of the control net to create + :param prefix: network prefix to create control net with + :param updown_script: updown script for the control net + :param server_iface: interface name to use for control net + :return: created control net + """ + with self.nodes_lock: + if _id in self.control_nodes: + raise CoreError(f"control net({_id}) already exists") + options = CtrlNet.create_options() + options.prefix = prefix + options.updown_script = updown_script + options.serverintf = server_iface + control_net = CtrlNet(self, _id, options=options) + self.control_nodes[_id] = control_net + logger.info( + "created control net(%s) prefix(%s) updown(%s) server interface(%s)", + _id, + prefix, + updown_script, + server_iface, + ) + if self.state.should_start(): + control_net.startup() + return control_net + def create_node( self, _class: type[NT], @@ -913,6 +885,7 @@ def create_node( :raises core.CoreError: when id of the node to create already exists """ with self.nodes_lock: + _id = _id if _id is not None else self.next_node_id() node = _class(self, _id=_id, name=name, server=server, options=options) if node.id in self.nodes: node.shutdown() @@ -977,32 +950,13 @@ def delete_nodes(self) -> None: _, node = self.nodes.popitem() nodes_ids.append(node.id) funcs.append((node.shutdown, [], {})) + while self.ptp_nodes: + _, node = self.ptp_nodes.popitem() + funcs.append((node.shutdown, [], {})) utils.threadpool(funcs) for node_id in nodes_ids: self.sdt.delete_node(node_id) - def exception( - self, level: ExceptionLevels, source: str, text: str, node_id: int = None - ) -> None: - """ - Generate and broadcast an exception event. - - :param level: exception level - :param source: source name - :param text: exception message - :param node_id: node related to exception - :return: nothing - """ - exception_data = ExceptionData( - node=node_id, - session=self.id, - level=level, - source=source, - date=time.ctime(), - text=text, - ) - self.broadcast_exception(exception_data) - def instantiate(self) -> list[Exception]: """ We have entered the instantiation state, invoke startup methods @@ -1014,10 +968,6 @@ def instantiate(self) -> list[Exception]: if self.is_running(): logger.warning("ignoring instantiate, already in runtime state") return [] - # create control net interfaces and network tunnels - # which need to exist for emane to sync on location events - # in distributed scenarios - self.add_remove_control_net(0, remove=False) # initialize distributed tunnels self.distributed.start() # instantiate will be invoked again upon emane configure @@ -1032,8 +982,7 @@ def instantiate(self) -> list[Exception]: node.post_startup() self.mobility.startup() # notify listeners that instantiation is complete - event = EventData(event_type=EventTypes.INSTANTIATION_COMPLETE) - self.broadcast_event(event) + self.broadcast_event(EventTypes.INSTANTIATION_COMPLETE) # startup event loop self.event_loop.run() self.set_state(EventTypes.RUNTIME_STATE, send_event=True) @@ -1049,11 +998,10 @@ def get_node_count(self) -> int: with self.nodes_lock: count = 0 for node in self.nodes.values(): - is_p2p_ctrlnet = isinstance(node, (PtpNet, CtrlNet)) is_tap = isinstance(node, GreTapBridge) and not isinstance( node, TunnelNode ) - if is_p2p_ctrlnet or is_tap: + if is_tap: continue count += 1 return count @@ -1081,20 +1029,14 @@ def data_collect(self) -> None: funcs = [] for node in self.nodes.values(): if isinstance(node, CoreNodeBase) and node.up: - args = (node,) - funcs.append((self.services.stop_services, args, {})) - 
funcs.append((node.stop_config_services, (), {})) + funcs.append((node.stop_services, (), {})) utils.threadpool(funcs) # shutdown emane self.emane.shutdown() # update control interface hosts - self.update_control_iface_hosts(remove=True) - - # remove all four possible control networks - for i in range(4): - self.add_remove_control_net(i, remove=True) + self.control_net_manager.clear_etc_hosts() def short_session_id(self) -> str: """ @@ -1115,13 +1057,13 @@ def boot_node(self, node: CoreNode) -> None: :return: nothing """ logger.info( - "booting node(%s): config services(%s) services(%s)", + "booting node(%s): services(%s)", node.name, - ", ".join(node.config_services.keys()), - ", ".join(x.name for x in node.services), + ", ".join(node.services.keys()), ) - self.services.boot_services(node) - node.start_config_services() + self.control_net_manager.setup_ifaces(node) + with self.nodes_lock: + node.start_services() def boot_nodes(self) -> list[Exception]: """ @@ -1131,236 +1073,19 @@ def boot_nodes(self) -> list[Exception]: :return: service boot exceptions """ - with self.nodes_lock: - funcs = [] - start = time.monotonic() - for node in self.nodes.values(): - if isinstance(node, CoreNode): - self.add_remove_control_iface(node, remove=False) - funcs.append((self.boot_node, (node,), {})) - results, exceptions = utils.threadpool(funcs) - total = time.monotonic() - start - logger.debug("boot run time: %s", total) + funcs = [] + start = time.monotonic() + self.control_net_manager.setup_nets() + for node in self.nodes.values(): + if isinstance(node, CoreNode): + funcs.append((self.boot_node, (node,), {})) + results, exceptions = utils.threadpool(funcs) + total = time.monotonic() - start + logger.debug("boot run time: %s", total) if not exceptions: - self.update_control_iface_hosts() + self.control_net_manager.update_etc_hosts() return exceptions - def get_control_net_prefixes(self) -> list[str]: - """ - Retrieve control net prefixes. - - :return: control net prefix list - """ - p = self.options.get("controlnet") - p0 = self.options.get("controlnet0") - p1 = self.options.get("controlnet1") - p2 = self.options.get("controlnet2") - p3 = self.options.get("controlnet3") - if not p0 and p: - p0 = p - return [p0, p1, p2, p3] - - def get_control_net_server_ifaces(self) -> list[str]: - """ - Retrieve control net server interfaces. - - :return: list of control net server interfaces - """ - d0 = self.options.get("controlnetif0") - if d0: - logger.error("controlnet0 cannot be assigned with a host interface") - d1 = self.options.get("controlnetif1") - d2 = self.options.get("controlnetif2") - d3 = self.options.get("controlnetif3") - return [None, d1, d2, d3] - - def get_control_net_index(self, dev: str) -> int: - """ - Retrieve control net index. - - :param dev: device to get control net index for - :return: control net index, -1 otherwise - """ - if dev[0:4] == "ctrl" and int(dev[4]) in [0, 1, 2, 3]: - index = int(dev[4]) - if index == 0: - return index - if index < 4 and self.get_control_net_prefixes()[index] is not None: - return index - return -1 - - def get_control_net(self, net_index: int) -> CtrlNet: - """ - Retrieve a control net based on index. - - :param net_index: control net index - :return: control net - :raises CoreError: when control net is not found - """ - return self.get_node(CTRL_NET_ID + net_index, CtrlNet) - - def add_remove_control_net( - self, net_index: int, remove: bool = False, conf_required: bool = True - ) -> Optional[CtrlNet]: - """ - Create a control network bridge as necessary. 
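With the control-net logic moved into `ControlNetManager`, the boot path reduces to the steps below; a simplified, serial sketch of what `boot_nodes`/`boot_node` now do (the real code fans the per-node work out over a thread pool), assuming an existing `session`:

```python
from core.nodes.base import CoreNode

# provision configured control net bridges once, up front
session.control_net_manager.setup_nets()
for node in session.nodes.values():
    if isinstance(node, CoreNode):
        # attach control interfaces, then start services
        # (formerly boot_services/start_config_services)
        session.control_net_manager.setup_ifaces(node)
        node.start_services()
# publish control interface addresses to /etc/hosts when enabled
session.control_net_manager.update_etc_hosts()
```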
- When the remove flag is True, remove the bridge that connects control - interfaces. The conf_reqd flag, when False, causes a control network - bridge to be added even if one has not been configured. - - :param net_index: network index - :param remove: flag to check if it should be removed - :param conf_required: flag to check if conf is required - :return: control net node - """ - logger.debug( - "add/remove control net: index(%s) remove(%s) conf_required(%s)", - net_index, - remove, - conf_required, - ) - prefix_spec_list = self.get_control_net_prefixes() - prefix_spec = prefix_spec_list[net_index] - if not prefix_spec: - if conf_required: - # no controlnet needed - return None - else: - prefix_spec = CtrlNet.DEFAULT_PREFIX_LIST[net_index] - logger.debug("prefix spec: %s", prefix_spec) - server_iface = self.get_control_net_server_ifaces()[net_index] - - # return any existing controlnet bridge - try: - control_net = self.get_control_net(net_index) - if remove: - self.delete_node(control_net.id) - return None - return control_net - except CoreError: - if remove: - return None - - # build a new controlnet bridge - _id = CTRL_NET_ID + net_index - - # use the updown script for control net 0 only. - updown_script = None - if net_index == 0: - updown_script = self.options.get("controlnet_updown_script") or None - if not updown_script: - logger.debug("controlnet updown script not configured") - - prefixes = prefix_spec.split() - if len(prefixes) > 1: - # a list of per-host prefixes is provided - try: - # split first (master) entry into server and prefix - prefix = prefixes[0].split(":", 1)[1] - except IndexError: - # no server name. possibly only one server - prefix = prefixes[0] - else: - prefix = prefixes[0] - - logger.info( - "controlnet(%s) prefix(%s) updown(%s) serverintf(%s)", - _id, - prefix, - updown_script, - server_iface, - ) - options = CtrlNet.create_options() - options.prefix = prefix - options.updown_script = updown_script - options.serverintf = server_iface - control_net = self.create_node(CtrlNet, False, _id, options=options) - control_net.brname = f"ctrl{net_index}.{self.short_session_id()}" - control_net.startup() - return control_net - - def add_remove_control_iface( - self, - node: CoreNode, - net_index: int = 0, - remove: bool = False, - conf_required: bool = True, - ) -> None: - """ - Add a control interface to a node when a 'controlnet' prefix is - listed in the config file or session options. Uses - addremovectrlnet() to build or remove the control bridge. - If conf_reqd is False, the control network may be built even - when the user has not configured one (e.g. for EMANE.) - - :param node: node to add or remove control interface - :param net_index: network index - :param remove: flag to check if it should be removed - :param conf_required: flag to check if conf is required - :return: nothing - """ - control_net = self.add_remove_control_net(net_index, remove, conf_required) - if not control_net: - return - if not node: - return - # ctrl# already exists - if node.ifaces.get(control_net.CTRLIF_IDX_BASE + net_index): - return - try: - ip4 = control_net.prefix[node.id] - ip4_mask = control_net.prefix.prefixlen - iface_data = InterfaceData( - id=control_net.CTRLIF_IDX_BASE + net_index, - name=f"ctrl{net_index}", - mac=utils.random_mac(), - ip4=ip4, - ip4_mask=ip4_mask, - mtu=DEFAULT_MTU, - ) - iface = node.create_iface(iface_data) - control_net.attach(iface) - iface.control = True - except ValueError: - msg = f"Control interface not added to node {node.id}. 
" - msg += f"Invalid control network prefix ({control_net.prefix}). " - msg += "A longer prefix length may be required for this many nodes." - logger.exception(msg) - - def update_control_iface_hosts( - self, net_index: int = 0, remove: bool = False - ) -> None: - """ - Add the IP addresses of control interfaces to the /etc/hosts file. - - :param net_index: network index to update - :param remove: flag to check if it should be removed - :return: nothing - """ - if not self.options.get_bool("update_etc_hosts", False): - return - - try: - control_net = self.get_control_net(net_index) - except CoreError: - logger.exception("error retrieving control net node") - return - - header = f"CORE session {self.id} host entries" - if remove: - logger.info("Removing /etc/hosts file entries.") - utils.file_demunge("/etc/hosts", header) - return - - entries = [] - for iface in control_net.get_ifaces(): - name = iface.node.name - for ip in iface.ips(): - entries.append(f"{ip.ip} {name}") - - logger.info("Adding %d /etc/hosts file entries.", len(entries)) - utils.file_munge("/etc/hosts", header, "\n".join(entries) + "\n") - def runtime(self) -> float: """ Return the current time we have been in the runtime state, or zero @@ -1451,3 +1176,11 @@ def is_running(self) -> bool: :return: True if in the runtime state, False otherwise """ return self.state == EventTypes.RUNTIME_STATE + + def parse_options(self) -> None: + """ + Update configurations from latest session options. + + :return: nothing + """ + self.control_net_manager.parse_options(self.options) diff --git a/daemon/core/emulator/sessionconfig.py b/daemon/core/emulator/sessionconfig.py index b6d5bcd32..6691ba834 100644 --- a/daemon/core/emulator/sessionconfig.py +++ b/daemon/core/emulator/sessionconfig.py @@ -33,6 +33,7 @@ class SessionConfig: ), ConfigInt(id="link_timeout", default="4", label="EMANE Link Timeout (sec)"), ConfigInt(id="mtu", default="0", label="MTU for All Devices"), + ConfigBool(id="checksums", default="0", label="Enable Eth Checksums?"), ] def __init__(self, config: dict[str, str] = None) -> None: diff --git a/daemon/core/gui/appconfig.py b/daemon/core/gui/appconfig.py index 0a5ae76b5..e26407eed 100644 --- a/daemon/core/gui/appconfig.py +++ b/daemon/core/gui/appconfig.py @@ -72,6 +72,15 @@ def __init__(self, name: str, cmd: str) -> None: self.cmd: str = cmd +class NodeCommand(yaml.YAMLObject): + yaml_tag: str = "!NodeCommand" + yaml_loader: type[yaml.SafeLoader] = yaml.SafeLoader + + def __init__(self, name: str, cmd: str) -> None: + self.name: str = name + self.cmd: str = cmd + + class PreferencesConfig(yaml.YAMLObject): yaml_tag: str = "!PreferencesConfig" yaml_loader: type[yaml.SafeLoader] = yaml.SafeLoader @@ -149,6 +158,7 @@ def __init__( scale: float = 1.0, ips: IpConfigs = None, mac: str = "00:00:00:aa:00:00", + node_commands: list[NodeCommand] = None, ) -> None: if preferences is None: preferences = PreferencesConfig() @@ -173,6 +183,12 @@ def __init__( ips = IpConfigs() self.ips: IpConfigs = ips self.mac: str = mac + self.node_commands: list[NodeCommand] = node_commands or [] + + @classmethod + def from_yaml(cls, loader, node): + values = loader.construct_mapping(node, deep=True) + return cls(**values) def copy_files(current_path: Path, new_path: Path) -> None: diff --git a/daemon/core/gui/coreclient.py b/daemon/core/gui/coreclient.py index da2ca6d61..2924bd643 100644 --- a/daemon/core/gui/coreclient.py +++ b/daemon/core/gui/coreclient.py @@ -13,26 +13,23 @@ import grpc -from core.api.grpc import client, configservices_pb2, 
core_pb2 +from core.api.grpc import client, core_pb2 from core.api.grpc.wrappers import ( + AlertEvent, ConfigOption, - ConfigService, - ConfigServiceDefaults, EmaneModelConfig, Event, - ExceptionEvent, Link, LinkEvent, LinkType, MessageType, Node, NodeEvent, - NodeServiceData, NodeType, Position, Server, - ServiceConfig, - ServiceFileConfig, + Service, + ServiceDefaults, Session, SessionLocation, SessionState, @@ -53,8 +50,13 @@ if TYPE_CHECKING: from core.gui.app import Application -GUI_SOURCE = "gui" -CPU_USAGE_DELAY = 3 +GUI_SOURCE: str = "gui" +CPU_USAGE_DELAY: int = 3 +MOBILITY_ACTIONS: dict[int, str] = { + 7: "PLAY", + 8: "STOP", + 9: "PAUSE", +} def to_dict(config: dict[str, ConfigOption]) -> dict[str, str]: @@ -76,15 +78,15 @@ def __init__(self, app: "Application", proxy: bool) -> None: self.show_throughputs: tk.BooleanVar = tk.BooleanVar(value=False) # global service settings - self.services: dict[str, set[str]] = {} - self.config_services_groups: dict[str, set[str]] = {} - self.config_services: dict[str, ConfigService] = {} + self.services_groups: dict[str, set[str]] = {} + self.services: dict[str, Service] = {} # loaded configuration data self.emane_models: list[str] = [] self.servers: dict[str, CoreServer] = {} self.custom_nodes: dict[str, NodeDraw] = {} self.custom_observers: dict[str, Observer] = {} + self.node_commands: dict[str, str] = {} self.read_config() # helpers @@ -153,6 +155,9 @@ def read_config(self) -> None: # read observers for observer in self.app.guiconfig.observers: self.custom_observers[observer.name] = observer + # read node commands + for node_cmd in self.app.guiconfig.node_commands: + self.node_commands[node_cmd.name] = node_cmd.cmd def handle_events(self, event: Event) -> None: if not self.session or event.source == GUI_SOURCE: @@ -167,11 +172,21 @@ def handle_events(self, event: Event) -> None: if event.link_event: self.app.after(0, self.handle_link_event, event.link_event) elif event.session_event: - logger.info("session event: %s", event) session_event = event.session_event if session_event.event <= SessionState.SHUTDOWN.value: self.session.state = SessionState(session_event.event) - elif session_event.event in {7, 8, 9}: + logger.info( + "session(%s) state(%s)", + event.session_id, + self.session.state, + ) + elif session_event.event in MOBILITY_ACTIONS: + action = MOBILITY_ACTIONS[session_event.event] + logger.info( + "session(%s) mobility action(%s)", + event.session_id, + action, + ) node_id = session_event.node_id dialog = self.mobility_players.get(node_id) if dialog: @@ -181,14 +196,14 @@ def handle_events(self, event: Event) -> None: dialog.set_stop() else: dialog.set_pause() + elif session_event.event == 15: + logger.info("session(%s) instantiation complete", event.session_id) else: logger.warning("unknown session event: %s", session_event) elif event.node_event: self.app.after(0, self.handle_node_event, event.node_event) - elif event.config_event: - logger.info("config event: %s", event) - elif event.exception_event: - self.handle_exception_event(event.exception_event) + elif event.alert_event: + self.handle_alert_event(event.alert_event) else: logger.info("unhandled event: %s", event) @@ -218,7 +233,9 @@ def handle_link_event(self, event: LinkEvent) -> None: logger.warning("unknown link event: %s", event) else: if event.message_type == MessageType.ADD: - self.app.manager.add_wired_edge(canvas_node1, canvas_node2, event.link) + self.app.manager.add_wired_edge( + canvas_node1, canvas_node2, event.link, organize=True + ) elif event.message_type 
== MessageType.DELETE: self.app.manager.delete_wired_edge(event.link) elif event.message_type == MessageType.NONE: @@ -291,8 +308,8 @@ def handle_throughputs(self, event: ThroughputsEvent) -> None: def handle_cpu_event(self, event: core_pb2.CpuUsageEvent) -> None: self.app.after(0, self.app.statusbar.set_cpu, event.usage) - def handle_exception_event(self, event: ExceptionEvent) -> None: - logger.info("exception event: %s", event) + def handle_alert_event(self, event: AlertEvent) -> None: + logger.info("alert event: %s", event) self.app.statusbar.add_alert(event) def update_session_title(self) -> None: @@ -358,17 +375,12 @@ def setup(self, session_id: int = None) -> None: """ try: self.client.connect() - # get current core configurations services/config services + # get current core configurations core_config = self.client.get_config() self.emane_models = sorted(core_config.emane_models) for service in core_config.services: - group_services = self.services.setdefault(service.group, set()) - group_services.add(service.name) - for service in core_config.config_services: - self.config_services[service.name] = service - group_services = self.config_services_groups.setdefault( - service.group, set() - ) + self.services[service.name] = service + group_services = self.services_groups.setdefault(service.group, set()) group_services.add(service.name) # join provided session, create new session, or show dialog to select an # existing session @@ -560,30 +572,6 @@ def open_xml(self, file_path: Path) -> None: except grpc.RpcError as e: self.app.show_grpc_exception("Open XML Error", e) - def get_node_service(self, node_id: int, service_name: str) -> NodeServiceData: - node_service = self.client.get_node_service( - self.session.id, node_id, service_name - ) - logger.debug( - "get node(%s) service(%s): %s", node_id, service_name, node_service - ) - return node_service - - def get_node_service_file( - self, node_id: int, service_name: str, file_name: str - ) -> str: - data = self.client.get_node_service_file( - self.session.id, node_id, service_name, file_name - ) - logger.debug( - "get service file for node(%s), service: %s, file: %s, data: %s", - node_id, - service_name, - file_name, - data, - ) - return data - def close(self) -> None: """ Clean ups when done using grpc @@ -638,12 +626,12 @@ def create_node( ) if nutils.is_custom(node): services = nutils.get_custom_services(self.app.guiconfig, model) - node.config_services = set(services) + node.services = set(services) # assign default services to CORE node else: services = self.session.default_services.get(model) if services: - node.config_services = set(services) + node.services = set(services) logger.info( "add node(%s) to session(%s), coordinates(%s, %s)", node.name, @@ -663,6 +651,8 @@ def deleted_canvas_nodes(self, canvas_nodes: list[CanvasNode]) -> None: node = canvas_node.core_node del self.canvas_nodes[node.id] del self.session.nodes[node.id] + if nutils.is_wireless(node): + self.ifaces_manager.clear_wireless_nets(node.id) def deleted_canvas_edges(self, edges: Iterable[CanvasEdge]) -> None: links = [] @@ -718,70 +708,11 @@ def get_emane_model_configs(self) -> list[EmaneModelConfig]: configs.append(config) return configs - def get_service_configs(self) -> list[ServiceConfig]: - configs = [] - for node in self.session.nodes.values(): - if not nutils.is_container(node): - continue - if not node.service_configs: - continue - for name, config in node.service_configs.items(): - config = ServiceConfig( - node_id=node.id, - service=name, - 
files=config.configs, - directories=config.dirs, - startup=config.startup, - validate=config.validate, - shutdown=config.shutdown, - ) - configs.append(config) - return configs - - def get_service_file_configs(self) -> list[ServiceFileConfig]: - configs = [] - for node in self.session.nodes.values(): - if not nutils.is_container(node): - continue - if not node.service_file_configs: - continue - for service, file_configs in node.service_file_configs.items(): - for file, data in file_configs.items(): - config = ServiceFileConfig(node.id, service, file, data) - configs.append(config) - return configs - - def get_config_service_rendered(self, node_id: int, name: str) -> dict[str, str]: - return self.client.get_config_service_rendered(self.session.id, node_id, name) + def get_service_rendered(self, node_id: int, name: str) -> dict[str, str]: + return self.client.get_service_rendered(self.session.id, node_id, name) - def get_config_service_defaults( - self, node_id: int, name: str - ) -> ConfigServiceDefaults: - return self.client.get_config_service_defaults(self.session.id, node_id, name) - - def get_config_service_configs_proto( - self, - ) -> list[configservices_pb2.ConfigServiceConfig]: - config_service_protos = [] - for node in self.session.nodes.values(): - if not nutils.is_container(node): - continue - if not node.config_service_configs: - continue - for name, service_config in node.config_service_configs.items(): - config_proto = configservices_pb2.ConfigServiceConfig( - node_id=node.id, - name=name, - templates=service_config.templates, - config=service_config.config, - ) - config_service_protos.append(config_proto) - return config_service_protos - - def run(self, node_id: int) -> str: - logger.info("running node(%s) cmd: %s", node_id, self.observer) - _, output = self.client.node_command(self.session.id, node_id, self.observer) - return output + def get_service_defaults(self, node_id: int, name: str) -> ServiceDefaults: + return self.client.get_service_defaults(self.session.id, node_id, name) def get_wlan_config(self, node_id: int) -> dict[str, ConfigOption]: config = self.client.get_wlan_config(self.session.id, node_id) @@ -838,3 +769,7 @@ def edit_link(self, link: Link) -> None: result = self.client.edit_link(self.session.id, link, source=GUI_SOURCE) if not result: logger.error("error editing link: %s", link) + + def run_cmd(self, node_id: int, cmd: str) -> str: + _, output = self.client.node_command(self.session.id, node_id, cmd) + return output diff --git a/daemon/core/gui/data/icons/lxc.png b/daemon/core/gui/data/icons/lxc.png deleted file mode 100644 index b944b231d..000000000 Binary files a/daemon/core/gui/data/icons/lxc.png and /dev/null differ diff --git a/daemon/core/gui/dialogs/alerts.py b/daemon/core/gui/dialogs/alerts.py index b13f0797d..20cdbc60c 100644 --- a/daemon/core/gui/dialogs/alerts.py +++ b/daemon/core/gui/dialogs/alerts.py @@ -5,7 +5,7 @@ from tkinter import ttk from typing import TYPE_CHECKING, Optional -from core.api.grpc.wrappers import ExceptionEvent, ExceptionLevel +from core.api.grpc.wrappers import AlertEvent, AlertLevel from core.gui.dialogs.dialog import Dialog from core.gui.themes import PADX, PADY from core.gui.widgets import CodeText @@ -19,7 +19,7 @@ def __init__(self, app: "Application") -> None: super().__init__(app, "Alerts") self.tree: Optional[ttk.Treeview] = None self.codetext: Optional[CodeText] = None - self.alarm_map: dict[int, ExceptionEvent] = {} + self.alarm_map: dict[int, AlertEvent] = {} self.draw() def draw(self) -> None: @@ -67,13 
+67,13 @@ def draw(self) -> None: ) self.alarm_map[insert_id] = exception - error_name = ExceptionLevel.ERROR.name + error_name = AlertLevel.ERROR.name self.tree.tag_configure(error_name, background="#ff6666") - fatal_name = ExceptionLevel.FATAL.name + fatal_name = AlertLevel.FATAL.name self.tree.tag_configure(fatal_name, background="#d9d9d9") - warning_name = ExceptionLevel.WARNING.name + warning_name = AlertLevel.WARNING.name self.tree.tag_configure(warning_name, background="#ffff99") - notice_name = ExceptionLevel.NOTICE.name + notice_name = AlertLevel.NOTICE.name self.tree.tag_configure(notice_name, background="#85e085") yscrollbar = ttk.Scrollbar(frame, orient="vertical", command=self.tree.yview) diff --git a/daemon/core/gui/dialogs/configserviceconfig.py b/daemon/core/gui/dialogs/configserviceconfig.py deleted file mode 100644 index 0e873a796..000000000 --- a/daemon/core/gui/dialogs/configserviceconfig.py +++ /dev/null @@ -1,414 +0,0 @@ -""" -Service configuration dialog -""" -import logging -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -import grpc - -from core.api.grpc.wrappers import ( - ConfigOption, - ConfigServiceData, - Node, - ServiceValidationMode, -) -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import CodeText, ConfigFrame, ListboxScroll - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.coreclient import CoreClient - - -class ConfigServiceConfigDialog(Dialog): - def __init__( - self, master: tk.BaseWidget, app: "Application", service_name: str, node: Node - ) -> None: - title = f"{service_name} Config Service" - super().__init__(app, title, master=master) - self.core: "CoreClient" = app.core - self.node: Node = node - self.service_name: str = service_name - self.radiovar: tk.IntVar = tk.IntVar(value=2) - self.directories: list[str] = [] - self.templates: list[str] = [] - self.rendered: dict[str, str] = {} - self.dependencies: list[str] = [] - self.executables: list[str] = [] - self.startup_commands: list[str] = [] - self.validation_commands: list[str] = [] - self.shutdown_commands: list[str] = [] - self.default_startup: list[str] = [] - self.default_validate: list[str] = [] - self.default_shutdown: list[str] = [] - self.validation_mode: Optional[ServiceValidationMode] = None - self.validation_time: Optional[int] = None - self.validation_period: tk.DoubleVar = tk.DoubleVar() - self.modes: list[str] = [] - self.mode_configs: dict[str, dict[str, str]] = {} - self.notebook: Optional[ttk.Notebook] = None - self.templates_combobox: Optional[ttk.Combobox] = None - self.modes_combobox: Optional[ttk.Combobox] = None - self.startup_commands_listbox: Optional[tk.Listbox] = None - self.shutdown_commands_listbox: Optional[tk.Listbox] = None - self.validate_commands_listbox: Optional[tk.Listbox] = None - self.validation_time_entry: Optional[ttk.Entry] = None - self.validation_mode_entry: Optional[ttk.Entry] = None - self.template_text: Optional[CodeText] = None - self.rendered_text: Optional[CodeText] = None - self.validation_period_entry: Optional[ttk.Entry] = None - self.original_service_files: dict[str, str] = {} - self.temp_service_files: dict[str, str] = {} - self.modified_files: set[str] = set() - self.config_frame: Optional[ConfigFrame] = None - self.default_config: dict[str, str] = {} - self.config: dict[str, ConfigOption] = {} - self.has_error: bool = False - self.load() - if not self.has_error: 
- self.draw() - - def load(self) -> None: - try: - self.core.start_session(definition=True) - service = self.core.config_services[self.service_name] - self.dependencies = service.dependencies[:] - self.executables = service.executables[:] - self.directories = service.directories[:] - self.templates = service.files[:] - self.startup_commands = service.startup[:] - self.validation_commands = service.validate[:] - self.shutdown_commands = service.shutdown[:] - self.validation_mode = service.validation_mode - self.validation_time = service.validation_timer - self.validation_period.set(service.validation_period) - defaults = self.core.get_config_service_defaults( - self.node.id, self.service_name - ) - self.original_service_files = defaults.templates - self.temp_service_files = dict(self.original_service_files) - self.modes = sorted(defaults.modes) - self.mode_configs = defaults.modes - self.config = ConfigOption.from_dict(defaults.config) - self.default_config = {x.name: x.value for x in self.config.values()} - self.rendered = self.core.get_config_service_rendered( - self.node.id, self.service_name - ) - service_config = self.node.config_service_configs.get(self.service_name) - if service_config: - for key, value in service_config.config.items(): - self.config[key].value = value - logger.info("default config: %s", self.default_config) - for file, data in service_config.templates.items(): - self.modified_files.add(file) - self.temp_service_files[file] = data - except grpc.RpcError as e: - self.app.show_grpc_exception("Get Config Service Error", e) - self.has_error = True - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - # draw notebook - self.notebook = ttk.Notebook(self.top) - self.notebook.grid(sticky=tk.NSEW, pady=PADY) - self.draw_tab_files() - if self.config: - self.draw_tab_config() - self.draw_tab_startstop() - self.draw_tab_validation() - self.draw_buttons() - - def draw_tab_files(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - tab.rowconfigure(2, weight=1) - self.notebook.add(tab, text="Directories/Files") - - label = ttk.Label( - tab, text="Directories and templates that will be used for this service." 
- ) - label.grid(pady=PADY) - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Directories") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - state = "readonly" if self.directories else tk.DISABLED - directories_combobox = ttk.Combobox(frame, values=self.directories, state=state) - directories_combobox.grid(row=0, column=1, sticky=tk.EW, pady=PADY) - if self.directories: - directories_combobox.current(0) - label = ttk.Label(frame, text="Files") - label.grid(row=1, column=0, sticky=tk.W, padx=PADX) - state = "readonly" if self.templates else tk.DISABLED - self.templates_combobox = ttk.Combobox( - frame, values=self.templates, state=state - ) - self.templates_combobox.bind( - "<>", self.handle_template_changed - ) - self.templates_combobox.grid(row=1, column=1, sticky=tk.EW, pady=PADY) - # draw file template tab - notebook = ttk.Notebook(tab) - notebook.rowconfigure(0, weight=1) - notebook.columnconfigure(0, weight=1) - notebook.grid(sticky=tk.NSEW, pady=PADY) - # draw rendered file tab - rendered_tab = ttk.Frame(notebook, padding=FRAME_PAD) - rendered_tab.grid(sticky=tk.NSEW) - rendered_tab.rowconfigure(0, weight=1) - rendered_tab.columnconfigure(0, weight=1) - notebook.add(rendered_tab, text="Rendered") - self.rendered_text = CodeText(rendered_tab) - self.rendered_text.grid(sticky=tk.NSEW) - self.rendered_text.text.bind("", self.update_template_file_data) - # draw template file tab - template_tab = ttk.Frame(notebook, padding=FRAME_PAD) - template_tab.grid(sticky=tk.NSEW) - template_tab.rowconfigure(0, weight=1) - template_tab.columnconfigure(0, weight=1) - notebook.add(template_tab, text="Template") - self.template_text = CodeText(template_tab) - self.template_text.grid(sticky=tk.NSEW) - self.template_text.text.bind("", self.update_template_file_data) - if self.templates: - self.templates_combobox.current(0) - template_name = self.templates[0] - temp_data = self.temp_service_files[template_name] - self.template_text.set_text(temp_data) - rendered_data = self.rendered[template_name] - self.rendered_text.set_text(rendered_data) - else: - self.template_text.text.configure(state=tk.DISABLED) - self.rendered_text.text.configure(state=tk.DISABLED) - - def draw_tab_config(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - self.notebook.add(tab, text="Configuration") - - if self.modes: - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Modes") - label.grid(row=0, column=0, padx=PADX) - self.modes_combobox = ttk.Combobox( - frame, values=self.modes, state="readonly" - ) - self.modes_combobox.bind("<>", self.handle_mode_changed) - self.modes_combobox.grid(row=0, column=1, sticky=tk.EW, pady=PADY) - - logger.info("config service config: %s", self.config) - self.config_frame = ConfigFrame(tab, self.app, self.config) - self.config_frame.draw_config() - self.config_frame.grid(sticky=tk.NSEW, pady=PADY) - tab.rowconfigure(self.config_frame.grid_info()["row"], weight=1) - - def draw_tab_startstop(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - for i in range(3): - tab.rowconfigure(i, weight=1) - self.notebook.add(tab, text="Startup/Shutdown") - commands = [] - # tab 3 - for i in range(3): - label_frame = None - if i == 0: - label_frame = ttk.LabelFrame( - tab, text="Startup 
Commands", padding=FRAME_PAD - ) - commands = self.startup_commands - elif i == 1: - label_frame = ttk.LabelFrame( - tab, text="Shutdown Commands", padding=FRAME_PAD - ) - commands = self.shutdown_commands - elif i == 2: - label_frame = ttk.LabelFrame( - tab, text="Validation Commands", padding=FRAME_PAD - ) - commands = self.validation_commands - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - label_frame.grid(row=i, column=0, sticky=tk.NSEW, pady=PADY) - listbox_scroll = ListboxScroll(label_frame) - for command in commands: - listbox_scroll.listbox.insert("end", command) - listbox_scroll.listbox.config(height=4) - listbox_scroll.grid(sticky=tk.NSEW) - if i == 0: - self.startup_commands_listbox = listbox_scroll.listbox - elif i == 1: - self.shutdown_commands_listbox = listbox_scroll.listbox - elif i == 2: - self.validate_commands_listbox = listbox_scroll.listbox - - def draw_tab_validation(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.EW) - tab.columnconfigure(0, weight=1) - self.notebook.add(tab, text="Validation", sticky=tk.NSEW) - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - - label = ttk.Label(frame, text="Validation Time") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - self.validation_time_entry = ttk.Entry(frame) - self.validation_time_entry.insert("end", str(self.validation_time)) - self.validation_time_entry.config(state=tk.DISABLED) - self.validation_time_entry.grid(row=0, column=1, sticky=tk.EW, pady=PADY) - - label = ttk.Label(frame, text="Validation Mode") - label.grid(row=1, column=0, sticky=tk.W, padx=PADX) - if self.validation_mode == ServiceValidationMode.BLOCKING: - mode = "BLOCKING" - elif self.validation_mode == ServiceValidationMode.NON_BLOCKING: - mode = "NON_BLOCKING" - else: - mode = "TIMER" - self.validation_mode_entry = ttk.Entry( - frame, textvariable=tk.StringVar(value=mode) - ) - self.validation_mode_entry.insert("end", mode) - self.validation_mode_entry.config(state=tk.DISABLED) - self.validation_mode_entry.grid(row=1, column=1, sticky=tk.EW, pady=PADY) - - label = ttk.Label(frame, text="Validation Period") - label.grid(row=2, column=0, sticky=tk.W, padx=PADX) - self.validation_period_entry = ttk.Entry( - frame, state=tk.DISABLED, textvariable=self.validation_period - ) - self.validation_period_entry.grid(row=2, column=1, sticky=tk.EW, pady=PADY) - - label_frame = ttk.LabelFrame(tab, text="Executables", padding=FRAME_PAD) - label_frame.grid(sticky=tk.NSEW, pady=PADY) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - listbox_scroll = ListboxScroll(label_frame) - listbox_scroll.grid(sticky=tk.NSEW) - tab.rowconfigure(listbox_scroll.grid_info()["row"], weight=1) - for executable in self.executables: - listbox_scroll.listbox.insert("end", executable) - - label_frame = ttk.LabelFrame(tab, text="Dependencies", padding=FRAME_PAD) - label_frame.grid(sticky=tk.NSEW, pady=PADY) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - listbox_scroll = ListboxScroll(label_frame) - listbox_scroll.grid(sticky=tk.NSEW) - tab.rowconfigure(listbox_scroll.grid_info()["row"], weight=1) - for dependency in self.dependencies: - listbox_scroll.listbox.insert("end", dependency) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(4): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Apply", 
command=self.click_apply) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Defaults", command=self.click_defaults) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Copy...", command=self.click_copy) - button.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=3, sticky=tk.EW) - - def click_apply(self) -> None: - current_listbox = self.master.current.listbox - if not self.is_custom(): - self.node.config_service_configs.pop(self.service_name, None) - current_listbox.itemconfig(current_listbox.curselection()[0], bg="") - self.destroy() - return - service_config = self.node.config_service_configs.setdefault( - self.service_name, ConfigServiceData() - ) - if self.config_frame: - self.config_frame.parse_config() - service_config.config = {x.name: x.value for x in self.config.values()} - for file in self.modified_files: - service_config.templates[file] = self.temp_service_files[file] - all_current = current_listbox.get(0, tk.END) - current_listbox.itemconfig(all_current.index(self.service_name), bg="green") - self.destroy() - - def handle_template_changed(self, event: tk.Event) -> None: - template_name = self.templates_combobox.get() - temp_data = self.temp_service_files[template_name] - self.template_text.set_text(temp_data) - rendered = self.rendered[template_name] - self.rendered_text.set_text(rendered) - - def handle_mode_changed(self, event: tk.Event) -> None: - mode = self.modes_combobox.get() - config = self.mode_configs[mode] - logger.info("mode config: %s", config) - self.config_frame.set_values(config) - - def update_template_file_data(self, _event: tk.Event) -> None: - template = self.templates_combobox.get() - self.temp_service_files[template] = self.rendered_text.get_text() - if self.rendered[template] != self.temp_service_files[template]: - self.modified_files.add(template) - return - self.temp_service_files[template] = self.template_text.get_text() - if self.temp_service_files[template] != self.original_service_files[template]: - self.modified_files.add(template) - else: - self.modified_files.discard(template) - - def is_custom(self) -> bool: - has_custom_templates = len(self.modified_files) > 0 - has_custom_config = False - if self.config_frame: - current = self.config_frame.parse_config() - has_custom_config = self.default_config != current - return has_custom_templates or has_custom_config - - def click_defaults(self) -> None: - # clear all saved state data - self.modified_files.clear() - self.node.config_service_configs.pop(self.service_name, None) - self.temp_service_files = dict(self.original_service_files) - # reset session definition and retrieve default rendered templates - self.core.start_session(definition=True) - self.rendered = self.core.get_config_service_rendered( - self.node.id, self.service_name - ) - logger.info( - "cleared config service config: %s", self.node.config_service_configs - ) - # reset current selected file data and config data, if present - template_name = self.templates_combobox.get() - temp_data = self.temp_service_files[template_name] - self.template_text.set_text(temp_data) - rendered_data = self.rendered[template_name] - self.rendered_text.set_text(rendered_data) - if self.config_frame: - logger.info("resetting defaults: %s", self.default_config) - self.config_frame.set_values(self.default_config) - - def click_copy(self) -> None: - pass - - def append_commands( - self, 
commands: list[str], listbox: tk.Listbox, to_add: list[str] - ) -> None: - for cmd in to_add: - commands.append(cmd) - listbox.insert(tk.END, cmd) diff --git a/daemon/core/gui/dialogs/copyserviceconfig.py b/daemon/core/gui/dialogs/copyserviceconfig.py deleted file mode 100644 index 6b2f4927e..000000000 --- a/daemon/core/gui/dialogs/copyserviceconfig.py +++ /dev/null @@ -1,119 +0,0 @@ -""" -copy service config dialog -""" - -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY -from core.gui.widgets import CodeText, ListboxScroll - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.dialogs.serviceconfig import ServiceConfigDialog - - -class CopyServiceConfigDialog(Dialog): - def __init__( - self, - app: "Application", - dialog: "ServiceConfigDialog", - name: str, - service: str, - file_name: str, - ) -> None: - super().__init__(app, f"Copy Custom File to {name}", master=dialog) - self.dialog: "ServiceConfigDialog" = dialog - self.service: str = service - self.file_name: str = file_name - self.listbox: Optional[tk.Listbox] = None - self.nodes: dict[str, int] = {} - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(1, weight=1) - label = ttk.Label( - self.top, text=f"{self.service} - {self.file_name}", anchor=tk.CENTER - ) - label.grid(sticky=tk.EW, pady=PADY) - - listbox_scroll = ListboxScroll(self.top) - listbox_scroll.grid(sticky=tk.NSEW, pady=PADY) - self.listbox = listbox_scroll.listbox - for node in self.app.core.session.nodes.values(): - file_configs = node.service_file_configs.get(self.service) - if not file_configs: - continue - data = file_configs.get(self.file_name) - if not data: - continue - self.nodes[node.name] = node.id - self.listbox.insert(tk.END, node.name) - - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(3): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Copy", command=self.click_copy) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="View", command=self.click_view) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=2, sticky=tk.EW) - - def click_copy(self) -> None: - selection = self.listbox.curselection() - if not selection: - return - name = self.listbox.get(selection) - node_id = self.nodes[name] - node = self.app.core.session.nodes[node_id] - data = node.service_file_configs[self.service][self.file_name] - self.dialog.temp_service_files[self.file_name] = data - self.dialog.modified_files.add(self.file_name) - self.dialog.service_file_data.text.delete(1.0, tk.END) - self.dialog.service_file_data.text.insert(tk.END, data) - self.destroy() - - def click_view(self) -> None: - selection = self.listbox.curselection() - if not selection: - return - name = self.listbox.get(selection) - node_id = self.nodes[name] - node = self.app.core.session.nodes[node_id] - data = node.service_file_configs[self.service][self.file_name] - dialog = ViewConfigDialog( - self.app, self, name, self.service, self.file_name, data - ) - dialog.show() - - -class ViewConfigDialog(Dialog): - def __init__( - self, - app: "Application", - master: tk.BaseWidget, - name: str, - service: str, - file_name: str, - data: str, - ) -> None: - title = f"{name} Service({service}) File({file_name})" - super().__init__(app, title, 
master=master) - self.data = data - self.service_data = None - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.service_data = CodeText(self.top) - self.service_data.grid(sticky=tk.NSEW, pady=PADY) - self.service_data.text.insert(tk.END, self.data) - self.service_data.text.config(state=tk.DISABLED) - button = ttk.Button(self.top, text="Close", command=self.destroy) - button.grid(sticky=tk.EW) diff --git a/daemon/core/gui/dialogs/customnodes.py b/daemon/core/gui/dialogs/customnodes.py index ea4421e89..e891458f9 100644 --- a/daemon/core/gui/dialogs/customnodes.py +++ b/daemon/core/gui/dialogs/customnodes.py @@ -23,7 +23,7 @@ class ServicesSelectDialog(Dialog): def __init__( self, master: tk.BaseWidget, app: "Application", current_services: set[str] ) -> None: - super().__init__(app, "Node Config Services", master=master) + super().__init__(app, "Node Services", master=master) self.groups: Optional[ListboxScroll] = None self.services: Optional[CheckboxList] = None self.current: Optional[ListboxScroll] = None @@ -45,7 +45,7 @@ def draw(self) -> None: label_frame.columnconfigure(0, weight=1) self.groups = ListboxScroll(label_frame) self.groups.grid(sticky=tk.NSEW) - for group in sorted(self.app.core.config_services_groups): + for group in sorted(self.app.core.services_groups): self.groups.listbox.insert(tk.END, group) self.groups.listbox.bind("<>", self.handle_group_change) self.groups.listbox.selection_set(0) @@ -86,7 +86,7 @@ def handle_group_change(self, event: tk.Event = None) -> None: index = selection[0] group = self.groups.listbox.get(index) self.services.clear() - for name in sorted(self.app.core.config_services_groups[group]): + for name in sorted(self.app.core.services_groups[group]): checked = name in self.current_services self.services.add(name, checked) @@ -147,7 +147,7 @@ def draw_node_config(self) -> None: frame, text="Icon", compound=tk.LEFT, command=self.click_icon ) self.image_button.grid(sticky=tk.EW, pady=PADY) - button = ttk.Button(frame, text="Config Services", command=self.click_services) + button = ttk.Button(frame, text="Services", command=self.click_services) button.grid(sticky=tk.EW) def draw_node_buttons(self) -> None: diff --git a/daemon/core/gui/dialogs/emaneconfig.py b/daemon/core/gui/dialogs/emaneconfig.py index 00eda6948..7cea29c41 100644 --- a/daemon/core/gui/dialogs/emaneconfig.py +++ b/daemon/core/gui/dialogs/emaneconfig.py @@ -3,7 +3,7 @@ """ import tkinter as tk import webbrowser -from tkinter import ttk +from tkinter import messagebox, ttk from typing import TYPE_CHECKING, Optional import grpc @@ -70,10 +70,13 @@ def draw_buttons(self) -> None: button.grid(row=0, column=1, sticky=tk.EW) def click_apply(self) -> None: - self.config_frame.parse_config() - key = (self.model, self.iface_id) - self.node.emane_model_configs[key] = self.config - self.destroy() + try: + self.config_frame.parse_config() + key = (self.model, self.iface_id) + self.node.emane_model_configs[key] = self.config + self.destroy() + except ValueError as e: + messagebox.showerror("EMANE Config Error", str(e)) class EmaneConfigDialog(Dialog): diff --git a/daemon/core/gui/dialogs/nodecommands.py b/daemon/core/gui/dialogs/nodecommands.py new file mode 100644 index 000000000..8d6b4e721 --- /dev/null +++ b/daemon/core/gui/dialogs/nodecommands.py @@ -0,0 +1,147 @@ +import tkinter as tk +from tkinter import messagebox, ttk +from typing import TYPE_CHECKING, Optional + +from core.gui.appconfig import NodeCommand +from 
core.gui.dialogs.dialog import Dialog +from core.gui.themes import PADX, PADY +from core.gui.widgets import ListboxScroll + +if TYPE_CHECKING: + from core.gui.app import Application + + +class NodeCommandsDialog(Dialog): + def __init__(self, app: "Application") -> None: + super().__init__(app, "Node Commands") + self.commands: Optional[tk.Listbox] = None + self.save_button: Optional[ttk.Button] = None + self.delete_button: Optional[ttk.Button] = None + self.selected: Optional[str] = None + self.selected_index: Optional[int] = None + self.name: tk.StringVar = tk.StringVar() + self.cmd: tk.StringVar = tk.StringVar() + self.draw() + + def draw(self) -> None: + self.top.columnconfigure(0, weight=1) + self.top.rowconfigure(0, weight=1) + self.draw_listbox() + self.draw_form_fields() + self.draw_config_buttons() + self.draw_apply_buttons() + + def draw_listbox(self) -> None: + listbox_scroll = ListboxScroll(self.top) + listbox_scroll.grid(sticky=tk.NSEW, pady=PADY) + listbox_scroll.columnconfigure(0, weight=1) + listbox_scroll.rowconfigure(0, weight=1) + self.commands = listbox_scroll.listbox + self.commands.grid(row=0, column=0, sticky=tk.NSEW) + self.commands.bind("<>", self.handle_change) + for name in sorted(self.app.core.node_commands): + self.commands.insert(tk.END, name) + + def draw_form_fields(self) -> None: + frame = ttk.Frame(self.top) + frame.grid(sticky=tk.EW, pady=PADY) + frame.columnconfigure(1, weight=1) + + label = ttk.Label(frame, text="Name") + label.grid(row=0, column=0, sticky=tk.W, padx=PADX) + entry = ttk.Entry(frame, textvariable=self.name) + entry.grid(row=0, column=1, sticky=tk.EW) + + label = ttk.Label(frame, text="Command") + label.grid(row=1, column=0, sticky=tk.W, padx=PADX) + entry = ttk.Entry(frame, textvariable=self.cmd) + entry.grid(row=1, column=1, sticky=tk.EW) + + def draw_config_buttons(self) -> None: + frame = ttk.Frame(self.top) + frame.grid(sticky=tk.EW, pady=PADY) + for i in range(3): + frame.columnconfigure(i, weight=1) + + button = ttk.Button(frame, text="Create", command=self.click_create) + button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) + + self.save_button = ttk.Button( + frame, text="Save", state=tk.DISABLED, command=self.click_save + ) + self.save_button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) + + self.delete_button = ttk.Button( + frame, text="Delete", state=tk.DISABLED, command=self.click_delete + ) + self.delete_button.grid(row=0, column=2, sticky=tk.EW) + + def draw_apply_buttons(self) -> None: + frame = ttk.Frame(self.top) + frame.grid(sticky=tk.EW) + for i in range(2): + frame.columnconfigure(i, weight=1) + + button = ttk.Button(frame, text="Save", command=self.click_save_config) + button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) + + button = ttk.Button(frame, text="Cancel", command=self.destroy) + button.grid(row=0, column=1, sticky=tk.EW) + + def click_save_config(self) -> None: + self.app.guiconfig.node_commands.clear() + for name, cmd in self.app.core.node_commands.items(): + self.app.guiconfig.node_commands.append(NodeCommand(name, cmd)) + self.app.save_config() + self.destroy() + + def click_create(self) -> None: + name = self.name.get() + if name not in self.app.core.node_commands: + cmd = self.cmd.get() + self.app.core.node_commands[name] = cmd + self.commands.insert(tk.END, name) + self.name.set("") + self.cmd.set("") + else: + messagebox.showerror("Node Command Error", f"{name} already exists") + + def click_save(self) -> None: + name = self.name.get() + cmd = self.cmd.get() + if self.selected: + previous_name = 
self.selected + self.selected = name + self.app.core.node_commands.pop(previous_name) + self.app.core.node_commands[name] = cmd + self.commands.delete(self.selected_index) + self.commands.insert(self.selected_index, name) + self.commands.selection_set(self.selected_index) + + def click_delete(self) -> None: + if self.selected: + self.commands.delete(self.selected_index) + del self.app.core.node_commands[self.selected] + self.selected = None + self.selected_index = None + self.name.set("") + self.cmd.set("") + self.commands.selection_clear(0, tk.END) + self.save_button.config(state=tk.DISABLED) + self.delete_button.config(state=tk.DISABLED) + + def handle_change(self, _event: tk.Event) -> None: + selection = self.commands.curselection() + if selection: + self.selected_index = selection[0] + self.selected = self.commands.get(self.selected_index) + cmd = self.app.core.node_commands[self.selected] + self.name.set(self.selected) + self.cmd.set(cmd) + self.save_button.config(state=tk.NORMAL) + self.delete_button.config(state=tk.NORMAL) + else: + self.selected_index = None + self.selected = None + self.save_button.config(state=tk.DISABLED) + self.delete_button.config(state=tk.DISABLED) diff --git a/daemon/core/gui/dialogs/nodeconfig.py b/daemon/core/gui/dialogs/nodeconfig.py index 162696d41..59eda3be0 100644 --- a/daemon/core/gui/dialogs/nodeconfig.py +++ b/daemon/core/gui/dialogs/nodeconfig.py @@ -1,10 +1,12 @@ import logging import tkinter as tk from functools import partial -from tkinter import messagebox, ttk +from pathlib import Path +from tkinter import filedialog, messagebox, ttk from typing import TYPE_CHECKING, Optional import netaddr +from netaddr import AddrFormatError, IPNetwork from PIL.ImageTk import PhotoImage from core.api.grpc.wrappers import Interface, Node @@ -186,11 +188,16 @@ def __init__(self, app: "Application", canvas_node: "CanvasNode") -> None: self.name: tk.StringVar = tk.StringVar(value=self.node.name) self.type: tk.StringVar = tk.StringVar(value=self.node.model) self.container_image: tk.StringVar = tk.StringVar(value=self.node.image) + self.compose_file: tk.StringVar = tk.StringVar(value=self.node.compose) + self.compose_name: tk.StringVar = tk.StringVar(value=self.node.compose_name) server = DEFAULT_SERVER if self.node.server: server = self.node.server self.server: tk.StringVar = tk.StringVar(value=server) self.ifaces: dict[int, InterfaceData] = {} + subnets = self.app.core.ifaces_manager.get_wireless_nets(self.node.id) + self.ip4_subnet: tk.StringVar = tk.StringVar(value=str(subnets.ip4)) + self.ip6_subnet: tk.StringVar = tk.StringVar(value=str(subnets.ip6)) self.draw() def draw(self) -> None: @@ -219,42 +226,88 @@ def draw(self) -> None: self.image_button.grid(row=row, column=1, sticky=tk.EW) row += 1 - # name field - label = ttk.Label(frame, text="Name") - label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) - entry = validation.NodeNameEntry(frame, textvariable=self.name, state=state) - entry.grid(row=row, column=1, sticky=tk.EW) + overview_frame = ttk.Labelframe(frame, text="Overview", padding=FRAME_PAD) + overview_frame.grid(row=row, columnspan=2, sticky=tk.EW, pady=PADY) + overview_frame.columnconfigure(1, weight=1) + overview_row = 0 row += 1 + # name field + label = ttk.Label(overview_frame, text="Name") + label.grid(row=overview_row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) + entry = validation.NodeNameEntry( + overview_frame, textvariable=self.name, state=state + ) + entry.grid(row=overview_row, column=1, sticky=tk.EW) + overview_row += 1 
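
The `NodeCommandsDialog` added above keeps an in-memory name-to-command mapping while editing and, on save, rebuilds `guiconfig.node_commands` as a list of `NodeCommand` entries. Below is a minimal standalone sketch of that round-trip; `NodeCommand` is re-declared here only so the snippet runs on its own, the example command string is illustrative, and the reverse mapping on load is an assumption not shown in the diff.

```python
# Standalone sketch of the node-commands round-trip: the dialog edits a
# name -> command dict, then persists it as a list of NodeCommand entries.
# NodeCommand here stands in for the dataclass imported from core.gui.appconfig.
from dataclasses import dataclass


@dataclass
class NodeCommand:
    name: str
    cmd: str


def save_node_commands(node_commands: dict[str, str]) -> list[NodeCommand]:
    # mirrors click_save_config: rebuild the persisted list from the dict
    return [NodeCommand(name, cmd) for name, cmd in node_commands.items()]


def load_node_commands(saved: list[NodeCommand]) -> dict[str, str]:
    # inverse mapping assumed to run at GUI startup (not shown in the diff)
    return {entry.name: entry.cmd for entry in saved}


if __name__ == "__main__":
    commands = {"wireshark": "wireshark -k -i any"}  # illustrative command only
    saved = save_node_commands(commands)
    assert load_node_commands(saved) == commands
```

In the GUI these saved entries surface as a "Commands" cascade on a container node's context menu, each entry dispatching through `run_cmd` with the node id, as the `node.py` hunk later in this diff shows.
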
+ # node type field if nutils.is_model(self.node): - label = ttk.Label(frame, text="Type") - label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) - entry = ttk.Entry(frame, textvariable=self.type, state=tk.DISABLED) - entry.grid(row=row, column=1, sticky=tk.EW) - row += 1 - - # container image field - if nutils.has_image(self.node.type): - label = ttk.Label(frame, text="Image") - label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) - entry = ttk.Entry(frame, textvariable=self.container_image, state=state) - entry.grid(row=row, column=1, sticky=tk.EW) - row += 1 + label = ttk.Label(overview_frame, text="Type") + label.grid(row=overview_row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) + entry = ttk.Entry(overview_frame, textvariable=self.type, state=tk.DISABLED) + entry.grid(row=overview_row, column=1, sticky=tk.EW) + overview_row += 1 if nutils.is_container(self.node): - # server - frame.grid(sticky=tk.EW) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Server") - label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) + label = ttk.Label(overview_frame, text="Server") + label.grid(row=overview_row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) servers = [DEFAULT_SERVER] servers.extend(list(sorted(self.app.core.servers.keys()))) combobox = ttk.Combobox( - frame, textvariable=self.server, values=servers, state=combo_state + overview_frame, + textvariable=self.server, + values=servers, + state=combo_state, ) - combobox.grid(row=row, column=1, sticky=tk.EW) - row += 1 + combobox.grid(row=overview_row, column=1, sticky=tk.EW) + overview_row += 1 + + # container image field + if nutils.has_image(self.node.type): + # image name + label = ttk.Label(overview_frame, text="Image") + label.grid(row=overview_row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) + entry = ttk.Entry( + overview_frame, textvariable=self.container_image, state=state + ) + entry.grid(row=overview_row, column=1, sticky=tk.EW) + overview_row += 1 + # compose file + compose_frame = ttk.Frame(overview_frame) + compose_frame.columnconfigure(0, weight=2) + compose_frame.columnconfigure(1, weight=1) + compose_frame.columnconfigure(2, weight=1) + entry = ttk.Entry( + compose_frame, textvariable=self.compose_file, state=state + ) + entry.grid(row=0, column=0, sticky=tk.EW, padx=PADX) + button = ttk.Button( + compose_frame, + text="Compose File", + command=self.click_compose, + state=state, + ) + button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) + button = ttk.Button( + compose_frame, + text="Clear", + command=self.click_compose_clear, + state=state, + ) + button.grid(row=0, column=2, sticky=tk.EW) + compose_frame.grid( + row=overview_row, column=0, columnspan=2, sticky=tk.EW, pady=PADY + ) + overview_row += 1 + # compose name + label = ttk.Label(overview_frame, text="Compose Name") + label.grid(row=overview_row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) + entry = ttk.Entry( + overview_frame, textvariable=self.compose_name, state=state + ) + entry.grid(row=overview_row, column=1, sticky=tk.EW) + overview_row += 1 if nutils.is_rj45(self.node): ifaces = self.app.core.client.get_ifaces() @@ -269,6 +322,9 @@ def draw(self) -> None: row += 1 ifaces_scroll.listbox.bind("<>", self.iface_select) + if nutils.is_wireless(self.node): + self.draw_network_config() + # interfaces if nutils.is_container(self.node): self.draw_ifaces() @@ -276,7 +332,23 @@ def draw(self) -> None: self.draw_spacer() self.draw_buttons() + def draw_network_config(self) -> None: + frame = 
ttk.LabelFrame(self.top, text="Network", padding=FRAME_PAD) + frame.grid(sticky=tk.EW, pady=PADY) + for i in range(2): + frame.columnconfigure(i, weight=1) + label = ttk.Label(frame, text="IPv4 Subnet") + label.grid(row=0, column=0, sticky=tk.EW) + entry = ttk.Entry(frame, textvariable=self.ip4_subnet) + entry.grid(row=0, column=1, sticky=tk.EW) + label = ttk.Label(frame, text="IPv6 Subnet") + label.grid(row=1, column=0, sticky=tk.EW) + entry = ttk.Entry(frame, textvariable=self.ip6_subnet) + entry.grid(row=1, column=1, sticky=tk.EW) + def draw_ifaces(self) -> None: + if not self.canvas_node.ifaces: + return notebook = ttk.Notebook(self.top) notebook.grid(sticky=tk.NSEW, pady=PADY) self.top.rowconfigure(notebook.grid_info()["row"], weight=1) @@ -378,7 +450,16 @@ def click_apply(self) -> None: # update core node self.node.name = self.name.get() if nutils.has_image(self.node.type): - self.node.image = self.container_image.get() + self.node.image = self.container_image.get() or None + self.node.compose = self.compose_file.get() or None + self.node.compose_name = self.compose_name.get() or None + if self.node.compose and not self.node.compose_name: + messagebox.showerror( + "Compose Error", + "Name required when using a compose file", + parent=self.top, + ) + return server = self.server.get() if nutils.is_container(self.node): if server == DEFAULT_SERVER: @@ -400,6 +481,18 @@ def click_apply(self) -> None: if error: break + # save custom network for wireless node types + if nutils.is_wireless(self.node): + try: + ip4_subnet = IPNetwork(self.ip4_subnet.get()) + ip6_subnet = IPNetwork(self.ip6_subnet.get()) + self.app.core.ifaces_manager.set_wireless_nets( + self.node.id, ip4_subnet, ip6_subnet + ) + except AddrFormatError as e: + messagebox.showerror("IP Network Error", str(e), parent=self.top) + return + # redraw if not error: self.canvas_node.redraw() @@ -411,3 +504,19 @@ def iface_select(self, event: tk.Event) -> None: if cur: iface = listbox.get(cur[0]) self.name.set(iface) + + def click_compose(self) -> None: + file_path = filedialog.askopenfilename( + parent=self, + initialdir=str(Path.home()), + title="Select Compose File", + filetypes=( + ("yaml", "*.yml *.yaml ..."), + ("All Files", "*"), + ), + ) + if file_path: + self.compose_file.set(file_path) + + def click_compose_clear(self) -> None: + self.compose_file.set("") diff --git a/daemon/core/gui/dialogs/nodeconfigservice.py b/daemon/core/gui/dialogs/nodeconfigservice.py deleted file mode 100644 index ce718080f..000000000 --- a/daemon/core/gui/dialogs/nodeconfigservice.py +++ /dev/null @@ -1,158 +0,0 @@ -""" -core node services -""" -import logging -import tkinter as tk -from tkinter import messagebox, ttk -from typing import TYPE_CHECKING, Optional - -from core.api.grpc.wrappers import Node -from core.gui.dialogs.configserviceconfig import ConfigServiceConfigDialog -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import CheckboxList, ListboxScroll - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - - -class NodeConfigServiceDialog(Dialog): - def __init__( - self, app: "Application", node: Node, services: set[str] = None - ) -> None: - title = f"{node.name} Config Services" - super().__init__(app, title) - self.node: Node = node - self.groups: Optional[ListboxScroll] = None - self.services: Optional[CheckboxList] = None - self.current: Optional[ListboxScroll] = None - if services is None: - services = 
set(node.config_services) - self.current_services: set[str] = services - self.protocol("WM_DELETE_WINDOW", self.click_cancel) - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - - frame = ttk.Frame(self.top) - frame.grid(stick="nsew", pady=PADY) - frame.rowconfigure(0, weight=1) - for i in range(3): - frame.columnconfigure(i, weight=1) - label_frame = ttk.LabelFrame(frame, text="Groups", padding=FRAME_PAD) - label_frame.grid(row=0, column=0, sticky=tk.NSEW) - label_frame.rowconfigure(0, weight=1) - label_frame.columnconfigure(0, weight=1) - self.groups = ListboxScroll(label_frame) - self.groups.grid(sticky=tk.NSEW) - for group in sorted(self.app.core.config_services_groups): - self.groups.listbox.insert(tk.END, group) - self.groups.listbox.bind("<>", self.handle_group_change) - self.groups.listbox.selection_set(0) - - label_frame = ttk.LabelFrame(frame, text="Services") - label_frame.grid(row=0, column=1, sticky=tk.NSEW) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - self.services = CheckboxList( - label_frame, self.app, clicked=self.service_clicked, padding=FRAME_PAD - ) - self.services.grid(sticky=tk.NSEW) - - label_frame = ttk.LabelFrame(frame, text="Selected", padding=FRAME_PAD) - label_frame.grid(row=0, column=2, sticky=tk.NSEW) - label_frame.rowconfigure(0, weight=1) - label_frame.columnconfigure(0, weight=1) - - self.current = ListboxScroll(label_frame) - self.current.grid(sticky=tk.NSEW) - self.draw_current_services() - - frame = ttk.Frame(self.top) - frame.grid(stick="ew") - for i in range(4): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Configure", command=self.click_configure) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Save", command=self.click_save) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Remove", command=self.click_remove) - button.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.click_cancel) - button.grid(row=0, column=3, sticky=tk.EW) - - # trigger group change - self.handle_group_change() - - def handle_group_change(self, event: tk.Event = None) -> None: - selection = self.groups.listbox.curselection() - if selection: - index = selection[0] - group = self.groups.listbox.get(index) - self.services.clear() - for name in sorted(self.app.core.config_services_groups[group]): - checked = name in self.current_services - self.services.add(name, checked) - - def service_clicked(self, name: str, var: tk.IntVar) -> None: - if var.get() and name not in self.current_services: - self.current_services.add(name) - elif not var.get() and name in self.current_services: - self.current_services.remove(name) - self.node.config_service_configs.pop(name, None) - self.draw_current_services() - self.node.config_services = self.current_services.copy() - - def click_configure(self) -> None: - current_selection = self.current.listbox.curselection() - if len(current_selection): - dialog = ConfigServiceConfigDialog( - self, - self.app, - self.current.listbox.get(current_selection[0]), - self.node, - ) - if not dialog.has_error: - dialog.show() - self.draw_current_services() - else: - messagebox.showinfo( - "Config Service Configuration", - "Select a service to configure", - parent=self, - ) - - def draw_current_services(self) -> None: - self.current.listbox.delete(0, tk.END) - for name in sorted(self.current_services): 
- self.current.listbox.insert(tk.END, name) - if self.is_custom_service(name): - self.current.listbox.itemconfig(tk.END, bg="green") - - def click_save(self) -> None: - self.node.config_services = self.current_services.copy() - logger.info("saved node config services: %s", self.node.config_services) - self.destroy() - - def click_cancel(self) -> None: - self.current_services = None - self.destroy() - - def click_remove(self) -> None: - cur = self.current.listbox.curselection() - if cur: - service = self.current.listbox.get(cur[0]) - self.current.listbox.delete(cur[0]) - self.current_services.remove(service) - self.node.config_service_configs.pop(service, None) - for checkbutton in self.services.frame.winfo_children(): - if checkbutton["text"] == service: - checkbutton.invoke() - return - - def is_custom_service(self, service: str) -> bool: - return service in self.node.config_service_configs diff --git a/daemon/core/gui/dialogs/nodeservice.py b/daemon/core/gui/dialogs/nodeservice.py index 66e83fa44..04fed934e 100644 --- a/daemon/core/gui/dialogs/nodeservice.py +++ b/daemon/core/gui/dialogs/nodeservice.py @@ -1,6 +1,7 @@ """ core node services """ +import logging import tkinter as tk from tkinter import messagebox, ttk from typing import TYPE_CHECKING, Optional @@ -11,19 +12,24 @@ from core.gui.themes import FRAME_PAD, PADX, PADY from core.gui.widgets import CheckboxList, ListboxScroll +logger = logging.getLogger(__name__) + if TYPE_CHECKING: from core.gui.app import Application class NodeServiceDialog(Dialog): - def __init__(self, app: "Application", node: Node) -> None: - title = f"{node.name} Services (Deprecated)" + def __init__( + self, app: "Application", node: Node, services: set[str] = None + ) -> None: + title = f"{node.name} Services" super().__init__(app, title) self.node: Node = node self.groups: Optional[ListboxScroll] = None self.services: Optional[CheckboxList] = None self.current: Optional[ListboxScroll] = None - services = set(node.services) + if services is None: + services = set(node.services) self.current_services: set[str] = services self.protocol("WM_DELETE_WINDOW", self.click_cancel) self.draw() @@ -43,7 +49,7 @@ def draw(self) -> None: label_frame.columnconfigure(0, weight=1) self.groups = ListboxScroll(label_frame) self.groups.grid(sticky=tk.NSEW) - for group in sorted(self.app.core.services): + for group in sorted(self.app.core.services_groups): self.groups.listbox.insert(tk.END, group) self.groups.listbox.bind("<>", self.handle_group_change) self.groups.listbox.selection_set(0) @@ -61,12 +67,10 @@ def draw(self) -> None: label_frame.grid(row=0, column=2, sticky=tk.NSEW) label_frame.rowconfigure(0, weight=1) label_frame.columnconfigure(0, weight=1) + self.current = ListboxScroll(label_frame) self.current.grid(sticky=tk.NSEW) - for service in sorted(self.current_services): - self.current.listbox.insert(tk.END, service) - if self.is_custom_service(service): - self.current.listbox.itemconfig(tk.END, bg="green") + self.draw_current_services() frame = ttk.Frame(self.top) frame.grid(stick="ew") @@ -90,7 +94,7 @@ def handle_group_change(self, event: tk.Event = None) -> None: index = selection[0] group = self.groups.listbox.get(index) self.services.clear() - for name in sorted(self.app.core.services[group]): + for name in sorted(self.app.core.services_groups[group]): checked = name in self.current_services self.services.add(name, checked) @@ -100,12 +104,7 @@ def service_clicked(self, name: str, var: tk.IntVar) -> None: elif not var.get() and name in self.current_services: 
self.current_services.remove(name) self.node.service_configs.pop(name, None) - self.node.service_file_configs.pop(name, None) - self.current.listbox.delete(0, tk.END) - for name in sorted(self.current_services): - self.current.listbox.insert(tk.END, name) - if self.is_custom_service(name): - self.current.listbox.itemconfig(tk.END, bg="green") + self.draw_current_services() self.node.services = self.current_services.copy() def click_configure(self) -> None: @@ -117,22 +116,30 @@ def click_configure(self) -> None: self.current.listbox.get(current_selection[0]), self.node, ) - - # if error occurred when creating ServiceConfigDialog, don't show the dialog if not dialog.has_error: dialog.show() - else: - dialog.destroy() + self.draw_current_services() else: messagebox.showinfo( - "Service Configuration", "Select a service to configure", parent=self + "Service Configuration", + "Select a service to configure", + parent=self, ) - def click_cancel(self) -> None: - self.destroy() + def draw_current_services(self) -> None: + self.current.listbox.delete(0, tk.END) + for name in sorted(self.current_services): + self.current.listbox.insert(tk.END, name) + if self.is_custom_service(name): + self.current.listbox.itemconfig(tk.END, bg="green") def click_save(self) -> None: self.node.services = self.current_services.copy() + logger.info("saved node services: %s", self.node.services) + self.destroy() + + def click_cancel(self) -> None: + self.current_services = None self.destroy() def click_remove(self) -> None: @@ -142,13 +149,10 @@ def click_remove(self) -> None: self.current.listbox.delete(cur[0]) self.current_services.remove(service) self.node.service_configs.pop(service, None) - self.node.service_file_configs.pop(service, None) for checkbutton in self.services.frame.winfo_children(): if checkbutton["text"] == service: checkbutton.invoke() return def is_custom_service(self, service: str) -> bool: - has_service_config = service in self.node.service_configs - has_file_config = service in self.node.service_file_configs - return has_service_config or has_file_config + return service in self.node.service_configs diff --git a/daemon/core/gui/dialogs/serviceconfig.py b/daemon/core/gui/dialogs/serviceconfig.py index 5eec7fafa..bffa329ce 100644 --- a/daemon/core/gui/dialogs/serviceconfig.py +++ b/daemon/core/gui/dialogs/serviceconfig.py @@ -1,19 +1,22 @@ +""" +Service configuration dialog +""" import logging import tkinter as tk -from pathlib import Path -from tkinter import filedialog, messagebox, ttk +from tkinter import ttk from typing import TYPE_CHECKING, Optional import grpc -from PIL.ImageTk import PhotoImage -from core.api.grpc.wrappers import Node, NodeServiceData, ServiceValidationMode -from core.gui import images -from core.gui.dialogs.copyserviceconfig import CopyServiceConfigDialog +from core.api.grpc.wrappers import ( + ConfigOption, + Node, + ServiceData, + ServiceValidationMode, +) from core.gui.dialogs.dialog import Dialog -from core.gui.images import ImageEnum from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import CodeText, ListboxScroll +from core.gui.widgets import CodeText, ConfigFrame, ListboxScroll logger = logging.getLogger(__name__) @@ -21,21 +24,20 @@ from core.gui.app import Application from core.gui.coreclient import CoreClient -ICON_SIZE: int = 16 - class ServiceConfigDialog(Dialog): def __init__( self, master: tk.BaseWidget, app: "Application", service_name: str, node: Node ) -> None: - title = f"{service_name} Service (Deprecated)" + title = 
f"{service_name} Service" super().__init__(app, title, master=master) self.core: "CoreClient" = app.core self.node: Node = node self.service_name: str = service_name self.radiovar: tk.IntVar = tk.IntVar(value=2) - self.metadata: str = "" - self.filenames: list[str] = [] + self.directories: list[str] = [] + self.templates: list[str] = [] + self.rendered: dict[str, str] = {} self.dependencies: list[str] = [] self.executables: list[str] = [] self.startup_commands: list[str] = [] @@ -46,31 +48,26 @@ def __init__( self.default_shutdown: list[str] = [] self.validation_mode: Optional[ServiceValidationMode] = None self.validation_time: Optional[int] = None - self.validation_period: Optional[float] = None - self.directory_entry: Optional[ttk.Entry] = None - self.default_directories: list[str] = [] - self.temp_directories: list[str] = [] - self.documentnew_img: PhotoImage = self.app.get_enum_icon( - ImageEnum.DOCUMENTNEW, width=ICON_SIZE - ) - self.editdelete_img: PhotoImage = self.app.get_enum_icon( - ImageEnum.EDITDELETE, width=ICON_SIZE - ) + self.validation_period: tk.DoubleVar = tk.DoubleVar() + self.modes: list[str] = [] + self.mode_configs: dict[str, dict[str, str]] = {} self.notebook: Optional[ttk.Notebook] = None - self.metadata_entry: Optional[ttk.Entry] = None - self.filename_combobox: Optional[ttk.Combobox] = None - self.dir_list: Optional[ListboxScroll] = None + self.templates_combobox: Optional[ttk.Combobox] = None + self.modes_combobox: Optional[ttk.Combobox] = None self.startup_commands_listbox: Optional[tk.Listbox] = None self.shutdown_commands_listbox: Optional[tk.Listbox] = None self.validate_commands_listbox: Optional[tk.Listbox] = None self.validation_time_entry: Optional[ttk.Entry] = None self.validation_mode_entry: Optional[ttk.Entry] = None - self.service_file_data: Optional[CodeText] = None + self.template_text: Optional[CodeText] = None + self.rendered_text: Optional[CodeText] = None self.validation_period_entry: Optional[ttk.Entry] = None self.original_service_files: dict[str, str] = {} - self.default_config: Optional[NodeServiceData] = None self.temp_service_files: dict[str, str] = {} self.modified_files: set[str] = set() + self.config_frame: Optional[ConfigFrame] = None + self.default_config: dict[str, str] = {} + self.config: dict[str, ConfigOption] = {} self.has_error: bool = False self.load() if not self.has_error: @@ -79,180 +76,141 @@ def __init__( def load(self) -> None: try: self.core.start_session(definition=True) - default_config = self.app.core.get_node_service( + service = self.core.services[self.service_name] + self.dependencies = service.dependencies[:] + self.executables = service.executables[:] + self.directories = service.directories[:] + self.templates = service.files[:] + self.startup_commands = service.startup[:] + self.validation_commands = service.validate[:] + self.shutdown_commands = service.shutdown[:] + self.validation_mode = service.validation_mode + self.validation_time = service.validation_timer + self.validation_period.set(service.validation_period) + defaults = self.core.get_service_defaults(self.node.id, self.service_name) + self.original_service_files = defaults.templates + self.temp_service_files = dict(self.original_service_files) + self.modes = sorted(defaults.modes) + self.mode_configs = defaults.modes + self.config = ConfigOption.from_dict(defaults.config) + self.default_config = {x.name: x.value for x in self.config.values()} + self.rendered = self.core.get_service_rendered( self.node.id, self.service_name ) - self.default_startup = 
default_config.startup[:] - self.default_validate = default_config.validate[:] - self.default_shutdown = default_config.shutdown[:] - self.default_directories = default_config.dirs[:] - custom_service_config = self.node.service_configs.get(self.service_name) - self.default_config = default_config - service_config = ( - custom_service_config if custom_service_config else default_config - ) - self.dependencies = service_config.dependencies[:] - self.executables = service_config.executables[:] - self.metadata = service_config.meta - self.filenames = service_config.configs[:] - self.startup_commands = service_config.startup[:] - self.validation_commands = service_config.validate[:] - self.shutdown_commands = service_config.shutdown[:] - self.validation_mode = service_config.validation_mode - self.validation_time = service_config.validation_timer - self.temp_directories = service_config.dirs[:] - self.original_service_files = { - x: self.app.core.get_node_service_file( - self.node.id, self.service_name, x - ) - for x in default_config.configs - } - self.temp_service_files = dict(self.original_service_files) - - file_configs = self.node.service_file_configs.get(self.service_name, {}) - for file, data in file_configs.items(): - self.temp_service_files[file] = data + service_config = self.node.service_configs.get(self.service_name) + if service_config: + for key, value in service_config.config.items(): + self.config[key].value = value + logger.info("default config: %s", self.default_config) + for file, data in service_config.templates.items(): + self.modified_files.add(file) + self.temp_service_files[file] = data except grpc.RpcError as e: - self.app.show_grpc_exception("Get Node Service Error", e) + self.app.show_grpc_exception("Get Service Error", e) self.has_error = True def draw(self) -> None: self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(1, weight=1) - - # draw metadata - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Meta-data") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - self.metadata_entry = ttk.Entry(frame, textvariable=self.metadata) - self.metadata_entry.grid(row=0, column=1, sticky=tk.EW) - + self.top.rowconfigure(0, weight=1) # draw notebook self.notebook = ttk.Notebook(self.top) self.notebook.grid(sticky=tk.NSEW, pady=PADY) self.draw_tab_files() - self.draw_tab_directories() + if self.config: + self.draw_tab_config() self.draw_tab_startstop() - self.draw_tab_configuration() - + self.draw_tab_validation() self.draw_buttons() def draw_tab_files(self) -> None: tab = ttk.Frame(self.notebook, padding=FRAME_PAD) tab.grid(sticky=tk.NSEW) tab.columnconfigure(0, weight=1) - self.notebook.add(tab, text="Files") + tab.rowconfigure(2, weight=1) + self.notebook.add(tab, text="Directories/Files") label = ttk.Label( - tab, text="Config files and scripts that are generated for this service." - ) - label.grid() - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="File Name") - label.grid(row=0, column=0, padx=PADX, sticky=tk.W) - self.filename_combobox = ttk.Combobox(frame, values=self.filenames) - self.filename_combobox.bind( - "<>", self.display_service_file_data - ) - self.filename_combobox.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button( - frame, image=self.documentnew_img, command=self.add_filename + tab, text="Directories and templates that will be used for this service." 
) - button.grid(row=0, column=2, padx=PADX) - button = ttk.Button( - frame, image=self.editdelete_img, command=self.delete_filename - ) - button.grid(row=0, column=3) + label.grid(pady=PADY) frame = ttk.Frame(tab) frame.grid(sticky=tk.EW, pady=PADY) frame.columnconfigure(1, weight=1) - button = ttk.Radiobutton( - frame, - variable=self.radiovar, - text="Copy Source File", - value=1, - state=tk.DISABLED, + label = ttk.Label(frame, text="Directories") + label.grid(row=0, column=0, sticky=tk.W, padx=PADX) + state = "readonly" if self.directories else tk.DISABLED + directories_combobox = ttk.Combobox(frame, values=self.directories, state=state) + directories_combobox.grid(row=0, column=1, sticky=tk.EW, pady=PADY) + if self.directories: + directories_combobox.current(0) + label = ttk.Label(frame, text="Files") + label.grid(row=1, column=0, sticky=tk.W, padx=PADX) + state = "readonly" if self.templates else tk.DISABLED + self.templates_combobox = ttk.Combobox( + frame, values=self.templates, state=state ) - button.grid(row=0, column=0, sticky=tk.W, padx=PADX) - entry = ttk.Entry(frame, state=tk.DISABLED) - entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - image = images.from_enum(ImageEnum.FILEOPEN, width=images.BUTTON_SIZE) - button = ttk.Button(frame, image=image) - button.image = image - button.grid(row=0, column=2) - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(0, weight=1) - button = ttk.Radiobutton( - frame, - variable=self.radiovar, - text="Use text below for file contents", - value=2, - ) - button.grid(row=0, column=0, sticky=tk.EW) - image = images.from_enum(ImageEnum.FILEOPEN, width=images.BUTTON_SIZE) - button = ttk.Button(frame, image=image) - button.image = image - button.grid(row=0, column=1) - image = images.from_enum(ImageEnum.DOCUMENTSAVE, width=images.BUTTON_SIZE) - button = ttk.Button(frame, image=image) - button.image = image - button.grid(row=0, column=2) - - self.service_file_data = CodeText(tab) - self.service_file_data.grid(sticky=tk.NSEW) - tab.rowconfigure(self.service_file_data.grid_info()["row"], weight=1) - if len(self.filenames) > 0: - self.filename_combobox.current(0) - self.service_file_data.text.delete(1.0, "end") - self.service_file_data.text.insert( - "end", self.temp_service_files[self.filenames[0]] - ) - self.service_file_data.text.bind( - "", self.update_temp_service_file_data + self.templates_combobox.bind( + "<>", self.handle_template_changed ) + self.templates_combobox.grid(row=1, column=1, sticky=tk.EW, pady=PADY) + # draw file template tab + notebook = ttk.Notebook(tab) + notebook.rowconfigure(0, weight=1) + notebook.columnconfigure(0, weight=1) + notebook.grid(sticky=tk.NSEW, pady=PADY) + # draw rendered file tab + rendered_tab = ttk.Frame(notebook, padding=FRAME_PAD) + rendered_tab.grid(sticky=tk.NSEW) + rendered_tab.rowconfigure(0, weight=1) + rendered_tab.columnconfigure(0, weight=1) + notebook.add(rendered_tab, text="Rendered") + self.rendered_text = CodeText(rendered_tab) + self.rendered_text.grid(sticky=tk.NSEW) + self.rendered_text.text.bind("", self.update_template_file_data) + # draw template file tab + template_tab = ttk.Frame(notebook, padding=FRAME_PAD) + template_tab.grid(sticky=tk.NSEW) + template_tab.rowconfigure(0, weight=1) + template_tab.columnconfigure(0, weight=1) + notebook.add(template_tab, text="Template") + self.template_text = CodeText(template_tab) + self.template_text.grid(sticky=tk.NSEW) + self.template_text.text.bind("", self.update_template_file_data) + if self.templates: + 
self.templates_combobox.current(0) + template_name = self.templates[0] + temp_data = self.temp_service_files[template_name] + self.template_text.set_text(temp_data) + rendered_data = self.rendered[template_name] + self.rendered_text.set_text(rendered_data) + else: + self.template_text.text.configure(state=tk.DISABLED) + self.rendered_text.text.configure(state=tk.DISABLED) - def draw_tab_directories(self) -> None: + def draw_tab_config(self) -> None: tab = ttk.Frame(self.notebook, padding=FRAME_PAD) tab.grid(sticky=tk.NSEW) tab.columnconfigure(0, weight=1) - tab.rowconfigure(2, weight=1) - self.notebook.add(tab, text="Directories") + self.notebook.add(tab, text="Configuration") + + if self.modes: + frame = ttk.Frame(tab) + frame.grid(sticky=tk.EW, pady=PADY) + frame.columnconfigure(1, weight=1) + label = ttk.Label(frame, text="Modes") + label.grid(row=0, column=0, padx=PADX) + self.modes_combobox = ttk.Combobox( + frame, values=self.modes, state="readonly" + ) + self.modes_combobox.bind("<>", self.handle_mode_changed) + self.modes_combobox.grid(row=0, column=1, sticky=tk.EW, pady=PADY) - label = ttk.Label( - tab, - text="Directories required by this service that are unique for each node.", - ) - label.grid(row=0, column=0, sticky=tk.EW) - frame = ttk.Frame(tab, padding=FRAME_PAD) - frame.columnconfigure(0, weight=1) - frame.grid(row=1, column=0, sticky=tk.NSEW) - var = tk.StringVar(value="") - self.directory_entry = ttk.Entry(frame, textvariable=var) - self.directory_entry.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="...", command=self.find_directory_button) - button.grid(row=0, column=1, sticky=tk.EW) - self.dir_list = ListboxScroll(tab) - self.dir_list.grid(row=2, column=0, sticky=tk.NSEW, pady=PADY) - self.dir_list.listbox.bind("<>", self.directory_select) - for d in self.temp_directories: - self.dir_list.listbox.insert("end", d) - - frame = ttk.Frame(tab) - frame.grid(row=3, column=0, sticky=tk.NSEW) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - button = ttk.Button(frame, text="Add", command=self.add_directory) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Remove", command=self.remove_directory) - button.grid(row=0, column=1, sticky=tk.EW) + logger.info("service config: %s", self.config) + self.config_frame = ConfigFrame(tab, self.app, self.config) + self.config_frame.draw_config() + self.config_frame.grid(sticky=tk.NSEW, pady=PADY) + tab.rowconfigure(self.config_frame.grid_info()["row"], weight=1) def draw_tab_startstop(self) -> None: tab = ttk.Frame(self.notebook, padding=FRAME_PAD) @@ -281,26 +239,13 @@ def draw_tab_startstop(self) -> None: ) commands = self.validation_commands label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(1, weight=1) + label_frame.rowconfigure(0, weight=1) label_frame.grid(row=i, column=0, sticky=tk.NSEW, pady=PADY) - - frame = ttk.Frame(label_frame) - frame.grid(row=0, column=0, sticky=tk.NSEW, pady=PADY) - frame.columnconfigure(0, weight=1) - entry = ttk.Entry(frame, textvariable=tk.StringVar()) - entry.grid(row=0, column=0, stick="ew", padx=PADX) - button = ttk.Button(frame, image=self.documentnew_img) - button.bind("", self.add_command) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, image=self.editdelete_img) - button.grid(row=0, column=2, sticky=tk.EW) - button.bind("", self.delete_command) listbox_scroll = ListboxScroll(label_frame) - listbox_scroll.listbox.bind("<>", self.update_entry) for 
command in commands: listbox_scroll.listbox.insert("end", command) listbox_scroll.listbox.config(height=4) - listbox_scroll.grid(row=1, column=0, sticky=tk.NSEW) + listbox_scroll.grid(sticky=tk.NSEW) if i == 0: self.startup_commands_listbox = listbox_scroll.listbox elif i == 1: @@ -308,11 +253,11 @@ def draw_tab_startstop(self) -> None: elif i == 2: self.validate_commands_listbox = listbox_scroll.listbox - def draw_tab_configuration(self) -> None: + def draw_tab_validation(self) -> None: tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) + tab.grid(sticky=tk.EW) tab.columnconfigure(0, weight=1) - self.notebook.add(tab, text="Configuration", sticky=tk.NSEW) + self.notebook.add(tab, text="Validation", sticky=tk.NSEW) frame = ttk.Frame(tab) frame.grid(sticky=tk.EW, pady=PADY) @@ -321,7 +266,7 @@ def draw_tab_configuration(self) -> None: label = ttk.Label(frame, text="Validation Time") label.grid(row=0, column=0, sticky=tk.W, padx=PADX) self.validation_time_entry = ttk.Entry(frame) - self.validation_time_entry.insert("end", self.validation_time) + self.validation_time_entry.insert("end", str(self.validation_time)) self.validation_time_entry.config(state=tk.DISABLED) self.validation_time_entry.grid(row=0, column=1, sticky=tk.EW, pady=PADY) @@ -343,7 +288,7 @@ def draw_tab_configuration(self) -> None: label = ttk.Label(frame, text="Validation Period") label.grid(row=2, column=0, sticky=tk.W, padx=PADX) self.validation_period_entry = ttk.Entry( - frame, state=tk.DISABLED, textvariable=tk.StringVar() + frame, state=tk.DISABLED, textvariable=self.validation_period ) self.validation_period_entry.grid(row=2, column=1, sticky=tk.EW, pady=PADY) @@ -370,243 +315,89 @@ def draw_tab_configuration(self) -> None: def draw_buttons(self) -> None: frame = ttk.Frame(self.top) frame.grid(sticky=tk.EW) - for i in range(4): + for i in range(3): frame.columnconfigure(i, weight=1) button = ttk.Button(frame, text="Apply", command=self.click_apply) button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) button = ttk.Button(frame, text="Defaults", command=self.click_defaults) button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Copy...", command=self.click_copy) - button.grid(row=0, column=2, sticky=tk.EW, padx=PADX) button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=3, sticky=tk.EW) - - def add_filename(self) -> None: - filename = self.filename_combobox.get() - if filename not in self.filename_combobox["values"]: - self.filename_combobox["values"] += (filename,) - self.filename_combobox.set(filename) - self.temp_service_files[filename] = self.service_file_data.text.get( - 1.0, "end" - ) - else: - logger.debug("file already existed") - - def delete_filename(self) -> None: - cbb = self.filename_combobox - filename = cbb.get() - if filename in cbb["values"]: - cbb["values"] = tuple([x for x in cbb["values"] if x != filename]) - cbb.set("") - self.service_file_data.text.delete(1.0, "end") - self.temp_service_files.pop(filename, None) - if filename in self.modified_files: - self.modified_files.remove(filename) - - @classmethod - def add_command(cls, event: tk.Event) -> None: - frame_contains_button = event.widget.master - listbox = frame_contains_button.master.grid_slaves(row=1, column=0)[0].listbox - command_to_add = frame_contains_button.grid_slaves(row=0, column=0)[0].get() - if command_to_add == "": - return - for cmd in listbox.get(0, tk.END): - if cmd == command_to_add: - return - listbox.insert(tk.END, 
command_to_add) - - @classmethod - def update_entry(cls, event: tk.Event) -> None: - listbox = event.widget - current_selection = listbox.curselection() - if len(current_selection) > 0: - cmd = listbox.get(current_selection[0]) - entry = listbox.master.master.grid_slaves(row=0, column=0)[0].grid_slaves( - row=0, column=0 - )[0] - entry.delete(0, "end") - entry.insert(0, cmd) - - @classmethod - def delete_command(cls, event: tk.Event) -> None: - button = event.widget - frame_contains_button = button.master - listbox = frame_contains_button.master.grid_slaves(row=1, column=0)[0].listbox - current_selection = listbox.curselection() - if len(current_selection) > 0: - listbox.delete(current_selection[0]) - entry = frame_contains_button.grid_slaves(row=0, column=0)[0] - entry.delete(0, tk.END) + button.grid(row=0, column=2, sticky=tk.EW) def click_apply(self) -> None: - if ( - not self.is_custom_command() - and not self.is_custom_service_file() - and not self.has_new_files() - and not self.is_custom_directory() - ): + current_listbox = self.master.current.listbox + if not self.is_custom(): self.node.service_configs.pop(self.service_name, None) - self.current_service_color("") - self.destroy() - return - files = set(self.filenames) - if ( - self.is_custom_command() - or self.has_new_files() - or self.is_custom_directory() - ): - startup, validate, shutdown = self.get_commands() - files = set(self.filename_combobox["values"]) - service_data = NodeServiceData( - configs=list(files), - dirs=self.temp_directories, - startup=startup, - validate=validate, - shutdown=shutdown, - ) - logger.info("setting service data: %s", service_data) - self.node.service_configs[self.service_name] = service_data - for file in self.modified_files: - if file not in files: - continue - file_configs = self.node.service_file_configs.setdefault( - self.service_name, {} + current_listbox.itemconfig(current_listbox.curselection()[0], bg="") + else: + service_config = self.node.service_configs.setdefault( + self.service_name, ServiceData() ) - file_configs[file] = self.temp_service_files[file] - self.current_service_color("green") + if self.config_frame: + self.config_frame.parse_config() + service_config.config = {x.name: x.value for x in self.config.values()} + for file in self.modified_files: + service_config.templates[file] = self.temp_service_files[file] + all_current = current_listbox.get(0, tk.END) + current_listbox.itemconfig(all_current.index(self.service_name), bg="green") self.destroy() - def display_service_file_data(self, event: tk.Event) -> None: - filename = self.filename_combobox.get() - self.service_file_data.text.delete(1.0, "end") - self.service_file_data.text.insert("end", self.temp_service_files[filename]) - - def update_temp_service_file_data(self, event: tk.Event) -> None: - filename = self.filename_combobox.get() - self.temp_service_files[filename] = self.service_file_data.text.get(1.0, "end") - if self.temp_service_files[filename] != self.original_service_files.get( - filename, "" - ): - self.modified_files.add(filename) + def handle_template_changed(self, event: tk.Event) -> None: + template_name = self.templates_combobox.get() + temp_data = self.temp_service_files[template_name] + self.template_text.set_text(temp_data) + rendered = self.rendered[template_name] + self.rendered_text.set_text(rendered) + + def handle_mode_changed(self, event: tk.Event) -> None: + mode = self.modes_combobox.get() + config = self.mode_configs[mode] + logger.info("mode config: %s", config) + 
self.config_frame.set_values(config) + + def update_template_file_data(self, _event: tk.Event) -> None: + template = self.templates_combobox.get() + # check for change + self.temp_service_files[template] = self.rendered_text.get_text().strip() + if self.rendered[template] != self.temp_service_files[template]: + self.modified_files.add(template) + return + self.temp_service_files[template] = self.template_text.get_text().strip() + if self.temp_service_files[template] != self.original_service_files[template]: + self.modified_files.add(template) else: - self.modified_files.discard(filename) + self.modified_files.discard(template) - def is_custom_command(self) -> bool: - startup, validate, shutdown = self.get_commands() - return ( - set(self.default_startup) != set(startup) - or set(self.default_validate) != set(validate) - or set(self.default_shutdown) != set(shutdown) - ) - - def has_new_files(self) -> bool: - return set(self.filenames) != set(self.filename_combobox["values"]) - - def is_custom_service_file(self) -> bool: - return len(self.modified_files) > 0 - - def is_custom_directory(self) -> bool: - return set(self.default_directories) != set(self.dir_list.listbox.get(0, "end")) + def is_custom(self) -> bool: + has_custom_templates = len(self.modified_files) > 0 + has_custom_config = False + if self.config_frame: + current = self.config_frame.parse_config() + has_custom_config = self.default_config != current + return has_custom_templates or has_custom_config def click_defaults(self) -> None: - """ - clears out any custom configuration permanently - """ - # clear coreclient data + # clear all saved state data + self.modified_files.clear() self.node.service_configs.pop(self.service_name, None) - file_configs = self.node.service_file_configs.pop(self.service_name, {}) - file_configs.pop(self.service_name, None) self.temp_service_files = dict(self.original_service_files) - self.modified_files.clear() - - # reset files tab - files = list(self.default_config.configs[:]) - self.filenames = files - self.filename_combobox.config(values=files) - self.service_file_data.text.delete(1.0, "end") - if len(files) > 0: - filename = files[0] - self.filename_combobox.set(filename) - self.service_file_data.text.insert("end", self.temp_service_files[filename]) - - # reset commands - self.startup_commands_listbox.delete(0, tk.END) - self.validate_commands_listbox.delete(0, tk.END) - self.shutdown_commands_listbox.delete(0, tk.END) - for cmd in self.default_startup: - self.startup_commands_listbox.insert(tk.END, cmd) - for cmd in self.default_validate: - self.validate_commands_listbox.insert(tk.END, cmd) - for cmd in self.default_shutdown: - self.shutdown_commands_listbox.insert(tk.END, cmd) - - # reset directories - self.directory_entry.delete(0, "end") - self.dir_list.listbox.delete(0, "end") - self.temp_directories = list(self.default_directories) - for d in self.default_directories: - self.dir_list.listbox.insert("end", d) - - self.current_service_color("") + # reset session definition and retrieve default rendered templates + self.core.start_session(definition=True) + self.rendered = self.core.get_service_rendered(self.node.id, self.service_name) + logger.info("cleared service config: %s", self.node.service_configs) + # reset current selected file data and config data, if present + template_name = self.templates_combobox.get() + temp_data = self.temp_service_files[template_name] + self.template_text.set_text(temp_data) + rendered_data = self.rendered[template_name] + 
self.rendered_text.set_text(rendered_data) + if self.config_frame: + logger.info("resetting defaults: %s", self.default_config) + self.config_frame.set_values(self.default_config) - def click_copy(self) -> None: - file_name = self.filename_combobox.get() - dialog = CopyServiceConfigDialog( - self.app, self, self.node.name, self.service_name, file_name - ) - dialog.show() - - @classmethod def append_commands( - cls, commands: list[str], listbox: tk.Listbox, to_add: list[str] + self, commands: list[str], listbox: tk.Listbox, to_add: list[str] ) -> None: for cmd in to_add: commands.append(cmd) listbox.insert(tk.END, cmd) - - def get_commands(self) -> tuple[list[str], list[str], list[str]]: - startup = self.startup_commands_listbox.get(0, "end") - shutdown = self.shutdown_commands_listbox.get(0, "end") - validate = self.validate_commands_listbox.get(0, "end") - return startup, validate, shutdown - - def find_directory_button(self) -> None: - d = filedialog.askdirectory(initialdir="/") - self.directory_entry.delete(0, "end") - self.directory_entry.insert("end", d) - - def add_directory(self) -> None: - directory = Path(self.directory_entry.get()) - if directory.is_absolute(): - if str(directory) not in self.temp_directories: - self.dir_list.listbox.insert("end", directory) - self.temp_directories.append(str(directory)) - else: - messagebox.showerror("Add Directory", "Path must be absolute!", parent=self) - - def remove_directory(self) -> None: - d = self.directory_entry.get() - dirs = self.dir_list.listbox.get(0, "end") - if d and d in self.temp_directories: - self.temp_directories.remove(d) - try: - i = dirs.index(d) - self.dir_list.listbox.delete(i) - except ValueError: - logger.debug("directory is not in the list") - self.directory_entry.delete(0, "end") - - def directory_select(self, event) -> None: - i = self.dir_list.listbox.curselection() - if i: - d = self.dir_list.listbox.get(i) - self.directory_entry.delete(0, "end") - self.directory_entry.insert("end", d) - - def current_service_color(self, color="") -> None: - """ - change the current service label color - """ - listbox = self.master.current.listbox - services = listbox.get(0, tk.END) - listbox.itemconfig(services.index(self.service_name), bg=color) diff --git a/daemon/core/gui/graph/edges.py b/daemon/core/gui/graph/edges.py index e5a4c97ba..e72302161 100644 --- a/daemon/core/gui/graph/edges.py +++ b/daemon/core/gui/graph/edges.py @@ -167,7 +167,7 @@ def has_shadows(self) -> bool: return False return self.src.canvas != self.dst.canvas - def draw(self, state: str) -> None: + def draw(self, state: str, organize: bool = True) -> None: if not self.has_shadows(): dst = self.dst if self.dst else self.src self.id = self.draw_edge(self.src.canvas, self.src, dst, state) @@ -188,9 +188,10 @@ def draw(self, state: str) -> None: self.dst_shadow = self.src.canvas.get_shadow(self.dst) self.id = self.draw_edge(self.src.canvas, self.src, self.dst_shadow, state) self.id2 = self.draw_edge(self.dst.canvas, self.src_shadow, self.dst, state) - self.src.canvas.organize() - if self.has_shadows(): - self.dst.canvas.organize() + if organize: + self.src.canvas.organize() + if self.has_shadows(): + self.dst.canvas.organize() def draw_edge( self, @@ -531,7 +532,11 @@ class CanvasEdge(Edge): """ def __init__( - self, app: "Application", src: "CanvasNode", dst: "CanvasNode" = None + self, + app: "Application", + src: "CanvasNode", + dst: "CanvasNode" = None, + organize: bool = True, ) -> None: """ Create an instance of canvas edge object @@ -541,7 +546,7 @@ def 
__init__( self.text_dst: Optional[int] = None self.asymmetric_link: Optional[Link] = None self.throughput: Optional[float] = None - self.draw(tk.NORMAL) + self.draw(tk.NORMAL, organize) def is_customized(self) -> bool: return self.width != EDGE_WIDTH or self.color != EDGE_COLOR @@ -626,7 +631,9 @@ def clear_throughput(self) -> None: if not self.linked_wireless: self.draw_link_options() - def complete(self, dst: "CanvasNode", link: Link = None) -> None: + def complete( + self, dst: "CanvasNode", link: Link = None, organize: bool = True + ) -> None: logger.debug( "completing wired link from node(%s) to node(%s)", self.src.core_node.name, @@ -653,9 +660,10 @@ def complete(self, dst: "CanvasNode", link: Link = None) -> None: self.draw_labels() self.check_visibility() self.app.core.save_edge(self) - self.src.canvas.organize() - if self.has_shadows(): - self.dst.canvas.organize() + if organize: + self.src.canvas.organize() + if self.has_shadows(): + self.dst.canvas.organize() self.manager.edges[self.token] = self def check_wireless(self) -> None: diff --git a/daemon/core/gui/graph/graph.py b/daemon/core/gui/graph/graph.py index 1a7012393..e7c8e49be 100644 --- a/daemon/core/gui/graph/graph.py +++ b/daemon/core/gui/graph/graph.py @@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any, Optional from PIL import Image +from PIL.Image import Resampling from PIL.ImageTk import PhotoImage from core.api.grpc.wrappers import Interface, Link @@ -528,7 +529,7 @@ def width_and_height(self) -> tuple[int, int]: def get_wallpaper_image(self) -> Image.Image: width = int(self.wallpaper.width * self.ratio) height = int(self.wallpaper.height * self.ratio) - image = self.wallpaper.resize((width, height), Image.ANTIALIAS) + image = self.wallpaper.resize((width, height), Resampling.LANCZOS) return image def draw_wallpaper( @@ -591,7 +592,9 @@ def wallpaper_scaled(self) -> None: """ self.delete(self.wallpaper_id) canvas_w, canvas_h = self.width_and_height() - image = self.wallpaper.resize((int(canvas_w), int(canvas_h)), Image.ANTIALIAS) + image = self.wallpaper.resize( + (int(canvas_w), int(canvas_h)), Resampling.LANCZOS + ) image = PhotoImage(image) self.draw_wallpaper(image) @@ -720,17 +723,12 @@ def paste_selected(self, _event: tk.Event = None) -> None: ) # copy configurations and services node.core_node.services = core_node.services.copy() - node.core_node.config_services = core_node.config_services.copy() node.core_node.emane_model_configs = deepcopy(core_node.emane_model_configs) node.core_node.wlan_config = deepcopy(core_node.wlan_config) node.core_node.mobility_config = deepcopy(core_node.mobility_config) node.core_node.service_configs = deepcopy(core_node.service_configs) - node.core_node.service_file_configs = deepcopy( - core_node.service_file_configs - ) - node.core_node.config_service_configs = deepcopy( - core_node.config_service_configs - ) + node.core_node.image = core_node.image + node.core_node.compose = core_node.compose copy_map[canvas_node.id] = node.id self.nodes[node.id] = node diff --git a/daemon/core/gui/graph/manager.py b/daemon/core/gui/graph/manager.py index b2745f5c3..79d4986bc 100644 --- a/daemon/core/gui/graph/manager.py +++ b/daemon/core/gui/graph/manager.py @@ -260,7 +260,7 @@ def draw_session(self, session: Session) -> None: if link.type == LinkType.WIRELESS: self.add_wireless_edge(node1, node2, link) else: - self.add_wired_edge(node1, node2, link) + self.add_wired_edge(node1, node2, link, organize=False) # organize canvas order for canvas in self.canvases.values(): @@ -391,15 +391,17 
@@ def delete_wired_edge(self, link: Link) -> None: if edge: edge.delete() - def add_wired_edge(self, src: CanvasNode, dst: CanvasNode, link: Link) -> None: + def add_wired_edge( + self, src: CanvasNode, dst: CanvasNode, link: Link, organize: bool + ) -> None: token = create_edge_token(link) if token in self.edges and link.options.unidirectional: edge = self.edges[token] edge.asymmetric_link = link edge.redraw() elif token not in self.edges: - edge = CanvasEdge(self.app, src, dst) - edge.complete(dst, link) + edge = CanvasEdge(self.app, src, dst, organize) + edge.complete(dst, link, organize) def add_wireless_edge(self, src: CanvasNode, dst: CanvasNode, link: Link) -> None: network_id = link.network_id if link.network_id else None diff --git a/daemon/core/gui/graph/node.py b/daemon/core/gui/graph/node.py index 0cfbf2e97..da499b57e 100644 --- a/daemon/core/gui/graph/node.py +++ b/daemon/core/gui/graph/node.py @@ -14,7 +14,6 @@ from core.gui.dialogs.emaneconfig import EmaneConfigDialog from core.gui.dialogs.mobilityconfig import MobilityConfigDialog from core.gui.dialogs.nodeconfig import NodeConfigDialog -from core.gui.dialogs.nodeconfigservice import NodeConfigServiceDialog from core.gui.dialogs.nodeservice import NodeServiceDialog from core.gui.dialogs.wirelessconfig import WirelessConfigDialog from core.gui.dialogs.wlanconfig import WlanConfigDialog @@ -198,7 +197,9 @@ def on_enter(self, event: tk.Event) -> None: self.tooltip.text.set("waiting...") self.tooltip.on_enter(event) try: - output = self.app.core.run(self.core_node.id) + output = self.app.core.run_cmd( + self.core_node.id, self.app.core.observer + ) self.tooltip.text.set(output) except grpc.RpcError as e: self.app.show_grpc_exception("Observer Error", e) @@ -242,8 +243,17 @@ def show_context(self, event: tk.Event) -> None: label="Mobility Player", command=self.show_mobility_player ) if nutils.is_container(self.core_node): + cmds_menu = tk.Menu(self.context) + for name, cmd in self.app.core.node_commands.items(): + cmd_func = functools.partial( + self.app.core.run_cmd, self.core_node.id, cmd + ) + cmds_menu.add_command(label=name, command=cmd_func) + themes.style_menu(cmds_menu) + self.context.add_cascade(label="Commands", menu=cmds_menu) + services_menu = tk.Menu(self.context) - for service in sorted(self.core_node.config_services): + for service in sorted(self.core_node.services): service_menu = tk.Menu(services_menu) themes.style_menu(service_menu) start_func = functools.partial(self.start_service, service) @@ -260,12 +270,7 @@ def show_context(self, event: tk.Event) -> None: else: self.context.add_command(label="Configure", command=self.show_config) if nutils.is_container(self.core_node): - self.context.add_command( - label="Config Services", command=self.show_config_services - ) - self.context.add_command( - label="Services (Deprecated)", command=self.show_services - ) + self.context.add_command(label="Services", command=self.show_services) if is_emane: self.context.add_command( label="EMANE Config", command=self.show_emane_config @@ -382,10 +387,6 @@ def show_services(self) -> None: dialog = NodeServiceDialog(self.app, self.core_node) dialog.show() - def show_config_services(self) -> None: - dialog = NodeConfigServiceDialog(self.app, self.core_node) - dialog.show() - def has_emane_link(self, iface_id: int) -> Node: result = None for edge in self.edges: @@ -479,7 +480,7 @@ def set_label(self, state: str) -> None: def _service_action(self, service: str, action: ServiceAction) -> None: session_id = self.app.core.session.id 
try: - result = self.app.core.client.config_service_action( + result = self.app.core.client.service_action( session_id, self.core_node.id, service, action ) if not result: diff --git a/daemon/core/gui/images.py b/daemon/core/gui/images.py index 070137fbe..31f0fff63 100644 --- a/daemon/core/gui/images.py +++ b/daemon/core/gui/images.py @@ -2,6 +2,7 @@ from typing import Optional from PIL import Image +from PIL.Image import Resampling from PIL.ImageTk import PhotoImage from core.api.grpc.wrappers import Node, NodeType @@ -32,7 +33,7 @@ def from_file( width = int(width * scale) height = int(height * scale) image = Image.open(file_path) - image = image.resize((width, height), Image.ANTIALIAS) + image = image.resize((width, height), Resampling.LANCZOS) return PhotoImage(image) @@ -79,7 +80,6 @@ class ImageEnum(Enum): ANTENNA = "antenna" DOCKER = "docker" PODMAN = "podman" - LXC = "lxc" ALERT = "alert" DELETE = "delete" SHUTDOWN = "shutdown" @@ -103,7 +103,6 @@ class ImageEnum(Enum): (NodeType.TUNNEL, None): ImageEnum.TUNNEL, (NodeType.DOCKER, None): ImageEnum.DOCKER, (NodeType.PODMAN, None): ImageEnum.PODMAN, - (NodeType.LXC, None): ImageEnum.LXC, } diff --git a/daemon/core/gui/interface.py b/daemon/core/gui/interface.py index 9ebea3c12..b275d98b3 100644 --- a/daemon/core/gui/interface.py +++ b/daemon/core/gui/interface.py @@ -30,9 +30,17 @@ def get_index(iface: Interface) -> Optional[int]: class Subnets: - def __init__(self, ip4: IPNetwork, ip6: IPNetwork) -> None: + def __init__( + self, + ip4: IPNetwork, + ip4_mask: int, + ip6: IPNetwork, + ip6_mask: int, + ) -> None: self.ip4 = ip4 + self.ip4_mask = ip4_mask self.ip6 = ip6 + self.ip6_mask = ip6_mask self.used_indexes = set() def __eq__(self, other: Any) -> bool: @@ -47,7 +55,7 @@ def key(self) -> tuple[IPNetwork, IPNetwork]: return self.ip4, self.ip6 def next(self) -> "Subnets": - return Subnets(self.ip4.next(), self.ip6.next()) + return Subnets(self.ip4.next(), self.ip4_mask, self.ip6.next(), self.ip6_mask) class InterfaceManager: @@ -57,6 +65,7 @@ def __init__(self, app: "Application") -> None: ip6 = self.app.guiconfig.ips.ip6 self.ip4_subnets: IPNetwork = IPNetwork(f"{ip4}/{IP4_MASK}") self.ip6_subnets: IPNetwork = IPNetwork(f"{ip6}/{IP6_MASK}") + self.wireless_subnets: dict[int, Subnets] = {} mac = self.app.guiconfig.mac self.mac: EUI = EUI(mac, dialect=netaddr.mac_unix_expanded) self.current_mac: Optional[EUI] = None @@ -78,12 +87,15 @@ def next_mac(self) -> str: self.current_mac = EUI(value, dialect=netaddr.mac_unix_expanded) return mac - def next_subnets(self) -> Subnets: + def next_subnets(self, wireless_link: bool) -> Subnets: subnets = self.current_subnets if subnets is None: - subnets = Subnets(self.ip4_subnets, self.ip6_subnets) + subnets = Subnets(self.ip4_subnets, IP4_MASK, self.ip6_subnets, IP6_MASK) while subnets.key() in self.used_subnets: subnets = subnets.next() + if wireless_link: + subnets.ip4_mask = WIRELESS_IP4_MASK + subnets.ip6_mask = WIRELESS_IP6_MASK self.used_subnets[subnets.key()] = subnets return subnets @@ -148,64 +160,86 @@ def joined(self, links: list[Link]) -> None: if subnets.key() not in self.used_subnets: self.used_subnets[subnets.key()] = subnets - def next_index(self, node: Node) -> int: + def next_index(self, node: Node, subnets: Subnets) -> int: if nutils.is_router(node): index = 1 else: index = 20 while True: - if index not in self.current_subnets.used_indexes: - self.current_subnets.used_indexes.add(index) + if index not in subnets.used_indexes: + subnets.used_indexes.add(index) break index += 1 
return index - def get_ips(self, node: Node) -> [Optional[str], Optional[str]]: + def get_ips(self, node: Node, subnets: Subnets) -> [Optional[str], Optional[str]]: enable_ip4 = self.app.guiconfig.ips.enable_ip4 enable_ip6 = self.app.guiconfig.ips.enable_ip6 ip4, ip6 = None, None if not enable_ip4 and not enable_ip6: return ip4, ip6 - index = self.next_index(node) + index = self.next_index(node, subnets) if enable_ip4: - ip4 = str(self.current_subnets.ip4[index]) + ip4 = str(subnets.ip4[index]) if enable_ip6: - ip6 = str(self.current_subnets.ip6[index]) + ip6 = str(subnets.ip6[index]) return ip4, ip6 def get_subnets(self, iface: Interface) -> Subnets: ip4_subnet = self.ip4_subnets + ip4_mask = IP4_MASK if iface.ip4: ip4_subnet = IPNetwork(f"{iface.ip4}/{IP4_MASK}").cidr + ip4_mask = iface.ip4_mask ip6_subnet = self.ip6_subnets + ip6_mask = IP6_MASK if iface.ip6: ip6_subnet = IPNetwork(f"{iface.ip6}/{IP6_MASK}").cidr - subnets = Subnets(ip4_subnet, ip6_subnet) + ip6_mask = iface.ip6_mask + subnets = Subnets(ip4_subnet, ip4_mask, ip6_subnet, ip6_mask) return self.used_subnets.get(subnets.key(), subnets) def determine_subnets( - self, canvas_src_node: CanvasNode, canvas_dst_node: CanvasNode - ) -> None: + self, + canvas_src_node: CanvasNode, + canvas_dst_node: CanvasNode, + wireless_link: bool, + ) -> Optional[Subnets]: src_node = canvas_src_node.core_node dst_node = canvas_dst_node.core_node is_src_container = nutils.is_container(src_node) is_dst_container = nutils.is_container(dst_node) + found_subnets = None if is_src_container and is_dst_container: - self.current_subnets = self.next_subnets() + self.current_subnets = self.next_subnets(wireless_link) + found_subnets = self.current_subnets elif is_src_container and not is_dst_container: - subnets = self.find_subnets(canvas_dst_node, visited={src_node.id}) + subnets = self.wireless_subnets.get(dst_node.id) if subnets: - self.current_subnets = subnets + found_subnets = subnets else: - self.current_subnets = self.next_subnets() + subnets = self.find_subnets(canvas_dst_node, visited={src_node.id}) + if subnets: + self.current_subnets = subnets + found_subnets = self.current_subnets + else: + self.current_subnets = self.next_subnets(wireless_link) + found_subnets = self.current_subnets elif not is_src_container and is_dst_container: - subnets = self.find_subnets(canvas_src_node, visited={dst_node.id}) + subnets = self.wireless_subnets.get(src_node.id) if subnets: - self.current_subnets = subnets + found_subnets = subnets else: - self.current_subnets = self.next_subnets() + subnets = self.find_subnets(canvas_src_node, visited={dst_node.id}) + if subnets: + self.current_subnets = subnets + found_subnets = self.current_subnets + else: + self.current_subnets = self.next_subnets(wireless_link) + found_subnets = self.current_subnets else: logger.info("ignoring subnet change for link between network nodes") + return found_subnets def find_subnets( self, canvas_node: CanvasNode, visited: set[int] = None @@ -239,13 +273,13 @@ def create_link(self, edge: CanvasEdge) -> Link: """ src_node = edge.src.core_node dst_node = edge.dst.core_node - self.determine_subnets(edge.src, edge.dst) + subnets = self.determine_subnets(edge.src, edge.dst, edge.linked_wireless) src_iface = None if nutils.is_iface_node(src_node): - src_iface = self.create_iface(edge.src, edge.linked_wireless) + src_iface = self.create_iface(edge.src, subnets) dst_iface = None if nutils.is_iface_node(dst_node): - dst_iface = self.create_iface(edge.dst, edge.linked_wireless) + dst_iface = 
self.create_iface(edge.dst, subnets) link = Link( type=LinkType.WIRED, node1_id=src_node.id, @@ -256,28 +290,43 @@ def create_link(self, edge: CanvasEdge) -> Link: logger.info("added link between %s and %s", src_node.name, dst_node.name) return link - def create_iface(self, canvas_node: CanvasNode, wireless_link: bool) -> Interface: + def create_iface(self, canvas_node: CanvasNode, subnets: Subnets) -> Interface: node = canvas_node.core_node if nutils.is_bridge(node): iface_id = canvas_node.next_iface_id() iface = Interface(id=iface_id) else: - ip4, ip6 = self.get_ips(node) - if wireless_link: - ip4_mask = WIRELESS_IP4_MASK - ip6_mask = WIRELESS_IP6_MASK - else: - ip4_mask = IP4_MASK - ip6_mask = IP6_MASK + ip4, ip6 = self.get_ips(node, subnets) iface_id = canvas_node.next_iface_id() name = f"eth{iface_id}" iface = Interface( id=iface_id, name=name, ip4=ip4, - ip4_mask=ip4_mask, + ip4_mask=subnets.ip4_mask, ip6=ip6, - ip6_mask=ip6_mask, + ip6_mask=subnets.ip6_mask, ) logger.info("create node(%s) interface(%s)", node.name, iface) return iface + + def get_wireless_nets(self, node_id: int) -> Subnets: + subnets = self.wireless_subnets.get(node_id) + if not subnets: + ip4 = IPNetwork(f"{self.ip4_subnets.network}/{WIRELESS_IP4_MASK}") + ip6 = IPNetwork(f"{self.ip6_subnets.network}/{WIRELESS_IP6_MASK}") + subnets = Subnets(ip4, WIRELESS_IP4_MASK, ip6, WIRELESS_IP6_MASK) + return subnets + + def set_wireless_nets(self, node_id: int, ip4: IPNetwork, ip6: IPNetwork) -> None: + expected_ip4 = IPNetwork(f"{self.ip4_subnets.network}/{WIRELESS_IP4_MASK}") + expected_ip6 = IPNetwork(f"{self.ip6_subnets.network}/{WIRELESS_IP6_MASK}") + new_ip4 = expected_ip4 != ip4 + new_ip6 = expected_ip6 != ip6 + if new_ip4 or new_ip6: + subnets = Subnets(ip4, ip4.prefixlen, ip6, ip6.prefixlen) + self.wireless_subnets[node_id] = subnets + self.used_subnets[subnets.key()] = subnets + + def clear_wireless_nets(self, node_id: int) -> None: + self.wireless_subnets.pop(node_id, None) diff --git a/daemon/core/gui/menubar.py b/daemon/core/gui/menubar.py index 16e57cb61..c747a2144 100644 --- a/daemon/core/gui/menubar.py +++ b/daemon/core/gui/menubar.py @@ -17,6 +17,7 @@ from core.gui.dialogs.hooks import HooksDialog from core.gui.dialogs.ipdialog import IpConfigDialog from core.gui.dialogs.macdialog import MacConfigDialog +from core.gui.dialogs.nodecommands import NodeCommandsDialog from core.gui.dialogs.observers import ObserverDialog from core.gui.dialogs.preferences import PreferencesDialog from core.gui.dialogs.servers import ServersDialog @@ -247,6 +248,7 @@ def draw_widgets_menu(self) -> None: Create widget menu """ menu = tk.Menu(self) + menu.add_command(label="Node Commands", command=self.click_node_commands) self.create_observer_widgets_menu(menu) self.create_adjacency_menu(menu) self.create_throughput_menu(menu) @@ -452,6 +454,10 @@ def click_servers(self) -> None: dialog = ServersDialog(self.app) dialog.show() + def click_node_commands(self) -> None: + dialog = NodeCommandsDialog(self.app) + dialog.show() + def click_edit_observer_widgets(self) -> None: dialog = ObserverDialog(self.app) dialog.show() diff --git a/daemon/core/gui/nodeutils.py b/daemon/core/gui/nodeutils.py index 0b3e3d9a9..c1592fe5e 100644 --- a/daemon/core/gui/nodeutils.py +++ b/daemon/core/gui/nodeutils.py @@ -19,10 +19,9 @@ CONTAINER_NODES: set[NodeType] = { NodeType.DEFAULT, NodeType.DOCKER, - NodeType.LXC, NodeType.PODMAN, } -IMAGE_NODES: set[NodeType] = {NodeType.DOCKER, NodeType.LXC, NodeType.PODMAN} +IMAGE_NODES: set[NodeType] = 
{NodeType.DOCKER, NodeType.PODMAN} WIRELESS_NODES: set[NodeType] = { NodeType.WIRELESS_LAN, NodeType.EMANE, @@ -45,7 +44,6 @@ def setup() -> None: (ImageEnum.ROUTER, NodeType.DEFAULT, "Router", "router"), (ImageEnum.PROUTER, NodeType.DEFAULT, "PRouter", "prouter"), (ImageEnum.DOCKER, NodeType.DOCKER, "Docker", None), - (ImageEnum.LXC, NodeType.LXC, "LXC", None), (ImageEnum.PODMAN, NodeType.PODMAN, "Podman", None), ] for image_enum, node_type, label, model in nodes: diff --git a/daemon/core/gui/statusbar.py b/daemon/core/gui/statusbar.py index a4967cd64..e11f29024 100644 --- a/daemon/core/gui/statusbar.py +++ b/daemon/core/gui/statusbar.py @@ -5,7 +5,7 @@ from tkinter import ttk from typing import TYPE_CHECKING, Optional -from core.api.grpc.wrappers import ExceptionEvent, ExceptionLevel +from core.api.grpc.wrappers import AlertEvent, AlertLevel from core.gui.dialogs.alerts import AlertsDialog from core.gui.themes import Styles @@ -24,7 +24,7 @@ def __init__(self, master: tk.Widget, app: "Application") -> None: self.alerts_button: Optional[ttk.Button] = None self.alert_style = Styles.no_alert self.running: bool = False - self.core_alarms: list[ExceptionEvent] = [] + self.core_alarms: list[AlertEvent] = [] self.draw() def draw(self) -> None: @@ -66,17 +66,17 @@ def set_cpu(self, usage: float) -> None: def set_zoom(self, zoom: float) -> None: self.zoom.config(text=f"ZOOM {zoom * 100:.0f}%") - def add_alert(self, event: ExceptionEvent) -> None: + def add_alert(self, event: AlertEvent) -> None: self.core_alarms.append(event) level = event.level self._set_alert_style(level) label = f"Alerts ({len(self.core_alarms)})" self.alerts_button.config(text=label, style=self.alert_style) - def _set_alert_style(self, level: ExceptionLevel) -> None: - if level in [ExceptionLevel.FATAL, ExceptionLevel.ERROR]: + def _set_alert_style(self, level: AlertLevel) -> None: + if level in [AlertLevel.FATAL, AlertLevel.ERROR]: self.alert_style = Styles.red_alert - elif level == ExceptionLevel.WARNING and self.alert_style != Styles.red_alert: + elif level == AlertLevel.WARNING and self.alert_style != Styles.red_alert: self.alert_style = Styles.yellow_alert elif self.alert_style == Styles.no_alert: self.alert_style = Styles.green_alert diff --git a/daemon/core/gui/widgets.py b/daemon/core/gui/widgets.py index 902f11320..6191d887d 100644 --- a/daemon/core/gui/widgets.py +++ b/daemon/core/gui/widgets.py @@ -1,4 +1,5 @@ import logging +import re import tkinter as tk from functools import partial from pathlib import Path @@ -9,6 +10,7 @@ from core.gui import appconfig, themes, validation from core.gui.dialogs.dialog import Dialog from core.gui.themes import FRAME_PAD, PADX, PADY +from core.gui.tooltip import Tooltip logger = logging.getLogger(__name__) @@ -41,7 +43,7 @@ def __init__( master: tk.Widget, app: "Application", _cls: type[ttk.Frame] = ttk.Frame, - **kw: Any + **kw: Any, ) -> None: super().__init__(master, **kw) self.app: "Application" = app @@ -88,7 +90,7 @@ def __init__( app: "Application", config: dict[str, ConfigOption], enabled: bool = True, - **kw: Any + **kw: Any, ) -> None: super().__init__(master, **kw) self.app: "Application" = app @@ -148,6 +150,8 @@ def draw_config(self) -> None: else: entry = ttk.Entry(tab.frame, textvariable=value, state=state) entry.grid(row=index, column=1, sticky=tk.EW) + if option.regex: + Tooltip(entry, option.regex) elif option.type in INT_TYPES: value.set(option.value) state = tk.NORMAL if self.enabled else tk.DISABLED @@ -177,6 +181,12 @@ def parse_config(self) -> dict[str, 
str]: else: option.value = "0" else: + if option.regex: + if not re.match(option.regex, config_value): + raise ValueError( + f"{option.label} value '{config_value}' " + f"does not match regex '{option.regex}'" + ) option.value = config_value return {x: self.config[x].value for x in self.config} @@ -216,7 +226,7 @@ def __init__( master: ttk.Widget, app: "Application", clicked: Callable = None, - **kw: Any + **kw: Any, ) -> None: super().__init__(master, app, **kw) self.clicked: Callable = clicked diff --git a/daemon/core/location/geo.py b/daemon/core/location/geo.py index 783087288..d5d89625d 100644 --- a/daemon/core/location/geo.py +++ b/daemon/core/location/geo.py @@ -7,8 +7,6 @@ import pyproj from pyproj import Transformer -from core.emulator.enumerations import RegisterTlvs - logger = logging.getLogger(__name__) SCALE_FACTOR: float = 100.0 CRS_WGS84: int = 4326 @@ -22,7 +20,6 @@ class GeoLocation: """ name: str = "location" - config_type: RegisterTlvs = RegisterTlvs.UTILITY def __init__(self) -> None: """ diff --git a/daemon/core/location/mobility.py b/daemon/core/location/mobility.py index ebac9bc56..e5b56cd5b 100644 --- a/daemon/core/location/mobility.py +++ b/daemon/core/location/mobility.py @@ -23,7 +23,7 @@ ModelManager, ) from core.emane.nodes import EmaneNet -from core.emulator.data import EventData, LinkData, LinkOptions +from core.emulator.data import LinkData, LinkOptions from core.emulator.enumerations import EventTypes, LinkTypes, MessageFlags, RegisterTlvs from core.errors import CoreError from core.executables import BASH @@ -91,7 +91,6 @@ class MobilityManager(ModelManager): """ name = "MobilityManager" - config_type = RegisterTlvs.WIRELESS def __init__(self, session: "Session") -> None: """ @@ -142,54 +141,6 @@ def startup(self, node_ids: list[int] = None) -> None: "skipping mobility configuration for unknown node: %s", node_id ) - def handleevent(self, event_data: EventData) -> None: - """ - Handle an Event Message used to start, stop, or pause - mobility scripts for a given mobility network. - - :param event_data: event data to handle - :return: nothing - """ - event_type = event_data.event_type - node_id = event_data.node - name = event_data.name - try: - node = get_mobility_node(self.session, node_id) - except CoreError: - logger.exception( - "ignoring event for model(%s), unknown node(%s)", name, node_id - ) - return - - # name is e.g. "mobility:ns2script" - models = name[9:].split(",") - for model in models: - cls = self.models.get(model) - if not cls: - logger.warning("ignoring event for unknown model '%s'", model) - continue - if cls.config_type in [RegisterTlvs.WIRELESS, RegisterTlvs.MOBILITY]: - model = node.mobility - else: - continue - if model is None: - logger.warning("ignoring event, %s has no model", node.name) - continue - if cls.name != model.name: - logger.warning( - "ignoring event for %s wrong model %s,%s", - node.name, - cls.name, - model.name, - ) - continue - if event_type in [EventTypes.STOP, EventTypes.RESTART]: - model.stop(move_initial=True) - if event_type in [EventTypes.START, EventTypes.RESTART]: - model.start() - if event_type == EventTypes.PAUSE: - model.pause() - def sendevent(self, model: "WayPointMobility") -> None: """ Send an event message on behalf of a mobility model. 
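The ConfigFrame.parse_config change in daemon/core/gui/widgets.py above now rejects option values that fail an option's regex, and surfaces the pattern as a tooltip on the entry. A minimal standalone sketch of that check; the option name and pattern below are invented purely for illustration:

    import re

    def validate_option(label: str, regex: str, value: str) -> str:
        # mirrors the parse_config behavior: reject values that do not match
        if regex and not re.match(regex, value):
            raise ValueError(
                f"{label} value '{value}' does not match regex '{regex}'"
            )
        return value

    # hypothetical option, shown only to exercise the check
    validate_option("txpower", r"^\d+(\.\d+)?$", "10.0")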
@@ -208,14 +159,12 @@ def sendevent(self, model: "WayPointMobility") -> None: start_time = int(model.lasttime - model.timezero) end_time = int(model.endtime) data = f"start={start_time} end={end_time}" - event_data = EventData( - node=model.id, - event_type=event_type, + self.session.broadcast_event( + event_type, + node_id=model.id, name=f"mobility:{model.name}", data=data, - time=str(time.monotonic()), ) - self.session.broadcast_event(event_data) class WirelessModel(ConfigurableOptions): @@ -320,8 +269,7 @@ def setlinkparams(self) -> None: loss=self.loss, jitter=self.jitter, ) - iface.options.update(options) - iface.set_config() + iface.update_options(options) def get_position(self, iface: CoreInterface) -> tuple[float, float, float]: """ @@ -467,8 +415,16 @@ def create_link_data( message_type=message_type, type=LinkTypes.WIRELESS, node1_id=iface1.node.id, + iface1=iface1.get_data(), node2_id=iface2.node.id, + iface2=iface2.get_data(), network_id=self.wlan.id, + options=LinkOptions( + bandwidth=self.bw, + delay=self.delay, + loss=self.loss, + jitter=self.jitter, + ), color=color, ) diff --git a/daemon/core/nodes/base.py b/daemon/core/nodes/base.py index e59a89e40..1a2fa468b 100644 --- a/daemon/core/nodes/base.py +++ b/daemon/core/nodes/base.py @@ -9,28 +9,26 @@ from dataclasses import dataclass, field from pathlib import Path from threading import RLock -from typing import TYPE_CHECKING, Optional, Union +from typing import TYPE_CHECKING, Optional import netaddr from core import utils -from core.configservice.dependencies import ConfigServiceDependencies from core.emulator.data import InterfaceData, LinkOptions from core.errors import CoreCommandError, CoreError from core.executables import BASH, MOUNT, TEST, VCMD, VNODED from core.nodes.interface import DEFAULT_MTU, CoreInterface from core.nodes.netclient import LinuxNetClient, get_net_client +from core.services.dependencies import ServiceDependencies logger = logging.getLogger(__name__) if TYPE_CHECKING: from core.emulator.distributed import DistributedServer from core.emulator.session import Session - from core.configservice.base import ConfigService - from core.services.coreservices import CoreService + from core.services.base import CoreService - CoreServices = list[Union[CoreService, type[CoreService]]] - ConfigServiceType = type[ConfigService] + ServiceType = type[CoreService] PRIVATE_DIRS: list[Path] = [Path("/var/run"), Path("/var/log")] @@ -115,12 +113,8 @@ class CoreNodeOptions(NodeOptions): """model is used for providing a default set of services""" services: list[str] = field(default_factory=list) """services to start within node""" - config_services: list[str] = field(default_factory=list) - """config services to start within node""" directory: Path = None """directory to define node, defaults to path under the session directory""" - legacy: bool = False - """legacy nodes default to standard services""" class NodeBase(abc.ABC): @@ -131,7 +125,7 @@ class NodeBase(abc.ABC): def __init__( self, session: "Session", - _id: int = None, + _id: int, name: str = None, server: "DistributedServer" = None, options: NodeOptions = None, @@ -147,11 +141,10 @@ def __init__( :param options: options to create node with """ self.session: "Session" = session - self.id: int = _id if _id is not None else self.session.next_node_id() + self.id: int = _id self.name: str = name or f"{self.__class__.__name__}{self.id}" self.server: "DistributedServer" = server self.model: Optional[str] = None - self.services: CoreServices = [] self.ifaces: dict[int, 
CoreInterface] = {} self.iface_id: int = 0 self.position: Position = Position() @@ -160,10 +153,19 @@ def __init__( self.net_client: LinuxNetClient = get_net_client( self.session.use_ovs(), self.host_cmd ) + self.node_net_client: LinuxNetClient = self._get_node_net_client() options = options if options else NodeOptions() self.canvas: Optional[int] = options.canvas self.icon: Optional[str] = options.icon + def _get_node_net_client(self) -> LinuxNetClient: + """ + Create and return network command client to run within context of the node. + + :return: network command client + """ + return get_net_client(self.session.use_ovs(), self.cmd) + @classmethod def create_options(cls) -> NodeOptions: return NodeOptions() @@ -388,14 +390,14 @@ def __init__( """ Create a CoreNodeBase instance. - :param session: CORE session object - :param _id: object id - :param name: object name + :param session: session owning this node + :param _id: id of this node + :param name: name of this node :param server: remote server node will run on, default is None for localhost """ super().__init__(session, _id, name, server, options) - self.config_services: dict[str, "ConfigService"] = {} + self.services: dict[str, "CoreService"] = {} self.directory: Optional[Path] = None self.tmpnodedir: bool = False @@ -469,17 +471,17 @@ def host_path(self, path: Path, is_dir: bool = False) -> Path: directory = str(path.parent).strip("/").replace("/", ".") return self.directory / directory / path.name - def add_config_service(self, service_class: "ConfigServiceType") -> None: + def add_service(self, service_class: "ServiceType") -> None: """ - Adds a configuration service to the node. + Adds a service to the node. - :param service_class: configuration service class to assign to node + :param service_class: service class to assign to node :return: nothing """ name = service_class.name - if name in self.config_services: + if name in self.services: raise CoreError(f"node({self.name}) already has service({name})") - self.config_services[name] = service_class(self) + self.services[name] = service_class(self) def set_service_config(self, name: str, data: dict[str, str]) -> None: """ @@ -489,30 +491,30 @@ def set_service_config(self, name: str, data: dict[str, str]) -> None: :param data: custom config data to set :return: nothing """ - service = self.config_services.get(name) + service = self.services.get(name) if service is None: raise CoreError(f"node({self.name}) does not have service({name})") service.set_config(data) - def start_config_services(self) -> None: + def start_services(self) -> None: """ - Determines startup paths and starts configuration services, based on their + Determines startup paths and starts services, based on their dependency chains. :return: nothing """ - startup_paths = ConfigServiceDependencies(self.config_services).startup_paths() + startup_paths = ServiceDependencies(self.services).startup_paths() for startup_path in startup_paths: for service in startup_path: service.start() - def stop_config_services(self) -> None: + def stop_services(self) -> None: """ - Stop all configuration services. + Stop all services. 
:return: nothing """ - for service in self.config_services.values(): + for service in self.services.values(): service.stop() def makenodedir(self) -> None: @@ -584,37 +586,33 @@ def __init__( self.ctrlchnlname: Path = self.session.directory / self.name self.pid: Optional[int] = None self._mounts: list[tuple[Path, Path]] = [] - self.node_net_client: LinuxNetClient = self.create_node_net_client( - self.session.use_ovs() - ) options = options or CoreNodeOptions() self.model: Optional[str] = options.model - # setup services - if options.legacy or options.services: - logger.debug("set node type: %s", self.model) - self.session.services.add_services(self, self.model, options.services) - # add config services - config_services = options.config_services - if not options.legacy and not config_services and not options.services: - config_services = self.session.services.default_services.get(self.model, []) - logger.info("setting node config services: %s", config_services) - for name in config_services: + # add services + services = options.services + if not services: + services = self.session.service_manager.defaults.get(self.model, []) + logger.info( + "setting node(%s) model(%s) services: %s", + self.name, + self.model, + services, + ) + for name in services: service_class = self.session.service_manager.get_service(name) - self.add_config_service(service_class) - - @classmethod - def create_options(cls) -> CoreNodeOptions: - return CoreNodeOptions() + self.add_service(service_class) - def create_node_net_client(self, use_ovs: bool) -> LinuxNetClient: + def _get_node_net_client(self) -> LinuxNetClient: """ - Create node network client for running network commands within the nodes - container. + Create and return network command client to run within context of the node. - :param use_ovs: True for OVS bridges, False for Linux bridges - :return: node network client + :return: network command client """ - return get_net_client(use_ovs, self.cmd) + return get_net_client(self.session.use_ovs(), self.net_cmd) + + @classmethod + def create_options(cls) -> CoreNodeOptions: + return CoreNodeOptions() def alive(self) -> bool: """ @@ -658,7 +656,8 @@ def startup(self) -> None: self.node_net_client.device_up("lo") # set hostname for node logger.debug("setting hostname: %s", self.name) - self.node_net_client.set_hostname(self.name) + hostname = self.name.replace("_", "-") + self.cmd(f"hostname {hostname}") # mark node as up self.up = True # create private directories @@ -667,7 +666,7 @@ def startup(self) -> None: def shutdown(self) -> None: """ - Shutdown logic for simple lxc nodes. + Shutdown logic for nodes. :return: nothing """ @@ -733,6 +732,33 @@ def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: else: return self.server.remote_cmd(args, wait=wait) + def create_net_cmd(self, args: str, shell: bool = False) -> str: + """ + Create command used to run network commands within the context of a node. + + :param args: command arguments + :param shell: True to run shell like, False otherwise + :return: node command + """ + return self.create_cmd(args, shell) + + def net_cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: + """ + Runs a command that is used to configure and setup the network within a + node. 
+ + :param args: command to run + :param wait: True to wait for status, False otherwise + :param shell: True to use shell, False otherwise + :return: combined stdout and stderr + :raises CoreCommandError: when a non-zero exit status occurs + """ + args = self.create_net_cmd(args, shell) + if self.server is None: + return utils.cmd(args, wait=wait, shell=shell) + else: + return self.server.remote_cmd(args, wait=wait) + def path_exists(self, path: str) -> bool: """ Determines if a file or directory path exists. @@ -889,7 +915,8 @@ def adopt_iface(self, iface: CoreInterface, name: str) -> None: self.node_net_client.device_name(iface.name, name) iface.name = name # turn checksums off - self.node_net_client.checksums_off(iface.name) + if self.session.options.get_int("checksums", 0) == 0: + self.node_net_client.checksums_off(iface.name) # retrieve flow id for container iface.flow_id = self.node_net_client.get_ifindex(iface.name) logger.debug("interface flow index: %s - %s", iface.name, iface.flow_id) diff --git a/daemon/core/nodes/docker.py b/daemon/core/nodes/docker.py index ad05c4070..7c17973cf 100644 --- a/daemon/core/nodes/docker.py +++ b/daemon/core/nodes/docker.py @@ -1,10 +1,13 @@ import json import logging +import os import shlex from dataclasses import dataclass, field from pathlib import Path from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional + +from mako.template import Template from core import utils from core.emulator.distributed import DistributedServer @@ -18,6 +21,7 @@ from core.emulator.session import Session DOCKER: str = "docker" +DOCKER_COMPOSE: str = os.environ.get("DOCKER_COMPOSE", "docker compose") @dataclass @@ -33,6 +37,14 @@ class DockerOptions(CoreNodeOptions): unique is True for node unique volume naming delete is True for deleting volume mount during shutdown """ + compose: str = None + """ + Path to a compose file, if one should be used for this node. + """ + compose_name: str = None + """ + Service name to start, within the provided compose file. + """ @dataclass @@ -75,6 +87,8 @@ def __init__( options = options or DockerOptions() super().__init__(session, _id, name, server, options) self.image: str = options.image + self.compose: Optional[str] = options.compose + self.compose_name: Optional[str] = options.compose_name self.binds: list[tuple[str, str]] = options.binds self.volumes: dict[str, DockerVolume] = {} self.env: dict[str, str] = {} @@ -101,9 +115,56 @@ def create_cmd(self, args: str, shell: bool = False) -> str: """ if shell: args = f"{BASH} -c {shlex.quote(args)}" - return f"nsenter -t {self.pid} -m -u -i -p -n -- {args}" + return f"{DOCKER} exec {self.name} {args}" def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: + """ + Runs a command within the context of the Docker node. + + :param args: command to run + :param wait: True to wait for status, False otherwise + :param shell: True to use shell, False otherwise + :return: combined stdout and stderr + :raises CoreCommandError: when a non-zero exit status occurs + """ + args = self.create_cmd(args, shell) + if self.server is None: + return utils.cmd(args, wait=wait, shell=shell, env=self.env) + else: + return self.server.remote_cmd(args, wait=wait, env=self.env) + + def cmd_perf(self, args: str, wait: bool = True, shell: bool = False) -> str: + """ + Runs a command within the Docker node using nsenter to avoid + client/server overhead. 
+ + :param args: command to run + :param wait: True to wait for status, False otherwise + :param shell: True to use shell, False otherwise + :return: combined stdout and stderr + :raises CoreCommandError: when a non-zero exit status occurs + """ + if shell: + args = f"{BASH} -c {shlex.quote(args)}" + args = f"nsenter -t {self.pid} -m -u -i -p -n -- {args}" + if self.server is None: + return utils.cmd(args, wait=wait, shell=shell, env=self.env) + else: + return self.server.remote_cmd(args, wait=wait, env=self.env) + + def create_net_cmd(self, args: str, shell: bool = False) -> str: + """ + Create command used to run network commands within the context of a node. + + :param args: command arguments + :param shell: True to run shell like, False otherwise + :return: node command + """ + if shell: + args = f"{BASH} -c {shlex.quote(args)}" + return f"nsenter -t {self.pid} -n -- {args}" + + def net_cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: """ Runs a command that is used to configure and setup the network within a node. @@ -114,7 +175,7 @@ def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: :return: combined stdout and stderr :raises CoreCommandError: when a non-zero exit status occurs """ - args = self.create_cmd(args, shell) + args = self.create_net_cmd(args, shell) if self.server is None: return utils.cmd(args, wait=wait, shell=shell, env=self.env) else: @@ -154,24 +215,52 @@ def startup(self) -> None: raise CoreError(f"starting node({self.name}) that is already up") # create node directory self.makenodedir() - # setup commands for creating bind/volume mounts - binds = "" - for src, dst in self.binds: - binds += f"--mount type=bind,source={src},target={dst} " - volumes = "" - for volume in self.volumes.values(): - volumes += ( - f"--mount type=volume," f"source={volume.src},target={volume.dst} " - ) - # normalize hostname hostname = self.name.replace("_", "-") - # create container and retrieve the created containers PID - self.host_cmd( - f"{DOCKER} run -td --init --net=none --hostname {hostname} " - f"--name {self.name} --sysctl net.ipv6.conf.all.disable_ipv6=0 " - f"{binds} {volumes} " - f"--privileged {self.image} tail -f /dev/null" - ) + if self.compose: + if not self.compose_name: + raise CoreError( + "a compose name is required when using a compose file" + ) + compose_path = os.path.expandvars(self.compose) + data = self.host_cmd(f"cat {compose_path}") + template = Template(data) + rendered = template.render_unicode(node=self, hostname=hostname) + rendered = rendered.replace('"', r"\"") + rendered = "\\n".join(rendered.splitlines()) + compose_path = self.directory / "docker-compose.yml" + self.host_cmd(f'printf "{rendered}" >> {compose_path}', shell=True) + self.host_cmd( + f"{DOCKER_COMPOSE} up -d {self.compose_name}", + cwd=self.directory, + ) + else: + # setup commands for creating bind/volume mounts + binds = "" + for src, dst in self.binds: + binds += f"--mount type=bind,source={src},target={dst} " + volumes = "" + for volume in self.volumes.values(): + volumes += ( + f"--mount type=volume," + f"source={volume.src},target={volume.dst} " + ) + # create container and retrieve the created containers PID + self.host_cmd( + f"{DOCKER} run -td --init --net=none --hostname {hostname} " + f"--name {self.name} --sysctl net.ipv6.conf.all.disable_ipv6=0 " + f"{binds} {volumes} " + f"--privileged {self.image} tail -f /dev/null" + ) + # setup symlinks for bind and volume mounts within + for src, dst in self.binds: + link_path = 
self.host_path(Path(dst), True) + self.host_cmd(f"ln -s {src} {link_path}") + for volume in self.volumes.values(): + volume.path = self.host_cmd( + f"{DOCKER} volume inspect -f '{{{{.Mountpoint}}}}' {volume.src}" + ) + link_path = self.host_path(Path(volume.dst), True) + self.host_cmd(f"ln -s {volume.path} {link_path}") # retrieve pid and process environment for use in nsenter commands self.pid = self.host_cmd( f"{DOCKER} inspect -f '{{{{.State.Pid}}}}' {self.name}" @@ -182,16 +271,6 @@ def startup(self) -> None: continue key, value = line.split("=") self.env[key] = value - # setup symlinks for bind and volume mounts within - for src, dst in self.binds: - link_path = self.host_path(Path(dst), True) - self.host_cmd(f"ln -s {src} {link_path}") - for volume in self.volumes.values(): - volume.path = self.host_cmd( - f"{DOCKER} volume inspect -f '{{{{.Mountpoint}}}}' {volume.src}" - ) - link_path = self.host_path(Path(volume.dst), True) - self.host_cmd(f"ln -s {volume.path} {link_path}") logger.debug("node(%s) pid: %s", self.name, self.pid) self.up = True diff --git a/daemon/core/nodes/interface.py b/daemon/core/nodes/interface.py index 294e85f9b..2b125f1fb 100644 --- a/daemon/core/nodes/interface.py +++ b/daemon/core/nodes/interface.py @@ -293,13 +293,31 @@ def is_virtual(self) -> bool: """ return self.transport_type == TransportType.VIRTUAL + def update_options(self, options: LinkOptions) -> None: + """ + Update the current link options, if a change occurred and the interface + is up, update the running interface. + + :param options: link options to update with + :return: nothing + """ + changed = self.options.update(options) + if self.up and changed: + self.set_config() + def set_config(self) -> None: + """ + Clears current link options if previously set and now empty. + Otherwise, updates to the current link option values. 
+ + :return: nothing + """ # clear current settings if self.options.is_clear(): if self.has_netem: cmd = tc_clear_cmd(self.name) if self.node: - self.node.cmd(cmd) + self.node.node_net_client.run(cmd) else: self.host_cmd(cmd) self.has_netem = False @@ -307,7 +325,7 @@ def set_config(self) -> None: else: cmd = tc_cmd(self.name, self.options, self.mtu) if self.node: - self.node.cmd(cmd) + self.node.node_net_client.run(cmd) else: self.host_cmd(cmd) self.has_netem = True diff --git a/daemon/core/nodes/lxd.py b/daemon/core/nodes/lxd.py deleted file mode 100644 index e4cba0023..000000000 --- a/daemon/core/nodes/lxd.py +++ /dev/null @@ -1,221 +0,0 @@ -import json -import logging -import shlex -import time -from dataclasses import dataclass, field -from pathlib import Path -from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING - -from core.emulator.data import InterfaceData, LinkOptions -from core.emulator.distributed import DistributedServer -from core.errors import CoreCommandError -from core.executables import BASH -from core.nodes.base import CoreNode, CoreNodeOptions -from core.nodes.interface import CoreInterface - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - - -@dataclass -class LxcOptions(CoreNodeOptions): - image: str = "ubuntu" - """image used when creating container""" - binds: list[tuple[str, str]] = field(default_factory=list) - """bind mount source and destinations to setup within container""" - volumes: list[tuple[str, str, bool, bool]] = field(default_factory=list) - """ - volume mount source, destination, unique, delete to setup within container - - unique is True for node unique volume naming - delete is True for deleting volume mount during shutdown - """ - - -class LxcNode(CoreNode): - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: DistributedServer = None, - options: LxcOptions = None, - ) -> None: - """ - Create a LxcNode instance. - - :param session: core session instance - :param _id: object id - :param name: object name - :param server: remote server node - will run on, default is None for localhost - :param options: option to create node with - """ - options = options or LxcOptions() - super().__init__(session, _id, name, server, options) - self.image: str = options.image - - @classmethod - def create_options(cls) -> LxcOptions: - return LxcOptions() - - def create_cmd(self, args: str, shell: bool = False) -> str: - """ - Create command used to run commands within the context of a node. - - :param args: command arguments - :param shell: True to run shell like, False otherwise - :return: node command - """ - if shell: - args = f"{BASH} -c {shlex.quote(args)}" - return f"nsenter -t {self.pid} -m -u -i -p -n {args}" - - def _get_info(self) -> dict: - args = f"lxc list {self.name} --format json" - output = self.host_cmd(args) - data = json.loads(output) - if not data: - raise CoreCommandError(1, args, f"LXC({self.name}) not present") - return data[0] - - def alive(self) -> bool: - """ - Check if the node is alive. - - :return: True if node is alive, False otherwise - """ - try: - data = self._get_info() - return data["state"]["status"] == "Running" - except CoreCommandError: - return False - - def startup(self) -> None: - """ - Startup logic. 
- - :return: nothing - """ - with self.lock: - if self.up: - raise ValueError("starting a node that is already up") - self.makenodedir() - self.host_cmd(f"lxc launch {self.image} {self.name}") - data = self._get_info() - self.pid = data["state"]["pid"] - self.up = True - - def shutdown(self) -> None: - """ - Shutdown logic. - - :return: nothing - """ - # nothing to do if node is not up - if not self.up: - return - with self.lock: - self.ifaces.clear() - self.host_cmd(f"lxc delete --force {self.name}") - self.up = False - - def termcmdstring(self, sh: str = "/bin/sh") -> str: - """ - Create a terminal command string. - - :param sh: shell to execute command in - :return: str - """ - terminal = f"lxc exec {self.name} -- {sh}" - if self.server is None: - return terminal - else: - return f"ssh -X -f {self.server.host} xterm -e {terminal}" - - def create_dir(self, dir_path: Path) -> None: - """ - Create a private directory. - - :param dir_path: path to create - :return: nothing - """ - logger.info("creating node dir: %s", dir_path) - args = f"mkdir -p {dir_path}" - self.cmd(args) - - def mount(self, src_path: Path, target_path: Path) -> None: - """ - Create and mount a directory. - - :param src_path: source directory to mount - :param target_path: target directory to create - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - logger.debug("mounting source(%s) target(%s)", src_path, target_path) - raise Exception("not supported") - - def create_file(self, file_path: Path, contents: str, mode: int = 0o644) -> None: - """ - Create a node file with a given mode. - - :param file_path: name of file to create - :param contents: contents of file - :param mode: mode for file - :return: nothing - """ - logger.debug("node(%s) create file(%s) mode(%o)", self.name, file_path, mode) - temp = NamedTemporaryFile(delete=False) - temp.write(contents.encode()) - temp.close() - temp_path = Path(temp.name) - directory = file_path.parent - if str(directory) != ".": - self.cmd(f"mkdir -m {0o755:o} -p {directory}") - if self.server is not None: - self.server.remote_put(temp_path, temp_path) - if not str(file_path).startswith("/"): - file_path = Path("/root/") / file_path - self.host_cmd(f"lxc file push {temp_path} {self.name}/{file_path}") - self.cmd(f"chmod {mode:o} {file_path}") - if self.server is not None: - self.host_cmd(f"rm -f {temp_path}") - temp_path.unlink() - logger.debug("node(%s) added file: %s; mode: 0%o", self.name, file_path, mode) - - def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None: - """ - Copy a file to a node, following symlinks and preserving metadata. - Change file mode if specified. 
- - :param dst_path: file name to copy file to - :param src_path: file to copy - :param mode: mode to copy to - :return: nothing - """ - logger.info( - "node file copy file(%s) source(%s) mode(%o)", dst_path, src_path, mode or 0 - ) - self.cmd(f"mkdir -p {dst_path.parent}") - if self.server: - temp = NamedTemporaryFile(delete=False) - temp_path = Path(temp.name) - src_path = temp_path - self.server.remote_put(src_path, temp_path) - if not str(dst_path).startswith("/"): - dst_path = Path("/root/") / dst_path - self.host_cmd(f"lxc file push {src_path} {self.name}/{dst_path}") - if mode is not None: - self.cmd(f"chmod {mode:o} {dst_path}") - - def create_iface( - self, iface_data: InterfaceData = None, options: LinkOptions = None - ) -> CoreInterface: - iface = super().create_iface(iface_data, options) - # adding small delay to allow time for adding addresses to work correctly - time.sleep(0.5) - return iface diff --git a/daemon/core/nodes/netclient.py b/daemon/core/nodes/netclient.py index 74087e315..593b4761b 100644 --- a/daemon/core/nodes/netclient.py +++ b/daemon/core/nodes/netclient.py @@ -18,20 +18,10 @@ def __init__(self, run: Callable[..., str]) -> None: """ Create LinuxNetClient instance. - :param run: function to run commands with + :param run: function to run commands within node context """ self.run: Callable[..., str] = run - def set_hostname(self, name: str) -> None: - """ - Set network hostname. - - :param name: name for hostname - :return: nothing - """ - name = name.replace("_", "-") - self.run(f"hostname {name}") - def create_route(self, route: str, device: str) -> None: """ Create a new route for a device. @@ -88,15 +78,6 @@ def address_show(self, device: str) -> str: """ return self.run(f"{IP} address show {device}") - def get_mac(self, device: str) -> str: - """ - Retrieve MAC address for a given device. - - :param device: device to get mac for - :return: MAC address - """ - return self.run(f"cat /sys/class/net/{device}/address") - def get_ifindex(self, device: str) -> int: """ Retrieve ifindex for a given device. @@ -104,7 +85,8 @@ def get_ifindex(self, device: str) -> int: :param device: device to get ifindex for :return: ifindex """ - return int(self.run(f"cat /sys/class/net/{device}/ifindex")) + output = self.run(f"{IP} link show {device}") + return int(output.split()[0].strip(":")) def device_ns(self, device: str, namespace: str) -> None: """ @@ -390,7 +372,7 @@ def get_net_client(use_ovs: bool, run: Callable[..., str]) -> LinuxNetClient: Retrieve desired net client for running network commands. 
:param use_ovs: True for OVS bridges, False for Linux bridges - :param run: function used to run net client commands + :param run: function to run commands within node context :return: net client class """ if use_ovs: diff --git a/daemon/core/nodes/network.py b/daemon/core/nodes/network.py index 1ea9c31e1..e09a1045d 100644 --- a/daemon/core/nodes/network.py +++ b/daemon/core/nodes/network.py @@ -215,8 +215,8 @@ def __init__( options = options or NetworkOptions() super().__init__(session, _id, name, server, options) self.policy: NetworkPolicy = options.policy if options.policy else self.policy - sessionid = self.session.short_session_id() - self.brname: str = f"b.{self.id}.{sessionid}" + session_id = self.session.short_session_id() + self.brname: str = f"b.{self.id}.{session_id}" self.has_nftables_chain: bool = False @classmethod @@ -510,14 +510,6 @@ class CtrlNet(CoreNetwork): """ policy: NetworkPolicy = NetworkPolicy.ACCEPT - # base control interface index - CTRLIF_IDX_BASE: int = 99 - DEFAULT_PREFIX_LIST: list[str] = [ - "172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24", - "172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24", - "172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24", - "172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24", - ] def __init__( self, @@ -544,6 +536,7 @@ def __init__( self.assign_address: bool = options.assign_address self.updown_script: Optional[str] = options.updown_script self.serverintf: Optional[str] = options.serverintf + self.brname = f"ctrl{_id}.{self.session.short_session_id()}" @classmethod def create_options(cls) -> CtrlNetOptions: @@ -636,6 +629,11 @@ class PtpNet(CoreNetwork): policy: NetworkPolicy = NetworkPolicy.ACCEPT + def __init__(self, session: "Session", _id: int) -> None: + super().__init__(session, _id) + session_id = self.session.short_session_id() + self.brname: str = f"p.{self.id}.{session_id}" + def attach(self, iface: CoreInterface) -> None: """ Attach a network interface, but limit attachment to two interfaces. 
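The network.py hunks above settle on session-scoped bridge names: ordinary networks keep b.<id>.<session>, the new PtpNet constructor uses p.<id>.<session>, and CtrlNet now names its bridge ctrl<id>.<session>. The helper below is only an illustration of that convention, not code from the tree; the session id value is made up:

    def bridge_name(kind: str, node_id: int, short_session_id: str) -> str:
        # "network" -> b.<id>.<sid>, "ptp" -> p.<id>.<sid>, "control" -> ctrl<id>.<sid>
        if kind == "control":
            return f"ctrl{node_id}.{short_session_id}"
        prefix = {"network": "b", "ptp": "p"}[kind]
        return f"{prefix}.{node_id}.{short_session_id}"

    assert bridge_name("ptp", 7, "4f1a") == "p.7.4f1a"
    assert bridge_name("control", 0, "4f1a") == "ctrl0.4f1a"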
diff --git a/daemon/core/nodes/physical.py b/daemon/core/nodes/physical.py index 30640fd8e..b7f546334 100644 --- a/daemon/core/nodes/physical.py +++ b/daemon/core/nodes/physical.py @@ -263,7 +263,8 @@ def adopt_iface(self, iface: CoreInterface, name: str) -> None: if iface_id == -1: raise CoreError(f"adopting unknown iface({iface.name})") # turn checksums off - self.node_net_client.checksums_off(iface.name) + if self.session.options.get_int("checksums", 0) == 0: + self.node_net_client.checksums_off(iface.name) # retrieve flow id for container iface.flow_id = self.node_net_client.get_ifindex(iface.name) logger.debug("interface flow index: %s - %s", iface.name, iface.flow_id) diff --git a/daemon/core/nodes/podman.py b/daemon/core/nodes/podman.py index 00ef24fc3..67aba279a 100644 --- a/daemon/core/nodes/podman.py +++ b/daemon/core/nodes/podman.py @@ -1,10 +1,13 @@ import json import logging +import os import shlex from dataclasses import dataclass, field from pathlib import Path from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional + +from mako.template import Template from core.emulator.distributed import DistributedServer from core.errors import CoreCommandError, CoreError @@ -17,6 +20,7 @@ from core.emulator.session import Session PODMAN: str = "podman" +PODMAN_COMPOSE: str = "podman-compose" @dataclass @@ -32,6 +36,14 @@ class PodmanOptions(CoreNodeOptions): unique is True for node unique volume naming delete is True for deleting volume mount during shutdown """ + compose: str = None + """ + Path to a compose file, if one should be used for this node. + """ + compose_name: str = None + """ + Service name to start, within the provided compose file. + """ @dataclass @@ -74,6 +86,8 @@ def __init__( options = options or PodmanOptions() super().__init__(session, _id, name, server, options) self.image: str = options.image + self.compose: Optional[str] = options.compose + self.compose_name: Optional[str] = options.compose_name self.binds: list[tuple[str, str]] = options.binds self.volumes: dict[str, VolumeMount] = {} for src, dst, unique, delete in options.volumes: @@ -101,6 +115,18 @@ def create_cmd(self, args: str, shell: bool = False) -> str: args = f"{BASH} -c {shlex.quote(args)}" return f"{PODMAN} exec {self.name} {args}" + def create_net_cmd(self, args: str, shell: bool = False) -> str: + """ + Create command used to run network commands within the context of a node. + + :param args: command arguments + :param shell: True to run shell like, False otherwise + :return: node command + """ + if shell: + args = f"{BASH} -c {shlex.quote(args)}" + return f"nsenter -t {self.pid} -n -- {args}" + def _unique_name(self, name: str) -> str: """ Creates a session/node unique prefixed name for the provided input. 
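The new compose/compose_name options let a Docker or Podman node start from a user-supplied compose file; as the startup hunk below shows, the file is read on the host, rendered through Mako with node and hostname in scope, written into the node directory, and brought up with the compose tool against the named service. A sketch of just the rendering step, with invented compose contents and the node argument omitted:

    from mako.template import Template

    compose_text = """\
    services:
      ${hostname}:
        image: ubuntu:22.04
        network_mode: none
        privileged: true
    """
    # same call used by the node, minus the real node object
    rendered = Template(compose_text).render_unicode(node=None, hostname="n1-router")
    print(rendered)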
@@ -135,38 +161,56 @@ def startup(self) -> None: raise CoreError(f"starting node({self.name}) that is already up") # create node directory self.makenodedir() - # setup commands for creating bind/volume mounts - binds = "" - for src, dst in self.binds: - binds += f"--mount type=bind,source={src},target={dst} " - volumes = "" - for volume in self.volumes.values(): - volumes += ( - f"--mount type=volume," f"source={volume.src},target={volume.dst} " - ) - # normalize hostname hostname = self.name.replace("_", "-") - # create container and retrieve the created containers PID - self.host_cmd( - f"{PODMAN} run -td --init --net=none --hostname {hostname} " - f"--name {self.name} --sysctl net.ipv6.conf.all.disable_ipv6=0 " - f"{binds} {volumes} " - f"--privileged {self.image} tail -f /dev/null" - ) - # retrieve pid and process environment for use in nsenter commands + if self.compose: + if not self.compose_name: + raise CoreError( + "a compose name is required when using a compose file" + ) + compose_path = os.path.expandvars(self.compose) + data = self.host_cmd(f"cat {compose_path}") + template = Template(data) + rendered = template.render_unicode(node=self, hostname=hostname) + rendered = rendered.replace('"', r"\"") + rendered = "\\n".join(rendered.splitlines()) + compose_path = self.directory / "podman-compose.yml" + self.host_cmd(f'printf "{rendered}" >> {compose_path}', shell=True) + self.host_cmd( + f"{PODMAN_COMPOSE} up -d {self.compose_name}", cwd=self.directory + ) + else: + # setup commands for creating bind/volume mounts + binds = "" + for src, dst in self.binds: + binds += f"--mount type=bind,source={src},target={dst} " + volumes = "" + for volume in self.volumes.values(): + volumes += ( + f"--mount type=volume," + f"source={volume.src},target={volume.dst} " + ) + # normalize hostname + hostname = self.name.replace("_", "-") + # create container and retrieve the created containers PID + self.host_cmd( + f"{PODMAN} run -td --init --net=none --hostname {hostname} " + f"--name {self.name} --sysctl net.ipv6.conf.all.disable_ipv6=0 " + f"{binds} {volumes} " + f"--privileged {self.image} tail -f /dev/null" + ) + # setup symlinks for bind and volume mounts within + for src, dst in self.binds: + link_path = self.host_path(Path(dst), True) + self.host_cmd(f"ln -s {src} {link_path}") + for volume in self.volumes.values(): + volume.path = self.host_cmd( + f"{PODMAN} volume inspect -f '{{{{.Mountpoint}}}}' {volume.src}" + ) + link_path = self.host_path(Path(volume.dst), True) + self.host_cmd(f"ln -s {volume.path} {link_path}") self.pid = self.host_cmd( f"{PODMAN} inspect -f '{{{{.State.Pid}}}}' {self.name}" ) - # setup symlinks for bind and volume mounts within - for src, dst in self.binds: - link_path = self.host_path(Path(dst), True) - self.host_cmd(f"ln -s {src} {link_path}") - for volume in self.volumes.values(): - volume.path = self.host_cmd( - f"{PODMAN} volume inspect -f '{{{{.Mountpoint}}}}' {volume.src}" - ) - link_path = self.host_path(Path(volume.dst), True) - self.host_cmd(f"ln -s {volume.path} {link_path}") logger.debug("node(%s) pid: %s", self.name, self.pid) self.up = True @@ -181,10 +225,13 @@ def shutdown(self) -> None: return with self.lock: self.ifaces.clear() - self.host_cmd(f"{PODMAN} rm -f {self.name}") - for volume in self.volumes.values(): - if volume.delete: - self.host_cmd(f"{PODMAN} volume rm {volume.src}") + if self.compose: + self.host_cmd(f"{PODMAN_COMPOSE} down -t 0", cwd=self.directory) + else: + self.host_cmd(f"{PODMAN} rm -f {self.name}") + for volume in 
self.volumes.values(): + if volume.delete: + self.host_cmd(f"{PODMAN} volume rm {volume.src}") self.up = False def termcmdstring(self, sh: str = "/bin/sh") -> str: diff --git a/daemon/core/nodes/wireless.py b/daemon/core/nodes/wireless.py index 51a98917c..dacaf6f2f 100644 --- a/daemon/core/nodes/wireless.py +++ b/daemon/core/nodes/wireless.py @@ -243,13 +243,11 @@ def link_config( raise CoreError(f"invalid node links node1({node1_id}) node2({node2_id})") iface = link.iface has_netem = iface.has_netem - iface.options.update(options1) - iface.set_config() + iface.update_options(options1) name, localname = iface.name, iface.localname iface.name, iface.localname = localname, name - iface.options.update(options2) iface.has_netem = has_netem - iface.set_config() + iface.update_options(options2) iface.name, iface.localname = name, localname if options1 == options2: link.label = f"{options1.loss:.2f}%/{options1.delay}us" diff --git a/daemon/core/plugins/sdt.py b/daemon/core/plugins/sdt.py index f963c817b..1a0a57010 100644 --- a/daemon/core/plugins/sdt.py +++ b/daemon/core/plugins/sdt.py @@ -91,8 +91,8 @@ def __init__(self, session: "Session") -> None: self.address: Optional[tuple[Optional[str], Optional[int]]] = None self.protocol: Optional[str] = None self.network_layers: set[str] = set() - self.session.node_handlers.append(self.handle_node_update) - self.session.link_handlers.append(self.handle_link_update) + self.session.broadcast_manager.add_handler(NodeData, self.handle_node_update) + self.session.broadcast_manager.add_handler(LinkData, self.handle_link_update) def is_enabled(self) -> bool: """ diff --git a/daemon/core/scripts/cleanup.py b/daemon/core/scripts/cleanup.py index 1ab4647ea..338ed6d2c 100755 --- a/daemon/core/scripts/cleanup.py +++ b/daemon/core/scripts/cleanup.py @@ -71,6 +71,7 @@ def cleanup_interfaces() -> None: or name.startswith("beth") or name.startswith("gt.") or name.startswith("b.") + or name.startswith("p.") or name.startswith("ctrl") ): name = name.split("@")[0] diff --git a/daemon/core/scripts/daemon.py b/daemon/core/scripts/daemon.py index 6b9caa54e..f54a77bd0 100755 --- a/daemon/core/scripts/daemon.py +++ b/daemon/core/scripts/daemon.py @@ -5,20 +5,53 @@ """ import argparse +import json import logging -import os +import logging.config import time from configparser import ConfigParser from pathlib import Path from core import constants from core.api.grpc.server import CoreGrpcServer -from core.constants import CORE_CONF_DIR, COREDPY_VERSION +from core.constants import COREDPY_VERSION from core.emulator.coreemu import CoreEmu -from core.utils import load_logging_config logger = logging.getLogger(__name__) +DEFAULT_GRPC_PORT: str = "50051" +DEFAULT_GRPC_ADDRESS: str = "localhost" +DEFAULT_LOG_CONFIG: Path = constants.CORE_CONF_DIR / "logging.conf" +DEFAULT_CORE_CONFIG: Path = constants.CORE_CONF_DIR / "core.conf" + + +def file_path(value: str) -> Path: + """ + Checks value for being a valid file path. + + :param value: file path to check + :return: valid file path + """ + path = Path(value) + if not path.is_file(): + raise argparse.ArgumentTypeError(f"{path} does not exist") + return path + + +def load_logging_config(config_path: Path, debug: bool) -> None: + """ + Load CORE logging configuration file. 
+ + :param config_path: path to logging config file + :param debug: enable debug logging + :return: nothing + """ + with config_path.open("r") as f: + log_config = json.load(f) + if debug: + log_config["loggers"]["core"]["level"] = "DEBUG" + logging.config.dictConfig(log_config) + def banner(): """ @@ -45,60 +78,19 @@ def cored(cfg): grpc_server.listen(grpc_address) -def get_merged_config(filename): +def get_merged_config(args: argparse.Namespace) -> dict[str, str]: """ Return a configuration after merging config file and command-line arguments. - :param str filename: file name to merge configuration settings with + :param args: command line arguments :return: merged configuration :rtype: dict """ # these are the defaults used in the config file - default_log = os.path.join(constants.CORE_CONF_DIR, "logging.conf") - default_grpc_port = "50051" - default_address = "localhost" - defaults = { - "grpcport": default_grpc_port, - "grpcaddress": default_address, - "logfile": default_log, - } - parser = argparse.ArgumentParser( - description=f"CORE daemon v.{COREDPY_VERSION} instantiates Linux network namespace nodes." - ) - parser.add_argument( - "-f", - "--configfile", - dest="configfile", - help=f"read config from specified file; default = {filename}", - ) - parser.add_argument( - "--ovs", - action="store_true", - help="enable experimental ovs mode, default is false", - ) - parser.add_argument( - "--grpc-port", - dest="grpcport", - help=f"grpc port to listen on; default {default_grpc_port}", - ) - parser.add_argument( - "--grpc-address", - dest="grpcaddress", - help=f"grpc address to listen on; default {default_address}", - ) - parser.add_argument( - "-l", "--logfile", help=f"core logging configuration; default {default_log}" - ) - # parse command line options - args = parser.parse_args() - # convert ovs to internal format - args.ovs = "1" if args.ovs else "0" + defaults = dict(logfile=args.log_config) # read the config file - if args.configfile is not None: - filename = args.configfile - del args.configfile cfg = ConfigParser(defaults) - cfg.read(filename) + cfg.read(args.config) section = "core-daemon" if not cfg.has_section(section): cfg.add_section(section) @@ -116,9 +108,57 @@ def main(): :return: nothing """ - cfg = get_merged_config(f"{CORE_CONF_DIR}/core.conf") + # parse arguments + parser = argparse.ArgumentParser( + description=f"CORE daemon v.{COREDPY_VERSION}", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "-d", + "--debug", + action="store_true", + help="convenience for quickly enabling DEBUG logging", + ) + parser.add_argument( + "-c", + "--config", + type=file_path, + default=DEFAULT_CORE_CONFIG, + help="CORE configuration file", + ) + parser.add_argument( + "-l", + "--log-config", + type=file_path, + default=DEFAULT_LOG_CONFIG, + help="CORE logging configuration file", + ) + parser.add_argument( + "--grpc-port", + dest="grpcport", + help="override grpc port to listen on", + ) + parser.add_argument( + "--grpc-address", + dest="grpcaddress", + help="override grpc address to listen on", + ) + parser.add_argument( + "--ovs", + action="store_true", + help="enable experimental ovs mode", + ) + args = parser.parse_args() + # convert ovs to internal format + args.ovs = "1" if args.ovs else "0" + # validate files exist + if not args.log_config.is_file(): + raise FileNotFoundError(f"{args.log_config} does not exist") + if not args.config.is_file(): + raise FileNotFoundError(f"{args.config} does not exist") + cfg = get_merged_config(args) 
log_config_path = Path(cfg["logfile"]) - load_logging_config(log_config_path) + load_logging_config(log_config_path, args.debug) banner() try: cored(cfg) diff --git a/daemon/core/scripts/gui.py b/daemon/core/scripts/gui.py index 9c0560b20..0aa558e06 100755 --- a/daemon/core/scripts/gui.py +++ b/daemon/core/scripts/gui.py @@ -2,6 +2,7 @@ import logging from logging.handlers import TimedRotatingFileHandler +from core import utils from core.gui import appconfig, images from core.gui.app import Application @@ -40,6 +41,10 @@ def main() -> None: ) logging.getLogger("PIL").setLevel(logging.ERROR) + # enable xhost for root + if utils.which("xhost", False): + utils.cmd("xhost +SI:localuser:root") + # start app images.load_all() app = Application(args.proxy, args.session) diff --git a/daemon/core/configservice/base.py b/daemon/core/services/base.py similarity index 81% rename from daemon/core/configservice/base.py rename to daemon/core/services/base.py index e15260eb2..d37d6bcca 100644 --- a/daemon/core/configservice/base.py +++ b/daemon/core/services/base.py @@ -33,17 +33,17 @@ def get_template_path(file_path: Path) -> str: return template_path -class ConfigServiceMode(enum.Enum): +class ServiceMode(enum.Enum): BLOCKING = 0 NON_BLOCKING = 1 TIMER = 2 -class ConfigServiceBootError(Exception): +class ServiceBootError(Exception): pass -class ConfigServiceTemplateError(Exception): +class ServiceTemplateError(Exception): pass @@ -55,23 +55,45 @@ class ShadowDir: has_node_paths: bool = False -class ConfigService(abc.ABC): +class CoreService(abc.ABC): """ - Base class for creating configurable services. + Base class for creating services. """ + # globally unique name for service + name: Optional[str] = None + # group to categorize service within + group: Optional[str] = None + # directories to create unique mount points for + directories: list[str] = [] + # files to create for service + files: list[str] = [] + # configurable values that this service can use, for file generation + default_configs: list[Configuration] = [] + # executables that should exist on path, that this service depends on + executables: list[str] = [] + # other services that this service depends on, defines service start order + dependencies: list[str] = [] + # commands to run to start this service + startup: list[str] = [] + # commands to run to validate this service + validate: list[str] = [] + # commands to run to stop this service + shutdown: list[str] = [] + # validation mode, blocking, non-blocking, and timer + validation_mode: ServiceMode = ServiceMode.BLOCKING + # predefined configuration value groupings + modes: dict[str, dict[str, str]] = {} # validation period in seconds, how frequent validation is attempted validation_period: float = 0.5 - # time to wait in seconds for determining if service started successfully validation_timer: int = 5 - # directories to shadow and copy files from shadow_directories: list[ShadowDir] = [] def __init__(self, node: CoreNode) -> None: """ - Create ConfigService instance. + Create Service instance. 
:param node: node this service is assigned to """ @@ -96,82 +118,22 @@ def clean_text(text: str) -> str: """ return inspect.cleandoc(text) - @property - @abc.abstractmethod - def name(self) -> str: - raise NotImplementedError - - @property - @abc.abstractmethod - def group(self) -> str: - raise NotImplementedError - - @property - @abc.abstractmethod - def directories(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def files(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def default_configs(self) -> list[Configuration]: - raise NotImplementedError - - @property - @abc.abstractmethod - def modes(self) -> dict[str, dict[str, str]]: - raise NotImplementedError - - @property - @abc.abstractmethod - def executables(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def dependencies(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def startup(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def validate(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def shutdown(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def validation_mode(self) -> ConfigServiceMode: - raise NotImplementedError - def start(self) -> None: """ Creates services files/directories, runs startup, and validates based on validation mode. :return: nothing - :raises ConfigServiceBootError: when there is an error starting service + :raises ServiceBootError: when there is an error starting service """ logger.info("node(%s) service(%s) starting...", self.node.name, self.name) self.create_shadow_dirs() self.create_dirs() self.create_files() - wait = self.validation_mode == ConfigServiceMode.BLOCKING + wait = self.validation_mode == ServiceMode.BLOCKING self.run_startup(wait) if not wait: - if self.validation_mode == ConfigServiceMode.TIMER: + if self.validation_mode == ServiceMode.TIMER: self.wait_validation() else: self.run_validation() @@ -184,7 +146,7 @@ def stop(self) -> None: """ for cmd in self.shutdown: try: - self.node.cmd(cmd) + self.node.cmd(cmd, shell=True) except CoreCommandError: logger.exception( f"node({self.node.name}) service({self.name}) " @@ -265,7 +227,7 @@ def create_dirs(self) -> None: :return: nothing :raises CoreError: when there is a failure creating a directory """ - logger.debug("creating config service directories") + logger.debug("creating service directories") for directory in sorted(self.directories): dir_path = Path(directory) try: @@ -323,7 +285,7 @@ def get_templates(self) -> dict[str, str]: try: template = self.get_text_template(file) except Exception as e: - raise ConfigServiceTemplateError( + raise ServiceTemplateError( f"node({self.node.name}) service({self.name}) file({file}) " f"failure getting template: {e}" ) @@ -351,7 +313,7 @@ def _get_rendered_template(self, file: str, data: dict[str, Any]) -> str: try: text = self.get_text_template(file) except Exception as e: - raise ConfigServiceTemplateError( + raise ServiceTemplateError( f"node({self.node.name}) service({self.name}) file({file}) " f"failure getting template: {e}" ) @@ -380,13 +342,13 @@ def run_startup(self, wait: bool) -> None: :param wait: wait successful command exit status when True, ignore status otherwise :return: nothing - :raises ConfigServiceBootError: when a command that waits fails + :raises ServiceBootError: when a command that waits fails """ for cmd in self.startup: 
try: - self.node.cmd(cmd, wait=wait) + self.node.cmd(cmd, wait=wait, shell=True) except CoreCommandError as e: - raise ConfigServiceBootError( + raise ServiceBootError( f"node({self.node.name}) service({self.name}) failed startup: {e}" ) @@ -403,7 +365,7 @@ def run_validation(self) -> None: Runs validation commands for service on node. :return: nothing - :raises ConfigServiceBootError: if there is a validation failure + :raises ServiceBootError: if there is a validation failure """ start = time.monotonic() cmds = self.validate[:] @@ -411,7 +373,7 @@ def run_validation(self) -> None: while cmds: cmd = cmds[index] try: - self.node.cmd(cmd) + self.node.cmd(cmd, shell=True) del cmds[index] index += 1 except CoreCommandError: @@ -422,7 +384,7 @@ def run_validation(self) -> None: time.sleep(self.validation_period) if cmds and time.monotonic() - start > self.validation_timer: - raise ConfigServiceBootError( + raise ServiceBootError( f"node({self.node.name}) service({self.name}) failed to validate" ) @@ -460,7 +422,7 @@ def render_text(self, text: str, data: dict[str, Any] = None) -> str: def render_template(self, template_path: str, data: dict[str, Any] = None) -> str: """ - Renders file based template providing all associated data to template. + Renders file based template providing all associated data to template. :param template_path: path of file to render :param data: service specific defined data for template @@ -508,3 +470,38 @@ def set_config(self, data: dict[str, str]) -> None: if key not in self.config: raise CoreError(f"unknown config: {key}") self.custom_config[key] = value + + +class CustomCoreService(CoreService): + defined_templates: dict[str, str] = {} + + def get_templates(self) -> dict[str, str]: + """ + Retrieves mapping of file names to templates for all cases, which + includes custom templates, file templates, and text templates. + + :return: mapping of files to templates + """ + templates = {} + for file in self.files: + if file in self.defined_templates: + template = self.defined_templates[file] + template = self.clean_text(template) + else: + raise ServiceTemplateError( + f"node({self.node.name}) service({self.name}) file({file}) " + f"failure getting template" + ) + templates[file] = template + return templates + + def _get_rendered_template(self, file: str, data: dict[str, Any]) -> str: + if file in self.defined_templates: + text = self.defined_templates[file] + rendered = self.render_text(text, data) + else: + raise ServiceTemplateError( + f"node({self.node.name}) service({self.name}) file({file}) " + f"failure getting template" + ) + return rendered diff --git a/daemon/core/services/bird.py b/daemon/core/services/bird.py deleted file mode 100644 index c2ecc4dcc..000000000 --- a/daemon/core/services/bird.py +++ /dev/null @@ -1,233 +0,0 @@ -""" -bird.py: defines routing services provided by the BIRD Internet Routing Daemon. -""" -from typing import Optional - -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService - - -class Bird(CoreService): - """ - Bird router support - """ - - name: str = "bird" - group: str = "BIRD" - executables: tuple[str, ...] = ("bird",) - dirs: tuple[str, ...] = ("/etc/bird",) - configs: tuple[str, ...] = ("/etc/bird/bird.conf",) - startup: tuple[str, ...] = (f"bird -c {configs[0]}",) - shutdown: tuple[str, ...] = ("killall bird",) - validate: tuple[str, ...] = ("pidof bird",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return the bird.conf file contents. 
- """ - if filename == cls.configs[0]: - return cls.generate_bird_config(node) - else: - raise ValueError - - @staticmethod - def router_id(node: CoreNode) -> str: - """ - Helper to return the first IPv4 address of a node as its router ID. - """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) - return "0.0.0.0" - - @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - """ - Returns configuration file text. Other services that depend on bird - will have hooks that are invoked here. - """ - cfg = f"""\ -/* Main configuration file for BIRD. This is ony a template, - * you will *need* to customize it according to your needs - * Beware that only double quotes \'"\' are valid. No singles. */ - - -log "/var/log/{cls.name}.log" all; -#debug protocols all; -#debug commands 2; - -router id {cls.router_id(node)}; # Mandatory for IPv6, may be automatic for IPv4 - -protocol kernel {{ - persist; # Don\'t remove routes on BIRD shutdown - scan time 200; # Scan kernel routing table every 200 seconds - export all; - import all; -}} - -protocol device {{ - scan time 10; # Scan interfaces every 10 seconds -}} - -""" - - # generate protocol specific configurations - for s in node.services: - if cls.name not in s.dependencies: - continue - if not (isinstance(s, BirdService) or issubclass(s, BirdService)): - continue - cfg += s.generate_bird_config(node) - return cfg - - -class BirdService(CoreService): - """ - Parent class for Bird services. Defines properties and methods - common to Bird's routing daemons. - """ - - name: Optional[str] = None - group: str = "BIRD" - executables: tuple[str, ...] = ("bird",) - dependencies: tuple[str, ...] = ("bird",) - meta: str = "The config file for this service can be found in the bird service." - - @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - return "" - - @classmethod - def generate_bird_iface_config(cls, node: CoreNode) -> str: - """ - Use only bare interfaces descriptions in generated protocol - configurations. This has the slight advantage of being the same - everywhere. - """ - cfg = "" - for iface in node.get_ifaces(control=False): - cfg += f' interface "{iface.name}";\n' - return cfg - - -class BirdBgp(BirdService): - """ - BGP BIRD Service (configuration generation) - """ - - name: str = "BIRD_BGP" - custom_needed: bool = True - - @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - return """ -/* This is a sample config that should be customized with appropriate AS numbers - * and peers; add one section like this for each neighbor */ - -protocol bgp { - local as 65000; # Customize your AS number - neighbor 198.51.100.130 as 64496; # Customize neighbor AS number && IP - export filter { # We use non-trivial export rules - # This is an example. 
You should advertise only *your routes* - if (source = RTS_DEVICE) || (source = RTS_OSPF) then { -# bgp_community.add((65000,64501)); # Assign our community - accept; - } - reject; - }; - import all; -} - -""" - - -class BirdOspf(BirdService): - """ - OSPF BIRD Service (configuration generation) - """ - - name: str = "BIRD_OSPFv2" - - @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - cfg = "protocol ospf {\n" - cfg += " export filter {\n" - cfg += " if source = RTS_BGP then {\n" - cfg += " ospf_metric1 = 100;\n" - cfg += " accept;\n" - cfg += " }\n" - cfg += " accept;\n" - cfg += " };\n" - cfg += " area 0.0.0.0 {\n" - cfg += cls.generate_bird_iface_config(node) - cfg += " };\n" - cfg += "}\n\n" - return cfg - - -class BirdRadv(BirdService): - """ - RADV BIRD Service (configuration generation) - """ - - name: str = "BIRD_RADV" - - @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - cfg = "/* This is a sample config that must be customized */\n" - cfg += "protocol radv {\n" - cfg += " # auto configuration on all interfaces\n" - cfg += cls.generate_bird_iface_config(node) - cfg += " # Advertise DNS\n" - cfg += " rdnss {\n" - cfg += "# lifetime mult 10;\n" - cfg += "# lifetime mult 10;\n" - cfg += "# ns 2001:0DB8:1234::11;\n" - cfg += "# ns 2001:0DB8:1234::11;\n" - cfg += "# ns 2001:0DB8:1234::12;\n" - cfg += "# ns 2001:0DB8:1234::12;\n" - cfg += " };\n" - cfg += "}\n\n" - return cfg - - -class BirdRip(BirdService): - """ - RIP BIRD Service (configuration generation) - """ - - name: str = "BIRD_RIP" - - @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - cfg = "protocol rip {\n" - cfg += " period 10;\n" - cfg += " garbage time 60;\n" - cfg += cls.generate_bird_iface_config(node) - cfg += " honor neighbor;\n" - cfg += " authentication none;\n" - cfg += " import all;\n" - cfg += " export all;\n" - cfg += "}\n\n" - return cfg - - -class BirdStatic(BirdService): - """ - Static Bird Service (configuration generation) - """ - - name: str = "BIRD_static" - custom_needed: bool = True - - @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - cfg = "/* This is a sample config that must be customized */\n" - cfg += "protocol static {\n" - cfg += "# route 0.0.0.0/0 via 198.51.100.130; # Default route. Do NOT advertise on BGP !\n" - cfg += "# route 203.0.113.0/24 reject; # Sink route\n" - cfg += '# route 10.2.0.0/24 via "arc0"; # Secondary network\n' - cfg += "}\n\n" - return cfg diff --git a/daemon/core/services/coreservices.py b/daemon/core/services/coreservices.py deleted file mode 100644 index 0eee980ed..000000000 --- a/daemon/core/services/coreservices.py +++ /dev/null @@ -1,773 +0,0 @@ -""" -Definition of CoreService class that is subclassed to define -startup services and routing for nodes. A service is typically a daemon -program launched when a node starts that provides some sort of service. -The CoreServices class handles configuration messages for sending -a list of available services to the GUI and for configuring individual -services. 
-""" - -import enum -import logging -import pkgutil -import time -from collections.abc import Iterable -from pathlib import Path -from typing import TYPE_CHECKING, Optional, Union - -from core import services as core_services -from core import utils -from core.emulator.data import FileData -from core.emulator.enumerations import ExceptionLevels, MessageFlags, RegisterTlvs -from core.errors import ( - CoreCommandError, - CoreError, - CoreServiceBootError, - CoreServiceError, -) -from core.nodes.base import CoreNode - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - - CoreServiceType = Union["CoreService", type["CoreService"]] - - -class ServiceMode(enum.Enum): - BLOCKING = 0 - NON_BLOCKING = 1 - TIMER = 2 - - -class ServiceDependencies: - """ - Can generate boot paths for services, based on their dependencies. Will validate - that all services will be booted and that all dependencies exist within the services - provided. - """ - - def __init__(self, services: list["CoreServiceType"]) -> None: - self.visited: set[str] = set() - self.services: dict[str, "CoreServiceType"] = {} - self.paths: dict[str, list["CoreServiceType"]] = {} - self.boot_paths: list[list["CoreServiceType"]] = [] - roots = {x.name for x in services} - for service in services: - self.services[service.name] = service - roots -= set(service.dependencies) - self.roots: list["CoreServiceType"] = [x for x in services if x.name in roots] - if services and not self.roots: - raise ValueError("circular dependency is present") - - def _search( - self, - service: "CoreServiceType", - visiting: set[str] = None, - path: list[str] = None, - ) -> list["CoreServiceType"]: - if service.name in self.visited: - return self.paths[service.name] - self.visited.add(service.name) - if visiting is None: - visiting = set() - visiting.add(service.name) - if path is None: - for dependency in service.dependencies: - path = self.paths.get(dependency) - if path is not None: - break - for dependency in service.dependencies: - service_dependency = self.services.get(dependency) - if not service_dependency: - raise ValueError(f"required dependency was not provided: {dependency}") - if dependency in visiting: - raise ValueError(f"circular dependency, already visited: {dependency}") - else: - path = self._search(service_dependency, visiting, path) - visiting.remove(service.name) - if path is None: - path = [] - self.boot_paths.append(path) - path.append(service) - self.paths[service.name] = path - return path - - def boot_order(self) -> list[list["CoreServiceType"]]: - for service in self.roots: - self._search(service) - return self.boot_paths - - -class ServiceManager: - """ - Manages services available for CORE nodes to use. - """ - - services: dict[str, type["CoreService"]] = {} - - @classmethod - def add(cls, service: type["CoreService"]) -> None: - """ - Add a service to manager. 
- - :param service: service to add - :return: nothing - :raises ValueError: when service cannot be loaded - """ - name = service.name - logger.debug("loading service: class(%s) name(%s)", service.__name__, name) - # avoid services with no name - if name is None: - logger.debug("not loading class(%s) with no name", service.__name__) - return - # avoid duplicate services - if name in cls.services: - raise ValueError(f"duplicate service being added: {name}") - # validate dependent executables are present - for executable in service.executables: - try: - utils.which(executable, required=True) - except CoreError as e: - raise CoreError(f"service({name}): {e}") - # validate service on load succeeds - try: - service.on_load() - except Exception as e: - logger.exception("error during service(%s) on load", service.name) - raise ValueError(e) - # make service available - cls.services[name] = service - - @classmethod - def get(cls, name: str) -> type["CoreService"]: - """ - Retrieve a service from the manager. - - :param name: name of the service to retrieve - :return: service if it exists, None otherwise - """ - service = cls.services.get(name) - if service is None: - raise CoreServiceError(f"service({name}) does not exist") - return service - - @classmethod - def add_services(cls, path: Path) -> list[str]: - """ - Method for retrieving all CoreServices from a given path. - - :param path: path to retrieve services from - :return: list of core services that failed to load - """ - service_errors = [] - services = utils.load_classes(path, CoreService) - for service in services: - if not service.name: - continue - try: - cls.add(service) - except (CoreError, ValueError) as e: - service_errors.append(service.name) - logger.debug("not loading service(%s): %s", service.name, e) - return service_errors - - @classmethod - def load_locals(cls) -> list[str]: - errors = [] - for module_info in pkgutil.walk_packages( - core_services.__path__, f"{core_services.__name__}." - ): - services = utils.load_module(module_info.name, CoreService) - for service in services: - try: - cls.add(service) - except CoreError as e: - errors.append(service.name) - logger.debug("not loading service(%s): %s", service.name, e) - return errors - - -class CoreServices: - """ - Class for interacting with a list of available startup services for - nodes. Mostly used to convert a CoreService into a Config API - message. This class lives in the Session object and remembers - the default services configured for each node type, and any - custom service configuration. A CoreService is not a Configurable. - """ - - name: str = "services" - config_type: RegisterTlvs = RegisterTlvs.UTILITY - - def __init__(self, session: "Session") -> None: - """ - Creates a CoreServices instance. 
- - :param session: session this manager is tied to - """ - self.session: "Session" = session - # dict of default services tuples, key is node type - self.default_services: dict[str, list[str]] = { - "mdr": ["zebra", "OSPFv3MDR", "IPForward"], - "PC": ["DefaultRoute"], - "prouter": [], - "router": ["zebra", "OSPFv2", "OSPFv3", "IPForward"], - "host": ["DefaultRoute", "SSH"], - } - # dict of node ids to dict of custom services by name - self.custom_services: dict[int, dict[str, "CoreService"]] = {} - - def reset(self) -> None: - """ - Called when config message with reset flag is received - """ - self.custom_services.clear() - - def get_service( - self, node_id: int, service_name: str, default_service: bool = False - ) -> "CoreService": - """ - Get any custom service configured for the given node that matches the specified - service name. If no custom service is found, return the specified service. - - :param node_id: object id to get service from - :param service_name: name of service to retrieve - :param default_service: True to return default service when custom does - not exist, False returns None - :return: custom service from the node - """ - node_services = self.custom_services.setdefault(node_id, {}) - default = None - if default_service: - default = ServiceManager.get(service_name) - return node_services.get(service_name, default) - - def set_service(self, node_id: int, service_name: str) -> None: - """ - Store service customizations in an instantiated service object - using a list of values that came from a config message. - - :param node_id: object id to set custom service for - :param service_name: name of service to set - :return: nothing - """ - logger.debug("setting custom service(%s) for node: %s", service_name, node_id) - service = self.get_service(node_id, service_name) - if not service: - service_class = ServiceManager.get(service_name) - service = service_class() - - # add the custom service to dict - node_services = self.custom_services.setdefault(node_id, {}) - node_services[service.name] = service - - def add_services( - self, node: CoreNode, model: str, services: list[str] = None - ) -> None: - """ - Add services to a node. - - :param node: node to add services to - :param model: node model type to add services for - :param services: names of services to add to node - :return: nothing - """ - if not services: - logger.info( - "using default services for node(%s) type(%s)", node.name, model - ) - services = self.default_services.get(model, []) - logger.info("setting services for node(%s): %s", node.name, services) - for service_name in services: - service = self.get_service(node.id, service_name, default_service=True) - if not service: - logger.warning( - "unknown service(%s) for node(%s)", service_name, node.name - ) - continue - node.services.append(service) - - def all_configs(self) -> list[tuple[int, "CoreService"]]: - """ - Return (node_id, service) tuples for all stored configs. Used when reconnecting - to a session or opening XML. - - :return: list of tuples of node ids and services - """ - configs = [] - for node_id in self.custom_services: - custom_services = self.custom_services[node_id] - for name in custom_services: - service = custom_services[name] - configs.append((node_id, service)) - return configs - - def all_files(self, service: "CoreService") -> list[tuple[str, str]]: - """ - Return all customized files stored with a service. - Used when reconnecting to a session or opening XML. 
- - :param service: service to get files for - :return: list of all custom service files - """ - files = [] - if not service.custom: - return files - - for filename in service.configs: - data = service.config_data.get(filename) - if data is None: - continue - files.append((filename, data)) - - return files - - def boot_services(self, node: CoreNode) -> None: - """ - Start all services on a node. - - :param node: node to start services on - :return: nothing - """ - boot_paths = ServiceDependencies(node.services).boot_order() - funcs = [] - for boot_path in boot_paths: - args = (node, boot_path) - funcs.append((self._boot_service_path, args, {})) - result, exceptions = utils.threadpool(funcs) - if exceptions: - raise CoreServiceBootError(*exceptions) - - def _boot_service_path(self, node: CoreNode, boot_path: list["CoreServiceType"]): - logger.info( - "booting node(%s) services: %s", - node.name, - " -> ".join([x.name for x in boot_path]), - ) - for service in boot_path: - service = self.get_service(node.id, service.name, default_service=True) - try: - self.boot_service(node, service) - except Exception as e: - logger.exception("exception booting service: %s", service.name) - raise CoreServiceBootError(e) - - def boot_service(self, node: CoreNode, service: "CoreServiceType") -> None: - """ - Start a service on a node. Create private dirs, generate config - files, and execute startup commands. - - :param node: node to boot services on - :param service: service to start - :return: nothing - """ - logger.info( - "starting node(%s) service(%s) validation(%s)", - node.name, - service.name, - service.validation_mode.name, - ) - - # create service directories - for directory in service.dirs: - dir_path = Path(directory) - try: - node.create_dir(dir_path) - except (CoreCommandError, CoreError) as e: - logger.warning( - "error mounting private dir '%s' for service '%s': %s", - directory, - service.name, - e, - ) - - # create service files - self.create_service_files(node, service) - - # run startup - wait = service.validation_mode == ServiceMode.BLOCKING - status = self.startup_service(node, service, wait) - if status: - raise CoreServiceBootError( - f"node({node.name}) service({service.name}) error during startup" - ) - - # blocking mode is finished - if wait: - return - - # timer mode, sleep and return - if service.validation_mode == ServiceMode.TIMER: - time.sleep(service.validation_timer) - # non-blocking, attempt to validate periodically, up to validation_timer time - elif service.validation_mode == ServiceMode.NON_BLOCKING: - start = time.monotonic() - while True: - status = self.validate_service(node, service) - if not status: - break - - if time.monotonic() - start > service.validation_timer: - break - - time.sleep(service.validation_period) - - if status: - raise CoreServiceBootError( - f"node({node.name}) service({service.name}) failed validation" - ) - - def copy_service_file(self, node: CoreNode, file_path: Path, cfg: str) -> bool: - """ - Given a configured service filename and config, determine if the - config references an existing file that should be copied. - Returns True for local files, False for generated. 
- - :param node: node to copy service for - :param file_path: file name for a configured service - :param cfg: configuration string - :return: True if successful, False otherwise - """ - if cfg[:7] == "file://": - src = cfg[7:] - src = src.split("\n")[0] - src = utils.expand_corepath(src, node.session, node) - # TODO: glob here - node.copy_file(src, file_path, mode=0o644) - return True - return False - - def validate_service(self, node: CoreNode, service: "CoreServiceType") -> int: - """ - Run the validation command(s) for a service. - - :param node: node to validate service for - :param service: service to validate - :return: service validation status - """ - logger.debug("validating node(%s) service(%s)", node.name, service.name) - cmds = service.validate - if not service.custom: - cmds = service.get_validate(node) - - status = 0 - for cmd in cmds: - logger.debug("validating service(%s) using: %s", service.name, cmd) - try: - node.cmd(cmd) - except CoreCommandError as e: - logger.debug( - "node(%s) service(%s) validate failed", node.name, service.name - ) - logger.debug("cmd(%s): %s", e.cmd, e.output) - status = -1 - break - - return status - - def stop_services(self, node: CoreNode) -> None: - """ - Stop all services on a node. - - :param node: node to stop services on - :return: nothing - """ - for service in node.services: - self.stop_service(node, service) - - def stop_service(self, node: CoreNode, service: "CoreServiceType") -> int: - """ - Stop a service on a node. - - :param node: node to stop a service on - :param service: service to stop - :return: status for stopping the services - """ - status = 0 - for args in service.shutdown: - try: - node.cmd(args) - except CoreCommandError as e: - self.session.exception( - ExceptionLevels.ERROR, - "services", - f"error stopping service {service.name}: {e.stderr}", - node.id, - ) - logger.exception("error running stop command %s", args) - status = -1 - return status - - def get_service_file( - self, node: CoreNode, service_name: str, filename: str - ) -> FileData: - """ - Send a File Message when the GUI has requested a service file. - The file data is either auto-generated or comes from an existing config. - - :param node: node to get service file from - :param service_name: service to get file from - :param filename: file name to retrieve - :return: file data - """ - # get service to get file from - service = self.get_service(node.id, service_name, default_service=True) - if not service: - raise ValueError("invalid service: %s", service_name) - - # retrieve config files for default/custom service - if service.custom: - config_files = service.configs - else: - config_files = service.get_configs(node) - - if filename not in config_files: - raise ValueError( - "unknown service(%s) config file: %s", service_name, filename - ) - - # get the file data - data = service.config_data.get(filename) - if data is None: - data = service.generate_config(node, filename) - else: - data = data - - filetypestr = f"service:{service.name}" - return FileData( - message_type=MessageFlags.ADD, - node=node.id, - name=filename, - type=filetypestr, - data=data, - ) - - def set_service_file( - self, node_id: int, service_name: str, file_name: str, data: str - ) -> None: - """ - Receive a File Message from the GUI and store the customized file - in the service config. The filename must match one from the list of - config files in the service. 
- - :param node_id: node id to set service file - :param service_name: service name to set file for - :param file_name: file name to set - :param data: data for file to set - :return: nothing - """ - # attempt to set custom service, if needed - self.set_service(node_id, service_name) - - # retrieve custom service - service = self.get_service(node_id, service_name) - if service is None: - logger.warning("received file name for unknown service: %s", service_name) - return - - # validate file being set is valid - config_files = service.configs - if file_name not in config_files: - logger.warning( - "received unknown file(%s) for service(%s)", file_name, service_name - ) - return - - # set custom service file data - service.config_data[file_name] = data - - def startup_service( - self, node: CoreNode, service: "CoreServiceType", wait: bool = False - ) -> int: - """ - Startup a node service. - - :param node: node to reconfigure service for - :param service: service to reconfigure - :param wait: determines if we should wait to validate startup - :return: status of startup - """ - cmds = service.startup - if not service.custom: - cmds = service.get_startup(node) - - status = 0 - for cmd in cmds: - try: - node.cmd(cmd, wait) - except CoreCommandError: - logger.exception("error starting command") - status = -1 - return status - - def create_service_files(self, node: CoreNode, service: "CoreServiceType") -> None: - """ - Creates node service files. - - :param node: node to reconfigure service for - :param service: service to reconfigure - :return: nothing - """ - # get values depending on if custom or not - config_files = service.configs - if not service.custom: - config_files = service.get_configs(node) - for file_name in config_files: - file_path = Path(file_name) - logger.debug( - "generating service config custom(%s): %s", service.custom, file_name - ) - if service.custom: - cfg = service.config_data.get(file_name) - if cfg is None: - cfg = service.generate_config(node, file_name) - # cfg may have a file:/// url for copying from a file - try: - if self.copy_service_file(node, file_path, cfg): - continue - except OSError: - logger.exception("error copying service file: %s", file_name) - continue - else: - cfg = service.generate_config(node, file_name) - node.create_file(file_path, cfg) - - def service_reconfigure(self, node: CoreNode, service: "CoreService") -> None: - """ - Reconfigure a node service. - - :param node: node to reconfigure service for - :param service: service to reconfigure - :return: nothing - """ - config_files = service.configs - if not service.custom: - config_files = service.get_configs(node) - for file_name in config_files: - file_path = Path(file_name) - if file_name[:7] == "file:///": - # TODO: implement this - raise NotImplementedError - cfg = service.config_data.get(file_name) - if cfg is None: - cfg = service.generate_config(node, file_name) - node.create_file(file_path, cfg) - - -class CoreService: - """ - Parent class used for defining services. - """ - - # service name should not include spaces - name: Optional[str] = None - - # executables that must exist for service to run - executables: tuple[str, ...] = () - - # sets service requirements that must be started prior to this service starting - dependencies: tuple[str, ...] = () - - # group string allows grouping services together - group: Optional[str] = None - - # private, per-node directories required by this service - dirs: tuple[str, ...] 
= () - - # config files written by this service - configs: tuple[str, ...] = () - - # config file data - config_data: dict[str, str] = {} - - # list of startup commands - startup: tuple[str, ...] = () - - # list of shutdown commands - shutdown: tuple[str, ...] = () - - # list of validate commands - validate: tuple[str, ...] = () - - # validation mode, used to determine startup success - validation_mode: ServiceMode = ServiceMode.NON_BLOCKING - - # time to wait in seconds for determining if service started successfully - validation_timer: int = 5 - - # validation period in seconds, how frequent validation is attempted - validation_period: float = 0.5 - - # metadata associated with this service - meta: Optional[str] = None - - # custom configuration text - custom: bool = False - custom_needed: bool = False - - def __init__(self) -> None: - """ - Services are not necessarily instantiated. Classmethods may be used - against their config. Services are instantiated when a custom - configuration is used to override their default parameters. - """ - self.custom: bool = True - self.config_data: dict[str, str] = self.__class__.config_data.copy() - - @classmethod - def on_load(cls) -> None: - pass - - @classmethod - def get_configs(cls, node: CoreNode) -> Iterable[str]: - """ - Return the tuple of configuration file filenames. This default method - returns the cls._configs tuple, but this method may be overriden to - provide node-specific filenames that may be based on other services. - - :param node: node to generate config for - :return: configuration files - """ - return cls.configs - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate configuration file given a node object. The filename is - provided to allow for multiple config files. - Return the configuration string to be written to a file or sent - to the GUI for customization. - - :param node: node to generate config for - :param filename: file name to generate config for - :return: generated config - """ - raise NotImplementedError - - @classmethod - def get_startup(cls, node: CoreNode) -> Iterable[str]: - """ - Return the tuple of startup commands. This default method - returns the cls.startup tuple, but this method may be - overridden to provide node-specific commands that may be - based on other services. - - :param node: node to get startup for - :return: startup commands - """ - return cls.startup - - @classmethod - def get_validate(cls, node: CoreNode) -> Iterable[str]: - """ - Return the tuple of validate commands. This default method - returns the cls.validate tuple, but this method may be - overridden to provide node-specific commands that may be - based on other services. 
- - :param node: node to validate - :return: validation commands - """ - return cls.validate diff --git a/daemon/core/configservice/__init__.py b/daemon/core/services/defaults/__init__.py similarity index 100% rename from daemon/core/configservice/__init__.py rename to daemon/core/services/defaults/__init__.py diff --git a/daemon/core/configservices/__init__.py b/daemon/core/services/defaults/frrservices/__init__.py similarity index 100% rename from daemon/core/configservices/__init__.py rename to daemon/core/services/defaults/frrservices/__init__.py diff --git a/daemon/core/configservices/frrservices/services.py b/daemon/core/services/defaults/frrservices/services.py similarity index 91% rename from daemon/core/configservices/frrservices/services.py rename to daemon/core/services/defaults/frrservices/services.py index 378d42f84..3bde4139a 100644 --- a/daemon/core/configservices/frrservices/services.py +++ b/daemon/core/services/defaults/frrservices/services.py @@ -1,14 +1,13 @@ import abc from typing import Any -from core.config import Configuration -from core.configservice.base import ConfigService, ConfigServiceMode from core.emane.nodes import EmaneNet from core.nodes.base import CoreNodeBase, NodeBase from core.nodes.interface import DEFAULT_MTU, CoreInterface from core.nodes.network import PtpNet, WlanNode from core.nodes.physical import Rj45Node from core.nodes.wireless import WirelessNode +from core.services.base import CoreService GROUP: str = "FRR" FRR_STATE_DIR: str = "/var/run/frr" @@ -79,7 +78,7 @@ def rj45_check(iface: CoreInterface) -> bool: return False -class FRRZebra(ConfigService): +class FRRZebra(CoreService): name: str = "FRRzebra" group: str = GROUP directories: list[str] = ["/usr/local/etc/frr", "/var/run/frr", "/var/log/frr"] @@ -90,13 +89,9 @@ class FRRZebra(ConfigService): "/usr/local/etc/frr/daemons", ] executables: list[str] = ["zebra"] - dependencies: list[str] = [] startup: list[str] = ["bash frrboot.sh zebra"] validate: list[str] = ["pidof zebra"] shutdown: list[str] = ["killall zebra"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: frr_conf = self.files[0] @@ -111,7 +106,7 @@ def data(self) -> dict[str, Any]: services = [] want_ip4 = False want_ip6 = False - for service in self.node.config_services.values(): + for service in self.node.services.values(): if self.name not in service.dependencies: continue if not isinstance(service, FrrService): @@ -146,16 +141,7 @@ def data(self) -> dict[str, Any]: class FrrService(abc.ABC): group: str = GROUP - directories: list[str] = [] - files: list[str] = [] - executables: list[str] = [] dependencies: list[str] = ["FRRzebra"] - startup: list[str] = [] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} ipv4_routing: bool = False ipv6_routing: bool = False @@ -168,7 +154,7 @@ def frr_config(self) -> str: raise NotImplementedError -class FRROspfv2(FrrService, ConfigService): +class FRROspfv2(FrrService, CoreService): """ The OSPFv2 service provides IPv4 routing for wired networks. 
It does not build its own configuration file but has hooks for adding to the @@ -220,7 +206,7 @@ def frr_iface_config(self, iface: CoreInterface) -> str: return self.render_text(text, data) -class FRROspfv3(FrrService, ConfigService): +class FRROspfv3(FrrService, CoreService): """ The OSPFv3 service provides IPv6 routing for wired networks. It does not build its own configuration file but has hooks for adding to the @@ -257,7 +243,7 @@ def frr_iface_config(self, iface: CoreInterface) -> str: return "" -class FRRBgp(FrrService, ConfigService): +class FRRBgp(FrrService, CoreService): """ The BGP service provides interdomain routing. Peers must be manually configured, with a full mesh for those @@ -289,7 +275,7 @@ def frr_iface_config(self, iface: CoreInterface) -> str: return "" -class FRRRip(FrrService, ConfigService): +class FRRRip(FrrService, CoreService): """ The RIP service provides IPv4 routing for wired networks. """ @@ -314,7 +300,7 @@ def frr_iface_config(self, iface: CoreInterface) -> str: return "" -class FRRRipng(FrrService, ConfigService): +class FRRRipng(FrrService, CoreService): """ The RIP NG service provides IPv6 routing for wired networks. """ @@ -339,7 +325,7 @@ def frr_iface_config(self, iface: CoreInterface) -> str: return "" -class FRRBabel(FrrService, ConfigService): +class FRRBabel(FrrService, CoreService): """ The Babel service provides a loop-avoiding distance-vector routing protocol for IPv6 and IPv4 with fast convergence properties. @@ -380,7 +366,7 @@ def frr_iface_config(self, iface: CoreInterface) -> str: return self.clean_text(text) -class FRRpimd(FrrService, ConfigService): +class FRRpimd(FrrService, CoreService): """ PIM multicast routing based on XORP. """ diff --git a/daemon/core/configservices/frrservices/templates/frrboot.sh b/daemon/core/services/defaults/frrservices/templates/frrboot.sh similarity index 100% rename from daemon/core/configservices/frrservices/templates/frrboot.sh rename to daemon/core/services/defaults/frrservices/templates/frrboot.sh diff --git a/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/daemons b/daemon/core/services/defaults/frrservices/templates/usr/local/etc/frr/daemons similarity index 100% rename from daemon/core/configservices/frrservices/templates/usr/local/etc/frr/daemons rename to daemon/core/services/defaults/frrservices/templates/usr/local/etc/frr/daemons diff --git a/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/frr.conf b/daemon/core/services/defaults/frrservices/templates/usr/local/etc/frr/frr.conf similarity index 100% rename from daemon/core/configservices/frrservices/templates/usr/local/etc/frr/frr.conf rename to daemon/core/services/defaults/frrservices/templates/usr/local/etc/frr/frr.conf diff --git a/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/vtysh.conf b/daemon/core/services/defaults/frrservices/templates/usr/local/etc/frr/vtysh.conf similarity index 100% rename from daemon/core/configservices/frrservices/templates/usr/local/etc/frr/vtysh.conf rename to daemon/core/services/defaults/frrservices/templates/usr/local/etc/frr/vtysh.conf diff --git a/daemon/core/configservices/frrservices/__init__.py b/daemon/core/services/defaults/nrlservices/__init__.py similarity index 100% rename from daemon/core/configservices/frrservices/__init__.py rename to daemon/core/services/defaults/nrlservices/__init__.py diff --git a/daemon/core/configservices/nrlservices/services.py b/daemon/core/services/defaults/nrlservices/services.py similarity index 61% rename 
from daemon/core/configservices/nrlservices/services.py rename to daemon/core/services/defaults/nrlservices/services.py index 3002cd94f..7ccce0e44 100644 --- a/daemon/core/configservices/nrlservices/services.py +++ b/daemon/core/services/defaults/nrlservices/services.py @@ -1,25 +1,19 @@ from typing import Any from core import utils -from core.config import Configuration -from core.configservice.base import ConfigService, ConfigServiceMode +from core.services.base import CoreService GROUP: str = "ProtoSvc" -class MgenSinkService(ConfigService): +class MgenSinkService(CoreService): name: str = "MGEN_Sink" group: str = GROUP - directories: list[str] = [] files: list[str] = ["mgensink.sh", "sink.mgen"] executables: list[str] = ["mgen"] - dependencies: list[str] = [] startup: list[str] = ["bash mgensink.sh"] validate: list[str] = ["pidof mgen"] shutdown: list[str] = ["killall mgen"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: ifnames = [] @@ -29,45 +23,35 @@ def data(self) -> dict[str, Any]: return dict(ifnames=ifnames) -class NrlNhdp(ConfigService): +class NrlNhdp(CoreService): name: str = "NHDP" group: str = GROUP - directories: list[str] = [] files: list[str] = ["nrlnhdp.sh"] executables: list[str] = ["nrlnhdp"] - dependencies: list[str] = [] startup: list[str] = ["bash nrlnhdp.sh"] validate: list[str] = ["pidof nrlnhdp"] shutdown: list[str] = ["killall nrlnhdp"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: - has_smf = "SMF" in self.node.config_services + has_smf = "SMF" in self.node.services ifnames = [] for iface in self.node.get_ifaces(control=False): ifnames.append(iface.name) return dict(has_smf=has_smf, ifnames=ifnames) -class NrlSmf(ConfigService): +class NrlSmf(CoreService): name: str = "SMF" group: str = GROUP - directories: list[str] = [] files: list[str] = ["startsmf.sh"] executables: list[str] = ["nrlsmf", "killall"] - dependencies: list[str] = [] startup: list[str] = ["bash startsmf.sh"] validate: list[str] = ["pidof nrlsmf"] shutdown: list[str] = ["killall nrlsmf"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: - has_nhdp = "NHDP" in self.node.config_services - has_olsr = "OLSR" in self.node.config_services + has_nhdp = "NHDP" in self.node.services + has_olsr = "OLSR" in self.node.services ifnames = [] ip4_prefix = None for iface in self.node.get_ifaces(control=False): @@ -81,23 +65,18 @@ def data(self) -> dict[str, Any]: ) -class NrlOlsr(ConfigService): +class NrlOlsr(CoreService): name: str = "OLSR" group: str = GROUP - directories: list[str] = [] files: list[str] = ["nrlolsrd.sh"] executables: list[str] = ["nrlolsrd"] - dependencies: list[str] = [] startup: list[str] = ["bash nrlolsrd.sh"] validate: list[str] = ["pidof nrlolsrd"] shutdown: list[str] = ["killall nrlolsrd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: - has_smf = "SMF" in self.node.config_services - has_zebra = "zebra" in self.node.config_services + has_smf = "SMF" in self.node.services + has_zebra = "zebra" in self.node.services ifname = None for iface in 
self.node.get_ifaces(control=False): ifname = iface.name @@ -105,60 +84,46 @@ def data(self) -> dict[str, Any]: return dict(has_smf=has_smf, has_zebra=has_zebra, ifname=ifname) -class NrlOlsrv2(ConfigService): +class NrlOlsrv2(CoreService): name: str = "OLSRv2" group: str = GROUP - directories: list[str] = [] files: list[str] = ["nrlolsrv2.sh"] executables: list[str] = ["nrlolsrv2"] - dependencies: list[str] = [] startup: list[str] = ["bash nrlolsrv2.sh"] validate: list[str] = ["pidof nrlolsrv2"] shutdown: list[str] = ["killall nrlolsrv2"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: - has_smf = "SMF" in self.node.config_services + has_smf = "SMF" in self.node.services ifnames = [] for iface in self.node.get_ifaces(control=False): ifnames.append(iface.name) return dict(has_smf=has_smf, ifnames=ifnames) -class OlsrOrg(ConfigService): +class OlsrOrg(CoreService): name: str = "OLSRORG" group: str = GROUP directories: list[str] = ["/etc/olsrd"] files: list[str] = ["olsrd.sh", "/etc/olsrd/olsrd.conf"] executables: list[str] = ["olsrd"] - dependencies: list[str] = [] startup: list[str] = ["bash olsrd.sh"] validate: list[str] = ["pidof olsrd"] shutdown: list[str] = ["killall olsrd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: - has_smf = "SMF" in self.node.config_services + has_smf = "SMF" in self.node.services ifnames = [] for iface in self.node.get_ifaces(control=False): ifnames.append(iface.name) return dict(has_smf=has_smf, ifnames=ifnames) -class MgenActor(ConfigService): +class MgenActor(CoreService): name: str = "MgenActor" group: str = GROUP - directories: list[str] = [] files: list[str] = ["start_mgen_actor.sh"] executables: list[str] = ["mgen"] - dependencies: list[str] = [] startup: list[str] = ["bash start_mgen_actor.sh"] validate: list[str] = ["pidof mgen"] shutdown: list[str] = ["killall mgen"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} diff --git a/daemon/core/configservices/nrlservices/templates/etc/olsrd/olsrd.conf b/daemon/core/services/defaults/nrlservices/templates/etc/olsrd/olsrd.conf similarity index 100% rename from daemon/core/configservices/nrlservices/templates/etc/olsrd/olsrd.conf rename to daemon/core/services/defaults/nrlservices/templates/etc/olsrd/olsrd.conf diff --git a/daemon/core/configservices/nrlservices/templates/mgensink.sh b/daemon/core/services/defaults/nrlservices/templates/mgensink.sh similarity index 100% rename from daemon/core/configservices/nrlservices/templates/mgensink.sh rename to daemon/core/services/defaults/nrlservices/templates/mgensink.sh diff --git a/daemon/core/configservices/nrlservices/templates/nrlnhdp.sh b/daemon/core/services/defaults/nrlservices/templates/nrlnhdp.sh similarity index 100% rename from daemon/core/configservices/nrlservices/templates/nrlnhdp.sh rename to daemon/core/services/defaults/nrlservices/templates/nrlnhdp.sh diff --git a/daemon/core/configservices/nrlservices/templates/nrlolsrd.sh b/daemon/core/services/defaults/nrlservices/templates/nrlolsrd.sh similarity index 100% rename from daemon/core/configservices/nrlservices/templates/nrlolsrd.sh rename to daemon/core/services/defaults/nrlservices/templates/nrlolsrd.sh diff --git 
a/daemon/core/configservices/nrlservices/templates/nrlolsrv2.sh b/daemon/core/services/defaults/nrlservices/templates/nrlolsrv2.sh similarity index 100% rename from daemon/core/configservices/nrlservices/templates/nrlolsrv2.sh rename to daemon/core/services/defaults/nrlservices/templates/nrlolsrv2.sh diff --git a/daemon/core/configservices/nrlservices/templates/olsrd.sh b/daemon/core/services/defaults/nrlservices/templates/olsrd.sh similarity index 100% rename from daemon/core/configservices/nrlservices/templates/olsrd.sh rename to daemon/core/services/defaults/nrlservices/templates/olsrd.sh diff --git a/daemon/core/configservices/nrlservices/templates/sink.mgen b/daemon/core/services/defaults/nrlservices/templates/sink.mgen similarity index 100% rename from daemon/core/configservices/nrlservices/templates/sink.mgen rename to daemon/core/services/defaults/nrlservices/templates/sink.mgen diff --git a/daemon/core/configservices/nrlservices/templates/start_mgen_actor.sh b/daemon/core/services/defaults/nrlservices/templates/start_mgen_actor.sh similarity index 100% rename from daemon/core/configservices/nrlservices/templates/start_mgen_actor.sh rename to daemon/core/services/defaults/nrlservices/templates/start_mgen_actor.sh diff --git a/daemon/core/configservices/nrlservices/templates/startsmf.sh b/daemon/core/services/defaults/nrlservices/templates/startsmf.sh similarity index 100% rename from daemon/core/configservices/nrlservices/templates/startsmf.sh rename to daemon/core/services/defaults/nrlservices/templates/startsmf.sh diff --git a/daemon/core/configservices/nrlservices/__init__.py b/daemon/core/services/defaults/quaggaservices/__init__.py similarity index 100% rename from daemon/core/configservices/nrlservices/__init__.py rename to daemon/core/services/defaults/quaggaservices/__init__.py diff --git a/daemon/core/configservices/quaggaservices/services.py b/daemon/core/services/defaults/quaggaservices/services.py similarity index 92% rename from daemon/core/configservices/quaggaservices/services.py rename to daemon/core/services/defaults/quaggaservices/services.py index 8b4d4909c..f7a037405 100644 --- a/daemon/core/configservices/quaggaservices/services.py +++ b/daemon/core/services/defaults/quaggaservices/services.py @@ -2,14 +2,13 @@ import logging from typing import Any -from core.config import Configuration -from core.configservice.base import ConfigService, ConfigServiceMode from core.emane.nodes import EmaneNet from core.nodes.base import CoreNodeBase, NodeBase from core.nodes.interface import DEFAULT_MTU, CoreInterface from core.nodes.network import PtpNet, WlanNode from core.nodes.physical import Rj45Node from core.nodes.wireless import WirelessNode +from core.services.base import CoreService logger = logging.getLogger(__name__) GROUP: str = "Quagga" @@ -81,7 +80,7 @@ def rj45_check(iface: CoreInterface) -> bool: return False -class Zebra(ConfigService): +class Zebra(CoreService): name: str = "zebra" group: str = GROUP directories: list[str] = ["/usr/local/etc/quagga", "/var/run/quagga"] @@ -91,13 +90,9 @@ class Zebra(ConfigService): "/usr/local/etc/quagga/vtysh.conf", ] executables: list[str] = ["zebra"] - dependencies: list[str] = [] startup: list[str] = ["bash quaggaboot.sh zebra"] validate: list[str] = ["pidof zebra"] shutdown: list[str] = ["killall zebra"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: quagga_bin_search = 
self.node.session.options.get( @@ -112,7 +107,7 @@ def data(self) -> dict[str, Any]: services = [] want_ip4 = False want_ip6 = False - for service in self.node.config_services.values(): + for service in self.node.services.values(): if self.name not in service.dependencies: continue if not isinstance(service, QuaggaService): @@ -153,16 +148,7 @@ def data(self) -> dict[str, Any]: class QuaggaService(abc.ABC): group: str = GROUP - directories: list[str] = [] - files: list[str] = [] - executables: list[str] = [] dependencies: list[str] = ["zebra"] - startup: list[str] = [] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} ipv4_routing: bool = False ipv6_routing: bool = False @@ -175,7 +161,7 @@ def quagga_config(self) -> str: raise NotImplementedError -class Ospfv2(QuaggaService, ConfigService): +class Ospfv2(QuaggaService, CoreService): """ The OSPFv2 service provides IPv4 routing for wired networks. It does not build its own configuration file but has hooks for adding to the @@ -226,7 +212,7 @@ def quagga_config(self) -> str: return self.render_text(text, data) -class Ospfv3(QuaggaService, ConfigService): +class Ospfv3(QuaggaService, CoreService): """ The OSPFv3 service provides IPv6 routing for wired networks. It does not build its own configuration file but has hooks for adding to the @@ -292,7 +278,7 @@ def quagga_iface_config(self, iface: CoreInterface) -> str: return config -class Bgp(QuaggaService, ConfigService): +class Bgp(QuaggaService, CoreService): """ The BGP service provides interdomain routing. Peers must be manually configured, with a full mesh for those @@ -323,7 +309,7 @@ def quagga_iface_config(self, iface: CoreInterface) -> str: return "" -class Rip(QuaggaService, ConfigService): +class Rip(QuaggaService, CoreService): """ The RIP service provides IPv4 routing for wired networks. """ @@ -348,7 +334,7 @@ def quagga_iface_config(self, iface: CoreInterface) -> str: return "" -class Ripng(QuaggaService, ConfigService): +class Ripng(QuaggaService, CoreService): """ The RIP NG service provides IPv6 routing for wired networks. """ @@ -373,7 +359,7 @@ def quagga_iface_config(self, iface: CoreInterface) -> str: return "" -class Babel(QuaggaService, ConfigService): +class Babel(QuaggaService, CoreService): """ The Babel service provides a loop-avoiding distance-vector routing protocol for IPv6 and IPv4 with fast convergence properties. @@ -414,7 +400,7 @@ def quagga_iface_config(self, iface: CoreInterface) -> str: return self.clean_text(text) -class Xpimd(QuaggaService, ConfigService): +class Xpimd(QuaggaService, CoreService): """ PIM multicast routing based on XORP. 
""" diff --git a/daemon/core/configservices/quaggaservices/templates/quaggaboot.sh b/daemon/core/services/defaults/quaggaservices/templates/quaggaboot.sh similarity index 100% rename from daemon/core/configservices/quaggaservices/templates/quaggaboot.sh rename to daemon/core/services/defaults/quaggaservices/templates/quaggaboot.sh diff --git a/daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/Quagga.conf b/daemon/core/services/defaults/quaggaservices/templates/usr/local/etc/quagga/Quagga.conf similarity index 100% rename from daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/Quagga.conf rename to daemon/core/services/defaults/quaggaservices/templates/usr/local/etc/quagga/Quagga.conf diff --git a/daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/vtysh.conf b/daemon/core/services/defaults/quaggaservices/templates/usr/local/etc/quagga/vtysh.conf similarity index 100% rename from daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/vtysh.conf rename to daemon/core/services/defaults/quaggaservices/templates/usr/local/etc/quagga/vtysh.conf diff --git a/daemon/core/configservices/quaggaservices/__init__.py b/daemon/core/services/defaults/securityservices/__init__.py similarity index 100% rename from daemon/core/configservices/quaggaservices/__init__.py rename to daemon/core/services/defaults/securityservices/__init__.py diff --git a/daemon/core/configservices/securityservices/services.py b/daemon/core/services/defaults/securityservices/services.py similarity index 59% rename from daemon/core/configservices/securityservices/services.py rename to daemon/core/services/defaults/securityservices/services.py index e6243b2c1..8b675e5b6 100644 --- a/daemon/core/configservices/securityservices/services.py +++ b/daemon/core/services/defaults/securityservices/services.py @@ -1,47 +1,44 @@ from typing import Any +from core import constants from core.config import ConfigString, Configuration -from core.configservice.base import ConfigService, ConfigServiceMode +from core.services.base import CoreService GROUP_NAME: str = "Security" -class VpnClient(ConfigService): +class VpnClient(CoreService): name: str = "VPNClient" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["vpnclient.sh"] executables: list[str] = ["openvpn", "ip", "killall"] - dependencies: list[str] = [] startup: list[str] = ["bash vpnclient.sh"] validate: list[str] = ["pidof openvpn"] shutdown: list[str] = ["killall openvpn"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING default_configs: list[Configuration] = [ - ConfigString(id="keydir", label="Key Dir", default="/etc/core/keys"), + ConfigString( + id="keydir", label="Key Dir", default=f"{constants.CORE_CONF_DIR}/keys" + ), ConfigString(id="keyname", label="Key Name", default="client1"), ConfigString(id="server", label="Server", default="10.0.2.10"), ] - modes: dict[str, dict[str, str]] = {} -class VpnServer(ConfigService): +class VpnServer(CoreService): name: str = "VPNServer" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["vpnserver.sh"] executables: list[str] = ["openvpn", "ip", "killall"] - dependencies: list[str] = [] startup: list[str] = ["bash vpnserver.sh"] validate: list[str] = ["pidof openvpn"] shutdown: list[str] = ["killall openvpn"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING default_configs: list[Configuration] = [ - ConfigString(id="keydir", label="Key Dir", default="/etc/core/keys"), + ConfigString( + 
id="keydir", label="Key Dir", default=f"{constants.CORE_CONF_DIR}/keys" + ), ConfigString(id="keyname", label="Key Name", default="server"), ConfigString(id="subnet", label="Subnet", default="10.0.200.0"), ] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: address = None @@ -53,49 +50,30 @@ def data(self) -> dict[str, Any]: return dict(address=address) -class IPsec(ConfigService): +class IPsec(CoreService): name: str = "IPsec" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["ipsec.sh"] executables: list[str] = ["racoon", "ip", "setkey", "killall"] - dependencies: list[str] = [] startup: list[str] = ["bash ipsec.sh"] validate: list[str] = ["pidof racoon"] shutdown: list[str] = ["killall racoon"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} -class Firewall(ConfigService): +class Firewall(CoreService): name: str = "Firewall" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["firewall.sh"] executables: list[str] = ["iptables"] - dependencies: list[str] = [] startup: list[str] = ["bash firewall.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} -class Nat(ConfigService): +class Nat(CoreService): name: str = "NAT" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["nat.sh"] executables: list[str] = ["iptables"] - dependencies: list[str] = [] startup: list[str] = ["bash nat.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: ifnames = [] diff --git a/daemon/core/configservices/securityservices/templates/firewall.sh b/daemon/core/services/defaults/securityservices/templates/firewall.sh similarity index 100% rename from daemon/core/configservices/securityservices/templates/firewall.sh rename to daemon/core/services/defaults/securityservices/templates/firewall.sh diff --git a/daemon/core/configservices/securityservices/templates/ipsec.sh b/daemon/core/services/defaults/securityservices/templates/ipsec.sh similarity index 99% rename from daemon/core/configservices/securityservices/templates/ipsec.sh rename to daemon/core/services/defaults/securityservices/templates/ipsec.sh index e8fde77e8..35e63ef9f 100644 --- a/daemon/core/configservices/securityservices/templates/ipsec.sh +++ b/daemon/core/services/defaults/securityservices/templates/ipsec.sh @@ -5,7 +5,7 @@ # peers, along with subnets to tunnel. 
# directory containing the certificate and key described below -keydir=/etc/core/keys +keydir=/opt/core/etc/keys # the name used for the "$certname.pem" x509 certificate and # "$certname.key" RSA private key, which can be generated using openssl diff --git a/daemon/core/configservices/securityservices/templates/nat.sh b/daemon/core/services/defaults/securityservices/templates/nat.sh similarity index 100% rename from daemon/core/configservices/securityservices/templates/nat.sh rename to daemon/core/services/defaults/securityservices/templates/nat.sh diff --git a/daemon/core/configservices/securityservices/templates/vpnclient.sh b/daemon/core/services/defaults/securityservices/templates/vpnclient.sh similarity index 100% rename from daemon/core/configservices/securityservices/templates/vpnclient.sh rename to daemon/core/services/defaults/securityservices/templates/vpnclient.sh diff --git a/daemon/core/configservices/securityservices/templates/vpnserver.sh b/daemon/core/services/defaults/securityservices/templates/vpnserver.sh similarity index 100% rename from daemon/core/configservices/securityservices/templates/vpnserver.sh rename to daemon/core/services/defaults/securityservices/templates/vpnserver.sh diff --git a/daemon/core/configservices/securityservices/__init__.py b/daemon/core/services/defaults/utilservices/__init__.py similarity index 100% rename from daemon/core/configservices/securityservices/__init__.py rename to daemon/core/services/defaults/utilservices/__init__.py diff --git a/daemon/core/configservices/utilservices/services.py b/daemon/core/services/defaults/utilservices/services.py similarity index 66% rename from daemon/core/configservices/utilservices/services.py rename to daemon/core/services/defaults/utilservices/services.py index 73d720608..4ecfbb301 100644 --- a/daemon/core/configservices/utilservices/services.py +++ b/daemon/core/services/defaults/utilservices/services.py @@ -3,25 +3,17 @@ import netaddr from core import utils -from core.config import Configuration -from core.configservice.base import ConfigService, ConfigServiceMode +from core.services.base import CoreService GROUP_NAME = "Utility" -class DefaultRouteService(ConfigService): +class DefaultRouteService(CoreService): name: str = "DefaultRoute" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["defaultroute.sh"] executables: list[str] = ["ip"] - dependencies: list[str] = [] startup: list[str] = ["bash defaultroute.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: # only add default routes for linked routing nodes @@ -37,19 +29,11 @@ def data(self) -> dict[str, Any]: return dict(routes=routes) -class DefaultMulticastRouteService(ConfigService): +class DefaultMulticastRouteService(CoreService): name: str = "DefaultMulticastRoute" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["defaultmroute.sh"] - executables: list[str] = [] - dependencies: list[str] = [] startup: list[str] = ["bash defaultmroute.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: ifname = None @@ -59,19 +43,11 @@ def data(self) -> dict[str, Any]: return dict(ifname=ifname) -class 
StaticRouteService(ConfigService): +class StaticRouteService(CoreService): name: str = "StaticRoute" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["staticroute.sh"] - executables: list[str] = [] - dependencies: list[str] = [] startup: list[str] = ["bash staticroute.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: routes = [] @@ -87,19 +63,12 @@ def data(self) -> dict[str, Any]: return dict(routes=routes) -class IpForwardService(ConfigService): +class IpForwardService(CoreService): name: str = "IPForward" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["ipforward.sh"] executables: list[str] = ["sysctl"] - dependencies: list[str] = [] startup: list[str] = ["bash ipforward.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: devnames = [] @@ -109,19 +78,14 @@ def data(self) -> dict[str, Any]: return dict(devnames=devnames) -class SshService(ConfigService): +class SshService(CoreService): name: str = "SSH" group: str = GROUP_NAME directories: list[str] = ["/etc/ssh", "/var/run/sshd"] files: list[str] = ["startsshd.sh", "/etc/ssh/sshd_config"] executables: list[str] = ["sshd"] - dependencies: list[str] = [] startup: list[str] = ["bash startsshd.sh"] - validate: list[str] = [] shutdown: list[str] = ["killall sshd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: return dict( @@ -131,19 +95,15 @@ def data(self) -> dict[str, Any]: ) -class DhcpService(ConfigService): +class DhcpService(CoreService): name: str = "DHCP" group: str = GROUP_NAME directories: list[str] = ["/etc/dhcp", "/var/lib/dhcp"] files: list[str] = ["/etc/dhcp/dhcpd.conf"] executables: list[str] = ["dhcpd"] - dependencies: list[str] = [] startup: list[str] = ["touch /var/lib/dhcp/dhcpd.leases", "dhcpd"] validate: list[str] = ["pidof dhcpd"] shutdown: list[str] = ["killall dhcpd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: subnets = [] @@ -159,19 +119,14 @@ def data(self) -> dict[str, Any]: return dict(subnets=subnets) -class DhcpClientService(ConfigService): +class DhcpClientService(CoreService): name: str = "DHCPClient" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["startdhcpclient.sh"] executables: list[str] = ["dhclient"] - dependencies: list[str] = [] startup: list[str] = ["bash startdhcpclient.sh"] validate: list[str] = ["pidof dhclient"] shutdown: list[str] = ["killall dhclient"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: ifnames = [] @@ -180,34 +135,25 @@ def data(self) -> dict[str, Any]: return dict(ifnames=ifnames) -class FtpService(ConfigService): +class FtpService(CoreService): name: str = "FTP" group: str = GROUP_NAME directories: list[str] = ["/var/run/vsftpd/empty", "/var/ftp"] files: list[str] = ["vsftpd.conf"] executables: list[str] = ["vsftpd"] - 
dependencies: list[str] = [] startup: list[str] = ["vsftpd ./vsftpd.conf"] validate: list[str] = ["pidof vsftpd"] shutdown: list[str] = ["killall vsftpd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} -class PcapService(ConfigService): +class PcapService(CoreService): name: str = "pcap" group: str = GROUP_NAME - directories: list[str] = [] files: list[str] = ["pcap.sh"] executables: list[str] = ["tcpdump"] - dependencies: list[str] = [] startup: list[str] = ["bash pcap.sh start"] validate: list[str] = ["pidof tcpdump"] shutdown: list[str] = ["bash pcap.sh stop"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: ifnames = [] @@ -216,21 +162,17 @@ def data(self) -> dict[str, Any]: return dict(ifnames=ifnames) -class RadvdService(ConfigService): +class RadvdService(CoreService): name: str = "radvd" group: str = GROUP_NAME directories: list[str] = ["/etc/radvd", "/var/run/radvd"] files: list[str] = ["/etc/radvd/radvd.conf"] executables: list[str] = ["radvd"] - dependencies: list[str] = [] startup: list[str] = [ "radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log" ] validate: list[str] = ["pidof radvd"] shutdown: list[str] = ["pkill radvd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: ifaces = [] @@ -244,22 +186,18 @@ def data(self) -> dict[str, Any]: return dict(ifaces=ifaces) -class AtdService(ConfigService): +class AtdService(CoreService): name: str = "atd" group: str = GROUP_NAME directories: list[str] = ["/var/spool/cron/atjobs", "/var/spool/cron/atspool"] files: list[str] = ["startatd.sh"] executables: list[str] = ["atd"] - dependencies: list[str] = [] startup: list[str] = ["bash startatd.sh"] validate: list[str] = ["pidof atd"] shutdown: list[str] = ["pkill atd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} -class HttpService(ConfigService): +class HttpService(CoreService): name: str = "HTTP" group: str = GROUP_NAME directories: list[str] = [ @@ -276,13 +214,9 @@ class HttpService(ConfigService): "/var/www/index.html", ] executables: list[str] = ["apache2ctl"] - dependencies: list[str] = [] startup: list[str] = ["chown www-data /var/lock/apache2", "apache2ctl start"] validate: list[str] = ["pidof apache2"] shutdown: list[str] = ["apache2ctl stop"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} def data(self) -> dict[str, Any]: ifaces = [] diff --git a/daemon/core/configservices/utilservices/templates/defaultmroute.sh b/daemon/core/services/defaults/utilservices/templates/defaultmroute.sh similarity index 100% rename from daemon/core/configservices/utilservices/templates/defaultmroute.sh rename to daemon/core/services/defaults/utilservices/templates/defaultmroute.sh diff --git a/daemon/core/configservices/utilservices/templates/defaultroute.sh b/daemon/core/services/defaults/utilservices/templates/defaultroute.sh similarity index 100% rename from daemon/core/configservices/utilservices/templates/defaultroute.sh rename to daemon/core/services/defaults/utilservices/templates/defaultroute.sh diff --git 
a/daemon/core/configservices/utilservices/templates/etc/apache2/apache2.conf b/daemon/core/services/defaults/utilservices/templates/etc/apache2/apache2.conf similarity index 81% rename from daemon/core/configservices/utilservices/templates/etc/apache2/apache2.conf rename to daemon/core/services/defaults/utilservices/templates/etc/apache2/apache2.conf index c53e48af8..e58c69ef8 100644 --- a/daemon/core/configservices/utilservices/templates/etc/apache2/apache2.conf +++ b/daemon/core/services/defaults/utilservices/templates/etc/apache2/apache2.conf @@ -1,7 +1,7 @@ # apache2.conf generated by utility.py:HttpService -Mutex file:$APACHE_LOCK_DIR default +Mutex file:${"${APACHE_LOCK_DIR}"} default -PidFile $APACHE_PID_FILE +PidFile ${"${APACHE_PID_FILE}"} Timeout 300 KeepAlive On MaxKeepAliveRequests 100 @@ -37,8 +37,8 @@ LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so MaxRequestsPerChild 0 -User $APACHE_RUN_USER -Group $APACHE_RUN_GROUP +User ${"${APACHE_RUN_USER}"} +Group ${"${APACHE_RUN_GROUP}"} AccessFileName .htaccess @@ -50,7 +50,7 @@ DefaultType None HostnameLookups Off -ErrorLog $APACHE_LOG_DIR/error.log +ErrorLog ${"${APACHE_LOG_DIR}"}/error.log LogLevel warn #Include mods-enabled/*.load @@ -74,9 +74,9 @@ Listen 80 Listen 443 -LogFormat "%v:%p %h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" vhost_combined -LogFormat "%h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" combined -LogFormat "%h %l %u %t \\"%r\\" %>s %O" common +LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined +LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined +LogFormat "%h %l %u %t \"%r\" %>s %O" common LogFormat "%{Referer}i -> %U" referer LogFormat "%{User-agent}i" agent @@ -96,7 +96,7 @@ TraceEnable Off AllowOverride None Require all granted - ErrorLog $APACHE_LOG_DIR/error.log + ErrorLog ${"${APACHE_LOG_DIR}"}/error.log LogLevel warn - CustomLog $APACHE_LOG_DIR/access.log combined + CustomLog ${"${APACHE_LOG_DIR}"}/access.log combined diff --git a/daemon/core/configservices/utilservices/templates/etc/apache2/envvars b/daemon/core/services/defaults/utilservices/templates/etc/apache2/envvars similarity index 100% rename from daemon/core/configservices/utilservices/templates/etc/apache2/envvars rename to daemon/core/services/defaults/utilservices/templates/etc/apache2/envvars diff --git a/daemon/core/configservices/utilservices/templates/etc/dhcp/dhcpd.conf b/daemon/core/services/defaults/utilservices/templates/etc/dhcp/dhcpd.conf similarity index 100% rename from daemon/core/configservices/utilservices/templates/etc/dhcp/dhcpd.conf rename to daemon/core/services/defaults/utilservices/templates/etc/dhcp/dhcpd.conf diff --git a/daemon/core/configservices/utilservices/templates/etc/radvd/radvd.conf b/daemon/core/services/defaults/utilservices/templates/etc/radvd/radvd.conf similarity index 100% rename from daemon/core/configservices/utilservices/templates/etc/radvd/radvd.conf rename to daemon/core/services/defaults/utilservices/templates/etc/radvd/radvd.conf diff --git a/daemon/core/configservices/utilservices/templates/etc/ssh/sshd_config b/daemon/core/services/defaults/utilservices/templates/etc/ssh/sshd_config similarity index 100% rename from daemon/core/configservices/utilservices/templates/etc/ssh/sshd_config rename to daemon/core/services/defaults/utilservices/templates/etc/ssh/sshd_config diff --git a/daemon/core/configservices/utilservices/templates/ipforward.sh 
b/daemon/core/services/defaults/utilservices/templates/ipforward.sh similarity index 100% rename from daemon/core/configservices/utilservices/templates/ipforward.sh rename to daemon/core/services/defaults/utilservices/templates/ipforward.sh diff --git a/daemon/core/configservices/utilservices/templates/pcap.sh b/daemon/core/services/defaults/utilservices/templates/pcap.sh similarity index 100% rename from daemon/core/configservices/utilservices/templates/pcap.sh rename to daemon/core/services/defaults/utilservices/templates/pcap.sh diff --git a/daemon/core/configservices/utilservices/templates/startatd.sh b/daemon/core/services/defaults/utilservices/templates/startatd.sh similarity index 100% rename from daemon/core/configservices/utilservices/templates/startatd.sh rename to daemon/core/services/defaults/utilservices/templates/startatd.sh diff --git a/daemon/core/configservices/utilservices/templates/startdhcpclient.sh b/daemon/core/services/defaults/utilservices/templates/startdhcpclient.sh similarity index 100% rename from daemon/core/configservices/utilservices/templates/startdhcpclient.sh rename to daemon/core/services/defaults/utilservices/templates/startdhcpclient.sh diff --git a/daemon/core/configservices/utilservices/templates/startsshd.sh b/daemon/core/services/defaults/utilservices/templates/startsshd.sh similarity index 100% rename from daemon/core/configservices/utilservices/templates/startsshd.sh rename to daemon/core/services/defaults/utilservices/templates/startsshd.sh diff --git a/daemon/core/configservices/utilservices/templates/staticroute.sh b/daemon/core/services/defaults/utilservices/templates/staticroute.sh similarity index 100% rename from daemon/core/configservices/utilservices/templates/staticroute.sh rename to daemon/core/services/defaults/utilservices/templates/staticroute.sh diff --git a/daemon/core/configservices/utilservices/templates/var/www/index.html b/daemon/core/services/defaults/utilservices/templates/var/www/index.html similarity index 82% rename from daemon/core/configservices/utilservices/templates/var/www/index.html rename to daemon/core/services/defaults/utilservices/templates/var/www/index.html index bed270aea..1508c3b5e 100644 --- a/daemon/core/configservices/utilservices/templates/var/www/index.html +++ b/daemon/core/services/defaults/utilservices/templates/var/www/index.html @@ -6,7 +6,7 @@

    <h1>${node.name} web server</h1>

    <p>The web server software is running but no content has been added, yet.</p>

    % for iface in ifaces:
-      <li>${iface.name} - ${iface.addrlist}</li>
+      <li>${iface.name} - ${[str(x) for x in iface.ip4s]}</li>
    % endfor
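The utility-service templates above (apache2.conf, index.html) appear to use Mako-style syntax: `${...}` is evaluated as a Python expression and `% for` opens a template loop, which is why a literal Apache/shell `${VAR}` reference is written as `${"${APACHE_LOCK_DIR}"}`. A minimal rendering sketch of that behavior, assuming the `mako` package is installed and using purely illustrative interface data (`SimpleNamespace` stands in for the real interface object; in CORE the `ip4s` entries would be netaddr networks rather than plain strings):

```python
from types import SimpleNamespace

from mako.template import Template

# ${...} is evaluated as a Python expression, so a plain string literal is how a
# literal ${VAR} (left for Apache/shell to resolve at runtime) reaches the output:
print(Template('Mutex file:${"${APACHE_LOCK_DIR}"} default').render())
# Mutex file:${APACHE_LOCK_DIR} default

# index.html now renders addresses from iface.ip4s instead of the old addrlist:
tmpl = Template(
    "% for iface in ifaces:\n"
    "<li>${iface.name} - ${[str(x) for x in iface.ip4s]}</li>\n"
    "% endfor\n"
)
print(tmpl.render(ifaces=[SimpleNamespace(name="eth0", ip4s=["10.0.0.1/24"])]))
# <li>eth0 - ['10.0.0.1/24']</li>
```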
diff --git a/daemon/core/configservices/utilservices/templates/vsftpd.conf b/daemon/core/services/defaults/utilservices/templates/vsftpd.conf similarity index 100% rename from daemon/core/configservices/utilservices/templates/vsftpd.conf rename to daemon/core/services/defaults/utilservices/templates/vsftpd.conf diff --git a/daemon/core/configservice/dependencies.py b/daemon/core/services/dependencies.py similarity index 83% rename from daemon/core/configservice/dependencies.py rename to daemon/core/services/dependencies.py index 1fbc4e48d..d9664b5b8 100644 --- a/daemon/core/configservice/dependencies.py +++ b/daemon/core/services/dependencies.py @@ -4,24 +4,24 @@ logger = logging.getLogger(__name__) if TYPE_CHECKING: - from core.configservice.base import ConfigService + from core.services.base import CoreService -class ConfigServiceDependencies: +class ServiceDependencies: """ Generates sets of services to start in order of their dependencies. """ - def __init__(self, services: dict[str, "ConfigService"]) -> None: + def __init__(self, services: dict[str, "CoreService"]) -> None: """ - Create a ConfigServiceDependencies instance. + Create a ServiceDependencies instance. :param services: services for determining dependency sets """ # helpers to check validity self.dependents: dict[str, set[str]] = {} self.started: set[str] = set() - self.node_services: dict[str, "ConfigService"] = {} + self.node_services: dict[str, "CoreService"] = {} for service in services.values(): self.node_services[service.name] = service for dependency in service.dependencies: @@ -29,15 +29,15 @@ def __init__(self, services: dict[str, "ConfigService"]) -> None: dependents.add(service.name) # used to find paths - self.path: list["ConfigService"] = [] + self.path: list["CoreService"] = [] self.visited: set[str] = set() self.visiting: set[str] = set() - def startup_paths(self) -> list[list["ConfigService"]]: + def startup_paths(self) -> list[list["CoreService"]]: """ Find startup path sets based on service dependencies. - :return: lists of lists of services that can be started in parallel + :return: list of lists of services that can be started in parallel """ paths = [] for name in self.node_services: @@ -70,18 +70,18 @@ def _reset(self) -> None: self.visited.clear() self.visiting.clear() - def _start(self, service: "ConfigService") -> list["ConfigService"]: + def _start(self, service: "CoreService") -> list["CoreService"]: """ - Starts a oath for checking dependencies for a given service. + Starts a path for checking dependencies for a given service. :param service: service to check dependencies for - :return: list of config services to start in order + :return: list of services to start in order """ logger.debug("starting service dependency check: %s", service.name) self._reset() return self._visit(service) - def _visit(self, current_service: "ConfigService") -> list["ConfigService"]: + def _visit(self, current_service: "CoreService") -> list["CoreService"]: """ Visits a service when discovering dependency chains for service. 
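`ServiceDependencies` above groups a node's services so that each one starts only after the services it depends on. As a rough, self-contained illustration of that idea over plain service names (a simplified layered topological grouping, not the DFS path-building the class itself performs):

```python
def startup_layers(deps: dict[str, list[str]]) -> list[list[str]]:
    """Group service names into layers that can start in parallel, with every
    dependency placed in an earlier layer (simplified Kahn-style grouping)."""
    remaining = {name: set(d) for name, d in deps.items()}
    layers: list[list[str]] = []
    while remaining:
        # services whose dependencies have all been started already
        ready = sorted(name for name, d in remaining.items() if not d)
        if not ready:
            raise ValueError("cyclic service dependencies")
        layers.append(ready)
        for name in ready:
            del remaining[name]
        for d in remaining.values():
            d.difference_update(ready)
    return layers


# e.g. the Quagga routing daemons list zebra as a dependency:
print(startup_layers({"zebra": [], "IPForward": [], "OSPFv2": ["zebra"], "OSPFv3": ["zebra"]}))
# [['IPForward', 'zebra'], ['OSPFv2', 'OSPFv3']]
```

This mirrors the `dependencies: list[str] = ["zebra"]` attribute on `QuaggaService` above: zebra lands in an earlier layer than the daemons that hook into its configuration.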
diff --git a/daemon/core/services/emaneservices.py b/daemon/core/services/emaneservices.py deleted file mode 100644 index 43cd9af41..000000000 --- a/daemon/core/services/emaneservices.py +++ /dev/null @@ -1,32 +0,0 @@ -from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService -from core.xml import emanexml - - -class EmaneTransportService(CoreService): - name: str = "transportd" - group: str = "EMANE" - executables: tuple[str, ...] = ("emanetransportd", "emanegentransportxml") - dependencies: tuple[str, ...] = () - dirs: tuple[str, ...] = () - configs: tuple[str, ...] = ("emanetransport.sh",) - startup: tuple[str, ...] = (f"bash {configs[0]}",) - validate: tuple[str, ...] = (f"pidof {executables[0]}",) - validation_timer: float = 0.5 - shutdown: tuple[str, ...] = (f"killall {executables[0]}",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - emane_manager = node.session.emane - cfg = "" - for iface in node.get_ifaces(): - if not isinstance(iface.net, EmaneNet): - continue - emane_net = iface.net - config = emane_manager.get_iface_config(emane_net, iface) - if emanexml.is_external(config): - nem_id = emane_manager.get_nem_id(iface) - cfg += f"emanegentransportxml {iface.name}-platform.xml\n" - cfg += f"emanetransportd -r -l 0 -d transportdaemon{nem_id}.xml\n" - return cfg diff --git a/daemon/core/services/frr.py b/daemon/core/services/frr.py deleted file mode 100644 index 28756c19e..000000000 --- a/daemon/core/services/frr.py +++ /dev/null @@ -1,683 +0,0 @@ -""" -frr.py: defines routing services provided by FRRouting. -Assumes installation of FRR via https://deb.frrouting.org/ -""" -from typing import Optional - -import netaddr - -from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNode, NodeBase -from core.nodes.interface import DEFAULT_MTU, CoreInterface -from core.nodes.network import PtpNet, WlanNode -from core.nodes.physical import Rj45Node -from core.nodes.wireless import WirelessNode -from core.services.coreservices import CoreService - -FRR_STATE_DIR: str = "/var/run/frr" - - -def is_wireless(node: NodeBase) -> bool: - """ - Check if the node is a wireless type node. - - :param node: node to check type for - :return: True if wireless type, False otherwise - """ - return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) - - -class FRRZebra(CoreService): - name: str = "FRRzebra" - group: str = "FRR" - dirs: tuple[str, ...] = ("/usr/local/etc/frr", "/var/run/frr", "/var/log/frr") - configs: tuple[str, ...] = ( - "/usr/local/etc/frr/frr.conf", - "frrboot.sh", - "/usr/local/etc/frr/vtysh.conf", - "/usr/local/etc/frr/daemons", - ) - startup: tuple[str, ...] = ("bash frrboot.sh zebra",) - shutdown: tuple[str, ...] = ("killall zebra",) - validate: tuple[str, ...] = ("pidof zebra",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return the frr.conf or frrboot.sh file contents. - """ - if filename == cls.configs[0]: - return cls.generate_frr_conf(node) - elif filename == cls.configs[1]: - return cls.generate_frr_boot(node) - elif filename == cls.configs[2]: - return cls.generate_vtysh_conf(node) - elif filename == cls.configs[3]: - return cls.generate_frr_daemons(node) - else: - raise ValueError( - "file name (%s) is not a known configuration: %s", filename, cls.configs - ) - - @classmethod - def generate_vtysh_conf(cls, node: CoreNode) -> str: - """ - Returns configuration file text. 
- """ - return "service integrated-vtysh-config\n" - - @classmethod - def generate_frr_conf(cls, node: CoreNode) -> str: - """ - Returns configuration file text. Other services that depend on zebra - will have hooks that are invoked here. - """ - # we could verify here that filename == frr.conf - cfg = "" - for iface in node.get_ifaces(): - cfg += f"interface {iface.name}\n" - # include control interfaces in addressing but not routing daemons - if iface.control: - cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ips())) - cfg += "\n" - continue - cfgv4 = "" - cfgv6 = "" - want_ipv4 = False - want_ipv6 = False - for s in node.services: - if cls.name not in s.dependencies: - continue - if not (isinstance(s, FrrService) or issubclass(s, FrrService)): - continue - iface_config = s.generate_frr_iface_config(node, iface) - if s.ipv4_routing: - want_ipv4 = True - if s.ipv6_routing: - want_ipv6 = True - cfgv6 += iface_config - else: - cfgv4 += iface_config - - if want_ipv4: - cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ip4s)) - cfg += "\n" - cfg += cfgv4 - if want_ipv6: - cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ip6s)) - cfg += "\n" - cfg += cfgv6 - cfg += "!\n" - - for s in node.services: - if cls.name not in s.dependencies: - continue - if not (isinstance(s, FrrService) or issubclass(s, FrrService)): - continue - cfg += s.generate_frr_config(node) - return cfg - - @staticmethod - def addrstr(ip: netaddr.IPNetwork) -> str: - """ - helper for mapping IP addresses to zebra config statements - """ - address = str(ip.ip) - if netaddr.valid_ipv4(address): - return f"ip address {ip}" - elif netaddr.valid_ipv6(address): - return f"ipv6 address {ip}" - else: - raise ValueError(f"invalid address: {ip}") - - @classmethod - def generate_frr_boot(cls, node: CoreNode) -> str: - """ - Generate a shell script used to boot the FRR daemons. - """ - frr_bin_search = node.session.options.get( - "frr_bin_search", '"/usr/local/bin /usr/bin /usr/lib/frr"' - ) - frr_sbin_search = node.session.options.get( - "frr_sbin_search", - '"/usr/local/sbin /usr/sbin /usr/lib/frr /usr/libexec/frr"', - ) - cfg = f"""\ -#!/bin/sh -# auto-generated by zebra service (frr.py) -FRR_CONF={cls.configs[0]} -FRR_SBIN_SEARCH={frr_sbin_search} -FRR_BIN_SEARCH={frr_bin_search} -FRR_STATE_DIR={FRR_STATE_DIR} - -searchforprog() -{{ - prog=$1 - searchpath=$@ - ret= - for p in $searchpath; do - if [ -x $p/$prog ]; then - ret=$p - break - fi - done - echo $ret -}} - -confcheck() -{{ - CONF_DIR=`dirname $FRR_CONF` - # if /etc/frr exists, point /etc/frr/frr.conf -> CONF_DIR - if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/frr.conf ]; then - ln -s $CONF_DIR/frr.conf /etc/frr/frr.conf - fi - # if /etc/frr exists, point /etc/frr/vtysh.conf -> CONF_DIR - if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/vtysh.conf ]; then - ln -s $CONF_DIR/vtysh.conf /etc/frr/vtysh.conf - fi -}} - -bootdaemon() -{{ - FRR_SBIN_DIR=$(searchforprog $1 $FRR_SBIN_SEARCH) - if [ "z$FRR_SBIN_DIR" = "z" ]; then - echo "ERROR: FRR's '$1' daemon not found in search path:" - echo " $FRR_SBIN_SEARCH" - return 1 - fi - - flags="" - - if [ "$1" = "pimd" ] && \\ - grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $FRR_CONF; then - flags="$flags -6" - fi - - if [ "$1" = "ospfd" ]; then - flags="$flags --apiserver" - fi - - #force FRR to use CORE generated conf file - flags="$flags -d -f $FRR_CONF" - $FRR_SBIN_DIR/$1 $flags - - if [ "$?" 
!= "0" ]; then - echo "ERROR: FRR's '$1' daemon failed to start!:" - return 1 - fi -}} - -bootfrr() -{{ - FRR_BIN_DIR=$(searchforprog 'vtysh' $FRR_BIN_SEARCH) - if [ "z$FRR_BIN_DIR" = "z" ]; then - echo "ERROR: FRR's 'vtysh' program not found in search path:" - echo " $FRR_BIN_SEARCH" - return 1 - fi - - # fix /var/run/frr permissions - id -u frr 2>/dev/null >/dev/null - if [ "$?" = "0" ]; then - chown frr $FRR_STATE_DIR - fi - - bootdaemon "zebra" - if grep -q "^ip route " $FRR_CONF; then - bootdaemon "staticd" - fi - for r in rip ripng ospf6 ospf bgp babel isis; do - if grep -q "^router \\<${{r}}\\>" $FRR_CONF; then - bootdaemon "${{r}}d" - fi - done - - if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $FRR_CONF; then - bootdaemon "pimd" - fi - - $FRR_BIN_DIR/vtysh -b -}} - -if [ "$1" != "zebra" ]; then - echo "WARNING: '$1': all FRR daemons are launched by the 'zebra' service!" - exit 1 -fi - -confcheck -bootfrr -""" - for iface in node.get_ifaces(): - cfg += f"ip link set dev {iface.name} down\n" - cfg += "sleep 1\n" - cfg += f"ip link set dev {iface.name} up\n" - return cfg - - @classmethod - def generate_frr_daemons(cls, node: CoreNode) -> str: - """ - Returns configuration file text. - """ - return """\ -# -# When activation a daemon at the first time, a config file, even if it is -# empty, has to be present *and* be owned by the user and group "frr", else -# the daemon will not be started by /etc/init.d/frr. The permissions should -# be u=rw,g=r,o=. -# When using "vtysh" such a config file is also needed. It should be owned by -# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too. -# -# The watchfrr and zebra daemons are always started. -# -bgpd=yes -ospfd=yes -ospf6d=yes -ripd=yes -ripngd=yes -isisd=yes -pimd=yes -ldpd=yes -nhrpd=yes -eigrpd=yes -babeld=yes -sharpd=yes -staticd=yes -pbrd=yes -bfdd=yes -fabricd=yes - -# -# If this option is set the /etc/init.d/frr script automatically loads -# the config via "vtysh -b" when the servers are started. -# Check /etc/pam.d/frr if you intend to use "vtysh"! -# -vtysh_enable=yes -zebra_options=" -A 127.0.0.1 -s 90000000" -bgpd_options=" -A 127.0.0.1" -ospfd_options=" -A 127.0.0.1" -ospf6d_options=" -A ::1" -ripd_options=" -A 127.0.0.1" -ripngd_options=" -A ::1" -isisd_options=" -A 127.0.0.1" -pimd_options=" -A 127.0.0.1" -ldpd_options=" -A 127.0.0.1" -nhrpd_options=" -A 127.0.0.1" -eigrpd_options=" -A 127.0.0.1" -babeld_options=" -A 127.0.0.1" -sharpd_options=" -A 127.0.0.1" -pbrd_options=" -A 127.0.0.1" -staticd_options="-A 127.0.0.1" -bfdd_options=" -A 127.0.0.1" -fabricd_options="-A 127.0.0.1" - -# The list of daemons to watch is automatically generated by the init script. -#watchfrr_options="" - -# for debugging purposes, you can specify a "wrap" command to start instead -# of starting the daemon directly, e.g. to use valgrind on ospfd: -# ospfd_wrap="/usr/bin/valgrind" -# or you can use "all_wrap" for all daemons, e.g. to use perf record: -# all_wrap="/usr/bin/perf record --call-graph -" -# the normal daemon command is added to this at the end. -""" - - -class FrrService(CoreService): - """ - Parent class for FRR services. Defines properties and methods - common to FRR's routing daemons. - """ - - name: Optional[str] = None - group: str = "FRR" - dependencies: tuple[str, ...] = ("FRRzebra",) - meta: str = "The config file for this service can be found in the Zebra service." 
- ipv4_routing: bool = False - ipv6_routing: bool = False - - @staticmethod - def router_id(node: CoreNode) -> str: - """ - Helper to return the first IPv4 address of a node as its router ID. - """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) - return "0.0.0.0" - - @staticmethod - def rj45check(iface: CoreInterface) -> bool: - """ - Helper to detect whether interface is connected an external RJ45 - link. - """ - if iface.net: - for peer_iface in iface.net.get_ifaces(): - if peer_iface == iface: - continue - if isinstance(peer_iface.node, Rj45Node): - return True - return False - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - return "" - - @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return "" - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - return "" - - -class FRROspfv2(FrrService): - """ - The OSPFv2 service provides IPv4 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified frr.conf file. - """ - - name: str = "FRROSPFv2" - shutdown: tuple[str, ...] = ("killall ospfd",) - validate: tuple[str, ...] = ("pidof ospfd",) - ipv4_routing: bool = True - - @staticmethod - def mtu_check(iface: CoreInterface) -> str: - """ - Helper to detect MTU mismatch and add the appropriate OSPF - mtu-ignore command. This is needed when e.g. a node is linked via a - GreTap device. - """ - if iface.mtu != DEFAULT_MTU: - # a workaround for PhysicalNode GreTap, which has no knowledge of - # the other nodes/nets - return " ip ospf mtu-ignore\n" - if not iface.net: - return "" - for iface in iface.net.get_ifaces(): - if iface.mtu != iface.mtu: - return " ip ospf mtu-ignore\n" - return "" - - @staticmethod - def ptp_check(iface: CoreInterface) -> str: - """ - Helper to detect whether interface is connected to a notional - point-to-point link. - """ - if isinstance(iface.net, PtpNet): - return " ip ospf network point-to-point\n" - return "" - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - cfg = "router ospf\n" - rtrid = cls.router_id(node) - cfg += f" router-id {rtrid}\n" - # network 10.0.0.0/24 area 0 - for iface in node.get_ifaces(control=False): - for ip4 in iface.ip4s: - cfg += f" network {ip4} area 0\n" - cfg += " ospf opaque-lsa\n" - cfg += "!\n" - return cfg - - @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - cfg = cls.mtu_check(iface) - # external RJ45 connections will use default OSPF timers - if cls.rj45check(iface): - return cfg - cfg += cls.ptp_check(iface) - return ( - cfg - + """\ - ip ospf hello-interval 2 - ip ospf dead-interval 6 - ip ospf retransmit-interval 5 -""" - ) - - -class FRROspfv3(FrrService): - """ - The OSPFv3 service provides IPv6 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified frr.conf file. - """ - - name: str = "FRROSPFv3" - shutdown: tuple[str, ...] = ("killall ospf6d",) - validate: tuple[str, ...] = ("pidof ospf6d",) - ipv4_routing: bool = True - ipv6_routing: bool = True - - @staticmethod - def min_mtu(iface: CoreInterface) -> int: - """ - Helper to discover the minimum MTU of interfaces linked with the - given interface. 
- """ - mtu = iface.mtu - if not iface.net: - return mtu - for iface in iface.net.get_ifaces(): - if iface.mtu < mtu: - mtu = iface.mtu - return mtu - - @classmethod - def mtu_check(cls, iface: CoreInterface) -> str: - """ - Helper to detect MTU mismatch and add the appropriate OSPFv3 - ifmtu command. This is needed when e.g. a node is linked via a - GreTap device. - """ - minmtu = cls.min_mtu(iface) - if minmtu < iface.mtu: - return f" ipv6 ospf6 ifmtu {minmtu:d}\n" - else: - return "" - - @staticmethod - def ptp_check(iface: CoreInterface) -> str: - """ - Helper to detect whether interface is connected to a notional - point-to-point link. - """ - if isinstance(iface.net, PtpNet): - return " ipv6 ospf6 network point-to-point\n" - return "" - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - cfg = "router ospf6\n" - rtrid = cls.router_id(node) - cfg += f" router-id {rtrid}\n" - for iface in node.get_ifaces(control=False): - cfg += f" interface {iface.name} area 0.0.0.0\n" - cfg += "!\n" - return cfg - - @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return cls.mtu_check(iface) - - -class FRRBgp(FrrService): - """ - The BGP service provides interdomain routing. - Peers must be manually configured, with a full mesh for those - having the same AS number. - """ - - name: str = "FRRBGP" - shutdown: tuple[str, ...] = ("killall bgpd",) - validate: tuple[str, ...] = ("pidof bgpd",) - custom_needed: bool = True - ipv4_routing: bool = True - ipv6_routing: bool = True - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - cfg = "!\n! BGP configuration\n!\n" - cfg += "! You should configure the AS number below,\n" - cfg += "! along with this router's peers.\n!\n" - cfg += f"router bgp {node.id}\n" - rtrid = cls.router_id(node) - cfg += f" bgp router-id {rtrid}\n" - cfg += " redistribute connected\n" - cfg += "! neighbor 1.2.3.4 remote-as 555\n!\n" - return cfg - - -class FRRRip(FrrService): - """ - The RIP service provides IPv4 routing for wired networks. - """ - - name: str = "FRRRIP" - shutdown: tuple[str, ...] = ("killall ripd",) - validate: tuple[str, ...] = ("pidof ripd",) - ipv4_routing: bool = True - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - cfg = """\ -router rip - redistribute static - redistribute connected - redistribute ospf - network 0.0.0.0/0 -! -""" - return cfg - - -class FRRRipng(FrrService): - """ - The RIP NG service provides IPv6 routing for wired networks. - """ - - name: str = "FRRRIPNG" - shutdown: tuple[str, ...] = ("killall ripngd",) - validate: tuple[str, ...] = ("pidof ripngd",) - ipv6_routing: bool = True - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - cfg = """\ -router ripng - redistribute static - redistribute connected - redistribute ospf6 - network ::/0 -! -""" - return cfg - - -class FRRBabel(FrrService): - """ - The Babel service provides a loop-avoiding distance-vector routing - protocol for IPv6 and IPv4 with fast convergence properties. - """ - - name: str = "FRRBabel" - shutdown: tuple[str, ...] = ("killall babeld",) - validate: tuple[str, ...] 
= ("pidof babeld",) - ipv6_routing: bool = True - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - cfg = "router babel\n" - for iface in node.get_ifaces(control=False): - cfg += f" network {iface.name}\n" - cfg += " redistribute static\n redistribute ipv4 connected\n" - return cfg - - @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - if is_wireless(iface.net): - return " babel wireless\n no babel split-horizon\n" - else: - return " babel wired\n babel split-horizon\n" - - -class FRRpimd(FrrService): - """ - PIM multicast routing based on XORP. - """ - - name: str = "FRRpimd" - shutdown: tuple[str, ...] = ("killall pimd",) - validate: tuple[str, ...] = ("pidof pimd",) - ipv4_routing: bool = True - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - ifname = "eth0" - for iface in node.get_ifaces(): - if iface.name != "lo": - ifname = iface.name - break - cfg = "router mfea\n!\n" - cfg += "router igmp\n!\n" - cfg += "router pim\n" - cfg += " !ip pim rp-address 10.0.0.1\n" - cfg += f" ip pim bsr-candidate {ifname}\n" - cfg += f" ip pim rp-candidate {ifname}\n" - cfg += " !ip pim spt-threshold interval 10 bytes 80000\n" - return cfg - - @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return " ip mfea\n ip igmp\n ip pim\n" - - -class FRRIsis(FrrService): - """ - The ISIS service provides IPv4 and IPv6 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified frr.conf file. - """ - - name: str = "FRRISIS" - shutdown: tuple[str, ...] = ("killall isisd",) - validate: tuple[str, ...] = ("pidof isisd",) - ipv4_routing: bool = True - ipv6_routing: bool = True - - @staticmethod - def ptp_check(iface: CoreInterface) -> str: - """ - Helper to detect whether interface is connected to a notional - point-to-point link. - """ - if isinstance(iface.net, PtpNet): - return " isis network point-to-point\n" - return "" - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - cfg = "router isis DEFAULT\n" - cfg += f" net 47.0001.0000.1900.{node.id:04x}.00\n" - cfg += " metric-style wide\n" - cfg += " is-type level-2-only\n" - cfg += "!\n" - return cfg - - @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - cfg = " ip router isis DEFAULT\n" - cfg += " ipv6 router isis DEFAULT\n" - cfg += " isis circuit-type level-2-only\n" - cfg += cls.ptp_check(iface) - return cfg diff --git a/daemon/core/configservice/manager.py b/daemon/core/services/manager.py similarity index 55% rename from daemon/core/configservice/manager.py rename to daemon/core/services/manager.py index 542f3cc51..06d619fab 100644 --- a/daemon/core/configservice/manager.py +++ b/daemon/core/services/manager.py @@ -3,25 +3,33 @@ import pkgutil from pathlib import Path -from core import configservices, utils -from core.configservice.base import ConfigService +from core import utils from core.errors import CoreError +from core.services import defaults +from core.services.base import CoreService logger = logging.getLogger(__name__) -class ConfigServiceManager: +class ServiceManager: """ - Manager for configurable services. + Manager for services. """ def __init__(self): """ - Create a ConfigServiceManager instance. + Create a ServiceManager instance. 
""" - self.services: dict[str, type[ConfigService]] = {} - - def get_service(self, name: str) -> type[ConfigService]: + self.services: dict[str, type[CoreService]] = {} + self.defaults: dict[str, list[str]] = { + "mdr": ["zebra", "OSPFv3MDR", "IPForward"], + "PC": ["DefaultRoute"], + "prouter": [], + "router": ["zebra", "OSPFv2", "OSPFv3", "IPForward"], + "host": ["DefaultRoute", "SSH"], + } + + def get_service(self, name: str) -> type[CoreService]: """ Retrieve a service by name. @@ -34,7 +42,7 @@ def get_service(self, name: str) -> type[ConfigService]: raise CoreError(f"service does not exist {name}") return service_class - def add(self, service: type[ConfigService]) -> None: + def add(self, service: type[CoreService]) -> None: """ Add service to manager, checking service requirements have been met. @@ -42,49 +50,55 @@ def add(self, service: type[ConfigService]) -> None: :return: nothing :raises CoreError: when service is a duplicate or has unmet executables """ - name = service.name logger.debug( - "loading service: class(%s) name(%s)", service.__class__.__name__, name + "loading service: class(%s) name(%s)", + service.__class__.__name__, + service.name, ) + # avoid undefined services + if service.name is None or service.group is None: + raise CoreError( + f"service name({service.name}) and group({service.group}) must be defined" + ) # avoid duplicate services - if name in self.services: - raise CoreError(f"duplicate service being added: {name}") + if service.name in self.services: + raise CoreError(f"duplicate service being added: {service.name}") # validate dependent executables are present for executable in service.executables: try: utils.which(executable, required=True) except CoreError as e: - raise CoreError(f"config service({service.name}): {e}") + raise CoreError(f"service({service.name}): {e}") # make service available - self.services[name] = service + self.services[service.name] = service def load_locals(self) -> list[str]: """ - Search and add config service from local core module. + Search and add service from local core module. :return: list of errors when loading services """ errors = [] for module_info in pkgutil.walk_packages( - configservices.__path__, f"{configservices.__name__}." + defaults.__path__, f"{defaults.__name__}." ): - services = utils.load_module(module_info.name, ConfigService) + services = utils.load_module(module_info.name, CoreService) for service in services: try: self.add(service) except CoreError as e: errors.append(service.name) - logger.debug("not loading config service(%s): %s", service.name, e) + logger.debug("not loading service(%s): %s", service.name, e) return errors def load(self, path: Path) -> list[str]: """ - Search path provided for config services and add them for being managed. + Search path provided for services and add them for being managed. 
- :param path: path to search configurable services + :param path: path to search services :return: list errors when loading services """ path = pathlib.Path(path) @@ -92,8 +106,8 @@ def load(self, path: Path) -> list[str]: subdirs.append(path) service_errors = [] for subdir in subdirs: - logger.debug("loading config services from: %s", subdir) - services = utils.load_classes(subdir, ConfigService) + logger.debug("loading services from: %s", subdir) + services = utils.load_classes(subdir, CoreService) for service in services: try: self.add(service) diff --git a/daemon/core/services/nrl.py b/daemon/core/services/nrl.py deleted file mode 100644 index 32e19f606..000000000 --- a/daemon/core/services/nrl.py +++ /dev/null @@ -1,582 +0,0 @@ -""" -nrl.py: defines services provided by NRL protolib tools hosted here: - http://www.nrl.navy.mil/itd/ncs/products -""" -from typing import Optional - -from core import utils -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService - - -class NrlService(CoreService): - """ - Parent class for NRL services. Defines properties and methods - common to NRL's routing daemons. - """ - - name: Optional[str] = None - group: str = "ProtoSvc" - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - return "" - - @staticmethod - def firstipv4prefix(node: CoreNode, prefixlen: int = 24) -> str: - """ - Similar to QuaggaService.routerid(). Helper to return the first IPv4 - prefix of a node, using the supplied prefix length. This ignores the - interface's prefix length, so e.g. '/32' can turn into '/24'. - """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return f"{ip4.ip}/{prefixlen}" - return f"0.0.0.0/{prefixlen}" - - -class MgenSinkService(NrlService): - name: str = "MGEN_Sink" - executables: tuple[str, ...] = ("mgen",) - configs: tuple[str, ...] = ("sink.mgen",) - startup: tuple[str, ...] = ("mgen input sink.mgen",) - validate: tuple[str, ...] = ("pidof mgen",) - shutdown: tuple[str, ...] = ("killall mgen",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - cfg = "0.0 LISTEN UDP 5000\n" - for iface in node.get_ifaces(): - name = utils.sysctl_devname(iface.name) - cfg += f"0.0 Join 224.225.1.2 INTERFACE {name}\n" - return cfg - - @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: - cmd = cls.startup[0] - cmd += f" output /tmp/mgen_{node.name}.log" - return (cmd,) - - -class NrlNhdp(NrlService): - """ - NeighborHood Discovery Protocol for MANET networks. - """ - - name: str = "NHDP" - executables: tuple[str, ...] = ("nrlnhdp",) - startup: tuple[str, ...] = ("nrlnhdp",) - shutdown: tuple[str, ...] = ("killall nrlnhdp",) - validate: tuple[str, ...] = ("pidof nrlnhdp",) - - @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: - """ - Generate the appropriate command-line based on node interfaces. - """ - cmd = cls.startup[0] - cmd += " -l /var/log/nrlnhdp.log" - cmd += f" -rpipe {node.name}_nhdp" - servicenames = map(lambda x: x.name, node.services) - if "SMF" in servicenames: - cmd += " -flooding ecds" - cmd += f" -smfClient {node.name}_smf" - ifaces = node.get_ifaces(control=False) - if len(ifaces) > 0: - iface_names = map(lambda x: x.name, ifaces) - cmd += " -i " - cmd += " -i ".join(iface_names) - return (cmd,) - - -class NrlSmf(NrlService): - """ - Simplified Multicast Forwarding for MANET networks. - """ - - name: str = "SMF" - executables: tuple[str, ...] = ("nrlsmf",) - startup: tuple[str, ...] 
= ("bash startsmf.sh",) - shutdown: tuple[str, ...] = ("killall nrlsmf",) - validate: tuple[str, ...] = ("pidof nrlsmf",) - configs: tuple[str, ...] = ("startsmf.sh",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate a startup script for SMF. Because nrlsmf does not - daemonize, it can cause problems in some situations when launched - directly using vcmd. - """ - cfg = "#!/bin/sh\n" - cfg += "# auto-generated by nrl.py:NrlSmf.generateconfig()\n" - comments = "" - cmd = f"nrlsmf instance {node.name}_smf" - - servicenames = map(lambda x: x.name, node.services) - ifaces = node.get_ifaces(control=False) - if len(ifaces) == 0: - return "" - if len(ifaces) > 0: - if "NHDP" in servicenames: - comments += "# NHDP service is enabled\n" - cmd += " ecds " - elif "OLSR" in servicenames: - comments += "# OLSR service is enabled\n" - cmd += " smpr " - else: - cmd += " cf " - iface_names = map(lambda x: x.name, ifaces) - cmd += ",".join(iface_names) - - cmd += " hash MD5" - cmd += " log /var/log/nrlsmf.log" - cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n" - return cfg - - -class NrlOlsr(NrlService): - """ - Optimized Link State Routing protocol for MANET networks. - """ - - name: str = "OLSR" - executables: tuple[str, ...] = ("nrlolsrd",) - startup: tuple[str, ...] = ("nrlolsrd",) - shutdown: tuple[str, ...] = ("killall nrlolsrd",) - validate: tuple[str, ...] = ("pidof nrlolsrd",) - - @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: - """ - Generate the appropriate command-line based on node interfaces. - """ - cmd = cls.startup[0] - # are multiple interfaces supported? No. - ifaces = node.get_ifaces() - if len(ifaces) > 0: - iface = ifaces[0] - cmd += f" -i {iface.name}" - cmd += " -l /var/log/nrlolsrd.log" - cmd += f" -rpipe {node.name}_olsr" - servicenames = map(lambda x: x.name, node.services) - if "SMF" in servicenames and "NHDP" not in servicenames: - cmd += " -flooding s-mpr" - cmd += f" -smfClient {node.name}_smf" - if "zebra" in servicenames: - cmd += " -z" - return (cmd,) - - -class NrlOlsrv2(NrlService): - """ - Optimized Link State Routing protocol version 2 for MANET networks. - """ - - name: str = "OLSRv2" - executables: tuple[str, ...] = ("nrlolsrv2",) - startup: tuple[str, ...] = ("nrlolsrv2",) - shutdown: tuple[str, ...] = ("killall nrlolsrv2",) - validate: tuple[str, ...] = ("pidof nrlolsrv2",) - - @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: - """ - Generate the appropriate command-line based on node interfaces. - """ - cmd = cls.startup[0] - cmd += " -l /var/log/nrlolsrv2.log" - cmd += f" -rpipe {node.name}_olsrv2" - servicenames = map(lambda x: x.name, node.services) - if "SMF" in servicenames: - cmd += " -flooding ecds" - cmd += f" -smfClient {node.name}_smf" - cmd += " -p olsr" - ifaces = node.get_ifaces(control=False) - if len(ifaces) > 0: - iface_names = map(lambda x: x.name, ifaces) - cmd += " -i " - cmd += " -i ".join(iface_names) - return (cmd,) - - -class OlsrOrg(NrlService): - """ - Optimized Link State Routing protocol from olsr.org for MANET networks. - """ - - name: str = "OLSRORG" - executables: tuple[str, ...] = ("olsrd",) - configs: tuple[str, ...] = ("/etc/olsrd/olsrd.conf",) - dirs: tuple[str, ...] = ("/etc/olsrd",) - startup: tuple[str, ...] = ("olsrd",) - shutdown: tuple[str, ...] = ("killall olsrd",) - validate: tuple[str, ...] 
= ("pidof olsrd",) - - @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: - """ - Generate the appropriate command-line based on node interfaces. - """ - cmd = cls.startup[0] - ifaces = node.get_ifaces(control=False) - if len(ifaces) > 0: - iface_names = map(lambda x: x.name, ifaces) - cmd += " -i " - cmd += " -i ".join(iface_names) - return (cmd,) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate a default olsrd config file to use the broadcast address of - 255.255.255.255. - """ - cfg = """\ -# -# OLSR.org routing daemon config file -# This file contains the usual options for an ETX based -# stationary network without fisheye -# (for other options see olsrd.conf.default.full) -# -# Lines starting with a # are discarded -# - -#### ATTENTION for IPv6 users #### -# Because of limitations in the parser IPv6 addresses must NOT -# begin with a ":", so please add a "0" as a prefix. - -########################### -### Basic configuration ### -########################### -# keep this settings at the beginning of your first configuration file - -# Debug level (0-9) -# If set to 0 the daemon runs in the background, unless "NoFork" is set to true -# (Default is 1) - -# DebugLevel 1 - -# IP version to use (4 or 6) -# (Default is 4) - -# IpVersion 4 - -################################# -### OLSRd agent configuration ### -################################# -# this parameters control the settings of the routing agent which are not -# related to the OLSR protocol and it's extensions - -# FIBMetric controls the metric value of the host-routes OLSRd sets. -# - "flat" means that the metric value is always 2. This is the preferred value -# because it helps the linux kernel routing to clean up older routes -# - "correct" use the hopcount as the metric value. -# - "approx" use the hopcount as the metric value too, but does only update the -# hopcount if the nexthop changes too -# (Default is "flat") - -# FIBMetric "flat" - -####################################### -### Linux specific OLSRd extensions ### -####################################### -# these parameters are only working on linux at the moment - -# SrcIpRoutes tells OLSRd to set the Src flag of host routes to the originator-ip -# of the node. In addition to this an additional localhost device is created -# to make sure the returning traffic can be received. -# (Default is "no") - -# SrcIpRoutes no - -# Specify the proto tag to be used for routes olsr inserts into kernel -# currently only implemented for linux -# valid values under linux are 1 .. 254 -# 1 gets remapped by olsrd to 0 UNSPECIFIED (1 is reserved for ICMP redirects) -# 2 KERNEL routes (not very wise to use) -# 3 BOOT (should in fact not be used by routing daemons) -# 4 STATIC -# 8 .. 15 various routing daemons (gated, zebra, bird, & co) -# (defaults to 0 which gets replaced by an OS-specific default value -# under linux 3 (BOOT) (for backward compatibility) - -# RtProto 0 - -# Activates (in IPv6 mode) the automatic use of NIIT -# (see README-Olsr-Extensions) -# (default is "yes") - -# UseNiit yes - -# Activates the smartgateway ipip tunnel feature. -# See README-Olsr-Extensions for a description of smartgateways. -# (default is "no") - -# SmartGateway no - -# Signals that the server tunnel must always be removed on shutdown, -# irrespective of the interface up/down state during startup. 
-# (default is "no") - -# SmartGatewayAlwaysRemoveServerTunnel no - -# Determines the maximum number of gateways that can be in use at any given -# time. This setting is used to mitigate the effects of breaking connections -# (due to the selection of a new gateway) on a dynamic network. -# (default is 1) - -# SmartGatewayUseCount 1 - -# Determines the take-down percentage for a non-current smart gateway tunnel. -# If the cost of the current smart gateway tunnel is less than this percentage -# of the cost of the non-current smart gateway tunnel, then the non-current smart -# gateway tunnel is taken down because it is then presumed to be 'too expensive'. -# This setting is only relevant when SmartGatewayUseCount is larger than 1; -# a value of 0 will result in the tunnels not being taken down proactively. -# (default is 0) - -# SmartGatewayTakeDownPercentage 0 - -# Determines the policy routing script that is executed during startup and -# shutdown of olsrd. The script is only executed when SmartGatewayUseCount -# is set to a value larger than 1. The script must setup policy routing -# rules such that multi-gateway mode works. A sample script is included. -# (default is not set) - -# SmartGatewayPolicyRoutingScript "" - -# Determines the egress interfaces that are part of the multi-gateway setup and -# therefore only relevant when SmartGatewayUseCount is larger than 1 (in which -# case it must be explicitly set). -# (default is not set) - -# SmartGatewayEgressInterfaces "" - -# Determines the routing tables offset for multi-gateway policy routing tables -# See the policy routing script for an explanation. -# (default is 90) - -# SmartGatewayTablesOffset 90 - -# Determines the policy routing rules offset for multi-gateway policy routing -# rules. See the policy routing script for an explanation. -# (default is 0, which indicates that the rules and tables should be aligned and -# puts this value at SmartGatewayTablesOffset - # egress interfaces - -# # olsr interfaces) - -# SmartGatewayRulesOffset 87 - -# Allows the selection of a smartgateway with NAT (only for IPv4) -# (default is "yes") - -# SmartGatewayAllowNAT yes - -# Determines the period (in milliseconds) on which a new smart gateway -# selection is performed. -# (default is 10000 milliseconds) - -# SmartGatewayPeriod 10000 - -# Determines the number of times the link state database must be stable -# before a new smart gateway is selected. -# (default is 6) - -# SmartGatewayStableCount 6 - -# When another gateway than the current one has a cost of less than the cost -# of the current gateway multiplied by SmartGatewayThreshold then the smart -# gateway is switched to the other gateway. The unit is percentage. -# (defaults to 0) - -# SmartGatewayThreshold 0 - -# The weighing factor for the gateway uplink bandwidth (exit link, uplink). -# See README-Olsr-Extensions for a description of smart gateways. -# (default is 1) - -# SmartGatewayWeightExitLinkUp 1 - -# The weighing factor for the gateway downlink bandwidth (exit link, downlink). -# See README-Olsr-Extensions for a description of smart gateways. -# (default is 1) - -# SmartGatewayWeightExitLinkDown 1 - -# The weighing factor for the ETX costs. -# See README-Olsr-Extensions for a description of smart gateways. -# (default is 1) - -# SmartGatewayWeightEtx 1 - -# The divider for the ETX costs. -# See README-Olsr-Extensions for a description of smart gateways. -# (default is 0) - -# SmartGatewayDividerEtx 0 - -# Defines what kind of Uplink this node will publish as a -# smartgateway. 
The existence of the uplink is detected by -# a route to 0.0.0.0/0, ::ffff:0:0/96 and/or 2000::/3. -# possible values are "none", "ipv4", "ipv6", "both" -# (default is "both") - -# SmartGatewayUplink "both" - -# Specifies if the local ipv4 uplink use NAT -# (default is "yes") - -# SmartGatewayUplinkNAT yes - -# Specifies the speed of the uplink in kilobit/s. -# First parameter is upstream, second parameter is downstream -# (default is 128/1024) - -# SmartGatewaySpeed 128 1024 - -# Specifies the EXTERNAL ipv6 prefix of the uplink. A prefix -# length of more than 64 is not allowed. -# (default is 0::/0 - -# SmartGatewayPrefix 0::/0 - -############################## -### OLSR protocol settings ### -############################## - -# HNA (Host network association) allows the OLSR to announce -# additional IPs or IP subnets to the net that are reachable -# through this node. -# Syntax for HNA4 is "network-address network-mask" -# Syntax for HNA6 is "network-address prefix-length" -# (default is no HNA) -Hna4 -{ -# Internet gateway -# 0.0.0.0 0.0.0.0 -# specific small networks reachable through this node -# 15.15.0.0 255.255.255.0 -} -Hna6 -{ -# Internet gateway -# 0:: 0 -# specific small networks reachable through this node -# fec0:2200:106:0:0:0:0:0 48 -} - -################################ -### OLSR protocol extensions ### -################################ - -# Link quality algorithm (only for lq level 2) -# (see README-Olsr-Extensions) -# - "etx_float", a floating point ETX with exponential aging -# - "etx_fpm", same as ext_float, but with integer arithmetic -# - "etx_ff" (ETX freifunk), an etx variant which use all OLSR -# traffic (instead of only hellos) for ETX calculation -# - "etx_ffeth", an incompatible variant of etx_ff that allows -# ethernet links with ETX 0.1. -# (defaults to "etx_ff") - -# LinkQualityAlgorithm "etx_ff" - -# Fisheye mechanism for TCs (0 meansoff, 1 means on) -# (default is 1) - -LinkQualityFishEye 0 - -##################################### -### Example plugin configurations ### -##################################### -# Olsrd plugins to load -# This must be the absolute path to the file -# or the loader will use the following scheme: -# - Try the paths in the LD_LIBRARY_PATH -# environment variable. -# - The list of libraries cached in /etc/ld.so.cache -# - /lib, followed by /usr/lib -# -# the examples in this list are for linux, so check if the plugin is -# available if you use windows. -# each plugin should have a README file in it's lib subfolder - -# LoadPlugin "olsrd_txtinfo.dll" -#LoadPlugin "olsrd_txtinfo.so.0.1" -#{ - # the default port is 2006 but you can change it like this: - #PlParam "port" "8080" - - # You can set a "accept" single address to allow to connect to - # txtinfo. If no address is specified, then localhost (127.0.0.1) - # is allowed by default. txtinfo will only use the first "accept" - # parameter specified and will ignore the rest. - - # to allow a specific host: - #PlParam "accept" "172.29.44.23" - # if you set it to 0.0.0.0, it will accept all connections - #PlParam "accept" "0.0.0.0" -#} - -############################################# -### OLSRD default interface configuration ### -############################################# -# the default interface section can have the same values as the following -# interface configuration. It will allow you so set common options for all -# interfaces. 
- -InterfaceDefaults { - Ip4Broadcast 255.255.255.255 -} - -###################################### -### OLSRd Interfaces configuration ### -###################################### -# multiple interfaces can be specified for a single configuration block -# multiple configuration blocks can be specified - -# WARNING, don't forget to insert your interface names here ! -#Interface "" "" -#{ - # Interface Mode is used to prevent unnecessary - # packet forwarding on switched ethernet interfaces - # valid Modes are "mesh" and "ether" - # (default is "mesh") - - # Mode "mesh" -#} -""" - return cfg - - -class MgenActor(NrlService): - """ - ZpcMgenActor. - """ - - # a unique name is required, without spaces - name: str = "MgenActor" - group: str = "ProtoSvc" - executables: tuple[str, ...] = ("mgen",) - configs: tuple[str, ...] = ("start_mgen_actor.sh",) - startup: tuple[str, ...] = ("bash start_mgen_actor.sh",) - validate: tuple[str, ...] = ("pidof mgen",) - shutdown: tuple[str, ...] = ("killall mgen",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate a startup script for MgenActor. Because mgenActor does not - daemonize, it can cause problems in some situations when launched - directly using vcmd. - """ - cfg = "#!/bin/sh\n" - cfg += "# auto-generated by nrl.py:MgenActor.generateconfig()\n" - comments = "" - cmd = f"mgenBasicActor.py -n {node.name} -a 0.0.0.0" - ifaces = node.get_ifaces(control=False) - if len(ifaces) == 0: - return "" - cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n" - return cfg diff --git a/daemon/core/services/quagga.py b/daemon/core/services/quagga.py deleted file mode 100644 index b96a8eae4..000000000 --- a/daemon/core/services/quagga.py +++ /dev/null @@ -1,584 +0,0 @@ -""" -quagga.py: defines routing services provided by Quagga. -""" -from typing import Optional - -import netaddr - -from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNode, NodeBase -from core.nodes.interface import DEFAULT_MTU, CoreInterface -from core.nodes.network import PtpNet, WlanNode -from core.nodes.physical import Rj45Node -from core.nodes.wireless import WirelessNode -from core.services.coreservices import CoreService - -QUAGGA_STATE_DIR: str = "/var/run/quagga" - - -def is_wireless(node: NodeBase) -> bool: - """ - Check if the node is a wireless type node. - - :param node: node to check type for - :return: True if wireless type, False otherwise - """ - return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) - - -class Zebra(CoreService): - name: str = "zebra" - group: str = "Quagga" - dirs: tuple[str, ...] = ("/usr/local/etc/quagga", "/var/run/quagga") - configs: tuple[str, ...] = ( - "/usr/local/etc/quagga/Quagga.conf", - "quaggaboot.sh", - "/usr/local/etc/quagga/vtysh.conf", - ) - startup: tuple[str, ...] = ("bash quaggaboot.sh zebra",) - shutdown: tuple[str, ...] = ("killall zebra",) - validate: tuple[str, ...] = ("pidof zebra",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return the Quagga.conf or quaggaboot.sh file contents. 
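The unified Quagga.conf built by this class renders every interface address as a zebra statement via the small addrstr helper shown further down; below is a minimal standalone sketch of that mapping, where the example prefixes are placeholders rather than values from any real session.

```python
import netaddr


def addrstr(ip: netaddr.IPNetwork) -> str:
    # map an interface address to the zebra statement embedded in Quagga.conf
    address = str(ip.ip)
    if netaddr.valid_ipv4(address):
        return f"ip address {ip}"
    if netaddr.valid_ipv6(address):
        return f"ipv6 address {ip}"
    raise ValueError(f"invalid address: {ip}")


print(addrstr(netaddr.IPNetwork("10.0.0.1/24")))     # ip address 10.0.0.1/24
print(addrstr(netaddr.IPNetwork("2001:db8::1/64")))  # ipv6 address 2001:db8::1/64
```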
- """ - if filename == cls.configs[0]: - return cls.generate_quagga_conf(node) - elif filename == cls.configs[1]: - return cls.generate_quagga_boot(node) - elif filename == cls.configs[2]: - return cls.generate_vtysh_conf(node) - else: - raise ValueError( - "file name (%s) is not a known configuration: %s", filename, cls.configs - ) - - @classmethod - def generate_vtysh_conf(cls, node: CoreNode) -> str: - """ - Returns configuration file text. - """ - return "service integrated-vtysh-config\n" - - @classmethod - def generate_quagga_conf(cls, node: CoreNode) -> str: - """ - Returns configuration file text. Other services that depend on zebra - will have hooks that are invoked here. - """ - # we could verify here that filename == Quagga.conf - cfg = "" - for iface in node.get_ifaces(): - cfg += f"interface {iface.name}\n" - # include control interfaces in addressing but not routing daemons - if iface.control: - cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ips())) - cfg += "\n" - continue - cfgv4 = "" - cfgv6 = "" - want_ipv4 = False - want_ipv6 = False - for s in node.services: - if cls.name not in s.dependencies: - continue - if not (isinstance(s, QuaggaService) or issubclass(s, QuaggaService)): - continue - iface_config = s.generate_quagga_iface_config(node, iface) - if s.ipv4_routing: - want_ipv4 = True - if s.ipv6_routing: - want_ipv6 = True - cfgv6 += iface_config - else: - cfgv4 += iface_config - - if want_ipv4: - cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ip4s)) - cfg += "\n" - cfg += cfgv4 - if want_ipv6: - cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ip6s)) - cfg += "\n" - cfg += cfgv6 - cfg += "!\n" - - for s in node.services: - if cls.name not in s.dependencies: - continue - if not (isinstance(s, QuaggaService) or issubclass(s, QuaggaService)): - continue - cfg += s.generate_quagga_config(node) - return cfg - - @staticmethod - def addrstr(ip: netaddr.IPNetwork) -> str: - """ - helper for mapping IP addresses to zebra config statements - """ - address = str(ip.ip) - if netaddr.valid_ipv4(address): - return f"ip address {ip}" - elif netaddr.valid_ipv6(address): - return f"ipv6 address {ip}" - else: - raise ValueError(f"invalid address: {ip}") - - @classmethod - def generate_quagga_boot(cls, node: CoreNode) -> str: - """ - Generate a shell script used to boot the Quagga daemons. - """ - quagga_bin_search = node.session.options.get( - "quagga_bin_search", '"/usr/local/bin /usr/bin /usr/lib/quagga"' - ) - quagga_sbin_search = node.session.options.get( - "quagga_sbin_search", '"/usr/local/sbin /usr/sbin /usr/lib/quagga"' - ) - return f"""\ -#!/bin/sh -# auto-generated by zebra service (quagga.py) -QUAGGA_CONF={cls.configs[0]} -QUAGGA_SBIN_SEARCH={quagga_sbin_search} -QUAGGA_BIN_SEARCH={quagga_bin_search} -QUAGGA_STATE_DIR={QUAGGA_STATE_DIR} - -searchforprog() -{{ - prog=$1 - searchpath=$@ - ret= - for p in $searchpath; do - if [ -x $p/$prog ]; then - ret=$p - break - fi - done - echo $ret -}} - -confcheck() -{{ - CONF_DIR=`dirname $QUAGGA_CONF` - # if /etc/quagga exists, point /etc/quagga/Quagga.conf -> CONF_DIR - if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/Quagga.conf ]; then - ln -s $CONF_DIR/Quagga.conf /etc/quagga/Quagga.conf - fi - # if /etc/quagga exists, point /etc/quagga/vtysh.conf -> CONF_DIR - if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! 
-e /etc/quagga/vtysh.conf ]; then - ln -s $CONF_DIR/vtysh.conf /etc/quagga/vtysh.conf - fi -}} - -bootdaemon() -{{ - QUAGGA_SBIN_DIR=$(searchforprog $1 $QUAGGA_SBIN_SEARCH) - if [ "z$QUAGGA_SBIN_DIR" = "z" ]; then - echo "ERROR: Quagga's '$1' daemon not found in search path:" - echo " $QUAGGA_SBIN_SEARCH" - return 1 - fi - - flags="" - - if [ "$1" = "xpimd" ] && \\ - grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $QUAGGA_CONF; then - flags="$flags -6" - fi - - $QUAGGA_SBIN_DIR/$1 $flags -d - if [ "$?" != "0" ]; then - echo "ERROR: Quagga's '$1' daemon failed to start!:" - return 1 - fi -}} - -bootquagga() -{{ - QUAGGA_BIN_DIR=$(searchforprog 'vtysh' $QUAGGA_BIN_SEARCH) - if [ "z$QUAGGA_BIN_DIR" = "z" ]; then - echo "ERROR: Quagga's 'vtysh' program not found in search path:" - echo " $QUAGGA_BIN_SEARCH" - return 1 - fi - - # fix /var/run/quagga permissions - id -u quagga 2>/dev/null >/dev/null - if [ "$?" = "0" ]; then - chown quagga $QUAGGA_STATE_DIR - fi - - bootdaemon "zebra" - for r in rip ripng ospf6 ospf bgp babel; do - if grep -q "^router \\<${{r}}\\>" $QUAGGA_CONF; then - bootdaemon "${{r}}d" - fi - done - - if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $QUAGGA_CONF; then - bootdaemon "xpimd" - fi - - $QUAGGA_BIN_DIR/vtysh -b -}} - -if [ "$1" != "zebra" ]; then - echo "WARNING: '$1': all Quagga daemons are launched by the 'zebra' service!" - exit 1 -fi -confcheck -bootquagga -""" - - -class QuaggaService(CoreService): - """ - Parent class for Quagga services. Defines properties and methods - common to Quagga's routing daemons. - """ - - name: Optional[str] = None - group: str = "Quagga" - dependencies: tuple[str, ...] = (Zebra.name,) - meta: str = "The config file for this service can be found in the Zebra service." - ipv4_routing: bool = False - ipv6_routing: bool = False - - @staticmethod - def router_id(node: CoreNode) -> str: - """ - Helper to return the first IPv4 address of a node as its router ID. - """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) - return f"0.0.0.{node.id:d}" - - @staticmethod - def rj45check(iface: CoreInterface) -> bool: - """ - Helper to detect whether interface is connected an external RJ45 - link. - """ - if iface.net: - for peer_iface in iface.net.get_ifaces(): - if peer_iface == iface: - continue - if isinstance(peer_iface.node, Rj45Node): - return True - return False - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - return "" - - @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return "" - - @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: - return "" - - -class Ospfv2(QuaggaService): - """ - The OSPFv2 service provides IPv4 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified Quagga.conf file. - """ - - name: str = "OSPFv2" - shutdown: tuple[str, ...] = ("killall ospfd",) - validate: tuple[str, ...] = ("pidof ospfd",) - ipv4_routing: bool = True - - @staticmethod - def mtu_check(iface: CoreInterface) -> str: - """ - Helper to detect MTU mismatch and add the appropriate OSPF - mtu-ignore command. This is needed when e.g. a node is linked via a - GreTap device. 
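Note that the peer loop in the implementation below rebinds the name iface, so its mismatch test compares an interface's MTU with itself; the following is a standalone sketch of the intended check, using plain integers in place of interface objects and an assumed DEFAULT_MTU of 1500.

```python
DEFAULT_MTU = 1500  # stand-in for core.nodes.interface.DEFAULT_MTU (assumed value)


def needs_mtu_ignore(iface_mtu: int, peer_mtus: list[int]) -> bool:
    # mtu-ignore is warranted when this interface deviates from the default MTU
    # (e.g. a GreTap link) or disagrees with any peer on the same network
    if iface_mtu != DEFAULT_MTU:
        return True
    return any(peer_mtu != iface_mtu for peer_mtu in peer_mtus)


assert needs_mtu_ignore(1458, [1500]) is True      # GreTap-style reduced MTU
assert needs_mtu_ignore(1500, [1500, 1500]) is False
```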
- """ - if iface.mtu != DEFAULT_MTU: - # a workaround for PhysicalNode GreTap, which has no knowledge of - # the other nodes/nets - return " ip ospf mtu-ignore\n" - if not iface.net: - return "" - for iface in iface.net.get_ifaces(): - if iface.mtu != iface.mtu: - return " ip ospf mtu-ignore\n" - return "" - - @staticmethod - def ptp_check(iface: CoreInterface) -> str: - """ - Helper to detect whether interface is connected to a notional - point-to-point link. - """ - if isinstance(iface.net, PtpNet): - return " ip ospf network point-to-point\n" - return "" - - @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: - cfg = "router ospf\n" - rtrid = cls.router_id(node) - cfg += f" router-id {rtrid}\n" - # network 10.0.0.0/24 area 0 - for iface in node.get_ifaces(control=False): - for ip4 in iface.ip4s: - cfg += f" network {ip4} area 0\n" - cfg += "!\n" - return cfg - - @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - cfg = cls.mtu_check(iface) - # external RJ45 connections will use default OSPF timers - if cls.rj45check(iface): - return cfg - cfg += cls.ptp_check(iface) - return ( - cfg - + """\ - ip ospf hello-interval 2 - ip ospf dead-interval 6 - ip ospf retransmit-interval 5 -""" - ) - - -class Ospfv3(QuaggaService): - """ - The OSPFv3 service provides IPv6 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified Quagga.conf file. - """ - - name: str = "OSPFv3" - shutdown: tuple[str, ...] = ("killall ospf6d",) - validate: tuple[str, ...] = ("pidof ospf6d",) - ipv4_routing: bool = True - ipv6_routing: bool = True - - @staticmethod - def min_mtu(iface: CoreInterface) -> int: - """ - Helper to discover the minimum MTU of interfaces linked with the - given interface. - """ - mtu = iface.mtu - if not iface.net: - return mtu - for iface in iface.net.get_ifaces(): - if iface.mtu < mtu: - mtu = iface.mtu - return mtu - - @classmethod - def mtu_check(cls, iface: CoreInterface) -> str: - """ - Helper to detect MTU mismatch and add the appropriate OSPFv3 - ifmtu command. This is needed when e.g. a node is linked via a - GreTap device. - """ - minmtu = cls.min_mtu(iface) - if minmtu < iface.mtu: - return f" ipv6 ospf6 ifmtu {minmtu:d}\n" - else: - return "" - - @staticmethod - def ptp_check(iface: CoreInterface) -> str: - """ - Helper to detect whether interface is connected to a notional - point-to-point link. - """ - if isinstance(iface.net, PtpNet): - return " ipv6 ospf6 network point-to-point\n" - return "" - - @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: - cfg = "router ospf6\n" - rtrid = cls.router_id(node) - cfg += " instance-id 65\n" - cfg += f" router-id {rtrid}\n" - for iface in node.get_ifaces(control=False): - cfg += f" interface {iface.name} area 0.0.0.0\n" - cfg += "!\n" - return cfg - - @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return cls.mtu_check(iface) - - -class Ospfv3mdr(Ospfv3): - """ - The OSPFv3 MANET Designated Router (MDR) service provides IPv6 - routing for wireless networks. It does not build its own - configuration file but has hooks for adding to the - unified Quagga.conf file. 
- """ - - name: str = "OSPFv3MDR" - ipv4_routing: bool = True - - @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - cfg = cls.mtu_check(iface) - if is_wireless(iface.net): - return ( - cfg - + """\ - ipv6 ospf6 hello-interval 2 - ipv6 ospf6 dead-interval 6 - ipv6 ospf6 retransmit-interval 5 - ipv6 ospf6 network manet-designated-router - ipv6 ospf6 twohoprefresh 3 - ipv6 ospf6 adjacencyconnectivity uniconnected - ipv6 ospf6 lsafullness mincostlsa -""" - ) - else: - return cfg - - -class Bgp(QuaggaService): - """ - The BGP service provides interdomain routing. - Peers must be manually configured, with a full mesh for those - having the same AS number. - """ - - name: str = "BGP" - shutdown: tuple[str, ...] = ("killall bgpd",) - validate: tuple[str, ...] = ("pidof bgpd",) - custom_needed: bool = True - ipv4_routing: bool = True - ipv6_routing: bool = True - - @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: - cfg = "!\n! BGP configuration\n!\n" - cfg += "! You should configure the AS number below,\n" - cfg += "! along with this router's peers.\n!\n" - cfg += f"router bgp {node.id}\n" - rtrid = cls.router_id(node) - cfg += f" bgp router-id {rtrid}\n" - cfg += " redistribute connected\n" - cfg += "! neighbor 1.2.3.4 remote-as 555\n!\n" - return cfg - - -class Rip(QuaggaService): - """ - The RIP service provides IPv4 routing for wired networks. - """ - - name: str = "RIP" - shutdown: tuple[str, ...] = ("killall ripd",) - validate: tuple[str, ...] = ("pidof ripd",) - ipv4_routing: bool = True - - @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: - cfg = """\ -router rip - redistribute static - redistribute connected - redistribute ospf - network 0.0.0.0/0 -! -""" - return cfg - - -class Ripng(QuaggaService): - """ - The RIP NG service provides IPv6 routing for wired networks. - """ - - name: str = "RIPNG" - shutdown: tuple[str, ...] = ("killall ripngd",) - validate: tuple[str, ...] = ("pidof ripngd",) - ipv6_routing: bool = True - - @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: - cfg = """\ -router ripng - redistribute static - redistribute connected - redistribute ospf6 - network ::/0 -! -""" - return cfg - - -class Babel(QuaggaService): - """ - The Babel service provides a loop-avoiding distance-vector routing - protocol for IPv6 and IPv4 with fast convergence properties. - """ - - name: str = "Babel" - shutdown: tuple[str, ...] = ("killall babeld",) - validate: tuple[str, ...] = ("pidof babeld",) - ipv6_routing: bool = True - - @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: - cfg = "router babel\n" - for iface in node.get_ifaces(control=False): - cfg += f" network {iface.name}\n" - cfg += " redistribute static\n redistribute connected\n" - return cfg - - @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - if is_wireless(iface.net): - return " babel wireless\n no babel split-horizon\n" - else: - return " babel wired\n babel split-horizon\n" - - -class Xpimd(QuaggaService): - """ - PIM multicast routing based on XORP. - """ - - name: str = "Xpimd" - shutdown: tuple[str, ...] = ("killall xpimd",) - validate: tuple[str, ...] 
= ("pidof xpimd",) - ipv4_routing: bool = True - - @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: - ifname = "eth0" - for iface in node.get_ifaces(): - if iface.name != "lo": - ifname = iface.name - break - cfg = "router mfea\n!\n" - cfg += "router igmp\n!\n" - cfg += "router pim\n" - cfg += " !ip pim rp-address 10.0.0.1\n" - cfg += f" ip pim bsr-candidate {ifname}\n" - cfg += f" ip pim rp-candidate {ifname}\n" - cfg += " !ip pim spt-threshold interval 10 bytes 80000\n" - return cfg - - @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return " ip mfea\n ip igmp\n ip pim\n" diff --git a/daemon/core/services/sdn.py b/daemon/core/services/sdn.py deleted file mode 100644 index a31cf87d7..000000000 --- a/daemon/core/services/sdn.py +++ /dev/null @@ -1,131 +0,0 @@ -""" -sdn.py defines services to start Open vSwitch and the Ryu SDN Controller. -""" - -import re - -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService - - -class SdnService(CoreService): - """ - Parent class for SDN services. - """ - - group: str = "SDN" - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - return "" - - -class OvsService(SdnService): - name: str = "OvsService" - group: str = "SDN" - executables: tuple[str, ...] = ("ovs-ofctl", "ovs-vsctl") - dirs: tuple[str, ...] = ( - "/etc/openvswitch", - "/var/run/openvswitch", - "/var/log/openvswitch", - ) - configs: tuple[str, ...] = ("OvsService.sh",) - startup: tuple[str, ...] = ("bash OvsService.sh",) - shutdown: tuple[str, ...] = ("killall ovs-vswitchd", "killall ovsdb-server") - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - # Check whether the node is running zebra - has_zebra = 0 - for s in node.services: - if s.name == "zebra": - has_zebra = 1 - - cfg = "#!/bin/sh\n" - cfg += "# auto-generated by OvsService (OvsService.py)\n" - cfg += "## First make sure that the ovs services are up and running\n" - cfg += "/etc/init.d/openvswitch-switch start < /dev/null\n\n" - cfg += "## create the switch itself, set the fail mode to secure, \n" - cfg += "## this stops it from routing traffic without defined flows.\n" - cfg += "## remove the -- and everything after if you want it to act as a regular switch\n" - cfg += "ovs-vsctl add-br ovsbr0 -- set Bridge ovsbr0 fail-mode=secure\n" - cfg += "\n## Now add all our interfaces as ports to the switch\n" - - portnum = 1 - for iface in node.get_ifaces(control=False): - ifnumstr = re.findall(r"\d+", iface.name) - ifnum = ifnumstr[0] - - # create virtual interfaces - cfg += "## Create a veth pair to send the data to\n" - cfg += f"ip link add rtr{ifnum} type veth peer name sw{ifnum}\n" - - # remove ip address of eths because quagga/zebra will assign same IPs to rtr interfaces - # or assign them manually to rtr interfaces if zebra is not running - for ip4 in iface.ip4s: - cfg += f"ip addr del {ip4.ip} dev {iface.name}\n" - if has_zebra == 0: - cfg += f"ip addr add {ip4.ip} dev rtr{ifnum}\n" - for ip6 in iface.ip6s: - cfg += f"ip -6 addr del {ip6.ip} dev {iface.name}\n" - if has_zebra == 0: - cfg += f"ip -6 addr add {ip6.ip} dev rtr{ifnum}\n" - - # add interfaces to bridge - # Make port numbers explicit so they're easier to follow in - # reading the script - cfg += "## Add the CORE interface to the switch\n" - cfg += ( - f"ovs-vsctl add-port ovsbr0 eth{ifnum} -- " - f"set Interface eth{ifnum} ofport_request={portnum:d}\n" - ) - cfg += "## And then add its sibling veth 
interface\n" - cfg += ( - f"ovs-vsctl add-port ovsbr0 sw{ifnum} -- " - f"set Interface sw{ifnum} ofport_request={portnum + 1:d}\n" - ) - cfg += "## start them up so we can send/receive data\n" - cfg += f"ovs-ofctl mod-port ovsbr0 eth{ifnum} up\n" - cfg += f"ovs-ofctl mod-port ovsbr0 sw{ifnum} up\n" - cfg += "## Bring up the lower part of the veth pair\n" - cfg += f"ip link set dev rtr{ifnum} up\n" - portnum += 2 - - # Add rule for default controller if there is one local - # (even if the controller is not local, it finds it) - cfg += "\n## We assume there will be an SDN controller on the other end of this, \n" - cfg += "## but it will still function if there's not\n" - cfg += "ovs-vsctl set-controller ovsbr0 tcp:127.0.0.1:6633\n" - - cfg += "\n## Now to create some default flows, \n" - cfg += "## if the above controller will be present then you probably want to delete them\n" - # Setup default flows - portnum = 1 - for iface in node.get_ifaces(control=False): - cfg += "## Take the data from the CORE interface and put it on the veth and vice versa\n" - cfg += f"ovs-ofctl add-flow ovsbr0 priority=1000,in_port={portnum:d},action=output:{portnum + 1:d}\n" - cfg += f"ovs-ofctl add-flow ovsbr0 priority=1000,in_port={portnum + 1:d},action=output:{portnum:d}\n" - portnum += 2 - return cfg - - -class RyuService(SdnService): - name: str = "ryuService" - group: str = "SDN" - executables: tuple[str, ...] = ("ryu-manager",) - configs: tuple[str, ...] = ("ryuService.sh",) - startup: tuple[str, ...] = ("bash ryuService.sh",) - shutdown: tuple[str, ...] = ("killall ryu-manager",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return a string that will be written to filename, or sent to the - GUI for user customization. - """ - cfg = "#!/bin/sh\n" - cfg += "# auto-generated by ryuService (ryuService.py)\n" - cfg += ( - "ryu-manager --observe-links ryu.app.ofctl_rest ryu.app.rest_topology &\n" - ) - return cfg diff --git a/daemon/core/services/security.py b/daemon/core/services/security.py deleted file mode 100644 index afd71a140..000000000 --- a/daemon/core/services/security.py +++ /dev/null @@ -1,164 +0,0 @@ -""" -security.py: defines security services (vpnclient, vpnserver, ipsec and -firewall) -""" - -import logging - -from core import constants -from core.nodes.base import CoreNode -from core.nodes.interface import CoreInterface -from core.services.coreservices import CoreService - -logger = logging.getLogger(__name__) - - -class VPNClient(CoreService): - name: str = "VPNClient" - group: str = "Security" - configs: tuple[str, ...] = ("vpnclient.sh",) - startup: tuple[str, ...] = ("bash vpnclient.sh",) - shutdown: tuple[str, ...] = ("killall openvpn",) - validate: tuple[str, ...] = ("pidof openvpn",) - custom_needed: bool = True - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return the client.conf and vpnclient.sh file contents to - """ - cfg = "#!/bin/sh\n" - cfg += "# custom VPN Client configuration for service (security.py)\n" - fname = f"{constants.CORE_DATA_DIR}/examples/services/sampleVPNClient" - try: - with open(fname) as f: - cfg += f.read() - except OSError: - logger.exception( - "error opening VPN client configuration template (%s)", fname - ) - return cfg - - -class VPNServer(CoreService): - name: str = "VPNServer" - group: str = "Security" - configs: tuple[str, ...] = ("vpnserver.sh",) - startup: tuple[str, ...] = ("bash vpnserver.sh",) - shutdown: tuple[str, ...] 
= ("killall openvpn",) - validate: tuple[str, ...] = ("pidof openvpn",) - custom_needed: bool = True - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return the sample server.conf and vpnserver.sh file contents to - GUI for user customization. - """ - cfg = "#!/bin/sh\n" - cfg += "# custom VPN Server Configuration for service (security.py)\n" - fname = f"{constants.CORE_DATA_DIR}/examples/services/sampleVPNServer" - try: - with open(fname) as f: - cfg += f.read() - except OSError: - logger.exception( - "Error opening VPN server configuration template (%s)", fname - ) - return cfg - - -class IPsec(CoreService): - name: str = "IPsec" - group: str = "Security" - configs: tuple[str, ...] = ("ipsec.sh",) - startup: tuple[str, ...] = ("bash ipsec.sh",) - shutdown: tuple[str, ...] = ("killall racoon",) - custom_needed: bool = True - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return the ipsec.conf and racoon.conf file contents to - GUI for user customization. - """ - cfg = "#!/bin/sh\n" - cfg += "# set up static tunnel mode security assocation for service " - cfg += "(security.py)\n" - fname = f"{constants.CORE_DATA_DIR}/examples/services/sampleIPsec" - try: - with open(fname) as f: - cfg += f.read() - except OSError: - logger.exception("Error opening IPsec configuration template (%s)", fname) - return cfg - - -class Firewall(CoreService): - name: str = "Firewall" - group: str = "Security" - configs: tuple[str, ...] = ("firewall.sh",) - startup: tuple[str, ...] = ("bash firewall.sh",) - custom_needed: bool = True - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return the firewall rule examples to GUI for user customization. - """ - cfg = "#!/bin/sh\n" - cfg += "# custom node firewall rules for service (security.py)\n" - fname = f"{constants.CORE_DATA_DIR}/examples/services/sampleFirewall" - try: - with open(fname) as f: - cfg += f.read() - except OSError: - logger.exception( - "Error opening Firewall configuration template (%s)", fname - ) - return cfg - - -class Nat(CoreService): - """ - IPv4 source NAT service. - """ - - name: str = "NAT" - group: str = "Security" - executables: tuple[str, ...] = ("iptables",) - configs: tuple[str, ...] = ("nat.sh",) - startup: tuple[str, ...] = ("bash nat.sh",) - custom_needed: bool = False - - @classmethod - def generate_iface_nat_rule(cls, iface: CoreInterface, prefix: str = "") -> str: - """ - Generate a NAT line for one interface. 
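For reference, a minimal standalone sketch of the three iptables lines this helper emits per interface, mirroring the logic shown below; eth0 and eth1 are placeholder interface names.

```python
def iface_nat_rules(ifname: str, prefix: str = "") -> str:
    # masquerade outbound traffic, allow established return traffic, drop the rest
    return (
        f"{prefix}iptables -t nat -A POSTROUTING -o {ifname} -j MASQUERADE\n"
        f"{prefix}iptables -A FORWARD -i {ifname} -m state --state RELATED,ESTABLISHED -j ACCEPT\n"
        f"{prefix}iptables -A FORWARD -i {ifname} -j DROP\n"
    )


print(iface_nat_rules("eth0"))       # active rules for the first interface
print(iface_nat_rules("eth1", "#"))  # commented-out rules for any additional interface
```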
- """ - cfg = prefix + "iptables -t nat -A POSTROUTING -o " - cfg += iface.name + " -j MASQUERADE\n" - cfg += prefix + "iptables -A FORWARD -i " + iface.name - cfg += " -m state --state RELATED,ESTABLISHED -j ACCEPT\n" - cfg += prefix + "iptables -A FORWARD -i " - cfg += iface.name + " -j DROP\n" - return cfg - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - NAT out the first interface - """ - cfg = "#!/bin/sh\n" - cfg += "# generated by security.py\n" - cfg += "# NAT out the first interface by default\n" - have_nat = False - for iface in node.get_ifaces(control=False): - if have_nat: - cfg += cls.generate_iface_nat_rule(iface, prefix="#") - else: - have_nat = True - cfg += "# NAT out the " + iface.name + " interface\n" - cfg += cls.generate_iface_nat_rule(iface) - cfg += "\n" - return cfg diff --git a/daemon/core/services/ucarp.py b/daemon/core/services/ucarp.py deleted file mode 100644 index c6f2256ec..000000000 --- a/daemon/core/services/ucarp.py +++ /dev/null @@ -1,165 +0,0 @@ -""" -ucarp.py: defines high-availability IP address controlled by ucarp -""" - -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService - -UCARP_ETC = "/usr/local/etc/ucarp" - - -class Ucarp(CoreService): - name: str = "ucarp" - group: str = "Utility" - dirs: tuple[str, ...] = (UCARP_ETC,) - configs: tuple[str, ...] = ( - UCARP_ETC + "/default.sh", - UCARP_ETC + "/default-up.sh", - UCARP_ETC + "/default-down.sh", - "ucarpboot.sh", - ) - startup: tuple[str, ...] = ("bash ucarpboot.sh",) - shutdown: tuple[str, ...] = ("killall ucarp",) - validate: tuple[str, ...] = ("pidof ucarp",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return the default file contents - """ - if filename == cls.configs[0]: - return cls.generate_ucarp_conf(node) - elif filename == cls.configs[1]: - return cls.generate_vip_up(node) - elif filename == cls.configs[2]: - return cls.generate_vip_down(node) - elif filename == cls.configs[3]: - return cls.generate_ucarp_boot(node) - else: - raise ValueError - - @classmethod - def generate_ucarp_conf(cls, node: CoreNode) -> str: - """ - Returns configuration file text. 
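The template that follows resolves the ucarp binary through a per-session option with a hard-coded fallback; here is a tiny sketch of that lookup pattern, using a plain dict in place of node.session.options.

```python
# plain dict standing in for node.session.options
session_options = {"ucarp_bin": "/usr/local/sbin/ucarp"}

UCARP_ETC = "/usr/local/etc/ucarp"
# fall back to /usr/sbin/ucarp when the session does not override the path
ucarp_bin = session_options.get("ucarp_bin", "/usr/sbin/ucarp")

print(f"UCARP_EXEC={ucarp_bin}")
print(f"UCARP_CFGDIR={UCARP_ETC}")
```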
- """ - ucarp_bin = node.session.options.get("ucarp_bin", "/usr/sbin/ucarp") - return f"""\ -#!/bin/sh -# Location of UCARP executable -UCARP_EXEC={ucarp_bin} - -# Location of the UCARP config directory -UCARP_CFGDIR={UCARP_ETC} - -# Logging Facility -FACILITY=daemon - -# Instance ID -# Any number from 1 to 255 -INSTANCE_ID=1 - -# Password -# Master and Backup(s) need to be the same -PASSWORD="changeme" - -# The failover application address -VIRTUAL_ADDRESS=127.0.0.254 -VIRTUAL_NET=8 - -# Interface for IP Address -INTERFACE=lo - -# Maintanence address of the local machine -SOURCE_ADDRESS=127.0.0.1 - -# The ratio number to be considered before marking the node as dead -DEAD_RATIO=3 - -# UCARP base, lower number will be preferred master -# set to same to have master stay as long as possible -UCARP_BASE=1 -SKEW=0 - -# UCARP options -# -z run shutdown script on exit -# -P force preferred master -# -n don't run down script at start up when we are backup -# -M use broadcast instead of multicast -# -S ignore interface state -OPTIONS="-z -n -M" - -# Send extra parameter to down and up scripts -#XPARAM="-x " -XPARAM="-x ${{VIRTUAL_NET}}" - -# The start and stop scripts -START_SCRIPT=${{UCARP_CFGDIR}}/default-up.sh -STOP_SCRIPT=${{UCARP_CFGDIR}}/default-down.sh - -# These line should not need to be touched -UCARP_OPTS="$OPTIONS -b $UCARP_BASE -k $SKEW -i $INTERFACE -v $INSTANCE_ID -p $PASSWORD -u $START_SCRIPT -d $STOP_SCRIPT -a $VIRTUAL_ADDRESS -s $SOURCE_ADDRESS -f $FACILITY $XPARAM" - -${{UCARP_EXEC}} -B ${{UCARP_OPTS}} -""" - - @classmethod - def generate_ucarp_boot(cls, node: CoreNode) -> str: - """ - Generate a shell script used to boot the Ucarp daemons. - """ - return f"""\ -#!/bin/sh -# Location of the UCARP config directory -UCARP_CFGDIR={UCARP_ETC} - -chmod a+x ${{UCARP_CFGDIR}}/*.sh - -# Start the default ucarp daemon configuration -${{UCARP_CFGDIR}}/default.sh - -""" - - @classmethod - def generate_vip_up(cls, node: CoreNode) -> str: - """ - Generate a shell script used to start the virtual ip - """ - return """\ -#!/bin/bash - -# Should be invoked as "default-up.sh " -exec 2> /dev/null - -IP="${2}" -NET="${3}" -if [ -z "$NET" ]; then - NET="24" -fi - -/sbin/ip addr add ${IP}/${NET} dev "$1" - - -""" - - @classmethod - def generate_vip_down(cls, node: CoreNode) -> str: - """ - Generate a shell script used to stop the virtual ip - """ - return """\ -#!/bin/bash - -# Should be invoked as "default-down.sh " -exec 2> /dev/null - -IP="${2}" -NET="${3}" -if [ -z "$NET" ]; then - NET="24" -fi - -/sbin/ip addr del ${IP}/${NET} dev "$1" - - -""" diff --git a/daemon/core/services/utility.py b/daemon/core/services/utility.py deleted file mode 100644 index e83cb9d56..000000000 --- a/daemon/core/services/utility.py +++ /dev/null @@ -1,665 +0,0 @@ -""" -utility.py: defines miscellaneous utility services. -""" -from typing import Optional - -import netaddr - -from core import utils -from core.errors import CoreCommandError -from core.executables import SYSCTL -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService, ServiceMode - - -class UtilService(CoreService): - """ - Parent class for utility services. - """ - - name: Optional[str] = None - group: str = "Utility" - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - return "" - - -class IPForwardService(UtilService): - name: str = "IPForward" - configs: tuple[str, ...] = ("ipforward.sh",) - startup: tuple[str, ...] 
= ("bash ipforward.sh",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - return cls.generateconfiglinux(node, filename) - - @classmethod - def generateconfiglinux(cls, node: CoreNode, filename: str) -> str: - cfg = f"""\ -#!/bin/sh -# auto-generated by IPForward service (utility.py) -{SYSCTL} -w net.ipv4.conf.all.forwarding=1 -{SYSCTL} -w net.ipv4.conf.default.forwarding=1 -{SYSCTL} -w net.ipv6.conf.all.forwarding=1 -{SYSCTL} -w net.ipv6.conf.default.forwarding=1 -{SYSCTL} -w net.ipv4.conf.all.send_redirects=0 -{SYSCTL} -w net.ipv4.conf.default.send_redirects=0 -{SYSCTL} -w net.ipv4.conf.all.rp_filter=0 -{SYSCTL} -w net.ipv4.conf.default.rp_filter=0 -""" - for iface in node.get_ifaces(): - name = utils.sysctl_devname(iface.name) - cfg += f"{SYSCTL} -w net.ipv4.conf.{name}.forwarding=1\n" - cfg += f"{SYSCTL} -w net.ipv4.conf.{name}.send_redirects=0\n" - cfg += f"{SYSCTL} -w net.ipv4.conf.{name}.rp_filter=0\n" - return cfg - - -class DefaultRouteService(UtilService): - name: str = "DefaultRoute" - configs: tuple[str, ...] = ("defaultroute.sh",) - startup: tuple[str, ...] = ("bash defaultroute.sh",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - routes = [] - ifaces = node.get_ifaces() - if ifaces: - iface = ifaces[0] - for ip in iface.ips(): - net = ip.cidr - if net.size > 1: - router = net[1] - routes.append(str(router)) - cfg = "#!/bin/sh\n" - cfg += "# auto-generated by DefaultRoute service (utility.py)\n" - for route in routes: - cfg += f"ip route add default via {route}\n" - return cfg - - -class DefaultMulticastRouteService(UtilService): - name: str = "DefaultMulticastRoute" - configs: tuple[str, ...] = ("defaultmroute.sh",) - startup: tuple[str, ...] = ("bash defaultmroute.sh",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - cfg = "#!/bin/sh\n" - cfg += "# auto-generated by DefaultMulticastRoute service (utility.py)\n" - cfg += "# the first interface is chosen below; please change it " - cfg += "as needed\n" - for iface in node.get_ifaces(control=False): - rtcmd = "ip route add 224.0.0.0/4 dev" - cfg += f"{rtcmd} {iface.name}\n" - cfg += "\n" - break - return cfg - - -class StaticRouteService(UtilService): - name: str = "StaticRoute" - configs: tuple[str, ...] = ("staticroute.sh",) - startup: tuple[str, ...] = ("bash staticroute.sh",) - custom_needed: bool = True - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - cfg = "#!/bin/sh\n" - cfg += "# auto-generated by StaticRoute service (utility.py)\n#\n" - cfg += "# NOTE: this service must be customized to be of any use\n" - cfg += "# Below are samples that you can uncomment and edit.\n#\n" - for iface in node.get_ifaces(control=False): - cfg += "\n".join(map(cls.routestr, iface.ips())) - cfg += "\n" - return cfg - - @staticmethod - def routestr(ip: netaddr.IPNetwork) -> str: - address = str(ip.ip) - if netaddr.valid_ipv6(address): - dst = "3ffe:4::/64" - else: - dst = "10.9.8.0/24" - if ip[-2] == ip[1]: - return "" - else: - rtcmd = f"#/sbin/ip route add {dst} via" - return f"{rtcmd} {ip[1]}" - - -class SshService(UtilService): - name: str = "SSH" - configs: tuple[str, ...] = ("startsshd.sh", "/etc/ssh/sshd_config") - dirs: tuple[str, ...] = ("/etc/ssh", "/var/run/sshd") - startup: tuple[str, ...] = ("bash startsshd.sh",) - shutdown: tuple[str, ...] 
= ("killall sshd",) - validation_mode: ServiceMode = ServiceMode.BLOCKING - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Use a startup script for launching sshd in order to wait for host - key generation. - """ - sshcfgdir = cls.dirs[0] - sshstatedir = cls.dirs[1] - sshlibdir = "/usr/lib/openssh" - if filename == "startsshd.sh": - return f"""\ -#!/bin/sh -# auto-generated by SSH service (utility.py) -ssh-keygen -q -t rsa -N "" -f {sshcfgdir}/ssh_host_rsa_key -chmod 655 {sshstatedir} -# wait until RSA host key has been generated to launch sshd -/usr/sbin/sshd -f {sshcfgdir}/sshd_config -""" - else: - return f"""\ -# auto-generated by SSH service (utility.py) -Port 22 -Protocol 2 -HostKey {sshcfgdir}/ssh_host_rsa_key -UsePrivilegeSeparation yes -PidFile {sshstatedir}/sshd.pid - -KeyRegenerationInterval 3600 -ServerKeyBits 768 - -SyslogFacility AUTH -LogLevel INFO - -LoginGraceTime 120 -PermitRootLogin yes -StrictModes yes - -RSAAuthentication yes -PubkeyAuthentication yes - -IgnoreRhosts yes -RhostsRSAAuthentication no -HostbasedAuthentication no - -PermitEmptyPasswords no -ChallengeResponseAuthentication no - -X11Forwarding yes -X11DisplayOffset 10 -PrintMotd no -PrintLastLog yes -TCPKeepAlive yes - -AcceptEnv LANG LC_* -Subsystem sftp {sshlibdir}/sftp-server -UsePAM yes -UseDNS no -""" - - -class DhcpService(UtilService): - name: str = "DHCP" - configs: tuple[str, ...] = ("/etc/dhcp/dhcpd.conf",) - dirs: tuple[str, ...] = ("/etc/dhcp", "/var/lib/dhcp") - startup: tuple[str, ...] = ("touch /var/lib/dhcp/dhcpd.leases", "dhcpd") - shutdown: tuple[str, ...] = ("killall dhcpd",) - validate: tuple[str, ...] = ("pidof dhcpd",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate a dhcpd config file using the network address of - each interface. - """ - cfg = """\ -# auto-generated by DHCP service (utility.py) -# NOTE: move these option lines into the desired pool { } block(s) below -#option domain-name "test.com"; -#option domain-name-servers 10.0.0.1; -#option routers 10.0.0.1; - -log-facility local6; - -default-lease-time 600; -max-lease-time 7200; - -ddns-update-style none; -""" - for iface in node.get_ifaces(control=False): - cfg += "\n".join(map(cls.subnetentry, iface.ip4s)) - cfg += "\n" - return cfg - - @staticmethod - def subnetentry(ip: netaddr.IPNetwork) -> str: - """ - Generate a subnet declaration block given an IPv4 prefix string - for inclusion in the dhcpd3 config file. - """ - if ip.size == 1: - return "" - # divide the address space in half - index = (ip.size - 2) / 2 - rangelow = ip[index] - rangehigh = ip[-2] - return f""" -subnet {ip.cidr.ip} netmask {ip.netmask} {{ - pool {{ - range {rangelow} {rangehigh}; - default-lease-time 600; - option routers {ip.ip}; - }} -}} -""" - - -class DhcpClientService(UtilService): - """ - Use a DHCP client for all interfaces for addressing. - """ - - name: str = "DHCPClient" - configs: tuple[str, ...] = ("startdhcpclient.sh",) - startup: tuple[str, ...] = ("bash startdhcpclient.sh",) - shutdown: tuple[str, ...] = ("killall dhclient",) - validate: tuple[str, ...] = ("pidof dhclient",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate a script to invoke dhclient on all interfaces. 
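A minimal standalone sketch of the per-interface dhclient invocations the generated script contains, mirroring the loop below; eth0 and eth1 are placeholder interface names.

```python
def dhclient_script(ifnames: list[str]) -> str:
    # one non-blocking dhclient per interface, each with its own pid and lease file
    lines = ["#!/bin/sh"]
    for name in ifnames:
        lines.append(
            f"/sbin/dhclient -nw -pf /var/run/dhclient-{name}.pid"
            f" -lf /var/run/dhclient-{name}.lease {name}"
        )
    return "\n".join(lines) + "\n"


print(dhclient_script(["eth0", "eth1"]))
```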
- """ - cfg = "#!/bin/sh\n" - cfg += "# auto-generated by DHCPClient service (utility.py)\n" - cfg += "# uncomment this mkdir line and symlink line to enable client-" - cfg += "side DNS\n# resolution based on the DHCP server response.\n" - cfg += "#mkdir -p /var/run/resolvconf/interface\n" - for iface in node.get_ifaces(control=False): - cfg += f"#ln -s /var/run/resolvconf/interface/{iface.name}.dhclient" - cfg += " /var/run/resolvconf/resolv.conf\n" - cfg += f"/sbin/dhclient -nw -pf /var/run/dhclient-{iface.name}.pid" - cfg += f" -lf /var/run/dhclient-{iface.name}.lease {iface.name}\n" - return cfg - - -class FtpService(UtilService): - """ - Start a vsftpd server. - """ - - name: str = "FTP" - configs: tuple[str, ...] = ("vsftpd.conf",) - dirs: tuple[str, ...] = ("/var/run/vsftpd/empty", "/var/ftp") - startup: tuple[str, ...] = ("vsftpd ./vsftpd.conf",) - shutdown: tuple[str, ...] = ("killall vsftpd",) - validate: tuple[str, ...] = ("pidof vsftpd",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate a vsftpd.conf configuration file. - """ - return """\ -# vsftpd.conf auto-generated by FTP service (utility.py) -listen=YES -anonymous_enable=YES -local_enable=YES -dirmessage_enable=YES -use_localtime=YES -xferlog_enable=YES -connect_from_port_20=YES -xferlog_file=/var/log/vsftpd.log -ftpd_banner=Welcome to the CORE FTP service -secure_chroot_dir=/var/run/vsftpd/empty -anon_root=/var/ftp -""" - - -class HttpService(UtilService): - """ - Start an apache server. - """ - - name: str = "HTTP" - configs: tuple[str, ...] = ( - "/etc/apache2/apache2.conf", - "/etc/apache2/envvars", - "/var/www/index.html", - ) - dirs: tuple[str, ...] = ( - "/etc/apache2", - "/var/run/apache2", - "/var/log/apache2", - "/run/lock", - "/var/lock/apache2", - "/var/www", - ) - startup: tuple[str, ...] = ("chown www-data /var/lock/apache2", "apache2ctl start") - shutdown: tuple[str, ...] = ("apache2ctl stop",) - validate: tuple[str, ...] = ("pidof apache2",) - APACHEVER22: int = 22 - APACHEVER24: int = 24 - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate an apache2.conf configuration file. - """ - if filename == cls.configs[0]: - return cls.generateapache2conf(node, filename) - elif filename == cls.configs[1]: - return cls.generateenvvars(node, filename) - elif filename == cls.configs[2]: - return cls.generatehtml(node, filename) - else: - return "" - - @classmethod - def detectversionfromcmd(cls) -> int: - """ - Detect the apache2 version using the 'a2query' command. 
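A standalone sketch of this probe using subprocess instead of the project's internal command helper; it assumes a2query is on PATH and treats any failure as an Apache 2.2 installation.

```python
import subprocess

APACHEVER22, APACHEVER24 = 22, 24


def detect_apache_version() -> int:
    # "a2query -v" prints the installed apache2 version, e.g. "2.4.52"
    try:
        result = subprocess.run(
            ["a2query", "-v"], capture_output=True, text=True, check=True
        )
        output = result.stdout.strip()
    except (OSError, subprocess.CalledProcessError):
        return APACHEVER22  # fall back to 2.2-style directives
    return APACHEVER24 if output.startswith("2.4") else APACHEVER22
```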
- """ - try: - result = utils.cmd("a2query -v") - status = 0 - except CoreCommandError as e: - status = e.returncode - result = e.stderr - if status == 0 and result[:3] == "2.4": - return cls.APACHEVER24 - return cls.APACHEVER22 - - @classmethod - def generateapache2conf(cls, node: CoreNode, filename: str) -> str: - lockstr = { - cls.APACHEVER22: "LockFile ${APACHE_LOCK_DIR}/accept.lock\n", - cls.APACHEVER24: "Mutex file:${APACHE_LOCK_DIR} default\n", - } - mpmstr = { - cls.APACHEVER22: "", - cls.APACHEVER24: "LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so\n", - } - permstr = { - cls.APACHEVER22: " Order allow,deny\n Deny from all\n Satisfy all\n", - cls.APACHEVER24: " Require all denied\n", - } - authstr = { - cls.APACHEVER22: "LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so\n", - cls.APACHEVER24: "LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so\n", - } - permstr2 = { - cls.APACHEVER22: "\t\tOrder allow,deny\n\t\tallow from all\n", - cls.APACHEVER24: "\t\tRequire all granted\n", - } - version = cls.detectversionfromcmd() - cfg = "# apache2.conf generated by utility.py:HttpService\n" - cfg += lockstr[version] - cfg += """\ -PidFile ${APACHE_PID_FILE} -Timeout 300 -KeepAlive On -MaxKeepAliveRequests 100 -KeepAliveTimeout 5 -""" - cfg += mpmstr[version] - cfg += """\ - - - StartServers 5 - MinSpareServers 5 - MaxSpareServers 10 - MaxClients 150 - MaxRequestsPerChild 0 - - - - StartServers 2 - MinSpareThreads 25 - MaxSpareThreads 75 - ThreadLimit 64 - ThreadsPerChild 25 - MaxClients 150 - MaxRequestsPerChild 0 - - - - StartServers 2 - MinSpareThreads 25 - MaxSpareThreads 75 - ThreadLimit 64 - ThreadsPerChild 25 - MaxClients 150 - MaxRequestsPerChild 0 - - -User ${APACHE_RUN_USER} -Group ${APACHE_RUN_GROUP} - -AccessFileName .htaccess - - -""" - cfg += permstr[version] - cfg += """\ - - -DefaultType None - -HostnameLookups Off - -ErrorLog ${APACHE_LOG_DIR}/error.log -LogLevel warn - -#Include mods-enabled/*.load -#Include mods-enabled/*.conf -LoadModule alias_module /usr/lib/apache2/modules/mod_alias.so -LoadModule auth_basic_module /usr/lib/apache2/modules/mod_auth_basic.so -""" - cfg += authstr[version] - cfg += """\ -LoadModule authz_host_module /usr/lib/apache2/modules/mod_authz_host.so -LoadModule authz_user_module /usr/lib/apache2/modules/mod_authz_user.so -LoadModule autoindex_module /usr/lib/apache2/modules/mod_autoindex.so -LoadModule dir_module /usr/lib/apache2/modules/mod_dir.so -LoadModule env_module /usr/lib/apache2/modules/mod_env.so - -NameVirtualHost *:80 -Listen 80 - - - Listen 443 - - - Listen 443 - - -LogFormat "%v:%p %h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" vhost_combined -LogFormat "%h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" combined -LogFormat "%h %l %u %t \\"%r\\" %>s %O" common -LogFormat "%{Referer}i -> %U" referer -LogFormat "%{User-agent}i" agent - -ServerTokens OS -ServerSignature On -TraceEnable Off - - - ServerAdmin webmaster@localhost - DocumentRoot /var/www - - Options FollowSymLinks - AllowOverride None - - - Options Indexes FollowSymLinks MultiViews - AllowOverride None -""" - cfg += permstr2[version] - cfg += """\ - - ErrorLog ${APACHE_LOG_DIR}/error.log - LogLevel warn - CustomLog ${APACHE_LOG_DIR}/access.log combined - - -""" - return cfg - - @classmethod - def generateenvvars(cls, node: CoreNode, filename: str) -> str: - return """\ -# this file is used by apache2ctl - generated by utility.py:HttpService -# these settings come 
from a default Ubuntu apache2 installation -export APACHE_RUN_USER=www-data -export APACHE_RUN_GROUP=www-data -export APACHE_PID_FILE=/var/run/apache2.pid -export APACHE_RUN_DIR=/var/run/apache2 -export APACHE_LOCK_DIR=/var/lock/apache2 -export APACHE_LOG_DIR=/var/log/apache2 -export LANG=C -export LANG -""" - - @classmethod - def generatehtml(cls, node: CoreNode, filename: str) -> str: - body = f"""\ - -

<h1>{node.name} web server</h1> -<p>This is the default web page for this server.</p> -<p>The web server software is running but no content has been added, yet.</p> -""" - for iface in node.get_ifaces(control=False): - body += f"<li>{iface.name} - {[str(x) for x in iface.ips()]}</li>\n" - return f"<html><body>{body}</body></html>" - - -class PcapService(UtilService): - """ - Pcap service for logging packets. - """ - - name: str = "pcap" - configs: tuple[str, ...] = ("pcap.sh",) - startup: tuple[str, ...] = ("bash pcap.sh start",) - shutdown: tuple[str, ...] = ("bash pcap.sh stop",) - validate: tuple[str, ...] = ("pidof tcpdump",) - meta: str = "logs network traffic to pcap packet capture files" - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate a startpcap.sh traffic logging script. - """ - cfg = """ -#!/bin/sh -# set tcpdump options here (see 'man tcpdump' for help) -# (-s snap length, -C limit pcap file length, -n disable name resolution) -DUMPOPTS="-s 12288 -C 10 -n" - -if [ "x$1" = "xstart" ]; then - -""" - for iface in node.get_ifaces(): - if iface.control: - cfg += "# " - redir = "< /dev/null" - cfg += ( - f"tcpdump ${{DUMPOPTS}} -w {node.name}.{iface.name}.pcap " - f"-i {iface.name} {redir} &\n" - ) - cfg += """ - -elif [ "x$1" = "xstop" ]; then - mkdir -p ${SESSION_DIR}/pcap - mv *.pcap ${SESSION_DIR}/pcap -fi; -""" - return cfg - - -class RadvdService(UtilService): - name: str = "radvd" - configs: tuple[str, ...] = ("/etc/radvd/radvd.conf",) - dirs: tuple[str, ...] = ("/etc/radvd", "/var/run/radvd") - startup: tuple[str, ...] = ( - "radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log", - ) - shutdown: tuple[str, ...] = ("pkill radvd",) - validate: tuple[str, ...] = ("pidof radvd",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate a RADVD router advertisement daemon config file - using the network address of each interface. - """ - cfg = "# auto-generated by RADVD service (utility.py)\n" - for iface in node.get_ifaces(control=False): - prefixes = list(map(cls.subnetentry, iface.ips())) - if len(prefixes) < 1: - continue - cfg += f"""\ -interface {iface.name} -{{ - AdvSendAdvert on; - MinRtrAdvInterval 3; - MaxRtrAdvInterval 10; - AdvDefaultPreference low; - AdvHomeAgentFlag off; -""" - for prefix in prefixes: - if prefix == "": - continue - cfg += f"""\ - prefix {prefix} - {{ - AdvOnLink on; - AdvAutonomous on; - AdvRouterAddr on; - }}; -""" - cfg += "};\n" - return cfg - - @staticmethod - def subnetentry(ip: netaddr.IPNetwork) -> str: - """ - Generate a subnet declaration block given an IPv6 prefix string - for inclusion in the RADVD config file. - """ - address = str(ip.ip) - if netaddr.valid_ipv6(address): - return str(ip) - else: - return "" - - -class AtdService(UtilService): - """ - Atd service for scheduling at jobs - """ - - name: str = "atd" - configs: tuple[str, ...] = ("startatd.sh",) - dirs: tuple[str, ...] = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool") - startup: tuple[str, ...] = ("bash startatd.sh",) - shutdown: tuple[str, ...] = ("pkill atd",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - return """ -#!/bin/sh -echo 00001 > /var/spool/cron/atjobs/.SEQ -chown -R daemon /var/spool/cron/* -chmod -R 700 /var/spool/cron/* -atd -""" - - -class UserDefinedService(UtilService): - """ - Dummy service allowing customization of anything. - """ - - name: str = "UserDefined" - meta: str = "Customize this service to do anything upon startup." diff --git a/daemon/core/services/xorp.py b/daemon/core/services/xorp.py deleted file mode 100644 index ac29b2999..000000000 --- a/daemon/core/services/xorp.py +++ /dev/null @@ -1,436 +0,0 @@ -""" -xorp.py: defines routing services provided by the XORP routing suite.
-""" - -from typing import Optional - -import netaddr - -from core.nodes.base import CoreNode -from core.nodes.interface import CoreInterface -from core.services.coreservices import CoreService - - -class XorpRtrmgr(CoreService): - """ - XORP router manager service builds a config.boot file based on other - enabled XORP services, and launches necessary daemons upon startup. - """ - - name: str = "xorp_rtrmgr" - group: str = "XORP" - executables: tuple[str, ...] = ("xorp_rtrmgr",) - dirs: tuple[str, ...] = ("/etc/xorp",) - configs: tuple[str, ...] = ("/etc/xorp/config.boot",) - startup: tuple[ - str, ... - ] = f"xorp_rtrmgr -d -b {configs[0]} -l /var/log/{name}.log -P /var/run/{name}.pid" - shutdown: tuple[str, ...] = ("killall xorp_rtrmgr",) - validate: tuple[str, ...] = ("pidof xorp_rtrmgr",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Returns config.boot configuration file text. Other services that - depend on this will have generatexorpconfig() hooks that are - invoked here. Filename currently ignored. - """ - cfg = "interfaces {\n" - for iface in node.get_ifaces(): - cfg += f" interface {iface.name} {{\n" - cfg += f"\tvif {iface.name} {{\n" - cfg += "".join(map(cls.addrstr, iface.ips())) - cfg += cls.lladdrstr(iface) - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n\n" - - for s in node.services: - if cls.name not in s.dependencies: - continue - if not (isinstance(s, XorpService) or issubclass(s, XorpService)): - continue - cfg += s.generate_xorp_config(node) - return cfg - - @staticmethod - def addrstr(ip: netaddr.IPNetwork) -> str: - """ - helper for mapping IP addresses to XORP config statements - """ - cfg = f"\t address {ip.ip} {{\n" - cfg += f"\t\tprefix-length: {ip.prefixlen}\n" - cfg += "\t }\n" - return cfg - - @staticmethod - def lladdrstr(iface: CoreInterface) -> str: - """ - helper for adding link-local address entries (required by OSPFv3) - """ - cfg = f"\t address {iface.mac.eui64()} {{\n" - cfg += "\t\tprefix-length: 64\n" - cfg += "\t }\n" - return cfg - - -class XorpService(CoreService): - """ - Parent class for XORP services. Defines properties and methods - common to XORP's routing daemons. - """ - - name: Optional[str] = None - group: str = "XORP" - executables: tuple[str, ...] = ("xorp_rtrmgr",) - dependencies: tuple[str, ...] = ("xorp_rtrmgr",) - meta: str = ( - "The config file for this service can be found in the xorp_rtrmgr service." - ) - - @staticmethod - def fea(forwarding: str) -> str: - """ - Helper to add a forwarding engine entry to the config file. - """ - cfg = "fea {\n" - cfg += f" {forwarding} {{\n" - cfg += "\tdisable:false\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - @staticmethod - def mfea(forwarding, node: CoreNode) -> str: - """ - Helper to add a multicast forwarding engine entry to the config file. - """ - names = [] - for iface in node.get_ifaces(control=False): - names.append(iface.name) - names.append("register_vif") - cfg = "plumbing {\n" - cfg += f" {forwarding} {{\n" - for name in names: - cfg += f"\tinterface {name} {{\n" - cfg += f"\t vif {name} {{\n" - cfg += "\t\tdisable: false\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - @staticmethod - def policyexportconnected() -> str: - """ - Helper to add a policy statement for exporting connected routes. 
- """ - cfg = "policy {\n" - cfg += " policy-statement export-connected {\n" - cfg += "\tterm 100 {\n" - cfg += "\t from {\n" - cfg += '\t\tprotocol: "connected"\n' - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - @staticmethod - def router_id(node: CoreNode) -> str: - """ - Helper to return the first IPv4 address of a node as its router ID. - """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) - return "0.0.0.0" - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - return "" - - @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - return "" - - -class XorpOspfv2(XorpService): - """ - The OSPFv2 service provides IPv4 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified XORP configuration file. - """ - - name: str = "XORP_OSPFv2" - - @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = cls.fea("unicast-forwarding4") - rtrid = cls.router_id(node) - cfg += "\nprotocols {\n" - cfg += " ospf4 {\n" - cfg += f"\trouter-id: {rtrid}\n" - cfg += "\tarea 0.0.0.0 {\n" - for iface in node.get_ifaces(control=False): - cfg += f"\t interface {iface.name} {{\n" - cfg += f"\t\tvif {iface.name} {{\n" - for ip4 in iface.ip4s: - cfg += f"\t\t address {ip4.ip} {{\n" - cfg += "\t\t }\n" - cfg += "\t\t}\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - -class XorpOspfv3(XorpService): - """ - The OSPFv3 service provides IPv6 routing. It does - not build its own configuration file but has hooks for adding to the - unified XORP configuration file. - """ - - name: str = "XORP_OSPFv3" - - @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = cls.fea("unicast-forwarding6") - rtrid = cls.router_id(node) - cfg += "\nprotocols {\n" - cfg += " ospf6 0 { /* Instance ID 0 */\n" - cfg += f"\trouter-id: {rtrid}\n" - cfg += "\tarea 0.0.0.0 {\n" - for iface in node.get_ifaces(control=False): - cfg += f"\t interface {iface.name} {{\n" - cfg += f"\t\tvif {iface.name} {{\n" - cfg += "\t\t}\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - -class XorpBgp(XorpService): - """ - IPv4 inter-domain routing. AS numbers and peers must be customized. - """ - - name: str = "XORP_BGP" - custom_needed: bool = True - - @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = "/* This is a sample config that should be customized with\n" - cfg += " appropriate AS numbers and peers */\n" - cfg += cls.fea("unicast-forwarding4") - cfg += cls.policyexportconnected() - rtrid = cls.router_id(node) - cfg += "\nprotocols {\n" - cfg += " bgp {\n" - cfg += f"\tbgp-id: {rtrid}\n" - cfg += "\tlocal-as: 65001 /* change this */\n" - cfg += '\texport: "export-connected"\n' - cfg += "\tpeer 10.0.1.1 { /* change this */\n" - cfg += "\t local-ip: 10.0.1.1\n" - cfg += "\t as: 65002\n" - cfg += "\t next-hop: 10.0.0.2\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - -class XorpRip(XorpService): - """ - RIP IPv4 unicast routing. 
- """ - - name: str = "XORP_RIP" - - @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = cls.fea("unicast-forwarding4") - cfg += cls.policyexportconnected() - cfg += "\nprotocols {\n" - cfg += " rip {\n" - cfg += '\texport: "export-connected"\n' - for iface in node.get_ifaces(control=False): - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" - for ip4 in iface.ip4s: - cfg += f"\t\taddress {ip4.ip} {{\n" - cfg += "\t\t disable: false\n" - cfg += "\t\t}\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - -class XorpRipng(XorpService): - """ - RIP NG IPv6 unicast routing. - """ - - name: str = "XORP_RIPNG" - - @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = cls.fea("unicast-forwarding6") - cfg += cls.policyexportconnected() - cfg += "\nprotocols {\n" - cfg += " ripng {\n" - cfg += '\texport: "export-connected"\n' - for iface in node.get_ifaces(control=False): - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" - cfg += f"\t\taddress {iface.mac.eui64()} {{\n" - cfg += "\t\t disable: false\n" - cfg += "\t\t}\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - -class XorpPimSm4(XorpService): - """ - PIM Sparse Mode IPv4 multicast routing. - """ - - name: str = "XORP_PIMSM4" - - @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = cls.mfea("mfea4", node) - cfg += "\nprotocols {\n" - cfg += " igmp {\n" - names = [] - for iface in node.get_ifaces(control=False): - names.append(iface.name) - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" - cfg += "\t\tdisable: false\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - cfg += "\nprotocols {\n" - cfg += " pimsm4 {\n" - - names.append("register_vif") - for name in names: - cfg += f"\tinterface {name} {{\n" - cfg += f"\t vif {name} {{\n" - cfg += "\t\tdr-priority: 1\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += "\tbootstrap {\n" - cfg += "\t cand-bsr {\n" - cfg += "\t\tscope-zone 224.0.0.0/4 {\n" - cfg += f'\t\t cand-bsr-by-vif-name: "{names[0]}"\n' - cfg += "\t\t}\n" - cfg += "\t }\n" - cfg += "\t cand-rp {\n" - cfg += "\t\tgroup-prefix 224.0.0.0/4 {\n" - cfg += f'\t\t cand-rp-by-vif-name: "{names[0]}"\n' - cfg += "\t\t}\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - cfg += "\nprotocols {\n" - cfg += " fib2mrib {\n" - cfg += "\tdisable: false\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - -class XorpPimSm6(XorpService): - """ - PIM Sparse Mode IPv6 multicast routing. 
- """ - - name: str = "XORP_PIMSM6" - - @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = cls.mfea("mfea6", node) - cfg += "\nprotocols {\n" - cfg += " mld {\n" - names = [] - for iface in node.get_ifaces(control=False): - names.append(iface.name) - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" - cfg += "\t\tdisable: false\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - cfg += "\nprotocols {\n" - cfg += " pimsm6 {\n" - - names.append("register_vif") - for name in names: - cfg += f"\tinterface {name} {{\n" - cfg += f"\t vif {name} {{\n" - cfg += "\t\tdr-priority: 1\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += "\tbootstrap {\n" - cfg += "\t cand-bsr {\n" - cfg += "\t\tscope-zone ff00::/8 {\n" - cfg += f'\t\t cand-bsr-by-vif-name: "{names[0]}"\n' - cfg += "\t\t}\n" - cfg += "\t }\n" - cfg += "\t cand-rp {\n" - cfg += "\t\tgroup-prefix ff00::/8 {\n" - cfg += f'\t\t cand-rp-by-vif-name: "{names[0]}"\n' - cfg += "\t\t}\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - cfg += "\nprotocols {\n" - cfg += " fib2mrib {\n" - cfg += "\tdisable: false\n" - cfg += " }\n" - cfg += "}\n" - return cfg - - -class XorpOlsr(XorpService): - """ - OLSR IPv4 unicast MANET routing. - """ - - name: str = "XORP_OLSR" - - @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = cls.fea("unicast-forwarding4") - rtrid = cls.router_id(node) - cfg += "\nprotocols {\n" - cfg += " olsr4 {\n" - cfg += f"\tmain-address: {rtrid}\n" - for iface in node.get_ifaces(control=False): - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" - for ip4 in iface.ip4s: - cfg += f"\t\taddress {ip4.ip} {{\n" - cfg += "\t\t}\n" - cfg += "\t }\n" - cfg += "\t}\n" - cfg += " }\n" - cfg += "}\n" - return cfg diff --git a/daemon/core/utils.py b/daemon/core/utils.py index df00984c5..22d7b3426 100644 --- a/daemon/core/utils.py +++ b/daemon/core/utils.py @@ -7,8 +7,6 @@ import hashlib import importlib import inspect -import json -import logging import logging.config import os import random @@ -148,7 +146,7 @@ def close_onexec(fd: int) -> None: fcntl.fcntl(fd, fcntl.F_SETFD, fdflags | fcntl.FD_CLOEXEC) -def which(command: str, required: bool) -> str: +def which(command: str, required: bool) -> Optional[str]: """ Find location of desired executable within current PATH. @@ -401,18 +399,6 @@ def load_classes(path: Path, clazz: Generic[T]) -> list[T]: return classes -def load_logging_config(config_path: Path) -> None: - """ - Load CORE logging configuration file. 
- - :param config_path: path to logging config file - :return: nothing - """ - with config_path.open("r") as f: - log_config = json.load(f) - logging.config.dictConfig(log_config) - - def run_cmds_threaded( node_cmds: list[tuple["CoreNode", list[str]]], wait: bool = True, diff --git a/daemon/core/xml/corexml.py b/daemon/core/xml/corexml.py index d566b5019..3aa1150a4 100644 --- a/daemon/core/xml/corexml.py +++ b/daemon/core/xml/corexml.py @@ -4,22 +4,25 @@ from lxml import etree -import core.nodes.base -import core.nodes.physical from core import utils from core.config import Configuration from core.emane.nodes import EmaneNet, EmaneOptions from core.emulator.data import InterfaceData, LinkOptions from core.emulator.enumerations import EventTypes, NodeTypes from core.errors import CoreXmlError -from core.nodes.base import CoreNodeBase, CoreNodeOptions, NodeBase, Position +from core.nodes.base import ( + CoreNetworkBase, + CoreNodeBase, + CoreNodeOptions, + NodeBase, + Position, +) from core.nodes.docker import DockerNode, DockerOptions from core.nodes.interface import CoreInterface -from core.nodes.lxd import LxcNode, LxcOptions -from core.nodes.network import CtrlNet, GreTapBridge, PtpNet, WlanNode +from core.nodes.network import GreTapBridge, WlanNode +from core.nodes.physical import Rj45Node from core.nodes.podman import PodmanNode, PodmanOptions from core.nodes.wireless import WirelessNode -from core.services.coreservices import CoreService logger = logging.getLogger(__name__) @@ -148,71 +151,10 @@ def add_position(self) -> None: add_attribute(position, "alt", alt) -class ServiceElement: - def __init__(self, service: type[CoreService]) -> None: - self.service: type[CoreService] = service - self.element: etree.Element = etree.Element("service") - add_attribute(self.element, "name", service.name) - self.add_directories() - self.add_startup() - self.add_validate() - self.add_shutdown() - self.add_files() - - def add_directories(self) -> None: - # get custom directories - directories = etree.Element("directories") - for directory in self.service.dirs: - directory_element = etree.SubElement(directories, "directory") - directory_element.text = directory - - if directories.getchildren(): - self.element.append(directories) - - def add_files(self) -> None: - file_elements = etree.Element("files") - for file_name in self.service.config_data: - data = self.service.config_data[file_name] - file_element = etree.SubElement(file_elements, "file") - add_attribute(file_element, "name", file_name) - file_element.text = etree.CDATA(data) - if file_elements.getchildren(): - self.element.append(file_elements) - - def add_startup(self) -> None: - # get custom startup - startup_elements = etree.Element("startups") - for startup in self.service.startup: - startup_element = etree.SubElement(startup_elements, "startup") - startup_element.text = startup - - if startup_elements.getchildren(): - self.element.append(startup_elements) - - def add_validate(self) -> None: - # get custom validate - validate_elements = etree.Element("validates") - for validate in self.service.validate: - validate_element = etree.SubElement(validate_elements, "validate") - validate_element.text = validate - - if validate_elements.getchildren(): - self.element.append(validate_elements) - - def add_shutdown(self) -> None: - # get custom shutdown - shutdown_elements = etree.Element("shutdowns") - for shutdown in self.service.shutdown: - shutdown_element = etree.SubElement(shutdown_elements, "shutdown") - shutdown_element.text = shutdown - - 
if shutdown_elements.getchildren(): - self.element.append(shutdown_elements) - - class DeviceElement(NodeElement): - def __init__(self, session: "Session", node: NodeBase) -> None: + def __init__(self, session: "Session", node: CoreNodeBase) -> None: super().__init__(session, node, "device") + self.node: CoreNodeBase = node add_attribute(self.element, "type", node.model) self.add_class() self.add_services() @@ -220,31 +162,25 @@ def __init__(self, session: "Session", node: NodeBase) -> None: def add_class(self) -> None: clazz = "" image = "" - if isinstance(self.node, DockerNode): - clazz = "docker" - image = self.node.image - elif isinstance(self.node, LxcNode): - clazz = "lxc" - image = self.node.image - elif isinstance(self.node, PodmanNode): - clazz = "podman" + compose = "" + compose_name = "" + if isinstance(self.node, (DockerNode, PodmanNode)): + clazz = "docker" if isinstance(self.node, DockerNode) else "podman" image = self.node.image + compose = self.node.compose + compose_name = self.node.compose_name add_attribute(self.element, "class", clazz) add_attribute(self.element, "image", image) + add_attribute(self.element, "compose", compose) + add_attribute(self.element, "compose_name", compose_name) def add_services(self) -> None: service_elements = etree.Element("services") - for service in self.node.services: - etree.SubElement(service_elements, "service", name=service.name) + for name, service in self.node.services.items(): + etree.SubElement(service_elements, "service", name=name) if service_elements.getchildren(): self.element.append(service_elements) - config_service_elements = etree.Element("configservices") - for name, service in self.node.config_services.items(): - etree.SubElement(config_service_elements, "service", name=name) - if config_service_elements.getchildren(): - self.element.append(config_service_elements) - class NetworkElement(NodeElement): def __init__(self, session: "Session", node: NodeBase) -> None: @@ -291,7 +227,6 @@ def write_session(self) -> None: self.write_mobility_configs() self.write_emane_configs() self.write_service_configs() - self.write_configservice_configs() self.write_session_origin() self.write_servers() self.write_session_hooks() @@ -299,14 +234,16 @@ def write_session(self) -> None: self.write_session_metadata() self.write_default_services() - def write(self, path: Path) -> None: - self.scenario.set("name", str(path)) - # write out generated xml + def get_data(self) -> bytes: xml_tree = etree.ElementTree(self.scenario) - xml_tree.write( - str(path), xml_declaration=True, pretty_print=True, encoding="UTF-8" + return etree.tostring( + xml_tree, xml_declaration=True, pretty_print=True, encoding="UTF-8" ) + def write(self, path: Path) -> None: + data = self.get_data() + path.write_bytes(data) + def write_session_origin(self) -> None: # origin: geolocation of cartesian coordinate 0,0,0 lat, lon, alt = self.session.location.refgeo @@ -315,7 +252,6 @@ def write_session_origin(self) -> None: add_attribute(origin, "lon", lon) add_attribute(origin, "alt", alt) has_origin = len(origin.items()) > 0 - if has_origin: self.scenario.append(origin) refscale = self.session.location.refscale @@ -339,13 +275,15 @@ def write_servers(self) -> None: def write_session_hooks(self) -> None: # hook scripts hooks = etree.Element("session_hooks") - for state in sorted(self.session.hooks, key=lambda x: x.value): - for file_name, data in self.session.hooks[state]: + for state in sorted( + self.session.hook_manager.script_hooks, key=lambda x: x.value + ): + state_hooks = 
self.session.hook_manager.script_hooks[state] + for file_name, data in sorted(state_hooks.items()): hook = etree.SubElement(hooks, "hook") add_attribute(hook, "name", file_name) add_attribute(hook, "state", state.value) hook.text = data - if hooks.getchildren(): self.scenario.append(hooks) @@ -363,11 +301,9 @@ def write_session_metadata(self) -> None: config = self.session.metadata if not config: return - for key in config: value = config[key] add_configuration(metadata_elements, key, value) - if metadata_elements.getchildren(): self.scenario.append(metadata_elements) @@ -393,7 +329,6 @@ def write_mobility_configs(self) -> None: all_configs = self.session.mobility.get_all_configs(node_id) if not all_configs: continue - for model_name in all_configs: config = all_configs[model_name] logger.debug( @@ -407,30 +342,16 @@ def write_mobility_configs(self) -> None: for name in config: value = config[name] add_configuration(mobility_configuration, name, value) - if mobility_configurations.getchildren(): self.scenario.append(mobility_configurations) def write_service_configs(self) -> None: service_configurations = etree.Element("service_configurations") - service_configs = self.session.services.all_configs() - for node_id, service in service_configs: - service_element = ServiceElement(service) - add_attribute(service_element.element, "node", node_id) - service_configurations.append(service_element.element) - - if service_configurations.getchildren(): - self.scenario.append(service_configurations) - - def write_configservice_configs(self) -> None: - service_configurations = etree.Element("configservice_configurations") for node in self.session.nodes.values(): if not isinstance(node, CoreNodeBase): continue - for name, service in node.config_services.items(): - service_element = etree.SubElement( - service_configurations, "service", name=name - ) + for name, service in node.services.items(): + service_element = etree.Element("service", name=name) add_attribute(service_element, "node", node.id) if service.custom_config: configs_element = etree.SubElement(service_element, "configs") @@ -445,13 +366,14 @@ def write_configservice_configs(self) -> None: templates_element, "template", name=template_name ) template_element.text = etree.CDATA(template) + if service.custom_config or service.custom_templates: + service_configurations.append(service_element) if service_configurations.getchildren(): self.scenario.append(service_configurations) def write_default_services(self) -> None: models = etree.Element("default_services") - for model in self.session.services.default_services: - services = self.session.services.default_services[model] + for model, services in []: model = etree.SubElement(models, "node", type=model) for service in services: etree.SubElement(model, "service", name=service) @@ -461,15 +383,11 @@ def write_default_services(self) -> None: def write_nodes(self) -> None: for node in self.session.nodes.values(): # network node - is_network_or_rj45 = isinstance( - node, (core.nodes.base.CoreNetworkBase, core.nodes.physical.Rj45Node) - ) - is_controlnet = isinstance(node, CtrlNet) - is_ptp = isinstance(node, PtpNet) - if is_network_or_rj45 and not (is_controlnet or is_ptp): + is_network_or_rj45 = isinstance(node, (CoreNetworkBase, Rj45Node)) + if is_network_or_rj45: self.write_network(node) # device node - elif isinstance(node, core.nodes.base.CoreNodeBase): + elif isinstance(node, CoreNodeBase): self.write_device(node) def write_network(self, node: NodeBase) -> None: @@ -494,7 +412,7 @@ def 
write_links(self) -> None: if link_elements.getchildren(): self.scenario.append(link_elements) - def write_device(self, node: NodeBase) -> None: + def write_device(self, node: CoreNodeBase) -> None: device = DeviceElement(self.session, node) self.devices.append(device.element) @@ -502,7 +420,7 @@ def create_iface_element( self, element_name: str, iface: CoreInterface ) -> etree.Element: iface_element = etree.Element(element_name) - # check if interface if connected to emane + # check if interface is connected to emane if isinstance(iface.node, CoreNodeBase) and isinstance(iface.net, EmaneNet): nem_id = self.session.emane.get_nem_id(iface) add_attribute(iface_element, "nem", nem_id) @@ -575,7 +493,6 @@ def __init__(self, session: "Session") -> None: def read(self, file_path: Path) -> None: xml_tree = etree.parse(str(file_path)) self.scenario = xml_tree.getroot() - # read xml session content self.read_default_services() self.read_session_metadata() @@ -583,31 +500,27 @@ def read(self, file_path: Path) -> None: self.read_session_hooks() self.read_servers() self.read_session_origin() - self.read_service_configs() self.read_mobility_configs() self.read_nodes() self.read_links() self.read_emane_configs() - self.read_configservice_configs() + self.read_service_configs() def read_default_services(self) -> None: default_services = self.scenario.find("default_services") if default_services is None: return - for node in default_services.iterchildren(): model = node.get("type") services = [] for service in node.iterchildren(): services.append(service.get("name")) logger.info("reading default services for nodes(%s): %s", model, services) - self.session.services.default_services[model] = services def read_session_metadata(self) -> None: session_metadata = self.scenario.find("session_metadata") if session_metadata is None: return - configs = {} for data in session_metadata.iterchildren(): name = data.get("name") @@ -632,7 +545,6 @@ def read_session_hooks(self) -> None: session_hooks = self.scenario.find("session_hooks") if session_hooks is None: return - for hook in session_hooks.iterchildren(): name = hook.get("name") state = get_int(hook, "state") @@ -655,19 +567,16 @@ def read_session_origin(self) -> None: session_origin = self.scenario.find("session_origin") if session_origin is None: return - lat = get_float(session_origin, "lat") lon = get_float(session_origin, "lon") alt = get_float(session_origin, "alt") if all([lat, lon, alt]): logger.info("reading session reference geo: %s, %s, %s", lat, lon, alt) self.session.location.setrefgeo(lat, lon, alt) - scale = get_float(session_origin, "scale") if scale: logger.info("reading session reference scale: %s", scale) self.session.location.refscale = scale - x = get_float(session_origin, "x") y = get_float(session_origin, "y") z = get_float(session_origin, "z") @@ -675,50 +584,6 @@ def read_session_origin(self) -> None: logger.info("reading session reference xyz: %s, %s, %s", x, y, z) self.session.location.refxyz = (x, y, z) - def read_service_configs(self) -> None: - service_configurations = self.scenario.find("service_configurations") - if service_configurations is None: - return - - for service_configuration in service_configurations.iterchildren(): - node_id = get_int(service_configuration, "node") - service_name = service_configuration.get("name") - logger.info( - "reading custom service(%s) for node(%s)", service_name, node_id - ) - self.session.services.set_service(node_id, service_name) - service = self.session.services.get_service(node_id, 
service_name) - - directory_elements = service_configuration.find("directories") - if directory_elements is not None: - service.dirs = tuple(x.text for x in directory_elements.iterchildren()) - - startup_elements = service_configuration.find("startups") - if startup_elements is not None: - service.startup = tuple(x.text for x in startup_elements.iterchildren()) - - validate_elements = service_configuration.find("validates") - if validate_elements is not None: - service.validate = tuple( - x.text for x in validate_elements.iterchildren() - ) - - shutdown_elements = service_configuration.find("shutdowns") - if shutdown_elements is not None: - service.shutdown = tuple( - x.text for x in shutdown_elements.iterchildren() - ) - - file_elements = service_configuration.find("files") - if file_elements is not None: - files = set(service.configs) - for file_element in file_elements.iterchildren(): - name = file_element.get("name") - data = file_element.text - service.config_data[name] = data - files.add(name) - service.configs = tuple(files) - def read_emane_configs(self) -> None: emane_configurations = self.scenario.find("emane_configurations") if emane_configurations is None: @@ -728,7 +593,6 @@ def read_emane_configs(self) -> None: iface_id = get_int(emane_configuration, "iface") model_name = emane_configuration.get("model") configs = {} - # validate node and model node = self.session.nodes.get(node_id) if not node: @@ -738,7 +602,6 @@ def read_emane_configs(self) -> None: raise CoreXmlError( f"invalid interface id({iface_id}) for node({node.name})" ) - # read and set emane model configuration platform_configuration = emane_configuration.find("platform") for config in platform_configuration.iterchildren(): @@ -760,7 +623,6 @@ def read_emane_configs(self) -> None: name = config.get("name") value = config.get("value") configs[name] = value - logger.info( "reading emane configuration node(%s) model(%s)", node_id, model_name ) @@ -771,17 +633,14 @@ def read_mobility_configs(self) -> None: mobility_configurations = self.scenario.find("mobility_configurations") if mobility_configurations is None: return - for mobility_configuration in mobility_configurations.iterchildren(): node_id = get_int(mobility_configuration, "node") model_name = mobility_configuration.get("model") configs = {} - for config in mobility_configuration.iterchildren(): name = config.get("name") value = config.get("value") configs[name] = value - logger.info( "reading mobility configuration node(%s) model(%s)", node_id, model_name ) @@ -792,7 +651,6 @@ def read_nodes(self) -> None: if device_elements is not None: for device_element in device_elements.iterchildren(): self.read_device(device_element) - network_elements = self.scenario.find("networks") if network_elements is not None: for network_element in network_elements.iterchildren(): @@ -805,13 +663,13 @@ def read_device(self, device_element: etree.Element) -> None: icon = device_element.get("icon") clazz = device_element.get("class") image = device_element.get("image") + compose = device_element.get("compose") + compose_name = device_element.get("compose_name") server = device_element.get("server") canvas = get_int(device_element, "canvas") node_type = NodeTypes.DEFAULT if clazz == "docker": node_type = NodeTypes.DOCKER - elif clazz == "lxc": - node_type = NodeTypes.LXC elif clazz == "podman": node_type = NodeTypes.PODMAN _class = self.session.get_node_class(node_type) @@ -822,17 +680,16 @@ def read_device(self, device_element: etree.Element) -> None: if isinstance(options, 
CoreNodeOptions): options.model = model service_elements = device_element.find("services") + if service_elements is None: + service_elements = device_element.find("configservices") if service_elements is not None: options.services.extend( x.get("name") for x in service_elements.iterchildren() ) - config_service_elements = device_element.find("configservices") - if config_service_elements is not None: - options.config_services.extend( - x.get("name") for x in config_service_elements.iterchildren() - ) - if isinstance(options, (DockerOptions, LxcOptions, PodmanOptions)): + if isinstance(options, (DockerOptions, PodmanOptions)): options.image = image + options.compose = compose + options.compose_name = compose_name # get position information position_element = device_element.find("position") position = None @@ -880,7 +737,7 @@ def read_network(self, network_element: etree.Element) -> None: node = self.session.add_node(_class, node_id, name, server, position, options) if isinstance(node, WirelessNode): wireless_element = network_element.find("wireless") - if wireless_element: + if wireless_element is not None: config = {} for config_element in wireless_element.iterchildren(): name = config_element.get("name") @@ -888,18 +745,18 @@ def read_network(self, network_element: etree.Element) -> None: config[name] = value node.set_config(config) - def read_configservice_configs(self) -> None: - configservice_configs = self.scenario.find("configservice_configurations") - if configservice_configs is None: + def read_service_configs(self) -> None: + service_configs = self.scenario.find("service_configurations") + if service_configs is None: + service_configs = self.scenario.find("configservice_configurations") + if service_configs is None: return - - for configservice_element in configservice_configs.iterchildren(): - name = configservice_element.get("name") - node_id = get_int(configservice_element, "node") + for service_element in service_configs.iterchildren(): + name = service_element.get("name") + node_id = get_int(service_element, "node") node = self.session.get_node(node_id, CoreNodeBase) - service = node.config_services[name] - - configs_element = configservice_element.find("configs") + service = node.services[name] + configs_element = service_element.find("configs") if configs_element is not None: config = {} for config_element in configs_element.iterchildren(): @@ -907,8 +764,7 @@ def read_configservice_configs(self) -> None: value = config_element.get("value") config[key] = value service.set_config(config) - - templates_element = configservice_element.find("templates") + templates_element = service_element.find("templates") if templates_element is not None: for template_element in templates_element.iterchildren(): name = template_element.get("name") @@ -922,7 +778,6 @@ def read_links(self) -> None: link_elements = self.scenario.find("links") if link_elements is None: return - node_sets = set() for link_element in link_elements.iterchildren(): node1_id = get_int(link_element, "node1") @@ -932,21 +787,18 @@ def read_links(self) -> None: if node2_id is None: node2_id = get_int(link_element, "node_two") node_set = frozenset((node1_id, node2_id)) - iface1_element = link_element.find("iface1") if iface1_element is None: iface1_element = link_element.find("interface_one") iface1_data = None if iface1_element is not None: iface1_data = create_iface_data(iface1_element) - iface2_element = link_element.find("iface2") if iface2_element is None: iface2_element = link_element.find("interface_two") 
iface2_data = None if iface2_element is not None: iface2_data = create_iface_data(iface2_element) - options_element = link_element.find("options") options = LinkOptions() if options_element is not None: @@ -961,10 +813,9 @@ def read_links(self) -> None: options.loss = get_float(options_element, "loss") if options.loss is None: options.loss = get_float(options_element, "per") - options.unidirectional = get_int(options_element, "unidirectional") + options.unidirectional = get_int(options_element, "unidirectional") == 1 options.buffer = get_int(options_element, "buffer") - - if options.unidirectional == 1 and node_set in node_sets: + if options.unidirectional and node_set in node_sets: logger.info("updating link node1(%s) node2(%s)", node1_id, node2_id) self.session.update_link( node1_id, node2_id, iface1_data.id, iface2_data.id, options @@ -974,5 +825,4 @@ def read_links(self) -> None: self.session.add_link( node1_id, node2_id, iface1_data, iface2_data, options ) - node_sets.add(node_set) diff --git a/daemon/core/xml/emanexml.py b/daemon/core/xml/emanexml.py index 4b8ada70b..da3eb47dc 100644 --- a/daemon/core/xml/emanexml.py +++ b/daemon/core/xml/emanexml.py @@ -8,11 +8,9 @@ from core import utils from core.config import Configuration from core.emane.nodes import EmaneNet -from core.emulator.distributed import DistributedServer from core.errors import CoreError -from core.nodes.base import CoreNode, CoreNodeBase +from core.nodes.base import CoreNode, NodeBase from core.nodes.interface import CoreInterface -from core.xml import corexml logger = logging.getLogger(__name__) @@ -51,52 +49,43 @@ def _value_to_params(value: str) -> Optional[tuple[str]]: return None -def create_file( - xml_element: etree.Element, - doc_name: str, - file_path: Path, - server: DistributedServer = None, +def create_node_file( + node: NodeBase, xml_element: etree.Element, doc_name: str, file_name: str ) -> None: """ - Create xml file. + Create emane xml for an interface. + :param node: node running emane :param xml_element: root element to write to file :param doc_name: name to use in the emane doctype - :param file_path: file path to write xml file to - :param server: remote server to create file on + :param file_name: name of file to create :return: nothing """ doctype = ( f'' ) - if server: - temp = NamedTemporaryFile(delete=False) - temp_path = Path(temp.name) - corexml.write_xml_file(xml_element, temp_path, doctype=doctype) - temp.close() - server.remote_put(temp_path, file_path) - temp_path.unlink() - else: - corexml.write_xml_file(xml_element, file_path, doctype=doctype) - - -def create_node_file( - node: CoreNodeBase, xml_element: etree.Element, doc_name: str, file_name: str -) -> None: - """ - Create emane xml for an interface. 
- - :param node: node running emane - :param xml_element: root element to write to file - :param doc_name: name to use in the emane doctype - :param file_name: name of xml file - :return: - """ + xml_data = etree.tostring( + xml_element, + pretty_print=True, + encoding="unicode", + doctype=doctype, + ) if isinstance(node, CoreNode): - file_path = node.directory / file_name + file_path = Path(file_name) + node.create_file(file_path, xml_data) else: - file_path = node.session.directory / file_name - create_file(xml_element, doc_name, file_path, node.server) + file_name = node.session.directory / file_name + if node.server: + temp = NamedTemporaryFile(delete=False) + temp_path = Path(temp.name) + with temp_path.open("w") as f: + f.write(xml_data) + temp.close() + node.server.remote_put(temp_path, file_name) + temp_path.unlink() + else: + with file_name.open("w") as f: + f.write(xml_data) def add_param(xml_element: etree.Element, name: str, value: str) -> None: @@ -224,7 +213,6 @@ def create_transport_xml(iface: CoreInterface, config: dict[str, str]) -> None: library=f"trans{transport_type.value.lower()}", ) add_param(transport_element, "bitrate", "0") - # get emane model cnfiguration flowcontrol = config.get("flowcontrolenable", "0") == "1" if isinstance(iface.node, CoreNode): @@ -235,8 +223,8 @@ def create_transport_xml(iface: CoreInterface, config: dict[str, str]) -> None: if flowcontrol: add_param(transport_element, "flowcontrolenable", "on") doc_name = "transport" - transport_name = transport_file_name(iface) - create_node_file(iface.node, transport_element, doc_name, transport_name) + file_name = transport_file_name(iface) + create_node_file(iface.node, transport_element, doc_name, file_name) def create_phy_xml( @@ -304,40 +292,8 @@ def create_nem_xml( etree.SubElement(nem_element, "mac", definition=mac_name) phy_name = phy_file_name(iface) etree.SubElement(nem_element, "phy", definition=phy_name) - nem_name = nem_file_name(iface) - create_node_file(iface.node, nem_element, "nem", nem_name) - - -def create_event_service_xml( - group: str, - port: str, - device: str, - file_directory: Path, - server: DistributedServer = None, -) -> None: - """ - Create a emane event service xml file. - - :param group: event group - :param port: event port - :param device: event device - :param file_directory: directory to create file in - :param server: remote server node - will run on, default is None for localhost - :return: nothing - """ - event_element = etree.Element("emaneeventmsgsvc") - for name, value in ( - ("group", group), - ("port", port), - ("device", device), - ("mcloop", "1"), - ("ttl", "32"), - ): - sub_element = etree.SubElement(event_element, name) - sub_element.text = value - file_path = file_directory / "libemaneeventservice.xml" - create_file(event_element, "emaneeventmsgsvc", file_path, server) + file_name = nem_file_name(iface) + create_node_file(iface.node, nem_element, "nem", file_name) def transport_file_name(iface: CoreInterface) -> str: diff --git a/daemon/poetry.lock b/daemon/poetry.lock index c2aae40d9..f3c3a3e1d 100644 --- a/daemon/poetry.lock +++ b/daemon/poetry.lock @@ -58,7 +58,7 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "certifi" -version = "2022.12.7" +version = "2023.7.22" description = "Python package for providing Mozilla's CA Bundle." 
category = "main" optional = false @@ -104,11 +104,11 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7 [[package]] name = "cryptography" -version = "39.0.1" +version = "41.0.4" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] cffi = ">=1.12" @@ -116,12 +116,12 @@ cffi = ">=1.12" [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -pep8test = ["black", "check-manifest", "mypy", "ruff", "types-pytz", "types-requests"] -sdist = ["setuptools-rust (>=0.11.4)"] +nox = ["nox"] +pep8test = ["black", "check-sdist", "mypy", "ruff"] +sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist", "pytz"] +test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] -tox = ["tox"] [[package]] name = "distlib" @@ -371,14 +371,14 @@ python-versions = ">=3.7" [[package]] name = "Pillow" -version = "9.4.0" +version = "10.0.1" description = "Python Imaging Library (Fork)" category = "main" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" [package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] [[package]] @@ -581,7 +581,7 @@ test = ["covdefaults (>=2.2.2)", "coverage (>=7.1)", "coverage-enable-subprocess [metadata] lock-version = "1.1" python-versions = "^3.9" -content-hash = "10902a50368c4381aec5a3e72a221a4c4225ae1be17ee38600f89aaee4a49c1f" +content-hash = "fc2c802c238fbe75c565e6e3e10446a52a8a554c03316400cf01f5e2460d555f" [metadata.files] atomicwrites = [ @@ -629,8 +629,8 @@ black = [ {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, ] certifi = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, ] cffi = [ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, @@ -711,29 +711,29 @@ colorama = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] cryptography = [ - {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965"}, - {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = 
"sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106"}, - {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c"}, - {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4"}, - {file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"}, - {file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5caeb8188c24888c90b5108a441c106f7faa4c4c075a2bcae438c6e8ca73cef"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4789d1e3e257965e960232345002262ede4d094d1a19f4d3b52e48d4d8f3b885"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a"}, - {file = "cryptography-39.0.1.tar.gz", hash = "sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695"}, + {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839"}, + {file = 
"cryptography-41.0.4-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143"}, + {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397"}, + {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860"}, + {file = "cryptography-41.0.4-cp37-abi3-win32.whl", hash = "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd"}, + {file = "cryptography-41.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311"}, + {file = "cryptography-41.0.4.tar.gz", hash = "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a"}, ] distlib = [ {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"}, @@ -1026,83 +1026,60 @@ pathspec = [ 
{file = "pathspec-0.11.0.tar.gz", hash = "sha256:64d338d4e0914e91c1792321e6907b5a593f1ab1851de7fc269557a21b30ebbc"}, ] Pillow = [ - {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"}, - {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"}, - {file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"}, - {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"}, - {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"}, - {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"}, - {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"}, - {file = "Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"}, - {file = "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"}, - {file = "Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"}, - {file = "Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"}, - {file = "Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"}, - {file = "Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"}, - {file = "Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"}, - {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"}, - {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"}, - {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"}, - {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"}, - {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"}, - {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"}, - {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"}, - {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"}, - {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"}, - {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"}, - {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"}, - {file = 
"Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"}, - {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"}, + {file = "Pillow-10.0.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a"}, + {file = "Pillow-10.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff"}, + {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf"}, + {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd"}, + {file = "Pillow-10.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0"}, + {file = "Pillow-10.0.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1"}, + {file = "Pillow-10.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2"}, + {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b"}, + {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1"}, + {file = "Pillow-10.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088"}, + {file = "Pillow-10.0.1-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b"}, + {file = "Pillow-10.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91"}, + {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4"}, + {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08"}, + {file = "Pillow-10.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08"}, + {file = "Pillow-10.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a"}, + {file = "Pillow-10.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7"}, + {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a"}, + {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7"}, + {file = "Pillow-10.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3"}, + {file = "Pillow-10.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849"}, + {file = "Pillow-10.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876"}, + {file = 
"Pillow-10.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145"}, + {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2"}, + {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf"}, + {file = "Pillow-10.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d"}, + {file = "Pillow-10.0.1.tar.gz", hash = "sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d"}, ] platformdirs = [ {file = "platformdirs-3.0.0-py3-none-any.whl", hash = "sha256:b1d5eb14f221506f50d6604a561f4c5786d9e80355219694a1b244bcd96f4567"}, @@ -1197,6 +1174,7 @@ PyYAML = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1204,8 +1182,15 @@ PyYAML = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = 
"PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1222,6 +1207,7 @@ PyYAML = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1229,6 +1215,7 @@ PyYAML = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = 
"PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, diff --git a/daemon/proto/Makefile.am b/daemon/proto/Makefile.am index af535c1e7..c4c69abb2 100644 --- a/daemon/proto/Makefile.am +++ b/daemon/proto/Makefile.am @@ -1,6 +1,6 @@ all: - $(PYTHON) -m grpc_tools.protoc -I . --python_out=.. core/api/grpc/*.proto - $(PYTHON) -m grpc_tools.protoc -I . --grpc_python_out=.. core/api/grpc/core.proto + $(PYTHON) -m grpc_tools.protoc -I . --python_out=.. --pyi_out=.. core/api/grpc/*.proto + $(PYTHON) -m grpc_tools.protoc -I . --grpc_python_out=.. --pyi_out=.. core/api/grpc/core.proto clean: -rm -f ../core/api/grpc/*_pb2* diff --git a/daemon/proto/core/api/grpc/common.proto b/daemon/proto/core/api/grpc/common.proto index 065bee7a3..060432551 100644 --- a/daemon/proto/core/api/grpc/common.proto +++ b/daemon/proto/core/api/grpc/common.proto @@ -9,6 +9,7 @@ message ConfigOption { int32 type = 4; repeated string select = 5; string group = 6; + string regex = 7; } message MappedConfig { diff --git a/daemon/proto/core/api/grpc/configservices.proto b/daemon/proto/core/api/grpc/configservices.proto deleted file mode 100644 index 25be616d7..000000000 --- a/daemon/proto/core/api/grpc/configservices.proto +++ /dev/null @@ -1,72 +0,0 @@ -syntax = "proto3"; - -package configservices; - -import "core/api/grpc/common.proto"; - -message ConfigServiceConfig { - int32 node_id = 1; - string name = 2; - map templates = 3; - map config = 4; -} - -message ConfigServiceValidationMode { - enum Enum { - BLOCKING = 0; - NON_BLOCKING = 1; - TIMER = 2; - } -} - -message ConfigService { - string group = 1; - string name = 2; - repeated string executables = 3; - repeated string dependencies = 4; - repeated string directories = 5; - repeated string files = 6; - repeated string startup = 7; - repeated string validate = 8; - repeated string shutdown = 9; - ConfigServiceValidationMode.Enum validation_mode = 10; - int32 validation_timer = 11; - float validation_period = 12; -} - -message ConfigMode { - string name = 1; - map config = 2; -} - -message GetConfigServiceDefaultsRequest { - string name = 1; - int32 session_id = 2; - int32 node_id = 3; -} - -message GetConfigServiceDefaultsResponse { - map templates = 1; - map config = 2; - repeated ConfigMode modes = 3; -} - -message GetNodeConfigServiceRequest { - int32 session_id = 1; - int32 node_id = 2; - string name = 3; -} - -message GetNodeConfigServiceResponse { - map config = 1; -} - -message GetConfigServiceRenderedRequest { - int32 session_id = 1; - int32 node_id = 2; - string name = 3; -} - -message GetConfigServiceRenderedResponse { - map rendered = 1; -} diff --git a/daemon/proto/core/api/grpc/core.proto b/daemon/proto/core/api/grpc/core.proto index 09f2c764d..f17b5bbc6 100644 --- a/daemon/proto/core/api/grpc/core.proto +++ b/daemon/proto/core/api/grpc/core.proto @@ -2,11 +2,10 @@ syntax = "proto3"; package core; -import "core/api/grpc/configservices.proto"; +import 
"core/api/grpc/services.proto"; import "core/api/grpc/common.proto"; import "core/api/grpc/emane.proto"; import "core/api/grpc/mobility.proto"; -import "core/api/grpc/services.proto"; import "core/api/grpc/wlan.proto"; service CoreApi { @@ -72,26 +71,16 @@ service CoreApi { rpc MobilityAction (mobility.MobilityActionRequest) returns (mobility.MobilityActionResponse) { } - // service rpc + // services rpc GetServiceDefaults (services.GetServiceDefaultsRequest) returns (services.GetServiceDefaultsResponse) { } - rpc SetServiceDefaults (services.SetServiceDefaultsRequest) returns (services.SetServiceDefaultsResponse) { - } rpc GetNodeService (services.GetNodeServiceRequest) returns (services.GetNodeServiceResponse) { } - rpc GetNodeServiceFile (services.GetNodeServiceFileRequest) returns (services.GetNodeServiceFileResponse) { - } rpc ServiceAction (services.ServiceActionRequest) returns (services.ServiceActionResponse) { } - - // config services - rpc GetConfigServiceDefaults (configservices.GetConfigServiceDefaultsRequest) returns (configservices.GetConfigServiceDefaultsResponse) { + rpc GetServiceRendered (services.GetServiceRenderedRequest) returns (services.GetServiceRenderedResponse) { } - rpc GetNodeConfigService (configservices.GetNodeConfigServiceRequest) returns (configservices.GetNodeConfigServiceResponse) { - } - rpc ConfigServiceAction (services.ServiceActionRequest) returns (services.ServiceActionResponse) { - } - rpc GetConfigServiceRendered (configservices.GetConfigServiceRenderedRequest) returns (configservices.GetConfigServiceRenderedResponse) { + rpc CreateService (services.CreateServiceRequest) returns (services.CreateServiceResponse) { } // wlan rpc @@ -121,6 +110,8 @@ service CoreApi { } rpc EmaneLink (emane.EmaneLinkRequest) returns (emane.EmaneLinkResponse) { } + rpc EmaneEvents (stream emane.EmaneEventsRequest) returns (emane.EmaneEventsResponse) { + } // xml rpc rpc SaveXml (SaveXmlRequest) returns (SaveXmlResponse) { @@ -145,8 +136,7 @@ message GetConfigRequest { message GetConfigResponse { repeated services.Service services = 1; - repeated configservices.ConfigService config_services = 2; - repeated string emane_models = 3; + repeated string emane_models = 2; } @@ -209,7 +199,7 @@ message GetSessionResponse { message SessionAlertRequest { int32 session_id = 1; - ExceptionLevel.Enum level = 2; + AlertLevel.Enum level = 2; string source = 3; string text = 4; int32 node_id = 5; @@ -258,9 +248,7 @@ message Event { SessionEvent session_event = 1; NodeEvent node_event = 2; LinkEvent link_event = 3; - ConfigEvent config_event = 4; - ExceptionEvent exception_event = 5; - FileEvent file_event = 6; + AlertEvent alert_event = 5; } int32 session_id = 7; string source = 8; @@ -284,42 +272,15 @@ message SessionEvent { float time = 5; } -message ConfigEvent { - MessageType.Enum message_type = 1; - int32 node_id = 2; - string object = 3; - int32 type = 4; - repeated int32 data_types = 5; - string data_values = 6; - string captions = 7; - string possible_values = 8; - string groups = 9; - int32 iface_id = 10; - int32 network_id = 11; - string opaque = 12; -} - -message ExceptionEvent { +message AlertEvent { int32 node_id = 1; - ExceptionLevel.Enum level = 2; + AlertLevel.Enum level = 2; string source = 3; string date = 4; string text = 5; string opaque = 6; } -message FileEvent { - MessageType.Enum message_type = 1; - int32 node_id = 2; - string name = 3; - string mode = 4; - int32 number = 5; - string type = 6; - string source = 7; - string data = 8; - string compressed_data = 9; 
-}
-
message AddNodeRequest {
int32 session_id = 1;
Node node = 2;
@@ -456,7 +417,7 @@ message SaveXmlRequest {
}
message SaveXmlResponse {
- string data = 1;
+ bytes data = 1;
}
message OpenXmlRequest {
@@ -492,7 +453,6 @@ message EventType {
SESSION = 0;
NODE = 1;
LINK = 2;
- CONFIG = 3;
EXCEPTION = 4;
FILE = 5;
}
@@ -541,10 +501,7 @@ message NodeType {
TUNNEL = 8;
EMANE = 10;
TAP_BRIDGE = 11;
- PEER_TO_PEER = 12;
- CONTROL_NET = 13;
DOCKER = 15;
- LXC = 16;
WIRELESS = 17;
PODMAN = 18;
}
@@ -567,7 +524,7 @@ message ConfigOptionType {
}
}
-message ExceptionLevel {
+message AlertLevel {
enum Enum {
DEFAULT = 0;
FATAL = 1;
@@ -613,22 +570,22 @@ message Node {
NodeType.Enum type = 3;
string model = 4;
Position position = 5;
- repeated string services = 6;
- string emane = 7;
- string icon = 8;
- string image = 9;
- string server = 10;
- repeated string config_services = 11;
- Geo geo = 12;
- string dir = 13;
- string channel = 14;
- int32 canvas = 15;
- map wlan_config = 16;
- map mobility_config = 17;
- map service_configs = 18;
- map config_service_configs= 19;
- repeated emane.NodeEmaneConfig emane_configs = 20;
- map wireless_config = 21;
+ string emane = 6;
+ string icon = 7;
+ string image = 8;
+ string server = 9;
+ repeated string services = 10;
+ Geo geo = 11;
+ string dir = 12;
+ string channel = 13;
+ int32 canvas = 14;
+ map wlan_config = 15;
+ map mobility_config = 16;
+ map service_configs = 17;
+ repeated emane.NodeEmaneConfig emane_configs = 18;
+ map wireless_config = 19;
+ string compose = 20;
+ string compose_name = 21;
}
message Link {
diff --git a/daemon/proto/core/api/grpc/emane.proto b/daemon/proto/core/api/grpc/emane.proto
index b85799179..d7415c9bd 100644
--- a/daemon/proto/core/api/grpc/emane.proto
+++ b/daemon/proto/core/api/grpc/emane.proto
@@ -78,3 +78,76 @@ message EmanePathlossesRequest {
}
message EmanePathlossesResponse {
}
+
+message LocationEvent {
+ optional int32 nem_id = 1;
+ int32 node_id = 2;
+ int32 iface_id = 3;
+ float lon = 4;
+ float lat = 5;
+ float alt = 6;
+ optional float azimuth = 7;
+ optional float elevation = 8;
+ optional float magnitude = 9;
+ optional float roll = 10;
+ optional float pitch = 11;
+ optional float yaw = 12;
+}
+
+message CommEffectEvent {
+ optional int32 nem1_id = 1;
+ int32 node1_id = 2;
+ int32 iface1_id = 3;
+ optional int32 nem2_id = 4;
+ int32 node2_id = 5;
+ int32 iface2_id = 6;
+ int32 delay = 7;
+ int32 jitter = 8;
+ float loss = 9;
+ int32 dup = 10;
+ int32 unicast = 11;
+ int32 broadcast = 12;
+}
+
+message PathlossEvent {
+ optional int32 nem1_id = 1;
+ int32 node1_id = 2;
+ int32 iface1_id = 3;
+ optional int32 nem2_id = 4;
+ int32 node2_id = 5;
+ int32 iface2_id = 6;
+ optional float forward1 = 7;
+ optional float reverse1 = 8;
+ optional float forward2 = 9;
+ optional float reverse2 = 10;
+}
+
+message AntennaProfileEvent {
+ optional int32 nem_id = 1;
+ int32 node_id = 2;
+ int32 iface_id = 3;
+ int32 profile = 4;
+ float azimuth = 5;
+ float elevation = 6;
+}
+
+message FadingSelectionEvent {
+ optional int32 nem_id = 1;
+ int32 node_id = 2;
+ int32 iface_id = 3;
+ string model = 4;
+}
+
+message EmaneEventsRequest {
+ int32 session_id = 1;
+ oneof event_type {
+ LocationEvent location = 2;
+ CommEffectEvent comm_effect = 3;
+ PathlossEvent pathloss = 4;
+ AntennaProfileEvent antenna = 5;
+ FadingSelectionEvent fading = 6;
+ }
+}
+
+message EmaneEventsResponse {
+}
diff --git a/daemon/proto/core/api/grpc/services.proto b/daemon/proto/core/api/grpc/services.proto
index 1b430f99e..6ab7acbde 100644
--- a/daemon/proto/core/api/grpc/services.proto
+++ b/daemon/proto/core/api/grpc/services.proto
@@ -2,30 +2,7 @@ syntax = "proto3";
package services;
-message ServiceConfig {
- int32 node_id = 1;
- string service = 2;
- repeated string startup = 3;
- repeated string validate = 4;
- repeated string shutdown = 5;
- repeated string files = 6;
- repeated string directories = 7;
-}
-
-message ServiceFileConfig {
- int32 node_id = 1;
- string service = 2;
- string file = 3;
- string data = 4;
-}
-
-message ServiceValidationMode {
- enum Enum {
- BLOCKING = 0;
- NON_BLOCKING = 1;
- TIMER = 2;
- }
-}
+import "core/api/grpc/common.proto";
message ServiceAction {
enum Enum {
@@ -41,76 +18,88 @@ message ServiceDefaults {
repeated string services = 2;
}
-message Service {
- string group = 1;
- string name = 2;
+message ServiceActionRequest {
+ int32 session_id = 1;
+ int32 node_id = 2;
+ string service = 3;
+ ServiceAction.Enum action = 4;
}
-message NodeServiceData {
- repeated string executables = 1;
- repeated string dependencies = 2;
- repeated string dirs = 3;
- repeated string configs = 4;
- repeated string startup = 5;
- repeated string validate = 6;
- ServiceValidationMode.Enum validation_mode = 7;
- int32 validation_timer = 8;
- repeated string shutdown = 9;
- string meta = 10;
+message ServiceActionResponse {
+ bool result = 1;
}
-message NodeServiceConfig {
- int32 node_id = 1;
- string service = 2;
- NodeServiceData data = 3;
- map files = 4;
+message ServiceConfig {
+ map templates = 1;
+ map config = 2;
}
-message GetServiceDefaultsRequest {
- int32 session_id = 1;
+message ServiceValidationMode {
+ enum Enum {
+ BLOCKING = 0;
+ NON_BLOCKING = 1;
+ TIMER = 2;
+ }
}
-message GetServiceDefaultsResponse {
- repeated ServiceDefaults defaults = 1;
+message Service {
+ string group = 1;
+ string name = 2;
+ repeated string executables = 3;
+ repeated string dependencies = 4;
+ repeated string directories = 5;
+ repeated string files = 6;
+ repeated string startup = 7;
+ repeated string validate = 8;
+ repeated string shutdown = 9;
+ ServiceValidationMode.Enum validation_mode = 10;
+ int32 validation_timer = 11;
+ float validation_period = 12;
}
-message SetServiceDefaultsRequest {
- int32 session_id = 1;
- repeated ServiceDefaults defaults = 2;
+message ConfigMode {
+ string name = 1;
+ map config = 2;
}
-message SetServiceDefaultsResponse {
- bool result = 1;
+message GetServiceDefaultsRequest {
+ string name = 1;
+ int32 session_id = 2;
+ int32 node_id = 3;
+}
+
+message GetServiceDefaultsResponse {
+ map templates = 1;
+ map config = 2;
+ repeated ConfigMode modes = 3;
}
message GetNodeServiceRequest {
int32 session_id = 1;
int32 node_id = 2;
- string service = 3;
+ string name = 3;
}
message GetNodeServiceResponse {
- NodeServiceData service = 1;
+ map config = 1;
}
-message GetNodeServiceFileRequest {
+message GetServiceRenderedRequest {
int32 session_id = 1;
int32 node_id = 2;
- string service = 3;
- string file = 4;
+ string name = 3;
}
-message GetNodeServiceFileResponse {
- string data = 1;
+message GetServiceRenderedResponse {
+ map rendered = 1;
}
-message ServiceActionRequest {
- int32 session_id = 1;
- int32 node_id = 2;
- string service = 3;
- ServiceAction.Enum action = 4;
+message CreateServiceRequest {
+ Service service = 1;
+ map templates = 2;
+ bool recreate = 3;
}
-message ServiceActionResponse {
- bool result = 1;
+message CreateServiceResponse {
+ bool result = 1;
}
diff --git a/daemon/pyproject.toml b/daemon/pyproject.toml index 0d1acf7aa..d2f77b5e6 100644 ---
a/daemon/pyproject.toml +++ b/daemon/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "core" -version = "9.0.3" +version = "9.1.0" description = "CORE Common Open Research Emulator" authors = ["Boeing Research and Technology"] license = "BSD-2-Clause" @@ -8,7 +8,7 @@ repository = "https://github.com/coreemu/core" documentation = "https://coreemu.github.io/core/" include = [ "core/api/grpc/*", - "core/configservices/*/templates", + "core/services/defaults/*/templates", "core/constants.py", "core/gui/data/**/*", ] @@ -32,7 +32,7 @@ lxml = "4.9.1" netaddr = "0.7.19" protobuf = "4.21.9" pyproj = "3.3.1" -Pillow = "9.4.0" +Pillow = "10.0.1" Mako = "1.2.3" PyYAML = "6.0.1" diff --git a/daemon/tests/conftest.py b/daemon/tests/conftest.py index b668fb071..fdc27be7f 100644 --- a/daemon/tests/conftest.py +++ b/daemon/tests/conftest.py @@ -16,7 +16,6 @@ from core.emulator.enumerations import EventTypes from core.emulator.session import Session from core.nodes.base import CoreNode -from core.nodes.netclient import LinuxNetClient EMANE_SERVICES = "zebra|OSPFv3MDR|IPForward" @@ -54,10 +53,9 @@ def patcher(request): patch_manager.patch("os.mkdir") patch_manager.patch("core.utils.cmd") patch_manager.patch("core.utils.which") + patch_manager.patch("core.emulator.hooks._run_callback") + patch_manager.patch("core.emulator.hooks._run_script") patch_manager.patch("core.nodes.netclient.get_net_client") - patch_manager.patch_obj( - LinuxNetClient, "get_mac", return_value="00:00:00:00:00:00" - ) patch_manager.patch_obj(CoreNode, "create_file") yield patch_manager patch_manager.shutdown() @@ -112,6 +110,7 @@ def grpc_server(module_grpc): def session(global_session): global_session.set_state(EventTypes.CONFIGURATION_STATE) yield global_session + global_session.data_collect() global_session.clear() diff --git a/daemon/tests/test_config_services.py b/daemon/tests/test_config_services.py deleted file mode 100644 index 876b7f320..000000000 --- a/daemon/tests/test_config_services.py +++ /dev/null @@ -1,300 +0,0 @@ -from pathlib import Path -from unittest import mock - -import pytest - -from core.config import ConfigBool, ConfigString -from core.configservice.base import ( - ConfigService, - ConfigServiceBootError, - ConfigServiceMode, -) -from core.errors import CoreCommandError, CoreError - -TEMPLATE_TEXT = "echo hello" - - -class MyService(ConfigService): - name = "MyService" - group = "MyGroup" - directories = ["/usr/local/lib"] - files = ["test.sh"] - executables = [] - dependencies = [] - startup = [f"sh {files[0]}"] - validate = [f"pidof {files[0]}"] - shutdown = [f"pkill {files[0]}"] - validation_mode = ConfigServiceMode.BLOCKING - default_configs = [ - ConfigString(id="value1", label="Text"), - ConfigBool(id="value2", label="Boolean"), - ConfigString( - id="value3", label="Multiple Choice", options=["value1", "value2", "value3"] - ), - ] - modes = { - "mode1": {"value1": "value1", "value2": "0", "value3": "value2"}, - "mode2": {"value1": "value2", "value2": "1", "value3": "value3"}, - "mode3": {"value1": "value3", "value2": "0", "value3": "value1"}, - } - - def get_text_template(self, name: str) -> str: - return TEMPLATE_TEXT - - -class TestConfigServices: - def test_set_template(self): - # given - node = mock.MagicMock() - text = "echo custom" - service = MyService(node) - - # when - service.set_template(MyService.files[0], text) - - # then - assert MyService.files[0] in service.custom_templates - assert service.custom_templates[MyService.files[0]] == text - - def test_create_directories(self): - # given - 
node = mock.MagicMock() - service = MyService(node) - - # when - service.create_dirs() - - # then - directory = Path(MyService.directories[0]) - node.create_dir.assert_called_with(directory) - - def test_create_files_custom(self): - # given - node = mock.MagicMock() - service = MyService(node) - text = "echo custom" - service.set_template(MyService.files[0], text) - - # when - service.create_files() - - # then - file_path = Path(MyService.files[0]) - node.create_file.assert_called_with(file_path, text) - - def test_create_files_text(self): - # given - node = mock.MagicMock() - service = MyService(node) - - # when - service.create_files() - - # then - file_path = Path(MyService.files[0]) - node.create_file.assert_called_with(file_path, TEMPLATE_TEXT) - - def test_run_startup(self): - # given - node = mock.MagicMock() - wait = True - service = MyService(node) - - # when - service.run_startup(wait=wait) - - # then - node.cmd.assert_called_with(MyService.startup[0], wait=wait) - - def test_run_startup_exception(self): - # given - node = mock.MagicMock() - node.cmd.side_effect = CoreCommandError(1, "error") - service = MyService(node) - - # when - with pytest.raises(ConfigServiceBootError): - service.run_startup(wait=True) - - def test_shutdown(self): - # given - node = mock.MagicMock() - service = MyService(node) - - # when - service.stop() - - # then - node.cmd.assert_called_with(MyService.shutdown[0]) - - def test_run_validation(self): - # given - node = mock.MagicMock() - service = MyService(node) - - # when - service.run_validation() - - # then - node.cmd.assert_called_with(MyService.validate[0]) - - def test_run_validation_timer(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.validation_mode = ConfigServiceMode.TIMER - service.validation_timer = 0 - - # when - service.run_validation() - - # then - node.cmd.assert_called_with(MyService.validate[0]) - - def test_run_validation_timer_exception(self): - # given - node = mock.MagicMock() - node.cmd.side_effect = CoreCommandError(1, "error") - service = MyService(node) - service.validation_mode = ConfigServiceMode.TIMER - service.validation_period = 0 - service.validation_timer = 0 - - # when - with pytest.raises(ConfigServiceBootError): - service.run_validation() - - def test_run_validation_non_blocking(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.validation_mode = ConfigServiceMode.NON_BLOCKING - service.validation_period = 0 - service.validation_timer = 0 - - # when - service.run_validation() - - # then - node.cmd.assert_called_with(MyService.validate[0]) - - def test_run_validation_non_blocking_exception(self): - # given - node = mock.MagicMock() - node.cmd.side_effect = CoreCommandError(1, "error") - service = MyService(node) - service.validation_mode = ConfigServiceMode.NON_BLOCKING - service.validation_period = 0 - service.validation_timer = 0 - - # when - with pytest.raises(ConfigServiceBootError): - service.run_validation() - - def test_render_config(self): - # given - node = mock.MagicMock() - service = MyService(node) - - # when - config = service.render_config() - - # then - assert config == {"value1": "", "value2": "", "value3": ""} - - def test_render_config_custom(self): - # given - node = mock.MagicMock() - service = MyService(node) - custom_config = {"value1": "1", "value2": "2", "value3": "3"} - service.set_config(custom_config) - - # when - config = service.render_config() - - # then - assert config == custom_config - - def test_set_config(self): - # given - 
node = mock.MagicMock() - service = MyService(node) - custom_config = {"value1": "1", "value2": "2", "value3": "3"} - - # when - service.set_config(custom_config) - - # then - assert service.custom_config == custom_config - - def test_set_config_exception(self): - # given - node = mock.MagicMock() - service = MyService(node) - custom_config = {"value4": "1"} - - # when - with pytest.raises(CoreError): - service.set_config(custom_config) - - def test_start_blocking(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.create_dirs = mock.MagicMock() - service.create_files = mock.MagicMock() - service.run_startup = mock.MagicMock() - service.run_validation = mock.MagicMock() - service.wait_validation = mock.MagicMock() - - # when - service.start() - - # then - service.create_files.assert_called_once() - service.create_dirs.assert_called_once() - service.run_startup.assert_called_once() - service.run_validation.assert_not_called() - service.wait_validation.assert_not_called() - - def test_start_timer(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.validation_mode = ConfigServiceMode.TIMER - service.create_dirs = mock.MagicMock() - service.create_files = mock.MagicMock() - service.run_startup = mock.MagicMock() - service.run_validation = mock.MagicMock() - service.wait_validation = mock.MagicMock() - - # when - service.start() - - # then - service.create_files.assert_called_once() - service.create_dirs.assert_called_once() - service.run_startup.assert_called_once() - service.run_validation.assert_not_called() - service.wait_validation.assert_called_once() - - def test_start_non_blocking(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.validation_mode = ConfigServiceMode.NON_BLOCKING - service.create_dirs = mock.MagicMock() - service.create_files = mock.MagicMock() - service.run_startup = mock.MagicMock() - service.run_validation = mock.MagicMock() - service.wait_validation = mock.MagicMock() - - # when - service.start() - - # then - service.create_files.assert_called_once() - service.create_dirs.assert_called_once() - service.run_startup.assert_called_once() - service.run_validation.assert_called_once() - service.wait_validation.assert_not_called() diff --git a/daemon/tests/test_core.py b/daemon/tests/test_core.py index 919e44781..868380ca8 100644 --- a/daemon/tests/test_core.py +++ b/daemon/tests/test_core.py @@ -8,16 +8,16 @@ import pytest -from core.emulator.data import IpPrefixes +from core.emulator.data import IpPrefixes, NodeData from core.emulator.session import Session from core.errors import CoreCommandError from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility from core.nodes.base import CoreNode, NodeBase -from core.nodes.network import HubNode, PtpNet, SwitchNode, WlanNode +from core.nodes.network import HubNode, SwitchNode, WlanNode _PATH: Path = Path(__file__).resolve().parent _MOBILITY_FILE: Path = _PATH / "mobility.scen" -_WIRED: List = [PtpNet, HubNode, SwitchNode] +_WIRED: List = [HubNode, SwitchNode] def ping(from_node: CoreNode, to_node: CoreNode, ip_prefixes: IpPrefixes): @@ -134,7 +134,7 @@ def test_mobility(self, session: Session, ip_prefixes: IpPrefixes): def node_update(_): event.set() - session.node_handlers.append(node_update) + session.broadcast_manager.add_handler(NodeData, node_update) # instantiate session session.instantiate() diff --git a/daemon/tests/test_grpc.py b/daemon/tests/test_grpc.py index 9aed33955..40a2751ef 100644 --- 
a/daemon/tests/test_grpc.py +++ b/daemon/tests/test_grpc.py @@ -24,22 +24,22 @@ MobilityAction, MoveNodesRequest, Node, - NodeServiceData, NodeType, Position, ServiceAction, - ServiceValidationMode, + ServiceData, SessionLocation, SessionState, ) from core.emane.models.ieee80211abg import EmaneIeee80211abgModel from core.emane.nodes import EmaneNet -from core.emulator.data import EventData, IpPrefixes, NodeData -from core.emulator.enumerations import EventTypes, ExceptionLevels, MessageFlags +from core.emulator.data import IpPrefixes, NodeData +from core.emulator.enumerations import AlertLevels, EventTypes, MessageFlags from core.errors import CoreError from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility from core.nodes.base import CoreNode from core.nodes.network import SwitchNode, WlanNode +from core.services.defaults.utilservices.services import DefaultRouteService from core.xml.corexml import CoreXmlWriter @@ -93,25 +93,13 @@ def test_start_session(self, grpc_server: CoreGrpcServer, definition): wlan_node.set_mobility({mobility_config_key: mobility_config_value}) # setup service config - service_name = "DefaultRoute" - service_validate = ["echo hello"] - node1.service_configs[service_name] = NodeServiceData( - executables=[], - dependencies=[], - dirs=[], - configs=[], - startup=[], - validate=service_validate, - validation_mode=ServiceValidationMode.NON_BLOCKING, - validation_timer=0, - shutdown=[], - meta="", + service_name = DefaultRouteService.name + file_name = DefaultRouteService.files[0] + file_data = "hello world" + service_data = ServiceData( + templates={file_name: file_data}, ) - - # setup service file config - service_file = "defaultroute.sh" - service_file_data = "echo hello" - node1.service_file_configs[service_name] = {service_file: service_file_data} + node1.service_configs[service_name] = service_data # setup session option option_key = "controlnet" @@ -135,9 +123,9 @@ def test_start_session(self, grpc_server: CoreGrpcServer, definition): assert wlan_node.id in real_session.nodes assert iface1_id in real_session.nodes[node1.id].ifaces assert iface2_id in real_session.nodes[node2.id].ifaces - hook_file, hook_data = real_session.hooks[EventTypes.RUNTIME_STATE][0] - assert hook_file == hook.file - assert hook_data == hook.data + hooks = real_session.hook_manager.script_hooks[EventTypes.RUNTIME_STATE] + real_hook = hooks[hook.file] + assert real_hook == hook.data assert real_session.location.refxyz == (location_x, location_y, location_z) assert real_session.location.refgeo == ( location_lat, @@ -153,16 +141,11 @@ def test_start_session(self, grpc_server: CoreGrpcServer, definition): wlan_node.id, Ns2ScriptedMobility.name ) assert set_mobility_config[mobility_config_key] == mobility_config_value - service = real_session.services.get_service( - node1.id, service_name, default_service=True - ) - assert service.validate == tuple(service_validate) real_node1 = real_session.get_node(node1.id, CoreNode) - service_file = real_session.services.get_service_file( - real_node1, service_name, service_file - ) - assert service_file.data == service_file_data - assert option_value == real_session.options.get(option_key) + real_service = real_node1.services[service_name] + real_templates = real_service.get_templates() + real_template_data = real_templates[file_name] + assert file_data == real_template_data @pytest.mark.parametrize("session_id", [None, 6013]) def test_create_session( @@ -628,80 +611,7 @@ def test_mobility_action(self, grpc_server: CoreGrpcServer): # then 
assert result is True - def test_get_service_defaults(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - - # then - with client.context_connect(): - defaults = client.get_service_defaults(session.id) - - # then - assert len(defaults) > 0 - - def test_set_service_defaults(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - model = "test" - services = ["SSH"] - - # then - with client.context_connect(): - result = client.set_service_defaults(session.id, {model: services}) - - # then - assert result is True - assert session.services.default_services[model] == services - - def test_get_node_service(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - - # then - with client.context_connect(): - service = client.get_node_service(session.id, node.id, "DefaultRoute") - - # then - assert len(service.configs) > 0 - - def test_get_node_service_file(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - - # then - with client.context_connect(): - data = client.get_node_service_file( - session.id, node.id, "DefaultRoute", "defaultroute.sh" - ) - - # then - assert data is not None - def test_service_action(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - options = CoreNode.create_options() - options.legacy = True - node = session.add_node(CoreNode, options=options) - service_name = "DefaultRoute" - - # then - with client.context_connect(): - result = client.service_action( - session.id, node.id, service_name, ServiceAction.STOP - ) - - # then - assert result is True - - def test_config_service_action(self, grpc_server: CoreGrpcServer): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() @@ -710,7 +620,7 @@ def test_config_service_action(self, grpc_server: CoreGrpcServer): # then with client.context_connect(): - result = client.config_service_action( + result = client.service_action( session.id, node.id, service_name, ServiceAction.STOP ) @@ -808,63 +718,36 @@ def handle_event(event: Event) -> None: with client.context_connect(): client.events(session.id, handle_event) time.sleep(0.1) - event_data = EventData( - event_type=EventTypes.RUNTIME_STATE, time=str(time.monotonic()) - ) - session.broadcast_event(event_data) + session.broadcast_event(EventTypes.RUNTIME_STATE) # then queue.get(timeout=5) - def test_exception_events(self, grpc_server: CoreGrpcServer): + def test_alert_events(self, grpc_server: CoreGrpcServer): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() queue = Queue() - exception_level = ExceptionLevels.FATAL + alert_level = AlertLevels.FATAL source = "test" node_id = None - text = "exception message" + text = "alert message" def handle_event(event: Event) -> None: assert event.session_id == session.id - assert event.exception_event is not None - exception_event = event.exception_event - assert exception_event.level.value == exception_level.value - assert exception_event.node_id == 0 - assert exception_event.source == source - assert exception_event.text == text + assert event.alert_event is not None + alert_event = event.alert_event + assert alert_event.level.value == alert_level.value + assert 
alert_event.node_id == 0 + assert alert_event.source == source + assert alert_event.text == text queue.put(event) # then with client.context_connect(): client.events(session.id, handle_event) time.sleep(0.1) - session.exception(exception_level, source, text, node_id) - - # then - queue.get(timeout=5) - - def test_file_events(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - queue = Queue() - - def handle_event(event: Event) -> None: - assert event.session_id == session.id - assert event.file_event is not None - queue.put(event) - - # then - with client.context_connect(): - client.events(session.id, handle_event) - time.sleep(0.1) - file_data = session.services.get_service_file( - node, "DefaultRoute", "defaultroute.sh" - ) - session.broadcast_file(file_data) + session.broadcast_alert(alert_level, source, text, node_id) # then queue.get(timeout=5) @@ -905,7 +788,7 @@ def node_handler(node_data: NodeData): assert n.position.alt == alt queue.put(node_data) - session.node_handlers.append(node_handler) + session.broadcast_manager.add_handler(NodeData, node_handler) # then with client.context_connect(): diff --git a/daemon/tests/test_nodes.py b/daemon/tests/test_nodes.py index bb76bb4e7..50c2179c4 100644 --- a/daemon/tests/test_nodes.py +++ b/daemon/tests/test_nodes.py @@ -172,3 +172,32 @@ def test_net(self, session, net_type): # then assert node assert node.up + + def test_ptp(self, session): + # given + + # when + ptp = session.create_ptp() + + # then + assert ptp + assert ptp.up + + def test_control_net(self, session): + # given + + # when + control_net = session.create_control_net(0, "172.168.0.0/24", None, None) + + # then + assert control_net + assert control_net.up + + def test_control_net_error(self, session): + # given + ip_prefix = "172.168.0.0/24" + session.create_control_net(0, ip_prefix, None, None) + + # when + with pytest.raises(CoreError): + session.create_control_net(0, ip_prefix, None, None) diff --git a/daemon/tests/test_services.py b/daemon/tests/test_services.py index 69234e3a7..de98ae672 100644 --- a/daemon/tests/test_services.py +++ b/daemon/tests/test_services.py @@ -1,376 +1,296 @@ -import itertools from pathlib import Path +from unittest import mock import pytest -from mock import MagicMock -from core.emulator.session import Session -from core.errors import CoreCommandError -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService, ServiceDependencies, ServiceManager - -_PATH: Path = Path(__file__).resolve().parent -_SERVICES_PATH = _PATH / "myservices" - -SERVICE_ONE = "MyService" -SERVICE_TWO = "MyService2" +from core.config import ConfigBool, ConfigString +from core.errors import CoreCommandError, CoreError +from core.services.base import CoreService, ServiceBootError, ServiceMode + +TEMPLATE_TEXT = "echo hello" + + +class MyService(CoreService): + name = "MyService" + group = "MyGroup" + directories = ["/usr/local/lib"] + files = ["test.sh"] + executables = [] + dependencies = [] + startup = [f"sh {files[0]}"] + validate = [f"pidof {files[0]}"] + shutdown = [f"pkill {files[0]}"] + validation_mode = ServiceMode.BLOCKING + default_configs = [ + ConfigString(id="value1", label="Text"), + ConfigBool(id="value2", label="Boolean"), + ConfigString( + id="value3", label="Multiple Choice", options=["value1", "value2", "value3"] + ), + ] + modes = { + "mode1": {"value1": "value1", "value2": "0", "value3": "value2"}, + "mode2": 
{"value1": "value2", "value2": "1", "value3": "value3"}, + "mode3": {"value1": "value3", "value2": "0", "value3": "value1"}, + } + + def get_text_template(self, name: str) -> str: + return TEMPLATE_TEXT class TestServices: - def test_service_all_files(self, session: Session): + def test_set_template(self): # given - ServiceManager.add_services(_SERVICES_PATH) - file_name = "myservice.sh" - node = session.add_node(CoreNode) + node = mock.MagicMock() + text = "echo custom" + service = MyService(node) # when - session.services.set_service_file(node.id, SERVICE_ONE, file_name, "# test") + service.set_template(MyService.files[0], text) # then - service = session.services.get_service(node.id, SERVICE_ONE) - all_files = session.services.all_files(service) - assert service - assert all_files and len(all_files) == 1 + assert MyService.files[0] in service.custom_templates + assert service.custom_templates[MyService.files[0]] == text - def test_service_all_configs(self, session: Session): + def test_create_directories(self): # given - ServiceManager.add_services(_SERVICES_PATH) - node = session.add_node(CoreNode) + node = mock.MagicMock() + service = MyService(node) # when - session.services.set_service(node.id, SERVICE_ONE) - session.services.set_service(node.id, SERVICE_TWO) + service.create_dirs() # then - all_configs = session.services.all_configs() - assert all_configs - assert len(all_configs) == 2 + directory = Path(MyService.directories[0]) + node.create_dir.assert_called_with(directory) - def test_service_add_services(self, session: Session): + def test_create_files_custom(self): # given - ServiceManager.add_services(_SERVICES_PATH) - node = session.add_node(CoreNode) - total_service = len(node.services) + node = mock.MagicMock() + service = MyService(node) + text = "echo custom" + service.set_template(MyService.files[0], text) # when - session.services.add_services(node, node.model, [SERVICE_ONE, SERVICE_TWO]) + service.create_files() # then - assert node.services - assert len(node.services) == total_service + 2 + file_path = Path(MyService.files[0]) + node.create_file.assert_called_with(file_path, text) - def test_service_file(self, request, session: Session): + def test_create_files_text(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) - file_path = Path(my_service.configs[0]) - file_path = node.host_path(file_path) + node = mock.MagicMock() + service = MyService(node) # when - session.services.create_service_files(node, my_service) + service.create_files() # then - if not request.config.getoption("mock"): - assert file_path.exists() + file_path = Path(MyService.files[0]) + node.create_file.assert_called_with(file_path, TEMPLATE_TEXT) - def test_service_validate(self, session: Session): + def test_run_startup(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) - session.services.create_service_files(node, my_service) + node = mock.MagicMock() + wait = True + service = MyService(node) # when - status = session.services.validate_service(node, my_service) + service.run_startup(wait=wait) # then - assert not status + node.cmd.assert_called_with(MyService.startup[0], wait=wait, shell=True) - def test_service_validate_error(self, session: Session): + def test_run_startup_exception(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_TWO) - node = 
session.add_node(CoreNode) - session.services.create_service_files(node, my_service) - node.cmd = MagicMock(side_effect=CoreCommandError(-1, "invalid")) + node = mock.MagicMock() + node.cmd.side_effect = CoreCommandError(1, "error") + service = MyService(node) # when - status = session.services.validate_service(node, my_service) - - # then - assert status + with pytest.raises(ServiceBootError): + service.run_startup(wait=True) - def test_service_startup(self, session: Session): + def test_shutdown(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) - session.services.create_service_files(node, my_service) + node = mock.MagicMock() + service = MyService(node) # when - status = session.services.startup_service(node, my_service, wait=True) + service.stop() # then - assert not status + node.cmd.assert_called_with(MyService.shutdown[0], shell=True) - def test_service_startup_error(self, session: Session): + def test_run_validation(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_TWO) - node = session.add_node(CoreNode) - session.services.create_service_files(node, my_service) - node.cmd = MagicMock(side_effect=CoreCommandError(-1, "invalid")) + node = mock.MagicMock() + service = MyService(node) # when - status = session.services.startup_service(node, my_service, wait=True) + service.run_validation() # then - assert status + node.cmd.assert_called_with(MyService.validate[0], shell=True) - def test_service_stop(self, session: Session): + def test_run_validation_timer(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) - session.services.create_service_files(node, my_service) + node = mock.MagicMock() + service = MyService(node) + service.validation_mode = ServiceMode.TIMER + service.validation_timer = 0 # when - status = session.services.stop_service(node, my_service) + service.run_validation() # then - assert not status + node.cmd.assert_called_with(MyService.validate[0], shell=True) + + def test_run_validation_timer_exception(self): + # given + node = mock.MagicMock() + node.cmd.side_effect = CoreCommandError(1, "error") + service = MyService(node) + service.validation_mode = ServiceMode.TIMER + service.validation_period = 0 + service.validation_timer = 0 + + # when + with pytest.raises(ServiceBootError): + service.run_validation() - def test_service_stop_error(self, session: Session): + def test_run_validation_non_blocking(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_TWO) - node = session.add_node(CoreNode) - session.services.create_service_files(node, my_service) - node.cmd = MagicMock(side_effect=CoreCommandError(-1, "invalid")) + node = mock.MagicMock() + service = MyService(node) + service.validation_mode = ServiceMode.NON_BLOCKING + service.validation_period = 0 + service.validation_timer = 0 # when - status = session.services.stop_service(node, my_service) + service.run_validation() # then - assert status + node.cmd.assert_called_with(MyService.validate[0], shell=True) - def test_service_custom_startup(self, session: Session): + def test_run_validation_non_blocking_exception(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) + node = mock.MagicMock() + node.cmd.side_effect = CoreCommandError(1, "error") + 
service = MyService(node) + service.validation_mode = ServiceMode.NON_BLOCKING + service.validation_period = 0 + service.validation_timer = 0 # when - session.services.set_service(node.id, my_service.name) - custom_my_service = session.services.get_service(node.id, my_service.name) - custom_my_service.startup = ("sh custom.sh",) + with pytest.raises(ServiceBootError): + service.run_validation() + + def test_render_config(self): + # given + node = mock.MagicMock() + service = MyService(node) + + # when + config = service.render_config() # then - assert my_service.startup != custom_my_service.startup + assert config == {"value1": "", "value2": "0", "value3": ""} - def test_service_set_file(self, session: Session): + def test_render_config_custom(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_ONE) - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - file_name = my_service.configs[0] - file_data1 = "# custom file one" - file_data2 = "# custom file two" - session.services.set_service_file( - node1.id, my_service.name, file_name, file_data1 - ) - session.services.set_service_file( - node2.id, my_service.name, file_name, file_data2 - ) + node = mock.MagicMock() + service = MyService(node) + custom_config = {"value1": "1", "value2": "2", "value3": "3"} + service.set_config(custom_config) # when - custom_service1 = session.services.get_service(node1.id, my_service.name) - session.services.create_service_files(node1, custom_service1) - custom_service2 = session.services.get_service(node2.id, my_service.name) - session.services.create_service_files(node2, custom_service2) - - def test_service_import(self): - """ - Test importing a custom service. - """ - ServiceManager.add_services(_SERVICES_PATH) - assert ServiceManager.get(SERVICE_ONE) - assert ServiceManager.get(SERVICE_TWO) - - def test_service_setget(self, session: Session): + config = service.render_config() + + # then + assert config == custom_config + + def test_set_config(self): # given - ServiceManager.add_services(_SERVICES_PATH) - my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) + node = mock.MagicMock() + service = MyService(node) + custom_config = {"value1": "1", "value2": "2", "value3": "3"} # when - no_service = session.services.get_service(node.id, SERVICE_ONE) - default_service = session.services.get_service( - node.id, SERVICE_ONE, default_service=True - ) - session.services.set_service(node.id, SERVICE_ONE) - custom_service = session.services.get_service( - node.id, SERVICE_ONE, default_service=True - ) + service.set_config(custom_config) # then - assert no_service is None - assert default_service == my_service - assert custom_service and custom_service != my_service + assert service.custom_config == custom_config - def test_services_dependency(self): + def test_set_config_exception(self): # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_d = CoreService() - service_d.name = "d" - service_e = CoreService() - service_e.name = "e" - service_a.dependencies = (service_b.name,) - service_b.dependencies = () - service_c.dependencies = (service_b.name, service_d.name) - service_d.dependencies = () - service_e.dependencies = () - services = [service_a, service_b, service_c, service_d, service_e] - expected1 = {service_a.name, service_b.name, service_c.name, service_d.name} - expected2 = [service_e] + node = 
mock.MagicMock() + service = MyService(node) + custom_config = {"value4": "1"} # when - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = list(permutation) - results = ServiceDependencies(permutation).boot_order() - # then - for result in results: - result_set = {x.name for x in result} - if len(result) == 4: - a_index = result.index(service_a) - b_index = result.index(service_b) - c_index = result.index(service_c) - d_index = result.index(service_d) - assert b_index < a_index - assert b_index < c_index - assert d_index < c_index - assert result_set == expected1 - elif len(result) == 1: - assert expected2 == result - else: - raise ValueError( - f"unexpected result: {results}, perm({permutation})" - ) - - def test_services_dependency_missing(self): - # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_a.dependencies = (service_b.name,) - service_b.dependencies = (service_c.name,) - service_c.dependencies = ("d",) - services = [service_a, service_b, service_c] - - # when, then - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = list(permutation) - with pytest.raises(ValueError): - ServiceDependencies(permutation).boot_order() - - def test_services_dependency_cycle(self): + with pytest.raises(CoreError): + service.set_config(custom_config) + + def test_start_blocking(self): # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_a.dependencies = (service_b.name,) - service_b.dependencies = (service_c.name,) - service_c.dependencies = (service_a.name,) - services = [service_a, service_b, service_c] - - # when, then - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = list(permutation) - with pytest.raises(ValueError): - ServiceDependencies(permutation).boot_order() - - def test_services_dependency_common(self): + node = mock.MagicMock() + service = MyService(node) + service.create_dirs = mock.MagicMock() + service.create_files = mock.MagicMock() + service.run_startup = mock.MagicMock() + service.run_validation = mock.MagicMock() + service.wait_validation = mock.MagicMock() + + # when + service.start() + + # then + service.create_files.assert_called_once() + service.create_dirs.assert_called_once() + service.run_startup.assert_called_once() + service.run_validation.assert_not_called() + service.wait_validation.assert_not_called() + + def test_start_timer(self): # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_d = CoreService() - service_d.name = "d" - service_a.dependencies = (service_b.name,) - service_c.dependencies = (service_d.name, service_b.name) - services = [service_a, service_b, service_c, service_d] - expected = {service_a.name, service_b.name, service_c.name, service_d.name} + node = mock.MagicMock() + service = MyService(node) + service.validation_mode = ServiceMode.TIMER + service.create_dirs = mock.MagicMock() + service.create_files = mock.MagicMock() + service.run_startup = mock.MagicMock() + service.run_validation = mock.MagicMock() + service.wait_validation = mock.MagicMock() # when - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = 
list(permutation) - results = ServiceDependencies(permutation).boot_order() - - # then - for result in results: - assert len(result) == 4 - result_set = {x.name for x in result} - a_index = result.index(service_a) - b_index = result.index(service_b) - c_index = result.index(service_c) - d_index = result.index(service_d) - assert b_index < a_index - assert d_index < c_index - assert b_index < c_index - assert expected == result_set - - def test_services_dependency_common2(self): + service.start() + + # then + service.create_files.assert_called_once() + service.create_dirs.assert_called_once() + service.run_startup.assert_called_once() + service.run_validation.assert_not_called() + service.wait_validation.assert_called_once() + + def test_start_non_blocking(self): # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_d = CoreService() - service_d.name = "d" - service_a.dependencies = (service_b.name,) - service_b.dependencies = (service_c.name, service_d.name) - service_c.dependencies = (service_d.name,) - services = [service_a, service_b, service_c, service_d] - expected = {service_a.name, service_b.name, service_c.name, service_d.name} + node = mock.MagicMock() + service = MyService(node) + service.validation_mode = ServiceMode.NON_BLOCKING + service.create_dirs = mock.MagicMock() + service.create_files = mock.MagicMock() + service.run_startup = mock.MagicMock() + service.run_validation = mock.MagicMock() + service.wait_validation = mock.MagicMock() # when - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = list(permutation) - results = ServiceDependencies(permutation).boot_order() - - # then - for result in results: - assert len(result) == 4 - result_set = {x.name for x in result} - a_index = result.index(service_a) - b_index = result.index(service_b) - c_index = result.index(service_c) - d_index = result.index(service_d) - assert b_index < a_index - assert c_index < b_index - assert d_index < b_index - assert d_index < c_index - assert expected == result_set + service.start() + + # then + service.create_files.assert_called_once() + service.create_dirs.assert_called_once() + service.run_startup.assert_called_once() + service.run_validation.assert_called_once() + service.wait_validation.assert_not_called() diff --git a/daemon/tests/test_xml.py b/daemon/tests/test_xml.py index 6841da8e7..04deb3079 100644 --- a/daemon/tests/test_xml.py +++ b/daemon/tests/test_xml.py @@ -11,7 +11,7 @@ from core.location.mobility import BasicRangeModel from core.nodes.base import CoreNode from core.nodes.network import SwitchNode, WlanNode -from core.services.utility import SshService +from core.services.defaults.utilservices.services import DefaultRouteService class TestXml: @@ -46,14 +46,13 @@ def test_xml_hooks(self, session: Session, tmpdir: TemporaryFile): session.shutdown() # load saved xml + session.directory.mkdir() session.open_xml(file_path, start=True) # verify nodes have been recreated - runtime_hooks = session.hooks.get(state) - assert runtime_hooks - runtime_hook = runtime_hooks[0] - assert file_name == runtime_hook[0] - assert data == runtime_hook[1] + hooks = session.hook_manager.script_hooks[state] + runtime_data = hooks[file_name] + assert runtime_data == data def test_xml_ptp( self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes @@ -98,6 +97,7 @@ def test_xml_ptp( assert len(session.link_manager.links()) == 0 # 
load saved xml + session.directory.mkdir() session.open_xml(file_path, start=True) # verify nodes have been recreated @@ -125,12 +125,10 @@ def test_xml_ptp_services( session.add_link(node1.id, node2.id, iface1_data, iface2_data) # set custom values for node service - session.services.set_service(node1.id, SshService.name) - service_file = SshService.configs[0] + service = node1.services[DefaultRouteService.name] + file_name = DefaultRouteService.files[0] file_data = "# test" - session.services.set_service_file( - node1.id, SshService.name, service_file, file_data - ) + service.set_template(file_name, file_data) # instantiate session session.instantiate() @@ -154,15 +152,18 @@ def test_xml_ptp_services( assert not session.get_node(node2.id, CoreNode) # load saved xml + session.directory.mkdir() session.open_xml(file_path, start=True) # retrieve custom service - service = session.services.get_service(node1.id, SshService.name) + node1_xml = session.get_node(node1.id, CoreNode) + service_xml = node1_xml.services[DefaultRouteService.name] # verify nodes have been recreated assert session.get_node(node1.id, CoreNode) assert session.get_node(node2.id, CoreNode) - assert service.config_data.get(service_file) == file_data + templates = service_xml.get_templates() + assert file_data == templates[file_name] def test_xml_mobility( self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes @@ -211,6 +212,7 @@ def test_xml_mobility( assert not session.get_node(node2.id, CoreNode) # load saved xml + session.directory.mkdir() session.open_xml(file_path, start=True) # retrieve configuration we set originally @@ -258,6 +260,7 @@ def test_network_to_network(self, session: Session, tmpdir: TemporaryFile): assert not session.get_node(switch2.id, SwitchNode) # load saved xml + session.directory.mkdir() session.open_xml(file_path, start=True) # verify nodes have been recreated @@ -314,6 +317,7 @@ def test_link_options( assert not session.get_node(switch.id, SwitchNode) # load saved xml + session.directory.mkdir() session.open_xml(file_path, start=True) # verify nodes have been recreated @@ -377,6 +381,7 @@ def test_link_options_ptp( assert not session.get_node(node2.id, CoreNode) # load saved xml + session.directory.mkdir() session.open_xml(file_path, start=True) # verify nodes have been recreated @@ -410,7 +415,7 @@ def test_link_options_bidirectional( # create link options1 = LinkOptions() - options1.unidirectional = 1 + options1.unidirectional = True options1.bandwidth = 5000 options1.delay = 10 options1.loss = 10.5 @@ -421,7 +426,7 @@ def test_link_options_bidirectional( node1.id, node2.id, iface1_data, iface2_data, options1 ) options2 = LinkOptions() - options2.unidirectional = 1 + options2.unidirectional = True options2.bandwidth = 10000 options2.delay = 20 options2.loss = 10 @@ -452,6 +457,7 @@ def test_link_options_bidirectional( assert not session.get_node(node2.id, CoreNode) # load saved xml + session.directory.mkdir() session.open_xml(file_path, start=True) # verify nodes have been recreated diff --git a/dockerfiles/Dockerfile.centos b/dockerfiles/Dockerfile.centos deleted file mode 100644 index 066544865..000000000 --- a/dockerfiles/Dockerfile.centos +++ /dev/null @@ -1,78 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM centos:7 -LABEL Description="CORE Docker CentOS Image" - -ARG PREFIX=/usr -ARG BRANCH=master -ENV LANG en_US.UTF-8 -ARG PROTOC_VERSION=3.19.6 -ARG VENV_PATH=/opt/core/venv -ENV PATH="$PATH:${VENV_PATH}/bin" -WORKDIR /opt - -# install system dependencies -RUN yum -y update && 
\ - yum install -y \ - xterm \ - git \ - sudo \ - wget \ - tzdata \ - unzip \ - libpcap-devel \ - libpcre3-devel \ - libxml2-devel \ - protobuf-devel \ - unzip \ - uuid-devel \ - tcpdump \ - make && \ - yum-builddep -y python3 && \ - yum autoremove -y && \ - yum install -y hostname - -# install python3.9 -RUN wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz && \ - tar xf Python-3.9.15.tgz && \ - cd Python-3.9.15 && \ - ./configure --enable-optimizations --with-ensurepip=install && \ - make -j$(nproc) altinstall && \ - python3.9 -m pip install --upgrade pip && \ - cd /opt && \ - rm -rf Python-3.9.15 - -# install core -RUN git clone https://github.com/coreemu/core && \ - cd core && \ - git checkout ${BRANCH} && \ - NO_SYSTEM=1 PYTHON=/usr/local/bin/python3.9 ./setup.sh && \ - PATH=/root/.local/bin:$PATH PYTHON=/usr/local/bin/python3.9 inv install -v -p ${PREFIX} --no-python - -# install emane -RUN wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - cd emane-1.3.3-release-1/rpms/el7/x86_64 && \ - yum install -y epel-release && \ - yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm && \ - cd ../../../.. && \ - rm emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - rm -rf emane-1.3.3-release-1 - -# install emane python bindings -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \ - mkdir protoc && \ - unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \ - git clone https://github.com/adjacentlink/emane.git && \ - cd emane && \ - git checkout v1.3.3 && \ - ./autogen.sh && \ - PYTHON=${VENV_PATH}/bin/python ./configure --prefix=/usr && \ - cd src/python && \ - PATH=/opt/protoc/bin:$PATH make && \ - ${VENV_PATH}/bin/python -m pip install . && \ - cd /opt && \ - rm -rf protoc && \ - rm -rf emane && \ - rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip - -WORKDIR /root diff --git a/dockerfiles/Dockerfile.centos-package b/dockerfiles/Dockerfile.centos-package deleted file mode 100644 index 8d4a12969..000000000 --- a/dockerfiles/Dockerfile.centos-package +++ /dev/null @@ -1,89 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM centos:7 -LABEL Description="CORE CentOS Image" - -ENV LANG en_US.UTF-8 -ARG PROTOC_VERSION=3.19.6 -ARG VENV_PATH=/opt/core/venv -ENV PATH="$PATH:${VENV_PATH}/bin" -WORKDIR /opt - -# install basic dependencies -RUN yum -y update && \ - yum install -y \ - xterm \ - git \ - sudo \ - wget \ - tzdata \ - unzip \ - libpcap-devel \ - libpcre3-devel \ - libxml2-devel \ - protobuf-devel \ - unzip \ - uuid-devel \ - tcpdump \ - automake \ - gawk \ - libreadline-devel \ - libtool \ - pkg-config \ - make && \ - yum-builddep -y python3 && \ - yum autoremove -y && \ - yum install -y hostname - -# install python3.9 -RUN wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz && \ - tar xf Python-3.9.15.tgz && \ - cd Python-3.9.15 && \ - ./configure --enable-optimizations --with-ensurepip=install && \ - make -j$(nproc) altinstall && \ - python3.9 -m pip install --upgrade pip && \ - cd /opt && \ - rm -rf Python-3.9.15 - -# install core -COPY core_*.rpm . 
-RUN PYTHON=/usr/local/bin/python3.9 yum install -y ./core_*.rpm && \ - rm -f core_*.rpm - -# install ospf mdr -RUN git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git && \ - cd ospf-mdr && \ - ./bootstrap.sh && \ - ./configure --disable-doc --enable-user=root --enable-group=root \ - --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ - --localstatedir=/var/run/quagga && \ - make -j$(nproc) && \ - make install && \ - cd /opt && \ - rm -rf ospf-mdr - - # install emane -RUN wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - cd emane-1.3.3-release-1/rpms/el7/x86_64 && \ - yum install -y epel-release && \ - yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm && \ - cd ../../../.. && \ - rm emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - rm -rf emane-1.3.3-release-1 - -# install emane python bindings -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \ - mkdir protoc && \ - unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \ - git clone https://github.com/adjacentlink/emane.git && \ - cd emane && \ - git checkout v1.3.3 && \ - ./autogen.sh && \ - PYTHON=${VENV_PATH}/bin/python ./configure --prefix=/usr && \ - cd src/python && \ - PATH=/opt/protoc/bin:$PATH make && \ - ${VENV_PATH}/bin/python -m pip install . && \ - cd /opt && \ - rm -rf protoc && \ - rm -rf emane && \ - rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip diff --git a/dockerfiles/Dockerfile.emane-python b/dockerfiles/Dockerfile.emane-python new file mode 100644 index 000000000..ada90278f --- /dev/null +++ b/dockerfiles/Dockerfile.emane-python @@ -0,0 +1,40 @@ +# syntax=docker/dockerfile:1 +FROM ubuntu:22.04 +RUN apt-get update -y && \ + apt-get install -y --no-install-recommends \ + automake \ + ca-certificates \ + g++ \ + git \ + libpcap-dev \ + libpcre3-dev \ + libprotobuf-dev \ + libtool \ + libxml2-dev \ + make \ + pkg-config \ + python3 \ + python3-pip \ + unzip \ + uuid-dev \ + wget && \ + apt-get autoremove -y && \ + rm -rf /var/lib/apt/lists/* +WORKDIR /opt +ARG PROTOC_VERSION=3.19.6 +RUN wget -q https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \ + mkdir protoc && \ + unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \ + git clone https://github.com/adjacentlink/emane.git && \ + cd emane && \ + git checkout v1.5.1 && \ + ./autogen.sh && \ + PYTHON=python3 ./configure --prefix=/usr && \ + cd src/python && \ + PATH=/opt/protoc/bin:$PATH make && \ + python3 setup.py bdist_wheel && \ + mv dist/*.whl /opt/ && \ + cd /opt && \ + rm -rf protoc && \ + rm -rf emane && \ + rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip diff --git a/dockerfiles/Dockerfile.ospf-mdr-deb b/dockerfiles/Dockerfile.ospf-mdr-deb new file mode 100644 index 000000000..db0bfb587 --- /dev/null +++ b/dockerfiles/Dockerfile.ospf-mdr-deb @@ -0,0 +1,32 @@ +# syntax=docker/dockerfile:1 +FROM ubuntu:22.04 +WORKDIR /opt +RUN apt-get update -y && \ + apt-get install -y --no-install-recommends \ + git \ + ca-certificates \ + automake \ + make \ + libtool \ + pkg-config \ + gawk \ + g++ \ + dpkg-dev \ + debhelper \ + libreadline-dev \ + texinfo \ + imagemagick \ + groff \ + build-essential:native \ + texlive-latex-recommended \ + texlive-plain-generic && \ + git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git && \ + cd 
ospf-mdr && \ + ./bootstrap.sh && \ + ./configure && \ + (make -f quagga.deb.mk build || make -f quagga.deb.mk build) && \ + mv quagga-mr_0.99*.deb /opt/ && \ + cd /opt && \ + rm -rf ospf-mdr && \ + apt-get autoremove -y && \ + rm -rf /var/lib/apt/lists/* diff --git a/dockerfiles/Dockerfile.ospf-mdr-rpm b/dockerfiles/Dockerfile.ospf-mdr-rpm new file mode 100644 index 000000000..aecbe302d --- /dev/null +++ b/dockerfiles/Dockerfile.ospf-mdr-rpm @@ -0,0 +1,33 @@ +# syntax=docker/dockerfile:1 +FROM rockylinux:8 +WORKDIR /opt +RUN yum update -y && \ + yum install -y epel-release dnf-plugins-core && \ + yum config-manager --set-enabled powertools && \ + yum update -y && \ + yum install -y \ + texinfo \ + rpm-build \ + texlive-base \ + texinfo-tex \ + texi2html \ + readline-devel \ + libpcap-devel \ + ImageMagick \ + git \ + automake \ + libtool \ + pkg-config \ + gcc-c++ \ + libcap-devel \ + make && \ + git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git && \ + cd ospf-mdr && \ + ./bootstrap.sh && \ + ./configure && \ + (make -f quagga.rpm.mk build || make -f quagga.rpm.mk build) && \ + mv .rpmbuild/RPMS/x86_64/quagga-mr-0.99*.rpm /opt/ && \ + cd /opt && \ + rm -rf ospf-mdr && \ + yum autoremove -y && \ + yum clean all diff --git a/dockerfiles/Dockerfile.rocky b/dockerfiles/Dockerfile.rocky new file mode 100644 index 000000000..eed276103 --- /dev/null +++ b/dockerfiles/Dockerfile.rocky @@ -0,0 +1,81 @@ +# syntax=docker/dockerfile:1 +FROM rockylinux:8 +ENV LANG en_US.UTF-8 +WORKDIR /opt + +# install system dependencies +RUN yum update -y && \ + yum install -y \ + xterm \ + wget \ + tcpdump \ + python39 \ + python39-tkinter \ + iproute-tc && \ + yum autoremove -y && \ + yum clean all + +# install ospf mdr +RUN yum update -y && \ + yum install -y \ + automake \ + gcc-c++ \ + libtool \ + make \ + pkg-config \ + readline-devel \ + git && \ + git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git && \ + cd ospf-mdr && \ + ./bootstrap.sh && \ + ./configure --disable-doc --enable-user=root --enable-group=root \ + --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ + --localstatedir=/var/run/quagga && \ + make -j$(nproc) && \ + make install && \ + cd /opt && \ + rm -rf ospf-mdr && \ + yum remove -y \ + automake \ + gcc-c++ \ + libtool \ + make \ + pkg-config \ + readline-devel \ + git && \ + yum autoremove -y --skip-broken && \ + yum clean all + +# install emane +ARG EMANE_VERSION=1.5.1 +ARG EMANE_RELEASE=emane-${EMANE_VERSION}-release-1 +ARG EMANE_PACKAGE=${EMANE_RELEASE}.el8.x86_64.tar.gz +RUN yum update -y && \ + wget -q https://adjacentlink.com/downloads/emane/${EMANE_PACKAGE} && \ + tar xf ${EMANE_PACKAGE} && \ + cd ${EMANE_RELEASE}/rpms/el8/x86_64 && \ + rm emane-spectrum-tools-*.rpm emane-model-lte*.rpm && \ + rm *devel*.rpm && \ + yum install -y ./emane*.rpm ./python3-emane-${EMANE_VERSION}-1.el8.noarch.rpm && \ + cd ../../../.. && \ + rm ${EMANE_PACKAGE} && \ + rm -rf ${EMANE_RELEASE} && \ + yum autoremove -y && \ + yum clean all + +# install core +ARG CORE_PACKAGE=core_9.1.0_x86_64.rpm +ARG PACKAGE_URL=https://github.com/coreemu/core/releases/latest/download/${CORE_PACKAGE} +RUN yum update -y && \ + wget -q ${PACKAGE_URL} && \ + PYTHON=python3.9 yum install -y ./${CORE_PACKAGE} && \ + rm -f ${CORE_PACKAGE} && \ + yum autoremove -y && \ + yum clean all + +# install emane python bindings +ARG VENV_PATH=/opt/core/venv +COPY --from=emane-python /opt/emane-*.whl . 
+RUN ${VENV_PATH}/bin/python -m pip install ./emane-*.whl + +WORKDIR /root diff --git a/dockerfiles/Dockerfile.ubuntu b/dockerfiles/Dockerfile.ubuntu index 8eceebf77..fd07fa90a 100644 --- a/dockerfiles/Dockerfile.ubuntu +++ b/dockerfiles/Dockerfile.ubuntu @@ -1,61 +1,62 @@ # syntax=docker/dockerfile:1 FROM ubuntu:22.04 -LABEL Description="CORE Docker Ubuntu Image" - -ARG PREFIX=/usr/local -ARG BRANCH=master -ARG PROTOC_VERSION=3.19.6 -ARG VENV_PATH=/opt/core/venv ENV DEBIAN_FRONTEND=noninteractive -ENV PATH="$PATH:${VENV_PATH}/bin" WORKDIR /opt # install system dependencies RUN apt-get update -y && \ apt-get install -y --no-install-recommends \ ca-certificates \ - git \ - sudo \ + xterm \ + psmisc \ + python3 \ + python3-tk \ + python3-pip \ + python3-venv \ wget \ - tzdata \ - libpcap-dev \ - libpcre3-dev \ - libprotobuf-dev \ - libxml2-dev \ - protobuf-compiler \ - unzip \ - uuid-dev \ iproute2 \ iputils-ping \ tcpdump && \ - apt-get autoremove -y + apt-get autoremove -y && \ + rm -rf /var/lib/apt/lists/* -# install core -RUN git clone https://github.com/coreemu/core && \ - cd core && \ - git checkout ${BRANCH} && \ - ./setup.sh && \ - PATH=/root/.local/bin:$PATH inv install -v -p ${PREFIX} && \ - cd /opt && \ - rm -rf ospf-mdr +# install ospf mdr +COPY --from=ospf-deb /opt/quagga-mr_0.99*.deb . +RUN apt-get update -y && \ + apt-get install -y --no-install-recommends \ + ./quagga-mr_0.99*.deb && \ + apt-get autoremove -y && \ + rm -rf /var/lib/apt/lists/* # install emane -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \ - mkdir protoc && \ - unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \ - git clone https://github.com/adjacentlink/emane.git && \ - cd emane && \ - ./autogen.sh && \ - ./configure --prefix=/usr && \ - make -j$(nproc) && \ - make install && \ - cd src/python && \ - make clean && \ - PATH=/opt/protoc/bin:$PATH make && \ - ${VENV_PATH}/bin/python -m pip install . && \ - cd /opt && \ - rm -rf protoc && \ - rm -rf emane && \ - rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip +ARG EMANE_RELEASE=emane-1.5.1-release-1 +ARG EMANE_PACKAGE=${EMANE_RELEASE}.ubuntu-22_04.amd64.tar.gz +RUN apt-get update -y && \ + wget -q https://adjacentlink.com/downloads/emane/${EMANE_PACKAGE} && \ + tar xf ${EMANE_PACKAGE} && \ + cd ${EMANE_RELEASE}/debs/ubuntu-22_04/amd64 && \ + rm emane-spectrum-tools*.deb emane-model-lte*.deb && \ + rm *dev*.deb && \ + apt-get install -y --no-install-recommends ./emane*.deb ./python3-emane_*.deb && \ + cd ../../../.. && \ + rm ${EMANE_PACKAGE} && \ + rm -rf ${EMANE_RELEASE} && \ + apt-get autoremove -y && \ + rm -rf /var/lib/apt/lists/* + +# install core +ARG CORE_PACKAGE=core_9.1.0_amd64.deb +ARG PACKAGE_URL=https://github.com/coreemu/core/releases/latest/download/${CORE_PACKAGE} +RUN apt-get update -y && \ + wget -q ${PACKAGE_URL} && \ + apt-get install -y --no-install-recommends ./${CORE_PACKAGE} && \ + rm -f ${CORE_PACKAGE} && \ + apt-get autoremove -y && \ + rm -rf /var/lib/apt/lists/* + +# install emane python bindings +ARG VENV_PATH=/opt/core/venv +COPY --from=emane-python /opt/emane-*.whl . 
+RUN ${VENV_PATH}/bin/python -m pip install ./emane-*.whl WORKDIR /root diff --git a/dockerfiles/Dockerfile.ubuntu-package b/dockerfiles/Dockerfile.ubuntu-package deleted file mode 100644 index b8f66165d..000000000 --- a/dockerfiles/Dockerfile.ubuntu-package +++ /dev/null @@ -1,75 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM ubuntu:22.04 -LABEL Description="CORE Docker Ubuntu Image" - -ENV DEBIAN_FRONTEND=noninteractive -ARG PROTOC_VERSION=3.19.6 -ARG VENV_PATH=/opt/core/venv -ENV PATH="$PATH:${VENV_PATH}/bin" -WORKDIR /opt - -# install basic dependencies -RUN apt-get update -y && \ - apt-get install -y --no-install-recommends \ - ca-certificates \ - python3 \ - python3-tk \ - python3-pip \ - python3-venv \ - libpcap-dev \ - libpcre3-dev \ - libprotobuf-dev \ - libxml2-dev \ - protobuf-compiler \ - unzip \ - uuid-dev \ - automake \ - gawk \ - git \ - wget \ - libreadline-dev \ - libtool \ - pkg-config \ - g++ \ - make \ - iputils-ping \ - tcpdump && \ - apt-get autoremove -y - -# install core -COPY core_*.deb . -RUN apt-get install -y ./core_*.deb && \ - rm -f core_*.deb - -# install ospf mdr -RUN git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git && \ - cd ospf-mdr && \ - ./bootstrap.sh && \ - ./configure --disable-doc --enable-user=root --enable-group=root \ - --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ - --localstatedir=/var/run/quagga && \ - make -j$(nproc) && \ - make install && \ - cd /opt && \ - rm -rf ospf-mdr - -# install emane -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \ - mkdir protoc && \ - unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \ - git clone https://github.com/adjacentlink/emane.git && \ - cd emane && \ - ./autogen.sh && \ - ./configure --prefix=/usr && \ - make -j$(nproc) && \ - make install && \ - cd src/python && \ - make clean && \ - PATH=/opt/protoc/bin:$PATH make && \ - ${VENV_PATH}/bin/python -m pip install . && \ - cd /opt && \ - rm -rf protoc && \ - rm -rf emane && \ - rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip - -WORKDIR /root diff --git a/docs/configservices.md b/docs/configservices.md deleted file mode 100644 index da81aa489..000000000 --- a/docs/configservices.md +++ /dev/null @@ -1,196 +0,0 @@ -# Config Services - -## Overview - -Config services are a newer version of services for CORE, that leverage a -templating engine, for more robust service file creation. They also -have the power of configuration key/value pairs that values that can be -defined and displayed within the GUI, to help further tweak a service, -as needed. - -CORE services are a convenience for creating reusable dynamic scripts -to run on nodes, for carrying out specific task(s). - -This boilds down to the following functions: - -* generating files the service will use, either directly for commands or for configuration -* command(s) for starting a service -* command(s) for validating a service -* command(s) for stopping a service - -Most CORE nodes will have a default set of services to run, associated with -them. You can however customize the set of services a node will use. Or even -further define a new node type within the GUI, with a set of services, that -will allow quickly dragging and dropping that node type during creation. 
- -## Available Services - -| Service Group | Services | -|----------------------------------|-----------------------------------------------------------------------| -| [BIRD](services/bird.md) | BGP, OSPF, RADV, RIP, Static | -| [EMANE](services/emane.md) | Transport Service | -| [FRR](services/frr.md) | BABEL, BGP, OSPFv2, OSPFv3, PIMD, RIP, RIPNG, Zebra | -| [NRL](services/nrl.md) | arouted, MGEN Sink, MGEN Actor, NHDP, OLSR, OLSRORG, OLSRv2, SMF | -| [Quagga](services/quagga.md) | BABEL, BGP, OSPFv2, OSPFv3, OSPFv3 MDR, RIP, RIPNG, XPIMD, Zebra | -| [SDN](services/sdn.md) | OVS, RYU | -| [Security](services/security.md) | Firewall, IPsec, NAT, VPN Client, VPN Server | -| [Utility](services/utility.md) | ATD, Routing Utils, DHCP, FTP, IP Forward, PCAP, RADVD, SSF, UCARP | -| [XORP](services/xorp.md) | BGP, OLSR, OSPFv2, OSPFv3, PIMSM4, PIMSM6, RIP, RIPNG, Router Manager | - -## Node Types and Default Services - -Here are the default node types and their services: - -| Node Type | Services | -|-----------|--------------------------------------------------------------------------------------------------------------------------------------------| -| *router* | zebra, OSFPv2, OSPFv3, and IPForward services for IGP link-state routing. | -| *PC* | DefaultRoute service for having a default route when connected directly to a router. | -| *mdr* | zebra, OSPFv3MDR, and IPForward services for wireless-optimized MANET Designated Router routing. | -| *prouter* | a physical router, having the same default services as the *router* node type; for incorporating Linux testbed machines into an emulation. | - -Configuration files can be automatically generated by each service. For -example, CORE automatically generates routing protocol configuration for the -router nodes in order to simplify the creation of virtual networks. - -To change the services associated with a node, double-click on the node to -invoke its configuration dialog and click on the *Services...* button, -or right-click a node a choose *Services...* from the menu. -Services are enabled or disabled by clicking on their names. The button next to -each service name allows you to customize all aspects of this service for this -node. For example, special route redistribution commands could be inserted in -to the Quagga routing configuration associated with the zebra service. - -To change the default services associated with a node type, use the Node Types -dialog available from the *Edit* button at the end of the Layer-3 nodes -toolbar, or choose *Node types...* from the *Session* menu. Note that -any new services selected are not applied to existing nodes if the nodes have -been customized. - -The node types are saved in the GUI config file **~/.coregui/config.yaml**. -Keep this in mind when changing the default services for -existing node types; it may be better to simply create a new node type. It is -recommended that you do not change the default built-in node types. - -## New Services - -Services can save time required to configure nodes, especially if a number -of nodes require similar configuration procedures. New services can be -introduced to automate tasks. - -### Creating New Services - -!!! note - - The directory base name used in **custom_services_dir** below should - be unique and should not correspond to any existing Python module name. - For example, don't use the name **subprocess** or **services**. - -1. Modify the example service shown below - to do what you want. 
It could generate config/script files, mount per-node - directories, start processes/scripts, etc. Your file can define one or more - classes to be imported. You can create multiple Python files that will be imported. - -2. Put these files in a directory such as **~/.coregui/custom_services**. - -3. Add a **custom_config_services_dir = ~/.coregui/custom_services** entry to the - /etc/core/core.conf file. - -4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax) - should be displayed in the terminal (or service log, like journalctl). - -5. Start using your custom service on your nodes. You can create a new node - type that uses your service, or change the default services for an existing - node type, or change individual nodes. - -### Example Custom Service - -Below is the skeleton for a custom service with some documentation. Most -people would likely only setup the required class variables **(name/group)**. -Then define the **files** to generate and implement the -**get_text_template** function to dynamically create the files wanted. Finally, -the **startup** commands would be supplied, which typically tend to be -running the shell files generated. - -```python -from typing import Dict, List - -from core.config import ConfigString, ConfigBool, Configuration -from core.configservice.base import ConfigService, ConfigServiceMode, ShadowDir - - -# class that subclasses ConfigService -class ExampleService(ConfigService): - # unique name for your service within CORE - name: str = "Example" - # the group your service is associated with, used for display in GUI - group: str = "ExampleGroup" - # directories that the service should shadow mount, hiding the system directory - directories: List[str] = [ - "/usr/local/core", - ] - # files that this service should generate, defaults to nodes home directory - # or can provide an absolute path to a mounted directory - files: List[str] = [ - "example-start.sh", - "/usr/local/core/file1", - ] - # executables that should exist on path, that this service depends on - executables: List[str] = [] - # other services that this service depends on, can be used to define service start order - dependencies: List[str] = [] - # commands to run to start this service - startup: List[str] = [] - # commands to run to validate this service - validate: List[str] = [] - # commands to run to stop this service - shutdown: List[str] = [] - # validation mode, blocking, non-blocking, and timer - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - # configurable values that this service can use, for file generation - default_configs: List[Configuration] = [ - ConfigString(id="value1", label="Text"), - ConfigBool(id="value2", label="Boolean"), - ConfigString(id="value3", label="Multiple Choice", options=["value1", "value2", "value3"]), - ] - # sets of values to set for the configuration defined above, can be used to - # provide convenient sets of values to typically use - modes: Dict[str, Dict[str, str]] = { - "mode1": {"value1": "value1", "value2": "0", "value3": "value2"}, - "mode2": {"value1": "value2", "value2": "1", "value3": "value3"}, - "mode3": {"value1": "value3", "value2": "0", "value3": "value1"}, - } - # defines directories that this service can help shadow within a node - shadow_directories: List[ShadowDir] = [ - ShadowDir(path="/user/local/core", src="/opt/core") - ] - - def get_text_template(self, name: str) -> str: - return """ - # sample script 1 - # node id(${node.id}) name(${node.name}) - # config: ${config} - echo hello - """ 
-``` - -#### Validation Mode - -Validation modes are used to determine if a service has started up successfully. - -* blocking - startup commands are expected to run til completion and return 0 exit code -* non-blocking - startup commands are ran, but do not wait for completion -* timer - startup commands are ran, and an arbitrary amount of time is waited to consider started - -#### Shadow Directories - -Shadow directories provide a convenience for copying a directory and the files within -it to a nodes home directory, to allow a unique set of per node files. - -* `ShadowDir(path="/user/local/core")` - copies files at the given location into the node -* `ShadowDir(path="/user/local/core", src="/opt/core")` - copies files to the given location, - but sourced from the provided location -* `ShadowDir(path="/user/local/core", templates=True)` - copies files and treats them as - templates for generation -* `ShadowDir(path="/user/local/core", has_node_paths=True)` - copies files from the given - location, and looks for unique node names directories within it, using a directory named - default, when not preset diff --git a/docs/ctrlnet.md b/docs/ctrlnet.md index d20e3a417..ac4345912 100644 --- a/docs/ctrlnet.md +++ b/docs/ctrlnet.md @@ -22,7 +22,7 @@ interface configured with an address corresponding to its node number (e.g. *172.16.0.3* for *n3*.) A default for the primary control network may also be specified by setting -the *controlnet* line in the */etc/core/core.conf* configuration file which +the *controlnet* line in the */opt/core/etc/core.conf* configuration file which new sessions will use by default. To simultaneously run multiple sessions with control networks, the session option should be used instead of the *core.conf* default. @@ -56,10 +56,10 @@ done !!! note If adjustments to the primary control network configuration made in - **/etc/core/core.conf** do not seem to take affect, check if there is anything + **/opt/core/etc/core.conf** do not seem to take affect, check if there is anything set in the *Session Menu*, the *Options...* dialog. They may need to be cleared. These per session settings override the defaults in - **/etc/core/core.conf**. + **/opt/core/etc/core.conf**. ## Control Network in Distributed Sessions @@ -72,10 +72,10 @@ can be accessed, just like the single server case. In some situations, remote emulated nodes need to communicate with the host on which they are running and not the master server. Multiple control network prefixes can be specified in the either the session option or -*/etc/core/core.conf*, separated by spaces and beginning with the master +*/opt/core/etc/core.conf*, separated by spaces and beginning with the master server. Each entry has the form *"server:prefix"*. For example, if the servers *core1*,*core2*, and *core3* are assigned with nodes in the scenario and using -*/etc/core/core.conf* instead of the session option. +*/opt/core/etc/core.conf* instead of the session option. ```shell controlnet=core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.1.0/24 @@ -94,7 +94,7 @@ is desired. The control network script may help with this. ## Control Network Script A control network script may be specified using the *controlnet_updown_script* -option in the */etc/core/core.conf* file. This script will be run after the +option in the */opt/core/etc/core.conf* file. This script will be run after the bridge has been built (and address assigned) with the first argument being the name of the bridge, and the second argument being the keyword *"startup"*. 
The script will again be invoked prior to bridge removal with the second @@ -106,10 +106,10 @@ Starting with EMANE 0.9.2, CORE will run EMANE instances within namespaces. Since it is advisable to separate the OTA traffic from other traffic, we will need more than single channel leading out from the namespace. Up to three auxiliary control networks may be defined. Multiple control networks are set -up in */etc/core/core.conf* file. Lines *controlnet1*, *controlnet2* and +up in */opt/core/etc/core.conf* file. Lines *controlnet1*, *controlnet2* and *controlnet3* define the auxiliary networks. -For example, having the following */etc/core/core.conf*: +For example, having the following */opt/core/etc/core.conf*: ```shell controlnet = core1:172.17.1.0/24 core2:172.17.2.0/24 core3:172.17.3.0/24 @@ -135,7 +135,7 @@ configuration for auxiliary control networks. To extend the auxiliary control networks across a distributed test environment, host network interfaces need to be added to them. The following -lines in */etc/core/core.conf* will add host devices *eth1*, *eth2* and *eth3* +lines in */opt/core/etc/core.conf* will add host devices *eth1*, *eth2* and *eth3* to *controlnet1*, *controlnet2*, *controlnet3*: ```shell diff --git a/docs/devguide.md b/docs/devguide.md index 4fa439771..974efe416 100644 --- a/docs/devguide.md +++ b/docs/devguide.md @@ -17,29 +17,53 @@ daemon. Here is a brief description of the source directories. To setup CORE for develop we will leverage to automated install script. -## Clone CORE Repo +## Install the Development Environment -```shell +The current recommended development environment is Ubuntu 22.04. This section +covers a complete example for installing CORE on a clean install. It will help +setup CORE in development mode, OSPF MDR, and EMANE. + +``` shell +# install system packages +sudo apt-get update -y +sudo apt-get install -y ca-certificates git sudo wget tzdata libpcap-dev libpcre3-dev \ + libprotobuf-dev libxml2-dev protobuf-compiler unzip uuid-dev iproute2 iputils-ping \ + tcpdump + +# install core cd ~/Documents -git clone https://github.com/coreemu/core.git +git clone https://github.com/coreemu/core cd core -git checkout develop -``` +./setup.sh +source ~/.bashrc +inv install -d -## Install the Development Environment - -This command will automatically install system dependencies, clone and build OSPF-MDR, -build CORE, setup the CORE poetry environment, and install pre-commit hooks. You can -refer to the [install docs](install.md) for issues related to different distributions. +# install emane +cd ~/Documents +wget https://adjacentlink.com/downloads/emane/emane-1.5.1-release-1.ubuntu-22_04.amd64.tar.gz +tar xf emane-1.5.1-release-1.ubuntu-22_04.amd64.tar.gz +cd emane-1.5.1-release-1/debs/ubuntu-22_04/amd64 +sudo apt-get install -y ./openstatistic*.deb ./emane*.deb ./python3-emane_*.deb -```shell -./install -d +# install emane python bindings +cd ~/Documents +wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip +mkdir protoc +unzip protoc-3.19.6-linux-x86_64.zip -d protoc +git clone https://github.com/adjacentlink/emane.git +cd emane +git checkout v1.5.1 +./autogen.sh +./configure --prefix=/usr +cd src/python +PATH=~/Documents/protoc/bin:$PATH make +sudo /opt/core/venv/bin/python -m pip install . ``` ### pre-commit pre-commit hooks help automate running tools to check modified code. 
Every time a commit is made -python utilities will be ran to check validity of code, potentially failing and backing out the commit. +python utilities will be run to check validity of code, potentially failing and backing out the commit. These changes are currently mandated as part of the current CI, so add the changes and commit again. ## Running CORE @@ -61,10 +85,8 @@ inv test-mock ## Linux Network Namespace Commands -Linux network namespace containers are often managed using the *Linux Container Tools* or *lxc-tools* package. -The lxc-tools website is available here http://lxc.sourceforge.net/ for more information. CORE does not use these -management utilities, but includes its own set of tools for instantiating and configuring network namespace containers. -This section describes these tools. +CORE includes its own set of tools for instantiating and configuring network namespace +containers. This section describes these tools. ### vnoded diff --git a/docs/distributed.md b/docs/distributed.md index 95ec72687..6ed2338e7 100644 --- a/docs/distributed.md +++ b/docs/distributed.md @@ -13,7 +13,7 @@ distributed CORE package and some configuration to allow SSH as root. CORE configuration settings required for using distributed functionality. -Edit **/etc/core/core.conf** or specific configuration file being used. +Edit **/opt/core/etc/core.conf** or specific configuration file being used. ```shell # uncomment and set this to the address that remote servers @@ -58,14 +58,16 @@ First the distributed servers must be configured to allow passwordless root login over SSH. On distributed server: - ```shelll # install openssh-server sudo apt install openssh-server # open sshd config vi /etc/ssh/sshd_config +``` +Modify the following settings: +``` # verify these configurations in file PermitRootLogin yes PasswordAuthentication yes @@ -74,7 +76,13 @@ PasswordAuthentication yes # accept all env variables AcceptEnv * -# restart sshd +# if you are going to need more than 10 nodes on the server +# change this setting, as it defaults to 10 +MaxSessions 10 +``` + +Restart the sshd daemon: +``` sudo systemctl restart sshd ``` diff --git a/docs/docker.md b/docs/docker.md index 562fd453b..e515314c5 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -21,7 +21,7 @@ Custom configuration required to avoid iptable rules being added and removing the need for the default docker network, since core will be orchestrating connections between nodes. -Place the file below in **/etc/docker/docker.json** +Place the file below in **/etc/docker/daemon.json** ```json { diff --git a/docs/emane.md b/docs/emane.md index a034c63bf..91022fb49 100644 --- a/docs/emane.md +++ b/docs/emane.md @@ -71,7 +71,7 @@ Every topic below assumes CORE, EMANE, and OSPF MDR have been installed. ## EMANE Configuration -The CORE configuration file **/etc/core/core.conf** has options specific to +The CORE configuration file **/opt/core/etc/core.conf** has options specific to EMANE. An example emane section from the **core.conf** file is shown below: ```shell @@ -124,7 +124,7 @@ Here is an example model with documentation describing functionality: Example custom emane model. 
""" from pathlib import Path -from typing import Dict, Optional, Set, List +from typing import Optional from core.config import Configuration from core.emane import emanemanifest, emanemodel @@ -162,18 +162,20 @@ class ExampleModel(emanemodel.EmaneModel): name: str = "emane_example" mac_library: str = "rfpipemaclayer" - mac_xml: str = "/usr/share/emane/manifest/rfpipemaclayer.xml" - mac_defaults: Dict[str, str] = { + mac_xml: str = "rfpipemaclayer.xml" + mac_defaults: dict[str, str] = { "pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml" } - mac_config: List[Configuration] = [] + mac_config: list[Configuration] = [] phy_library: Optional[str] = None - phy_xml: str = "/usr/share/emane/manifest/emanephy.xml" - phy_defaults: Dict[str, str] = { - "subid": "1", "propagationmodel": "2ray", "noisemode": "none" + phy_xml: str = "emanephy.xml" + phy_defaults: dict[str, str] = { + "subid": "1", + "propagationmodel": "2ray", + "noisemode": "none", } - phy_config: List[Configuration] = [] - config_ignore: Set[str] = set() + phy_config: list[Configuration] = [] + config_ignore: set[str] = set() @classmethod def load(cls, emane_prefix: Path) -> None: @@ -192,6 +194,7 @@ class ExampleModel(emanemodel.EmaneModel): # load phy configuration phy_xml_path = emane_prefix / manifest_path / cls.phy_xml cls.phy_config = emanemanifest.parse(phy_xml_path, cls.phy_defaults) + ``` ## Single PC with EMANE diff --git a/docs/grpc.md b/docs/grpc.md index 3266a57d6..bfb025d65 100644 --- a/docs/grpc.md +++ b/docs/grpc.md @@ -1,4 +1,4 @@ -* Table of Contents +# gRPC API ## Overview @@ -97,7 +97,7 @@ Event types: * node - events for node movements and icon changes * link - events for link configuration changes and wireless link add/delete * config - configuration events when legacy gui joins a session -* exception - alert/error events +* alert - alert events * file - file events when the legacy gui joins a session ```python diff --git a/docs/gui.md b/docs/gui.md index c296ac186..b86dab811 100644 --- a/docs/gui.md +++ b/docs/gui.md @@ -392,13 +392,6 @@ system and anything you change or do can impact the greater system. By default, will open within the nodes home directory for the running session, but it is temporary and will be removed when the session is stopped. -You can also launch GUI based applications from within standard CORE nodes, but you need to -enable xhost access to root. - -```shell -xhost +local:root -``` - ### Mobility Scripting CORE has a few ways to script mobility. @@ -467,11 +460,11 @@ create a bridge or namespace, or the failure to launch EMANE processes for an EMANE-based network. Clicking on an alert displays details for that -exceptio. The exception source is a text string -to help trace where the exception occurred; "service:UserDefined" for example, +alert. The alert source is a text string +to help trace where the alert occurred; "service:UserDefined" for example, would appear for a failed validation command with the UserDefined service. -A button is available at the bottom of the dialog for clearing the exception +A button is available at the bottom of the dialog for clearing the alert list. ## Customizing your Topology's Look diff --git a/docs/install.md b/docs/install.md index 51c05dbc1..c99d595db 100644 --- a/docs/install.md +++ b/docs/install.md @@ -6,12 +6,16 @@ ## Overview -CORE currently supports and provides the following installation options, with the package -option being preferred. 
+This page will provide details on various options that can be used +when installing CORE. -* [Package based install (rpm/deb)](#package-based-install) -* [Script based install](#script-based-install) -* [Dockerfile based install](#dockerfile-based-install) +### Complete Examples + +For complete examples installing CORE, OSPF MDR, EMANE, and the EMANE python +bindings, see the pages below. + +* [Installing on Ubuntu 22.04](install_ubuntu.md) +* [Installing on Rocky Linux 8](install_rocky.md) ### Requirements @@ -19,6 +23,7 @@ Any computer capable of running Linux should be able to run CORE. Since the phys containers, as a general rule you should select a machine having as much RAM and CPU resources as possible. * Linux Kernel v3.3+ +* Python 3.9+ * iproute2 4.5+ is a requirement for bridge related commands * nftables compatible kernel and nft command line tool @@ -46,7 +51,9 @@ The following is a list of files that would be installed after installation. * virtualenv `/opt/core/venv/bin` * local `/usr/local/bin` * configuration files - * `/etc/core/{core.conf, logging.conf}` + * `/opt/core/etc/{core.conf, logging.conf}` +* examples, tutorials, and data files + * `/opt/core/share` * ospf mdr repository files when using script based install * `/../ospf-mdr` @@ -95,15 +102,6 @@ sudo yum remove core sudo apt remove core ``` -## Installation Examples - -The below links will take you to sections providing complete examples for installing -CORE and related utilities on fresh installations. Otherwise, a breakdown for installing -different components and the options available are detailed below. - -* [Ubuntu 22.04](install_ubuntu.md) -* [CentOS 7](install_centos.md) - ## Package Based Install Starting with 9.0.0 there are pre-built rpm/deb packages. You can retrieve the @@ -265,29 +263,6 @@ an installation to your use case. inv install --dry -v -p -i ``` -## Dockerfile Based Install - -You can leverage one of the provided Dockerfiles, to run and launch CORE within a Docker container. - -Since CORE nodes will leverage software available within the system for a given use case, -make sure to update and build the Dockerfile with desired software. - -```shell -# clone core -git clone https://github.com/coreemu/core.git -cd core -# build image -sudo docker build -t core -f dockerfiles/Dockerfile. . -# start container -sudo docker run -itd --name core -e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --privileged core -# enable xhost access to the root user -xhost +local:root -# launch core-gui -sudo docker exec -it core core-gui -``` - -When done see [Post Install](#post-install). - ## Installing EMANE !!! note @@ -360,38 +335,13 @@ Place the file contents below in **/etc/docker/docker.json** } ``` -### Resolving Path Issues - -One problem running CORE you may run into, using the virtual environment or locally -can be issues related to your path. - -To add support for your user to run scripts from the virtual environment: - -```shell -# can add to ~/.bashrc -export PATH=$PATH:/opt/core/venv/bin -``` - -This will not solve the path issue when running as sudo, so you can do either -of the following to compensate. - -```shell -# run command passing in the right PATH to pickup from the user running the command -sudo env PATH=$PATH core-daemon - -# add an alias to ~/.bashrc or something similar -alias sudop='sudo env PATH=$PATH' -# now you can run commands like so -sudop core-daemon -``` - ### Running CORE -The following assumes I have resolved PATH issues and setup the `sudop` alias. 
+In typical usage CORE is made up of two parts, the **core-daemon** (server) and the **core-gui** (client). ```shell -# in one terminal run the server daemon using the alias above -sudop core-daemon +# in one terminal run the server daemon +sudo core-daemon # in another terminal run the gui client core-gui ``` diff --git a/docs/install_centos.md b/docs/install_centos.md deleted file mode 100644 index 53de2af61..000000000 --- a/docs/install_centos.md +++ /dev/null @@ -1,144 +0,0 @@ -# Install CentOS - -## Overview - -Below is a detailed path for installing CORE and related tooling on a fresh -CentOS 7 install. Both of the examples below will install CORE into its -own virtual environment located at **/opt/core/venv**. Both examples below -also assume using **~/Documents** as the working directory. - -## Script Install - -This section covers step by step commands that can be used to install CORE using -the script based installation path. - -``` shell -# install system packages -sudo yum -y update -sudo yum install -y git sudo wget tzdata unzip libpcap-devel libpcre3-devel \ - libxml2-devel protobuf-devel unzip uuid-devel tcpdump make epel-release -sudo yum-builddep -y python3 - -# install python3.9 -cd ~/Documents -wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz -tar xf Python-3.9.15.tgz -cd Python-3.9.15 -./configure --enable-optimizations --with-ensurepip=install -sudo make -j$(nproc) altinstall -python3.9 -m pip install --upgrade pip - -# install core -cd ~/Documents -git clone https://github.com/coreemu/core -cd core -NO_SYSTEM=1 PYTHON=/usr/local/bin/python3.9 ./setup.sh -source ~/.bashrc -PYTHON=python3.9 inv install -p /usr --no-python - -# install emane -cd ~/Documents -wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz -tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz -cd emane-1.3.3-release-1/rpms/el7/x86_64 -sudo yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm - -# install emane python bindings into CORE virtual environment -cd ~/Documents -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip -mkdir protoc -unzip protoc-3.19.6-linux-x86_64.zip -d protoc -git clone https://github.com/adjacentlink/emane.git -cd emane -git checkout v1.3.3 -./autogen.sh -PYTHON=/opt/core/venv/bin/python ./configure --prefix=/usr -cd src/python -PATH=~/Documents/protoc/bin:$PATH make -sudo /opt/core/venv/bin/python -m pip install . -``` - -## Package Install - -This section covers step by step commands that can be used to install CORE using -the package based installation path. This will require downloading a package from the release -page, to use during the install CORE step below. 
- -``` shell -# install system packages -sudo yum -y update -sudo yum install -y git sudo wget tzdata unzip libpcap-devel libpcre3-devel libxml2-devel \ - protobuf-devel unzip uuid-devel tcpdump automake gawk libreadline-devel libtool \ - pkg-config make -sudo yum-builddep -y python3 - -# install python3.9 -cd ~/Documents -wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz -tar xf Python-3.9.15.tgz -cd Python-3.9.15 -./configure --enable-optimizations --with-ensurepip=install -sudo make -j$(nproc) altinstall -python3.9 -m pip install --upgrade pip - -# install core -cd ~/Documents -sudo PYTHON=python3.9 yum install -y ./core_*.rpm - -# install ospf mdr -cd ~/Documents -git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git -cd ospf-mdr -./bootstrap.sh -./configure --disable-doc --enable-user=root --enable-group=root \ - --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ - --localstatedir=/var/run/quagga -make -j$(nproc) -sudo make install - -# install emane -cd ~/Documents -wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz -tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz -cd emane-1.3.3-release-1/rpms/el7/x86_64 -sudo yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm - -# install emane python bindings into CORE virtual environment -cd ~/Documents -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip -mkdir protoc -unzip protoc-3.19.6-linux-x86_64.zip -d protoc -git clone https://github.com/adjacentlink/emane.git -cd emane -git checkout v1.3.3 -./autogen.sh -PYTHON=/opt/core/venv/bin/python ./configure --prefix=/usr -cd src/python -PATH=~/Documents/protoc/bin:$PATH make -sudo /opt/core/venv/bin/python -m pip install . -``` - -## Setup PATH - -The CORE virtual environment and related scripts will not be found on your PATH, -so some adjustments needs to be made. - -To add support for your user to run scripts from the virtual environment: - -```shell -# can add to ~/.bashrc -export PATH=$PATH:/opt/core/venv/bin -``` - -This will not solve the path issue when running as sudo, so you can do either -of the following to compensate. - -```shell -# run command passing in the right PATH to pickup from the user running the command -sudo env PATH=$PATH core-daemon - -# add an alias to ~/.bashrc or something similar -alias sudop='sudo env PATH=$PATH' -# now you can run commands like so -sudop core-daemon -``` diff --git a/docs/install_docker.md b/docs/install_docker.md new file mode 100644 index 000000000..92a9ec182 --- /dev/null +++ b/docs/install_docker.md @@ -0,0 +1,46 @@ +# Install Docker + +## Overview + +CORE can be installed into and ran from a Docker container. This section will cover how you can build and run +CORE from a Docker based image. + +## Build Image + +You can leverage one of the provided Dockerfiles to build a CORE based image. Since CORE nodes will leverage software +available within the system for a given use case, make sure to update and build the Dockerfile with desired software. + +The example Dockerfiles are not meant to be an end all solution, but a solid starting point for running CORE. 
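For example, one way to customize is a small derived Dockerfile that starts from the image built in the steps below and layers in the tools your nodes need; the tag, file name, and packages here are only illustrative and assume the Ubuntu-based image (adjust the package manager for the Rocky image).

```shell
# hypothetical example: extend the "core" image built below with extra node software
cat > Dockerfile.custom <<'EOF'
FROM core
RUN apt-get update -y && apt-get install -y iperf3 tcpdump traceroute
EOF
sudo docker build -t core-custom -f Dockerfile.custom .
```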
+ +Provided Dockerfiles: + +* Dockerfile.emane-python - Build EMANE python bindings for use in files below +* Dockerfile.rocky - Rocky Linux 8, CORE from latest package, OSPF MDR, and EMANE +* Dockerfile.ubuntu - Ubuntu 22.04, CORE from latest package, OSPF MDR, and EMANE + +```shell +# clone core +git clone https://github.com/coreemu/core.git +cd core +# first you must build EMANE python bindings +sudo docker build -t emane-python -f dockerfiles/Dockerfile.emane-python . +# build ospf packages +sudo docker build -t ospf-rpm -f dockerfiles/Dockerfile.ospf-mdr-rpm . +sudo docker build -t ospf-deb -f dockerfiles/Dockerfile.ospf-mdr-deb . +# build desired CORE image +sudo docker build -t core -f dockerfiles/ . +``` + +## Run Container + +There are some required parameters when starting a CORE based Docker container for CORE to function properly. These +are shown below in the run command. + +```shell +# start container into the background and run the core-daemon by default +sudo docker run -itd --name core -e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --privileged --entrypoint core-daemon core +# enable xhost access to the root user, this will allow you to run the core-gui from the container +xhost +local:root +# launch core-gui from the running container launched previously +sudo docker exec -it core core-gui +``` diff --git a/docs/install_rocky.md b/docs/install_rocky.md new file mode 100644 index 000000000..f42d8f2f9 --- /dev/null +++ b/docs/install_rocky.md @@ -0,0 +1,99 @@ +# Install Rocky + +## Overview + +This helps provide an example for installation into a RHEL 8 like +environment. Below is a detailed example for installing CORE and related tooling on a fresh +Rocky Linux 8 install. Both of the examples below will install CORE into its +own virtual environment located at **/opt/core/venv**. Both examples below +also assume using **~/Documents** as the working directory. + +## Install + +This section covers step by step commands that can be used to install CORE using +the package based installation path. This will require downloading a package from the +[release page](https://github.com/coreemu/core/releases), to use during the install CORE step below. 
+ +``` shell +# install system packages +sudo yum -y update +sudo yum install -y \ + xterm \ + wget \ + tcpdump \ + python39 \ + python39-tkinter \ + iproute-tc + +# install ospf mdr +cd ~/Documents +sudo yum install -y \ + automake \ + gcc-c++ \ + libtool \ + make \ + pkg-config \ + readline-devel \ + git +git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git +cd ospf-mdr +./bootstrap.sh +./configure --disable-doc --enable-user=root --enable-group=root \ + --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ + --localstatedir=/var/run/quagga +make -j$(nproc) +sudo make install + +# install emane +cd ~/Documents +EMANE_VERSION=1.5.1 +EMANE_RELEASE=emane-${EMANE_VERSION}-release-1 +EMANE_PACKAGE=${EMANE_RELEASE}.el8.x86_64.tar.gz +wget -q https://adjacentlink.com/downloads/emane/${EMANE_PACKAGE} +tar xf ${EMANE_PACKAGE} +cd ${EMANE_RELEASE}/rpms/el8/x86_64 +rm emane-spectrum-tools-*.rpm emane-model-lte*.rpm +rm *devel*.rpm +sudo yum install -y ./emane*.rpm ./python3-emane-${EMANE_VERSION}-1.el8.noarch.rpm + +# install core +cd ~/Documents +CORE_PACKAGE=core_9.1.0_x86_64.rpm +PACKAGE_URL=https://github.com/coreemu/core/releases/latest/download/${CORE_PACKAGE} +wget -q ${PACKAGE_URL} +PYTHON=python3.9 yum install -y ./${CORE_PACKAGE} + +# install emane python bindings into CORE virtual environment +cd ~/Documents +sudo yum install -y dnf-plugins-core +sudo yum config-manager --set-enabled devel +sudo yum update -y +sudo yum install -y \ + protobuf-devel \ + libxml2-devel \ + pcre-devel \ + libuuid-devel \ + libpcap-devel +wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip +mkdir protoc +unzip protoc-3.19.6-linux-x86_64.zip -d protoc +git clone https://github.com/adjacentlink/emane.git +cd emane +git checkout v${EMANE_VERSION} +./autogen.sh +PYTHON=/opt/core/venv/bin/python ./configure --prefix=/usr +cd src/python +PATH=~/Documents/protoc/bin:$PATH make +sudo /opt/core/venv/bin/python -m pip install . +``` + +## Running CORE + +This install will place CORE within a virtual environment, symlinks to CORE scripts will be added to **/usr/bin**. + +```shell +# in one terminal run the server daemon +sudo core-daemon +# in another terminal run the gui client +core-gui +``` diff --git a/docs/install_ubuntu.md b/docs/install_ubuntu.md index 57274a4f0..3fe364318 100644 --- a/docs/install_ubuntu.md +++ b/docs/install_ubuntu.md @@ -7,63 +7,39 @@ Ubuntu 22.04 installation. Both of the examples below will install CORE into its own virtual environment located at **/opt/core/venv**. Both examples below also assume using **~/Documents** as the working directory. -## Script Install +## Install This section covers step by step commands that can be used to install CORE using -the script based installation path. +the package based installation path. This will require downloading a package from the +[release page](https://github.com/coreemu/core/releases), to use during the install CORE step below. 
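Worth noting before the steps below: the package's post-install script (**package/after-install-deb.sh**, shown later in this changeset) honors a few environment variables that tweak how the python side of CORE is installed. The command here is an illustrative sketch, not part of the standard steps.

```shell
# NO_PYTHON=1  skip the python portion of the install entirely
# NO_VENV=1    install the CORE wheel into the chosen python instead of /opt/core/venv
# PYTHON=...   pick the interpreter used by the post-install script (defaults to python3)
# depending on your sudoers policy you may need "sudo env VAR=..." instead
sudo NO_VENV=1 PYTHON=python3 apt-get install -y ./core_*.deb
```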
``` shell # install system packages sudo apt-get update -y -sudo apt-get install -y ca-certificates git sudo wget tzdata libpcap-dev libpcre3-dev \ - libprotobuf-dev libxml2-dev protobuf-compiler unzip uuid-dev iproute2 iputils-ping \ +sudo apt-get install -y \ + ca-certificates \ + xterm \ + psmisc \ + python3 \ + python3-tk \ + python3-pip \ + python3-venv \ + wget \ + iproute2 \ + iputils-ping \ tcpdump -# install core -cd ~/Documents -git clone https://github.com/coreemu/core -cd core -./setup.sh -source ~/.bashrc -inv install - -# install emane -cd ~/Documents -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip -mkdir protoc -unzip protoc-3.19.6-linux-x86_64.zip -d protoc -git clone https://github.com/adjacentlink/emane.git -cd emane -./autogen.sh -./configure --prefix=/usr -make -j$(nproc) -sudo make install -cd src/python -make clean -PATH=~/Documents/protoc/bin:$PATH make -sudo /opt/core/venv/bin/python -m pip install . -``` - -## Package Install - -This section covers step by step commands that can be used to install CORE using -the package based installation path. This will require downloading a package from the release -page, to use during the install CORE step below. - -``` shell -# install system packages -sudo apt-get update -y -sudo apt-get install -y ca-certificates python3 python3-tk python3-pip python3-venv \ - libpcap-dev libpcre3-dev libprotobuf-dev libxml2-dev protobuf-compiler unzip \ - uuid-dev automake gawk git wget libreadline-dev libtool pkg-config g++ make \ - iputils-ping tcpdump - -# install core -cd ~/Documents -sudo apt-get install -y ./core_*.deb - # install ospf mdr cd ~/Documents +apt-get install -y \ + automake \ + gawk \ + g++ \ + libreadline-dev \ + libtool \ + make \ + pkg-config \ + git git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git cd ospf-mdr ./bootstrap.sh @@ -75,42 +51,52 @@ sudo make install # install emane cd ~/Documents +EMANE_RELEASE=emane-1.5.1-release-1 +EMANE_PACKAGE=${EMANE_RELEASE}.ubuntu-22_04.amd64.tar.gz +wget -q https://adjacentlink.com/downloads/emane/${EMANE_PACKAGE} +tar xf ${EMANE_PACKAGE} +cd ${EMANE_RELEASE}/debs/ubuntu-22_04/amd64 +rm emane-spectrum-tools*.deb emane-model-lte*.deb +rm *dev*.deb +sudo apt-get install -y ./emane*.deb ./python3-emane_*.deb + +# install core +cd ~/Documents +CORE_PACKAGE=core_9.1.0_amd64.deb +PACKAGE_URL=https://github.com/coreemu/core/releases/latest/download/${CORE_PACKAGE} +wget -q ${PACKAGE_URL} +sudo apt-get install -y ./${CORE_PACKAGE} + +# install emane python bindings +cd ~/Documents +sudo apt-get install -y \ + unzip \ + libpcap-dev \ + libpcre3-dev \ + libprotobuf-dev \ + libxml2-dev \ + protobuf-compiler \ + uuid-dev wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip mkdir protoc unzip protoc-3.19.6-linux-x86_64.zip -d protoc git clone https://github.com/adjacentlink/emane.git cd emane +git checkout v1.5.1 ./autogen.sh ./configure --prefix=/usr -make -j$(nproc) -sudo make install cd src/python -make clean PATH=~/Documents/protoc/bin:$PATH make sudo /opt/core/venv/bin/python -m pip install . ``` -## Setup PATH - -The CORE virtual environment and related scripts will not be found on your PATH, -so some adjustments needs to be made. +## Running CORE -To add support for your user to run scripts from the virtual environment: +This install will place CORE within a virtual environment, symlinks to CORE scripts will be added to **/usr/bin**. 
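If you want to confirm where those entry points resolve from, or run python scripts against the installed CORE modules, the virtual environment can be used directly; the paths below are the defaults described above.

```shell
# confirm the symlinked scripts point into the virtual environment
ls -l /usr/bin/core-daemon /usr/bin/core-gui
# invoke the venv interpreter directly for python scripting against CORE
/opt/core/venv/bin/python -c "import core; print(core.__file__)"
```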
```shell -# can add to ~/.bashrc -export PATH=$PATH:/opt/core/venv/bin -``` - -This will not solve the path issue when running as sudo, so you can do either -of the following to compensate. - -```shell -# run command passing in the right PATH to pickup from the user running the command -sudo env PATH=$PATH core-daemon - -# add an alias to ~/.bashrc or something similar -alias sudop='sudo env PATH=$PATH' -# now you can run commands like so -sudop core-daemon +# in one terminal run the server daemon +sudo core-daemon +# in another terminal run the gui client +core-gui ``` diff --git a/docs/lxc.md b/docs/lxc.md deleted file mode 100644 index 1ee11453b..000000000 --- a/docs/lxc.md +++ /dev/null @@ -1,43 +0,0 @@ -# LXC Support - -## Overview - -LXC nodes are provided by way of LXD to create nodes using predefined -images and provide file system separation. - -## Installation - -### Debian Systems - -```shell -sudo snap install lxd -``` - -## Configuration - -Initialize LXD and say no to adding a default bridge. - -```shell -sudo lxd init -``` - -## Group Setup - -To use LXC nodes within the python GUI, you will need to make sure the user running the GUI is a member of the -lxd group. - -```shell -# add group if does not exist -sudo groupadd lxd - -# add user to group -sudo usermod -aG lxd $USER - -# to get this change to take effect, log out and back in or run the following -newgrp lxd -``` - -## Tools and Versions Tested With - -* LXD 3.14 -* nsenter from util-linux 2.31.1 diff --git a/docs/nodetypes.md b/docs/nodetypes.md index 8f0957467..11e21438d 100644 --- a/docs/nodetypes.md +++ b/docs/nodetypes.md @@ -21,12 +21,6 @@ Docker nodes provide a convenience for running nodes using predefind images and filesystems that CORE nodes do not provide. Details for using Docker nodes can be found [here](docker.md). -## LXC Nodes - -LXC nodes provide a convenience for running nodes using predefind images -and filesystems that CORE nodes do not provide. Details for using LXC -nodes can be found [here](lxc.md). 
- ## Physical Nodes The *physical* machine type is used for nodes that represent a real Linux-based diff --git a/docs/python.md b/docs/python.md index 0985bb8d2..200bd9528 100644 --- a/docs/python.md +++ b/docs/python.md @@ -74,22 +74,23 @@ Event types: * node - events for node movements and icon changes * link - events for link configuration changes and wireless link add/delete * config - configuration events when legacy gui joins a session -* exception - alert/error events +* alert - alert events * file - file events when the legacy gui joins a session ```python +from core.emulator.data import EventData, AlertData, LinkData, NodeData + + def event_listener(event): print(event) # add an event listener to event type you want to listen to # each handler will receive an object unique to that type -session.event_handlers.append(event_listener) -session.exception_handlers.append(event_listener) -session.node_handlers.append(event_listener) -session.link_handlers.append(event_listener) -session.file_handlers.append(event_listener) -session.config_handlers.append(event_listener) +session.broadcast_manager.add_handler(NodeData, event_listener) +session.broadcast_manager.add_handler(LinkData, event_listener) +session.broadcast_manager.add_handler(EventData, event_listener) +session.broadcast_manager.add_handler(AlertData, event_listener) ``` ### Configuring Links @@ -322,15 +323,9 @@ n1 = session.add_node(CoreNode, position=position, options=options) position = Position(x=300, y=100) n2 = session.add_node(CoreNode, position=position, options=options) -# configure general emane settings -config = session.emane.get_configs() -config.update({ - "eventservicettl": "2" -}) - -# configure emane model settings -# using a dict mapping currently support values as strings -session.emane.set_model_config(emane.id, EmaneIeee80211abgModel.name, { +# configure emane model using a dict, which currently support values as strings +session.emane.set_config(emane.id, EmaneIeee80211abgModel.name, { + "eventservicettl": "2", "unicastrate": "3", }) @@ -368,44 +363,39 @@ session.emane.set_config(config_id, EmaneIeee80211abgModel.name, { Services help generate and run bash scripts on nodes for a given purpose. -Configuring the files of a service results in a specific hard coded script being +Configuring the templates of a service results in a specific hard coded script being generated, instead of the default scripts, that may leverage dynamic generation. 
The following features can be configured for a service: -* configs - files that will be generated -* dirs - directories that will be mounted unique to the node +* files - files that will be generated +* directories - directories that will be mounted unique to the node * startup - commands to run start a service * validate - commands to run to validate a service * shutdown - commands to run to stop a service Editing service properties: - ```python # configure a service, for a node, for a given session -session.services.set_service(node_id, service_name) -service = session.services.get_service(node_id, service_name) -service.configs = ("file1.sh", "file2.sh") -service.dirs = ("/etc/node",) -service.startup = ("bash file1.sh",) -service.validate = () -service.shutdown = () +node = session.get_node(node_id, CoreNode) +service = node.services[service_name] +service.files = ["file1.sh", "file2.sh"] +service.directories = ["/etc/node"] +service.startup = ["bash file1.sh"] +service.validate = [] +service.shutdown = [] ``` -When editing a service file, it must be the name of `config` -file that the service will generate. +When editing a service file, it must be the name of +`file` that the service will generate. Editing a service file: - ```python # to edit the contents of a generated file you can specify # the service, the file name, and its contents -session.services.set_service_file( - node_id, - service_name, - file_name, - "echo hello", -) +node = session.get_node(node_id, CoreNode) +service = node.services[service_name] +service.set_template(file_name, "echo hello") ``` ## File Examples diff --git a/docs/services.md b/docs/services.md index 9e6e3642b..474f41221 100644 --- a/docs/services.md +++ b/docs/services.md @@ -1,22 +1,26 @@ -# Services (Deprecated) +# Services ## Overview -CORE uses the concept of services to specify what processes or scripts run on a -node when it is started. Layer-3 nodes such as routers and PCs are defined by -the services that they run. +CORE uses the concept of services to specify what processes or scripts to run on a +node when it is started. Ultimately, providing a convenience for creating reusable +dynamic scripts to run on nodes, for carrying out specific tasks. -Services may be customized for each node, or new custom services can be -created. New node types can be created each having a different name, icon, and -set of default services. Each service defines the per-node directories, -configuration files, startup index, starting commands, validation commands, -shutdown commands, and meta-data associated with a node. +Services leverage a templating engine, for robust service file creation. +They also have the power of configuration key/value pairs, that can be +defined and displayed within the GUI, to help further configure a service, as needed. -!!! note +This boils down to the following functions: + +* generating files the service will use, either directly for commands or for configuration +* command(s) for starting a service +* command(s) for validating a service +* command(s) for stopping a service - **Network namespace nodes do not undergo the normal Linux boot process** - using the **init**, **upstart**, or **systemd** frameworks. These - lightweight nodes use configured CORE *services*. +Most CORE nodes will have a default set of services to run, associated with +them. You can however customize the set of services a node will use. 
Or even +further define a new node type within the GUI, with a set of services, that +will allow quickly dragging and dropping that node type during creation. ## Available Services @@ -39,7 +43,6 @@ Here are the default node types and their services: | Node Type | Services | |-----------|--------------------------------------------------------------------------------------------------------------------------------------------| | *router* | zebra, OSFPv2, OSPFv3, and IPForward services for IGP link-state routing. | -| *host* | DefaultRoute and SSH services, representing an SSH server having a default route when connected directly to a router. | | *PC* | DefaultRoute service for having a default route when connected directly to a router. | | *mdr* | zebra, OSPFv3MDR, and IPForward services for wireless-optimized MANET Designated Router routing. | | *prouter* | a physical router, having the same default services as the *router* node type; for incorporating Linux testbed machines into an emulation. | @@ -48,84 +51,21 @@ Configuration files can be automatically generated by each service. For example, CORE automatically generates routing protocol configuration for the router nodes in order to simplify the creation of virtual networks. -To change the services associated with a node, double-click on the node to -invoke its configuration dialog and click on the *Services...* button, -or right-click a node a choose *Services...* from the menu. -Services are enabled or disabled by clicking on their names. The button next to -each service name allows you to customize all aspects of this service for this -node. For example, special route redistribution commands could be inserted in -to the Quagga routing configuration associated with the zebra service. - -To change the default services associated with a node type, use the Node Types -dialog available from the *Edit* button at the end of the Layer-3 nodes -toolbar, or choose *Node types...* from the *Session* menu. Note that -any new services selected are not applied to existing nodes if the nodes have -been customized. - -## Customizing a Service - -A service can be fully customized for a particular node. From the node's -configuration dialog, click on the button next to the service name to invoke -the service customization dialog for that service. -The dialog has three tabs for configuring the different aspects of the service: -files, directories, and startup/shutdown. - -!!! note - - A **yellow** customize icon next to a service indicates that service - requires customization (e.g. the *Firewall* service). - A **green** customize icon indicates that a custom configuration exists. - Click the *Defaults* button when customizing a service to remove any - customizations. - -The Files tab is used to display or edit the configuration files or scripts that -are used for this service. Files can be selected from a drop-down list, and -their contents are displayed in a text entry below. The file contents are -generated by the CORE daemon based on the network topology that exists at -the time the customization dialog is invoked. - -The Directories tab shows the per-node directories for this service. For the -default types, CORE nodes share the same filesystem tree, except for these -per-node directories that are defined by the services. For example, the -**/var/run/quagga** directory needs to be unique for each node running -the Zebra service, because Quagga running on each node needs to write separate -PID files to that directory. 
+To change the services associated with a node, right-click a node a choose +**Services...** from the menu button. Services are enabled or disabled by selecting +through the service groups and enabling the checkboxes on services. Select a selected +service and click the **Configure** button to further configure a given service. -!!! note - - The **/var/log** and **/var/run** directories are - mounted uniquely per-node by default. - Per-node mount targets can be found in **/tmp/pycore./.conf/** - -The Startup/shutdown tab lists commands that are used to start and stop this -service. The startup index allows configuring when this service starts relative -to the other services enabled for this node; a service with a lower startup -index value is started before those with higher values. Because shell scripts -generated by the Files tab will not have execute permissions set, the startup -commands should include the shell name, with -something like ```sh script.sh```. - -Shutdown commands optionally terminate the process(es) associated with this -service. Generally they send a kill signal to the running process using the -*kill* or *killall* commands. If the service does not terminate -the running processes using a shutdown command, the processes will be killed -when the *vnoded* daemon is terminated (with *kill -9*) and -the namespace destroyed. It is a good practice to -specify shutdown commands, which will allow for proper process termination, and -for run-time control of stopping and restarting services. - -Validate commands are executed following the startup commands. A validate -command can execute a process or script that should return zero if the service -has started successfully, and have a non-zero return value for services that -have had a problem starting. For example, the *pidof* command will check -if a process is running and return zero when found. When a validate command -produces a non-zero return value, an exception is generated, which will cause -an error to be displayed in the Check Emulation Light. - -!!! note +To change the default services associated with a node type, use the **Custom Nodes** +option under the *Edit* menu option. Here you can define new node types, with a custom +icon, and a custom set of services to start on nodes of this type. This node type +will be added to the container node options on the left toolbar, allowing for easy +drag and drop creation for nodes of this type. - To start, stop, and restart services during run-time, right-click a - node and use the *Services...* menu. +The node types are saved in the GUI config file **~/.coregui/config.yaml**. +Keep this in mind when changing the default services for +existing node types; it may be better to simply create a new node type. It is +recommended that you do not change the default built-in node types. ## New Services @@ -133,167 +73,138 @@ Services can save time required to configure nodes, especially if a number of nodes require similar configuration procedures. New services can be introduced to automate tasks. -### Leveraging UserDefined - -The easiest way to capture the configuration of a new process into a service -is by using the **UserDefined** service. This is a blank service where any -aspect may be customized. The UserDefined service is convenient for testing -ideas for a service before adding a new service type. - ### Creating New Services !!! note - The directory name used in **custom_services_dir** below should be unique and - should not correspond to any existing Python module name. 
For example, don't - use the name **subprocess** or **services**. + The directory base name used in **custom_services_dir** below should + be unique and should not correspond to any existing Python module name. + For example, don't use the name **subprocess** or **services**. 1. Modify the example service shown below to do what you want. It could generate config/script files, mount per-node - directories, start processes/scripts, etc. sample.py is a Python file that - defines one or more classes to be imported. You can create multiple Python - files that will be imported. + directories, start processes/scripts, etc. Your file can define one or more + classes to be imported. You can create multiple Python files that will be imported. -2. Put these files in a directory such as `/home//.coregui/custom_services` - Note that the last component of this directory name **custom_services** should not - be named the same as any python module, due to naming conflicts. +2. Put these files in a directory such as **~/.coregui/custom_services**. -3. Add a **custom_services_dir = `/home//.coregui/custom_services`** entry to the - /etc/core/core.conf file. +3. Set the **custom_services_dir = ~/.coregui/custom_services** entry to the + **/opt/core/etc/core.conf** file. 4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax) - should be displayed in the daemon output. + should be displayed in the terminal (or service log, like journalctl). 5. Start using your custom service on your nodes. You can create a new node type that uses your service, or change the default services for an existing node type, or change individual nodes. -If you have created a new service type that may be useful to others, please -consider contributing it to the CORE project. - -#### Example Custom Service +### Example Custom Service Below is the skeleton for a custom service with some documentation. Most people would likely only setup the required class variables **(name/group)**. -Then define the **configs** (files they want to generate) and implement the -**generate_config** function to dynamically create the files wanted. Finally -the **startup** commands would be supplied, which typically tends to be +Then define the **files** to generate and implement the +**get_text_template** function to dynamically create the files wanted. Finally, +the **startup** commands would be supplied, which typically tend to be running the shell files generated. +This is a very simple service using the bare minimum needed. ```python -""" -Simple example custom service, used to drive shell commands on a node. -""" -from typing import Tuple +from core.services.base import CoreService -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService, ServiceMode +class ExampleService(CoreService): + name: str = "Node Name" + group: str = "ExampleGroup" + files: list[str] = ["node_name.sh"] + startup: list[str] = [f"bash {files[0]}"] + + def get_text_template(self, name: str) -> str: + return """ + echo '${node.name}' > node_name.log + """ +``` + +This fleshes out all the fields and helps document their purpose. 
+```python +from core.config import ConfigString, ConfigBool, Configuration +from core.services.base import CoreService, ShadowDir, ServiceMode +# class that subclasses CoreService class ExampleService(CoreService): - """ - Example Custom CORE Service - - :cvar name: name used as a unique ID for this service and is required, no spaces - :cvar group: allows you to group services within the GUI under a common name - :cvar executables: executables this service depends on to function, if executable is - not on the path, service will not be loaded - :cvar dependencies: services that this service depends on for startup, tuple of - service names - :cvar dirs: directories that this service will create within a node - :cvar configs: files that this service will generate, without a full path this file - goes in the node's directory e.g. /tmp/pycore.12345/n1.conf/myfile - :cvar startup: commands used to start this service, any non-zero exit code will - cause a failure - :cvar validate: commands used to validate that a service was started, any non-zero - exit code will cause a failure - :cvar validation_mode: validation mode, used to determine startup success. - NON_BLOCKING - runs startup commands, and validates success with validation commands - BLOCKING - runs startup commands, and validates success with the startup commands themselves - TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone - :cvar validation_timer: time in seconds for a service to wait for validation, before - determining success in TIMER/NON_BLOCKING modes. - :cvar validation_period: period in seconds to wait before retrying validation, - only used in NON_BLOCKING mode - :cvar shutdown: shutdown commands to stop this service - """ - - name: str = "ExampleService" - group: str = "Utility" - executables: Tuple[str, ...] = () - dependencies: Tuple[str, ...] = () - dirs: Tuple[str, ...] = () - configs: Tuple[str, ...] = ("myservice1.sh", "myservice2.sh") - startup: Tuple[str, ...] = tuple(f"sh {x}" for x in configs) - validate: Tuple[str, ...] = () - validation_mode: ServiceMode = ServiceMode.NON_BLOCKING - validation_timer: int = 5 - validation_period: float = 0.5 - shutdown: Tuple[str, ...] 
= () - - @classmethod - def on_load(cls) -> None: + # unique name for your service within CORE + name: str = "Example" + # the group your service is associated with, used for display in GUI + group: str = "ExampleGroup" + # directories that the service should shadow mount, hiding the system directory + directories: list[str] = ["/usr/local/core"] + # files that this service should generate, defaults to nodes home directory + # or can provide an absolute path to a mounted directory + files: list[str] = ["example-start.sh"] + # executables that should exist on path, that this service depends on + executables: list[str] = [] + # other services that this service depends on, defines service start order + dependencies: list[str] = [] + # commands to run to start this service + startup: list[str] = [] + # commands to run to validate this service + validate: list[str] = [] + # commands to run to stop this service + shutdown: list[str] = [] + # validation mode, blocking, non-blocking, and timer + validation_mode: ServiceMode = ServiceMode.BLOCKING + # configurable values that this service can use, for file generation + default_configs: list[Configuration] = [ + ConfigString(id="value1", label="Text"), + ConfigBool(id="value2", label="Boolean"), + ConfigString(id="value3", label="Multiple Choice", + options=["value1", "value2", "value3"]), + ] + # sets of values to set for the configuration defined above, can be used to + # provide convenient sets of values to typically use + modes: dict[str, dict[str, str]] = { + "mode1": {"value1": "value1", "value2": "0", "value3": "value2"}, + "mode2": {"value1": "value2", "value2": "1", "value3": "value3"}, + "mode3": {"value1": "value3", "value2": "0", "value3": "value1"}, + } + # defines directories that this service can help shadow within a node + shadow_directories: list[ShadowDir] = [] + + def get_text_template(self, name: str) -> str: """ - Provides a way to run some arbitrary logic when the service is loaded, possibly - to help facilitate dynamic settings for the environment. + This function is used to return a string template that will be rendered + by the templating engine. Available variables will be node and any other + key/value pairs returned by the "data()" function. - :return: nothing + :param name: name of file to get template for + :return: string template """ - pass - - @classmethod - def get_configs(cls, node: CoreNode) -> Tuple[str, ...]: + return """ + # sample script 1 + # node id(${node.id}) name(${node.name}) + # config: ${config} + echo hello """ - Provides a way to dynamically generate the config files from the node a service - will run. Defaults to the class definition and can be left out entirely if not - needed. - :param node: core node that the service is being ran on - :return: tuple of config files to create - """ - return cls.configs +``` - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Returns a string representation for a file, given the node the service is - starting on the config filename that this information will be used for. This - must be defined, if "configs" are defined. 
+#### Validation Mode - :param node: core node that the service is being ran on - :param filename: configuration file to generate - :return: configuration file content - """ - cfg = "#!/bin/sh\n" - if filename == cls.configs[0]: - cfg += "# auto-generated by MyService (sample.py)\n" - for iface in node.get_ifaces(): - cfg += f'echo "Node {node.name} has interface {iface.name}"\n' - elif filename == cls.configs[1]: - cfg += "echo hello" - return cfg - - @classmethod - def get_startup(cls, node: CoreNode) -> Tuple[str, ...]: - """ - Provides a way to dynamically generate the startup commands from the node a - service will run. Defaults to the class definition and can be left out entirely - if not needed. +Validation modes are used to determine if a service has started up successfully. - :param node: core node that the service is being ran on - :return: tuple of startup commands to run - """ - return cls.startup +* blocking - startup commands are expected to run til completion and return 0 exit code +* non-blocking - startup commands are ran, but do not wait for completion +* timer - startup commands are ran, and an arbitrary amount of time is waited to consider started - @classmethod - def get_validate(cls, node: CoreNode) -> Tuple[str, ...]: - """ - Provides a way to dynamically generate the validate commands from the node a - service will run. Defaults to the class definition and can be left out entirely - if not needed. +#### Shadow Directories - :param node: core node that the service is being ran on - :return: tuple of commands to validate service startup with - """ - return cls.validate -``` +Shadow directories provide a convenience for copying a directory and the files within +it to a nodes home directory, to allow a unique set of per node files. + +* `ShadowDir(path="/user/local/core")` - copies files at the given location into the node +* `ShadowDir(path="/user/local/core", src="/opt/core")` - copies files to the given location, + but sourced from the provided location +* `ShadowDir(path="/user/local/core", templates=True)` - copies files and treats them as + templates for generation +* `ShadowDir(path="/user/local/core", has_node_paths=True)` - copies files from the given + location, and looks for unique node names directories within it, using a directory named + default, when not preset diff --git a/docs/services/security.md b/docs/services/security.md index a621009d0..0e8f6840f 100644 --- a/docs/services/security.md +++ b/docs/services/security.md @@ -57,7 +57,7 @@ CLIENT_NAME=client1 # create directory for keys for CORE to use # NOTE: the default is set to a directory that requires using sudo, but can be # anywhere and not require sudo at all -KEYDIR=/etc/core/keys +KEYDIR=/opt/core/etc/keys sudo mkdir $KEYDIR # move keys to directory diff --git a/docs/tutorials/common/grpc.md b/docs/tutorials/common/grpc.md index 2a85d7c82..f5738bae9 100644 --- a/docs/tutorials/common/grpc.md +++ b/docs/tutorials/common/grpc.md @@ -6,7 +6,7 @@ the remaining steps of a given section. 1. Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` 2. From another terminal run the tutorial python script, which will create a session to join ``` shell diff --git a/docs/tutorials/setup.md b/docs/tutorials/setup.md index 858b0f1d2..2b7877c9b 100644 --- a/docs/tutorials/setup.md +++ b/docs/tutorials/setup.md @@ -2,16 +2,7 @@ ## Setup for CORE -We assume the prior installation of CORE, using a virtual environment. 
You can -then adjust your PATH and add an alias to help more conveniently run CORE -commands. - -This can be setup in your **.bashrc** - -```shell -export PATH=$PATH:/opt/core/venv/bin -alias sudop='sudo env PATH=$PATH' -``` +We assume the prior installation of CORE, using a virtual environment. ## Setup for Chat App @@ -70,10 +61,10 @@ optional arguments: ### Installing the Chat App Service -1. You will first need to edit **/etc/core/core.conf** to update the config +1. You will first need to edit **/opt/core/etc/core.conf** to update the custom service path to pick up your service ``` shell - custom_config_services_dir = + custom_services_dir = ``` 2. Then you will need to copy/move **chatapp/chatapp_service.py** to the directory configured above diff --git a/docs/tutorials/tutorial1.md b/docs/tutorials/tutorial1.md index 7bda7e7ff..7e275fea1 100644 --- a/docs/tutorials/tutorial1.md +++ b/docs/tutorials/tutorial1.md @@ -38,7 +38,7 @@ between nodes in CORE. * Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ``` shell @@ -76,7 +76,7 @@ traffic being sent/received among many other uses. * Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ``` shell @@ -115,7 +115,7 @@ beneficial for understanding how software will behave in adverse conditions. * Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ``` shell @@ -158,7 +158,7 @@ within the nodes of our scenario. * Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ``` shell @@ -208,7 +208,7 @@ using `tail -f` to observe the output of running software. * Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ``` shell diff --git a/docs/tutorials/tutorial2.md b/docs/tutorials/tutorial2.md index 7b82e04eb..d319f3835 100644 --- a/docs/tutorials/tutorial2.md +++ b/docs/tutorials/tutorial2.md @@ -20,7 +20,7 @@ XML scenario file, leveraging an NS2 mobility file. * Make sure the **core-daemon** is running a terminal ```shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ```shell @@ -97,7 +97,7 @@ gRPC python script and providing mobility over the gRPC interface. * Make sure the **core-daemon** is running a terminal ```shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ```shell diff --git a/docs/tutorials/tutorial3.md b/docs/tutorials/tutorial3.md index eaa2a5e65..720ef0ede 100644 --- a/docs/tutorials/tutorial3.md +++ b/docs/tutorials/tutorial3.md @@ -21,7 +21,7 @@ file, leveraging an NS2 file for mobility. * Make sure the **core-daemon** is running a terminal ```shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ```shell @@ -80,7 +80,7 @@ This section covers using a gRPC script to create and provide scenario movement. * Make sure the **core-daemon** is running a terminal ```shell - sudop core-daemon + sudo core-daemon ``` * From another terminal run the **scenario.py** script ```shell @@ -136,7 +136,7 @@ This section provides an example for running a script within a node, that leverages a control network in CORE for issuing mobility using the gRPC API. 
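Before walking through the steps below, note that the daemon's gRPC server only listens on localhost by default, which is why **grpcaddress** gets opened up. Once the daemon is restarted, a quick check (assuming the default gRPC port of 50051) can confirm the listener is up:

```shell
# verify core-daemon's gRPC listener is up after editing core.conf and restarting
ss -tlnp | grep 50051
```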
-* Edit the following line in **/etc/core/core.conf** +* Edit the following line in **/opt/core/etc/core.conf** ```shell grpcaddress = 0.0.0.0 ``` diff --git a/docs/tutorials/tutorial5.md b/docs/tutorials/tutorial5.md index 92337717d..b6bf8fac9 100644 --- a/docs/tutorials/tutorial5.md +++ b/docs/tutorials/tutorial5.md @@ -22,7 +22,7 @@ This section covers using the saved **scenario.xml** file to get and up and runn

    * Make sure the **core-daemon** is running in a terminal ```shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ```shell @@ -101,7 +101,7 @@ This section covers leveraging the gRPC script to get up and running.

    * Make sure the **core-daemon** is running in a terminal ```shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ```shell diff --git a/docs/tutorials/tutorial6.md b/docs/tutorials/tutorial6.md index 46bb57ac0..a10c6d3a3 100644 --- a/docs/tutorials/tutorial6.md +++ b/docs/tutorials/tutorial6.md @@ -17,10 +17,10 @@ Below is the list of files used for this tutorial. This section will cover running this sample tutorial that develops a scenario file. -* Ensure that **/etc/core/core.conf** has **grpcaddress** set to **0.0.0.0** +* Ensure that **/opt/core/etc/core.conf** has **grpcaddress** set to **0.0.0.0** * Make sure the **core-daemon** is running in a terminal ```shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ```shell diff --git a/docs/tutorials/tutorial7.md b/docs/tutorials/tutorial7.md index 2cc2f8126..38d855ea9 100644 --- a/docs/tutorials/tutorial7.md +++ b/docs/tutorials/tutorial7.md @@ -44,7 +44,7 @@ between nodes in CORE. * Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ``` shell @@ -82,7 +82,7 @@ traffic being sent/received among many other uses. * Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ``` shell @@ -121,7 +121,7 @@ within the nodes of our scenario. * Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ``` shell @@ -171,7 +171,7 @@ using `tail -f` to observe the output of running software. * Make sure the CORE daemon is running a terminal, if not already ``` shell - sudop core-daemon + sudo core-daemon ``` * In another terminal run the GUI ``` shell diff --git a/mkdocs.yml b/mkdocs.yml index 03504b131..60a918bd4 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -40,7 +40,8 @@ nav: - Installation: - Overview: install.md - Ubuntu: install_ubuntu.md - - CentOS: install_centos.md + - Rocky: install_rocky.md + - Docker: install_docker.md - Tutorials: - Overview: tutorials/overview.md - Setup: tutorials/setup.md @@ -56,10 +57,8 @@ nav: - Node Types: - Overview: nodetypes.md - Docker: docker.md - - LXC: lxc.md - Services: - - Config Services: configservices.md - - Services (Deprecated): services.md + - Overview: services.md - Provided: - Bird: services/bird.md - EMANE: services/emane.md diff --git a/package/after-install.sh b/package/after-install-deb.sh similarity index 56% rename from package/after-install.sh rename to package/after-install-deb.sh index 341e909dc..8bf51de91 100644 --- a/package/after-install.sh +++ b/package/after-install-deb.sh @@ -6,11 +6,12 @@ fi PYTHON="${PYTHON:=python3}" if [ ! -z "${NO_VENV}" ]; then ${PYTHON} -m pip install /opt/core/core-*.whl - echo "DAEMON=/usr/local/bin/core-daemon" > /opt/core/service + sed -i 's|$DAEMON|/usr/local/bin/core-daemon|g' /lib/systemd/system/core-daemon.service else ${PYTHON} -m venv /opt/core/venv . 
/opt/core/venv/bin/activate pip install --upgrade pip pip install /opt/core/core-*.whl - echo "DAEMON=/opt/core/venv/bin/core-daemon" > /opt/core/service + sed -i 's|$DAEMON|/opt/core/venv/bin/core-daemon|g' /lib/systemd/system/core-daemon.service + ln -s /opt/core/venv/bin/core-* /usr/bin/ fi diff --git a/package/after-install-rpm.sh b/package/after-install-rpm.sh new file mode 100644 index 000000000..60b56573b --- /dev/null +++ b/package/after-install-rpm.sh @@ -0,0 +1,18 @@ +#!/bin/sh +if [ ! -z "${NO_PYTHON}" ]; then + exit 0 +fi + +PYTHON="${PYTHON:=python3}" +if [ ! -z "${NO_VENV}" ]; then + ${PYTHON} -m pip install /opt/core/core-*.whl + sed -i 's|$DAEMON|/usr/local/bin/core-daemon|g' /usr/lib/systemd/system/core-daemon.service +else + ${PYTHON} -m venv /opt/core/venv + . /opt/core/venv/bin/activate + pip install --upgrade pip + pip install /opt/core/core-*.whl + sed -i 's|$DAEMON|/opt/core/venv/bin/core-daemon|g' /usr/lib/systemd/system/core-daemon.service + ln -s /opt/core/venv/bin/core-* /usr/bin/ +fi +systemctl preset core-daemon diff --git a/package/after-remove.sh b/package/after-remove-deb.sh similarity index 82% rename from package/after-remove.sh rename to package/after-remove-deb.sh index 60a5cb594..55c92ff6c 100644 --- a/package/after-remove.sh +++ b/package/after-remove-deb.sh @@ -10,4 +10,6 @@ else ${PYTHON} -m venv /opt/core/venv . /opt/core/venv/bin/activate pip uninstall -y core + rm -rf /opt/core/venv + rm -rf /opt/core/share fi diff --git a/package/after-remove-rpm.sh b/package/after-remove-rpm.sh new file mode 100644 index 000000000..67f69c77a --- /dev/null +++ b/package/after-remove-rpm.sh @@ -0,0 +1,17 @@ +#!/bin/sh +if [ -v NO_PYTHON ]; then + exit 0 +fi + +PYTHON="${PYTHON:=python3}" +if [ -v NO_VENV ]; then + ${PYTHON} -m pip uninstall -y core +else + ${PYTHON} -m venv /opt/core/venv + . /opt/core/venv/bin/activate + pip uninstall -y core + rm -rf /opt/core/venv + rm -rf /opt/core/share +fi +systemctl --no-reload disable core-daemon +systemctl stop core-daemon diff --git a/package/core-daemon b/package/core-daemon deleted file mode 100644 index 8cb57e4c6..000000000 --- a/package/core-daemon +++ /dev/null @@ -1,112 +0,0 @@ -#!/bin/sh -### BEGIN INIT INFO -# Provides: core-daemon -# Required-Start: $network $remote_fs -# Required-Stop: $network $remote_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Start the core-daemon CORE daemon at boot time -# Description: Starts and stops the core-daemon CORE daemon used to -# provide network emulation services for the CORE GUI -# or scripts. -### END INIT INFO -# -# chkconfig: 35 90 03 -# description: Starts and stops the CORE daemon \ -# used to provide network emulation services. -# -# config: /etc/core/ - -. /opt/core/service -NAME=`basename $0` -PIDFILE="/var/$NAME.pid" -LOG="/var/log/$NAME.log" -CMD="$DAEMON" - -get_pid() { - cat "$PIDFILE" -} - -is_alive() { - [ -f "$PIDFILE" ] && ps -p `get_pid` > /dev/null 2>&1 -} - -corestart() { - if is_alive; then - echo "$NAME already started" - else - echo "starting $NAME" - $CMD 2>&1 >> "$LOG" & - fi - - echo $! > "$PIDFILE" - if ! is_alive; then - echo "unable to start $NAME, see $LOG" - exit 1 - fi -} - -corestop() { - if is_alive; then - echo -n "stopping $NAME.." - kill `get_pid` - for i in 1 2 3 4 5; do - sleep 1 - if ! is_alive; then - break - fi - echo -n "." 
- done - echo - - if is_alive; then - echo "not stopped; may still be shutting down" - exit 1 - else - echo "stopped" - if [ -f "$PIDFILE" ]; then - rm -f "$PIDFILE" - fi - fi - else - echo "$NAME not running" - fi -} - -corerestart() { - corestop - corestart -} - -corestatus() { - if is_alive; then - echo "$NAME is running" - else - echo "$NAME is stopped" - exit 1 - fi -} - - -case "$1" in - start) - corestart - ;; - stop) - corestop - ;; - restart) - corerestart - ;; - force-reload) - corerestart - ;; - status) - corestatus - ;; - *) - echo "Usage: $0 {start|stop|restart|status}" - exit 1 -esac - -exit $? diff --git a/package/core-daemon.service b/package/core-daemon.service index ede52c634..86025ed8a 100644 --- a/package/core-daemon.service +++ b/package/core-daemon.service @@ -4,7 +4,6 @@ After=network.target [Service] Type=simple -EnvironmentFile=/opt/core/service ExecStart=$DAEMON TasksMax=infinity diff --git a/package/etc/core.conf b/package/etc/core.conf index 1923250d4..6feea60f0 100644 --- a/package/etc/core.conf +++ b/package/etc/core.conf @@ -11,7 +11,6 @@ frr_sbin_search = "/usr/local/sbin /usr/sbin /usr/lib/frr /usr/libexec/frr" # this may be a comma-separated list, and directory names should be unique # and not named 'services' #custom_services_dir = /home//.coregui/custom_services -#custom_config_services_dir = /home//.coregui/custom_services # uncomment to establish a standalone control backchannel for accessing nodes # (overriden by the session option of the same name) diff --git a/package/examples/lxd/lxd2core.py b/package/examples/lxd/lxd2core.py deleted file mode 100644 index ec671b290..000000000 --- a/package/examples/lxd/lxd2core.py +++ /dev/null @@ -1,35 +0,0 @@ -import logging - -from core.emulator.coreemu import CoreEmu -from core.emulator.data import IpPrefixes -from core.emulator.enumerations import EventTypes -from core.nodes.base import CoreNode -from core.nodes.lxd import LxcNode - -if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG) - coreemu = CoreEmu() - session = coreemu.create_session() - session.set_state(EventTypes.CONFIGURATION_STATE) - - try: - prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") - - # create node one - options = LxcNode.create_options() - options.image = "ubuntu" - node1 = session.add_node(LxcNode, options=options) - interface1_data = prefixes.create_iface(node1) - - # create node two - node2 = session.add_node(CoreNode) - interface2_data = prefixes.create_iface(node2) - - # add link - session.add_link(node1.id, node2.id, interface1_data, interface2_data) - - # instantiate - session.instantiate() - finally: - input("continue to shutdown") - coreemu.shutdown() diff --git a/package/examples/lxd/lxd2lxd.py b/package/examples/lxd/lxd2lxd.py deleted file mode 100644 index 7e9e6a558..000000000 --- a/package/examples/lxd/lxd2lxd.py +++ /dev/null @@ -1,36 +0,0 @@ -import logging - -from core.emulator.coreemu import CoreEmu -from core.emulator.data import IpPrefixes -from core.emulator.enumerations import EventTypes -from core.nodes.lxd import LxcNode - -if __name__ == "__main__": - logging.basicConfig(level=logging.INFO) - - coreemu = CoreEmu() - session = coreemu.create_session() - session.set_state(EventTypes.CONFIGURATION_STATE) - - # create nodes and interfaces - try: - prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") - - # create node one - options = LxcNode.create_options() - options.image = "ubuntu:18.04" - node1 = session.add_node(LxcNode, options=options) - interface1_data = prefixes.create_iface(node1) - - # create 
node two - node2 = session.add_node(LxcNode, options=options) - interface2_data = prefixes.create_iface(node2) - - # add link - session.add_link(node1.id, node2.id, interface1_data, interface2_data) - - # instantiate - session.instantiate() - finally: - input("continue to shutdown") - coreemu.shutdown() diff --git a/package/examples/lxd/switch.py b/package/examples/lxd/switch.py deleted file mode 100644 index c093fd774..000000000 --- a/package/examples/lxd/switch.py +++ /dev/null @@ -1,46 +0,0 @@ -import logging - -from core.emulator.coreemu import CoreEmu -from core.emulator.data import IpPrefixes -from core.emulator.enumerations import EventTypes -from core.nodes.base import CoreNode -from core.nodes.lxd import LxcNode -from core.nodes.network import SwitchNode - -if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG) - - coreemu = CoreEmu() - session = coreemu.create_session() - session.set_state(EventTypes.CONFIGURATION_STATE) - - try: - prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") - - # create switch - switch = session.add_node(SwitchNode) - - # node one - options = LxcNode.create_options() - options.image = "ubuntu" - node1 = session.add_node(LxcNode, options=options) - interface1_data = prefixes.create_iface(node1) - - # node two - node2 = session.add_node(LxcNode, options=options) - interface2_data = prefixes.create_iface(node2) - - # node three - node3 = session.add_node(CoreNode) - interface3_data = prefixes.create_iface(node3) - - # add links - session.add_link(node1.id, switch.id, interface1_data) - session.add_link(node2.id, switch.id, interface2_data) - session.add_link(node3.id, switch.id, interface3_data) - - # instantiate - session.instantiate() - finally: - input("continue to shutdown") - coreemu.shutdown() diff --git a/package/examples/myservices/__init__.py b/package/examples/myservices/__init__.py deleted file mode 100644 index bfe4afbe8..000000000 --- a/package/examples/myservices/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -"""myservices - -Custom services that you define can be put in this directory. Everything -listed in __all__ is automatically loaded when you add this directory to the -custom_services_dir = '/full/path/to/here' core.conf file option. -""" -__all__ = ["sample"] diff --git a/package/examples/myservices/exampleservice.py b/package/examples/myservices/exampleservice.py deleted file mode 100644 index b6b2bed02..000000000 --- a/package/examples/myservices/exampleservice.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -Simple example custom service, used to drive shell commands on a node. -""" -from typing import Tuple - -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService, ServiceMode - - -class ExampleService(CoreService): - """ - Example Custom CORE Service - - :cvar name: name used as a unique ID for this service and is required, no spaces - :cvar group: allows you to group services within the GUI under a common name - :cvar executables: executables this service depends on to function, if executable is - not on the path, service will not be loaded - :cvar dependencies: services that this service depends on for startup, tuple of - service names - :cvar dirs: directories that this service will create within a node - :cvar configs: files that this service will generate, without a full path this file - goes in the node's directory e.g. 
/tmp/pycore.12345/n1.conf/myfile - :cvar startup: commands used to start this service, any non-zero exit code will - cause a failure - :cvar validate: commands used to validate that a service was started, any non-zero - exit code will cause a failure - :cvar validation_mode: validation mode, used to determine startup success. - NON_BLOCKING - runs startup commands, and validates success with validation commands - BLOCKING - runs startup commands, and validates success with the startup commands themselves - TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone - :cvar validation_timer: time in seconds for a service to wait for validation, before - determining success in TIMER/NON_BLOCKING modes. - :cvar validation_period: period in seconds to wait before retrying validation, - only used in NON_BLOCKING mode - :cvar shutdown: shutdown commands to stop this service - """ - - name: str = "ExampleService" - group: str = "Utility" - executables: Tuple[str, ...] = () - dependencies: Tuple[str, ...] = () - dirs: Tuple[str, ...] = () - configs: Tuple[str, ...] = ("myservice1.sh", "myservice2.sh") - startup: Tuple[str, ...] = tuple(f"sh {x}" for x in configs) - validate: Tuple[str, ...] = () - validation_mode: ServiceMode = ServiceMode.NON_BLOCKING - validation_timer: int = 5 - validation_period: float = 0.5 - shutdown: Tuple[str, ...] = () - - @classmethod - def on_load(cls) -> None: - """ - Provides a way to run some arbitrary logic when the service is loaded, possibly - to help facilitate dynamic settings for the environment. - - :return: nothing - """ - pass - - @classmethod - def get_configs(cls, node: CoreNode) -> Tuple[str, ...]: - """ - Provides a way to dynamically generate the config files from the node a service - will run. Defaults to the class definition and can be left out entirely if not - needed. - - :param node: core node that the service is being ran on - :return: tuple of config files to create - """ - return cls.configs - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Returns a string representation for a file, given the node the service is - starting on the config filename that this information will be used for. This - must be defined, if "configs" are defined. - - :param node: core node that the service is being ran on - :param filename: configuration file to generate - :return: configuration file content - """ - cfg = "#!/bin/sh\n" - if filename == cls.configs[0]: - cfg += "# auto-generated by MyService (sample.py)\n" - for iface in node.get_ifaces(): - cfg += f'echo "Node {node.name} has interface {iface.name}"\n' - elif filename == cls.configs[1]: - cfg += "echo hello" - return cfg - - @classmethod - def get_startup(cls, node: CoreNode) -> Tuple[str, ...]: - """ - Provides a way to dynamically generate the startup commands from the node a - service will run. Defaults to the class definition and can be left out entirely - if not needed. - - :param node: core node that the service is being ran on - :return: tuple of startup commands to run - """ - return cls.startup - - @classmethod - def get_validate(cls, node: CoreNode) -> Tuple[str, ...]: - """ - Provides a way to dynamically generate the validate commands from the node a - service will run. Defaults to the class definition and can be left out entirely - if not needed. 
- - :param node: core node that the service is being ran on - :return: tuple of commands to validate service startup with - """ - return cls.validate diff --git a/package/examples/python/distributed_lxd.py b/package/examples/python/distributed_lxd.py deleted file mode 100644 index 70af8a295..000000000 --- a/package/examples/python/distributed_lxd.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Example for scripting a standalone distributed LXD session that does not interact -with the GUI. -""" - -import argparse -import logging - -from core.emulator.coreemu import CoreEmu -from core.emulator.data import IpPrefixes -from core.emulator.enumerations import EventTypes -from core.nodes.lxd import LxcNode - - -def parse(name): - parser = argparse.ArgumentParser(description=f"Run {name} example") - parser.add_argument( - "-a", - "--address", - help="local address that distributed servers will use for gre tunneling", - ) - parser.add_argument( - "-s", "--server", help="distributed server to use for creating nodes" - ) - options = parser.parse_args() - return options - - -def main(args): - # ip generator for example - prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") - - # create emulator instance for creating sessions and utility methods - coreemu = CoreEmu({"distributed_address": args.address}) - session = coreemu.create_session() - - # initialize distributed - server_name = "core2" - session.distributed.add_server(server_name, args.server) - - # must be in configuration state for nodes to start, when using "node_add" below - session.set_state(EventTypes.CONFIGURATION_STATE) - - # create local node, switch, and remote nodes - options = LxcNode.create_options() - options.image = "ubuntu:18.04" - node1 = session.add_node(LxcNode, options=options) - options.server = server_name - node2 = session.add_node(LxcNode, options=options) - - # create node interfaces and link - interface1_data = prefixes.create_iface(node1) - interface2_data = prefixes.create_iface(node2) - session.add_link(node1.id, node2.id, interface1_data, interface2_data) - - # instantiate session - session.instantiate() - - # pause script for verification - input("press enter for shutdown") - - # shutdown session - coreemu.shutdown() - - -if __name__ == "__main__": - logging.basicConfig(level=logging.INFO) - args = parse(__file__) - main(args) diff --git a/package/examples/configservices/switch.py b/package/share/examples/configservices/switch.py similarity index 94% rename from package/examples/configservices/switch.py rename to package/share/examples/configservices/switch.py index 937c3aa83..af0f48a2e 100644 --- a/package/examples/configservices/switch.py +++ b/package/share/examples/configservices/switch.py @@ -18,7 +18,7 @@ # node one options = CoreNode.create_options() - options.config_services = ["DefaultRoute", "IPForward"] + options.services = ["DefaultRoute", "IPForward"] node1 = session.add_node(CoreNode, options=options) interface = prefixes.create_iface(node1) session.add_link(node1.id, switch.id, iface1_data=interface) diff --git a/package/examples/controlnet_updown b/package/share/examples/controlnet_updown similarity index 100% rename from package/examples/controlnet_updown rename to package/share/examples/controlnet_updown diff --git a/daemon/core/configservices/utilservices/__init__.py b/package/share/examples/custom_emane/__init__.py similarity index 100% rename from daemon/core/configservices/utilservices/__init__.py rename to package/share/examples/custom_emane/__init__.py diff --git 
a/package/examples/myemane/examplemodel.py b/package/share/examples/custom_emane/examplemodel.py similarity index 92% rename from package/examples/myemane/examplemodel.py rename to package/share/examples/custom_emane/examplemodel.py index bd5102e4a..71c775ee3 100644 --- a/package/examples/myemane/examplemodel.py +++ b/package/share/examples/custom_emane/examplemodel.py @@ -2,7 +2,7 @@ Example custom emane model. """ from pathlib import Path -from typing import Dict, List, Optional, Set +from typing import Optional from core.config import Configuration from core.emane import emanemanifest, emanemodel @@ -41,19 +41,19 @@ class ExampleModel(emanemodel.EmaneModel): name: str = "emane_example" mac_library: str = "rfpipemaclayer" mac_xml: str = "rfpipemaclayer.xml" - mac_defaults: Dict[str, str] = { + mac_defaults: dict[str, str] = { "pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml" } - mac_config: List[Configuration] = [] + mac_config: list[Configuration] = [] phy_library: Optional[str] = None phy_xml: str = "emanephy.xml" - phy_defaults: Dict[str, str] = { + phy_defaults: dict[str, str] = { "subid": "1", "propagationmodel": "2ray", "noisemode": "none", } - phy_config: List[Configuration] = [] - config_ignore: Set[str] = set() + phy_config: list[Configuration] = [] + config_ignore: set[str] = set() @classmethod def load(cls, emane_prefix: Path) -> None: diff --git a/package/examples/grpc/__init__.py b/package/share/examples/custom_service/__init__.py similarity index 100% rename from package/examples/grpc/__init__.py rename to package/share/examples/custom_service/__init__.py diff --git a/package/share/examples/custom_service/exampleservice.py b/package/share/examples/custom_service/exampleservice.py new file mode 100644 index 000000000..f13187318 --- /dev/null +++ b/package/share/examples/custom_service/exampleservice.py @@ -0,0 +1,62 @@ +""" +Describes what an example service could be +""" +from core.config import ConfigString, ConfigBool, Configuration +from core.services.base import CoreService, ShadowDir, ServiceMode + + +# class that subclasses ConfigService +class ExampleService(CoreService): + # unique name for your service within CORE + name: str = "Example" + # the group your service is associated with, used for display in GUI + group: str = "ExampleGroup" + # directories that the service should shadow mount, hiding the system directory + directories: list[str] = ["/usr/local/core"] + # files that this service should generate, defaults to nodes home directory + # or can provide an absolute path to a mounted directory + files: list[str] = ["example-start.sh"] + # executables that should exist on path, that this service depends on + executables: list[str] = [] + # other services that this service depends on, defines service start order + dependencies: list[str] = [] + # commands to run to start this service + startup: list[str] = [] + # commands to run to validate this service + validate: list[str] = [] + # commands to run to stop this service + shutdown: list[str] = [] + # validation mode, blocking, non-blocking, and timer + validation_mode: ServiceMode = ServiceMode.BLOCKING + # configurable values that this service can use, for file generation + default_configs: list[Configuration] = [ + ConfigString(id="value1", label="Text"), + ConfigBool(id="value2", label="Boolean"), + ConfigString(id="value3", label="Multiple Choice", + options=["value1", "value2", "value3"]), + ] + # sets of values to set for the configuration defined above, can be used to + # provide 
convenient sets of values to typically use + modes: dict[str, dict[str, str]] = { + "mode1": {"value1": "value1", "value2": "0", "value3": "value2"}, + "mode2": {"value1": "value2", "value2": "1", "value3": "value3"}, + "mode3": {"value1": "value3", "value2": "0", "value3": "value1"}, + } + # defines directories that this service can help shadow within a node + shadow_directories: list[ShadowDir] = [] + + def get_text_template(self, name: str) -> str: + """ + This function is used to return a string template that will be rendered + by the templating engine. Available variables will be node and any other + key/value pairs returned by the "data()" function. + + :param name: name of file to get template for + :return: string template + """ + return """ + # sample script 1 + # node id(${node.id}) name(${node.name}) + # config: ${config} + echo hello + """ diff --git a/package/examples/docker/docker2core.py b/package/share/examples/docker/docker2core.py similarity index 100% rename from package/examples/docker/docker2core.py rename to package/share/examples/docker/docker2core.py diff --git a/package/examples/docker/docker2docker.py b/package/share/examples/docker/docker2docker.py similarity index 100% rename from package/examples/docker/docker2docker.py rename to package/share/examples/docker/docker2docker.py diff --git a/package/examples/docker/switch.py b/package/share/examples/docker/switch.py similarity index 100% rename from package/examples/docker/switch.py rename to package/share/examples/docker/switch.py diff --git a/package/examples/myemane/__init__.py b/package/share/examples/grpc/__init__.py similarity index 100% rename from package/examples/myemane/__init__.py rename to package/share/examples/grpc/__init__.py diff --git a/package/share/examples/grpc/custom_service.py b/package/share/examples/grpc/custom_service.py new file mode 100644 index 000000000..b2102afb7 --- /dev/null +++ b/package/share/examples/grpc/custom_service.py @@ -0,0 +1,38 @@ +from core.api.grpc import client +from core.api.grpc.wrappers import Position, Service + +# interface helper +iface_helper = client.InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64") + +# create grpc client and connect +core = client.CoreGrpcClient() +core.connect() + +# create new service +file_name = "example.sh" +custom_service = Service( + name="custom", + group="Custom", + files=[file_name], + startup=[f"bash {file_name}"], +) +templates = {file_name: "# this is a custom service ${node.name}"} +core.create_service(custom_service, templates) + +# add session +session = core.create_session() + +# create nodes +position = Position(x=100, y=100) +node1 = session.add_node(1, position=position) +node1.services = ["custom"] +position = Position(x=300, y=100) +node2 = session.add_node(2, position=position) + +# create link +iface1 = iface_helper.create_iface(node1.id, 0) +iface2 = iface_helper.create_iface(node2.id, 0) +session.add_link(node1=node1, node2=node2, iface1=iface1, iface2=iface2) + +# start session +core.start_session(session) diff --git a/package/examples/grpc/distributed_switch.py b/package/share/examples/grpc/distributed_switch.py similarity index 100% rename from package/examples/grpc/distributed_switch.py rename to package/share/examples/grpc/distributed_switch.py diff --git a/package/examples/grpc/emane80211.py b/package/share/examples/grpc/emane80211.py similarity index 100% rename from package/examples/grpc/emane80211.py rename to package/share/examples/grpc/emane80211.py diff --git 
a/package/examples/grpc/peertopeer.py b/package/share/examples/grpc/peertopeer.py similarity index 100% rename from package/examples/grpc/peertopeer.py rename to package/share/examples/grpc/peertopeer.py diff --git a/package/examples/grpc/switch.py b/package/share/examples/grpc/switch.py similarity index 100% rename from package/examples/grpc/switch.py rename to package/share/examples/grpc/switch.py diff --git a/package/examples/grpc/wireless.py b/package/share/examples/grpc/wireless.py similarity index 100% rename from package/examples/grpc/wireless.py rename to package/share/examples/grpc/wireless.py diff --git a/package/examples/grpc/wlan.py b/package/share/examples/grpc/wlan.py similarity index 100% rename from package/examples/grpc/wlan.py rename to package/share/examples/grpc/wlan.py diff --git a/package/examples/python/distributed_emane.py b/package/share/examples/python/distributed_emane.py similarity index 100% rename from package/examples/python/distributed_emane.py rename to package/share/examples/python/distributed_emane.py diff --git a/package/examples/python/distributed_ptp.py b/package/share/examples/python/distributed_ptp.py similarity index 100% rename from package/examples/python/distributed_ptp.py rename to package/share/examples/python/distributed_ptp.py diff --git a/package/examples/python/distributed_switch.py b/package/share/examples/python/distributed_switch.py similarity index 100% rename from package/examples/python/distributed_switch.py rename to package/share/examples/python/distributed_switch.py diff --git a/package/examples/python/emane80211.py b/package/share/examples/python/emane80211.py similarity index 100% rename from package/examples/python/emane80211.py rename to package/share/examples/python/emane80211.py diff --git a/package/examples/python/peertopeer.py b/package/share/examples/python/peertopeer.py similarity index 100% rename from package/examples/python/peertopeer.py rename to package/share/examples/python/peertopeer.py diff --git a/package/examples/python/switch.py b/package/share/examples/python/switch.py similarity index 100% rename from package/examples/python/switch.py rename to package/share/examples/python/switch.py diff --git a/package/examples/python/wireless.py b/package/share/examples/python/wireless.py similarity index 100% rename from package/examples/python/wireless.py rename to package/share/examples/python/wireless.py diff --git a/package/examples/python/wlan.py b/package/share/examples/python/wlan.py similarity index 100% rename from package/examples/python/wlan.py rename to package/share/examples/python/wlan.py diff --git a/package/examples/tdma/schedule.xml b/package/share/examples/tdma/schedule.xml similarity index 100% rename from package/examples/tdma/schedule.xml rename to package/share/examples/tdma/schedule.xml diff --git a/package/examples/services/sampleFirewall b/package/share/services/sampleFirewall similarity index 100% rename from package/examples/services/sampleFirewall rename to package/share/services/sampleFirewall diff --git a/package/examples/services/sampleIPsec b/package/share/services/sampleIPsec similarity index 92% rename from package/examples/services/sampleIPsec rename to package/share/services/sampleIPsec index 59e40fc98..e3b098d64 100644 --- a/package/examples/services/sampleIPsec +++ b/package/share/services/sampleIPsec @@ -5,9 +5,9 @@ # peers, along with subnets to tunnel. 
# directory containing the certificate and key described below -keydir=/etc/core/keys +keydir=/opt/core/etc/keys -# the name used for the "$certname.pem" x509 certificate and +# the name used for the "$certname.pem" x509 certificate and # "$certname.key" RSA private key, which can be generated using openssl certname=ipsec1 @@ -27,7 +27,7 @@ T2="172.16.4.0/24AND172.16.5.0/24 172.16.4.0/24AND172.16.6.0/24" # -------- END CUSTOMIZATION -------- -echo "building config $PWD/ipsec.conf..." +echo "building config $PWD/ipsec.conf..." echo "building config $PWD/ipsec.conf..." > $PWD/ipsec.log checkip=0 @@ -45,23 +45,23 @@ echo "#!/usr/sbin/setkey -f # Security policies \ " > $PWD/ipsec.conf i=0 -for hostpair in $tunnelhosts; do +for hostpair in $tunnelhosts; do i=`expr $i + 1` # parse tunnel host IP thishost=${hostpair%%AND*} - peerhost=${hostpair##*AND} + peerhost=${hostpair##*AND} if [ $checkip = "0" ] && [ "$(sipcalc "$thishost" "$peerhost" | grep ERR)" != "" ]; then echo "ERROR: invalid host address $thishost or $peerhost \ " >> $PWD/ipsec.log fi - # parse each tunnel addresses + # parse each tunnel addresses tunnel_list_var_name=T$i eval tunnels="$"$tunnel_list_var_name"" for ttunnel in $tunnels; do lclnet=${ttunnel%%AND*} - rmtnet=${ttunnel##*AND} - if [ $checkip = "0" ] && + rmtnet=${ttunnel##*AND} + if [ $checkip = "0" ] && [ "$(sipcalc "$lclnet" "$rmtnet"| grep ERR)" != "" ]; then echo "ERROR: invalid tunnel address $lclnet and $rmtnet \ " >> $PWD/ipsec.log diff --git a/package/examples/services/sampleVPNClient b/package/share/services/sampleVPNClient similarity index 98% rename from package/examples/services/sampleVPNClient rename to package/share/services/sampleVPNClient index addbf23ef..c5ac80999 100644 --- a/package/examples/services/sampleVPNClient +++ b/package/share/services/sampleVPNClient @@ -4,7 +4,7 @@ # OpenVPN software and a virtual TUN/TAP device. # directory containing the certificate and key described below -keydir=/etc/core/keys +keydir=/opt/core/etc/keys # the name used for a "$keyname.crt" certificate and "$keyname.key" private key. keyname=client1 diff --git a/package/examples/services/sampleVPNServer b/package/share/services/sampleVPNServer similarity index 99% rename from package/examples/services/sampleVPNServer rename to package/share/services/sampleVPNServer index 39639d052..c97ade451 100644 --- a/package/examples/services/sampleVPNServer +++ b/package/share/services/sampleVPNServer @@ -7,7 +7,7 @@ # directory containing the certificate and key described below, in addition to # a CA certificate and DH key -keydir=/etc/core/keys +keydir=/opt/core/etc/keys # the name used for a "$keyname.crt" certificate and "$keyname.key" private key. 
keyname=server2 diff --git a/package/examples/tutorials/chatapp/chatapp/__init__.py b/package/share/tutorials/chatapp/chatapp/__init__.py similarity index 100% rename from package/examples/tutorials/chatapp/chatapp/__init__.py rename to package/share/tutorials/chatapp/chatapp/__init__.py diff --git a/package/examples/tutorials/chatapp/chatapp/client.py b/package/share/tutorials/chatapp/chatapp/client.py similarity index 100% rename from package/examples/tutorials/chatapp/chatapp/client.py rename to package/share/tutorials/chatapp/chatapp/client.py diff --git a/package/examples/tutorials/chatapp/chatapp/server.py b/package/share/tutorials/chatapp/chatapp/server.py similarity index 100% rename from package/examples/tutorials/chatapp/chatapp/server.py rename to package/share/tutorials/chatapp/chatapp/server.py diff --git a/package/examples/tutorials/chatapp/chatapp_service.py b/package/share/tutorials/chatapp/chatapp_service.py similarity index 78% rename from package/examples/tutorials/chatapp/chatapp_service.py rename to package/share/tutorials/chatapp/chatapp_service.py index 6faf80711..6f32a8e96 100644 --- a/package/examples/tutorials/chatapp/chatapp_service.py +++ b/package/share/tutorials/chatapp/chatapp_service.py @@ -1,10 +1,10 @@ from typing import Dict, List from core.config import Configuration -from core.configservice.base import ConfigService, ConfigServiceMode, ShadowDir +from core.services.base import CoreService, ServiceMode, ShadowDir -class ChatAppService(ConfigService): +class ChatAppService(CoreService): name: str = "ChatApp Server" group: str = "ChatApp" directories: List[str] = [] @@ -14,7 +14,7 @@ class ChatAppService(ConfigService): startup: List[str] = [f"bash {files[0]}"] validate: List[str] = [] shutdown: List[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING + validation_mode: ServiceMode = ServiceMode.BLOCKING default_configs: List[Configuration] = [] modes: Dict[str, Dict[str, str]] = {} shadow_directories: List[ShadowDir] = [] diff --git a/package/examples/tutorials/chatapp/setup.py b/package/share/tutorials/chatapp/setup.py similarity index 100% rename from package/examples/tutorials/chatapp/setup.py rename to package/share/tutorials/chatapp/setup.py diff --git a/package/examples/tutorials/tutorial1/scenario.py b/package/share/tutorials/tutorial1/scenario.py similarity index 100% rename from package/examples/tutorials/tutorial1/scenario.py rename to package/share/tutorials/tutorial1/scenario.py diff --git a/package/examples/tutorials/tutorial1/scenario.xml b/package/share/tutorials/tutorial1/scenario.xml similarity index 95% rename from package/examples/tutorials/tutorial1/scenario.xml rename to package/share/tutorials/tutorial1/scenario.xml index 428fe4ca6..d429948ed 100644 --- a/package/examples/tutorials/tutorial1/scenario.xml +++ b/package/share/tutorials/tutorial1/scenario.xml @@ -1,18 +1,18 @@ - + - + - + - + - + diff --git a/package/examples/tutorials/tutorial1/scenario_service.py b/package/share/tutorials/tutorial1/scenario_service.py similarity index 95% rename from package/examples/tutorials/tutorial1/scenario_service.py rename to package/share/tutorials/tutorial1/scenario_service.py index 5a3c55088..d0de570f6 100644 --- a/package/examples/tutorials/tutorial1/scenario_service.py +++ b/package/share/tutorials/tutorial1/scenario_service.py @@ -19,7 +19,7 @@ def main(): # create nodes position = Position(x=250, y=250) node1 = session.add_node(_id=1, name="n1", position=position) - node1.config_services.add("ChatApp Server") + 
node1.services.add("ChatApp Server") position = Position(x=500, y=250) node2 = session.add_node(_id=2, name="n2", position=position) diff --git a/package/examples/tutorials/tutorial1/scenario_service.xml b/package/share/tutorials/tutorial1/scenario_service.xml similarity index 92% rename from package/examples/tutorials/tutorial1/scenario_service.xml rename to package/share/tutorials/tutorial1/scenario_service.xml index ab092f4c2..3dd66e492 100644 --- a/package/examples/tutorials/tutorial1/scenario_service.xml +++ b/package/share/tutorials/tutorial1/scenario_service.xml @@ -1,19 +1,19 @@ - + - + - + - + - + @@ -34,9 +34,6 @@ - - - diff --git a/package/examples/tutorials/tutorial2/scenario.py b/package/share/tutorials/tutorial2/scenario.py similarity index 100% rename from package/examples/tutorials/tutorial2/scenario.py rename to package/share/tutorials/tutorial2/scenario.py diff --git a/package/examples/tutorials/tutorial2/scenario.xml b/package/share/tutorials/tutorial2/scenario.xml similarity index 84% rename from package/examples/tutorials/tutorial2/scenario.xml rename to package/share/tutorials/tutorial2/scenario.xml index ee60f7922..cb0386f1d 100644 --- a/package/examples/tutorials/tutorial2/scenario.xml +++ b/package/share/tutorials/tutorial2/scenario.xml @@ -1,5 +1,5 @@ - + @@ -8,27 +8,27 @@ - + - + - + - + - + - + @@ -42,17 +42,6 @@ - - - - - - - - - - - diff --git a/package/examples/tutorials/tutorial3/move-node2.py b/package/share/tutorials/tutorial3/move-node2.py similarity index 100% rename from package/examples/tutorials/tutorial3/move-node2.py rename to package/share/tutorials/tutorial3/move-node2.py diff --git a/package/examples/tutorials/tutorial3/movements1.txt b/package/share/tutorials/tutorial3/movements1.txt similarity index 100% rename from package/examples/tutorials/tutorial3/movements1.txt rename to package/share/tutorials/tutorial3/movements1.txt diff --git a/package/examples/tutorials/tutorial3/scenario.py b/package/share/tutorials/tutorial3/scenario.py similarity index 100% rename from package/examples/tutorials/tutorial3/scenario.py rename to package/share/tutorials/tutorial3/scenario.py diff --git a/package/examples/tutorials/tutorial3/scenario.xml b/package/share/tutorials/tutorial3/scenario.xml similarity index 86% rename from package/examples/tutorials/tutorial3/scenario.xml rename to package/share/tutorials/tutorial3/scenario.xml index dbe68d4d1..4c8ce35c6 100644 --- a/package/examples/tutorials/tutorial3/scenario.xml +++ b/package/share/tutorials/tutorial3/scenario.xml @@ -1,5 +1,5 @@ - + @@ -8,27 +8,27 @@ - + - + - + - + - + - + @@ -52,7 +52,7 @@ - + @@ -62,17 +62,6 @@ - - - - - - - - - - - diff --git a/package/examples/tutorials/tutorial4/tests/conftest.py b/package/share/tutorials/tutorial4/tests/conftest.py similarity index 100% rename from package/examples/tutorials/tutorial4/tests/conftest.py rename to package/share/tutorials/tutorial4/tests/conftest.py diff --git a/package/examples/tutorials/tutorial4/tests/test_ping.py b/package/share/tutorials/tutorial4/tests/test_ping.py similarity index 100% rename from package/examples/tutorials/tutorial4/tests/test_ping.py rename to package/share/tutorials/tutorial4/tests/test_ping.py diff --git a/package/examples/tutorials/tutorial5/client_for_windows.py b/package/share/tutorials/tutorial5/client_for_windows.py similarity index 100% rename from package/examples/tutorials/tutorial5/client_for_windows.py rename to package/share/tutorials/tutorial5/client_for_windows.py diff --git 
a/package/examples/tutorials/tutorial5/scenario.py b/package/share/tutorials/tutorial5/scenario.py similarity index 100% rename from package/examples/tutorials/tutorial5/scenario.py rename to package/share/tutorials/tutorial5/scenario.py diff --git a/package/examples/tutorials/tutorial5/scenario.xml b/package/share/tutorials/tutorial5/scenario.xml similarity index 94% rename from package/examples/tutorials/tutorial5/scenario.xml rename to package/share/tutorials/tutorial5/scenario.xml index 05d93045c..619f0a074 100644 --- a/package/examples/tutorials/tutorial5/scenario.xml +++ b/package/share/tutorials/tutorial5/scenario.xml @@ -1,5 +1,5 @@ - + @@ -8,9 +8,9 @@ - + - + @@ -25,9 +25,6 @@ - - - diff --git a/package/examples/tutorials/tutorial6/completed-scenario.xml b/package/share/tutorials/tutorial6/completed-scenario.xml similarity index 76% rename from package/examples/tutorials/tutorial6/completed-scenario.xml rename to package/share/tutorials/tutorial6/completed-scenario.xml index 2b9857278..127f46d1a 100644 --- a/package/examples/tutorials/tutorial6/completed-scenario.xml +++ b/package/share/tutorials/tutorial6/completed-scenario.xml @@ -1,5 +1,5 @@ - + @@ -16,29 +16,29 @@ - + - + - + - + - + - + - + - + - + @@ -52,17 +52,6 @@ - - - - - - - - - - - @@ -88,7 +77,7 @@ - + diff --git a/package/examples/tutorials/tutorial6/demo.py b/package/share/tutorials/tutorial6/demo.py similarity index 100% rename from package/examples/tutorials/tutorial6/demo.py rename to package/share/tutorials/tutorial6/demo.py diff --git a/package/examples/tutorials/tutorial6/drone.png b/package/share/tutorials/tutorial6/drone.png similarity index 100% rename from package/examples/tutorials/tutorial6/drone.png rename to package/share/tutorials/tutorial6/drone.png diff --git a/package/examples/tutorials/tutorial6/terrain.png b/package/share/tutorials/tutorial6/terrain.png similarity index 100% rename from package/examples/tutorials/tutorial6/terrain.png rename to package/share/tutorials/tutorial6/terrain.png diff --git a/package/examples/tutorials/tutorial7/scenario.py b/package/share/tutorials/tutorial7/scenario.py similarity index 100% rename from package/examples/tutorials/tutorial7/scenario.py rename to package/share/tutorials/tutorial7/scenario.py diff --git a/package/examples/tutorials/tutorial7/scenario.xml b/package/share/tutorials/tutorial7/scenario.xml similarity index 87% rename from package/examples/tutorials/tutorial7/scenario.xml rename to package/share/tutorials/tutorial7/scenario.xml index 721a7b8f9..1893383f3 100644 --- a/package/examples/tutorials/tutorial7/scenario.xml +++ b/package/share/tutorials/tutorial7/scenario.xml @@ -1,5 +1,5 @@ - + @@ -8,19 +8,19 @@ - + - + - + - + @@ -31,14 +31,6 @@ - - - - - - - - diff --git a/package/examples/tutorials/tutorial7/scenario_service.py b/package/share/tutorials/tutorial7/scenario_service.py similarity index 97% rename from package/examples/tutorials/tutorial7/scenario_service.py rename to package/share/tutorials/tutorial7/scenario_service.py index f0626ac2e..1c92e83c6 100644 --- a/package/examples/tutorials/tutorial7/scenario_service.py +++ b/package/share/tutorials/tutorial7/scenario_service.py @@ -28,7 +28,7 @@ def main(): ) position = Position(x=250, y=250) node2 = session.add_node(_id=2, model="mdr", name="n2", position=position) - node2.config_services.add("ChatApp Server") + node2.services.add("ChatApp Server") position = Position(x=500, y=250) node3 = session.add_node(_id=3, model="mdr", name="n3", position=position) diff --git 
a/package/examples/tutorials/tutorial7/scenario_service.xml b/package/share/tutorials/tutorial7/scenario_service.xml similarity index 87% rename from package/examples/tutorials/tutorial7/scenario_service.xml rename to package/share/tutorials/tutorial7/scenario_service.xml index da2cb8e85..063c5712b 100644 --- a/package/examples/tutorials/tutorial7/scenario_service.xml +++ b/package/share/tutorials/tutorial7/scenario_service.xml @@ -1,5 +1,5 @@ - + @@ -8,20 +8,20 @@ - + - + - + - + @@ -32,14 +32,6 @@ - - - - - - - - diff --git a/tasks.py b/tasks.py index b235e4788..ba1e2cc9b 100644 --- a/tasks.py +++ b/tasks.py @@ -24,9 +24,11 @@ "debian", } SUDOP: str = "sudo -E env PATH=$PATH" -VENV_PATH: str = "/opt/core/venv" -VENV_PYTHON: str = f"{VENV_PATH}/bin/python" -ACTIVATE_VENV: str = f". {VENV_PATH}/bin/activate" +CORE_PATH: Path = Path("/opt/core") +CORE_DATA_PATH: Path = CORE_PATH / "share" +CORE_VENV_PATH: Path = CORE_PATH / "venv" +CORE_VENV_PYTHON: Path = CORE_VENV_PATH / "bin/python" +ACTIVATE_VENV: str = f". {CORE_VENV_PATH}/bin/activate" class Progress: @@ -218,8 +220,8 @@ def install_poetry(c: Context, dev: bool, local: bool, hide: bool) -> None: else: args = "" if dev else "--only main" with c.cd(DAEMON_DIR): - c.run("sudo mkdir -p /opt/core", hide=hide) - c.run(f"sudo {python_bin} -m venv {VENV_PATH}") + c.run(f"sudo mkdir -p {CORE_PATH}", hide=hide) + c.run(f"sudo {python_bin} -m venv {CORE_VENV_PATH}") c.run(f"{ACTIVATE_VENV} && {SUDOP} poetry install {args}", hide=hide) if dev: c.run(f"{ACTIVATE_VENV} && poetry run pre-commit install", hide=hide) @@ -249,13 +251,18 @@ def install_ospf_mdr(c: Context, os_info: OsInfo, hide: bool) -> None: c.run("sudo make install", hide=hide) -def install_service(c, verbose=False, prefix=DEFAULT_PREFIX): +def install_service( + c: Context, os_info: OsInfo, verbose: bool = False, prefix: str = DEFAULT_PREFIX +): """ install systemd core service """ hide = not verbose bin_dir = Path(prefix).joinpath("bin") - systemd_dir = Path("/lib/systemd/system/") + if os_info.like == OsLike.REDHAT: + systemd_dir = Path("/usr/lib/systemd/system") + else: + systemd_dir = Path("/lib/systemd/system/") service_file = systemd_dir.joinpath("core-daemon.service") if systemd_dir.exists(): service_data = inspect.cleandoc( @@ -286,37 +293,34 @@ def install_core_files(c, local=False, verbose=False, prefix=DEFAULT_PREFIX): install core files (scripts, examples, and configuration) """ hide = not verbose - bin_dir = Path(prefix).joinpath("bin") # setup core python helper if not local: + bin_dir = Path(prefix).joinpath("bin") core_python = bin_dir.joinpath("core-python") temp = NamedTemporaryFile("w", delete=False) temp.writelines( [ "#!/bin/bash\n", - f'exec "{VENV_PYTHON}" "$@"\n', + f'exec "{CORE_VENV_PYTHON}" "$@"\n', ] ) temp.close() c.run(f"sudo cp {temp.name} {core_python}", hide=hide) c.run(f"sudo chmod 755 {core_python}", hide=hide) os.unlink(temp.name) + core_scripts = CORE_VENV_PATH / "bin/core-*" + c.run(f"sudo ln -s {core_scripts} {bin_dir}") # install core configuration file - config_dir = "/etc/core" - c.run(f"sudo mkdir -p {config_dir}", hide=hide) - c.run(f"sudo cp -n package/etc/core.conf {config_dir}", hide=hide) - c.run(f"sudo cp -n package/etc/logging.conf {config_dir}", hide=hide) + c.run(f"sudo cp -r -n package/etc {CORE_PATH}", hide=hide) # install examples - examples_dir = f"{prefix}/share/core" - c.run(f"sudo mkdir -p {examples_dir}", hide=hide) - c.run(f"sudo cp -r package/examples {examples_dir}", hide=hide) + c.run(f"sudo cp -r package/share 
{CORE_PATH}", hide=hide) @task( help={ "verbose": "enable verbose", "install-type": "used to force an install type, " - "can be one of the following (redhat, debian)", + "can be one of the following (redhat, debian)", "no-python": "avoid installing python system dependencies", }, ) @@ -348,7 +352,7 @@ def build( "local": "determines if core will install to local system, default is False", "prefix": f"prefix where scripts are installed, default is {DEFAULT_PREFIX}", "install-type": "used to force an install type, " - "can be one of the following (redhat, debian)", + "can be one of the following (redhat, debian)", "ospf": "disable ospf installation", "no-python": "avoid installing python system dependencies", }, @@ -367,7 +371,7 @@ def install( install core, poetry, scripts, service, and ospf mdr """ python_bin = get_env_python() - venv_path = None if local else VENV_PATH + venv_path = None if local else CORE_VENV_PATH print( f"installing core using python({python_bin}) venv({venv_path}) prefix({prefix})" ) @@ -391,7 +395,7 @@ def install( with p.start("installing scripts, examples, and configuration"): install_core_files(c, local, hide, prefix) with p.start("installing systemd service"): - install_service(c, hide, prefix) + install_service(c, os_info, hide, prefix) if ospf: with p.start("installing ospf mdr"): install_ospf_mdr(c, os_info, hide) @@ -403,7 +407,7 @@ def install( "emane-version": "version of emane install", "verbose": "enable verbose", "install-type": "used to force an install type, " - "can be one of the following (redhat, debian)", + "can be one of the following (redhat, debian)", }, ) def install_emane(c, emane_version, verbose=False, install_type=None): @@ -470,7 +474,7 @@ def uninstall( uninstall core, scripts, service, virtual environment, and clean build directory """ python_bin = get_env_python() - venv_path = None if local else VENV_PATH + venv_path = None if local else CORE_VENV_PATH print( f"uninstalling core using python({python_bin}) " f"venv({venv_path}) prefix({prefix})" @@ -488,23 +492,21 @@ def uninstall( python_bin = get_env_python() c.run(f"sudo {python_bin} -m pip uninstall -y core", hide=hide) else: - if Path(VENV_PYTHON).is_file(): - with c.cd(DAEMON_DIR): - if dev: + if CORE_VENV_PATH.is_dir(): + if dev: + with c.cd(DAEMON_DIR): c.run( f"{ACTIVATE_VENV} && poetry run pre-commit uninstall", hide=hide, ) - c.run(f"sudo {VENV_PYTHON} -m pip uninstall -y core", hide=hide) - # remove installed files - bin_dir = Path(prefix).joinpath("bin") - with p.start("uninstalling examples"): - examples_dir = Path(prefix).joinpath("share/core") - c.run(f"sudo rm -rf {examples_dir}") - # remove core-python symlink + c.run(f"sudo rm -rf {CORE_VENV_PATH}", hide=hide) + # remove data files + with p.start("uninstalling data files"): + c.run(f"sudo rm -rf {CORE_DATA_PATH}") + # remove symlinks if not local: - core_python = bin_dir.joinpath("core-python") - c.run(f"sudo rm -f {core_python}", hide=hide) + core_symlinks = Path(prefix) / "bin/core-*" + c.run(f"sudo rm -f {core_symlinks}", hide=hide) # remove service systemd_dir = Path("/lib/systemd/system/") service_name = "core-daemon.service" @@ -523,7 +525,7 @@ def uninstall( "prefix": f"prefix where scripts are installed, default is {DEFAULT_PREFIX}", "branch": "branch to install latest code from, default is current branch", "install-type": "used to force an install type, " - "can be one of the following (redhat, debian)", + "can be one of the following (redhat, debian)", }, ) def reinstall(