From 08d4acf519225bca856842d050dd358f378624c3 Mon Sep 17 00:00:00 2001 From: Olivier Boudeville Date: Fri, 1 May 2020 17:46:23 +0200 Subject: [PATCH] Initial import. --- .gitignore | 17 + GNUmakefile | 57 ++ GNUmakerules-automatic.inc | 7 + GNUmakerules-explicit.inc | 12 + GNUmakesettings.inc | 36 + GNUmakevars.inc | 152 ++++ conf/GNUmakefile | 31 + conf/rebar.config.template | 106 +++ conf/sys.config | 26 + conf/us_common.app.src | 52 ++ conf/vm.args | 36 + doc/GNUmakefile | 89 ++ ebin/GNUmakefile | 5 + priv/bin/us-common.sh | 701 +++++++++++++++ rebar.config | 106 +++ rebar.lock | 1 + src/GNUmakefile | 9 + src/class_USConfigServer.erl | 1102 +++++++++++++++++++++++ src/class_USScheduler.erl | 1502 +++++++++++++++++++++++++++++++ src/class_USServer.erl | 249 +++++ src/class_USTaskRing.erl | 341 +++++++ src/us_common.app.src | 1 + test/GNUmakefile | 4 + test/class_USScheduler_test.erl | 300 ++++++ 24 files changed, 4942 insertions(+) create mode 100644 .gitignore create mode 100644 GNUmakefile create mode 100644 GNUmakerules-automatic.inc create mode 100644 GNUmakerules-explicit.inc create mode 100644 GNUmakesettings.inc create mode 100644 GNUmakevars.inc create mode 100644 conf/GNUmakefile create mode 100644 conf/rebar.config.template create mode 100644 conf/sys.config create mode 100644 conf/us_common.app.src create mode 100644 conf/vm.args create mode 100644 doc/GNUmakefile create mode 100644 ebin/GNUmakefile create mode 100644 priv/bin/us-common.sh create mode 100644 rebar.config create mode 100644 rebar.lock create mode 100644 src/GNUmakefile create mode 100644 src/class_USConfigServer.erl create mode 100644 src/class_USScheduler.erl create mode 100644 src/class_USServer.erl create mode 100644 src/class_USTaskRing.erl create mode 120000 src/us_common.app.src create mode 100644 test/GNUmakefile create mode 100644 test/class_USScheduler_test.erl diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..adae21b --- /dev/null +++ b/.gitignore @@ 
-0,0 +1,17 @@ +# Listing path patterns that shall be ignored by git: +*.beam +include/*.hrl +*.traces +*.log +*.plt +*erlang.log.* +declared-types-in-*.txt +.eunit +erl_crash.dump +*~ + +# Added in relation to rebar3: +.rebar +rebar3.crashdump +.rebar3 +_* diff --git a/GNUmakefile b/GNUmakefile new file mode 100644 index 0000000..7faf4e8 --- /dev/null +++ b/GNUmakefile @@ -0,0 +1,57 @@ +US_COMMON_TOP = . + + +.PHONY: help help-intro help-us-common register-version-in-header \ + register-us-common info info-local + + +MODULES_DIRS = src doc conf test + + +# To override the 'all' default target with a parallel version: +BASE_MAKEFILE = true + + + +# Default target: +help: help-intro help-us-common + + +help-intro: + @echo " Following main make targets are available for package $(PACKAGE_NAME):" + + +help-us-common: + @cd $(TRACES_TOP) && $(MAKE) -s help-traces + + +register-version-in-header: + @if [ -z "$(VERSION_FILE)" ] ; then \ + echo "Error, no version file defined." 1>&2 ; exit 52 ; else \ + $(MAKE) register-us-common ; fi + + +register-us-common: + @echo "-define( us_common_version, \"$(US_COMMON_VERSION)\" )." >> $(VERSION_FILE) + + +# Useful to extract internal layout for re-use in upper layers: +list-beam-dirs: + @for d in $(US_COMMON_BEAM_DIRS) ; do echo $$(readlink -f $$d) ; done + + +stats: + @$(MAKE_CODE_STATS) $(US_COMMON_TOP) + + +info: info-local + + +info-local: + @echo "REBAR3 = $(REBAR3)" + @echo "TRACES_TOP = $(TRACES_TOP)" + @echo "WOOPER_TOP = $(WOOPER_TOP)" + @echo "MYRIAD_TOP = $(MYRIAD_TOP)" + + +include $(US_COMMON_TOP)/GNUmakesettings.inc diff --git a/GNUmakerules-automatic.inc b/GNUmakerules-automatic.inc new file mode 100644 index 0000000..3dfec8a --- /dev/null +++ b/GNUmakerules-automatic.inc @@ -0,0 +1,7 @@ +# Here are gathered automatic (generic, pattern-based) rules for US-common. +# +# See GNUmakerules-explicit.inc for the explicit (immediate, static) +# counterparts. + + +# Prerequisite: US_COMMON_TOP must be set. 
diff --git a/GNUmakerules-explicit.inc b/GNUmakerules-explicit.inc new file mode 100644 index 0000000..7a1cd3f --- /dev/null +++ b/GNUmakerules-explicit.inc @@ -0,0 +1,12 @@ +# Here are gathered explicit (immediate, static) rules for US-common. +# +# See GNUmakerules-automatic.inc for their automatic (generic, pattern-based) +# counterparts. + + +# Prerequisite: US_COMMON_TOP must be set. + + + +# The first non-generic, therefore default, rule: +default-us-common-rule: all diff --git a/GNUmakesettings.inc b/GNUmakesettings.inc new file mode 100644 index 0000000..97a1b72 --- /dev/null +++ b/GNUmakesettings.inc @@ -0,0 +1,36 @@ +# This is the single file that all US-common GNUmakefiles, and only them, should +# include. + +# Prerequisite: US_COMMON_TOP must be set. + + +# Each project should define its own GNUmakesettings.inc, as explained in +# myriad/GNUmakesettings.inc. + + +# Nothing simpler can be used: + + +# First the make variables: + +include $(US_COMMON_TOP)/GNUmakevars.inc +include $(TRACES_TOP)/GNUmakevars.inc +include $(WOOPER_TOP)/GNUmakevars.inc +include $(MYRIAD_TOP)/GNUmakevars.inc + + +# Then rules are split between automatic (generic, pattern-based) ones and +# explicit (immediate, static) ones, so that the latter can be placed last +# (otherwise they would shadow any default target, such as 'all', defined by any +# lower layer, for instance when overriding base cleaning with their own +# 'clean-local' target): + +include $(US_COMMON_TOP)/GNUmakerules-automatic.inc +include $(TRACES_TOP)/GNUmakerules-automatic.inc +include $(WOOPER_TOP)/GNUmakerules-automatic.inc +include $(MYRIAD_TOP)/GNUmakerules-automatic.inc + +include $(US_COMMON_TOP)/GNUmakerules-explicit.inc +include $(TRACES_TOP)/GNUmakerules-explicit.inc +include $(WOOPER_TOP)/GNUmakerules-explicit.inc +include $(MYRIAD_TOP)/GNUmakerules-explicit.inc diff --git a/GNUmakevars.inc b/GNUmakevars.inc new file mode 100644 index 0000000..40395d6 --- /dev/null +++ b/GNUmakevars.inc @@ -0,0 +1,152 
@@ +# Prerequisite: US_COMMON_TOP must be set. + + +# Useful to target for example the root of the current layer (ex: to locate the +# relevant, layer-local '_build' directory): +# +ifndef LAYER_TOP + LAYER_TOP := $(US_COMMON_TOP) +endif + + +# Project section. + + +# PROJECT_NAME should be defined on a per-project basis. +ifndef PROJECT_NAME + PROJECT_NAME = US-common +endif + + +# REBAR3_PROJECT_NAME should be defined on a per-project basis. +ifndef REBAR3_PROJECT_NAME + REBAR3_PROJECT_NAME := us_common +endif + + +# The uniquely-defined version of this layer, for all uses, including rebar: +US_COMMON_VERSION := 0.0.1 + + +# PROJECT_VERSION should be defined on a per-project basis: +ifndef PROJECT_VERSION + PROJECT_VERSION = $(US_COMMON_VERSION) +endif + + +ifndef PACKAGE_NAME + PACKAGE_NAME = $(PROJECT_NAME) +endif + + +ifndef PACKAGE_TOP + PACKAGE_TOP = $(US_COMMON_TOP) +endif + + + +# For any quick, local, non-packaged update thereof: +WOOPER_SIBLING_BUILD = $(LAYER_TOP)/../Ceylan-WOOPER + + +# The OTP tree of the WOOPER application within a local OTP _build tree: +WOOPER_LOCAL_APP := $(LAYER_TOP)/_build/default/lib/wooper + +# Where WOOPER BEAMs are to lie when used as an OTP application: +WOOPER_LOCAL_EBIN := $(WOOPER_LOCAL_APP)/ebin/ + + +US_COMMON_RELEASE_BASE := us-common-$(PROJECT_VERSION) + +US_COMMON_RELEASE_ARCHIVE_ZIP := $(US_COMMON_RELEASE_BASE).zip +US_COMMON_RELEASE_ARCHIVE_BZ2 := $(US_COMMON_RELEASE_BASE).tar.bz2 +US_COMMON_RELEASE_ARCHIVE_XZ := $(US_COMMON_RELEASE_BASE).tar.xz + + +# Source section. +US_COMMON_SRC := $(US_COMMON_TOP)/src + +# BEAM path section. +US_COMMON_BEAM := $(US_COMMON_TOP) + +# Include path section (also for include_lib). +US_COMMON_INC = -I$(US_COMMON_SRC) -I$(US_COMMON_TOP)/.. 
+ + +# In an OTP/rebar3-style application layout, at compilation time, libraries +# making use of WOOPER will expect its includes to be located in: +# +# (Myriad already taken care of, at the WOOPER level) +# +WOOPER_OTP_INC := -I$(US_COMMON_TOP)/../wooper/include + +INC += $(US_COMMON_INC) $(WOOPER_OTP_INC) + + +# When building a layer in an OTP context from its usual, GIT root (ex: with +# 'make rebar3-application'), the BEAMs of WOOPER are to be found in the OTP +# build tree - rather than in a supposedly fully-built usual root for WOOPER: +# +WOOPER_OTP_BEAM_DIR_FROM_USUAL = $(LAYER_TOP)/$(WOOPER_REBAR_BUILD_BASE)/ebin + + +# In an OTP/rebar3-style application layout, at compilation time, modules +# compiled (directly or not) by the WOOPER parse transform will expect its +# module to be located in: +# +WOOPER_OTP_BEAM_DIR_FROM_OTP = $(LAYER_TOP)/../wooper/ebin + + +DOC_ROOT = $(US_COMMON_TOP)/../../../doc + + +ifndef VM_NAME + + VM_NAME := us_common_debug + +endif + + +ifndef VM_TEST_NAME + + VM_TEST_NAME := us_common_test + +endif + + +# We rely on Myriad and WOOPER as well: +BEAM_DIRS += $(US_COMMON_BEAM_DIRS) + + +# So that they can be fetched from outside +# (see the 'list-beam-dirs' target) +# +US_COMMON_BEAM_DIRS = $(US_COMMON_BEAM)/src + + +# For later reuse in the next layer of the software stack: +US_COMMON_PLT_FILE := $(US_COMMON_TOP)/us_common.plt + + +# This is the merged PLT of the level just below in the software stack. 
+# For us-common, it is the PLT of 'Traces': +ifndef PREDECESSOR_PLT + PREDECESSOR_PLT = $(TRACES_PLT_FILE) +endif + + +# We define the path to the Traces layer: +# +# (it may be a symbolic link pointing to the actual Traces package to be used, +# which itself may be either a 'Ceylan-Traces' directory or a 'traces' one) +# +ifneq ($(wildcard $(US_COMMON_TOP)/../traces),) + + TRACES_TOP = $(US_COMMON_TOP)/../traces + +else + + # Default: + TRACES_TOP = $(US_COMMON_TOP)/../Ceylan-Traces + +endif diff --git a/conf/GNUmakefile b/conf/GNUmakefile new file mode 100644 index 0000000..36527e3 --- /dev/null +++ b/conf/GNUmakefile @@ -0,0 +1,31 @@ +US_COMMON_TOP = .. + + +.PHONY: clean-local clean-rebar + + + +# Typically to generate various flavours (normal, for Hex, for testing, etc.) of +# rebar.config files: +# +# (note: not centralised in a GNUmakerules-automatic file for all layers, as +# substitutions are layer-specific) +# +%: %.template + @echo " Generating $@ from $< (VERSION_FOR_REBAR3 being $(VERSION_FOR_REBAR3), TRACES_VERSION_FOR_REBAR being $(TRACES_VERSION), WOOPER_VERSION_FOR_REBAR being $(WOOPER_VERSION), MYRIAD_VERSION_FOR_REBAR being $(MYRIAD_VERSION)); this file should be copied to the root of this layer in order to be taken into account." 
+ @cat $< | sed "s|\"MYRIAD_VERSION_FOR_REBAR\"|\"$(MYRIAD_VERSION)\"|g" | sed "s|\"WOOPER_VERSION_FOR_REBAR\"|\"$(WOOPER_VERSION)\"|g" | sed "s|\"TRACES_VERSION_FOR_REBAR\"|\"$(TRACES_VERSION)\"|g" | sed "s|\"VERSION_FOR_REBAR3\"|\"$(VERSION_FOR_REBAR3)\"|g" | sed "s|FIRST_FILES_FOR_REBAR|$(REBAR_FIRST_FILES)|g" | sed "s|DEBUG_KEY_FOR_REBAR|$(DEBUG_INFO_KEY)|g"| sed "s|INCS_FOR_REBAR|$(REBAR_INCS)|g" > $@ + + +clean: clean-local + + +clean-local: clean-rebar + -@/bin/rm -f $(REBAR_CONF_FILES) + + +clean-rebar: + -@/bin/rm -f rebar.config + -@#/bin/rm -rf priv/ + + +include $(US_COMMON_TOP)/GNUmakesettings.inc diff --git a/conf/rebar.config.template b/conf/rebar.config.template new file mode 100644 index 0000000..856c183 --- /dev/null +++ b/conf/rebar.config.template @@ -0,0 +1,106 @@ +% This is a configuration file of rebar3, so that the us_common application can +% better integrate in the current OTP ecosystem, despite its (more complex, +% probably more flexible) native build based on GNU make. + +% If the name of this file is 'rebar.config', then it is a generated file, +% otherwise it is a template (located in conf/rebar.config.template), meant to +% be filled by information determined at build time or coming from +% GNUmakevars.inc. +% +% See the Myriad counterpart file (same name) for more explanations. + + +% Settings for the 'default' profile follow. + + +% Depends on the following applications (Erlang implied): +{deps, [ + {myriad, {git, "git://github.com/Olivier-Boudeville/Ceylan-Myriad", + {branch, "master"}}}, + {wooper, {git, "git://github.com/Olivier-Boudeville/Ceylan-WOOPER", + {branch, "master"}}}, + {traces, {git, "git://github.com/Olivier-Boudeville/Ceylan-Traces", + {branch, "master"}}} +]}. 
+ + + +% Include directories found in INC: +{erl_opts, [ {d,myriad_debug_mode}, {d,wooper_debug_mode}, {d,traces_debug_mode}, {d,us_common_debug_mode}, {d,tracing_activated}, debug_info, {debug_info_key,"DEBUG_KEY_FOR_REBAR"}, report_warnings, warn_export_all, warn_export_vars, warn_shadow_vars, warn_obsolete_guards, warn_unused_import, warnings_as_errors, {parse_transform,wooper_parse_transform}, INCS_FOR_REBAR ]}. + + + +% For release generation: +% +% (defaults are for the development mode) +% +% With relx, only direct dependencies need to be listed, and version constraints +% can be used, instead of exact, specific versions. +% +{relx, [ + + % Not 'us_common_release', otherwise plenty of paths will be cluttered: + {release, {us_common, "VERSION_FOR_REBAR3"}, + + % Listing an application here seems necessary (otherwise its .app file will + % not be found), however it will also result in its automatic starting with + % no specfied arguments, whereas at least for some (ex: simple_bridge) we + % need to specify them (ex: which backend to be used). + % + % Listing 'traces' here implies its prerequisites (namely Myriad and WOOPER) + % and is needed, otherwise their respective modules will not be in the + % code path: + % + [ traces, us_common ] }, + + {sys_config, "conf/sys.config"}, + + % We need specified VM arguments (notably: long node names wanted): + {vm_args, "conf/vm.args"}, + + {dev_mode, true}, + {include_erts, false}, + + {extended_start_script, true}, + {include_src, false} + +] }. 
+ + +{profiles, [ + + % Production mode (the default one when used as a dependency): + {prod, [ + + % Like for the default profile, except for removed {d,myriad_debug_mode}, + % {d,wooper_debug_mode}, {d,traces_debug_mode}, {d,us_common_debug_mode} and + % {d,tracing_activated}, and added {d,exec_target_is_production}: + % + {erl_opts, [ {d,exec_target_is_production}, debug_info, {debug_info_key,"DEBUG_KEY_FOR_REBAR"}, report_warnings, warn_export_all, warn_export_vars, warn_shadow_vars, warn_obsolete_guards, warn_unused_import, warnings_as_errors, {parse_transform,wooper_parse_transform}, INCS_FOR_REBAR ]}, + + % For the build makefiles: + {env, [ { 'REBAR_PROFILE', "prod" } ] }, + + {relx, [ + + % Not wanting the release to contain symlinks to applications: + {dev_mode, false}, + + % Creating a full target system: + %{include_erts, true}, + + % Or sharing a local Erlang environment (then the OTP versions must + % precisely match): + % + {include_erts, false}, + + {extended_start_script, true}, + {include_src, false} + + ]} ] } + + % No test profile currently needed. + +]}. + +% No Hex package wanted with this version. \ No newline at end of file diff --git a/conf/sys.config b/conf/sys.config new file mode 100644 index 0000000..b96fec5 --- /dev/null +++ b/conf/sys.config @@ -0,0 +1,26 @@ +% See https://erlang.org/doc/man/config.html for more information. +[ + + { kernel, [ + + % Allowed TCP port range (expected to be in-line with firewall settings): + % + % (see https://erlang.org/doc/man/kernel_app.html) + % + { inet_dist_listen_min, 50000 }, + { inet_dist_listen_max, 55000 } + + ] + + }, + + { traces, [ + + % By default no (server-side) graphical trace supervisor wanted: + %{ is_batch, true } + { is_batch, false } + + ] + } + +]. 
\ No newline at end of file diff --git a/conf/us_common.app.src b/conf/us_common.app.src new file mode 100644 index 0000000..b7299fc --- /dev/null +++ b/conf/us_common.app.src @@ -0,0 +1,52 @@ +% Description of the US-common OTP active application, typically used by rebar3. +% +% The real source of this file is conf/us_common.app.src, from which +% _build/lib/us_common/ebin/us_common.app is generated and copied to +% ebin/us_common.app; finally src/us_common.app.src is a mere symlink to this +% last file, so we have: +% +% ./conf/us_common.app.src [only real source] +% ./_build/lib/us_common/ebin.us_common.app +% ./ebin/us_common.app +% ./src/us_common.app.src -> ../ebin/us_common.app +% +% For more information see the Myriad 'rebar3-create-app-file' make target and +% its associated comments. + +% See also: +% - http://erlang.org/doc/man/app.html +% - https://learnyousomeerlang.com/building-otp-applications + + +{application, us_common, + [{description, "US-common, the base OTP active application on which the various Universal Service elements are built (see http://us.esperide.org)"}, + {vsn, "VERSION_FOR_REBAR3"}, + {registered, []}, + + % Regarding: + % - WOOPER, see http://wooper.esperide.org/wooper.html#otp + % - Myriad, see http://myriad.esperide.org/myriad.html#otp + % + % myriad is a dependency of wooper, which is itself a dependency of traces and + % as such may not be listed here, however we stay conservative; + % + {applications, [kernel, stdlib, sasl, myriad, wooper, traces]}, + {env,[]}, + + % Flat hierarchy in ebin here: + {modules, [MODULES_FOR_REBAR3]}, + + {licenses, ["US-Common is licensed by its author (Olivier Boudeville) under the GNU Affero General Public License (AGPL), version 3.0 or later"]}, + + % Active application: + % + % (no specific relevant startup argument to specify here) + % + {mod, {us_common_app, []}}, + + {links, [ {"Official website", "http://us.esperide.org" }, + {"Github", "https://github.com/Olivier-Boudeville/us-common"} 
]} + + %{exclude_files, []} + + ]}. diff --git a/conf/vm.args b/conf/vm.args new file mode 100644 index 0000000..f6730ca --- /dev/null +++ b/conf/vm.args @@ -0,0 +1,36 @@ +# Custom us-common VM arguments. + +## Name of the node (long name wanted, not short): +-name us_common + +## Cookie for distributed erlang (will be switched at runtime): +-setcookie us_common_initial_cookie + +## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive +## (Disabled by default..use with caution!) +##-heart + +## Enable kernel poll and a few async threads + +# Mostly irrelevant since OTP 21: +##+K true + +# In case of many file I/O: ++A 128 + +## Increase number of concurrent ports/sockets: +-env ERL_MAX_PORTS 8192 + +## Tweak GC to run more often: +##-env ERL_FULLSWEEP_AFTER 10 + +# +B [c | d | i] +# Option c makes Ctrl-C interrupt the current shell instead of invoking the emulator break +# handler. Option d (same as specifying +B without an extra option) disables the break handler. # Option i makes the emulator ignore any break signal. +# If option c is used with oldshell on Unix, Ctrl-C will restart the shell process rather than +# interrupt it. +# Disable the emulator break handler +# it easy to accidentally type ctrl-c when trying +# to reach for ctrl-d. ctrl-c on a live node can +# have very undesirable results +##+Bi diff --git a/doc/GNUmakefile b/doc/GNUmakefile new file mode 100644 index 0000000..f596775 --- /dev/null +++ b/doc/GNUmakefile @@ -0,0 +1,89 @@ +US_COMMON_TOP = .. 
+ + +DOC_BASE_NAME := US-common-manual-english + +DOC_PDF_NAME := $(DOC_BASE_NAME).pdf + +OVERALL_DOCUMENT_TARGET = $(DOC_BASE_NAME).rst + +CURRENT_US_COMMON_HTML = $(DOC_BASE_NAME).html + +HTML_TARGET = $(CURRENT_US_COMMON_HTML) + +DOC_GENERATED_FILES = $(DOC_PDF_NAME) $(HTML_TARGET) + +DOC_FILES = $(DOC_GENERATED_FILES) + + +CURRENT_US_COMMON_DOC := $(DOC_PDF_NAME)-$(US_COMMON_VERSION).pdf + +PROJECT_CSS := pygments-default.css,us-common.css + +PROJECT_ICON := us-common-icon.png + +WEB_CONTENT = *.html *.css *.png *.pdf + +# Read from the user's environment: +GITHUB_PAGES_BASE := $(shell basename "$(US_COMMON_WEB_MIRROR_BRANCH)") + + +OVERALL_DOCUMENT_SOURCE = $(DOC_BASE_NAME).rst + + +.PHONY: all doc full-doc clone-mirror \ + export-doc export-to-official export-to-mirror \ + info-web clean clean-doc test uml-diagram-test + + + +# Default do-nothing target: +all: + @echo " Run 'make full-doc' to generate the manual of the us-common layer." + + +doc: + + +full-doc: $(DOC_BASE_NAME).pdf $(DOC_BASE_NAME).html + + +html: clean-doc $(HTML_TARGET) + + +# Creates a separate repository for the GitHub pages branch: +# (please then remove all initial content of that branch) +# +clone-mirror: + @cd ../../ && git clone https://github.com/Olivier-Boudeville/.git $(GITHUB_PAGES_BASE) && cd $(GITHUB_PAGES_BASE) && git checkout -b gh-pages + + +export-doc: clean-doc full-doc export-to-official export-to-mirror + + +export-to-official: $(DOC_GENERATED_FILES) + @echo " Exporting us-common documentation to official website ($(WEB_SRV))" + @/bin/scp $(SP) $(WEB_CONTENT) $(WEB_SRV):$(WEB_ROOT)/US-Common/ + + +export-to-mirror: $(DOC_GENERATED_FILES) + @echo " Exporting us-common documentation to mirror website in $(US_COMMON_WEB_MIRROR_BRANCH)" + @/bin/cp -f $(WEB_CONTENT) $(US_COMMON_WEB_MIRROR_BRANCH) && cd $(US_COMMON_WEB_MIRROR_BRANCH) && git add . && git merge -s ours && git commit -m "US-Common doc updated." 
&& git push && git pull --ff-only + + +clean: clean-doc + +clean-doc: + -@/bin/rm -f *.aux *.log *.maf *.mtc* *.stc* *.tex *.toc \ + $(CURRENT_US_COMMON_DOC) $(DOC_GENERATED_FILES) + + +info-web: + @echo "WEB_CONTENT = $(WEB_CONTENT)" + @echo "US_COMMON_WEB_MIRROR_BRANCH = $(US_COMMON_WEB_MIRROR_BRANCH)" + @echo "GITHUB_PAGES_BASE = $(GITHUB_PAGES_BASE)" + + +DOCUTILS_TOP = . + +include $(US_COMMON_TOP)/GNUmakesettings.inc diff --git a/ebin/GNUmakefile b/ebin/GNUmakefile new file mode 100644 index 0000000..5dbfb7d --- /dev/null +++ b/ebin/GNUmakefile @@ -0,0 +1,5 @@ +US_COMMON_TOP = .. + +# Mostly so that ebin is known of GIT. + +include $(US_COMMON_TOP)/GNUmakesettings.inc diff --git a/priv/bin/us-common.sh b/priv/bin/us-common.sh new file mode 100644 index 0000000..4027500 --- /dev/null +++ b/priv/bin/us-common.sh @@ -0,0 +1,701 @@ +# Common script facilities relating to the Universal Server. +# +# Allows to avoid code duplication. Meant to be sourced, not directly executed. +# +# Used for example by start-us-web.sh and stop-us-web.sh. + + + +# Notes: +# +# - "-f" detects symlinks as well +# - some whitespace flexibility is allowed in the configuration files + + + +# Sets notably: us_config_dir, us_config_file, epmd_opt, vm_cookie, +# execution_context, us_username, us_groupname, us_app_base_dir, us_log_dir. +# +read_us_config_file() +{ + + us_config_filename="us.config" + + # Use any command-line specified overriding US configuration directory (for + # runtime basics): + # + us_config_dir="$1" + + + if [ -n "${us_config_dir}" ] ; then + + if [ -d "${us_config_dir}" ]; then + + echo "Using command-line specified US configuration directory '${us_config_dir}'." + + else + + echo " Error, command-line specified US configuration directory '${us_config_dir}' not found." 1>&2 + exit 10 + + fi + + us_config_file="${us_config_dir}/${us_config_filename}" + + if [ ! -f "${us_config_file}" ] ; then + + echo " Error, no US configuration file found (no '${us_config_file}')." 
1>&2 + + exit 15 + + fi + + else + + echo "No US configuration directory specified on the command-line, searching it through standard paths." + + # Otherwise (not set in the command-line; the general case) try to use + # the default US configuration directory for all US-related services; we + # try to apply in this script exactly the same rules as + # class_USConfigServer, in order to always select the same actual + # configuration file that the US framework itself will use afterwards, + # once launched: + + if [ -z "${XDG_CONFIG_HOME}" ] ; then + + base_path="$HOME/.config" + + else + + base_path="${XDG_CONFIG_HOME}" + + fi + + app_dir="universal-server" + + us_config_dir="${base_path}/${app_dir}" + + us_config_file="${us_config_dir}/${us_config_filename}" + + echo "Looking up '${us_config_file}'..." + + if [ ! -f "${us_config_file}" ] ; then + + if [ -z "${XDG_CONFIG_DIRS}" ] ; then + + base_path="/etc/xdg" + + else + + # Pops the first element of that list: + # + # (note: currently the next - if any - directories in that list are + # not tested; not implemented yet) + # + base_path=$(echo "${XDG_CONFIG_DIRS}" | sed 's|:.*$||1') + + fi + + us_config_dir="${base_path}/${app_dir}" + us_config_file="${us_config_dir}/${us_config_filename}" + + echo "Looking up '${us_config_file}'..." + + if [ -f "${us_config_file}" ] ; then + + echo "No configuration file specified on the command-line, using default one '${us_config_file}'." + + else + + # Any hardcoded, built-in defaults not desirable, as could + # silently hide an error: + + #echo " Warning: no US configuration file '${us_config_file}' specified, and no default one found, using built-in defaults." 1>&2 + + #erl_epmd_port=4506 + #vm_cookie="xxx" + #execution_context="production" + #us_web_username="web-srv" + + # So: + echo " Error, no command-line specified or default US configuration file found." 1>&2 + + exit 20 + + fi + + fi + + fi + + + echo "Using US configuration file '${us_config_file}'." 
+ + # Just extracting what this script needs, i.e. common settings and the name + # of the US-web configuration file. + + + # US configuration content, read once for all, with comments (%) removed: + us_base_content=$(/bin/cat "${us_config_file}" | sed 's|^[[:space:]]*%.*||1') + + erl_epmd_port=$(echo "${us_base_content}" | grep epmd_port | sed 's|^[[:space:]]*{[[:space:]]*epmd_port,[[:space:]]*||1' | sed 's|[[:space:]]*}.$||1') + + if [ -z "${erl_epmd_port}" ] ; then + + echo "No Erlang EPMD port specified, not interfering with context defaults." + epmd_opt="" + + else + + echo "Using specified EPMD port, '${erl_epmd_port}'." + epmd_opt="ERL_EPMD_PORT=${erl_epmd_port}" + + fi + + #echo "epmd_opt = $epmd_opt" + + + vm_cookie=$(echo "${us_base_content}" | grep vm_cookie | sed 's|^{[[:space:]]*vm_cookie,[[:space:]]*||1' | sed 's|[[:space:]]*}.$||1') + + #echo "vm_cookie = $vm_cookie" + + + execution_context=$(echo "${us_base_content}" | grep execution_context | sed 's|^{[[:space:]]*execution_context,[[:space:]]*||1' | sed 's|[[:space:]]*}.$||1') + + # echo "execution_context = $execution_context" + + if [ -z "${execution_context}" ] ; then + + # Default: + execution_context="production" + + fi + + + us_username=$(echo "${us_base_content}" | grep us_username | sed 's|^{[[:space:]]*us_username,[[:space:]]*||1' | sed 's|[[:space:]]*}.$||1') + + #echo "us_username = $us_username" + + + us_groupname=$(echo "${us_base_content}" | grep us_groupname | sed 's|^{[[:space:]]*us_groupname,[[:space:]]"*||1' | sed 's|"[[:space:]]*}.$||1') + + #echo "us_groupname = $us_groupname" + + + + us_app_base_dir=$(echo "${us_base_content}" | grep us_app_base_dir | sed 's|^{[[:space:]]*us_app_base_dir,[[:space:]]*"||1' | sed 's|"[[:space:]]*}.$||1') + + + if [ -z "${us_app_base_dir}" ] ; then + + # Environment variable as last-resort: + if [ -z "${US_APP_BASE_DIR}" ] ; then + + echo " Error, no base directory specified for the US application (no 'us_app_base_dir' entry in the main US 
configuration file '${us_config_file}' nor US_APP_BASE_DIR environment variable set)." 1>&2 + exit 25 + + else + us_app_base_dir="${US_APP_BASE_DIR}" + echo "No base directory specified for the US application, using the value of the US_APP_BASE_DIR environment variable, trying '${us_app_base_dir}'." + + fi + + else + + echo "Using specified base directory for the US application, '${us_app_base_dir}'." + + fi + + + if [ ! -d "${us_app_base_dir}" ] ; then + + echo " Error, the base directory determined for the US application, '${us_app_base_dir}', is not an existing directory." 1>&2 + exit 30 + + fi + + + # In the context of the US server, us_log_dir is the directory in which + # VM-level and Traces log files are to be written: + + us_log_dir=$(echo "${us_base_content}" | grep us_log_dir | sed 's|^{[[:space:]]*us_log_dir,[[:space:]]*"||1' | sed 's|"[[:space:]]*}.$||1') + + if [ -z "${us_log_dir}" ] ; then + + us_log_dir="${us_app_base_dir}/log" + + if [ ! -d "${us_log_dir}" ] ; then + + saved_log_dir="${us_log_dir}" + + # Maybe in development mode then (i.e. as a rebar3 build tree): + us_log_dir="${us_app_base_dir}/_build/default/rel/universal_server/log" + + if [ ! -d "${us_log_dir}" ] ; then + + echo " Error, no US log directory found: neither '${saved_log_dir}' (as a standard release) nor '${us_log_dir}' (as a rebar3 build tree)." 1>&2 + exit 35 + + else + + echo "Rebar3 build tree detected, US log directory found as '${us_log_dir}'." + fi + + else + + echo "Standard OTP release detected, US log directory found as '${us_log_dir}'." + + fi + + else + + # Checks whether absolute: + if [[ "${us_log_dir:0:1}" == / || "${us_log_dir:0:2}" == ~[/a-z] ]] ; then + + echo "Using directly specified directory for logs, '${us_log_dir}'." + + else + + # If it is relative, it is relative to the US application base + # directory: + # + us_log_dir="${us_app_base_dir}/${us_log_dir}" + echo "Using specified directory for US logs (made absolute), '${us_log_dir}'." 
+ + fi + + fi + + if [ ! -d "${us_log_dir}" ] ; then + + echo " Error, no US log directory found ('${us_log_dir}')." 1>&2 + exit 60 + + fi + + echo "US logs will be written in the '${us_log_dir}' directory." + + # To delimit between US and US-web: + echo + +} + + + + +# Sets notably: us_web_config_file, us_web_username, us_web_app_base_dir, +# us_web_log_dir, us_web_rel_dir, us_web_exec. +# +# read_us_config_file must have been run beforehand. +# +read_us_web_config_file() +{ + + us_web_config_filename=$(echo "${us_base_content}" | grep us_web_config_filename | sed 's|^[[:space:]]*{[[:space:]]*us_web_config_filename,[[:space:]]*"||1' | sed 's|"[[:space:]]*}.$||1') + + us_web_default_config_filename="us-web.config" + + if [ -z "${us_web_config_filename}" ] ; then + + us_web_config_filename="${us_web_default_config_filename}" + + echo "No US-web configuration filename specified, using default one, '${us_web_config_filename}'." + + else + + echo "Using specified US-web configuration filename, '${us_web_config_filename}'." + + fi + + us_web_config_file="${base_path}/${app_dir}/${us_web_config_filename}" + + echo "Looking up '${us_web_config_file}'..." + + if [ ! -f "${us_web_config_file}" ] ; then + + echo " Error, US-web configuration filename '${us_web_config_filename}' not found." 1>&2 + exit 40 + + fi + + echo "Using US-web configuration file '${us_web_config_file}'." + + + # US-web configuration data content, read once for all, with comments (%) + # removed: + # + us_web_base_content=$(/bin/cat "${us_web_config_file}" | sed 's|^[[:space:]]*%.*||1') + + us_web_username=$(echo "${us_web_base_content}" | grep us_web_username | sed 's|^{[[:space:]]*us_web_username,[[:space:]]*"||1' | sed 's|"[[:space:]]*}.$||1') + + if [ -z "${us_web_username}" ] ; then + + us_web_username="${USER}" + + if [ -z "${us_web_username}" ] ; then + + echo " Error, no USER environment variable set, whereas not username specified in configuration file." 
1>&2 + exit 45 + + fi + + echo "No web username specified, using current one, '${us_web_username}'." + + else + + echo "Using specified web username, '${us_web_username}'." + + fi + + #echo "us_web_username = $us_web_username" + + + us_web_app_base_dir=$(echo "${us_web_base_content}" | grep us_web_app_base_dir | sed 's|^{[[:space:]]*us_web_app_base_dir,[[:space:]]*"||1' | sed 's|"[[:space:]]*}.$||1') + + if [ -z "${us_web_app_base_dir}" ] ; then + + # Environment variable as last-resort: + if [ -z "${US_WEB_APP_BASE_DIR}" ] ; then + + # Wild guess: + us_web_app_base_dir=$(/bin/ls -d ${us_app_base_dir}/../../*/us_web 2>/dev/null | xargs realpath) + + echo "No base directory specified for the US-web application nor US_WEB_APP_BASE_DIR environment variable set, deriving it from the US application one: trying '${us_web_app_base_dir}'." + + else + + us_web_app_base_dir="${US_WEB_APP_BASE_DIR}" + echo "No base directory specified for the US-web application, using the value of the US_WEB_APP_BASE_DIR environment variable, trying '${us_web_app_base_dir}'." + + fi + + + else + + echo "Using the specified base directory for the US-web application, '${us_web_app_base_dir}'." + + fi + + if [ ! -d "${us_web_app_base_dir}" ] ; then + + echo " Error, the base directory determined for the US-web application, '${us_web_app_base_dir}', is not an existing directory." 1>&2 + exit 45 + + fi + + + # VM-level logs (not based on us_web_log_dir - which is dedicated to the + # web-related ones): + # + # (note that us_web_vm_log_dir is for US-web what us_log_dir is for US) + # + # (typically here in production mode, as a standard release) + # + us_web_vm_log_dir="${us_web_app_base_dir}/log" + + if [ ! -d "${us_web_vm_log_dir}" ] ; then + + saved_log_dir="${us_web_vm_log_dir}" + + # Maybe in development mode then (i.e. as a rebar3 build tree): + us_web_vm_log_dir="${us_web_app_base_dir}/_build/default/rel/us_web/log" + + if [ ! 
-d "${us_web_vm_log_dir}" ] ; then + + # Not an error per se, may happen for example when running a new + # release: + + #echo " Error, no US-web VM log directory found: neither '${saved_log_dir}' (as a standard release) nor '${us_web_vm_log_dir}' (as a rebar3 build tree). Possibly a release not even started?" 1>&2 + #exit 50 + + echo "No US-web VM log directory found, neither '${saved_log_dir}' (as a standard release) nor '${us_web_vm_log_dir}' (as a rebar3 build tree)." + + else + + echo "Rebar3 build tree detected, US-web VM log directory found as '${us_web_vm_log_dir}'." + fi + + else + + echo "Standard OTP release detected, US-web VM log directory found as '${us_web_vm_log_dir}'." + + fi + + us_web_vm_log_file="${us_web_vm_log_dir}/erlang.log.1" + + + # Supposing first the path of a real release having been specified; for + # example: "/opt/universal-server/us-web-x.y.z": + # + us_web_rel_dir="${us_web_app_base_dir}" + + us_web_exec="${us_web_app_base_dir}/bin/us_web" + + if [ ! -x "${us_web_exec}" ] ; then + + saved_exec="${us_web_exec}" + + # Maybe then in a rebar3 build tree: + us_web_rel_dir="${us_web_app_base_dir}/_build/default/rel/us_web" + + us_web_exec="${us_web_rel_dir}/bin/us_web" + + if [ ! -x "${us_web_exec}" ] ; then + + echo " Error, the specified US-web application base directory ('${us_web_app_base_dir}') does not seem to be a legit one: no '${saved_exec}' (not a standard release) nor '${us_web_exec}' (not a rebar3 build tree)." 1>&2 + exit 50 + + else + + echo "Rebar3 build tree detected, US-web application found as '${us_web_exec}'." + fi + + else + + echo "Standard OTP release detected, US-web application found as '${us_web_exec}'." 
+ + fi + + + us_web_log_dir=$(echo "${us_web_base_content}" | grep us_web_log_dir | sed 's|^{[[:space:]]*us_web_log_dir,[[:space:]]*"||1' | sed 's|"[[:space:]]*}.$||1') + + if [ -z "${us_web_log_dir}" ] ; then + + us_web_log_dir="/var/log" + echo "No base directory specified for web logs (no 'us_web_log_dir' entry in the US-web configuration file '${us_web_config_file}'), trying default log directory '${us_web_log_dir}'." + + else + + # Checks whether absolute: + if [[ "${us_web_log_dir:0:1}" == / || "${us_web_log_dir:0:2}" == ~[/a-z] ]] ; then + + echo "Using directly specified directory for web logs, '${us_web_log_dir}'." + + else + + # If it is relative, it is relative to the US-web application base + # directory: + # + us_web_log_dir="${us_web_app_base_dir}/${us_web_log_dir}" + echo "Using specified directory for web logs (made absolute), '${us_web_log_dir}'." + + fi + + fi + + if [ ! -d "${us_web_log_dir}" ] ; then + + echo " Error, no US-web log directory (for web-level logs) found ('${us_web_log_dir}')." 1>&2 + exit 60 + + fi + + echo "US-web (web-level) logs to be written in the '${us_web_log_dir}' directory." + +} + + + +secure_authbind() +{ + + authbind="/bin/authbind" + + if [ ! -x "${authbind}" ] ; then + + echo " Error, authbind ('${authbind}') not found." 1>&2 + exit 60 + + fi + +} + + + +# Updates the relevant US-web vm.args release file with any user-defined Erlang +# cookie the VM switched to. +# +# The relevant US configuration file must have been run beforehand (see +# read_us_config_file). +# +# See also: the reciprocal restore_us_web_config_cookie function. +# +update_us_web_config_cookie() +{ + + if [ -n "${vm_cookie}" ] ; then + + # Let's auto-generate on the fly a vm.args with the right runtime cookie + # (as it was changed at startup): + + base_rel_cfg_dir="${us_web_app_base_dir}/releases/latest-release" + + if [ ! 
-d "${base_rel_cfg_dir}" ]; then + + echo " Error, the base configuration directory for the release, '${base_rel_cfg_dir}' (obtained from '${us_web_app_base_dir}'), could not be found." 1>&2 + + exit 20 + + fi + + vm_args_file="${base_rel_cfg_dir}/vm.args" + + if [ ! -f "${vm_args_file}" ]; then + + echo " Error, the release vm.args file could not be found (searched for '${vm_args_file}')." 1>&2 + + exit 25 + + fi + + # The original VM args (typically including a safe, dummy cookie): + original_vm_args_file="${base_rel_cfg_dir}/.vm.args.original" + + # Do not overwrite original information (ex: if update was run twice + # with no restore in-between): + # + if [ ! -f "${original_vm_args_file}" ] ; then + + /bin/mv -f "${vm_args_file}" "${original_vm_args_file}" + + else + + /bin/rm -f "${vm_args_file}" + + fi + + # So in all cases, here original_vm_args_file exists, and vm_args_file + # not. + + # Avoid reading and writing in the same file: + /bin/cat "${original_vm_args_file}" | /bin/sed "s|-setcookie.*|-setcookie ${vm_cookie}|1" > "${vm_args_file}" + + #/bin/cp -f "${vm_args_file}" "${base_rel_cfg_dir}/vm.args-for-inspection.txt" + + #echo "Content of newer vm.args:" 1>&2 + #/bin/cat "${vm_args_file}" 1>&2 + + # Leaving as is original_vm_args_file, for any future use. + + echo "US-web vm.args updated with cookie ${vm_cookie}." + + # So both files exist. + + else + + echo "(no cookie defined, no vm.args updated)" + + fi + +} + + + +# Restores the original VM args file, after it has been updated, to avoid +# leaking the actual runtime cookie in any future command-line. +# +# (reciprocal fucntion of update_us_web_config_cookie) +# +restore_us_web_config_cookie() +{ + + # Depends on whether a specific cookie had been defined: + + if [ -n "${vm_cookie}" ] ; then + + if [ -z "${original_vm_args_file}" ] ; then + + # No prior update_us_web_config_cookie call? + echo " Error, filename of original VM args not set (abnormal)." 
1>&2 + + exit 60 + + else + + if [ -f "${original_vm_args_file}" ] ; then + + /bin/mv -f "${original_vm_args_file}" "${vm_args_file}" + + fi + + fi + + fi + +} + + + +# Prepares a proper launch of US-web. +# +# read_us_web_config_file must have been run beforehand. +# +prepare_us_web_launch() +{ + + # Not wanting to look at older, potentially misleading logs: + + echo "Removing any logs in '${us_web_vm_log_dir}'." + + # Ensuring that directory exists: + mkdir -p ${us_web_vm_log_dir} + + /bin/rm -f ${us_web_vm_log_dir}/erlang.log.* ${us_web_vm_log_dir}/run_erl.log 2>/dev/null + + # Needed as a sign that any future start succeeded: + trace_file="${us_log_dir}/us_web.traces" + echo "Removing any '${trace_file}' trace file." + /bin/rm -f "${trace_file}" 2>/dev/null + + echo "Fixing permissions." + + # So that the VM can write its logs despite authbind: + chown ${us_web_username}:${us_groupname} ${us_web_vm_log_dir} + +} + + +# Inspects the VM logs of US-web (beware of ancient entries being displayed). +# +# read_us_web_config_file must have been run beforehand. +# +inspect_us_web_log() +{ + + # (run_erl.log not that useful) + + # A common problem is: "Protocol 'inet_tcp': the name xxx@yyy seems to be in + # use by another Erlang node". This may not even be true (no VM running), + # just a lingering EPMD believing this node still exists. + + + # Waits a bit if necessary while any writing takes place: + if [ ! -f "${us_web_vm_log_file}" ] ; then + sleep 1 + fi + + echo + if [ -f "${us_web_vm_log_file}" ] ; then + + echo "EPMD names output:" + epmd -port ${erl_epmd_port} -names + + echo + echo "Displaying the end of '${us_web_vm_log_file}':" + + # Still a bit of waiting, otherwise any error may not have been reported + # yet: + # + sleep 1 + + # A sufficient height is *necessary*: + tail --lines=80 "${us_web_vm_log_file}" + + else + + echo " Error, no US-web VM log file found (no '${us_web_vm_log_file}')." 
1>&2 + exit 50 + + fi + +} diff --git a/rebar.config b/rebar.config new file mode 100644 index 0000000..bde71f3 --- /dev/null +++ b/rebar.config @@ -0,0 +1,106 @@ +% This is a configuration file of rebar3, so that the us_common application can +% better integrate in the current OTP ecosystem, despite its (more complex, +% probably more flexible) native build based on GNU make. + +% If the name of this file is 'rebar.config', then it is a generated file, +% otherwise it is a template (located in conf/rebar.config.template), meant to +% be filled by information determined at build time or coming from +% GNUmakevars.inc. +% +% See the Myriad counterpart file (same name) for more explanations. + + +% Settings for the 'default' profile follow. + + +% Depends on the following applications (Erlang implied): +{deps, [ + {myriad, {git, "git://github.com/Olivier-Boudeville/Ceylan-Myriad", + {branch, "master"}}}, + {wooper, {git, "git://github.com/Olivier-Boudeville/Ceylan-WOOPER", + {branch, "master"}}}, + {traces, {git, "git://github.com/Olivier-Boudeville/Ceylan-Traces", + {branch, "master"}}} +]}. 
+ + + +% Include directories found in INC: +{erl_opts, [ {d,myriad_debug_mode}, {d,wooper_debug_mode}, {d,traces_debug_mode}, {d,us_common_debug_mode}, {d,tracing_activated}, debug_info, {debug_info_key,"Ceylan-Myriad"}, report_warnings, warn_export_all, warn_export_vars, warn_shadow_vars, warn_obsolete_guards, warn_unused_import, warnings_as_errors, {parse_transform,wooper_parse_transform}, {i,"src"}, {i,".."}, {i,"../traces/../wooper/include/"}, {i,"../traces/src"}, {i,"../traces/../wooper/include/"}, {i,"../traces/../wooper/src"}, {i,"../traces/../wooper/../myriad/include/"}, {i,"include"}, {i,"../traces/include"}, {i,"../traces/../wooper/include"}, {i,"../traces/../wooper/examples"}, {i,"../traces/../wooper/../myriad/include/data-management"}, {i,"../traces/../wooper/../myriad/include/maths"}, {i,"../traces/../wooper/../myriad/include/meta"}, {i,"../traces/../wooper/../myriad/include/user-interface/src"}, {i,"../traces/../wooper/../myriad/include/user-interface/src/textual"}, {i,"../traces/../wooper/../myriad/include/user-interface/src/graphical"}, {i,"../traces/../wooper/../myriad/include/utils"}, {i,"../traces/../wooper/../myriad/include/scripts"}, {i,"../traces/../wooper/../myriad/include/apps/merge-tool"}, {i,"../traces/../wooper/../myriad/.."} ]}. + + + +% For release generation: +% +% (defaults are for the development mode) +% +% With relx, only direct dependencies need to be listed, and version constraints +% can be used, instead of exact, specific versions. +% +{relx, [ + + % Not 'us_common_release', otherwise plenty of paths will be cluttered: + {release, {us_common, "0.0.1"}, + + % Listing an application here seems necessary (otherwise its .app file will + % not be found), however it will also result in its automatic starting with + % no specfied arguments, whereas at least for some (ex: simple_bridge) we + % need to specify them (ex: which backend to be used). 
+ % + % Listing 'traces' here implies its prerequisites (namely Myriad and WOOPER) + % and is needed, otherwise their respective modules will not be in the + % code path: + % + [ traces, us_common ] }, + + {sys_config, "conf/sys.config"}, + + % We need specified VM arguments (notably: long node names wanted): + {vm_args, "conf/vm.args"}, + + {dev_mode, true}, + {include_erts, false}, + + {extended_start_script, true}, + {include_src, false} + +] }. + + +{profiles, [ + + % Production mode (the default one when used as a dependency): + {prod, [ + + % Like for the default profile, except for removed {d,myriad_debug_mode}, + % {d,wooper_debug_mode}, {d,traces_debug_mode}, {d,us_common_debug_mode} and + % {d,tracing_activated}, and added {d,exec_target_is_production}: + % + {erl_opts, [ {d,exec_target_is_production}, debug_info, {debug_info_key,"Ceylan-Myriad"}, report_warnings, warn_export_all, warn_export_vars, warn_shadow_vars, warn_obsolete_guards, warn_unused_import, warnings_as_errors, {parse_transform,wooper_parse_transform}, {i,"src"}, {i,".."}, {i,"../traces/../wooper/include/"}, {i,"../traces/src"}, {i,"../traces/../wooper/include/"}, {i,"../traces/../wooper/src"}, {i,"../traces/../wooper/../myriad/include/"}, {i,"include"}, {i,"../traces/include"}, {i,"../traces/../wooper/include"}, {i,"../traces/../wooper/examples"}, {i,"../traces/../wooper/../myriad/include/data-management"}, {i,"../traces/../wooper/../myriad/include/maths"}, {i,"../traces/../wooper/../myriad/include/meta"}, {i,"../traces/../wooper/../myriad/include/user-interface/src"}, {i,"../traces/../wooper/../myriad/include/user-interface/src/textual"}, {i,"../traces/../wooper/../myriad/include/user-interface/src/graphical"}, {i,"../traces/../wooper/../myriad/include/utils"}, {i,"../traces/../wooper/../myriad/include/scripts"}, {i,"../traces/../wooper/../myriad/include/apps/merge-tool"}, {i,"../traces/../wooper/../myriad/.."} ]}, + + % For the build makefiles: + {env, [ { 'REBAR_PROFILE', "prod" } ] }, 
+ + {relx, [ + + % Not wanting the release to contain symlinks to applications: + {dev_mode, false}, + + % Creating a full target system: + %{include_erts, true}, + + % Or sharing a local Erlang environment (then the OTP versions must + % precisely match): + % + {include_erts, false}, + + {extended_start_script, true}, + {include_src, false} + + ]} ] } + + % No test profile currently needed. + +]}. + +% No Hex package wanted with this version. \ No newline at end of file diff --git a/rebar.lock b/rebar.lock new file mode 100644 index 0000000..57afcca --- /dev/null +++ b/rebar.lock @@ -0,0 +1 @@ +[]. diff --git a/src/GNUmakefile b/src/GNUmakefile new file mode 100644 index 0000000..ddcb0f3 --- /dev/null +++ b/src/GNUmakefile @@ -0,0 +1,9 @@ +US_COMMON_TOP = .. + +#MODULES_DIRS = apps + +# Must be the first target, otherwise BEAM files will not be built: +all: + + +include $(US_COMMON_TOP)/GNUmakesettings.inc diff --git a/src/class_USConfigServer.erl b/src/class_USConfigServer.erl new file mode 100644 index 0000000..f91fd08 --- /dev/null +++ b/src/class_USConfigServer.erl @@ -0,0 +1,1102 @@ +% Copyright (C) 2019-2020 Olivier Boudeville +% +% This file is part of US-Common, part of the Universal Server framework. +% +% This program is free software: you can redistribute it and/or modify it under +% the terms of the GNU Affero General Public License as published by the Free +% Software Foundation, either version 3 of the License, or (at your option) any +% later version. +% +% This program is distributed in the hope that it will be useful, but WITHOUT +% ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +% FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +% details. +% +% You should have received a copy of the GNU Affero General Public License along +% with this program. If not, see . +% +% Author: Olivier Boudeville (olivier.boudeville@esperide.com) +% Creation date: Tuesday, December 24, 2019. 
-module(class_USConfigServer).


-define( class_description,
		 "Singleton server holding the configuration information of the "
		 "Universal Server." ).


% Determines what are the direct mother classes of this class (if any):
-define( superclasses, [ class_USServer ] ).


% Shorthands:
-type bin_directory_path() :: file_utils:bin_directory_path().
-type bin_file_path() :: file_utils:bin_file_path().
-type server_pid() :: class_UniversalServer:server_pid().


% A table holding US configuration information:
-type us_config_table() :: table( atom(), term() ).



% Design notes:
%
% This overall, singleton server registers itself globally, so that other
% services can interact with it even if running in separate virtual machines
% (ex: US-web).
%
% The base directories for configuration information are, by decreasing order of
% priority:
% - $XDG_CONFIG_HOME (default: "$HOME/.config")
% - $XDG_CONFIG_DIRS (default: "/etc/xdg", directories being separated by ':')
%
% See
% https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
% and
% https://stackoverflow.com/questions/1024114/location-of-ini-config-files-in-linux-unix
% for more information.
%
%
% All base directories shall be absolute directories.
%
% The configuration directory is defined as the ?app_subdir sub-directory of the
% base directories that contains the ?config_filename files, and all other
% related configuration files.
%
% Ex: "~/.config/universal-server".

% See also: the start/stop scripts and us-common.sh, which apply mostly the same
% look-up logic.


% Class-level attribute declarations (WOOPER); each entry is
% {Name, Type, Description}:
%
-define( class_attributes, [

	{ config_base_directory, bin_directory_path(),
	  "the base directory where all US configuration is to be found" },

	% No vm_cookie :: net_utils:cookie() stored, as can be read directly from
	% the VM.

	{ epmd_port, net_utils:tcp_port(),
	  "the EPMD TCP port presumably in use (as read from the configuration)" },

	{ tcp_port_range, maybe( net_utils:tcp_port_range() ),
	  "the range (if any) of TCP ports to use for out-of-band inter-VM "
	  "communication (not using the Erlang carrier; ex: for send_file)" },

	{ execution_context, basic_utils:execution_context(),
	  "tells whether this server is to run in development or production mode" },

	{ log_directory, bin_directory_path(),
	  "the directory where all US-specific, low-level logging (as opposed to "
	  "Erlang-level log files such as erlang.log.* or to higher-level traces) "
	  "will be done" },

	{ web_config_filename, maybe( bin_file_path() ),
	  "the path to the configuration file (if any) regarding US-web (i.e. "
	  "webserver, virtual hosting, etc.)" },

	{ us_server_pid, maybe( server_pid() ),
	  "the PID of the associated US server (if any)" },

	{ us_username, system_utils:user_name(),
	  "the user who runs the Universal server application (note that there "
	  "may be discrepancies between the one of US and the one of other "
	  "servers such as US-web)" },

	{ us_groupname, system_utils:group_name(),
	  "the group that shall be common to all US-related users" },


	{ us_server_registration_name, naming_utils:registration_name(),
	  "the name under which the US server may be registered" },

	{ us_web_config_server_pid, maybe( class_UniversalServer:server_pid() ),
	  "the PID of the US web configuration server (if any)" },

	% Cannot easily be obtained otherwise:
	{ registration_name, naming_utils:registration_name(),
	  "the name under which this configuration server is registered" },

	{ app_base_directory, maybe( bin_directory_path() ),
	  "the directory (if any) where US-related applications (at least "
	  "the universal server itself) are located" } ] ).


% Used by the trace_categorize/1 macro to use the right emitter:
-define( trace_emitter_categorization, "US.Configuration.USConfigServer" ).



% For various defines:
-include_lib("us-common/include/class_USConfigServer.hrl").


% The default registration name of the overall US configuration server:
-define( default_us_config_reg_name, us_config_server ).

% The default registration name of the overall US server:
-define( default_us_reg_name, us_server ).


% The default registration scope of the overall US configuration server:
-define( default_registration_scope, global_only ).


% The name of the main Universal Server configuration file:
-define( us_config_filename, "us.config" ).


% Keys possibly read from the US configuration filename:
-define( vm_cookie_key, vm_cookie ).
-define( epmd_port_key, epmd_port ).
-define( tcp_port_range_key, tcp_port_range ).

-define( execution_context_key, execution_context ).

-define( us_username_key, us_username ).
-define( us_groupname_key, us_groupname ).

-define( us_server_registration_name_key,
		 us_server_registration_name ).

-define( us_config_server_registration_name_key,
		 us_config_server_registration_name ).


-define( us_app_base_dir_key, us_app_base_dir ).

-define( us_log_dir_key, us_log_dir ).

-define( us_web_config_filename_key, us_web_config_filename ).


% All known, licit keys for the US configuration file (used to detect any
% unexpected entry when loading the configuration):
%
-define( known_config_keys, [ ?vm_cookie_key, ?epmd_port_key,
	?tcp_port_range_key, ?execution_context_key,
	?us_username_key, ?us_groupname_key,
	?us_server_registration_name_key, ?us_config_server_registration_name_key,
	?us_app_base_dir_key, ?us_log_dir_key, ?us_web_config_filename_key ] ).


% The last-resort environment variable:
-define( us_app_env_variable, "US_APP_BASE_DIR" ).

% The default base directory for log files:
-define( default_log_base_dir, "/var/log" ).
% Allows to define WOOPER base variables and methods for that class:
-include_lib("wooper/include/wooper.hrl").

% Allows to use macros for trace sending:
-include_lib("traces/include/class_TraceEmitter.hrl").




% Constructs the US configuration server, using the default logic to find its
% configuration file.
%
% Throws us_configuration_directory_not_found if no configuration directory
% could be determined.
%
-spec construct( wooper:state() ) -> wooper:state().
construct( State ) ->

	% First the direct mother classes, then this class-specific actions:
	TraceState = class_USServer:construct( State,
		?trace_categorize("USConfigServer") ),

	?send_info( TraceState, "Creating a US configuration server." ),

	BinCfgDir = case get_us_config_directory() of

		{ undefined, CfgMsg } ->
			?send_error_fmt( TraceState, "Unable to determine the US "
				"configuration directory: ~s.", [ CfgMsg ] ),
			throw( us_configuration_directory_not_found );

		{ BinFoundCfgDir, CfgMsg } ->
			?send_info( TraceState, CfgMsg ),
			BinFoundCfgDir

	end,

	CfgState = load_configuration( BinCfgDir, TraceState ),

	% Enforce security in all cases ("chmod 770": the permission list below
	% grants read/write/execute to owner and group only - not "chmod 700" as
	% previously stated); if it fails here, the combined path/user
	% configuration must be incorrect; however this server may be run from
	% another US application (typically US-web), possibly running as a user of
	% their own, different from the main US user (yet supposedly in the same
	% US group).
	%
	% So:

	CurrentUserId = system_utils:get_user_id(),

	LogDir = getAttribute( CfgState, log_directory ),

	case file_utils:get_owner_of( LogDir ) of

		CurrentUserId ->
			file_utils:change_permissions( LogDir,
				[ owner_read, owner_write, owner_execute,
				  group_read, group_write, group_execute ] );

		% Not owned, do nothing:
		_OtherId ->
			ok

	end,

	ReadyState = setAttributes( CfgState, [
		{ config_base_directory, BinCfgDir },
		{ us_server_pid, undefined },
		{ us_web_config_server_pid, undefined } ] ),

	?send_info_fmt( TraceState, "Now ready: ~s.", [ to_string( ReadyState ) ] ),

	ReadyState.
% Constructs the US configuration server, using specified configuration
% directory.
%
% Useful for example to create auxiliary universal servers.
%
% NOTE(review): unlike construct/1, this variant does not initialise the
% us_server_pid attribute - confirm this asymmetry is intended.
%
-spec construct( wooper:state(), file_utils:directory_path() ) -> wooper:state().
construct( State, ConfigDir ) when is_list( ConfigDir ) ->

	% First the direct mother classes, then this class-specific actions:
	TraceState = class_USServer:construct( State,
		?trace_categorize("USConfigServer") ),

	?send_info_fmt( TraceState, "Creating the overall US configuration server, "
		"using the '~s' configuration directory for that.",
		[ ConfigDir ] ),

	BinCfgDir = text_utils:string_to_binary( ConfigDir ),

	CfgState = load_configuration( BinCfgDir, TraceState ),

	ReadyState = setAttributes( CfgState, [
		{ config_base_directory, BinCfgDir },
		{ us_web_config_server_pid, undefined } ] ),

	?send_info( TraceState, to_string( ReadyState ) ),

	ReadyState.



% Overridden destructor.
-spec destruct( wooper:state() ) -> wooper:state().
destruct( State ) ->

	% Automatic unregistering.

	?info( "Deleted." ),

	State.



% Method section.


% Notifies this server about the specified US web configuration server (taken
% from the sender of this request), and requests web-information from it.
%
% Returns {ConfigBaseDir, ExecutionContext, MaybeWebConfigFilename,
% MaybeUSServerPid}.
%
-spec getWebRuntimeSettings( wooper:state() ) ->
		request_return( { bin_directory_path(),
						  basic_utils:execution_context(),
						  maybe( bin_file_path() ), maybe( server_pid() ) } ).
getWebRuntimeSettings( State ) ->

	USWebConfigServerPid = ?getSender(),

	RegState = case ?getAttr(us_web_config_server_pid) of

		undefined ->
			?info_fmt( "Registering web configuration server ~w.",
					   [ USWebConfigServerPid ] ),
			setAttribute( State, us_web_config_server_pid,
						  USWebConfigServerPid );

		% Already known (bound pattern): idempotent re-notification:
		USWebConfigServerPid ->
			State;

		OtherPid ->
			?error_fmt( "Notified of web configuration server ~w; ignored, as "
				"already knowing ~w.",
				[ USWebConfigServerPid, OtherPid ] ),
			State

	end,

	wooper:return_state_result( RegState, { ?getAttr(config_base_directory),
		?getAttr(execution_context), ?getAttr(web_config_filename),
		?getAttr(us_server_pid) } ).



% Static section.


% Returns the main default settings regarding the US configuration server, for
% its clients.
%
-spec get_default_settings() -> static_return(
		{ file_utils:file_name(), basic_utils:atom_key(),
		  naming_utils:registration_name(), naming_utils:look_up_scope() } ).
get_default_settings() ->

	wooper:return_static( { ?us_config_filename,
		?us_config_server_registration_name_key, ?default_us_config_reg_name,
		naming_utils:registration_to_look_up_scope(
			?default_registration_scope ) } ).



% Returns any found configuration directory and a corresponding trace message.
%
% Candidate bases are, in decreasing priority: $XDG_CONFIG_HOME (or
% ~/.config), then each entry of $XDG_CONFIG_DIRS (or /etc/xdg).
%
% This is a static method (no state involved), so that both this kind of servers
% and others (ex: web configuration ones) can use the same, factored, logic.
%
-spec get_us_config_directory() -> static_return(
		{ maybe( bin_directory_path() ), text_utils:ustring() } ).
get_us_config_directory() ->

	HomeDir = system_utils:get_user_home_directory(),

	% See design notes about directory selection.

	FirstEnvVar = "XDG_CONFIG_HOME",

	% We prefer devising a single trace message rather than too many:
	{ FirstPath, FirstMsg } =
			case system_utils:get_environment_variable( FirstEnvVar ) of

		false ->
			CfgHomeDefaultPath = file_utils:join( HomeDir, ".config" ),
			CfgHomeMsg = text_utils:format(
				"no '~s' environment variable defined, defaulting to '~s'",
				[ FirstEnvVar, CfgHomeDefaultPath ] ),
			{ CfgHomeDefaultPath, CfgHomeMsg };

		Path ->
			{ Path, text_utils:format( "path '~s' was obtained from "
				"environment variable '~s'", [ Path, FirstEnvVar ] ) }

	end,

	SecondEnvVar = "XDG_CONFIG_DIRS",

	{ ListedPathsAsStrings, SecondMsg } =
			case system_utils:get_environment_variable( SecondEnvVar ) of

		false ->
			% A single one here:
			DefaultCfgDirs = "/etc/xdg",
			CfgDirsMsg = text_utils:format(
				"no '~s' environment variable defined, defaulting to '~s'",
				[ SecondEnvVar, DefaultCfgDirs ] ),
			{ DefaultCfgDirs, CfgDirsMsg };

		Paths ->
			{ Paths, text_utils:format( "paths '~s' was obtained from "
				"environment variable '~s'", [ Paths, SecondEnvVar ] ) }

	end,

	% XDG_CONFIG_DIRS entries are colon-separated:
	ListedPaths = text_utils:split( ListedPathsAsStrings, _Seps=[ $: ] ),

	AllBasePaths = [ FirstPath | ListedPaths ],

	CfgSuffix = file_utils:join( ?app_subdir, ?us_config_filename ),

	BaseMsg = text_utils:format( "Looking-up for Universal Server "
		"configuration directory, based on suffix '~s', knowing that: ~s~n"
		"Configuration directory ", [ CfgSuffix,
			text_utils:strings_to_string([ FirstMsg, SecondMsg ] ) ] ),

	ResPair = find_file_in( AllBasePaths, CfgSuffix, BaseMsg, _Msgs=[] ),

	wooper:return_static( ResPair ).




% Helper section.
% (helper)
%
% Walks the candidate base paths in priority order; returns, for the first
% base path containing CfgSuffix, {BinConfigDir, Message}, otherwise
% {undefined, Message}. Msgs accumulates (in reverse) the per-path failure
% notes used to build the final message.
%
find_file_in( _AllBasePaths=[], CfgSuffix, BaseMsg, Msgs ) ->

	% Configuration directory not found:

	FullMsg = BaseMsg ++ text_utils:format( "could not be determined, "
		"short of locating a relevant configuration file ('~s') for that: ",
		[ CfgSuffix ] )
		++ text_utils:strings_to_enumerated_string( lists:reverse( Msgs ) ),

	{ undefined, FullMsg };


find_file_in( _AllBasePaths=[ Path | T ], CfgSuffix, BaseMsg, Msgs ) ->

	CfgFilePath = file_utils:normalise_path(
		file_utils:join( Path, CfgSuffix ) ),

	case file_utils:is_existing_file_or_link( CfgFilePath ) of

		true ->
			CfgDir = filename:dirname( CfgFilePath ),

			FullMsg = text_utils:format( "Configuration directory found "
				"as '~s', since containing '~s'", [ CfgDir, CfgFilePath ] )
				++ case Msgs of

					[] ->
						"";

					_ ->
						", after having searched through: "
						++ text_utils:strings_to_enumerated_string(
							lists:reverse( Msgs ) )

				   end,

			{ text_utils:string_to_binary( CfgDir ), FullMsg };

		false ->
			NewMsgs = [ text_utils:format( "not found as '~s'",
							[ CfgFilePath ] ) | Msgs ],
			find_file_in( T, CfgSuffix, BaseMsg, NewMsgs )

	end.



% Returns the Universal Server configuration table (i.e. the one of US, not
% specifically of US web), and directly applies some of the read settings.
%
% Throws if the configuration file is missing, contains non-pair top-level
% terms, or contains keys outside of ?known_config_keys.
%
-spec load_configuration( bin_directory_path(), wooper:state() ) ->
			wooper:state().
load_configuration( BinCfgDir, State ) ->

	CfgFilename = file_utils:join( BinCfgDir, ?us_config_filename ),

	% Should, by design, never fail:
	case file_utils:is_existing_file_or_link( CfgFilename ) of

		true ->
			ok;

		false ->
			?error_fmt( "The overall US configuration file ('~s') "
				"could not be found.", [ CfgFilename ] ),
			% Must have disappeared then:
			throw( { us_config_file_not_found, CfgFilename } )

	end,

	?info_fmt( "Reading the Universal Server configuration from '~s'.",
			   [ CfgFilename ] ),

	% Ensures as well that all top-level terms are pairs indeed:
	ConfigTable = table:new_from_unique_entries(
		file_utils:read_terms( CfgFilename ) ),

	?info_fmt( "Read US configuration ~s", [ table:to_string( ConfigTable ) ] ),

	% We follow the usual order in the configuration file:

	% Const (only side effect is possibly switching the VM cookie):
	manage_vm_cookie( ConfigTable, State ),

	EpmdState = manage_epmd_port( ConfigTable, State ),

	TCPState = manage_tcp_port_range( ConfigTable, EpmdState ),

	ExecState = manage_execution_context( ConfigTable, TCPState ),

	UserState = manage_os_user_group( ConfigTable, ExecState ),

	RegState = manage_registration_names( ConfigTable, UserState ),

	DirState = manage_app_base_directory( ConfigTable, RegState ),

	LogState = manage_log_directory( ConfigTable, DirState ),

	WebState = manage_web_config( ConfigTable, LogState ),

	% Detect any extraneous, unexpected entry:
	LicitKeys = ?known_config_keys,

	case list_utils:difference( table:keys( ConfigTable ), LicitKeys ) of

		[] ->
			WebState;

		UnexpectedKeys ->
			?error_fmt( "Unknown key(s) in '~s': ~s~nLicit keys: ~s",
				[ CfgFilename,
				  text_utils:terms_to_string( UnexpectedKeys ),
				  text_utils:terms_to_string( LicitKeys ) ] ),
			throw( { invalid_configuration_keys, UnexpectedKeys, CfgFilename } )

	end.




% Manages any user-configured VM cookie.
-spec manage_vm_cookie( us_config_table(), wooper:state() ) -> void().
% (spec and description just above; side effect: may switch the Erlang cookie
% of the current VM; returns nothing of use - the state is read-only here)
%
manage_vm_cookie( ConfigTable, State ) ->

	case table:lookup_entry( ?vm_cookie_key, ConfigTable ) of

		key_not_found ->
			CurrentCookie = net_utils:get_cookie(),
			?info_fmt( "No user-configured cookie, sticking to original one, "
				"'~s'.", [ CurrentCookie ] );

		{ value, UserCookie } when is_atom( UserCookie ) ->
			InitialCookie = net_utils:get_cookie(),

			?info_fmt( "Switching the Erlang cookie of the current VM from "
				"the current one, '~s', to the specified one, '~s'.",
				[ InitialCookie, UserCookie ] ),

			net_utils:set_cookie( UserCookie );

		{ value, InvalidCookie } ->
			?error_fmt( "Read invalid user-configured Erlang cookie: '~p'.",
						[ InvalidCookie ] ),
			throw( { invalid_vm_cookie, InvalidCookie, ?vm_cookie_key } )

	end.



% Manages any user-configured EPMD port; stores it in the epmd_port attribute.
%
% NOTE(review): only is_integer/1 is checked here - no 1..65535 range
% validation is performed on the configured value.
%
-spec manage_epmd_port( us_config_table(), wooper:state() ) -> wooper:state().
manage_epmd_port( ConfigTable, State ) ->

	% No simple, integrated way of checking the actual port currently in use:
	Port = case table:lookup_entry( ?epmd_port_key, ConfigTable ) of

		key_not_found ->
			DefaultEpmdPort = net_utils:get_default_epmd_port(),
			?info_fmt( "No user-configured EPMD TCP port, supposing already "
				"using the Erlang-level default one, ~B.",
				[ DefaultEpmdPort ] ),
			DefaultEpmdPort;

		{ value, UserEPMDPort } when is_integer( UserEPMDPort ) ->
			?info_fmt( "Supposing already running using the user-defined "
				"EPMD TCP port #~B.", [ UserEPMDPort ] ),
			UserEPMDPort;

		{ value, InvalidEPMDPort } ->
			?error_fmt( "Read invalid user-configured EPMD port: '~p'.",
						[ InvalidEPMDPort ] ),
			throw( { invalid_epmd_port, InvalidEPMDPort, ?epmd_port_key } )

	end,

	setAttribute( State, epmd_port, Port ).



% Manages any user-configured TCP port range; stores it (or 'undefined') in
% the tcp_port_range attribute.
%
-spec manage_tcp_port_range( us_config_table(), wooper:state() ) ->
			wooper:state().
manage_tcp_port_range( ConfigTable, State ) ->

	PortRange = case table:lookup_entry( ?tcp_port_range_key, ConfigTable ) of

		key_not_found ->
			?info( "No user-configured TCP port range." ),
			undefined;

		% NOTE(review): the log message presents this as the half-open range
		% [Min,Max[, yet only Min < Max is enforced by the guard - confirm
		% the intended bound semantics where the range is actually consumed.
		%
		{ value, R={ MinTCPPort, MaxTCPPort } } when is_integer( MinTCPPort )
				andalso is_integer( MaxTCPPort )
				andalso MinTCPPort < MaxTCPPort ->
			?info_fmt( "User-configured TCP port range is [~w,~w[.",
					   [ MinTCPPort, MaxTCPPort ] ),
			R;

		{ value, InvalidTCPPortRange } ->
			?error_fmt( "Read invalid user-configured TCP port range: '~p'.",
						[ InvalidTCPPortRange ] ),
			throw( { invalid_tcp_port_range, InvalidTCPPortRange,
					 ?tcp_port_range_key } )

	end,

	setAttribute( State, tcp_port_range, PortRange ).



% Manages any user-configured execution context (development or production;
% defaults to production), warns if it differs from this module's compile-time
% execution target, and stores it in the execution_context attribute.
%
-spec manage_execution_context( us_config_table(), wooper:state() ) ->
			wooper:state().
manage_execution_context( ConfigTable, State ) ->

	MyriadExecStr = text_utils:format( "for Myriad: ~s",
		[ basic_utils:get_execution_target() ] ),

	WOOPERExecStr = text_utils:format( "for WOOPER: ~s",
		[ wooper:get_execution_target() ] ),

	TracesExecStr = text_utils:format( "for Traces: ~s",
		[ traces:get_execution_target() ] ),

	UsExecTarget = get_execution_target(),

	USExecStr = text_utils:format( "for the Universal Server: ~s",
		[ UsExecTarget ] ),

	CompileContextStr = text_utils:format( "while the following compilation "
		"contexts were applied: ~s", [ text_utils:strings_to_string( [
			MyriadExecStr, WOOPERExecStr, TracesExecStr, USExecStr ] ) ] ),

	Context = case table:lookup_entry( ?execution_context_key, ConfigTable ) of

		key_not_found ->
			DefaultContext = production,
			?info_fmt( "No user-configured execution context, "
				"defaulting to '~s', ~s.",
				[ DefaultContext, CompileContextStr ] ),
			DefaultContext;

		{ value, development } ->
			?info_fmt( "The 'development' execution context has been "
				"configured by the user, ~s.", [ CompileContextStr ] ),
			development;

		{ value, production } ->
			?info_fmt( "The 'production' execution context has been configured "
				"by the user, ~s.", [ CompileContextStr ] ),
			production;

		{ value, InvalidContext } ->
			?error_fmt( "Read invalid user-configured execution context: '~p'.",
						[ InvalidContext ] ),
			throw( { invalid_execution_context, InvalidContext,
					 ?execution_context_key } )

	end,

	% Warn (not fail) on runtime/compile-time mismatch:
	case Context of

		UsExecTarget ->
			ok;

		_OtherContext ->
			?warning_fmt( "The runtime user-configured execution context (~s) "
				"does not match the compile-time execution target of "
				"this Universal Server (~s).",
				[ Context, UsExecTarget ] )

	end,

	setAttribute( State, execution_context, Context ).



% Manages any user-configured specification regarding the (operating-system
% level) US user and group.
%
-spec manage_os_user_group( us_config_table(), wooper:state() ) ->
			wooper:state().
manage_os_user_group( ConfigTable, State ) ->

	% Mostly used by start/stop scripts:
	UsUsername = case table:lookup_entry( ?us_username_key, ConfigTable ) of

		key_not_found ->
			ActualUsername = system_utils:get_user_name(),
			?info_fmt( "No user-configured US operating-system username set "
				"for this server; runtime-detected: '~s'.",
				[ ActualUsername ] ),
			ActualUsername;

		{ value, Username } when is_list( Username ) ->

			case system_utils:get_user_name() of

				Username ->
					?info_fmt( "Using user-configured US operating-system "
						"username '~s' for this server, which matches "
						"the current runtime user.", [ Username ] ),
					Username;

				OtherUsername ->

					% Currently not a blocking error as this US configuration
					% server might be run in the context of US-web, hence with
					% its username, which might differ.
+ + ?warning_fmt( "The user-configured US operating-system " + "username '~s' for this server does not " + "match the current runtime user, '~s' " + "(acceptable if created on behalf on a " + "US-related service - typically a " + "standalone US-web server, i.e. one with " + "no prior US-server companion running).", + [ Username, OtherUsername ] ), + OtherUsername + %throw( { inconsistent_os_us_user, OtherUsername, + % Username, ?us_username_key } ) + + end + + end, + + + UsGroupname = case table:lookup_entry( ?us_groupname_key, ConfigTable ) of + + key_not_found -> + ActualGroupname = system_utils:get_group_name(), + ?info_fmt( "No group-configured US operating-system group name set " + "for this server; runtime-detected: '~s'.", + [ ActualGroupname ] ), + ActualGroupname; + + { value, Groupname } when is_list( Groupname ) -> + case system_utils:get_group_name() of + + Groupname -> + ?info_fmt( "Using group-configured US operating-system " + "groupname '~s' for this server, which matches " + "the current runtime group.", [ Groupname ] ), + Groupname; + + OtherGroupname -> + ?error_fmt( "The group-configured US operating-system " + "groupname '~s' for this server does not match " + "the current runtime group, '~s'.", + [ Groupname, OtherGroupname ] ), + throw( { inconsistent_os_us_group, OtherGroupname, + Groupname, ?us_groupname_key } ) + + end + + end, + + setAttributes( State, [ { us_username, + text_utils:string_to_binary( UsUsername ) }, + { us_groupname, + text_utils:string_to_binary( UsGroupname ) } ] ). + + + +% Manages any user-configured registration names for this instance and for the +% US server. +% +-spec manage_registration_names( us_config_table(), wooper:state() ) -> + wooper:state(). 
+manage_registration_names( ConfigTable, State ) -> + + ThisRegName = case table:lookup_entry( + ?us_config_server_registration_name_key, ConfigTable ) of + + key_not_found -> + DefaultThisRegName = ?default_us_config_reg_name, + ?info_fmt( "No user-configured registration name for this server, " + "using default name '~s'.", [ DefaultThisRegName ] ), + DefaultThisRegName; + + { value, UserThisRegName } when is_atom( UserThisRegName ) -> + ?info_fmt( "Using user-configured registration name '~s' for " + "this server.", [ UserThisRegName ] ), + UserThisRegName; + + { value, InvalidThisRegName } -> + ?error_fmt( "Read invalid user-configured registration name for " + "this server: '~p'.", [ InvalidThisRegName ] ), + throw( { invalid_us_config_registration_name, InvalidThisRegName, + ?us_config_server_registration_name_key } ) + + end, + + Scope = ?default_registration_scope, + + naming_utils:register_as( ThisRegName, Scope ), + + ?info_fmt( "Registered as '~s' (scope: ~s).", [ ThisRegName, Scope ] ), + + + % We read the registration name of the US server knowing that its upcoming + % creation is likely (it will have to be told about the name it shall use): + + USRegName = case table:lookup_entry( ?us_server_registration_name_key, + ConfigTable ) of + + key_not_found -> + DefaultRegName = ?default_us_reg_name, + ?info_fmt( "No user-configured registration name for the US server, " + "using default name '~s'.", [ DefaultRegName ] ), + DefaultRegName; + + { value, UserRegName } when is_atom( UserRegName ) -> + ?info_fmt( "Using user-configured registration name '~s' for " + "the US server.", [ UserRegName ] ), + UserRegName; + + { value, InvalidRegName } -> + ?error_fmt( "Read invalid user-configured registration name for " + "the US server: '~p'.", [ InvalidRegName ] ), + throw( { invalid_us_registration_name, InvalidRegName, + ?us_server_registration_name_key } ) + + end, + + setAttributes( State, [ { registration_name, ThisRegName }, + { registration_scope, Scope }, 
+ { us_server_registration_name, USRegName } ] ). + + + +% Manages any user-configured application base directory. +-spec manage_app_base_directory( us_config_table(), wooper:state() ) -> + wooper:state(). +manage_app_base_directory( ConfigTable, State ) -> + + % As opposed to, say, start/stop script, the Erlang code does not care so + % much about these directories, so warnings, not errors, will be issued if + % not found (the US framework being also launchable thanks to, for example, + % 'make debug'). + + RawBaseDir = case table:lookup_entry( ?us_app_base_dir_key, ConfigTable ) of + + key_not_found -> + + case system_utils:get_environment_variable( + ?us_app_env_variable ) of + + false -> + % Guessing then, typically current directory is: + % [...]/universal_server/_build/default/rel/universal_server, + % and we want the first universal_server, so: + % + GuessedDir = file_utils:normalise_path( file_utils:join( [ + file_utils:get_current_directory(), "..", "..", "..", + ".." ] ) ), + + ?warning_fmt( "No user-configured US application " + "base directory set (neither in-file or " + "through the '~s' environment variable), " + "hence trying to guess it as '~s'.", + [ ?us_app_env_variable, GuessedDir ] ), + GuessedDir; + + EnvDir -> + ?info_fmt( "No user-configured US application base " + "directory set in configuration file, using " + "the value of the '~s' environment variable: " + "'~s'.", [ ?us_app_env_variable, EnvDir ] ), + EnvDir + + end; + + { value, D } when is_list( D ) -> + D; + + { value, D } when is_binary( D ) -> + file_utils:binary_to_string( D ); + + { value, InvalidDir } -> + ?error_fmt( "Read invalid user-configured US application base " + "directory: '~p'.", [ InvalidDir ] ), + throw( { invalid_us_app_base_directory, InvalidDir, + ?us_app_base_dir_key } ) + + end, + + BaseDir = file_utils:ensure_path_is_absolute( RawBaseDir ), + + MaybeBinDir = case file_utils:is_existing_directory_or_link( BaseDir ) of + + true -> + % As it may end with a version 
(ex: "universal_server-0.0.1") or as + % a "universal_server-latest" symlink thereof: + % + case filename:basename( BaseDir ) of + + "universal_server" ++ _ -> + ?info_fmt( "US application base directory set to '~s'.", + [ BaseDir ] ), + text_utils:string_to_binary( BaseDir ); + + _Other -> + ?warning_fmt( "The US application base directory '~s' " + "does not seem legit (it should start with " + "'universal_server'), thus considering knowing none.", + [ BaseDir ] ), + %throw( { incorrect_us_app_base_directory, BaseDir, + % ?us_app_base_dir_key } ) + undefined + + end; + + false -> + ?warning_fmt( "The US application base directory '~s' does not " + "exist, thus considering knowing none.", [ BaseDir ] ), + %throw( { non_existing_us_app_base_directory, BaseDir, + % ?us_app_base_dir_key } ) + undefined + + end, + + setAttribute( State, app_base_directory, MaybeBinDir ). + + + +% Manages any user-configured log directory to rely on, creating it if +% necessary. +% +-spec manage_log_directory( us_config_table(), wooper:state() ) -> + wooper:state(). 
+manage_log_directory( ConfigTable, State ) -> + + BaseDir = case table:lookup_entry( ?us_log_dir_key, ConfigTable ) of + + key_not_found -> + ?default_log_base_dir; + + { value, D } when is_list( D ) -> + file_utils:ensure_path_is_absolute( D, + ?getAttr(app_base_directory) ); + + { value, InvalidDir } -> + ?error_fmt( "Read invalid user-configured log directory: '~p'.", + [ InvalidDir ] ), + throw( { invalid_log_directory, InvalidDir, ?us_log_dir_key } ) + + end, + + case file_utils:is_existing_directory_or_link( BaseDir ) of + + true -> + ok; + + false -> + ?warning_fmt( "The log directory '~s' does not exist, " + "creating it.", [ BaseDir ] ) + %throw( { non_existing_log_directory, BaseDir } ) + + end, + + % Would lead to inconvenient paths, at least if defined as relative: + %LogDir = file_utils:join( BaseDir, ?app_subdir ), + LogDir = BaseDir, + + file_utils:create_directory_if_not_existing( LogDir, create_parents ), + + BinLogDir = text_utils:string_to_binary( LogDir ), + + setAttribute( State, log_directory, BinLogDir ). + + + +% Manages any user-configured configuration filename for webservers and virtual +% hosting. +% +-spec manage_web_config( us_config_table(), wooper:state() ) -> wooper:state(). +manage_web_config( ConfigTable, State ) -> + + MaybeBinWebFilename = case table:lookup_entry( ?us_web_config_filename_key, + ConfigTable ) of + + key_not_found -> + ?info( "No user-configured configuration filename for webservers " + "and virtual hosting." 
), + undefined; + + % Read but not checked intentionally (to be done by the web + % configuration server): + % + { value, WebFilename } when is_list( WebFilename ) -> + ?info_fmt( "Obtained user-configured configuration filename for " + "webservers and virtual hosting: '~s'.", + [ WebFilename ] ), + text_utils:string_to_binary( WebFilename ); + + { value, InvalidWebFilename } -> + ?error_fmt( "Obtained invalid user-configured configuration " + "filename for webservers and virtual hosting: '~p'.", + [ InvalidWebFilename ] ), + throw( { invalid_us_web_config_filename, InvalidWebFilename, + ?us_web_config_filename_key } ) + + end, + + setAttribute( State, web_config_filename, MaybeBinWebFilename ). + + + +% Returns a textual description of this server. +-spec to_string( wooper:state() ) -> text_utils:ustring(). +to_string( State ) -> + + RegString = text_utils:format( "registered as '~s' (scope: ~s)", + [ ?getAttr(registration_name), ?default_registration_scope ] ), + + SrvString = case ?getAttr(us_server_pid) of + + undefined -> + "no US server"; + + SrvPid -> + text_utils:format( "US server ~w", [ SrvPid ] ) + + end, + + WebString = case ?getAttr(us_web_config_server_pid) of + + undefined -> + "no web configuration server"; + + WebPid -> + text_utils:format( "web configuration server ~w", [ WebPid ] ) + + end, + + text_utils:format( "US overall configuration server, ~s, running in " + "the ~s execution context, presumably on a VM using EPMD port #~B, " + "using configuration directory '~s' and log directory '~s', " + "having found as US-web configuration file '~s', knowing ~s and ~s", + [ RegString, ?getAttr(execution_context), ?getAttr(epmd_port), + ?getAttr(config_base_directory), ?getAttr(log_directory), + ?getAttr(web_config_filename), SrvString, WebString ] ). + + + +% Returns the execution target this module (hence, probably, that layer as a +% whole) was compiled with, i.e. either the atom 'development' or 'production'. 
+ +% Dispatched in actual clauses, otherwise Dialyzer will detect an +% underspecification: +% +% -spec get_execution_target() -> execution_target(). + +-ifdef(exec_target_is_production). + +-spec get_execution_target() -> 'production'. +get_execution_target() -> + production. + +-else. % exec_target_is_production + +-spec get_execution_target() -> 'development'. +get_execution_target() -> + development. + +-endif. % exec_target_is_production diff --git a/src/class_USScheduler.erl b/src/class_USScheduler.erl new file mode 100644 index 0000000..4bd483a --- /dev/null +++ b/src/class_USScheduler.erl @@ -0,0 +1,1502 @@ +% Copyright (C) 2020-2020 Olivier Boudeville +% +% This file is part of US-Common, part of the Universal Server framework. +% +% This program is free software: you can redistribute it and/or modify it under +% the terms of the GNU Affero General Public License as published by the Free +% Software Foundation, either version 3 of the License, or (at your option) any +% later version. +% +% This program is distributed in the hope that it will be useful, but WITHOUT +% ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +% FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +% details. +% +% You should have received a copy of the GNU Affero General Public License along +% with this program. If not, see . +% +% Author: Olivier Boudeville (olivier.boudeville@esperide.com) +% Creation date: Wednesday, March 18, 2020. + + +-module(class_USScheduler). + + +-define( class_description, "Task scheduler for the US framework." ). + + +% Determines what are the direct mother classes of this class (if any): +-define( superclasses, [ class_USServer ] ). + + + +% Scheduling of tasks on behalf of the US framework. + + +-type server_pid() :: class_UniversalServer:server_pid(). + + +% The command corresponding to a task to execute respects the general form of a +% WOOPER oneway, i.e. 
OnewayName or {OnewayName,Args}, where Args is +% conventionally a single non-list term or a list of any arguments. +% +% We considered, yet finally did not keep, the idea of always adding as last +% element the PID of the sending scheduler. +% +-type task_command() :: wooper:oneway_call(). + + +% Shorthands: + +-type ustring() :: text_utils:ustring(). + +-type ms_duration() :: time_utils:ms_duration(). + + +% No shorthand, as too ambiguous to be used here: +%-type milliseconds() :: unit_utils:milliseconds(). + +% To avoid mistakes between amounts of milliseconds: + +%-type ms_since_year_0() :: unit_utils:milliseconds(). + + +% Since start of this scheduler (in monotonic time): +-type ms_since_start() :: unit_utils:milliseconds(). + + +-type timestamp() :: time_utils:timestamp(). +-type dhms_duration() :: time_utils:dhms_duration(). + + +% Specifies the start time of a task scheduling: +-type start_time() :: + + % As soon as possible: + 'asap' + + % When relevant/applicable (flexible start): + | 'flexible' + + % Preferably after this specified duration from receiving: + | dhms_duration() + + % Preferably at this specified (future) time: + | timestamp(). + + + +% Time between two schedulings of a task, as expressed by the user: +-type user_periodicity() :: + + % Just to be executed once (one shot): + 'once' + + % Actual period: + | dhms_duration() + + | unit_utils:seconds(). + + +% Time between two schedulings of a task, as used internally: +-type periodicity() :: maybe( ms_duration() ). + + +% The number of times a task shall be scheduled: +-type schedule_count() :: 'unlimited' | basic_utils:count(). + + +% The PID of the process requesting a task to be scheduled: +-type requester_pid() :: pid(). + + +% The PID of the process to which a task command will be sent whenever +% scheduled: +% +-type actuator_pid() :: pid(). + + +-type task_registration_outcome() :: 'task_done' + | { 'task_registered', task_id() }. 
+ +-type task_unregistration_outcome():: 'task_unregistered' | 'task_already_done' + | { 'task_unregistration_failed', basic_utils:error_reason() }. + + + +% Identifier of a task, as assigned by a scheduler. +-type task_id() :: basic_utils:count(). + + +% Reference to a timer: +-type timer_ref() :: timer:tref(). + + +% Associates the reference of a live timer to a schedule offset: +-type timer_table() :: table( schedule_offset(), timer_ref() ). + +-export_type([ server_pid/0, task_command/0, start_time/0, user_periodicity/0, + requester_pid/0, actuator_pid/0, + task_registration_outcome/0, task_unregistration_outcome/0, + task_id/0, timer_ref/0, timer_table/0 ]). + + + +% A millisecond offset relative to the start time of this scheduler: +% (hence designed to be rather small) +% +-type schedule_offset() :: ms_since_start(). + + +% Describes a task to schedule. +-record( task_entry, { + + % Unique identifier of this task: + id :: task_id(), + + % The command to trigger on the actuator: + command :: task_command(), + + % The offset (relative to the start time of this scheduler) at which + % the next scheduling of this task shall happen: + % + next_schedule :: schedule_offset(), + + % The periodicity at which this task shall be scheduled: + periodicity :: periodicity(), + + % The number of times this task shall still be scheduled: + count :: schedule_count(), + + % The number of times this task has already been scheduled: + schedule_count = 0 :: basic_utils:count(), + + % The internal time offset (if any) at which this task was first + % scheduled: + % + started_on = undefined :: maybe( schedule_offset() ), + + % The internal time offset (if any) at which this task was last + % scheduled: + % + last_schedule = undefined :: maybe( schedule_offset() ), + + % The PID of the process having registered this task: + requester_pid :: requester_pid(), + + % The PID of the process that will be triggered whenever this task is + % scheduled: + % + actuator_pid :: actuator_pid() } 
). + +-type task_entry() :: #task_entry{}. + + +% To keep track of task information: +-type task_table() :: table( task_id(), task_entry() ). + + +% Registers which tasks shall be scheduled at a given time offset. +-type schedule_pair() :: { schedule_offset(), [ task_id() ] }. + +% Schedule pairs, ordered from closest future to most remote one: +-type schedule_plan() :: [ schedule_pair() ]. + + + +% The class-specific attributes: +-define( class_attributes, [ + + { task_table, task_table(), "a table registering all ongoing tasks" }, + + { schedule_plan, schedule_plan(), "the ordered list of next schedulings" }, + + { timer_table, timer_table(), "a table registering live timers" }, + + % Allows also to count all tasks that have been submitted: + { next_task_id, task_id(), "Identifier of the next registered task" } ] ). + + +% Used by the trace_categorize/1 macro to use the right emitter: +-define( trace_emitter_categorization, "US.Scheduling.Scheduler" ). + + +-define( registration_name, us_scheduler ). + +-define( registration_scope, global_only ). + + +% Exported helpers: +-export([ vet_user_periodicity/2 ]). + + +% To silence about unused functions: +-export([ get_schedule_offset_for/2, get_current_timestamp/1 ]). + + +% Allows to define WOOPER base variables and methods for that class: +-include_lib("wooper/include/wooper.hrl"). + +% Allows to use macros for trace sending: +-include_lib("traces/include/class_TraceEmitter.hrl"). + + + +% Implementation notes: +% +% On task commands. +% +% We preferred to limit the ability of a scheduler only to sending messages to +% whichever process needed (ex: no MFA direct calls), for a better separation of +% concerns. +% +% +% On time. +% +% The monotonic time is relied on internally, to avoid any warp/leap performed +% by the system time. See http://erlang.org/doc/apps/erts/time_correction.html +% for more information. 
+%
+% Timestamps (ex: to designate Sunday, March 22, 2020 at noon) are converted
+% into internal time offsets whenever a scheduler registers a task.
+%
+% As a result, correct periodicities will be enforced (ex: really a 1-hour
+% wallclock duration between two schedulings, regardless of any time warp) but,
+% for example due to DST (Daylight Saving Time), tasks may appear to be executed
+% with a time offset (ex: at 1 PM instead of noon). Working with a monotonic,
+% UTC (Universal Coordinated Time)-like time is thus intentional.
+%
+% Due to some external event (ex: system overload or suspension), task deadlines
+% may be missed, in which case they will be rescheduled in a row at a faster
+% pace on a best-effort basis (rather than being skipped as a whole). As a
+% result, the number of schedulings should match the expected one, yet the
+% average periodicities may not always be preserved (trade-off: not losing
+% schedulings, yet preparing one scheduling from the previous one, hence trying
+% to preserve specified periodicity).
+%
+% Internal durations are expressed in milliseconds, although the accuracy is
+% most probably coarser than that, and at least most of the user-defined
+% durations are only expressed at the second level.
+%
+% All internal durations are relative to the start time of this scheduler
+% (rather than for example to year 0 of the Gregorian calendar, or to the EPOCH,
+% i.e. 1/1/1970 at 00:00:00), as we prefer smaller durations (offsets) than
+% "absolute" ones; we used the 'server_start' attribute and monotonic time for
+% that.
+%
+% For any time unit TU (ex: TU=millisecond here):
+% erlang:monotonic_time(TU) + erlang:time_offset(TU) = erlang:system_time(TU).
+% +% This scheduler uses internally monotonic time, and, at its boundaries (ex: at +% task submission), whenever having to deal with absolute timestamps, these are +% immediately converted to internal conventions (ex: a task deadline is to be +% expressed by the user in system time, and will be converted directly into a +% monotonic timestamp when received; scheduling reports proceed the other way +% round). +% +% All kinds of internal system times (notably the VM one) are in UTC, so +% depending on time zones and DST, hour-based offsets apply. At the interfaces +% between the scheduler and the user, universal time is converted into local +% time, and the other way round. + +% The timer module is used, rather than a utility process using for example +% receive/after, presumably for a better accuracy. + + + + +% Constructs the main (singleton), default US scheduler. +-spec construct( wooper:state() ) -> wooper:state(). +construct( State ) -> + + % First the direct mother classes, then this class-specific actions: + SrvState = class_USServer:construct( State, + ?trace_categorize(_DefaultTraceEmitterName="Main US Scheduler"), + ?registration_name, ?registration_scope ), + + init_common( SrvState ). + + + +% Constructs a (named) US scheduler. +-spec construct( wooper:state(), ustring() ) -> wooper:state(). +construct( State, SchedulerName ) -> + + % First the direct mother classes, then this class-specific actions: + SrvState = class_USServer:construct( State, + ?trace_categorize(SchedulerName) ), + + init_common( SrvState ). + + + +% Constructs a (named, registered) US scheduler. +-spec construct( wooper:state(), ustring(), + naming_utils:registration_name() ) -> wooper:state(). +construct( State, SchedulerName, RegistrationName ) -> + + % First the direct mother classes, then this class-specific actions: + SrvState = class_USServer:construct( State, + ?trace_categorize(SchedulerName), RegistrationName, ?registration_scope ), + + init_common( SrvState ). 
+
+
+
+% (helper)
+-spec init_common( wooper:state() ) -> wooper:state().
+init_common( State ) ->
+
+	EmptyTable = table:new(),
+
+	ReadyState = setAttributes( State, [ { task_table, EmptyTable },
+										 { schedule_plan, [] },
+										 { timer_table, EmptyTable },
+										 { next_task_id, 1 } ] ),
+
+	trace_utils:trace_fmt( "Scheduler ready, at ~s.",
+		[ get_current_timestamp_string( ReadyState ) ] ),
+
+	ReadyState.
+
+
+
+% Overridden destructor.
+-spec destruct( wooper:state() ) -> wooper:state().
+destruct( State ) ->
+
+	?info_fmt( "Deleting ~s", [ to_string( State ) ] ),
+
+	% Cancelling all still live timers, while resisting to any error:
+	[ timer:cancel( TimerRef )
+	  || TimerRef <- table:values( ?getAttr(timer_table) ) ],
+
+	State.
+
+
+
+% Method section.
+
+
+% Triggers immediately specified one-shot task: specified command will be
+% triggered at once, a single time, being assigned to actuator process.
+%
+% Returns either 'task_done' if the task was done on the fly (hence is already
+% triggered, and no task identifier applies), or {'task_registered',TaskId} if
+% it is registered for a later trigger (then its assigned task identifier is
+% returned).
+%
+-spec triggerOneshotTask( wooper:state(), task_command(), actuator_pid() ) ->
+		request_return( task_registration_outcome() ).
+triggerOneshotTask( State, UserTaskCommand, UserActPid ) ->
+
+	% Note: in this case the sender could have contacted directly the actuator.
+
+	{ NewState, Result } = registerOneshotTask( State, UserTaskCommand,
+		_StartTime=asap, UserActPid ),
+
+	wooper:return_state_result( NewState, Result ).
+
+
+
+% Registers specified one-shot task: specified command will be executed once, at
+% specified time, as assigned to requesting and specified actuator process.
+%
+% Returns either 'task_done' if the task was done on the fly (hence is already
+% triggered, and no task identifier applies), or {'task_registered',TaskId} if
+% it is registered for a later trigger (then its assigned task identifier is
+% returned).
+%
+% Note: if the deadline is specified in absolute terms (ex: as
+% {{2020,3,22},{16,1,48}}), the conversion to internal time will be done
+% immediately (at task submission time), resulting in any system time change
+% (ex: DST) not being taken into account (as the respect of periodicities is
+% preferred over the one of literal timestamps).
+%
+-spec registerOneshotTask( wooper:state(), task_command(), start_time(),
+	actuator_pid() ) -> request_return( task_registration_outcome() ).
+registerOneshotTask( State, UserTaskCommand, UserStartTime, UserActPid ) ->
+
+	{ NewState, Result } = registerTask( State, UserTaskCommand, UserStartTime,
+		_Periodicity=once, _Count=1, UserActPid ),
+
+	wooper:return_state_result( NewState, Result ).
+
+
+
+% Registers specified task: specified command will be executed starting from
+% specified time, at specified periodicity, for specified number of times, being
+% assigned to requesting and specified actuator process.
+%
+% Returns either 'task_done' if the task was done on the fly (hence is already
+% triggered, and no task identifier applies since it is fully completed), or
+% {'task_registered',TaskId} if it is registered for a later trigger (then its
+% assigned task identifier is returned).
+%
+% Note: if the deadline is specified in absolute terms (ex: as
+% {{2020,3,22},{16,1,48}}), the conversion to internal time will be done
+% immediately (at task submission time), resulting in any future system time
+% change (ex: DST) not being taken into account at this level (as the respect of
+% periodicities is preferred over the one of literal timestamps).
+% +-spec registerTask( wooper:state(), task_command(), start_time(), periodicity(), + schedule_count(), actuator_pid() ) -> + request_return( task_registration_outcome() ). +registerTask( State, UserTaskCommand, UserStartTime, UserPeriodicity, UserCount, + UserActPid ) -> + + % Checks and canonicalise specified elements: + TaskCommand = vet_task_command( UserTaskCommand, State ), + MsDurationBeforeStart = vet_start_time( UserStartTime, State ), + Count = vet_count( UserCount, State ), + MaybePeriodicity = vet_periodicity( UserPeriodicity, Count, State ), + ReqPid = ?getSender(), + ActPid = vet_actuator_pid( UserActPid ), + + ?info_fmt( "Registering task whose command is '~p', whose declared start " + "time is ~w (hence to happen in ~s), to be triggered ~s with ~s " + "on actuator ~w (whereas requester is ~w).", + [ TaskCommand, UserStartTime, + time_utils:duration_to_string( MsDurationBeforeStart ), + schedule_count_to_string( Count ), + periodicity_to_string( MaybePeriodicity ), ActPid, ReqPid ] ), + + % Immediate launch requested? + case MsDurationBeforeStart of + + % Immediate launch here: + 0 -> + % Thus, in all cases: + launch_task( TaskCommand, ActPid, State ), + + case MaybePeriodicity of + + undefined -> + % Just to execute once (implied and checked: Count=1). + % + % Not even recording it then, it was just fire and forget: + % not task entry, just updating the task count. 
+ % + wooper:return_state_result( + incrementAttribute( State, next_task_id ), task_done ); + + MsPeriod -> + case decrement_count( Count ) of + + 0 -> + % Here also, Count was 1, same case as before, no + % future for this task: + % + wooper:return_state_result( incrementAttribute( + State, next_task_id ), task_done ); + + NewCount -> + % The count is thus now still 1 or more, or + % unlimited; in all cases a next scheduling will + % happen and must be recorded: + + TaskId = ?getAttr(next_task_id), + NowMs = get_current_schedule_offset( State ), + NextSchedule = NowMs + MsPeriod, + + TI = #task_entry{ id=TaskId, + command=TaskCommand, + next_schedule=NextSchedule, + periodicity=MsPeriod, + count=NewCount, + schedule_count=1, + started_on=NowMs, + last_schedule=NowMs, + requester_pid=ReqPid, + actuator_pid=ActPid }, + + RegState = register_task_schedule( TaskId, TI, + NextSchedule, MsPeriod, State ), + + wooper:return_state_result( RegState, + { task_registered, TaskId } ) + + end + + end; + + + % Deferred launch here: + _ -> + TaskId = ?getAttr(next_task_id), + NowMs = get_current_schedule_offset( State ), + NextSchedule = NowMs + MsDurationBeforeStart, + TI = #task_entry{ id=TaskId, + command=TaskCommand, + next_schedule=NextSchedule, + periodicity=MaybePeriodicity, + count=Count, + + % Defaults: + %schedule_count=0 + %started_on=undefined, + %last_schedule=undefined, + + requester_pid=ReqPid, + actuator_pid=ActPid }, + + RegState = register_task_schedule( TaskId, TI, NextSchedule, + MsDurationBeforeStart, State ), + + wooper:return_state_result( RegState, { task_registered, TaskId } ) + + end. + + + +% Unregisters specified task, based on its identifier. +% +% Returns either 'task_unregistered' if the operation succeeded, or +% 'task_already_done' if the task was already fully done, or +% {'task_unregistration_failed',Reason} if the operation failed. +% +-spec unregisterTask( wooper:state(), task_id() ) -> + request_return( task_unregistration_outcome() ). 
+unregisterTask( State, TaskId ) when is_integer( TaskId ) andalso TaskId > 0 ->
+	{ Outcome, NewState } = unregister_task( TaskId, State ),
+	wooper:return_state_result( NewState, Outcome ).
+
+
+
+% Unregisters specified tasks, based on their identifier.
+%
+% Returns, in order, for each task either 'task_unregistered' if the operation
+% succeeded, 'task_already_done' if the task was already fully done, or
+% {'task_unregistration_failed',Reason} if the operation failed.
+%
+-spec unregisterTasks( wooper:state(), [ task_id() ] ) ->
+			request_return( [ task_unregistration_outcome() ] ).
+unregisterTasks( State, TaskIds ) when is_list( TaskIds ) ->
+
+	% Folds over the identifiers, threading the state so that each
+	% unregistration sees the table updates made by the previous ones:
+	%
+	{ RevOutcomes, NewState } = lists:foldl(
+		fun( TaskId, _Acc={ AccOutcomes, AccState } ) ->
+			{ Outcome, NewAccState } = unregister_task( TaskId, AccState ),
+			{ [ Outcome | AccOutcomes ], NewAccState }
+		end,
+		_Acc0={ [], State },
+		_List=TaskIds ),
+
+	% Outcomes were accumulated in reverse order:
+	wooper:return_state_result( NewState, lists:reverse( RevOutcomes ) ).
+
+
+
+% Unregisters the specified task: removes it from the task table and, if it was
+% still scheduled, from the schedule plan and timer table as well.
+%
+% Returns the unregistration outcome, together with an updated state.
+%
+% (helper)
+%
+-spec unregister_task( task_id(), wooper:state() ) ->
+			{ task_unregistration_outcome(), wooper:state() }.
+unregister_task( TaskId, State ) when is_integer( TaskId ) andalso TaskId > 0 ->
+
+	NextTaskId = ?getAttr(next_task_id),
+
+	% Identifiers are allocated monotonically, so any identifier at least equal
+	% to the next one to be allocated cannot correspond to a past task:
+	%
+	case TaskId >= NextTaskId of
+
+		% Could not have been allocated:
+		true ->
+			?error_fmt( "Requested to unregister task ~B, which never existed "
+				"(as the next task identifier is ~B).",
+				[ TaskId, NextTaskId ] ),
+			{ { task_unregistration_failed, never_existed }, State };
+
+		false ->
+			case table:extract_entry_if_existing( TaskId,
+					?getAttr(task_table) ) of
+
+				% Already dropped, hence done:
+				false ->
+					?info_fmt( "Unregistered task ~B, which was actually done.",
+							   [ TaskId ] ),
+					{ task_already_done, State };
+
+				{ TaskEntry, ShrunkTaskTable } ->
+
+					?info_fmt( "Task #~B unregistered, was: ~s",
+						[ TaskId, task_entry_to_string( TaskEntry, State ) ] ),
+
+					SchedulePlan = ?getAttr(schedule_plan),
+
+					% A live task is expected to have a corresponding slot in
+					% the schedule plan; remove it (and any now-useless timer):
+					%
+					case unschedule_task( TaskId,
+							TaskEntry#task_entry.next_schedule, SchedulePlan,
+							?getAttr(timer_table) ) of
+
+						not_found ->
+
+							?error_fmt( "Internal error: task id #~B not found "
+								"in schedule plan, which ~s", [ TaskId,
+								schedule_plan_to_string( SchedulePlan,
+									State ) ] ),
+
+							% Still dropping it from the task table:
+							NewState = setAttribute( State, task_table,
+								ShrunkTaskTable ),
+
+							{ { task_unregistration_failed, internal_error },
+							  NewState };
+
+
+						{ NewPlan, NewTimerTable } ->
+
+							NewState = setAttributes( State, [
+								{ task_table, ShrunkTaskTable },
+								{ schedule_plan, NewPlan },
+								{ timer_table, NewTimerTable } ] ),
+
+							{ task_unregistered, NewState }
+
+					end
+
+			end
+
+	end;
+
+% Not a (strictly positive) integer identifier:
+unregister_task( TaskId, State ) ->
+	?error_fmt( "Task unregistering failed, invalid task identifier: '~p'.",
+		[ TaskId ] ),
+	{ { task_unregistration_failed, invalid_task_id }, State }.
+
+
+
+% Triggers specified scheduling.
+%
+% Expected to be called through a timer having been set beforehand.
+%
+% Note: a reference() or any other non-guessable element could be used to
+% prevent any process from interfering by triggering schedulings.
+% +-spec timerTrigger( wooper:state(), schedule_offset() ) -> oneway_return(). +timerTrigger( State, ScheduleOffset ) -> + + NowMs = get_current_schedule_offset( State ), + + % As long as a drift is below this threshold, we do not worry: + OffsetThreshold = 250, + + case erlang:abs( NowMs - ScheduleOffset ) > OffsetThreshold of + + true -> + ?trace_fmt( "Triggered for offset #~B (~s), while being at #~B " + "(~s), hence with a signed drift of ~s (late if positive).", + [ ScheduleOffset, + get_timestamp_string_for( ScheduleOffset, State ), NowMs, + get_timestamp_string_for( NowMs, State ), + time_utils:duration_to_string( NowMs - ScheduleOffset ) ] ); + + false -> + ok + + end, + + TimerTable = ?getAttr(timer_table), + + % Resorb any pending schedule: + { NewPlan, NewTimerTable, NewTaskTable } = perform_schedule( ScheduleOffset, + NowMs, ?getAttr(schedule_plan), TimerTable, ?getAttr(task_table), + State ), + + %trace_utils:debug_fmt( "After having being triggered for #~B, went " + % "from ~s to ~s", + % [ ScheduleOffset, timer_table_to_string( TimerTable, State ), + % timer_table_to_string( NewTimerTable, State ) ] ), + + TrigState = setAttributes( State, [ { schedule_plan, NewPlan }, + { timer_table, NewTimerTable }, + { task_table, NewTaskTable } ] ), + + wooper:return_state( TrigState ). + + + +% (helper; State specified for traces only) +% +% Take care of any lingering schedule: +-spec perform_schedule( schedule_offset(), schedule_offset(), schedule_plan(), + timer_table(), task_table(), wooper:state() ) -> + { schedule_plan(), timer_table(), task_table() }. 
+perform_schedule( ScheduleOffset, NowMs, _SchedulePlan=[ { Off, TaskIds } | T ], + TimerTable, TaskTable, State ) when Off < ScheduleOffset -> + + ?error_fmt( "While scheduling #~B (~s), found late offset #~B (~s), " + "triggering its delayed tasks first: ~w.", + [ ScheduleOffset, get_timestamp_for( ScheduleOffset, State ), + Off, get_timestamp_for( Off, State ), TaskIds ] ), + + % Using Off rather than ScheduleOffset here: + { NewPlan, NewTimerTable, NewTaskTable } = trigger_tasks( TaskIds, + Off, NowMs, _NewerPlan=T, TimerTable, TaskTable, State ), + + % Drops this offset entry: + perform_schedule( ScheduleOffset, NowMs, NewPlan, NewTimerTable, + NewTaskTable, State ); + + +% Matching offsets: +perform_schedule( ScheduleOffset, NowMs, + _SchedulePlan=[ { ScheduleOffset, TaskIds } | T ], + TimerTable, TaskTable, State ) -> + + %?debug_fmt( "Normal scheduling of #~B (~s), triggering its tasks: ~w.", + % [ ScheduleOffset, get_timestamp_string_for( ScheduleOffset, State ), + % TaskIds ] ), + + % Dropping current offset: + { NewPlan, NewTimerTable, NewTaskTable } = trigger_tasks( TaskIds, + ScheduleOffset, NowMs, _NewerPlan=T, TimerTable, TaskTable, State ), + + % Stop recursing here, keeping the next schedules to come: + { NewPlan, NewTimerTable, NewTaskTable }; + + +% This triggered offset is not found; this is possible: should a T1 timer be +% late, a later one T2 might trigger before (despite registered_offset(T1) < +% registered_offset(T2)), and T2 should have taken care of all preceding tasks +% still registered (through first clause), including those of T1. So, when T1 is +% finally triggered, none of its tasks is left, and this should not be then a +% fatal error. 
+%
+% So, in all other cases (either there exists future offsets or none at all), we
+% do not trigger offsets anymore this time, just disregarding the current one:
+%
+perform_schedule( ScheduleOffset, _NowMs, SchedulePlan, TimerTable, TaskTable,
+				  State ) ->
+
+	?warning_fmt( "Triggered schedule offset #~B (~s) not found (whereas "
+		"schedule plan ~s), ignoring it, as supposing this is a late "
+		"scheduling already applied.", [ ScheduleOffset,
+		get_timestamp_string_for( ScheduleOffset, State ),
+		schedule_plan_to_string( SchedulePlan, State ) ] ),
+
+	% Hopefully this lacking timer can still be cancelled:
+	ShrunkTimerTable = remove_timer( ScheduleOffset, TimerTable ),
+
+	{ SchedulePlan, ShrunkTimerTable, TaskTable }.
+
+
+
+% Triggers specified tasks, and returns updated schedule plan, timer and task
+% tables.
+%
+% Note that:
+%
+% - the specified offset (ScheduleOffset) is the planned one; if being a late
+% trigger, it may be significantly in the past of the current offset
+%
+% - the specified schedule plan is supposed to have already the entry for the
+% specified schedule offset removed.
+%
+% (helper)
+%
+-spec trigger_tasks( [ task_id() ], schedule_offset(), schedule_offset(),
+		schedule_plan(), timer_table(), task_table(), wooper:state() ) ->
+			{ schedule_plan(), timer_table(), task_table() }.
+% All tasks of this offset have been triggered; the corresponding timer is now
+% useless and can be removed:
+%
+trigger_tasks( _TaskIds=[], ScheduleOffset, _NowMs, SchedulePlan, TimerTable,
+			   TaskTable, _State ) ->
+
+	ShrunkTimerTable = remove_timer( ScheduleOffset, TimerTable ),
+
+	{ SchedulePlan, ShrunkTimerTable, TaskTable };
+
+
+trigger_tasks( _TaskIds=[ TaskId | T ], ScheduleOffset, NowMs, SchedulePlan,
+			   TimerTable, TaskTable, State ) ->
+
+	{ TaskEntry, ShrunkTaskTable } = table:extract_entry( TaskId, TaskTable ),
+
+	% Check:
+	TaskId = TaskEntry#task_entry.id,
+
+	% next_schedule shall at least roughly match.
+
+	?debug_fmt( "Triggering task ~B: ~s.",
+				[ TaskId, task_entry_to_string( TaskEntry, State ) ] ),
+
+	launch_task( TaskEntry#task_entry.command,
+				 TaskEntry#task_entry.actuator_pid, State ),
+
+	case decrement_count( TaskEntry#task_entry.count ) of
+
+		0 ->
+			?debug_fmt( "Dropping task ~B for good.", [ TaskId ] ),
+
+			% Fix: previously this branch removed the timer and returned
+			% immediately, silently dropping any remaining task (in T) of this
+			% same offset (they were left in the task table with no schedule
+			% plan entry, hence never triggered again). We now simply continue
+			% with the remaining tasks (the task having already been extracted
+			% from the table); the timer is removed once the list is exhausted
+			% (see the base clause above):
+			%
+			trigger_tasks( T, ScheduleOffset, NowMs, SchedulePlan, TimerTable,
+						   ShrunkTaskTable, State );
+
+		NewCount ->
+			% Will thus be still scheduled afterwards.
+
+			% If first scheduling:
+			StartOffset = case TaskEntry#task_entry.started_on of
+
+				undefined ->
+					% Now rather than scheduled:
+					NowMs;
+
+				AlreadyStartOffset ->
+					AlreadyStartOffset
+
+			end,
+
+			% Periodicity not expected to be 'undefined' here:
+			Periodicity = TaskEntry#task_entry.periodicity,
+
+			% Basing on planned (not actual, i.e. NowMs) offset to (attempt to)
+			% resorb any delay:
+			%
+			NextSchedule = ScheduleOffset + Periodicity,
+
+			NewTaskEntry = TaskEntry#task_entry{
+							next_schedule=NextSchedule,
+							count=NewCount,
+							schedule_count=TaskEntry#task_entry.schedule_count+1,
+							started_on=StartOffset,
+							last_schedule=NowMs },
+
+			NewTaskTable = table:add_entry( TaskId, NewTaskEntry,
+											ShrunkTaskTable ),
+
+			{ NewPlan, NewTimerTable } = insert_task_at( TaskId, NextSchedule,
+				Periodicity, SchedulePlan, TimerTable ),
+
+			%?debug_fmt( "New plan for #~B after trigger of task ~B: ~s",
+			%  [ ScheduleOffset, TaskId,
+			%    schedule_plan_to_string( NewPlan, State ) ] ),
+
+			trigger_tasks( T, ScheduleOffset, NowMs, NewPlan, NewTimerTable,
+						   NewTaskTable, State )
+
+	end.
+
+
+
+% Effective launching of specified task: sends its command (as a message) to
+% its actuator process.
+%
+-spec launch_task( task_command(), actuator_pid(), wooper:state() ) -> void().
+launch_task( Cmd, ActuatorPid, _State ) ->
+
+	% We want to let the requester be able to specify exactly any command term;
+	% otherwise we would have added automatically for example the PID of the
+	% sending scheduler and the corresponding task identifier.
+
+	%?trace_fmt( "Sending command '~p' to actuator ~w.", [ Cmd, ActuatorPid ] ),
+	ActuatorPid ! Cmd.
+
+
+
+% Helper section.
+
+
+% Registers a future scheduling of the specified task.
+%
+% (both ScheduleOffset and DurationFromNow specified to avoid a recomputation)
+%
+-spec register_task_schedule( task_id(), task_entry(), schedule_offset(),
+		ms_duration(), wooper:state() ) -> wooper:state().
+register_task_schedule( TaskId, TaskEntry, ScheduleOffset, DurationFromNow,
+						State ) ->
+
+	%?debug_fmt( "Registering task #~B: ~s.",
+	%  [ TaskId, task_entry_to_string( TaskEntry, State ) ] ),
+
+	NewTaskTable = table:add_new_entry( TaskId, TaskEntry,
+										?getAttr(task_table) ),
+
+	{ NewPlan, NewTimerTable } = insert_task_at( TaskId, ScheduleOffset,
+		DurationFromNow, ?getAttr(schedule_plan), _AccPlan=[],
+		?getAttr(timer_table) ),
+
+	setAttributes( State, [ { task_table, NewTaskTable },
+							{ schedule_plan, NewPlan },
+							{ timer_table, NewTimerTable },
+							{ next_task_id, TaskId+1 } ] ).
+
+
+
+% Inserts specified task at specified offset in plan.
+-spec insert_task_at( task_id(), schedule_offset(), ms_duration(),
+		schedule_plan(), timer_table() ) -> { schedule_plan(), timer_table() }.
+insert_task_at( TaskId, ScheduleOffset, DurationFromNow, Plan, TimerTable ) ->
+
+	%NewP = { NewPlan, _NewTimerTable } =
+	NewP = insert_task_at( TaskId, ScheduleOffset,
+		DurationFromNow, Plan, _AccPlan=[], TimerTable ),
+
+	%trace_utils:debug_fmt( "After having inserted task ~B at offset #~B "
+	%  "(duration from now: ~s), new plan is:~n  ~p",
+	%  [ TaskId, ScheduleOffset,
+	%    time_utils:duration_to_string( DurationFromNow ), NewPlan ] ),
+
+	NewP.
+ + + +% (helper) +% +% Schedule plan exhausted, adding it at end: +insert_task_at( TaskId, ScheduleOffset, DurationFromNow, _SchedulePlan=[], + AccPlan, TimeTable ) -> + + % Not set yet: + NewTimeTable = add_timer( ScheduleOffset, DurationFromNow, TimeTable ), + + { lists:reverse( [ { ScheduleOffset, [ TaskId ] } | AccPlan ] ), + NewTimeTable }; + + +% Too early in plan, continue: +insert_task_at( TaskId, ScheduleOffset, DurationFromNow, + _SchedulePlan=[ H={ Off, _Ids } | T ], AccPlan, TimeTable ) + when Off < ScheduleOffset -> + insert_task_at( TaskId, ScheduleOffset, DurationFromNow, T, + [ H | AccPlan ], TimeTable ); + +% Matching offset found, registering and stopping: +insert_task_at( TaskId, ScheduleOffset, _DurationFromNow, + _SchedulePlan=[ { ScheduleOffset, Ids } | T ], AccPlan, TimeTable ) -> + % Timer already set for that offset, none to add: + { lists:reverse( AccPlan ) ++ [ { ScheduleOffset, [ TaskId | Ids ] } | T ], + TimeTable }; + +% Gone past target: +insert_task_at( TaskId, ScheduleOffset, DurationFromNow, + SchedulePlan, % Implicit: SchedulePlan=[ { Off, Ids } | T ], + AccPlan, TimeTable ) -> % Implicit: when Off > ScheduleOffset + + % Not set yet, registering and stop recursing: + NewTimeTable = add_timer( ScheduleOffset, DurationFromNow, TimeTable ), + + { lists:reverse( [ { ScheduleOffset, [ TaskId ] } | AccPlan ] ) + ++ SchedulePlan, NewTimeTable }. + + + +% Removes specified task from schedule plan. +-spec unschedule_task( task_id(), schedule_offset(), schedule_plan(), + timer_table() ) -> 'not_found' | { schedule_plan(), timer_table() }. +unschedule_task( TaskId, PlannedNextSchedule, SchedulePlan, TimerTable ) -> + unschedule_task( TaskId, PlannedNextSchedule, SchedulePlan, _AccPlan=[], + TimerTable ). 
+
+
+% Walks the schedule plan looking for the slot at PlannedNextSchedule that
+% contains TaskId; returns 'not_found' if no such slot/task exists, otherwise
+% the updated {SchedulePlan, TimerTable} pair.
+%
+% (helper)
+unschedule_task( _TaskId, _PlannedNextSchedule, _SchedulePlan=[], _AccPlan,
+				 _TimerTable ) ->
+	not_found;
+
+% Slot matching the planned schedule offset found:
+unschedule_task( TaskId, PlannedNextSchedule,
+		_SchedulePlan=[ { PlannedNextSchedule, TaskIds } | T ], AccPlan,
+		TimerTable ) ->
+
+	case lists:member( TaskId, TaskIds ) of
+
+		true ->
+			case lists:delete( TaskId, TaskIds ) of
+
+				% Empty slot now, hence removed as a whole, together with the
+				% corresponding timer that is now useless:
+				%
+				[] ->
+
+					%trace_utils:debug_fmt(
+					%  "Removing empty schedule slot at #~B.",
+					%  [ PlannedNextSchedule ] ),
+
+					NewSchedPlan = lists:reverse( AccPlan ) ++ T,
+
+					ShrunkTimerTable = remove_timer( PlannedNextSchedule,
+						TimerTable ),
+
+					{ NewSchedPlan, ShrunkTimerTable };
+
+				% Other tasks remain in this slot; the timer is still needed:
+				ShrunkTaskIds ->
+					{ lists:reverse( AccPlan ) ++
+						[ { PlannedNextSchedule, ShrunkTaskIds } | T ],
+					  TimerTable }
+
+			end;
+
+		false ->
+			not_found
+
+	end;
+
+% Not the schedule offset you are looking for:
+unschedule_task( TaskId, PlannedNextSchedule, _SchedulePlan=[ P | T ], Acc,
+		TimerTable ) ->
+	unschedule_task( TaskId, PlannedNextSchedule, T, [ P | Acc ], TimerTable ).
+
+
+
+% Adds a timer to trigger a future scheduling: arranges for a timerTrigger/2
+% oneway message to be sent to this instance once the specified duration has
+% elapsed, and records the resulting timer reference (for later cancellation).
+%
+-spec add_timer( schedule_offset(), ms_duration(), timer_table() ) ->
+			timer_table().
+add_timer( ScheduleOffset, DurationFromNow, TimerTable ) ->
+
+	% WOOPER oneway to be sent to this instance:
+	Message = { timerTrigger, [ ScheduleOffset ] },
+
+	% Duration not exact, but is at least as long as requested:
+	case timer:send_after( DurationFromNow, Message ) of
+
+		% TimerRef useful to cancel; not expected to be already existing:
+		{ ok, TimerRef } ->
+			table:add_new_entry( ScheduleOffset, TimerRef, TimerTable );
+
+		{ error, Reason } ->
+			throw( { timer_setting_failed, Reason, ScheduleOffset,
+					 DurationFromNow } )
+
+	end.
+
+
+
+% Removes a timer.
+-spec remove_timer( schedule_offset(), timer_table() ) -> timer_table().
+remove_timer( ScheduleOffset, TimerTable ) -> + + %trace_utils:debug_fmt( "Removing timer for #~B.", [ ScheduleOffset ] ), + + { TimerRef, ShrunkTimerTable } = + table:extract_entry( ScheduleOffset, TimerTable ), + + case timer:cancel( TimerRef ) of + + { ok, cancel } -> + ok; + + { error, Reason } -> + trace_utils:error_fmt( "The cancellation of timer '~p' " + "failed for schedule offset #~B (reason: '~p').", + [ TimerRef, ScheduleOffset, Reason ] ) + %throw( { timer_cancellation_failed, Reason, + % TimerRef, ScheduleOffset } ) + + end, + + ShrunkTimerTable. + + + + +% Returns the time offset of specified (absolute) time, thus expressed in +% internal time, hence relative to the start time of this scheduler. +% +% Corresponds to the (real, actual) number of milliseconds since the start of +% this scheduler. +% +-spec get_current_schedule_offset( wooper:state() ) -> schedule_offset(). +get_current_schedule_offset( State ) -> + time_utils:get_monotonic_time() - ?getAttr(server_start). + + +% Returns the (approximate) user-level timestamp corresponding to now. +-spec get_current_timestamp( wooper:state() ) -> timestamp(). +get_current_timestamp( State ) -> + % Better (more homogeneous) than using calendar:local_time/0 for example: + get_timestamp_for( get_current_schedule_offset( State ), State ). + + +% Returns a textual description of the (approximate) user-level timestamp +% corresponding to now. +% +-spec get_current_timestamp_string( wooper:state() ) -> ustring(). +get_current_timestamp_string( State ) -> + get_timestamp_string_for( get_current_schedule_offset( State ), State ). + + + +% Returns the internal time offset corresponding to this user-level timestamp +% (such as {{2020,3,23},{16,44,0}}). +% +-spec get_schedule_offset_for( timestamp(), wooper:state() ) -> + schedule_offset(). 
+get_schedule_offset_for( UserTimestamp, State ) -> + + % Number of milliseconds since year 0: + GregorianMillisecs = + 1000 * calendar:datetime_to_gregorian_seconds( + time_utils:local_to_universal_time( UserTimestamp ) ), + + GregorianMillisecs - ?getAttr(server_gregorian_start). + + + +% Returns the (approximate) user-level timestamp (ex: {{2020,3,23},{16,44,0}}), +% in VM system time (UTC), corresponding to specified internal time offset. +% +-spec get_timestamp_for( schedule_offset(), wooper:state() ) -> timestamp(). +get_timestamp_for( Offset, State ) -> + + GregorianSecs = + round( ( Offset + ?getAttr(server_gregorian_start) ) / 1000 ), + + time_utils:universal_to_local_time( + calendar:gregorian_seconds_to_datetime( GregorianSecs ) ). + + + +% Returns a textual description of the user-level timestamp corresponding to +% specified internal time offset. +% +-spec get_timestamp_string_for( schedule_offset(), wooper:state() ) -> + ustring(). +get_timestamp_string_for( Offset, State ) -> + time_utils:timestamp_to_string( get_timestamp_for( Offset, State ) ). + + + +% Registers the fact that a task has just been triggered once more. +-spec decrement_count( schedule_count() ) -> maybe( schedule_count() ). +decrement_count( _Count=unlimited ) -> + unlimited; + +% (guard just for safety, should never be negative): +decrement_count( Count ) when Count > 0 -> + Count - 1. + + + +% Vet helpers, to check and canonicalise. + + +% Checks and canonicalises this user-specified task command. 
+% A command may be a mere oneway name (an atom)...
+vet_task_command( UserTaskCommand=Oneway, _State ) when is_atom( Oneway ) ->
+	UserTaskCommand;
+
+% ...or a {OnewayName, Args} pair.
+%
+% If Args is not a list, expecting that this is a single non-list element that
+% will be considered to be wrapped in a list by the callee, so both are correct
+% and they cover all cases:
+%
+vet_task_command( UserTaskCommand={ Oneway, _Args }, _State )
+  when is_atom( Oneway ) ->
+	UserTaskCommand;
+
+% Anything else is rejected (logged, then thrown):
+vet_task_command( UserTaskCommand, State ) ->
+	?error_fmt( "Invalid user-specified task command: '~p'.",
+				[ UserTaskCommand ] ),
+	throw( { invalid_task_command, UserTaskCommand } ).
+
+
+
+% Checks and canonicalises this user-specified start time: returns the number of
+% milliseconds before starting any corresponding task (possibly zero).
+%
+-spec vet_start_time( term(), wooper:state() ) -> ms_duration().
+vet_start_time( _UserStartTime=asap, _State ) ->
+	0;
+
+vet_start_time( _UserStartTime=flexible, _State ) ->
+	0;
+
+% Either an absolute timestamp or a DHMS duration:
+vet_start_time( _UserStartTime=StartTime, State ) ->
+
+	case time_utils:is_timestamp( StartTime ) of
+
+		true ->
+
+			% We consider that this scheduler-side local time (including time
+			% zone and Daylight Saving Time) and the time received from the
+			% user obey the same conventions:
+			%
+			Now = time_utils:get_timestamp(),
+
+			% So the duration (only value that will matter) shall be correct:
+			case time_utils:get_duration( Now, StartTime ) of
+
+				SecD when SecD > 0 ->
+					% In the future, perfect:
+					1000 * SecD;
+
+				SecD ->
+					?warning_fmt( "Specified user start time (~p, i.e. ~s) "
+						"is in the past (i.e. 
~s before current time, which " + "is ~s), requesting immediate scheduling instead.", + [ StartTime, + time_utils:timestamp_to_string( StartTime ), + time_utils:duration_to_string( -1000 * SecD ), + time_utils:timestamp_to_string( Now ) ] ), + 0 + + end; + + false -> + % Durations are more reliable: + case time_utils:is_dhms_duration( StartTime ) of + + true -> + MsDuration = 1000 * + time_utils:dhms_to_seconds( StartTime ), + + case MsDuration of + + D when D > 0 -> + % In the future, perfect: + D; + + D -> + ?warning_fmt( "Specified user duration " + "(~p, i.e. ~s) is negative (i.e. in the past), " + "requesting immediate scheduling.", + [ StartTime, + time_utils:duration_to_string( D ) ] ), + 0 + + end; + + false -> + ?error_fmt( "Invalid user-specified start time (neither " + "timestamp nor duration): '~p'.", + [ StartTime ] ), + throw( { invalid_start_time, StartTime } ) + + end + + end. + + + +% Returns the user-specified schedule count. +-spec vet_count( term(), wooper:state() ) -> schedule_count(). +vet_count( ScheduleCount=unlimited, _State ) -> + ScheduleCount; + +vet_count( C, _State ) when is_integer( C ) andalso C > 0 -> + C; + +vet_count( Other, State ) -> + + ?error_fmt( "Invalid user-specified schedule count: ~p.", + [ Other ] ), + + throw( { invalid_schedule_count, Other } ). + + + +% Returns any user-specified periodicity. +-spec vet_periodicity( term(), term(), wooper:state() ) -> + maybe( periodicity() ). +vet_periodicity( _UserPeriodicity=once, _Count=1, _State ) -> + undefined; + +vet_periodicity( UserPeriodicity=once, Count, State ) -> + + ?error_fmt( "Task periodicity is 'once', whereas specified schedule count " + "is ~p.", [ Count ] ), + + throw( { periodicity_count_mismatch, UserPeriodicity, Count } ); + +vet_periodicity( UserPeriodicity, _Count, State ) -> + vet_user_periodicity( UserPeriodicity, State ). + + + +% Returns a vetted periodicity. 
+% +% (helper, defined for reuse) +-spec vet_user_periodicity( term(), wooper:state() ) -> ms_duration(). +vet_user_periodicity( UserPeriodicity, State ) -> + + case time_utils:is_dhms_duration( UserPeriodicity ) of + + true -> + case 1000 * time_utils:dhms_to_seconds( UserPeriodicity ) of + + D when D > 0 -> + D; + + D -> + ?error_fmt( "Invalid user-specified non strictly positive " + "task periodicity ~p, i.e. ~s).", + [ UserPeriodicity, + time_utils:duration_to_string( D ) ] ), + throw( { non_strictly_positive_user_periodicity, + UserPeriodicity } ) + + end; + + false -> + case is_integer( UserPeriodicity ) andalso UserPeriodicity > 0 of + + true -> + 1000 * UserPeriodicity; + + false -> + throw( { invalid_user_periodicity, UserPeriodicity } ) + + end + + end. + + + +% Returns the user-specified actuator PID. +-spec vet_actuator_pid( term() ) -> actuator_pid(). +vet_actuator_pid( Pid ) when is_pid( Pid ) -> + Pid; + +vet_actuator_pid( Other ) -> + throw( { invalid_actuator_pid, Other } ). + + + + +% Section for textual representations. + + +% Returns a textual description of this server. +-spec to_string( wooper:state() ) -> ustring(). +to_string( State ) -> + + TaskStr = case table:enumerate( ?getAttr(task_table) ) of + + [] -> + "no task"; + + TaskPairs -> + TaskDescs = [ text_utils:format( "task #~B: ~s", + [ TId, task_entry_to_string( TE, State ) ] ) + || { TId, TE } <- lists:sort( TaskPairs ) ], + text_utils:format( "~B tasks: ~s", [ length( TaskPairs ), + text_utils:strings_to_string( TaskDescs ) ] ) + + end, + + SchedStr = schedule_plan_to_string( ?getAttr(schedule_plan), State ), + + TimerStr = timer_table_to_string( ?getAttr(timer_table), State ), + + text_utils:format( "US scheduler, a ~s; " + "registering ~s (with a total of ~B task(s) already declared); " + "current schedule ~s; with ~s", + [ class_USServer:to_string( State ), TaskStr, + ?getAttr(next_task_id) - 1, SchedStr, TimerStr ] ). 
+ + + +% Returns a textual description of the specified schedule plan. +-spec schedule_plan_to_string( schedule_plan(), wooper:state() ) -> ustring(). +schedule_plan_to_string( _SchedulePlan=[], _State ) -> + "is empty"; + +schedule_plan_to_string( SchedulePlan, State ) -> + + NowOffset = get_current_schedule_offset( State ), + text_utils:format( "registers ~B trigger(s) (at #~B, i.e. ~s): ~s", + [ length( SchedulePlan ), NowOffset, + get_timestamp_string_for( NowOffset, State ), + text_utils:strings_to_string( + [ trigger_to_string( P, State ) || P <- SchedulePlan ] ) ] ). + + + +% Returns a textual description of the specified timer table. +-spec timer_table_to_string( timer_table(), wooper:state() ) -> ustring(). +timer_table_to_string( TimerTable, State ) -> + + case table:keys( TimerTable ) of + + [] -> + "no timer set"; + + [ Offset ] -> + text_utils:format( "a single timer set, at #~B (~s)", + [ Offset, get_timestamp_string_for( Offset, State ) ] ); + + Offsets -> + text_utils:format( "~B timers set, at: ~s", [ length( Offsets ), + text_utils:strings_to_string( [ text_utils:format( "#~B (~s)", + [ Off, get_timestamp_string_for( Off, State ) ] ) + || Off <- Offsets ] ) ] ) + + end. + + + + +% Returns a textual description of the specified schedule pair. +-spec trigger_to_string( schedule_pair(), wooper:state() ) -> ustring(). +trigger_to_string( { Offset, TaskIds }, State ) -> + text_utils:format( "at offset #~B (~s), ~B task(s) registered: ~w", + [ Offset, get_timestamp_string_for( Offset, State ), + length( TaskIds ), TaskIds ] ). + + + +% Returns a textual description of the specified task entry. +-spec task_entry_to_string( task_entry(), wooper:state() ) -> ustring(). 
+task_entry_to_string( #task_entry{ id=_TaskId, + command=Cmd, + next_schedule=NextSchedOffset, + periodicity=Periodicity, + count=Count, + schedule_count=SchedCount, + started_on=MaybeStartOffset, + last_schedule=MaybeLastOffset, + requester_pid=RequesterPid, + actuator_pid=ActuatorPid }, State ) -> + + ExecStr = case MaybeStartOffset of + + undefined -> + SchedCount = 0, + "never executed yet"; + + StartOffset -> + case SchedCount of + + 1 -> + % First and last are the same here: + MaybeStartOffset = MaybeLastOffset, + text_utils:format( + "already executed a single time, at #~B (~s)", + [ StartOffset, + get_timestamp_string_for( StartOffset, State ) ] ); + + % Expected higher than 1: + C when C > 1 -> + text_utils:format( "already executed ~B times, the " + "first at #~B (~s) and the last at #~B (~s)", + [ C, StartOffset, + get_timestamp_string_for( StartOffset, State ), + MaybeLastOffset, + get_timestamp_string_for( MaybeLastOffset, State ) ] ) + + end + + end, + + NextSchedTime = get_timestamp_string_for( NextSchedOffset, State ), + + PeriodStr = periodicity_to_string( Periodicity ), + + CountStr = schedule_count_to_string( Count ), + + text_utils:format( + "task to trigger command '~p' on actuator ~w, ~s, " + "to be scheduled next at offset #~B (~s) according to ~s, and for ~s; " + "it was declared by ~w", + [ Cmd, ActuatorPid, ExecStr, NextSchedOffset, NextSchedTime, PeriodStr, + CountStr, RequesterPid ]). + + + +% Returns a textual description of specified periodicity. +-spec periodicity_to_string( periodicity() ) -> ustring(). +% Should never happen: +periodicity_to_string( _Periodicity=undefined ) -> + "no periodicity"; + +periodicity_to_string( Periodicity ) -> + text_utils:format( "a periodicity of ~s", + [ time_utils:duration_to_string( Periodicity ) ] ). + + + +% Returns a textual description of specified schedule count. +-spec schedule_count_to_string( schedule_count() ) -> ustring(). 
+schedule_count_to_string( _Count=unlimited ) -> + "an unlimited number of times"; + +schedule_count_to_string( _Count=1 ) -> + "a single time"; + +schedule_count_to_string( Count ) -> + text_utils:format( "~B times", [ Count ] ). diff --git a/src/class_USServer.erl b/src/class_USServer.erl new file mode 100644 index 0000000..842a4d8 --- /dev/null +++ b/src/class_USServer.erl @@ -0,0 +1,249 @@ +% Copyright (C) 2014-2020 Olivier Boudeville +% +% This file is part of US-Common, part of the Universal Server framework. +% +% This program is free software: you can redistribute it and/or modify it under +% the terms of the GNU Affero General Public License as published by the Free +% Software Foundation, either version 3 of the License, or (at your option) any +% later version. +% +% This program is distributed in the hope that it will be useful, but WITHOUT +% ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +% FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +% details. +% +% You should have received a copy of the GNU Affero General Public License along +% with this program. If not, see . +% +% Author: Olivier Boudeville (olivier.boudeville@esperide.com) +% Creation date: Saturday, June 28, 2014. + + +-module(class_USServer). + +-define( class_description, "Mother class of all US servers." ). + + +% Determines what are the direct mother classes of this class (if any): +-define( superclasses, [ class_TraceEmitter ] ). + + +% Exported helpers: +-export([ register_name/3, unregister_name/1, to_string/1 ]). + + +-type server_pid() :: class_TraceEmitter:emitter_pid(). + +-export_type([ server_pid/0 ]). + + +% Shorthands: + +-type ustring() :: text_utils:ustring(). + + + +% Implementation notes: +% +% We have to store to time-related information, one, 'server_start', to +% determine actual durations, and one, 'server_gregorian_start', to record the +% actual time at which this server was started. 
+ + +% Class-specific attributes of a service are: +-define( class_attributes, [ + + % Attempt also to keep this millisecond count not too large: + { server_start, time_utils:ms_monotonic(), + "a point of time reference for later duration measurements, in VM " + "monotonic time (milliseconds)" }, + + % As Gregorian conventions are used for conversions (from a given measured + % duration, obtained through monotonic times), adding this value allows, + % with the Gregorian result, to establish directly a proper (absolut) + % timestamp: + % + { server_gregorian_start, ms_since_year_0(), + "the internal system time at which this server was started " + "since year 0, to facilitate timestamp conversions to/from user time" }, + + { registration_name, maybe( naming_utils:registration_name() ), + "records the name of this server, as registered in the naming service" }, + + { registration_scope, maybe( naming_utils:registration_scope() ), + "records the scope of the registration of this server in the naming " + "service" } ] ). + + + +% Allows to define WOOPER base variables and methods for that class: +-include("wooper.hrl"). + + +-define( trace_emitter_categorization, "US" ). + + +% Allows to use macros for trace sending: +-include("class_TraceEmitter.hrl"). + + + +% Constructs a new server instance. +% +% Parameter is ServerName, the name of that server. +% +-spec construct( wooper:state(), ustring() ) -> wooper:state(). +construct( State, ServerName ) -> + init_common( ServerName, _RegistrationName=undefined, + _RegistrationScope=undefined, State ). + + + +% Constructs a new, registered, server instance. +% +% Parameters are: +% +% - ServerName, the name of that server +% - RegistrationName, the name under which this server shall be registered +% - RegistrationScope, the scope at which this server shall be registered +% +-spec construct( wooper:state(), ustring(), naming_utils:registration_name(), + naming_utils:registration_scope() ) -> wooper:state(). 
+construct( State, ServerName, RegistrationName, RegistrationScope ) -> + init_common( ServerName, RegistrationName, RegistrationScope, State ). + + + +% (helper) +-spec init_common( ustring(), naming_utils:registration_name(), + naming_utils:registration_scope(), wooper:state() ) -> wooper:state(). +init_common( ServerName, RegistrationName, RegistrationScope, State ) -> + + % First the direct mother classes: + TraceState = class_TraceEmitter:construct( State, + ?trace_categorize(ServerName) ), + + % Constant based on the number of milliseconds of the EPOCH, since year 0; + % used in order to compute the most complete offset (in UTC): + % + MsOfEpoch = time_utils:get_epoch_milliseconds_since_year_0(), + + SetState = setAttributes( TraceState, [ + + { server_start, time_utils:get_monotonic_time() }, + + % Hence since year 0 (so a large number), based on "user" time: + { server_gregorian_start, MsOfEpoch + time_utils:get_system_time() } ] ), + + register_name( RegistrationName, RegistrationScope, SetState ). + + + +% Destructor. +-spec destruct( wooper:state() ) -> wooper:state(). +destruct( State ) -> + + ?info_fmt( "Deleting server ~s.", [ to_string( State ) ] ), + + UnregState = unregister_name( State ), + + ?info( "Server deleted." ), + + UnregState. + + + +% Any server must be able to answer to (asynchronous) ping requests from a +% monitoring server. +% +% (const oneway, as meant to be asynchronous) +% +-spec ping( wooper:state(), class_Supervisor:ping_id(), pid() ) -> + const_oneway_return(). +ping( State, PingId, MonitorPid ) -> + + % Sends back another oneway (no result expected here): + MonitorPid ! { pong, [ PingId, self() ] }, + + wooper:const_return(). + + + +% Registers this (supposedly not registered) server to naming server. +% +% (exported helper) +% +-spec register_name( naming_utils:registration_name(), + naming_utils:registration_scope(), wooper:state() ) -> + wooper:state(). 
+register_name( _RegistrationName=undefined, _RegistrationScope, State ) -> + ?info( "No name to register, no registration performed." ), + State; + +register_name( RegistrationName, RegistrationScope, State ) -> + + naming_utils:register_as( RegistrationName, RegistrationScope ), + + ?info_fmt( "Registered (~s) as '~s'.", + [ RegistrationScope, RegistrationName ] ), + + setAttributes( State, [ { registration_name, RegistrationName }, + { registration_scope, RegistrationScope } ] ). + + + +% Unregisters this (supposedly registered) server from naming server. +% +% (exported helper) +% +-spec unregister_name( wooper:state() ) -> wooper:state(). +unregister_name( State ) -> + + case ?getAttr(registration_name) of + + undefined -> + ?info( "No registration name available, " + "no unregistering performed." ), + State; + + RegName -> + + RegScope = ?getAttr(registration_scope), + + ?info_fmt( "Unregistering from name '~s' (scope: ~s).", + [ RegName, RegScope ] ), + + naming_utils:unregister( RegName, RegScope ), + + State + + end. + + + +% Returns a textual description of this server. +-spec to_string( wooper:state() ) -> string(). +to_string( State ) -> + + StartTimestamp = time_utils:gregorian_ms_to_timestamp( + ?getAttr(server_gregorian_start) ), + + UptimeStr = time_utils:duration_to_string( + time_utils:get_monotonic_time() - ?getAttr(server_start) ), + + TimeStr = text_utils:format( "started on ~s (uptime: ~s)", + [ time_utils:timestamp_to_string( StartTimestamp ), UptimeStr ] ), + + RegString = case ?getAttr(registration_name) of + + undefined -> + "with no registration name defined"; + + RegName -> + text_utils:format( "whose registration name is '~s' (scope: ~s)", + [ RegName, ?getAttr(registration_scope) ] ) + + end, + + text_utils:format( "server named '~s', ~s, ~s", + [ ?getAttr(name), TimeStr, RegString ] ). 
diff --git a/src/class_USTaskRing.erl b/src/class_USTaskRing.erl new file mode 100644 index 0000000..53aafb6 --- /dev/null +++ b/src/class_USTaskRing.erl @@ -0,0 +1,341 @@ +% Copyright (C) 2020-2020 Olivier Boudeville +% +% This file is part of US-Common, part of the Universal Server framework. +% +% This program is free software: you can redistribute it and/or modify it under +% the terms of the GNU Affero General Public License as published by the Free +% Software Foundation, either version 3 of the License, or (at your option) any +% later version. +% +% This program is distributed in the hope that it will be useful, but WITHOUT +% ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +% FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +% details. +% +% You should have received a copy of the GNU Affero General Public License along +% with this program. If not, see . +% +% Author: Olivier Boudeville (olivier.boudeville@esperide.com) +% Creation date: Wednesday, April 22, 2020. + + +-module(class_USTaskRing). + + +-define( class_description, + "Ring to schedule evenly and synchronously a set of tasks, as a " + "series." ). + + +% Design notes: +% +% This class allows to schedule a set of tasks (corresponding to as many +% actuators) observing the same periodicity: +% - synchronously: a given task will not start until the preceding one is over +% - evenly: if having N tasks at periodicity P, a new task will be scheduled +% every P/N +% +% A key property is that all registered tasks will be serialized (no +% overlapping happening), and uniformly. +% +% A typical use-case is having a resource (ex: a tool for web analysis) used by +% multiple consumers (ex: several websites) whose (single) state may not be +% protected against concurrent accesses. 
+%
+% At any time, up to one consumer shall access that resource, none shall be
+% skipped, the expected pace shall be respected as much as possible, and
+% resource use shall be spaced as evenly as possible (ex: to avoid spikes in
+% resource consumption such as CPU).
+%
+% For that, a task ring registering all tasks is created; it will register to
+% the scheduler, and will chain these tasks accordingly.
+%
+% No blocking receive of the triggered actuator is done by a task ring, so that
+% it remains responsive.
+%
+% However, should a task fail to report its completion, that ring will never
+% schedule any other task.
+
+
+% Determines what are the direct mother classes of this class (if any):
+-define( superclasses, [ class_USServer ] ).
+
+
+% Not a request, but a oneway expected to send in turn a oneway callback.
+-type task_sync_command() :: wooper:oneway_call().
+
+-type ring_pid() :: pid().
+
+-export_type([ task_sync_command/0, ring_pid/0 ]).
+
+
+
+% Shorthands:
+
+-type ustring() :: text_utils:ustring().
+
+-type user_periodicity() :: class_USScheduler:user_periodicity().
+
+-type ms_duration() :: time_utils:ms_duration().
+
+
+-type periodicity() :: ms_duration().
+
+-type actuator_pid() :: class_USScheduler:actuator_pid() .
+
+-type schedule_count() :: class_USScheduler:schedule_count().
+
+-type scheduler_pid() :: class_USScheduler:server_pid().
+
+-type task_id() :: class_USScheduler:task_id().
+
+-type actuator_ring() :: ring_utils:ring( actuator_pid() ).
+
+
+% To silence unused shorthands:
+-export_type([ actuator_ring/0, periodicity/0, task_id/0 ]).
+
+
+
+
+% The class-specific attributes:
+-define( class_attributes, [
+
+	{ actuator_ring, actuator_ring(),
+	  "the actuators, cycling in a round-robin manner" },
+
+	{ task_periodicity, ms_duration(),
+	  "the periodicity at which each task will be triggered" },
+
+	% Not useful to record:
+	%{ ring_periodicity, ms_duration(),
+	%  "the periodicity at which this ring will be triggered, i.e.
the duration" + % " between two successive task triggers" }, + + { task_call, task_sync_command(), + "the task synchronous command (oneway triggering in turn a callback) to " + "be sent to each actuator in a row" }, + + { scheduler_pid, scheduler_pid(), + "the PID of the scheduler used by this ring" }, + + { waited_actuator_pid, maybe( actuator_pid() ), + "the PID of any waited actuator" }, + + { task_id, task_id(), + "the task identifier of that ring as set by its scheduler" } ] ). + + + + +% Used by the trace_categorize/1 macro to use the right emitter: +-define( trace_emitter_categorization, "US.Scheduling.TaskRing" ). + + +% Allows to define WOOPER base variables and methods for that class: +-include_lib("wooper/include/wooper.hrl"). + +% Allows to use macros for trace sending: +-include_lib("traces/include/class_TraceEmitter.hrl"). + + + +% Creates a task ring for specified actuators, so that they are (immediately, +% yet flexibly) triggered with the specified request (a synchronous task +% command) at the specified overall periodicity, for the specified number of +% times, by the specified scheduler. +% +-spec construct( wooper:state(), ustring(), [ actuator_pid() ], + wooper:oneway_name(), wooper:method_arguments(), user_periodicity(), + schedule_count(), scheduler_pid() ) -> wooper:state(). 
+construct( _State, _RingName, _Actuators=[], _TaskOnewayName, _TaskOnewayArgs, + _TaskPeriodicity, _ScheduleCount, _SchedulerPid ) -> + throw( no_actuator_defined ); + + +construct( State, RingName, Actuators, TaskOnewayName, TaskOnewayArgs, + UserTaskPeriodicity, ScheduleCount, SchedulerPid ) + when is_list( TaskOnewayArgs ) -> + + % First the direct mother classes, then this class-specific actions: + SrvState = class_USServer:construct( State, ?trace_categorize(RingName) ), + + TaskPeriodicity = class_USScheduler:vet_user_periodicity( + UserTaskPeriodicity, SrvState ), + + TaskCall = { TaskOnewayName, + list_utils:append_at_end( self(), TaskOnewayArgs ) }, + + SetState = setAttributes( SrvState, + [ { task_periodicity, TaskPeriodicity }, + { task_call, TaskCall }, + { scheduler_pid, SchedulerPid }, + { waited_actuator_pid, undefined } ] ), + + { RingPeriodicity, ActState } = + set_actuators( Actuators, TaskPeriodicity, SetState ), + + % No need to perform immediately the first trigger here: the scheduler will + % by itself ensure that. + + % Self-registering: + SchedulerPid ! { registerTask, [ _Cmd=triggerNextTask, _StartTime=flexible, + RingPeriodicity, ScheduleCount, _ActPid=self() ], + self() }, + + FinalState = receive + + % No other result expected: + { wooper_result, { task_registered, TaskId } } -> + setAttribute( ActState, task_id, TaskId ) + + end, + + ?send_info_fmt( FinalState, "Started ~s.", [ to_string( FinalState ) ] ), + + FinalState. + + + +% Overridden destructor. +-spec destruct( wooper:state() ) -> wooper:state(). +destruct( State ) -> + + TaskId = ?getAttr(task_id), + + SchedPid = ?getAttr(scheduler_pid), + + SchedPid ! 
{ unregisterTask, [ TaskId ], self() },
+
+	?trace_fmt( "Being destructed, unregistering from scheduler ~w "
+				"(task: #~B).", [ SchedPid, TaskId ] ),
+
+	receive
+
+		task_unregistered ->
+			ok;
+
+		{ task_unregistration_failed, Error } ->
+			?error_fmt( "Unregistration of task #~B failed "
+						"at deletion: ~p.", [ TaskId, Error ] )
+
+	end,
+
+	% Nothing to be done for actuators.
+
+	?info( "Deleted." ),
+
+	State.
+
+
+
+% Method section.
+
+
+% Requires this ring to trigger its next task (typically triggered itself by a
+% scheduler, as a class_USScheduler:task_command()).
+%
+% Expects this task, triggered synchronously, to call back notifyTaskDone/2.
+%
+-spec triggerNextTask( wooper:state() ) -> oneway_return().
+triggerNextTask( State ) ->
+
+	case ?getAttr(waited_actuator_pid) of
+
+		% Normal case:
+		undefined ->
+			{ ThisActuatorPid, NewRing } =
+				ring_utils:head( ?getAttr(actuator_ring) ),
+
+			ThisActuatorPid ! ?getAttr(task_call),
+
+			?debug_fmt( "Triggered actuator ~w.", [ ThisActuatorPid ] ),
+
+			TrigState = setAttributes( State, [
+				{ actuator_ring, NewRing },
+				{ waited_actuator_pid, ThisActuatorPid } ] ),
+
+			wooper:return_state( TrigState );
+
+		% A task is supposedly still in progress:
+		LingeringActPid ->
+			% No other measure really possible (no task overlapping):
+			?error_fmt( "Next task triggered whereas current actuator (~w) "
+				"was not reported as having finished. Not triggering "
+				"a new task, so skipping this period as a whole "
+				"to wait for the lingering actuator.",
+				[ LingeringActPid ] ),
+			wooper:const_return()
+
+	end.
+
+
+
+% Notifies this ring that the specified actuator completed its task.
+-spec notifyTaskDone( wooper:state(), actuator_pid() ) -> oneway_return().
+notifyTaskDone( State, ActuatorPid ) -> + + % Check: + ActuatorPid = ?getAttr(waited_actuator_pid), + + ?debug_fmt( "Actuator ~w reported as having operated.", + [ ActuatorPid ] ), + + DoneState = setAttribute( State, waited_actuator_pid, undefined ), + + wooper:return_state( DoneState ). + + + +% Helper section. + + +% Sets new actuators (no interaction done with the scheduler). +-spec set_actuators( [ actuator_pid() ], ms_duration(), wooper:state() ) -> + { unit_utils:seconds(), wooper:state() }. +set_actuators( _NewActuators=[], _TaskPeriodicity, _State ) -> + throw( no_actuator_defined ); + +set_actuators( NewActuators, TaskPeriodicity, State ) -> + + NewActuatorRing = ring_utils:from_list( NewActuators ), + + ActuatorCount = length( NewActuators ), + + % By design not a division by zero; seconds wanted for the scheduler: + RingPeriodicity = erlang:round( TaskPeriodicity / ActuatorCount ), + + ?trace_fmt( "This ring is to be triggered by its scheduler every ~s, " + "as task-level periodicity is ~s, and ~B actuators are being " + "synchronised.", + [ time_utils:duration_to_string( RingPeriodicity ), + time_utils:duration_to_string( TaskPeriodicity ), + ActuatorCount ] ), + + SetState = setAttribute( State, actuator_ring, NewActuatorRing ), + + % Seconds wanted for the scheduler: + { RingPeriodicity div 1000, SetState }. + + + +% Returns a textual description of this task ring. +-spec to_string( wooper:state() ) -> ustring(). 
+to_string( State ) -> + + Ring = ?getAttr(actuator_ring), + + RingSize = ring_utils:size( Ring ), + + TaskPeriodicity = ?getAttr(task_periodicity), + + text_utils:format( "task ring '~s' alternating between ~B actuators (~s) " + "each with a periodicity of ~s (hence ~s between triggers; " + "using for that task id ~B, assigned by scheduler ~w) for the sending " + "of following synchronised call:~n'~p'", + [ ?getAttr(name), RingSize, + text_utils:pids_to_short_string( ring_utils:to_list( Ring ) ), + time_utils:duration_to_string( TaskPeriodicity ), + % Not a division by zero: + time_utils:duration_to_string( TaskPeriodicity / RingSize ), + ?getAttr(task_id), ?getAttr(scheduler_pid), ?getAttr(task_call) ] ). diff --git a/src/us_common.app.src b/src/us_common.app.src new file mode 120000 index 0000000..5a517ec --- /dev/null +++ b/src/us_common.app.src @@ -0,0 +1 @@ +../ebin/us_common.app \ No newline at end of file diff --git a/test/GNUmakefile b/test/GNUmakefile new file mode 100644 index 0000000..6fa7288 --- /dev/null +++ b/test/GNUmakefile @@ -0,0 +1,4 @@ +US_COMMON_TOP = .. + + +include $(US_COMMON_TOP)/GNUmakesettings.inc diff --git a/test/class_USScheduler_test.erl b/test/class_USScheduler_test.erl new file mode 100644 index 0000000..a0d8e6c --- /dev/null +++ b/test/class_USScheduler_test.erl @@ -0,0 +1,300 @@ +% Copyright (C) 2020-2020 Olivier Boudeville +% +% This file is part of US-Common, part of the Universal Server framework. +% +% This program is free software: you can redistribute it and/or modify it under +% the terms of the GNU Affero General Public License as published by the Free +% Software Foundation, either version 3 of the License, or (at your option) any +% later version. +% +% This program is distributed in the hope that it will be useful, but WITHOUT +% ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +% FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +% details. 
+%
+% You should have received a copy of the GNU Affero General Public License along
+% with this program. If not, see <http://www.gnu.org/licenses/>.
+%
+% Author: Olivier Boudeville (olivier.boudeville@esperide.com)
+% Creation date: Saturday, March 28, 2020.
+
+
+-module(class_USScheduler_test).
+
+
+% For spawn:
+-export([ operate_loop/0 ]).
+
+% To silence unused code:
+-export([ check_command_acks/1 ]).
+
+
+% Test target:
+-include("traces_for_tests.hrl").
+
+
+
+
+% Main loop of an actuator test process.
+operate_loop() ->
+
+	receive
+
+		{ operate, [ Pid, Name ] } ->
+			trace_utils:trace_fmt( "--> Actuator ~w just operated on behalf "
+								   "of ~s.", [ self(), Name ] ),
+			Pid ! { operated, Name, self() },
+			operate_loop();
+
+		stop ->
+			trace_utils:trace_fmt( "--> Actuator ~w stopped.", [ self() ] )
+
+	end.
+
+
+
+% Checks that the exact number of command acks has been received.
+check_command_acks( TotalExpectedSchedulings ) ->
+
+	% Hopefully at their right moment:
+	test_facilities:display(
+		"Checking that all commands were actually executed." ),
+
+	wait_for_command_acks( TotalExpectedSchedulings ),
+
+	test_facilities:display(
+		"Checking that no extraneous command has been executed." ),
+
+	% Wait for any extraneous, faulty command ack (acks are 3-tuples, as sent
+	% by operate_loop/0):
+	receive
+
+		{ operated, _Name, _AnyPid } ->
+			throw( extraneous_command_ack )
+
+	after 1000 ->
+
+		ok
+
+	end.
+
+
+
+% Waits for the specified number of command acks.
+wait_for_command_acks( _Count=0 ) ->
+	trace_utils:debug( "All command acks received." ),
+	ok;
+
+wait_for_command_acks( Count ) ->
+
+	receive
+
+		{ operated, Name, _AnyPid } ->
+			NewCount = Count - 1,
+			trace_utils:debug_fmt( "Received a command ack regarding ~s; "
+				"still waiting for ~B of them.", [ Name, NewCount ] ),
+			wait_for_command_acks( NewCount )
+
+	end.
+
+
+
+% Returns a suitable task command for named requester.
+get_command( Name ) ->
+	% Oneway call:
+	{ operate, [ self(), Name ] }.
+
+
+
+% Runs the tests.
+-spec run() -> no_return().
+run() -> + + ?test_start, + + test_facilities:start( ?MODULE ), + + test_facilities:display( "Testing the scheduling services." ), + + SchedPid = class_USScheduler:new_link(), + + FirstActuatorPid = spawn_link( ?MODULE, operate_loop, [] ), + + test_facilities:display( "Scheduler ~w created, interacting with test " + "actuator ~w by registering new tasks.", [ SchedPid, FirstActuatorPid ] ), + + + test_facilities:display( "Testing single schedulings." ), + + test_facilities:display( "First testing, as immediate schedulings." ), + + SchedPid ! { triggerOneshotTask, [ get_command( first ), FirstActuatorPid ], + self() }, + + task_done = test_receive(), + + + SchedPid ! { registerOneshotTask, [ get_command( second ), + _SecondOneshotStartTime=asap, FirstActuatorPid ], self() }, + + task_done = test_receive(), + + + SchedPid ! { registerOneshotTask, [ get_command( third ), + _ThirdOneshotStartTime=flexible, FirstActuatorPid ], self() }, + + task_done = test_receive(), + + + + test_facilities:display( "Second testing, as deferred schedulings." ), + + % Duration-based schedule, in one second from now: + SchedPid ! { registerOneshotTask, [ get_command( fourth ), + _OneshotDHMSDuration={ 0, 0, 0, 1 }, FirstActuatorPid ], self() }, + + { task_registered, _IdFourthTask=4 } = test_receive(), + + + % Timestamp-based schedule, in two seconds from now: + NowTimestamp = time_utils:get_timestamp(), + + OneshotFutureTimestamp = + time_utils:offset_timestamp( NowTimestamp, _Seconds=2 ), + + SchedPid ! { registerOneshotTask, [ get_command( fifth ), + OneshotFutureTimestamp, FirstActuatorPid ], self() }, + + { task_registered, _IdFifthTask=5 } = test_receive(), + + + + test_facilities:display( "Testing multiple (deferred) schedulings." ), + + % Expected to be unregistered prior its sole trigger: + SixthDHMSDuration={ 0, 0, 0, 30 }, + + % Actually a single scheduling: + SchedPid ! 
{ registerTask, [ get_command( sixth ), SixthDHMSDuration, + _SixthPeriodicity=once, _SixthCount=1, FirstActuatorPid ], + self() }, + + { task_registered, IdSixthTask=6 } = test_receive(), + + + % After 1 second: every 2 seconds, for a total of 5 times. + + SeventhDHMSDuration = { 0, 0, 0, 1 }, + + SchedPid ! { registerTask, [ get_command( seventh ), SeventhDHMSDuration, + _SeventhPeriodicity=2, _SeventhCount=5, FirstActuatorPid ], self() }, + + { task_registered, IdSeventhTask=7 } = test_receive(), + + + % To wait not a lot, for the seventh task to still have triggers: + WaitMs = 2000, + + % To wait for the seventh task to be fully over: + %WaitMs = 15000, + + test_facilities:display( "Waiting ~s before unregistering tasks.", + [ time_utils:duration_to_string( WaitMs ) ] ), + + timer:sleep( WaitMs ), + + + test_facilities:display( "Testing the unregistering of tasks." ), + + NonExistingId = 1000, + + test_facilities:display( "Next error about the unregistering of task ~B " + "is expected:", [ NonExistingId ] ), + + SchedPid ! { unregisterTask, NonExistingId, self() }, + + receive + + { wooper_result, + { task_unregistration_failed, never_existed } } -> + ok + + end, + + + IdFirstTask = 1, + + % First task already done by design: + SchedPid ! { unregisterTask, IdFirstTask, self() }, + + receive + + { wooper_result, task_already_done } -> + ok + + end, + + % Supposedly still active: + SchedPid ! { unregisterTask, IdSixthTask, self() }, + + receive + + { wooper_result, task_unregistered } -> + ok + + end, + + % First done, seventh supposedly still active: + ToUnregister = [ IdSixthTask, IdSeventhTask ], + SchedPid ! 
{ unregisterTasks, [ ToUnregister ], self() },
+
+	% Very approximate, but sufficient for this test (false increment to
+	% outsmart the compiler):
+	%
+	case WaitMs + basic_utils:identity( 0 ) > 10000 of
+
+		% Task 7 must be over then:
+		true ->
+			receive
+
+				{ wooper_result, [ task_already_done, task_already_done ] } ->
+					ok
+
+			end;
+
+		% Task 7 not fully completed:
+		false ->
+			receive
+
+				{ wooper_result, [ task_already_done, task_unregistered ] } ->
+					ok
+
+			end
+
+	end,
+
+	test_facilities:display(
+		"Checking that no extraneous registration ack was received." ),
+
+	receive
+
+		{ wooper_result, { task_registered, AnyCount } } ->
+			throw( { extra_task_registration_ack, AnyCount } )
+
+	after 1000 ->
+
+		ok
+
+	end,
+
+	% Cannot be done now that the seventh task is unregistered at an unspecified
+	% time:
+	%
+	%check_command_acks( _TotalExpectedSchedulings=11 ),
+
+	test_facilities:display( "Test success, tearing down." ),
+
+	FirstActuatorPid ! stop,
+
+	wooper:delete_synchronously_instance( SchedPid ),
+
+	?test_stop.