diff --git a/.native/hassio_supervisor.sh b/.native/hassio_supervisor.sh
deleted file mode 100755
index 692c42be..00000000
--- a/.native/hassio_supervisor.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-echo " "
-echo "Ensure that you have read the documentation on installing Hass.io before continuing."
-echo "Not following the installation instructions may render you system to be unable to connect to the internet."
-echo "Hass.io Documentation: "
-echo " https://sensorsiot.github.io/IOTstack/Containers/Home-Assistant/"
-
-echo " "
-sleep 1
-
-read -r -n 1 -p "Press Y to continue, any other key to cancel " response;
-
-if [[ $response == "y" || $response == "Y" ]]; then
- echo "Install requirements for Hass.io"
- sudo apt install -y bash jq curl avahi-daemon dbus
- hassio_machine=$(whiptail --title "Machine type" --menu \
- "Please select you device type" 20 78 12 -- \
- "raspberrypi4-64" " " \
- "raspberrypi4" " " \
- "raspberrypi3-64" " " \
- "raspberrypi3" " " \
- "raspberrypi2" " " \
- "qemux86" " " \
- "qemux86-64" " " \
- "qemuarm" " " \
- "qemuarm-64" " " \
- "orangepi-prime" " " \
- "odroid-xu" " " \
- "odroid-c2" " " \
- "intel-nuc" " " \
- "tinker" " " \
- 3>&1 1>&2 2>&3)
-
- if [ -n "$hassio_machine" ]; then
- sudo systemctl disable ModemManager
- sudo systemctl stop ModemManager
- curl -sL "https://raw.githubusercontent.com/Kanga-Who/home-assistant/master/supervised-installer.sh" | sudo bash -s -- -m $hassio_machine
- clear
- exit 0
- else
- clear
- echo "No selection"
- exit 4
- fi
- clear
- exit 3
-else
- clear
- exit 5
-fi
\ No newline at end of file
diff --git a/.templates/deconz/hardware_list.yml b/.templates/deconz/hardware_list.yml
old mode 100755
new mode 100644
diff --git a/.templates/env.yml b/.templates/env.yml
old mode 100755
new mode 100644
diff --git a/.templates/example_template/example_service.yml b/.templates/example_template/example_service.yml
old mode 100755
new mode 100644
diff --git a/.templates/home_assistant/service.yml b/.templates/home_assistant/service.yml
index 54692d8b..a03c4b1e 100644
--- a/.templates/home_assistant/service.yml
+++ b/.templates/home_assistant/service.yml
@@ -8,4 +8,9 @@ home_assistant:
volumes:
- /etc/localtime:/etc/localtime:ro
- ./volumes/home_assistant:/config
-
+ - /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket
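+    # Raspberry Pi serial, VideoCore and GPIO devices, for integrations that need direct hardware access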
+ devices:
+ - "/dev/ttyAMA0:/dev/ttyAMA0"
+ - "/dev/vcio:/dev/vcio"
+ - "/dev/gpiomem:/dev/gpiomem"
+ privileged: true
diff --git a/.templates/influxdb/terminal.sh b/.templates/influxdb/terminal.sh
index 6486beb8..54c464e5 100755
--- a/.templates/influxdb/terminal.sh
+++ b/.templates/influxdb/terminal.sh
@@ -12,4 +12,4 @@ echo "to exit type: EXIT"
echo ""
echo "docker exec -it influxdb influx"
-docker exec -it influxdb influx
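+# arguments passed to this script are forwarded to the influx CLI, eg: ./terminal.sh -execute 'SHOW DATABASES'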
+docker exec -it influxdb influx "$@"
diff --git a/.templates/nodered/addons.yml b/.templates/nodered/addons.yml
old mode 100755
new mode 100644
diff --git a/.templates/prometheus-cadvisor/service.yml b/.templates/prometheus-cadvisor/service.yml
index 96cfd558..66c53af8 100644
--- a/.templates/prometheus-cadvisor/service.yml
+++ b/.templates/prometheus-cadvisor/service.yml
@@ -1,5 +1,5 @@
prometheus-cadvisor:
- container_name: cadvisor
+ container_name: prometheus-cadvisor
image: zcube/cadvisor:latest
restart: unless-stopped
ports:
diff --git a/.templates/prometheus-nodeexporter/service.yml b/.templates/prometheus-nodeexporter/service.yml
index 879968f3..517fbea5 100644
--- a/.templates/prometheus-nodeexporter/service.yml
+++ b/.templates/prometheus-nodeexporter/service.yml
@@ -1,5 +1,5 @@
prometheus-nodeexporter:
- container_name: nodeexporter
+ container_name: prometheus-nodeexporter
image: prom/node-exporter:latest
restart: unless-stopped
expose:
diff --git a/.templates/prometheus/service.yml b/.templates/prometheus/service.yml
index fa02549b..702678c8 100644
--- a/.templates/prometheus/service.yml
+++ b/.templates/prometheus/service.yml
@@ -18,6 +18,6 @@ prometheus:
# - --web.console.libraries=/usr/share/prometheus/console_libraries
# - --web.console.templates=/usr/share/prometheus/consoles
depends_on:
- - cadvisor
- - nodeexporter
+ - prometheus-cadvisor
+ - prometheus-nodeexporter
diff --git a/.templates/webthings_gateway/local.json b/.templates/webthings_gateway/local.json
old mode 100755
new mode 100644
diff --git a/.templates/zigbee2mqtt_assistant/service.yml b/.templates/zigbee2mqtt_assistant/service.yml
old mode 100755
new mode 100644
diff --git a/docs/Basic_setup/Backup-and-Restore.md b/docs/Basic_setup/Backup-and-Restore.md
old mode 100755
new mode 100644
diff --git a/docs/Basic_setup/Default-Configs.md b/docs/Basic_setup/Default-Configs.md
old mode 100755
new mode 100644
diff --git a/docs/Basic_setup/Updates/New-Menu-Release-Notes.md b/docs/Basic_setup/Updates/New-Menu-Release-Notes.md
old mode 100755
new mode 100644
diff --git a/docs/Containers/Blynk_server.md b/docs/Containers/Blynk_server.md
index 95a5b90d..d5bf3260 100644
--- a/docs/Containers/Blynk_server.md
+++ b/docs/Containers/Blynk_server.md
@@ -115,7 +115,7 @@ The remaining instructions in the *Dockerfile* customise the ***base image*** to
The ***local image*** is instantiated to become your running container.
-When you run the `docker images` command after Blynk Server has been built, you will see two rows that are relevant:
+When you run the `docker images` command after Blynk Server has been built, you *may* see two rows that are relevant:
```bash
$ docker images
@@ -127,7 +127,9 @@ ubuntu latest 897590a6c564 7 days ago 49.8MB
* `ubuntu ` is the ***base image***; and
* `iotstack_blynk_server ` is the ***local image***.
-You will see the same pattern in *Portainer*, which reports the ***base image*** as "unused". You should not remove the ***base*** image, even though it appears to be unused.
+You *may* see the same pattern in *Portainer*, which reports the ***base image*** as "unused". You should not remove the ***base*** image, even though it appears to be unused.
+
+> Whether you see one or two rows depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
## Logging
@@ -216,6 +218,8 @@ At the time of writing, version 0.41.16 was the most up-to-date. Suppose that ve
$ docker system prune -f
```
+ The second `prune` will only be needed if there is an old *base image* and that, in turn, depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
+
## Using Blynk Server
See the [References](#references) for documentation links.
diff --git a/docs/Containers/Dozzle.md b/docs/Containers/Dozzle.md
new file mode 100644
index 00000000..04d7031b
--- /dev/null
+++ b/docs/Containers/Dozzle.md
@@ -0,0 +1,12 @@
+# Dozzle
+
+## Reference
+* [Dozzle GitHub](https://github.com/amir20/dozzle)
+
+## Web interface
+The web interface is available at `your_ip:8889`.
+
+## About *Dozzle*
+Dozzle is a small, lightweight application with a web-based interface for monitoring Docker logs.
+It doesn’t store any log files. It is for live monitoring of your container logs only.
\ No newline at end of file
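+
+As a sketch of what to expect, the service definition added to `docker-compose.yml` will look something like this (the exact IOTstack template may differ; the image name and internal port 8080 come from the Dozzle documentation):
+
+```yaml
+dozzle:
+  container_name: dozzle
+  image: amir20/dozzle:latest
+  restart: unless-stopped
+  ports:
+    - "8889:8080"
+  volumes:
+    - /var/run/docker.sock:/var/run/docker.sock
+```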
diff --git a/docs/Containers/Home-Assistant.md b/docs/Containers/Home-Assistant.md
index aa03d63a..0ea204c4 100644
--- a/docs/Containers/Home-Assistant.md
+++ b/docs/Containers/Home-Assistant.md
@@ -1,6 +1,6 @@
# Home Assistant
-Home Assistant is a home automation platform running on Python 3. It is able to track and control all devices at your home and offer a platform for automating control.
+Home Assistant is a home automation platform. It can track and control the devices in your home and lets you build automations around them.
## References
@@ -13,216 +13,184 @@ Home Assistant is a home automation platform running on Python 3. It is able to
- [DockerHub](https://hub.docker.com/r/homeassistant/home-assistant/)
-## Home Assistant: two versions
+## Home Assistant: two versions
There are two versions of Home Assistant:
-* Hass.io (Home Assistant Core), and
-* Home Assistant Container.
+* Home Assistant Container; and
+* Supervised Home Assistant (previously known as "Hass.io").
Each version:
* provides a web-based management interface on port 8123; and
* runs in "host mode" in order to discover devices on your LAN, including devices communicating via multicast traffic.
-IOTstack allows you to **install** either, or both, versions.
+Home Assistant Container runs as a **single** Docker container, and doesn't support all the features that Supervised Home Assistant does (such as add-ons). Supervised Home Assistant runs as a **collection** of Docker containers under its own orchestration.
-Note:
+Technically, both versions of Home Assistant can be installed on your Raspberry Pi but you can't **run** both at the same time. Each version runs in "host mode" and binds to port 8123 so, in practice, the first version to start will claim the port and the second will then be blocked.
-* Technically, both versions can **run** at the same time but it is not **supported**. Each version runs in "host mode" and binds to port 8123 so, in practice, the first version to start will claim the port and the second version will then be blocked.
+IOTstack used to offer a menu entry leading to a convenience script that could install Supervised Home Assistant but that stopped working when Home Assistant changed their approach. Now, the only method supported by IOTstack is Home Assistant Container.
-### Hass.io
-
-Hass.io uses its own orchestration:
-
-* hassio\_supervisor
-* hassio\_audio
-* hassio\_cli
-* hassio\_dns
-* hassio\_multicast
-* hassio\_observer
-* homeassistant.
-
-IOTstack can only offer limited configuration of Hass.io since it is its own platform.
-
-### Home Assistant Container
+### Installing Home Assistant Container
-Home Assistant Container runs as a single Docker container, and doesn't support all the features that Hass.io does (such as add-ons).
+Home Assistant (Container) can be found in the `Build Stack` menu. Selecting it in this menu results in a service definition being added to:
-## Menu installation
+```
+~/IOTstack/docker-compose.yml
+```
-### Installing Hass.io
+When you choose "Home Assistant", the service definition added to your `docker-compose.yml` includes the following:
-Hass.io creates a conundrum:
+```yaml
+image: ghcr.io/home-assistant/home-assistant:stable
+#image: ghcr.io/home-assistant/raspberrypi3-homeassistant:stable
+#image: ghcr.io/home-assistant/raspberrypi4-homeassistant:stable
+```
-* If you are definitely going to install Hass.io then you **must** install its dependencies **before** you install Docker.
-* One of Hass.io's dependencies is [Network Manager](https://wiki.archlinux.org/index.php/NetworkManager). Network Manager makes **serious** changes to your operating system, with side-effects you may not expect such as giving your Raspberry Pi's WiFi interface a random MAC address both during the installation and, then, each time you reboot. You are in for a world of pain if you install Network Manager without first understanding what is going to happen and planning accordingly.
-* If you don't install Hass.io's dependencies before you install Docker, you will either have to uninstall Docker or rebuild your system. This is because both Docker and Network Manager adjust your Raspberry Pi's networking. Docker is happy to install after Network Manager, but the reverse is not true.
+The active image is *generic* in the sense that it should work on any platform. You may wish to edit your `docker-compose.yml` to deactivate the generic image in favour of an image tailored to your hardware.
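+
+For example, on a Raspberry Pi 4 the edit might look like this (a sketch only, using the image tags shown above):
+
+```yaml
+#image: ghcr.io/home-assistant/home-assistant:stable
+#image: ghcr.io/home-assistant/raspberrypi3-homeassistant:stable
+image: ghcr.io/home-assistant/raspberrypi4-homeassistant:stable
+```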
-#### Step 1: If Docker is already installed, uninstall it
+The normal IOTstack commands apply to Home Assistant Container such as:
```bash
-$ sudo apt -y purge docker-ce docker-ce-cli containerd.io
-$ sudo apt -y remove docker-compose
-$ sudo pip3 uninstall docker-compose
+$ cd ~/IOTstack
+$ docker-compose up -d
```
-Note:
-
-* Removing Docker does **not** interfere with your existing `~/IOTstack` folder.
-
-#### Step 2: Ensure your system is fully up-to-date
+### Installing Supervised Home Assistant
-```bash
-$ sudo apt update
-$ sudo apt upgrade -y
-```
+The direction being taken by the Home Assistant folks is to supply a ready-to-run image for your Raspberry Pi. That effectively dedicates your Raspberry Pi to Home Assistant and precludes running IOTstack and containers like Mosquitto, InfluxDB, Node-RED, Grafana, PiHole and WireGuard alongside it.
-#### Step 3: Install Hass.io dependencies (stage 1)
+Alternatively, you can try to install Supervised Home Assistant manually using their [installation instructions for advanced users](https://github.com/home-assistant/supervised-installer) and, once that is working, install IOTstack. In theory this should work, but it isn't tested or supported.
-```bash
-$ sudo apt install -y apparmor apparmor-profiles apparmor-utils
-$ sudo apt install -y software-properties-common apt-transport-https ca-certificates dbus
-```
+The recommended approach is to start from a clean slate and use [PiBuilder](https://github.com/Paraphraser/PiBuilder).
-#### Step 4: Connect to your Raspberry Pi via Ethernet
+When you visit the PiBuilder link you may well have a reaction like "all far too complicated" but you should try to get past that. PiBuilder has two main use-cases:
-You can skip this step if you interact with your Raspberry Pi via a screen connected to its HDMI port, along with a keyboard and mouse.
+1. Getting a Raspberry Pi built for IOTstack (and, optionally, Supervised Home Assistant) with the least fuss.
+2. Letting you record all your own customisations so that you can rebuild your Pis quickly with all your customisations already in place (the "magic smoke" scenario).
-If, however, you are running "headless" (SSH or VNC), we **strongly recommend** connecting your Raspberry Pi to Ethernet. This is only a temporary requirement. You can return to WiFi-only operation after Hass.io is installed.
+It's the second use-case that produces most of the apparent complexity you see when you read the [PiBuilder README](https://github.com/Paraphraser/PiBuilder/blob/master/README.md) for the first time.
-When the Ethernet interface initialises, work out its IP address:
+The first time you use PiBuilder, the process boils down to:
-```bash
-$ ifconfig eth0
+1. Clone the PiBuilder repo onto your support host (Mac, Windows, etc).
+2. Customise two files within the PiBuilder scope:
-eth0: flags=4163 mtu 1500
- inet 192.168.132.9 netmask 255.255.255.0 broadcast 192.168.132.255
- ether ab:cd:ef:12:34:56 txqueuelen 1000 (Ethernet)
- RX packets 4166292 bytes 3545370373 (3.3 GiB)
- RX errors 0 dropped 0 overruns 0 frame 0
- TX packets 2086814 bytes 2024386593 (1.8 GiB)
- TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
-```
+ - `wpa_supplicant.conf`
+ - `options.sh` where, among other things, you will enable:
-In the above, the IP address assigned to the Ethernet interface is on the second line of output, to the right of "inet": 192.168.132.9.
+ - `HOME_ASSISTANT_SUPERVISED_INSTALL=true`
-Drop out of your existing session (SSH or VNC) and re-connect to your Raspberry Pi using the IP address assigned to its Ethernet interface:
+3. Choose a Raspbian image and transfer it to your installation media (SD/SSD). The imaging tools typically finish by ejecting the installation media.
+4. Re-mount the installation media on your support host and either:
-```bash
-$ ssh pi@192.168.132.9
-```
+ - Run the supplied `setup_boot_volume.sh` script (if your support host is macOS or Unix); or
+ - Just drag the *contents* of the PiBuilder "boot" folder into the top level of the "/boot" partition on your installation media (if your support host is Windows).
-or:
+5. Move the installation media to your Raspberry Pi and apply power.
+6. Run the scripts in order:
-```
-vnc://pi@192.168.132.9
-```
+ Step | Command run on support host | Command run on Raspberry Pi
+ :---:|-----------------------------------|-------------
+ 1 | `ssh -4 pi@raspberrypi.local` |
+ 2 | | `/boot/scripts/01_setup.sh «name»`
+ 3 | `ssh-keygen -R raspberrypi.local` |
+ 4 | `ssh -4 pi@«name».local` |
+ 5 | | `/boot/scripts/02_setup.sh`
+ 6 | `ssh pi@«name».local` |
+ 7 | | `/boot/scripts/03_setup.sh`
+ 8 | `ssh pi@«name».local` |
+ 9 | | `/boot/scripts/04_setup.sh`
+ 10 | `ssh pi@«name».local` |
+ 11 | | `/boot/scripts/05_setup.sh`
-The reason for stipulating the IP address, rather than a name like `raspberrypi.local` is so that you are *definitely* connected to the Ethernet interface.
+ where «name» is the name you give to your Raspberry Pi (eg "iot-hub").
-If you ignore the advice about connecting via Ethernet and install Network Manager while your session is connected via WiFi, your connection will freeze part way through the installation (when Network Manager starts running and unconditionally changes your Raspberry Pi's WiFi MAC address).
+After step 9, Supervised Home Assistant will be running. The `04_setup.sh` script also deals with the [random MACs](#why-random-macs-are-such-a-hassle) problem. After step 11, you'll be able to either:
-You *may* be able to re-connect after the WiFi interface acquires a new IP address and advertises that via multicast DNS associated with the name of your device (eg `raspberrypi.local`), but you may also find that the only way to regain control is to power-cycle your Raspberry Pi.
+1. Restore a backup; or
+2. Run the IOTstack menu and choose your containers.
-The advice about using Ethernet is well-intentioned. You should heed this advice even if means you need to temporarily relocate your Raspberry Pi just so you can attach it via Ethernet for the next few steps. You can go back to WiFi later, once everything is set up. You have been warned!
+## Why random MACs are such a hassle
-#### Step 5: Install Hass.io dependencies (stage 2)
+> This material was originally posted as part of [Issue 312](https://github.com/SensorsIot/IOTstack/issues/312). It was moved here following a suggestion by [lole-elol](https://github.com/lole-elol).
-Install Network Manager:
+When you connect to a Raspberry Pi via SSH (Secure Shell), that's a layer 7 protocol that is riding on top of TCP/IP. TCP (Transmission Control Protocol) is a layer 4 connection-oriented protocol which rides on IP (Internet Protocol) which is a layer 3 protocol. So far, so good.
-```bash
-$ sudo apt install -y network-manager
-```
+But you also need to know what happens at layers 2 and 1. When your SSH client (eg Mac or PC or another Unix box) opens its SSH connection, at layer 3 the IP stack applies the subnet mask against the IP addresses of both the source device (your Mac, PC, etc) and destination device (Raspberry Pi) to split them into "network portion" (on the left) and "host portion" on the right. It then compares the two network portions and, if they are the same, it says "local network".
-#### Step 6: Consider disabling random MAC address allocation
+> To complete the picture, if they do not compare the same, then IP substitutes the so-called "default gateway" address (ie your router) and repeats the mask-and-compare process which, unless something is seriously mis-configured, will result in those comparing the same and being "local network". This is why data-comms gurus sometimes say, "all networking is local".
-To understand why you should consider disabling random MAC address allocation, see [why random MACs are such a hassle ](#why-random-macs-are-such-a-hassle).
+What happens next depends on the data communications media but we'll assume Ethernet and WiFi seeing as they are pretty much interchangeable for our purposes.
-You can stop Network Manager from allocating random MAC addresses to your WiFi interface by running the following commands:
+The source machine (Mac, PC, etc) issues an ARP (address resolution protocol). It is a broadcast frame (we talk about "frames" rather than "packets" at Layer 2) asking the question, "who has this destination IP address?" The Raspberry Pi responds with a unicast packet saying, "that's me" and part of that includes the MAC (media access control) address of the Raspberry Pi. The source machine only does this **once** (and this is a key point). It assumes the relationship between IP address and MAC address will not change and it adds the relationship to its "ARP cache". You can see the cache on any Unix computer with:
```bash
-$ sudo sed -i.bak '$a\\n[device]\nwifi.scan-rand-mac-address=no\n' /etc/NetworkManager/NetworkManager.conf
-$ sudo systemctl restart NetworkManager.service
+$ arp -a
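+# a typical entry looks like this (addresses illustrative only):
+raspberrypi.local (192.168.1.123) at dc:a6:32:01:23:45 [ether] on eth0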
```
-Acknowledgement:
+The Raspberry Pi makes the same assumption: it has learned both the IP and MAC address of the source machine (Mac, PC, etc) from the ARP request and has added that to its own ARP cache.
-* This tip came from [@steveatk on Discord](https://discordapp.com/channels/638610460567928832/638610461109256194/758825690715652116).
+In addition, every layer two switch (got one of those in your home?) has been snooping on this traffic and has learned, for each of its ports, which MAC address(es) are on those ports.
-#### Step 7: Re-install Docker
+Not "MAC **and** IP". A switch works at Layer 2. All it sees are frames. It only caches MAC addresses!
-You can re-install Docker using the IOTstack menu or one of the scripts provided with IOTstack but the following commands guarantee an up-to-date version of `docker-compose` and also include a dependency needed if you want to run with the 64-bit kernel:
+When the switch saw the "who has?" ARP broadcast, it replicated that out of all of its ports but when the "that's me" came back from the Raspberry Pi as a unicast response, it only went out on the switch port where the source machine (Mac, PC, etc) was attached.
-```bash
-$ curl -fsSL https://get.docker.com | sh
-$ sudo usermod -G docker -a $USER
-$ sudo usermod -G bluetooth -a $USER
-$ sudo apt install -y python3-pip python3-dev
-$ [ "$(uname -m)" = "aarch64" ] && sudo apt install libffi-dev
-$ sudo pip3 install -U docker-compose
-$ sudo pip3 install -U ruamel.yaml==0.16.12 blessed
-$ sudo reboot
-```
+After that, it's all caching. The Mac or PC has a packet to send to the Pi. It finds the hit in its ARP cache, wraps the packet in a frame and sends it out its Ethernet or WiFi interface. Any switches receive the frame, consult their own tables, and send the frame out the port on the next hop to the destination device. It doesn't matter whether you have one switch or several in a cascade, they have all learned the "next hop" to each destination MAC address they have seen.
-Note:
+Ditto when the Pi sends back any reply packets. ARP. Switch. Mac/PC. All cached.
-* Installing or re-installing Docker does **not** interfere with your existing `~/IOTstack` folder.
+The same basic principles apply, irrespective of whether the "switching function" is wired (Ethernet) or WiFi, so it doesn't really matter if your home arrangement is as straightforward as Mac or PC and Pi, both WiFi, via a local WiFi "hub" which is either standalone or part of your router. If something is capable of learning where a MAC is, it does.
-#### Step 8: Run the Hass.io installation
+Still so far so good.
-Start at:
+Now comes the problem. You have established an SSH session connected to the Pi over its WiFi interface. You install Network Manager. As part of its setup, Network Manager discards the **fixed** MAC address which is burned into the Pi's WiFi interface and substitutes a randomly generated MAC address. It doesn't ask for permission to do that. It doesn't warn you it's about to do it. It just does it.
-```bash
-$ cd ~/IOTstack
-$ ./menu.sh
-```
+When the WiFi interface comes up, it almost certainly "speaks" straight away via DHCP to ask for an IP address. The DHCP server looks in its own table of MAC-to-IP associations (fixed or dynamic, doesn't matter) and says "never seen **that** MAC before - here's a brand new IP address lease".
-Hass.io installation can be found inside the `Native Installs` menu on the main menu. You will be asked to select your device type during the installation.
+The DHCP request is broadcast so all the switches will have learned the new MAC but they'll also still have the old MAC (until it times out). The Mac/PC will receive the DHCP broadcast but, unless it's the DHCP server, will discard it. Either way, it has no means of knowing that this new random MAC belongs to the Pi so it can't do anything sensible with the information.
-The installation of Hass.io takes up to 20 minutes (depending on your internet connection). You may also need to respond "Y" to a prompt during the installation process. Refrain from restarting your machine until it has come online and you are able to create a user account.
+Meanwhile, SSH is trying to keep the session alive. It still thinks "old IP address" and its ARP cache still thinks old IP belongs to old MAC. Switches know where the frames are meant to go but even if a frame does get somewhere near the Pi, the Pi's NIC (network interface card) ignores it because it's now the wrong destination MAC. The upshot is that SSH looks like the session has frozen and it will eventually time-out with a broken pipe.
-Hass.io installation is provided as a convenience. It is independent of, is not maintained by, and does not appear in the `docker-compose.yml` for IOTstack. Hass.io has its own service for maintaining its uptime.
+To summarise: Network Manager has changed the MAC without so much as a by-your-leave and, unless you have assigned static IP addresses **in the Raspberry Pi** it's quite likely that the Pi will have a different IP address as well. But even a static IP can't save you from the machinations of Network Manager!
-#### Re-check random MAC address allocation
+The Pi is as happy as the proverbial Larry. It goes on, blissfully unaware that it has just confused the heck out of everything else. You can speed-up some of the activities that need to happen before everything gets going again. You can do things like clear the old entry from the ARP cache on the Mac/PC. You can try to force a multicast DNS daemon restart so that the "raspberrypi.local" address gets updated more quickly but mDNS is a distributed database so it can be hit and miss (and can sometimes lead to complaints about two devices trying to use the same name). Usually, the most effective thing you can do is pull power from the Pi, reboot your Mac/PC (easiest way to clear its ARP cache) and then apply power to the Pi so that it announces its mDNS address at the right time for the newly-booted Mac/PC to hear it and update its mDNS records.
-Installing Hass.io can re-enable random MAC address allocation. You should check this via:
+That's why the installation advice says words to the effect of:
-```bash
-$ tail -3 /etc/NetworkManager/NetworkManager.conf
-[device]
-wifi.scan-rand-mac-address=no
+> whatever else you do, **don't** try to install Network Manager while you're connected over WiFi. If SSH is how you're going to do it, you're in for a world of pain if you don't run an Ethernet cable for at least that part of the process.
-```
+And it does get worse, of course. Installing Network Manager turns on random WiFi MAC. You can turn it off and go back to the fixed MAC. But then, when you install Docker, it happens again. It may also be that other packages come along in future and say, "hey, look, Network Manager is installed - let's take advantage of that" and it happens again when you least expect it.
-If you do **NOT** see `wifi.scan-rand-mac-address=no`, repeat [Step 6](#step-6-consider-disabling-random-mac-address-allocation).
+Devices changing their MACs at random is becoming reasonably common. If you have a mobile device running a reasonably current OS, it is probably changing its MAC all the time. The idea is to make it hard for Fred's Corner Store to track you and conclude, "Hey, Alex is back in the shop again."
-### Installing Home Assistant Container
+Random MACs are not a problem for a **client** device like a phone, tablet or laptop. But they are definitely a serious problem for a **server** device.
-Home Assistant can be found in the `Build Stack` menu. Selecting it in this menu results in a service definition being added to:
+> In TCP/IP any device can be a client or a server for any protocol. The distinction here is about *typical* use. A mobile device is not usually set up to *offer* services like MQTT or Node-RED. It typically *initiates* connections with servers like Docker containers running on a Raspberry Pi.
-```
-~/IOTstack/docker-compose.yml
-```
+It is not just configuration-time SSH sessions that break. If you decide to leave Raspberry Pi random WiFi MAC active **and** you have other clients (eg IoT devices) communicating with the Pi over WiFi, you will wrong-foot those clients each time the Raspberry Pi reboots. Data communications services from those clients will be impacted until those client devices time-out and catch up.
-When you choose "Home Assistant", the service definition added to your `docker-compose.yml` includes the following:
+## Using Bluetooth from the container
+
+To use Bluetooth and BLE devices from Home Assistant integrations, make sure that Bluetooth is enabled and powered on when the Raspberry Pi host starts, by editing `/etc/bluetooth/main.conf`:
-```yaml
-image: ghcr.io/home-assistant/home-assistant:stable
-#image: ghcr.io/home-assistant/raspberrypi3-homeassistant:stable
-#image: ghcr.io/home-assistant/raspberrypi4-homeassistant:stable
+```conf
+....
+[Policy]
+AutoEnable=true
```
-The active image is *generic* in the sense that it should work on any platform. You may wish to edit your `docker-compose.yml` to deactivate the generic image in favour of an image tailored to your hardware.
-
-The normal IOTstack commands apply to Home Assistant Container such as:
+After a reboot, check that Bluetooth is up:
-```bash
-$ cd ~/IOTstack
-$ docker-compose up -d
+```sh
+(root) # hciconfig
+...
+UP
+...
```
+Reference: <https://scribles.net/auto-power-on-bluetooth-adapter-on-boot-up/>
-## HTTPS with a valid certificate
+## HTTPS with a valid SSL certificate
Some HA integrations (e.g google assistant) require your HA API to be
accessible via https with a valid certificate. You can configure HA to do this:
@@ -334,98 +302,3 @@ your RPi hostname is raspberrypi)
outside your LAN(e.g. using a mobile phone):
`https://homeassistant..duckdns.org/` Now the certificate
should work without any warnings.
-
-## Deactivating Hass.io
-
-Because Hass.io is independent of IOTstack, you can't deactivate it with any of the commands you normally use for IOTstack.
-
-To deactivate Hass.io you first need to stop the service that controls it. Run the following commands in the terminal:
-
-```bash
-$ sudo systemctl stop hassio-supervisor.service
-$ sudo systemctl disable hassio-supervisor.service
-```
-
-This will stop the main service and prevent it from starting on the next boot. Next you need to stop and remove the dependent services:
-
-```bash
-$ docker stop hassio_audio hassio_cli hassio_dns hassio_multicast hassio_observer homeassistant
-$ docker rm hassio_audio hassio_cli hassio_dns hassio_multicast hassio_observer homeassistant
-```
-
-Double-check with `docker ps` to see if there are other containers running with a `hassio_` prefix. They can stopped and removed in the same fashion for `hassio_audio` and so-on.
-
-The stored files are located in `/usr/share/hassio` which can be removed if you need to.
-
-You can use Portainer to view what is running and clean up the unused images.
-
-At this point, Hass.io is stopped and will not start again after a reboot. Your options are:
-
-* Leave things as they are; or
-* Re-install Hass.io by starting over at [Installing Hass.io](#installing-hassio); or
-* Re-activate Hass.io by:
-
- ```bash
- $ sudo systemctl enable hassio-supervisor.service
- $ sudo systemctl start hassio-supervisor.service
- ```
-
-## Why random MACs are such a hassle
-
-> This material was originally posted as part of [Issue 312](https://github.com/SensorsIot/IOTstack/issues/312). It was moved here following a suggestion by [lole-elol](https://github.com/lole-elol).
-
-When you connect to a Raspberry Pi via SSH (Secure Shell), that's a layer 7 protocol that is riding on top of TCP/IP. TCP (Transmission Control Protocol) is a layer 4 connection-oriented protocol which rides on IP (Internet Protocol) which is a layer 3 protocol. So far, so good.
-
-But you also need to know what happens at layers 2 and 1. When your SSH client (eg Mac or PC or another Unix box) opens its SSH connection, at layer 3 the IP stack applies the subnet mask against the IP addresses of both the source device (your Mac, PC, etc) and destination device (Raspberry Pi) to split them into "network portion" (on the left) and "host portion" on the right. It then compares the two network portions and, if they are the same, it says "local network".
-
-> To complete the picture, if they do not compare the same, then IP substitutes the so-called "default gateway" address (ie your router) and repeats the mask-and-compare process which, unless something is seriously mis-configured, will result in those comparing the same and being "local network". This is why data-comms gurus sometimes say, "all networking is local".
-
-What happens next depends on the data communications media but we'll assume Ethernet and WiFi seeing as they are pretty much interchangeable for our purposes.
-
-The source machine (Mac, PC, etc) issues an ARP (address resolution protocol). It is a broadcast frame (we talk about "frames" rather than "packets" at Layer 2) asking the question, "who has this destination IP address?" The Raspberry Pi responds with a unicast packet saying, "that's me" and part of that includes the MAC (media access control) address of the Raspberry Pi. The source machine only does this **once** (and this is a key point). It assumes the relationship between IP address and MAC address will not change and it adds the relationship to its "ARP cache". You can see the cache on any Unix computer with:
-
-```bash
-$ arp -a
-```
-
-The Raspberry Pi makes the same assumption: it has learned both the IP and MAC address of the source machine (Mac, PC, etc) from the ARP request and has added that to its own ARP cache.
-
-In addition, every layer two switch (got one of those in your home?) has been snooping on this traffic and has learned, for each of its ports, which MAC address(es) are on those ports.
-
-Not "MAC **and** IP". A switch works at Layer 2. All it sees are frames. It only caches MAC addresses!
-
-When the switch saw the "who has?" ARP broadcast, it replicated that out of all of its ports but when the "that's me" came back from the Raspberry Pi as a unicast response, it only went out on the switch port where the source machine (Mac, PC, etc) was attached.
-
-After that, it's all caching. The Mac or PC has a packet to send to the Pi. It finds the hit in its ARP cache, wraps the packet in a frame and sends it out its Ethernet or WiFi interface. Any switches receive the frame, consult their own tables, and send the frame out the port on the next hop to the destination device. It doesn't matter whether you have one switch or several in a cascade, they have all learned the "next hop" to each destination MAC address they have seen.
-
-Ditto when the Pi sends back any reply packets. ARP. Switch. Mac/PC. All cached.
-
-The same basic principles apply, irrespective of whether the "switching function" is wired (Ethernet) or WiFi, so it doesn't really matter if your home arrangement is as straightforward as Mac or PC and Pi, both WiFi, via a local WiFi "hub" which is either standalone or part of your router. If something is capable of learning where a MAC is, it does.
-
-Still so far so good.
-
-Now comes the problem. You have established an SSH session connected to the Pi over its WiFi interface. You install Network Manager. As part of its setup, Network Manager discards the **fixed** MAC address which is burned into the Pi's WiFi interface and substitutes a randomly generated MAC address. It doesn't ask for permission to do that. It doesn't warn you it's about to do it. It just does it.
-
-When the WiFi interface comes up, it almost certainly "speaks" straight away via DHCP to ask for an IP address. The DHCP server looks in its own table of MAC-to-IP associations (fixed or dynamic, doesn't matter) and says "never seen **that** MAC before - here's a brand new IP address lease".
-
-The DHCP request is broadcast so all the switches will have learned the new MAC but they'll also still have the old MAC (until it times out). The Mac/PC will receive the DHCP broadcast but, unless it's the DHCP server, will discard it. Either way, it has no means of knowing that this new random MAC belongs to the Pi so it can't do anything sensible with the information.
-
-Meanwhile, SSH is trying to keep the session alive. It still thinks "old IP address" and its ARP cache still thinks old IP belongs to old MAC. Switches know where the frames are meant to go but even if a frame does get somewhere near the Pi, the Pi's NIC (network interface card) ignores it because it's now the wrong destination MAC. The upshot is that SSH looks like the session has frozen and it will eventually time-out with a broken pipe.
-
-To summarise: Network Manager has changed the MAC without so much as a by-your-leave and, unless you have assigned static IP addresses **in the Raspberry Pi** it's quite likely that the Pi will have a different IP address as well. But even a static IP can't save you from the machinations of Network Manager!
-
-The Pi is as happy as the proverbial Larry. It goes on, blissfully unaware that it has just confused the heck out of everything else. You can speed-up some of the activities that need to happen before everything gets going again. You can do things like clear the old entry from the ARP cache on the Mac/PC. You can try to force a multicast DNS daemon restart so that the "raspberrypi.local" address gets updated more quickly but mDNS is a distributed database so it can be hit and miss (and can sometimes lead to complaints about two devices trying to use the same name). Usually, the most effective thing you can do is pull power from the Pi, reboot your Mac/PC (easiest way to clear its ARP cache) and then apply power to the Pi so that it announces its mDNS address at the right time for the newly-booted Mac/PC to hear it and update its mDNS records.
-
-That's why the installation advice says words to the effect of:
-
-> whatever else you do, **don't** try to install Network Manager while you're connected over WiFi. If SSH is how you're going to do it, you're in for a world of pain if you don't run an Ethernet cable for at least that part of the process.
-
-And it does get worse, of course. Installing Network Manager turns on random WiFi MAC. You can turn it off and go back to the fixed MAC. But then, when you install Docker, it happens again. It may also be that other packages come along in future and say, "hey, look, Network Manager is installed - let's take advantage of that" and it happens again when you least expect it.
-
-Devices changing their MACs at random is becoming reasonably common. If you have a mobile device running a reasonably current OS, it is probably changing its MAC all the time. The idea is to make it hard for Fred's Corner Store to track you and conclude, "Hey, Alex is back in the shop again."
-
-Random MACs are not a problem for a **client** device like a phone, tablet or laptop. But they are definitely a serious problem for a **server** device.
-
-> In TCP/IP any device can be a client or a server for any protocol. The distinction here is about *typical* use. A mobile device is not usually set up to *offer* services like MQTT or Node-RED. It typically *initiates* connections with servers like Docker containers running on a Raspberry Pi.
-
-It is not just configuration-time SSH sessions that break. If you decide to leave Raspberry Pi random Wifi MAC active **and** you have other clients (eq IoT devices) communicating with the Pi over WiFi, you will wrong-foot those clients each time the Raspberry Pi reboots. Data communications services from those clients will be impacted until those client devices time-out and catch up.
diff --git a/docs/Containers/Mosquitto.md b/docs/Containers/Mosquitto.md
index 8811111d..282e4fd9 100644
--- a/docs/Containers/Mosquitto.md
+++ b/docs/Containers/Mosquitto.md
@@ -128,7 +128,7 @@ The remaining instructions in the *Dockerfile* customise the *base image* to pro
The *local image* is instantiated to become your running container.
-When you run the `docker images` command after Mosquitto has been built, you will see two rows for Mosquitto:
+When you run the `docker images` command after Mosquitto has been built, you *may* see two rows for Mosquitto:
```bash
$ docker images
@@ -140,7 +140,9 @@ eclipse-mosquitto latest 46ad1893f049 4 weeks ago 8.31MB
* `eclipse-mosquitto` is the *base image*; and
* `iotstack_mosquitto` is the *local image*.
-You will see the same pattern in Portainer, which reports the *base image* as "unused". You should not remove the *base* image, even though it appears to be unused.
+You *may* see the same pattern in Portainer, which reports the *base image* as "unused". You should not remove the *base* image, even though it appears to be unused.
+
+> Whether you see one or two rows depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
### Migration considerations
@@ -632,7 +634,7 @@ Breaking it down into parts:
Your existing Mosquitto container continues to run while the rebuild proceeds. Once the freshly-built *local image* is ready, the `up` tells `docker-compose` to do a new-for-old swap. There is barely any downtime for your MQTT broker service.
-The `prune` is the simplest way of cleaning up. The first call removes the old *local image*. The second call cleans up the old *base image*.
+The `prune` is the simplest way of cleaning up. The first call removes the old *local image*. The second call cleans up the old *base image*. Whether an old *base image* exists depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
### Mosquitto version pinning
diff --git a/docs/Containers/NextCloud.md b/docs/Containers/NextCloud.md
index 649aba0a..f649757f 100644
--- a/docs/Containers/NextCloud.md
+++ b/docs/Containers/NextCloud.md
@@ -288,7 +288,7 @@ $ docker system prune
$ docker system prune
```
-The first "prune" removes the old *local* image, the second removes the old *base* image.
+The first "prune" removes the old *local* image, the second removes the old *base* image. Whether an old *base image* exists depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
## Backups
diff --git a/docs/Containers/Node-RED.md b/docs/Containers/Node-RED.md
index 32d12276..1095d409 100644
--- a/docs/Containers/Node-RED.md
+++ b/docs/Containers/Node-RED.md
@@ -115,7 +115,7 @@ Notes:
> Acknowledgement: Successful installation of the SQLite node is thanks to @fragolinux.
-When you run the `docker images` command after Node-RED has been built, you will see two rows for Node-RED:
+When you run the `docker images` command after Node-RED has been built, you *may* see two rows for Node-RED:
```bash
$ docker images
@@ -127,12 +127,14 @@ nodered/node-red latest deb99584fa75 5 days ago
* `nodered/node-red` is the *base image*; and
* `iotstack_nodered` is the *local image*. The *local* image is the one that is instantiated to become the running container.
-You will see the same pattern in Portainer, which reports the *base image* as "unused":
+You *may* see the same pattern in Portainer, which reports the *base image* as "unused":
![nodered-portainer-unused-image](./images/nodered-portainer-unused-image.png)
You should not remove the *base* image, even though it appears to be unused.
+> Whether you see one or two rows depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
+
## Securing Node-RED
### Setting an encryption key for your credentials
@@ -858,7 +860,8 @@ Breaking it down into parts:
Your existing Node-RED container continues to run while the rebuild proceeds. Once the freshly-built *local image* is ready, the `up` tells `docker-compose` to do a new-for-old swap. There is barely any downtime for your Node-RED service.
-The `prune` is the simplest way of cleaning up old images. Sometimes you need to run this twice, the first time to clean up the old local image, the second time for the old base image.
+The `prune` is the simplest way of cleaning up old images. Sometimes you need to run this twice, the first time to clean up the old local image, the second time for the old base image. Whether an old base image exists depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
+
## Customising Node-RED
diff --git a/docs/Containers/Prometheus.md b/docs/Containers/Prometheus.md
index f9152209..a8af53a0 100644
--- a/docs/Containers/Prometheus.md
+++ b/docs/Containers/Prometheus.md
@@ -40,9 +40,9 @@ If you do not select all three containers, Prometheus will not start.
When you select *Prometheus* in the IOTstack menu, the service definition includes the three containers:
-* *Prometheus*
-* *CAdvisor*
-* *Node Exporter*
+* *prometheus*;
+* *prometheus-cadvisor*; and
+* *prometheus-nodeexporter*.
## Significant directories and files
@@ -142,7 +142,7 @@ The remaining instructions in the *Dockerfile* customise the *base image* to pro
The *local image* is instantiated to become your running container.
-When you run the `docker images` command after *Prometheus* has been built, you will see two rows for *Prometheus*:
+When you run the `docker images` command after *Prometheus* has been built, you *may* see two rows for *Prometheus*:
```bash
$ docker images
@@ -154,7 +154,9 @@ prom/prometheus latest 3f9575991a6c 3 days ago 169MB
* `prom/prometheus` is the *base image*; and
* `iotstack_prometheus` is the *local image*.
-You will see the same pattern in Portainer, which reports the *base image* as "unused". You should not remove the *base* image, even though it appears to be unused.
+You *may* see the same pattern in Portainer, which reports the *base image* as "unused". You should not remove the *base* image, even though it appears to be unused.
+
+> Whether you see one or two rows depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
### Dependencies: *CAdvisor* and *Node Exporter*
@@ -316,6 +318,8 @@ Your existing *Prometheus* container continues to run while the rebuild proceeds
The `prune` is the simplest way of cleaning up. The first call removes the old *local image*. The second call cleans up the old *base image*.
+> Whether an old *base image* exists depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
+
### *Prometheus* version pinning
If you need to pin *Prometheus* to a particular version:
diff --git a/docs/Containers/Telegraf.md b/docs/Containers/Telegraf.md
index 0bc37dbd..b0511ef5 100644
--- a/docs/Containers/Telegraf.md
+++ b/docs/Containers/Telegraf.md
@@ -118,7 +118,7 @@ The remaining instructions in the *Dockerfile* customise the ***base image*** to
The ***local image*** is instantiated to become your running container.
-When you run the `docker images` command after Telegraf has been built, you will see two rows for Telegraf:
+When you run the `docker images` command after Telegraf has been built, you *may* see two rows for Telegraf:
```bash
$ docker images
@@ -130,7 +130,9 @@ telegraf latest a721ac170fad 3 days ago 273MB
* `telegraf ` is the ***base image***; and
* `iotstack_telegraf ` is the ***local image***.
-You will see the same pattern in *Portainer*, which reports the ***base image*** as "unused". You should not remove the ***base*** image, even though it appears to be unused.
+You *may* see the same pattern in *Portainer*, which reports the ***base image*** as "unused". You should not remove the ***base*** image, even though it appears to be unused.
+
+> Whether you see one or two rows depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
### Migration considerations
@@ -331,7 +333,7 @@ Breaking it down into parts:
Your existing Telegraf container continues to run while the rebuild proceeds. Once the freshly-built ***local image*** is ready, the `up` tells `docker-compose` to do a new-for-old swap. There is barely any downtime for your service.
-The `prune` is the simplest way of cleaning up. The first call removes the old ***local image***. The second call cleans up the old ***base image***.
+The `prune` is the simplest way of cleaning up. The first call removes the old ***local image***. The second call cleans up the old ***base image***. Whether an old ***base image*** exists depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
### Telegraf version pinning
diff --git a/docs/Containers/Zigbee2MQTT.md b/docs/Containers/Zigbee2MQTT.md
index 5242920b..5ad81659 100644
--- a/docs/Containers/Zigbee2MQTT.md
+++ b/docs/Containers/Zigbee2MQTT.md
@@ -259,4 +259,4 @@ $ docker system prune
Note:
-* Sometimes it is necessary to repeat the `docker system prune` command.
+* Sometimes it is necessary to repeat the `docker system prune` command; whether a second run is needed depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
diff --git a/docs/Containers/Zigbee2mqttassistant.md b/docs/Containers/Zigbee2mqttassistant.md
old mode 100755
new mode 100644
diff --git a/docs/Developers/BuildStack-RandomPassword.md b/docs/Developers/BuildStack-RandomPassword.md
old mode 100755
new mode 100644
diff --git a/docs/Developers/BuildStack-Services.md b/docs/Developers/BuildStack-Services.md
old mode 100755
new mode 100644
diff --git a/docs/Developers/Menu-System.md b/docs/Developers/Menu-System.md
old mode 100755
new mode 100644
diff --git a/docs/Developers/PostBuild-Script.md b/docs/Developers/PostBuild-Script.md
old mode 100755
new mode 100644
diff --git a/docs/Developers/index.md b/docs/Developers/index.md
old mode 100755
new mode 100644
diff --git a/install.sh b/install.sh
index 77d39102..21f6d438 100755
--- a/install.sh
+++ b/install.sh
@@ -219,6 +219,8 @@ function do_docker_checks() {
DOCKER_VERSION_MAJOR=$(echo "$DOCKER_VERSION"| cut -d'.' -f 1)
DOCKER_VERSION_MINOR=$(echo "$DOCKER_VERSION"| cut -d'.' -f 2)
DOCKER_VERSION_BUILD=$(echo "$DOCKER_VERSION"| cut -d'.' -f 3)
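+  # strip any suffix such as "-ce" or "+dfsg1" so the build number is a plain integer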
+ DOCKER_VERSION_BUILD=$(echo "$DOCKER_VERSION_BUILD"| cut -f1 -d"-")
+ DOCKER_VERSION_BUILD=$(echo "$DOCKER_VERSION_BUILD"| cut -f1 -d"+")
if [ "$(minimum_version_check $REQ_DOCKER_VERSION $DOCKER_VERSION_MAJOR $DOCKER_VERSION_MINOR $DOCKER_VERSION_BUILD )" == "true" ]; then
[ -f .docker_outofdate ] && rm .docker_outofdate
diff --git a/menu.sh b/menu.sh
index d0f362de..55e19ae6 100755
--- a/menu.sh
+++ b/menu.sh
@@ -279,6 +279,7 @@ function do_docker_checks() {
DOCKER_VERSION_BUILD=$(echo "$DOCKER_VERSION"| cut -d'.' -f 3)
DOCKER_VERSION_BUILD=$(echo "$DOCKER_VERSION_BUILD"| cut -f1 -d"-")
+ DOCKER_VERSION_BUILD=$(echo "$DOCKER_VERSION_BUILD"| cut -f1 -d"+")
if [ "$(minimum_version_check $REQ_DOCKER_VERSION $DOCKER_VERSION_MAJOR $DOCKER_VERSION_MINOR $DOCKER_VERSION_BUILD )" == "true" ]; then
[ -f .docker_outofdate ] && rm .docker_outofdate
diff --git a/scripts/deps/.gitignore b/scripts/deps/.gitignore
old mode 100755
new mode 100644
diff --git a/scripts/deps/version_check.py b/scripts/deps/version_check.py
index aa3a272e..d60740b7 100755
--- a/scripts/deps/version_check.py
+++ b/scripts/deps/version_check.py
@@ -1,4 +1,14 @@
+import re
+
def checkVersion(requiredVersion, currentVersion):
+ """
+ >>> checkVersion('18.2.0', '20.10.11')
+ (True, '', [])
+ >>> checkVersion('18.2.0', '16.3.1')
+ (False, 'Version Check Fail', [False, False, True])
+ >>> checkVersion('18.2.0', '20.10.5+dfsg1')
+ (True, '', [])
+ """
requiredSplit = requiredVersion.split('.')
if len(requiredSplit) < 2:
@@ -19,7 +29,7 @@ def checkVersion(requiredVersion, currentVersion):
try:
currentMajor = int(currentSplit[0])
currentMinor = int(currentSplit[1])
- currentBuild = currentSplit[2].split("-")[0]
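+        # split on "-" or "+" so suffixes such as "-ce" or "+dfsg1" are dropped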
+ currentBuild = re.split(r'[+-]', currentSplit[2])[0]
currentBuild = int(currentBuild)
except:
return False, 'Invalid Current Version', currentVersion
diff --git a/scripts/native_installs.py b/scripts/native_installs.py
index 7e9e4280..a55ce9bf 100755
--- a/scripts/native_installs.py
+++ b/scripts/native_installs.py
@@ -30,21 +30,6 @@ def onResize(sig, action):
if (screenActive):
mainRender(1, mainMenuList, currentMenuItemIndex)
- def installHassIo():
- print(term.clear())
- print("Install Home Assistant Supervisor")
- print("./.native/hassio_supervisor.sh")
- res = subprocess.call("./.native/hassio_supervisor.sh", shell=True)
- print("")
- if res == 0:
- print("Preinstallation complete. Your system may run slow for a few hours as Hass.io installs its services.")
- print("Press [Up] or [Down] arrow key to show the menu if it has scrolled too far.")
- else:
- print("Preinstallation not completed.")
- input("Process terminated. Press [Enter] to show menu and continue.")
- time.sleep(0.5)
- return True
-
def installRtl433():
print(term.clear())
print("Install RTL_433")
@@ -93,7 +78,6 @@ def goBack():
return True
mainMenuList = [
- ["Hass.io (Supervisor)", installHassIo],
["RTL_433", installRtl433],
["RPIEasy", installRpiEasy],
["Upgrade Docker and Docker-Compose", upgradeDockerAndCompose],