diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..ef97a78 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,7 @@ +We use the [Developer Certificate of Origin](https://developercertificate.org/). + +To use the DCO, you only need to add your sign-off to each Git commit: + +``` +$ git commit -s -m "your commit message." +``` diff --git a/README.md b/README.md index c8f726f..a0d5658 100644 --- a/README.md +++ b/README.md @@ -78,16 +78,16 @@ For more details about dashboard configuration (e.g. how to add widgets), please # Provide Image Input -To capture an image via Pi camera +To capture an image via configured IP camera ``` -$ mosquitto_pub -h localhost -t berrynet/event/camera -m snapshot_picam +$ mosquitto_pub -h localhost -t berrynet/event/camera -m snapshot_ipcam ``` -To capture an image via configured IP camera +To capture an image via board-connected camera (RPi camera or USB webcam) ``` -$ mosquitto_pub -h localhost -t berrynet/event/camera -m snapshot_ipcam +$ mosquitto_pub -h localhost -t berrynet/event/camera -m snapshot_boardcam ``` To provide a local image @@ -96,6 +96,49 @@ To provide a local image $ mosquitto_pub -h localhost -t berrynet/event/localImage -m ``` +To start and stop streaming from board-connected camera + +``` +$ mosquitto_pub -h localhost -t berrynet/event/camera -m stream_boardcam_start +$ mosquitto_pub -h localhost -t berrynet/event/camera -m stream_boardcam_stop +``` + +To start and stop streaming from Nest IP camera + +``` +$ mosquitto_pub -h localhost -t berrynet/event/camera -m stream_nest_ipcam_start +$ mosquitto_pub -h localhost -t berrynet/event/camera -m stream_nest_ipcam_stop +``` + + +# Enable Data Collector + +You might want to store snapshots and inference results for data analysis. + +To enable the data collector, set the storage directory path in config.js: + +``` +config.storageDirPath = ''; +``` + +and restart BerryNet. + + +# Use Your Data To Train + +The original instructions for retraining the YOLOv2 model can be found in the [darknet GitHub repository](https://github.com/AlexeyAB/darknet#how-to-train-to-detect-your-custom-objects). + +In the current version of BerryNet, TinyYolo is used instead of YOLOv2. +The major differences are: + +1. Create the file `yolo-obj.cfg` with the same content as `tiny-yolo.cfg` +2. Download the pre-trained weights of the darknet reference model, `darknet.weights.12`, for the convolutional layers (6.1 MB): +https://drive.google.com/drive/folders/0B-oZJEwmkAObMzAtc2QzZDhyVGM?usp=sharing + +The remaining steps are the same as retraining YOLO. + +If you use [LabelMe](http://labelme.csail.mit.edu/Release3.0/) to annotate data, `utils/xmlTotxt.py` can help convert the XML format to the text format that darknet uses. + # Discussion diff --git a/berrynet-manager b/berrynet-manager index 39fce93..900bdb8 100755 --- a/berrynet-manager +++ b/berrynet-manager @@ -1,19 +1,19 @@ #! /bin/sh # # Copyright 2017 DT42 -# +# # This file is part of BerryNet. -# +# # BerryNet is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # BerryNet is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with BerryNet.
If not, see . @@ -26,25 +26,34 @@ help() { exit 1 } -if [ $# -lt 1 ] - then - help +if [ $# -lt 1 ]; then + help fi case $1 in - start | stop | status) - sudo systemctl $1 detection_server.service agent.service broker.service dashboard.service localimg.service camera.service journal.service cleaner.timer - ;; - log) - sudo journalctl -x --no-pager -u detection_server.service + start | stop | status) + sudo systemctl $1 \ + detection_fast_server.service \ + agent.service \ + broker.service \ + dashboard.service \ + localimg.service \ + camera.service \ + journal.service \ + data_collector.service \ + line.service + ;; + log) + sudo journalctl -x --no-pager -u detection_fast_server.service sudo journalctl -x --no-pager -u agent.service sudo journalctl -x --no-pager -u broker.service sudo journalctl -x --no-pager -u dashboard.service sudo journalctl -x --no-pager -u localimg.service sudo journalctl -x --no-pager -u camera.service sudo journalctl -x --no-pager -u journal.service - sudo journalctl -x --no-pager -u cleaner.timer - ;; - *) - help -esac + sudo journalctl -x --no-pager -u data_collector.service + sudo journalctl -x --no-pager -u line.service + ;; + *) + help +esac diff --git a/camera.js b/camera.js index 2b9c519..48461db 100644 --- a/camera.js +++ b/camera.js @@ -22,6 +22,7 @@ const mqtt = require('mqtt'); const request = require('request'); const spawnsync = require('child_process').spawnSync; const config = require('./config'); +const cv = require('opencv'); const broker = config.brokerHost; const client = mqtt.connect(broker); @@ -30,10 +31,21 @@ const topicActionInference = config.topicActionInference; const topicEventCamera = config.topicEventCamera; const cameraURI = config.ipcameraSnapshot; const snapshotFile = '/tmp/snapshot.jpg'; +const snapshotWidth = config.boardcameraImageWidth; +const snapshotHeight = config.boardcameraImageHeight; const cameraCmd = '/usr/bin/raspistill'; const cameraArgs = ['-vf', '-hf', - '-w', '1024', '-h', '768', + '-w', snapshotWidth, + '-h', snapshotHeight, '-o', snapshotFile]; +const usbCameraCmd = '/usr/bin/fswebcam'; +const usbCameraArgs = ['-r', snapshotWidth + 'x' + snapshotHeight, + '--no-banner', '-D', '0.5', snapshotFile]; +const fps = 30; +var cameraIntervalID = null; +var cameraInterval = 1000.0 / fps; +var cameraCV = null; +var frameCounter = 0; function log(m) { client.publish(topicActionLog, m); @@ -50,6 +62,13 @@ client.on('message', (t, m) => { const action = m.toString(); if (action == 'snapshot_picam') { + /* NOTE: We use V4L2 to support RPi camera, so RPi camera's usage is + * the same as USB camera. Both RPi and USB cameras are called + * "board camera". + * + * This action is obsoleted and will be removed in the future. + */ + // Take a snapshot from RPi3 camera. The snapshot will be displayed // on dashboard. spawnsync(cameraCmd, cameraArgs); @@ -75,6 +94,75 @@ client.on('message', (t, m) => { } } ); + } else if (action == 'snapshot_boardcam') { + // Take a snapshot from USB camera. 
+ spawnsync(usbCameraCmd, usbCameraArgs); + fs.readFile(snapshotFile, function(err, data) { + if (err) { + log('camera client: cannot get image.'); + } else { + log('camera client: publishing image.'); + client.publish(topicActionInference, data); + } + }); + } else if (action == 'stream_boardcam_start') { + if ((!cameraCV) && (!cameraIntervalID)) { + cameraCV = new cv.VideoCapture(0); + cameraCV.setWidth(snapshotWidth); + cameraCV.setHeight(snapshotHeight); + cameraIntervalID = setInterval(function() { + cameraCV.read(function(err, im) { + if (err) { + throw err; + } + if (frameCounter < fps * 2) { + frameCounter++; + } else { + frameCounter = 0; + im.save(snapshotFile); + fs.readFile(snapshotFile, function(err, data) { + if (err) { + log('camera client: cannot get image.'); + } else { + log('camera client: publishing image.'); + client.publish(topicActionInference, data); + } + }); + } + im.release(); + }); + }, cameraInterval); + } + } else if (action == 'stream_boardcam_stop') { + if (cameraCV) { + cameraCV.release(); + cameraCV = null; + } + if (cameraIntervalID) { + clearInterval(cameraIntervalID); + cameraIntervalID = null; + } + } else if (action == 'stream_nest_ipcam_start') { + if (!cameraIntervalID) { + cameraIntervalID = setInterval(function() { + request.get( + {uri: cameraURI, encoding: null}, + (e, res, body) => { + if (!e && res.statusCode == 200) { + log('camera client: publishing image.'); + client.publish(topicActionInference, body); + } else { + log('camera client: cannot get image.'); + } + } + ); + }, cameraInterval); + } + } else if (action == 'stream_nest_ipcam_stop') { + if (cameraIntervalID) { + clearInterval(cameraIntervalID); + cameraIntervalID = null; + } } else { log('camera client: unkown action.'); } diff --git a/cleaner.sh b/cleaner.sh deleted file mode 100644 index 5993dae..0000000 --- a/cleaner.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -TARGETDIR="./inference/image" - -rm -rf $TARGETDIR/snapshot* diff --git a/client.js b/client.js index 2228c49..1d57ed2 100644 --- a/client.js +++ b/client.js @@ -1,14 +1,14 @@ -var mqtt = require('mqtt') -//var client = mqtt.connect('mqtt://test.mosquitto.org') -var client = mqtt.connect('mqtt://localhost:1883') +var mqtt = require('mqtt'); +//var client = mqtt.connect('mqtt://test.mosquitto.org'); +var client = mqtt.connect('mqtt://localhost:1883'); client.on('connect', function () { - client.subscribe('presence') - client.publish('presence', 'Hello mqtt') -}) + client.subscribe('presence'); + client.publish('presence', 'Hello mqtt'); +}); client.on('message', function (topic, message) { // message is Buffer - console.log(message.toString()) - client.end() -}) + console.log(message.toString()); + client.end(); +}); diff --git a/config.js b/config.js index b4bf420..6845e16 100644 --- a/config.js +++ b/config.js @@ -41,22 +41,36 @@ config.topicEventCamera = padTopicBase('event/camera'); config.topicEventLocalImage = padTopicBase('event/localImage'); config.topicNotifyEmail = padTopicBase('notify/email'); config.topicNotifySMS = padTopicBase('notify/sms'); +config.topicNotifyLINE = padTopicBase('notify/line'); config.topicDashboardLog = padTopicBase('dashboard/log'); config.topicDashboardSnapshot = padTopicBase('dashboard/snapshot'); config.topicDashboardInferenceResult = padTopicBase('dashboard/inferenceResult'); +config.topicJSONInferenceResult = padTopicBase('data/jsonInferenceResult'); // IP camera config.ipcameraSnapshot = ''; +// Board camera, e.g. 
USB and RPi cameras +config.boardcameraImageWidth = 640; +config.boardcameraImageHeight = 480; + +// data collector configs +config.storageDirPath = ''; + // email notification -config.senderEmail = 'SENDER_EMAIL'; -config.senderPassword = 'SENDER_PASSWORD'; -config.receiverEmail = 'RECEIVER_EMAIL'; +config.senderEmail = ''; +config.senderPassword = ''; +config.receiverEmail = ''; // for compatibility config.sender_email = config.senderEmail; config.sender_password = config.senderPassword; config.receiver_email = config.receiverEmail; +// Authentication and channel information for LINE +config.LINETargetUserID = ''; +config.LINEChannelSecret = ''; +config.LINEChannelAccessToken = ''; + // make config importable module.exports = config; diff --git a/config/bcm2835-v4l2.conf b/config/bcm2835-v4l2.conf new file mode 100644 index 0000000..0feba8a --- /dev/null +++ b/config/bcm2835-v4l2.conf @@ -0,0 +1,3 @@ +# BerryNet supports accessing RPi camera access via OpenCV. + +bcm2835_v4l2 diff --git a/configure b/configure index 6f0e11b..ff75622 100755 --- a/configure +++ b/configure @@ -23,7 +23,7 @@ LOG="/tmp/berrynet.log" install_system_dependencies() { sudo apt-get update - sudo apt-get install -y python-dev python-pip python-opencv mongodb libkrb5-dev libzmq3-dev libyaml-dev imagemagick curl + sudo apt-get install -y python-dev python-pip python-opencv mongodb libkrb5-dev libzmq3-dev libyaml-dev imagemagick curl fswebcam wget git libopencv-dev sudo service mongodb start sudo -H pip install watchdog cython } @@ -32,6 +32,12 @@ install_optional_dependencies() { sudo apt-get install -y mosquitto-clients } +install_nodejs() { + # v6.x is LTS, if you want the latest feature, change to "setup_7.x". + curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - + sudo apt-get install -y nodejs +} + install_tensorflow() { TENSORFLOW_VERSION="1.0.1" TENSORFLOW_PKGNAME="tensorflow-${TENSORFLOW_VERSION}-cp27-none-linux_armv7l.whl" @@ -59,6 +65,47 @@ install_darkflow() { popd > /dev/null } +install_darknet() { + # build dependencies + pip install --user PeachPy + pip install --user git+https://github.com/Maratyszcza/confu + + pushd /tmp > /dev/null + git clone https://github.com/ninja-build/ninja.git + pushd ninja > /dev/null + git checkout release + ./configure.py --bootstrap + popd > /dev/null + popd > /dev/null + + sudo apt-get install -y clang + + pushd /tmp > /dev/null + git clone https://github.com/thomaspark-pkj/NNPACK-darknet.git + pushd NNPACK-darknet > /dev/null + $HOME/.local/bin/confu setup + python ./configure.py --backend auto + /tmp/ninja/ninja + sudo cp lib/{libgoogletest-core.a,libnnpack.a,libpthreadpool.a} /usr/lib/ + sudo cp include/nnpack.h /usr/include/ + sudo cp deps/pthreadpool/include/pthreadpool.h /usr/include/ + popd > /dev/null + popd > /dev/null + + # build detection backend (darknet) + pushd inference > /dev/null + git clone https://github.com/thomaspark-pkj/darknet-nnpack.git darknet + pushd darknet > /dev/null + patch -p 1 < ../../patch/01-detection-backend.patch + make -j + popd > /dev/null + popd > /dev/null + + cp utils/darknet/detectord.py inference/darknet + mkdir inference/darknet/utils + cp utils/darknet/utils/localrun.sh inference/darknet/utils +} + download_classifier_model() { # Inception v3 is default classifier model INCEPTION_PKGNAME=inception_dec_2015.zip @@ -79,33 +126,9 @@ download_classifier_model() { download_detector_model() { pushd inference/darkflow > /dev/null mkdir bin - wget -O bin/tiny-yolo.weights 
https://s3.amazonaws.com/berrynet/models/tinyyolo/tiny-yolo.weights - wget -O cfg/tiny-yolo.cfg https://s3.amazonaws.com/berrynet/models/tinyyolo/tiny-yolo.cfg popd > /dev/null -} - -install_nodejs() { - # v6.x is LTS, if you want the latest feature, change to "setup_7.x". - curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash - - sudo apt-get install -y nodejs -} - -install_nodejs_legacy() { - # FIXME: Use bash as default ambiguously. - # Raspbian is 32-bit, so we download armv7l instead of arm64. - NODEJS_VERSION="v6.10.2" - NODEJS_OS="linux" - NODEJS_ARCH="armv7l" - #NODEJS_ARCH="x64" - NODEJS_PKGNAME="node-$NODEJS_VERSION-$NODEJS_OS-$NODEJS_ARCH" - if [ ! -e "$NODEJS_PKGNAME" ]; then - if [ ! -e "$PWD/$NODEJS_PKGNAME.tar.xz" ]; then - wget https://nodejs.org/dist/$NODEJS_VERSION/$NODEJS_PKGNAME.tar.xz - fi - tar xJf $NODEJS_PKGNAME.tar.xz - echo "export PATH=$PWD/$NODEJS_PKGNAME/bin:\$PATH" >> $HOME/.bashrc - fi - export PATH=$PWD/$NODEJS_PKGNAME/bin:$PATH + wget -O /tmp/tinyyolo_20170816_all.deb https://s3.amazonaws.com/berrynet/models/tinyyolo/tinyyolo_20170816_all.deb + sudo dpkg -i /tmp/tinyyolo_20170816_all.deb } install_dashboard() { @@ -122,26 +145,35 @@ install_dashboard() { install_systemd_configs() { sudo cp systemd/* /etc/systemd/system + sudo cp config/bcm2835-v4l2.conf /etc/modules-load.d + # enable ramfs to speedup I/O + echo -e "tmpfs /var/ramfs tmpfs nodev,nosuid,size=50M 0 0" \ + | sudo tee -a /etc/fstab + sudo mount -a } install_gateway() { local working_dir="/usr/local/berrynet" sudo mkdir -p $working_dir - sudo cp -a broker.js camera.js cleaner.sh config.js dashboard inference journal.js localimg.js mail.js package.json $working_dir + sudo cp -a \ + broker.js camera.js config.js dashboard data_collector.js \ + inference journal.js localimg.js mail.js line.js package.json \ + $working_dir sudo cp berrynet-manager /usr/local/bin # install npm dependencies pushd $working_dir > /dev/null - sudo npm install + sudo npm install --unsafe-perm popd > /dev/null } install_system_dependencies 2>&1 | tee -a $LOG install_optional_dependencies 2>&1 | tee -a $LOG +install_nodejs 2>&1 | tee -a $LOG install_tensorflow 2>&1 | tee -a $LOG download_classifier_model 2>&1 | tee -a $LOG install_darkflow 2>&1 | tee -a $LOG +install_darknet 2>&1 | tee -a $LOG download_detector_model 2>&1 | tee -a $LOG -install_nodejs 2>&1 | tee -a $LOG install_dashboard 2>&1 | tee -a $LOG install_systemd_configs 2>&1 | tee -a $LOG install_gateway 2>&1 | tee -a $LOG diff --git a/data_collector.js b/data_collector.js new file mode 100644 index 0000000..f159fdc --- /dev/null +++ b/data_collector.js @@ -0,0 +1,126 @@ +// Copyright 2017 DT42 +// +// This file is part of BerryNet. +// +// BerryNet is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// BerryNet is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with BerryNet. If not, see . 
+ +'use strict'; + +const fs = require('fs'); +const mqtt = require('mqtt'); +const path = require('path'); +const config = require('./config'); + +const broker = config.brokerHost; +const client = mqtt.connect(broker); +const topicActionLog = config.topicActionLog; +const topicActionInference = config.topicActionInference; +const topicDashboardSnapshot = config.topicDashboardSnapshot; +const topicJSONInferenceResult = config.topicJSONInferenceResult; +const storageDirPath = config.storageDirPath; + + +/** + * Log wrapper to publish log message via MQTT and display on console. + * @param {string} m Log message. + */ +function log(m) { + client.publish(topicActionLog, m); + console.log(m); +} + +/** + * Save published MQTT binary data as an image file. + * @param {object} b The binary data published via MQTT. + * @param {string} filepath The file path of the saved image. + */ +function saveBufferToImage(b, filepath) { + fs.writeFile(filepath, b, (e) => { + if (e) + log(`log client: cannot save buffer to image.`); + else + log(`log client: saved buffer to image successfully.`); + }); +} + +/** + * Get time string in ISO format. + * @return {string} Time string. + */ +function getTimeString() { + const d = new Date(); + return d.toISOString(); +} + +/** + * Save snapshot, detection image, and detection JSON to data directory. + * @param {string} topic Subscribed MQTT topic. + * @param {object} message Snapshot binary | 'snapshot.jpg' | detection JSON. + */ +function callbackSaveData(topic, message) { + if (topic == topicActionInference) { + console.log('Get ' + topicActionInference); + + // NOTE: topicActionInference always happens prior other topics. + callbackSaveData.timeString = getTimeString(); + console.log(callbackSaveData.timeString); + const snapshotImage = path.join( + storageDirPath, + callbackSaveData.timeString + '.jpg'); + saveBufferToImage(message, snapshotImage); + } else if (topic == topicDashboardSnapshot) { + console.log('Get ' + topicDashboardSnapshot); + + const detectionImage = path.join( + storageDirPath, + callbackSaveData.timeString + '-detection.jpg'); + /* + fs.readFile(config.snapshot, (err, data) => { + fs.writeFile(detectionImage, data, (e) => { + if (e) + log('Failed to save detection image.'); + }); + }); + */ + fs.createReadStream(config.snapshot) + .pipe(fs.createWriteStream(detectionImage)); + } else if (topic == topicJSONInferenceResult) { + console.log('Get ' + topicJSONInferenceResult); + + const detectionJSON = path.join( + storageDirPath, + callbackSaveData.timeString + '-detection.json'); + fs.writeFile(detectionJSON, message, (e) => { + if (e) + log('Failed to save detection JSON.'); + }); + } else { + console.log('Unsubscribed topic ' + topic); + } +} + +fs.mkdir(storageDirPath, (e) => { + if (e) + log('Failed to create data storage dir.'); +}); + +client.on('connect', () => { + client.subscribe(topicActionLog); + client.subscribe(topicActionInference); + client.subscribe(topicDashboardSnapshot); + client.subscribe(topicJSONInferenceResult); + log(`log client: connected to ${broker} successfully.`); +}); + +client.on('message', callbackSaveData); diff --git a/inference/agent.js b/inference/agent.js index 4ee9879..a1eabc1 100644 --- a/inference/agent.js +++ b/inference/agent.js @@ -1,17 +1,17 @@ // Copyright 2017 DT42 -// +// // This file is part of BerryNet. 
-// +// // BerryNet is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// +// // BerryNet is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -// +// // You should have received a copy of the GNU General Public License // along with BerryNet. If not, see . @@ -30,6 +30,8 @@ const topicActionLog = config.topicActionLog; const topicActionInference = config.topicActionInference; const topicDashboardSnapshot = config.topicDashboardSnapshot; const topicDashboardInferenceResult = config.topicDashboardInferenceResult; +const topicJSONInferenceResult = config.topicJSONInferenceResult; +const topicNotifyLINE = config.topicNotifyLINE; const inferenceEngine = config.inferenceEngine; function log(m) { @@ -46,6 +48,32 @@ function saveBufferToImage(b, filepath) { }); } +const parseDarknet = function(str) { + const elements = str.split(' '); + // label might consists of multiple words, e.g. cell phone. + const label = elements.slice(0, elements.length - 5).join(' '); + let [confidence, x, y, width, height] = elements.slice(elements.length - 5); + let result = { + label: label, + confidence: parseFloat(confidence), + top: parseInt(y), + bottom: parseInt(y) + parseInt(height), + left: parseInt(x), + right: parseInt(x) + parseInt(width) + }; + return result +} + +const darknetToJSON = function(data) { + let dataStrList = data.toString().replace(/\n$/, '').split('\n'); + let jsonResult = []; + for (let i in dataStrList) { + let item = `${dataStrList[i]}`; + jsonResult.push(parseDarknet(item)); + } + return jsonResult; +}; + client.on('connect', () => { client.subscribe(topicActionInference); log(`inference client: connected to ${broker} successfully.`); @@ -54,13 +82,13 @@ client.on('connect', () => { client.on('message', (t, m) => { const size = m.length; const now = moment().format('YYYYMMDD-HHmmss'); - const inference_server_img_dir = __dirname + '/image' - const snapshot = `snapshot-${now}.jpg` - const snapshot_path = path.join(inference_server_img_dir, snapshot) - const donefile_path = snapshot_path + '.done' - const resultfile_path = snapshot_path + '.txt' - const resultdonefile_path = snapshot_path + '.txt.done' - const dashboard_image_path = __dirname + '/../dashboard/www/freeboard/snapshot.jpg' + const inference_server_img_dir = __dirname + '/image'; + const snapshot = `snapshot-${now}.jpg`; + const snapshot_path = path.join(inference_server_img_dir, snapshot); + const donefile_path = snapshot_path + '.done'; + const resultfile_path = snapshot_path + '.txt'; + const resultdonefile_path = snapshot_path + '.txt.done'; + const dashboard_image_path = __dirname + '/../dashboard/www/freeboard/snapshot.jpg'; log(`inference client: on topic ${t}, received ${size} bytes.`); @@ -73,49 +101,50 @@ client.on('message', (t, m) => { // Listen to classifier/detector's result done file. When result done // file (.txt.done) is created, result is available. var watcher = fs.watch(inference_server_img_dir, (eventType, filename) => { - /* Merge inference result and snapshot into single image. 
*/ - if (eventType === 'change') { + if (eventType === 'rename') { if (filename === (snapshot + '.txt.done')) { - /* - fs.open(resultfile_path, 'r', (err, fd) => { - if (err) { - if (err.code === 'ENOENT') { - console.error(resultfile_path + ' does not exist'); - return; - } - throw err; - } - - readMyData(fd); - }); - */ - fs.readFile(resultfile_path, (err, result) => { if (err) throw err - watcher.close() + watcher.close(); if (inferenceEngine === 'classifier') { fs.writeFile(dashboard_image_path, m, (err, written, buffer) => { console.log('Written snapshot to dashboard image directory: ' + dashboard_image_path); client.publish(topicDashboardSnapshot, 'snapshot.jpg'); }) + client.publish(topicDashboardInferenceResult, + result.toString().replace(/(\n)+/g, '
')); } else if (inferenceEngine === 'detector') { console.log('Snapshot is created by detector, only notify dashboard to update.'); client.publish(topicDashboardSnapshot, 'snapshot.jpg'); + client.publish(topicDashboardInferenceResult, + result.toString().replace(/(\n)+/g, '
')); + client.publish(topicJSONInferenceResult, + JSON.stringify(darknetToJSON(result))); + + // Delete intermediate files. + // + // Note: Data collector will not be affected. It retrieves data from + // * topicActionInference: contains snapshot raw data + // * topicDashboardSnapshot: to copy snapshot with bounding boxes + // * topicDashboardInferenceResult: contains inference result string + fs.unlink(snapshot_path, (e) => {}); + fs.unlink(resultfile_path, (e) => {}); + fs.unlink(resultdonefile_path, (e) => {}); } else { console.log('Unknown owner ' + inferenceEngine); } - client.publish(topicDashboardInferenceResult, result.toString().replace(/(\n)+/g, '
')) + client.publish(topicNotifyLINE, dashboard_image_path); }) } else { - console.log('Detect change of ' + filename + ', but comparing target is ' + snapshot + '.txt.done'); + console.log('rename event for ' + + filename + + ', but it is not inference result done file.'); } - } else if (eventType == 'rename') { - console.log('watch get rename event for ' + filename) } else { - console.log('watch get unknown event, ' + eventType) + console.log(eventType + ' event for ' + filename + ', ignore it.'); } - }) + }); }); diff --git a/inference/classify_server.py b/inference/classify_server.py index 405f88e..447f5c3 100644 --- a/inference/classify_server.py +++ b/inference/classify_server.py @@ -1,17 +1,17 @@ # Copyright 2017 DT42 -# +# # This file is part of BerryNet. -# +# # BerryNet is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # BerryNet is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with BerryNet. If not, see . @@ -153,7 +153,7 @@ def on_created(self, event): def main(_): """Called by Tensorflow""" - + global sess, threads # Creates graph from saved GraphDef. @@ -172,7 +172,7 @@ def main(_): args=(labels,))) for t in threads: t.start() for t in threads: t.join() - + if __name__ == '__main__': pid = str(os.getpid()) @@ -188,7 +188,7 @@ def main(_): logging("model_dir: ", FLAGS.model_dir) logging("image_dir: ", FLAGS.image_dir) - # workaround the issue that SIGINT cannot be received (fork a child to + # workaround the issue that SIGINT cannot be received (fork a child to # avoid blocking the main process in Thread.join() child_pid = os.fork() if child_pid == 0: diff --git a/inference/detection_server.py b/inference/detection_server.py index 28be01b..7ac0f1d 100644 --- a/inference/detection_server.py +++ b/inference/detection_server.py @@ -1,17 +1,17 @@ # Copyright 2017 DT42 -# +# # This file is part of BerryNet. -# +# # BerryNet is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # BerryNet is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with BerryNet. If not, see . 
@@ -159,7 +159,7 @@ def main(): _logging('config dir: {}'.format(options['model'])) server(tfnet) - + if __name__ == '__main__': logging.basicConfig(filename='/tmp/dlDetector.log', level=logging.DEBUG) @@ -174,7 +174,7 @@ def main(): with open(pidfile, 'w') as f: f.write(pid) - # workaround the issue that SIGINT cannot be received (fork a child to + # workaround the issue that SIGINT cannot be received (fork a child to # avoid blocking the main process in Thread.join() child_pid = os.fork() if child_pid == 0: diff --git a/journal.js b/journal.js index 4d1882c..3546b90 100644 --- a/journal.js +++ b/journal.js @@ -54,7 +54,7 @@ client.on('connect', () => { client.on('message', (t, m) => { // secretly save a copy of the image - if (t == topicNotifyEmail) { + if (t === topicNotifyEmail) { const filename = 'snapshot.jpg'; saveBufferToImage(m, snapshot); client.publish(topicDashboardSnapshot, filename); diff --git a/line.js b/line.js new file mode 100644 index 0000000..2afa94b --- /dev/null +++ b/line.js @@ -0,0 +1,84 @@ +// Copyright 2017 DT42 +// +// This file is part of BerryNet. +// +// BerryNet is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// BerryNet is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with BerryNet. If not, see . + +'use strict'; + +const mqtt = require('mqtt'); +const line = require('@line/bot-sdk'); +const imgur = require('imgur'); +const config = require('./config'); + +const broker = config.brokerHost; +const client = mqtt.connect(broker); +const topicActionLog = config.topicActionLog; +const topicNotifyLINE = config.topicNotifyLINE; +const topicDashboardInferenceResult = config.topicDashboardInferenceResult; +const targetUserID = config.LINETargetUserID; + +// create LINE SDK config +const LINEConfig = { + channelAccessToken: config.LINEChannelAccessToken, + channelSecret: config.LINEChannelSecret, +}; + +// create LINE SDK client +const LINEClient = new line.Client(LINEConfig); + +function log(m) { + client.publish(topicActionLog, m); + console.log(m); +} + +client.on('connect', () => { + client.subscribe(topicNotifyLINE); + client.subscribe(topicDashboardInferenceResult); + log(`client connected to ${broker} successfully.`); +}); + +client.on('message', (t, m) => { + const size = m.length; + log(`client on topic ${t}, received ${size} bytes.`) + + if (t === topicDashboardInferenceResult) { + const result = m.toString(); + LINEClient.pushMessage(targetUserID, { type: 'text', text: result }); + return; + } + + // save image to file and upload it to imgur for display in LINE message + const snapshot_path = m.toString(); + imgur.uploadFile(snapshot_path) + .then((json) => { + var imgurLink = json.data.link; + imgurLink = imgurLink.replace('http:\/\/', 'https:\/\/'); + log(`An image has been uploaded to imgur. 
link: ${imgurLink}`); + + // Image can only be delivered via 'https://' URL, 'http://' doesn't work + LINEClient.pushMessage(targetUserID, { type: 'image', + originalContentUrl: imgurLink, + previewImageUrl: imgurLink }) + .then((v) => { + log(`A message sent to ${targetUserID} successfully.`); + }) + .catch((err) => { + log(`An error occurred, ${err}.`); + }); + }) + .catch((err) => { + log(`An error occurred. ${err}`); + }); +}); diff --git a/mail.js b/mail.js index 841d808..20b39fc 100644 --- a/mail.js +++ b/mail.js @@ -1,17 +1,17 @@ // Copyright 2017 DT42 -// +// // This file is part of BerryNet. -// +// // BerryNet is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// +// // BerryNet is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -// +// // You should have received a copy of the GNU General Public License // along with BerryNet. If not, see . diff --git a/package.json b/package.json index 5179913..34f0b3e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "BerryNet", - "version": "1.0.0", + "version": "2.0.0", "description": "Deep learning gateway on Raspberry Pi", "main": "index.js", "author": "DT42", @@ -8,10 +8,13 @@ "dependencies": { "ascoltatori": "^3.1.0", "emailjs": "^1.0.8", + "@line/bot-sdk": "^1.0.0", + "imgur": "^0.2.1", "imagemagick": "^0.1.3", "mocha": "^3.2.0", "mosca": "^2.2.0", "mqtt": "^2.0.1", + "opencv": "^6.0.0", "pino": "^2.13.0", "prompt": "^1.0.0", "request": "^2.79.0" diff --git a/patch/01-detection-backend.patch b/patch/01-detection-backend.patch new file mode 100644 index 0000000..f1f1057 --- /dev/null +++ b/patch/01-detection-backend.patch @@ -0,0 +1,174 @@ +diff --git a/Makefile b/Makefile +index 7ba6b25..31950ce 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + GPU=0 + CUDNN=0 +-OPENCV=0 ++OPENCV=1 + NNPACK=1 + ARM_NEON=1 + DEBUG=0 +diff --git a/examples/coco.c b/examples/coco.c +index a07906e..170af71 100644 +--- a/examples/coco.c ++++ b/examples/coco.c +@@ -342,7 +342,7 @@ void test_coco(char *cfgfile, char *weightfile, char *filename, float thresh) + printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time)); + get_detection_boxes(l, 1, 1, thresh, probs, boxes, 0); + if (nms) do_nms_sort(boxes, probs, l.side*l.side*l.n, l.classes, nms); +- draw_detections(im, l.side*l.side*l.n, thresh, boxes, probs, coco_classes, alphabet, 80); ++ draw_detections(im, l.side*l.side*l.n, thresh, boxes, probs, coco_classes, alphabet, 80, 0); + save_image(im, "prediction"); + show_image(im, "predictions"); + free_image(im); +diff --git a/examples/detector.c b/examples/detector.c +index 3c4a107..f2de3cc 100644 +--- a/examples/detector.c ++++ b/examples/detector.c +@@ -581,6 +581,9 @@ void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filenam + list *options = read_data_cfg(datacfg); + char *name_list = option_find_str(options, "names", "data/names.list"); + char **names = get_labels(name_list); ++ char done[256]; ++ FILE *done_signal = NULL; ++ memset(done, 0, 256); + + image **alphabet = load_alphabet(); + network net = parse_network_cfg(cfgfile); +@@ -621,6 +624,7 @@ void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filenam + 
//resize_network(&net, sized.w, sized.h); + #endif + layer l = net.layers[net.n-1]; ++ sprintf(done, "%s.txt.done", input); + + box *boxes = calloc(l.w*l.h*l.n, sizeof(box)); + float **probs = calloc(l.w*l.h*l.n, sizeof(float *)); +@@ -634,7 +638,7 @@ void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filenam + get_region_boxes(l, im.w, im.h, net.w, net.h, thresh, probs, boxes, 0, 0, hier_thresh, 1); + if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); + //else if (nms) do_nms_sort(boxes, probs, l.w*l.h*l.n, l.classes, nms); +- draw_detections(im, l.w*l.h*l.n, thresh, boxes, probs, names, alphabet, l.classes); ++ draw_detections(im, l.w*l.h*l.n, thresh, boxes, probs, names, alphabet, l.classes, input); + if(outfile){ + save_image(im, outfile); + } +@@ -650,11 +654,13 @@ void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filenam + cvDestroyAllWindows(); + #endif + } ++ done_signal = fopen(done, "w"); + + free_image(im); + free_image(sized); + free(boxes); + free_ptrs((void **)probs, l.w*l.h*l.n); ++ fclose(done_signal); + if (filename) break; + } + #ifdef NNPACK +diff --git a/examples/yolo.c b/examples/yolo.c +index 5b3fd16..9e74736 100644 +--- a/examples/yolo.c ++++ b/examples/yolo.c +@@ -309,7 +309,7 @@ void test_yolo(char *cfgfile, char *weightfile, char *filename, float thresh) + get_detection_boxes(l, 1, 1, thresh, probs, boxes, 0); + if (nms) do_nms_sort(boxes, probs, l.side*l.side*l.n, l.classes, nms); + //draw_detections(im, l.side*l.side*l.n, thresh, boxes, probs, voc_names, alphabet, 20); +- draw_detections(im, l.side*l.side*l.n, thresh, boxes, probs, voc_names, alphabet, 20); ++ draw_detections(im, l.side*l.side*l.n, thresh, boxes, probs, voc_names, alphabet, 20, 0); + save_image(im, "predictions"); + show_image(im, "predictions"); + +diff --git a/include/darknet.h b/include/darknet.h +index b6b9402..2de7cc0 100644 +--- a/include/darknet.h ++++ b/include/darknet.h +@@ -695,7 +695,7 @@ float box_iou(box a, box b); + void do_nms(box *boxes, float **probs, int total, int classes, float thresh); + data load_all_cifar10(); + box_label *read_boxes(char *filename, int *n); +-void draw_detections(image im, int num, float thresh, box *boxes, float **probs, char **names, image **labels, int classes); ++void draw_detections(image im, int num, float thresh, box *boxes, float **probs, char **names, image **labels, int classes, char* result_file); + + matrix network_predict_data(network net, data test); + image **load_alphabet(); +diff --git a/src/demo.c b/src/demo.c +index 9dc4946..0030d0d 100644 +--- a/src/demo.c ++++ b/src/demo.c +@@ -77,7 +77,7 @@ void *detect_in_thread(void *ptr) + printf("\nFPS:%.1f\n",fps); + printf("Objects:\n\n"); + image display = buff[(buff_index+2) % 3]; +- draw_detections(display, demo_detections, demo_thresh, boxes, probs, demo_names, demo_alphabet, demo_classes); ++ draw_detections(display, demo_detections, demo_thresh, boxes, probs, demo_names, demo_alphabet, demo_classes, 0); + + demo_index = (demo_index + 1)%demo_frame; + running = 0; +diff --git a/src/image.c b/src/image.c +index 83ed382..c1b5b2a 100644 +--- a/src/image.c ++++ b/src/image.c +@@ -190,24 +190,33 @@ image **load_alphabet() + return alphabets; + } + +-void draw_detections(image im, int num, float thresh, box *boxes, float **probs, char **names, image **alphabet, int classes) ++void draw_detections(image im, int num, float thresh, box *boxes, float **probs, char **names, image **alphabet, int classes, char* result_file) + { + int i; +- ++ 
FILE *predict_result = NULL; ++ char result_txt[256]; ++ memset(result_txt, 0, 256); ++ if (result_file != NULL) { ++ sprintf(result_txt, "%s.txt", result_file); ++ predict_result = fopen(result_txt, "wa"); ++ if (!predict_result) { ++ printf("%s: Predict result file opened error\n", result_txt); ++ return; ++ } ++ } + for(i = 0; i < num; ++i){ + int class = max_index(probs[i], classes); + float prob = probs[i][class]; + if(prob > thresh){ + +- int width = im.h * .006; ++ int width = im.h * .012; + + if(0){ + width = pow(prob, 1./2.)*10+1; + alphabet = 0; + } + +- //printf("%d %s: %.0f%%\n", i, names[class], prob*100); +- printf("%s: %.0f%%\n", names[class], prob*100); ++ printf("%s %.0f%%\n", names[class], prob*100); + int offset = class*123457 % classes; + float red = get_color(2,offset,classes); + float green = get_color(1,offset,classes); +@@ -232,6 +241,12 @@ void draw_detections(image im, int num, float thresh, box *boxes, float **probs, + if(bot > im.h-1) bot = im.h-1; + + draw_box_width(im, left, top, right, bot, width, red, green, blue); ++ // output: label, accuracy, x, y, width, height ++ if (predict_result) ++ fprintf(predict_result, "%s %.2f %d %d %d %d\n", ++ names[class], prob, left, top, right - left, bot - top); ++ printf("%s %.2f %d %d %d %d\n", ++ names[class], prob, left, top, right - left, bot - top); + if (alphabet) { + image label = get_label(alphabet, names[class], (im.h*.03)/10); + draw_label(im, top + width, left, label, rgb); +@@ -239,6 +254,8 @@ void draw_detections(image im, int num, float thresh, box *boxes, float **probs, + } + } + } ++ if (predict_result) ++ fclose(predict_result); + } + + void transpose_image(image im) diff --git a/systemd/cleaner.timer b/systemd/cleaner.timer deleted file mode 100644 index 2b46c95..0000000 --- a/systemd/cleaner.timer +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Timer to clean snapshots every 5 min. - -[Timer] -OnBootSec=5min -OnUnitActiveSec=5min - -[Install] -WantedBy=timers.target diff --git a/systemd/data_collector.service b/systemd/data_collector.service new file mode 100644 index 0000000..5dd2664 --- /dev/null +++ b/systemd/data_collector.service @@ -0,0 +1,15 @@ +[Unit] +Description=MQTT client agent for DL inference data collection. 
+After=network.target + +[Service] +Type=simple +WorkingDirectory=/usr/local/berrynet +PIDFile=/tmp/data_collector.pid +ExecStart=/usr/bin/node data_collector.js +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target +WantedBy=graphical.target diff --git a/systemd/detection_fast_server.service b/systemd/detection_fast_server.service new file mode 100644 index 0000000..c3e5d4f --- /dev/null +++ b/systemd/detection_fast_server.service @@ -0,0 +1,15 @@ +[Unit] +Description=detection server +After=network.target + +[Service] +Type=simple +WorkingDirectory=/usr/local/berrynet/inference/darknet +PIDFile=/tmp/detection_server.pid +ExecStart=/bin/bash utils/localrun.sh /usr/local/berrynet/inference/image +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target +WantedBy=graphical.target diff --git a/systemd/cleaner.service b/systemd/line.service similarity index 53% rename from systemd/cleaner.service rename to systemd/line.service index 6351a0e..f04a623 100644 --- a/systemd/cleaner.service +++ b/systemd/line.service @@ -1,12 +1,14 @@ [Unit] -Description=Clean snapshots every 5 min +Description=LINE client agent for notification After=network.target [Service] Type=simple WorkingDirectory=/usr/local/berrynet -PIDFile=/tmp/cleaner.pid -ExecStart=/bin/bash cleaner.sh +PIDFile=/tmp/line.pid +ExecStart=/usr/bin/node line.js +Restart=always +RestartSec=10 [Install] WantedBy=multi-user.target diff --git a/utils/darknet/detectord.py b/utils/darknet/detectord.py new file mode 100644 index 0000000..6729875 --- /dev/null +++ b/utils/darknet/detectord.py @@ -0,0 +1,149 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# Copyright 2016 dt42.io. All Rights Reserved. +# +# 09-01-2016 joseph@dt42.io Initial version +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Simple image classification server with Inception. + +The server monitors image_dir and run inferences on new images added to the +directory. Every image file should come with another empty file with '.done' +suffix to signal readiness. Inference result of a image can be read from the +'.txt' file of that image after '.txt.done' is spotted. + +This is an example the server expects clients to do. Note the order. + +# cp cat.jpg /run/image_dir +# touch /run/image_dir/cat.jpg.done + +Clients should wait for appearance of 'cat.jpg.txt.done' before getting +result from 'cat.jpg.txt'. 
+""" + + +from __future__ import print_function +import os +import sys +import time +import signal +import argparse +import subprocess +import errno +from watchdog.observers import Observer +from watchdog.events import PatternMatchingEventHandler + + +def logging(*args): + print("[%08.3f]" % time.time(), ' '.join(args)) + + +class EventHandler(PatternMatchingEventHandler): + def process(self, event): + """ + event.event_type + 'modified' | 'created' | 'moved' | 'deleted' + event.is_directory + True | False + event.src_path + path/to/observed/file + """ + # the file will be processed there + _msg = event.src_path + os.remove(_msg) + logging(_msg, event.event_type) + darknet.stdin.write(_msg.rstrip('.done').encode('utf8') + b'\n') + + # ignore all other types of events except 'modified' + def on_created(self, event): + self.process(event) + + +def check_pid(pid): + try: + os.kill(pid, 0) + except OSError as err: + if err.errno == errno.ESRCH: + # ESRCH == No such process + return False + elif err.errno == errno.EPERM: + # EPERM means no permission, and the process exists to deny the + # access + return True + else: + raise + else: + return True + +if __name__ == '__main__': + ap = argparse.ArgumentParser() + pid = str(os.getpid()) + basename = os.path.splitext(os.path.basename(__file__))[0] + ap.add_argument('input_dir') + ap.add_argument( + '-p', '--pid', default='/tmp/%s.pid' % basename, + help='pid file path') + ap.add_argument( + '-fi', '--fifo', default='/tmp/acti_yolo', + help='fifo pipe path') + ap.add_argument( + '-d', '--data', default='cfg/coco.data', + help='fifo pipe path') + ap.add_argument( + '-c', '--config', default='cfg/yolo.cfg', + help='fifo pipe path') + ap.add_argument( + '-w', '--weight', default='yolo.weights', + help='fifo pipe path') + args = vars(ap.parse_args()) + WATCH_DIR = os.path.abspath(args['input_dir']) + FIFO_PIPE = os.path.abspath(args['fifo']) + data = args['data'] + cfg = args['config'] + weight = args['weight'] + pidfile = args['pid'] + + if os.path.isfile(pidfile): + with open(pidfile) as f: + prev_pid = int(f.readline()) + if check_pid(prev_pid): + logging("{} already exists and process {} is still running, exists.".format( + pidfile, prev_pid)) + sys.exit(1) + else: + logging("{} exists but process {} died, clean it up.".format(pidfile, prev_pid)) + os.unlink(pidfile) + + with open(pidfile, 'w') as f: + f.write(pid) + + logging("watch_dir: ", WATCH_DIR) + logging("pid: ", pidfile) + + cmd = ['./darknet', 'detector', 'test', data, cfg, weight, '-out', '/usr/local/berrynet/dashboard/www/freeboard/snapshot'] + darknet = subprocess.Popen(cmd, bufsize=0, + stdin=subprocess.PIPE, + stderr=subprocess.STDOUT) + + observer = Observer() + observer.schedule( + EventHandler(['*.jpg.done', '*.png.done']), + path=WATCH_DIR, recursive=True) + observer.start() + try: + darknet.wait() + except KeyboardInterrupt: + logging("Interrupt by user, clean up") + os.kill(darknet.pid, signal.SIGKILL) + os.unlink(pidfile) diff --git a/utils/darknet/utils/localrun.sh b/utils/darknet/utils/localrun.sh new file mode 100644 index 0000000..fced5ad --- /dev/null +++ b/utils/darknet/utils/localrun.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +SNAPSHOT_DIR="$1" +MODEL_DIR="/var/lib/dlmodels/tinyyolo-20170816" + +usage() { + echo "Usage: /utils/local_debug.sh SNAPSHOT_DIR" + exit 1 +} + + +if [ "$SNAPSHOT_DIR" = "" ]; then + usage +else + echo "SNAPSHOT_DIR: $SNAPSHOT_DIR" +fi + +python detectord.py \ + -c $MODEL_DIR/assets/tiny-yolo.cfg \ + -w $MODEL_DIR/tiny-yolo.weights \ + $SNAPSHOT_DIR diff 
--git a/utils/local-launcher.js b/utils/local-launcher.js index 8460bcf..31d504d 100755 --- a/utils/local-launcher.js +++ b/utils/local-launcher.js @@ -20,12 +20,14 @@ const broker = exec('node broker.js', execCallback); const cameraAgent = exec('node camera.js', execCallback); //const eventNotifier = exec('node mail.js ' + config.sender_email + ' ' + config.sender_email_password + ' ' + config.receiver_email, execCallback); const eventLogger = exec('node journal.js', execCallback); +const LINEAgent = exec('node line.js', execCallback); const webServer = exec('cd dashboard && node server.js', execCallback); //const dlClassifier = exec('cd inference && python classify_server.py --model_dir=model --image_dir=image', execCallback); const dlDetector = exec('cd inference/darkflow && python detection_server.py', execCallback); const inferenceAgent = exec('cd inference && node agent.js', execCallback); const localImageAgent = exec('node localimg.js', execCallback); const webBrowser = exec('DISPLAY=:0 sensible-browser http://localhost:8080/index.html#source=dashboard.json', execCallback); +const dataCollector = exec('node data_collector.js', execCallback); broker.stdout.on('data', function(data) { console.log("[broker] " + data); @@ -43,6 +45,10 @@ eventLogger.stdout.on('data', function(data) { console.log('[eventLogger] ' + data); }); +LINEAgent.stdout.on('data', function(data) { + console.log('[LINEAgent] ' + data); +}); + webServer.stdout.on('data', function(data) { console.log('[webServer] ' + data); }); @@ -55,15 +61,21 @@ inferenceAgent.stdout.on('data', function(data) { console.log('[inferenceAgent] ' + data); }); +dataCollector.stdout.on('data', function(data) { + console.log('[dataCollector] ' + data); +}); + process.on('SIGINT', function() { console.log('Get SIGINT'); broker.kill(); cameraAgent.kill(); eventNotifier.kill(); + LINEAgent.kill(); eventLogger.kill(); webServer.kill(); //dlClassifier.kill(); dlDetector.kill(); inferenceAgent.kill(); + dataCollector.kill(); process.exit(0); }); diff --git a/utils/nest/nest_get_snapshoturl.sh b/utils/nest/nest_get_snapshoturl.sh new file mode 100755 index 0000000..df79afb --- /dev/null +++ b/utils/nest/nest_get_snapshoturl.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +TMPFILE=`mktemp` +NODE='node' + +if [ -x /usr/bin/nodejs ]; then + NODE='/usr/bin/nodejs' +fi +"$NODE" nest_get_token.js | tee "$TMPFILE" + +TOKEN=`cat "$TMPFILE" | grep 'token=' | sed 's/.*token=//'` + +"$NODE" nest_get_snapshoturl_by_token.js "$TOKEN" diff --git a/utils/nest/nest_get_snapshoturl_by_token.js b/utils/nest/nest_get_snapshoturl_by_token.js new file mode 100644 index 0000000..00e04b4 --- /dev/null +++ b/utils/nest/nest_get_snapshoturl_by_token.js @@ -0,0 +1,40 @@ +var Client = require('node-rest-client').Client; + +var ACCESSTOKEN=''; +const NEST_API_URL = 'https://developer-api.nest.com'; + +function usage() { + if (process.argv.length >= 2) { + console.log("Usage: "+process.argv[0]+" "+process.argv[1]+" "); + } else if (process.argv.length >= 1) { + console.log("Usage: "+process.argv[0]+" "); + } else { + console.log("Usage: node nest_get_snapshoturl_by_token.js "); + } +} + +if (process.argv.length <= 2) { + usage(); + process.exit(0); +} + +ACCESSTOKEN=process.argv[2]; + +var client = new Client(); +var args = { + headers: { + "Authorization": 'Bearer ' + ACCESSTOKEN + } +}; + +client.get(NEST_API_URL, args, function (data, response) { + var cameras=data.devices.cameras; + + for (i=0; i { + PINCODE = answer; + + var options = { + "method": "POST", + "hostname": 
"api.home.nest.com", + "port": null, + "path": "/oauth2/access_token", + "headers": { + "content-type": "application/x-www-form-urlencoded" + } + }; + + var req = http.request(options, function (res) { + var chunks = []; + + res.on("data", function (chunk) { + chunks.push(chunk); + }); + + res.on("end", function () { + var body = Buffer.concat(chunks); + var bodyStr = body.toString(); + + ACCESSTOKEN=JSON.parse(bodyStr); + + var token = ACCESSTOKEN.access_token; + console.log("token="+token); + + }); + }); + + req.write(qs.stringify({ code: PINCODE, + client_id: PRODUCTID, + client_secret: PRODUCTSECRET, + grant_type: 'authorization_code' })); + req.end(); + rl.close(); +}); + diff --git a/utils/nest/nestcamera.md b/utils/nest/nestcamera.md new file mode 100644 index 0000000..4796104 --- /dev/null +++ b/utils/nest/nestcamera.md @@ -0,0 +1,96 @@ +Nest IP Camera libs +======================= + + +Nest IP Camera Introduction +------------------------ +Nest IP Camera is a really closed product. The setup program +for PC are only supported on Windows and MacOS. +If the camera is not connect to your home WiFi, you have to +use Windows or MacOS to let it connect before usage. + +Also the power charger is also important. The camera won't start unless +it connects to its official power charger. So make sure your camera is +connected to WiFi and is running. + + +APIs +------------------------ +There's no way to directly connect to the camera even it is in your local +lan. To access the snapshot from the camera, you have to use their *cloud* +API to obtain it. So basically everything is controlled by their cloud. + + +Old v2/mobile API +------------------------ +This library is inside npm. To install it, just use + +~~~ +npm install nest-api +~~~ + +However, this api is obsolete and no documents can be found. +But it still works partially. The way to use this API is provide your +username (normally e-mail address) and your password. + +~~~ +var NEST_USER='paulliu@dt42.io'; +var NEST_PASSWORD='************'; +var nestApi = new NestApi(NEST_USER, NEST_PASSWORD); + +nestApi.login(function(sessionData) { + console.log(sessionData); + nestApi.get(function(data) { + console.log(data); + nestApi.post({'path':'/v2/mobile/'+sessionData.user+'/quartz/CAMERAID/public_share_enabled', 'body':'true'}, function(data2) { + console.log(data2); + }); + }); +}); +~~~ + +But the post method seems cannot modify the public_shared_enabled flag. +I haven't try all of them. + + +v3 API +------------------------------------------- +This is an OAuth Restful API. You need to register your app/product first. +So please go to https://console.developers.nest.com/products +and register a product. The "support URL" field can be http://localhost because +we are not actually a web app. + +After that, you'll get 3 credentials. + + 1. Product ID + 2. Product Secret + 3. Authorization URL + +First, generate proper unique and secure STATE parameter and replace the +STATE in Authorization URL, show that URL to the user. + +The user will use that URL in the browser and get a PINCODE to you. + +Then use the PINCODE to get a token. + +By using that token, you can use all of the Rest APIs. + +We wrote 3 small scripts to show how to do this. +First, please replace the credentials provided from Nest into nest_get_token.js + +Each time you run nest_get_token.js you'll be shown an URL and waiting you +to enter the PINCODE. You have to use your browser to open that URL and +obtain the PINCODE for the program to continue. 
The program will later +show you the token after you input the PINCODE. + +Then you can run nest_get_snapshoturl_by_token.js and provide the token +obtained above to get the snapshot URL. + +We also wrote a small script that does both steps for you. Just run +nest_get_snapshoturl.sh and it will call the two scripts above and +give you the snapshot URL of the Nest IP camera. + + +Streaming +--------------- +Currently there's no way to obtain a video stream from the Nest IP camera. diff --git a/utils/rpi3-temperature.sh b/utils/rpi3-temperature.sh new file mode 100755 index 0000000..1d0d3ef --- /dev/null +++ b/utils/rpi3-temperature.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Display CPU & GPU temperature in degrees C. +# +# Thanks badfur and yuusou for writing the script. +# https://www.raspberrypi.org/forums/viewtopic.php?t=34994 + +cpuTemp0=$(cat /sys/class/thermal/thermal_zone0/temp) +cpuTemp1=$(($cpuTemp0/1000)) +cpuTemp2=$(($cpuTemp0/100)) +cpuTempM=$(($cpuTemp2 % $cpuTemp1)) + +gpuTemp0=$(/opt/vc/bin/vcgencmd measure_temp) +gpuTemp0=${gpuTemp0//\'/º} +gpuTemp0=${gpuTemp0//temp=/} + +echo CPU Temp: $cpuTemp1"."$cpuTempM"ºC" +echo GPU Temp: $gpuTemp0 diff --git a/utils/xmlTotxt.py b/utils/xmlTotxt.py new file mode 100644 index 0000000..ee2f022 --- /dev/null +++ b/utils/xmlTotxt.py @@ -0,0 +1,158 @@ +""" + +$python xmlTotxt.py -n $FOLDER -u $LABELME_USER + --classes $CLASS_1 $CLASS_2 $CLASS_3... + +FOLDER: LabelMe project folder name +LABELME_USER: LabelMe user name +CLASS_i: Class labels defined on LabelMe + +""" +import argparse +import logging +import os +import xml.etree.ElementTree as ET + +from os.path import join +from shutil import copyfile + + +def convert(size, in_x, in_y): + dw = 1./size[0] + dh = 1./size[1] + x = (in_x[0] + in_x[1])/2.0 + y = (in_y[0] + in_y[1])/2.0 + w = in_x[1] - in_x[0] + h = in_y[1] - in_y[0] + x = x*dw + w = w*dw + y = y*dh + h = h*dh + return (x, y, w, h) + + +def convert_annotation(in_dir, out_dir, image_id, out_id, classes): + in_file = open("%s/%s.xml" % (in_dir, image_id)) + o_file = open(out_dir + "/%s.txt" % out_id, "w") + tree = ET.parse(in_file) + root = tree.getroot() + size = root.find("imagesize") + h = float(size.find("nrows").text) + w = float(size.find("ncols").text) + + for obj in root.iter("object"): + X = [] + Y = [] + cls = obj.find("name").text + if cls not in classes: + logging.debug("%s is not in the selected class" % cls) + continue + cls_id = classes.index(cls) + for pt in obj.find("polygon").findall("pt"): + X.append(float(pt.find("x").text)) + Y.append(float(pt.find("y").text)) + if (len(X) < 2 or len(Y) < 2): + logging.warning("%s doesn't have sufficient info, ignore" % cls) + continue + X = list(set(X)) + X.sort() + Y = list(set(Y)) + Y.sort() + bb = convert((w, h), X, Y) + o_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + "\n") + + +def find_output_id(out_img_dir, image_id, suffix): + counter = 0 + out_img_path = join(out_img_dir, image_id + "." + suffix) + out_id = image_id + while os.path.exists(out_img_path): + logging.info("%s exists" % out_img_path) + counter += 1 + out_id = image_id + "_" + str(counter) + out_img_path = join(out_img_dir, out_id + "." + suffix) + return out_id, out_img_path + + +def main(): + + parser = argparse.ArgumentParser( + description="Convert LabelMe XML annotations to the txt format used by darknet."
+ ) + parser.add_argument( + "-v", "--verbosity", action="count", + help="increase output verbosity" + ) + parser.add_argument( + "-r", "--root", type=str, default=None, + help="Specify the root directory (default: PWD)" + ) + parser.add_argument( + "-n", "--name", type=str, default="youtube_09", + help="Specify the name of the original video (default: youtube_09)" + ) + parser.add_argument( + "-u", "--user", type=str, default="V", + help="Specify the username (default: V)" + ) + parser.add_argument( + "-o", "--outdir", type=str, default="test2017", + help="Output dir in root, must end with 2017 (default: test2017)" + ) + parser.add_argument( + "--classes", nargs="+", type=str, default=["fighting", "dog"], + help="Classes to be trained. Default: [fighting, dog]" + ) + parser.add_argument( + "-d", "--delete", action="store_true", + help="Use this option to clean up train.txt" + ) + + args = parser.parse_args() + + log_level = logging.WARNING + if args.verbosity == 1: + log_level = logging.INFO + elif args.verbosity >= 2: + log_level = logging.DEBUG + logging.basicConfig(level=log_level, + format="[xmlTotxt: %(levelname)s] %(message)s") + + logging.info(args.classes) + + if args.root is None: + args.root = os.getcwd() + in_dir = join(args.root, "Annotations", "users", + args.user, args.name) + in_img_dir = join(args.root, "Images", "users", + args.user, args.name) + out_dir = join(args.root, args.outdir, "labels") + out_img_dir = join(args.root, args.outdir, "JPEGImages") + + if not os.path.exists(out_dir): + os.makedirs(out_dir) + + if not os.path.exists(out_img_dir): + os.makedirs(out_img_dir) + + foutput = join(args.root, "train.txt") + if args.delete: + output = open(foutput, "w") + else: + output = open(foutput, "a") + + for _img_path in os.listdir(in_img_dir): + img_path = os.path.join(in_img_dir, _img_path) + suffix = os.path.basename(_img_path).split(".")[1] + logging.debug("Find %s" % img_path) + image_id = os.path.basename(_img_path).split(".")[0] + out_id, out_img_path = find_output_id(out_img_dir, image_id, suffix) + convert_annotation(in_dir, out_dir, image_id, out_id, args.classes) + logging.info("Copy %s to %s" % (img_path, out_img_path)) + copyfile(img_path, out_img_path) + + output.write(out_img_path + "\n") + + +if __name__ == '__main__': + main()
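A note on the detection result format introduced above: the patched `draw_detections()` in `patch/01-detection-backend.patch` writes one line per detected object as `label confidence x y width height`, and the new `parseDarknet()`/`darknetToJSON()` helpers in `inference/agent.js` turn those lines into JSON before publishing them. A minimal standalone sketch of that conversion follows; the sample detection line is made up for illustration.

```javascript
// Standalone sketch of the conversion done in inference/agent.js.
// Input format: "label confidence x y width height", where the label
// may contain spaces (e.g. "cell phone") and x/y are the top-left corner.
const parseDarknet = (str) => {
  const elements = str.split(' ');
  const label = elements.slice(0, elements.length - 5).join(' ');
  const [confidence, x, y, width, height] = elements.slice(elements.length - 5);
  return {
    label: label,
    confidence: parseFloat(confidence),
    top: parseInt(y),
    bottom: parseInt(y) + parseInt(height),
    left: parseInt(x),
    right: parseInt(x) + parseInt(width),
  };
};

// Hypothetical result line, as written by the patched darknet backend.
console.log(parseDarknet('cell phone 0.61 420 150 88 160'));
// -> { label: 'cell phone', confidence: 0.61, top: 150, bottom: 310, left: 420, right: 508 }
```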
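The data collector and LINE notifier added above are configured entirely through `config.js`. A hedged example of the relevant fields is shown below; every value is a placeholder, not a real path or credential.

```javascript
// config.js -- placeholder values for illustration only.
config.storageDirPath = '/var/ramfs/berrynet-data';            // directory for data_collector.js output
config.ipcameraSnapshot = 'http://192.168.1.42/snapshot.jpg';  // used by snapshot_ipcam and Nest streaming
config.boardcameraImageWidth = 640;
config.boardcameraImageHeight = 480;

// LINE notification used by line.js; obtain these from the LINE developers console.
config.LINETargetUserID = 'U0123456789abcdef0123456789abcdef';
config.LINEChannelSecret = 'YOUR_CHANNEL_SECRET';
config.LINEChannelAccessToken = 'YOUR_CHANNEL_ACCESS_TOKEN';
```

With a storage directory set, `data_collector.js` writes three files per detection, named by an ISO timestamp: the raw snapshot (`<time>.jpg`), the snapshot with bounding boxes (`<time>-detection.jpg`), and the parsed result (`<time>-detection.json`).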
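Since detection results are now also published as JSON on `config.topicJSONInferenceResult`, external tools can consume them over MQTT without touching the dashboard. A minimal subscriber sketch using the same `mqtt` package as the rest of the gateway is shown below; the topic string assumes the default `berrynet/` topic base, and the file name is hypothetical.

```javascript
// subscribe_results.js -- hypothetical consumer, not part of this patch.
const mqtt = require('mqtt');
const client = mqtt.connect('mqtt://localhost:1883');

client.on('connect', () => {
  // padTopicBase('data/jsonInferenceResult') with the default topic base.
  client.subscribe('berrynet/data/jsonInferenceResult');
});

client.on('message', (topic, message) => {
  // message is a Buffer holding the JSON array published by inference/agent.js.
  const detections = JSON.parse(message.toString());
  detections.forEach((d) => {
    console.log(`${d.label} ${(d.confidence * 100).toFixed(0)}% at (${d.left}, ${d.top})`);
  });
});
```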