Commit: Split recovered videos by 10min

sdunesme committed Oct 26, 2022
1 parent 848f463 · commit 3667de7
Showing 5 changed files with 67 additions and 48 deletions.
8 changes: 8 additions & 0 deletions README.md
@@ -71,6 +71,14 @@ flask db upgrade # At each woodcamrm version upgrade
 flask run
 ```
 
+## Archive the 10min video files on an external server with a simple cron job on the archiving server
+
+First, copy your SSH public key to the woodcam-rm server. Then edit your crontab with ```crontab -e```.
+
+```
+*/10 5-22 * * * rsync <woodcam-rm server>:<path to app repository>/videos/<station common_name>/archives/*.avi <archiving path>/"$(date +'\%Y-\%m-\%d')"/ >> /var/log/woodcamrm-archiving.log 2>&1
+```
+
 ## Deploy video archives compression on another server
 
 Video compression can be CPU intensive. To keep the main Woodcam RM application light, you can offload this task to another server.
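Note: the `%` signs in the cron line are escaped as `\%` because cron treats an unescaped `%` as a newline. If you prefer a script to a raw crontab entry, here is a minimal Python sketch of the same dated-directory copy; the paths and station name are placeholders, and it assumes the woodcam-rm archives directory is reachable as a local path (e.g. over an NFS or sshfs mount) rather than via rsync over SSH.

```python
# Hypothetical stand-in for the cron line above: copy today's 10min
# clips into a YYYY-mm-dd folder. SRC and DST are placeholder paths.
import glob
import os
import shutil
from datetime import date

SRC = "/mnt/woodcam-rm/videos/my_station/archives"  # assumption: remote dir mounted locally
DST = os.path.join("/srv/video-archives", date.today().isoformat())

os.makedirs(DST, exist_ok=True)
for clip in glob.glob(os.path.join(SRC, "*.avi")):
    shutil.copy2(clip, DST)  # preserves mtimes, roughly like rsync -t
```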
10 changes: 3 additions & 7 deletions docker-compose.dev.yaml
@@ -33,7 +33,7 @@ services:
       - "21:21"
       - "21000-21010:21000-21010"
     volumes:
-      - video-data:/data
+      - /tmp/woodcamrm:/data
 
   app:
     build: .
@@ -46,7 +46,7 @@
       TZ: $SCHEDULER_TIMEZONE
     volumes:
       - ./.env:/app/.env
-      - video-data:/data
+      - /tmp/woodcamrm:/data
       - ./:/app
      - prometheus-config:/etc/prometheus
       - grafana-config:/etc/grafana
@@ -62,7 +62,7 @@
       TZ: $SCHEDULER_TIMEZONE
     volumes:
       - ./.env:/app/.env
-      - video-data:/data
+      - /tmp/woodcamrm:/data
       - ./:/app
 
   prometheus:
@@ -137,7 +137,3 @@ volumes:
   prometheus-config:
   grafana-data:
   grafana-config:
-  video-data:
-    driver_opts:
-      type: tmpfs
-      device: tmpfs
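The tmpfs-backed `video-data` named volume is dropped in favor of a `/tmp/woodcamrm` bind mount, presumably so the recorded files are visible on the host (for instance to the cron/rsync archiving described in the README) instead of living only inside Docker's volume namespace.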
2 changes: 1 addition & 1 deletion woodcamrm/__init__.py
@@ -156,7 +156,7 @@ def save_video_file(filepath, rtsp_url, station_id):
         archive_output = cv2.VideoWriter(archive_file, fourcc, 3, (int(width),int(height)))
 
         archive_timeout = time.time() + 600
-        logger.debug(f"starting {archive_file} recording")
+        logger.warning(f"starting {archive_file} recording")
         logger.debug(f"archive file timeout: {datetime.fromtimestamp(archive_timeout).strftime('%Y-%m-%d %H:%M:%S')}")
         while time.time() < archive_timeout:
             videos_number+=1
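The 600-second timeout visible in this hunk is what caps each archive file at 10 minutes. Below is a minimal sketch of that rotation pattern, assuming an already-opened `cv2.VideoCapture`; the MJPG fourcc is an assumption, since the codec is not shown in the hunk.

```python
import time
import cv2

def record_one_archive(capture: cv2.VideoCapture, archive_file: str, fps: int = 3) -> None:
    """Write frames to archive_file for at most 10 minutes, then stop."""
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")  # assumption: actual codec not shown in the diff
    writer = cv2.VideoWriter(archive_file, fourcc, fps, (width, height))

    archive_timeout = time.time() + 600  # one 10min file, as in the diff
    while time.time() < archive_timeout:
        ok, frame = capture.read()
        if not ok:
            break  # stream dropped; the real app handles reconnection elsewhere
        writer.write(frame)
    writer.release()
```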
82 changes: 48 additions & 34 deletions woodcamrm/api_endpoints.py
@@ -1,8 +1,10 @@
 import os
 import requests
-import datetime
+import urllib3
+import pytz
 import simplejson as json
 
+from datetime import datetime, timedelta
 from flask_httpauth import HTTPBasicAuth
 from werkzeug.security import check_password_hash
 from requests.auth import HTTPDigestAuth
@@ -68,22 +70,25 @@ def get(self):
 class DataRecovery(Resource):
     decorators = [auth.login_required]
 
-    # @datarec_ns.doc(description='Download local camera record on the WoodCam-RM server',
-    #                 params={
-    #                     'station': 'Station ID',
-    #                     'from_date': 'From datetime. Accepted format: YYYY-mm-ddTHH:MM:SS+ZZ:ZZ (example: 2018-12-21T00:00:01+02:00)',
-    #                     'to_date': 'To datetime. Accepted format: YYYY-mm-ddTHH:MM:SS+ZZ:ZZ (example: 2018-12-21T00:00:01+02:00)'
-    #                 }
-    #                 )
-    @datarec_ns.doc(description='Download local camera record on the WoodCam-RM server')
+    @datarec_ns.doc(description='Download local camera record on the WoodCam-RM server',
+                    params={
+                        'station': 'Station common name',
+                        'from_date': 'Download record from datetime. Accepted format: YYYY-mm-ddTHH:MM:SS+ZZ:ZZ (example: 2018-12-21T00:00:01+02:00)',
+                        'to_date': 'Download record to datetime. Accepted format: YYYY-mm-ddTHH:MM:SS+ZZ:ZZ (example: 2018-12-21T00:00:01+02:00)'
+                    }
+                    )
     @api.expect(clip_parser)
     def post(self) -> None:
 
+        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
         args = clip_parser.parse_args()
         station = args['station']
         from_date = args['from_date']
         to_date = args['to_date']
+
+        recovered_duration = 10
 
         # Check if station exists
         st = Stations.query.filter(Stations.common_name == station).first()
         if not st:
@@ -94,51 +99,60 @@ def post(self) -> None:
         if not os.path.isdir(recovery_dir):
             os.mkdir(recovery_dir)
 
-        # Get list of local recordings on cameras with AXIS API
+        # Generate list of 10min video files
+        def datetime_range(start, end):
+            current = start
+            while current < end:
+                yield current
+                current += timedelta(minutes=recovered_duration)
+
+        starts = [dt for dt in datetime_range(from_date, to_date)]
+
+        # Get list of recordings stored on cameras with AXIS API
         rep = requests.get(f"https://{st.ip}:{st.camera_port}/axis-cgi/record/list.cgi?recordingid=all",
                            auth=HTTPDigestAuth(st.api_user, st.api_password),
                            verify=False)
 
         tree = ElementTree.fromstring(rep.content)
         recordings = [rec.attrib for rec in tree.findall("./recordings/recording")]
 
-        # Check each record if it correspond to start or end of recovery request
+        # Convert string attributes to datetime
         for record in recordings:
             for timekey in ('starttime', 'starttimelocal', 'stoptime', 'stoptimelocal'):
                 if record[timekey]:
-                    record[timekey] = datetime.datetime.strptime(record[timekey], '%Y-%m-%dT%H:%M:%S.%f%z')
+                    record[timekey] = datetime.strptime(record[timekey], '%Y-%m-%dT%H:%M:%S.%f%z')
 
-            if record['starttime'].day == from_date.day:
-                start_record = record
-
-            if record['stoptime'] and record['stoptime'].day == to_date.day:
-                stop_record = record
+        # Start loop for each 10min record output
+        recovered_list = []
+        for start_time in starts:
+            stop_time = start_time + timedelta(minutes=recovered_duration)
 
-        # If all the recovery request is contained inside a single record
-        if start_record == stop_record:
+            record = next(rec for rec in recordings if rec["starttime"].day == start_time.day)
 
             r = requests.get(f"https://{st.ip}:{st.camera_port}/axis-cgi/record/export/exportrecording.cgi",
                              auth=HTTPDigestAuth(st.api_user, st.api_password),
                              verify=False,
                              stream=True,
                              params={
                                  'schemaversion': 1,
-                                 'recordingid': start_record['recordingid'],
-                                 'diskid': start_record['diskid'],
+                                 'recordingid': record['recordingid'],
+                                 'diskid': record['diskid'],
                                  'exportformat': 'matroska',
-                                 'starttime': from_date.strftime('%Y-%m-%dT%H:%M:%SZ'),
-                                 'stoptime': to_date.strftime('%Y-%m-%dT%H:%M:%SZ')
+                                 'starttime': start_time.astimezone(pytz.UTC).strftime('%Y-%m-%dT%H:%M:%SZ'),
+                                 'stoptime': stop_time.astimezone(pytz.UTC).strftime('%Y-%m-%dT%H:%M:%SZ')
                              })
 
-            recovered_archive_file = os.path.join(recovery_dir, f"recovered_{from_date.strftime('%Y-%m-%d_%H-%M-%S')}.mkv")
-            size = open(recovered_archive_file, 'wb').write(r.content)
-            r.close()
-
-        else:
-            return {'error': 'recovery request need to download multiple records, which is not supported yet'}, 400
-
-        return {'recovered_archive_file': recovered_archive_file,
-                'size': size,
+            if r.status_code == 200:
+                recovered_archive_file = os.path.join(recovery_dir, f"recovered_{start_time.strftime('%Y-%m-%d_%H-%M-%S')}.mkv")
+
+                with open(recovered_archive_file, 'wb') as output:
+                    output.write(r.content)
+
+                r.close()
+
+                recovered_list.append(recovered_archive_file)
+
+        return {'recovered_archive_files': recovered_list,
                 'retention': 3600}
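The core of this change is the `datetime_range` generator: a recovery window is now split into fixed 10-minute chunks, each exported from the camera as its own Matroska file. A quick standalone check of that chunking, with names mirroring the diff and made-up dates:

```python
from datetime import datetime, timedelta

recovered_duration = 10  # minutes, as in the diff

def datetime_range(start, end):
    current = start
    while current < end:
        yield current
        current += timedelta(minutes=recovered_duration)

from_date = datetime(2022, 10, 26, 8, 0)   # hypothetical recovery window
to_date = datetime(2022, 10, 26, 8, 35)

starts = list(datetime_range(from_date, to_date))
print([dt.strftime("%H:%M") for dt in starts])
# ['08:00', '08:10', '08:20', '08:30'] -- note the last chunk's stop time
# (08:40) overshoots to_date; the export request simply covers that span.
```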
13 changes: 7 additions & 6 deletions woodcamrm/jobs.py
@@ -300,12 +300,13 @@ def records_check():
 
         # Remove archive clips older than 1hour
         archives_dir = os.path.join(st.storage_path, 'archives')
-        old_clips = [os.path.join(archives_dir, f) for f in os.listdir(archives_dir)
-                     if time.time() - os.stat(os.path.join(archives_dir, f)).st_mtime >= (60*60)
-                     and not os.path.isdir(os.path.join(archives_dir, f))]
-
-        for clip in old_clips:
-            os.remove(clip)
+        if os.path.isdir(archives_dir):
+            old_clips = [os.path.join(archives_dir, f) for f in os.listdir(archives_dir)
+                         if time.time() - os.stat(os.path.join(archives_dir, f)).st_mtime >= (60*60)
+                         and not os.path.isdir(os.path.join(archives_dir, f))]
+
+            for clip in old_clips:
+                os.remove(clip)
 
         # Remove recovered clips older than 1hour
         recovery_dir = os.path.join(st.storage_path, 'recovery')
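This hunk only guards the existing cleanup with an `os.path.isdir` check, so stations that have not recorded anything yet no longer break the job. The same logic as a standalone sketch (the directory path in the usage note is a placeholder):

```python
import os
import time

def purge_old_clips(archives_dir: str, max_age_s: int = 60 * 60) -> None:
    """Delete plain files older than max_age_s; skip missing dirs and subdirs."""
    if not os.path.isdir(archives_dir):
        return  # station may not have produced any archives yet
    for name in os.listdir(archives_dir):
        path = os.path.join(archives_dir, name)
        if os.path.isdir(path):
            continue  # leave subdirectories alone, as the diff does
        if time.time() - os.stat(path).st_mtime >= max_age_s:
            os.remove(path)

# e.g. purge_old_clips("/data/my_station/archives")
```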
