forked from SonsOfTone/RaspberrIP-Camera
-
Notifications
You must be signed in to change notification settings - Fork 0
/
MyDeviceSource.cpp
221 lines (185 loc) · 9.09 KB
/
MyDeviceSource.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2013 Live Networks, Inc. All rights reserved.
// A template for a MediaSource encapsulating an audio/video input device
//
// NOTE: Sections of this code labeled "%%% TO BE WRITTEN %%%" are incomplete, and need to be written by the programmer
// (depending on the features of the particular device).
// Implementation
//Modified By Antoine DUBOST-BULARD 25/10/2014
//Based on DeviceSource.cpp from the LIVE555 RTSP library
#include "MyDeviceSource.hh"
#include <GroupsockHelper.hh> // for "gettimeofday()"
#include <semaphore.h>
// Producer/consumer handshake between the capture thread and the
// LIVE555 delivery path:
//  - SemaphoreCapture is posted by CaptureDone() when a new encoded
//    frame has been staged in H264Buffer; doGetNextFrame() waits on it.
//  - SemaphoreStreaming is posted by StreamingDone() once deliverFrame()
//    has consumed the staged frame; the capture side can wait on it
//    (via WaitStreamingDone()) before overwriting the buffer.
sem_t SemaphoreCapture;
sem_t SemaphoreStreaming;
// Staging buffer holding one encoded H.264 frame (1 MiB).
// NOTE(review): FeedStreamer() copies into this buffer; frames larger
// than 1 MiB would overflow it unless the caller guarantees the size.
char H264Buffer[1*1024*1024];
// Factory function: allocate a new MyDeviceSource bound to the given
// usage environment and device parameters. The caller (LIVE555
// framework) takes ownership and releases it via Medium::close().
MyDeviceSource* MyDeviceSource::createNew(UsageEnvironment& env,
                                          MyDeviceParameters params)
{
  MyDeviceSource* newSource = new MyDeviceSource(env, params);
  return newSource;
}
// Class-wide state shared by all instances:
//  - eventTriggerId: the single scheduler event trigger used to invoke
//    deliverFrame0() from the event loop (created lazily in the ctor).
//  - referenceCount: number of live instances, used to gate one-time
//    global init (ctor) and teardown (dtor).
EventTriggerId MyDeviceSource::eventTriggerId = 0;
unsigned MyDeviceSource::referenceCount = 0;
// Constructor: performs one-time global initialization on the first
// instance (semaphores + buffer bookkeeping) and lazily creates the
// shared scheduler event trigger used to deliver frames from the
// capture thread into the LIVE555 event loop.
MyDeviceSource::MyDeviceSource(UsageEnvironment& env,
MyDeviceParameters params)
: FramedSource(env), fParams(params) {
if (referenceCount == 0) {
// Any global initialization of the device would be done here:
//%%% TO BE WRITTEN %%%
// Both semaphores start at 0 (third argument), i.e. "not signaled".
sem_init(&SemaphoreCapture, 0, 0);
sem_init(&SemaphoreStreaming, 0, 0);
// Post SemaphoreStreaming once so the capture side is free to stage
// the very first frame before any delivery has happened.
StreamingDone();
// NOTE(review): ImageBufferSize is only reset for the first instance;
// confirm that is intended if multiple sources can be created.
ImageBufferSize = 0;
}
++referenceCount;
// Any instance-specific initialization of the device would be done here:
//%%% TO BE WRITTEN %%%
// We arrange here for our "deliverFrame" member function to be called
// whenever the next frame of data becomes available from the device.
//
// If the device can be accessed as a readable socket, then one easy way to do this is using a call to
// envir().taskScheduler().turnOnBackgroundReadHandling( ... )
// (See examples of this call in the "liveMedia" directory.)
//
// If, however, the device *cannot* be accessed as a readable socket, then instead we can implement it using 'event triggers':
// Create an 'event trigger' for this device (if it hasn't already been done):
if (eventTriggerId == 0) {
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
}
// Destructor: mirrors the constructor — per-instance teardown always,
// global teardown (reclaiming the shared event trigger) only when the
// last instance goes away.
MyDeviceSource::~MyDeviceSource() {
// Any instance-specific 'destruction' (i.e., resetting) of the device would be done here:
//%%% TO BE WRITTEN %%%
--referenceCount;
if (referenceCount == 0) {
// Any global 'destruction' (i.e., resetting) of the device would be done here:
//%%% TO BE WRITTEN %%%
// NOTE(review): the semaphores initialized in the constructor are not
// sem_destroy()'d here — confirm whether that cleanup is needed.
// Reclaim our 'event trigger'
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}
}
// Called by the downstream LIVE555 object when it wants the next frame.
// Blocks until the capture thread signals (via CaptureDone()) that a
// new encoded frame is staged in H264Buffer, then delivers it.
void MyDeviceSource::doGetNextFrame() {
// This function is called (by our 'downstream' object) when it asks for new data.
//printf("doGetNextFrame : Wait for event\n");
// NOTE(review): this sem_wait blocks the LIVE555 event-loop thread
// until the capture thread posts SemaphoreCapture; the whole server
// stalls if capture ever stops producing — confirm this is acceptable.
WaitCaptureDone();
//printf("doGetNextFrame : New Frame Received, size is : %d\n", ImageBufferSize);
// Note: If, for some reason, the source device stops being readable (e.g., it gets closed), then you do the following:
if (0 /* the source stops being readable */ /*%%% TO BE WRITTEN %%%*/) {
handleClosure(this);
return;
}
// If a new frame of data is immediately available to be delivered, then do this now:
// (Always true here: WaitCaptureDone() above only returns once a frame
// has been staged, so delivery can proceed unconditionally.)
if (1 /* a new frame of data is immediately available to be delivered*/ /*%%% TO BE WRITTEN %%%*/) {
//BufferReadyFlag = 0;
deliverFrame();
}
// No new data is immediately available to be delivered.  We don't do anything more here.
// Instead, our event trigger must be called (e.g., from a separate thread) when new data becomes available.
}
void MyDeviceSource::deliverFrame0(void* clientData) {
((MyDeviceSource*)clientData)->deliverFrame();
}
// Called by the capture side to stage one encoded H.264 frame for
// delivery.  Copies the frame into the global H264Buffer and records
// its size in ImageBufferSize.
//   Buffer     - pointer to the encoded frame data
//   BufferSize - size of the frame in bytes
void MyDeviceSource::FeedStreamer(char * Buffer, unsigned int BufferSize)
{
  // Bug fix: the original memcpy had no bounds check, so any frame
  // larger than sizeof(H264Buffer) (1 MiB) overflowed the global
  // buffer.  Clamp and warn instead.
  if (BufferSize > sizeof(H264Buffer)) {
    fprintf(stderr, "FeedStreamer: frame of %u bytes truncated to %u\n",
            BufferSize, (unsigned int)sizeof(H264Buffer));
    BufferSize = (unsigned int)sizeof(H264Buffer);
  }
  memcpy(H264Buffer, Buffer, BufferSize);
  ImageBufferSize = BufferSize;
}
// Signal that the streaming side has finished consuming the staged
// frame, releasing the capture thread blocked in WaitStreamingDone().
void MyDeviceSource::StreamingDone() {
  sem_post(&SemaphoreStreaming);
}
// Signal that the capture side has staged a new frame, releasing the
// delivery path blocked in WaitCaptureDone().
void MyDeviceSource::CaptureDone() {
  sem_post(&SemaphoreCapture);
}
// Block until the streaming side signals (via StreamingDone()) that the
// previously staged frame has been consumed.
void MyDeviceSource::WaitStreamingDone() {
  sem_wait(&SemaphoreStreaming);
}
// Block until the capture side signals (via CaptureDone()) that a new
// frame has been staged in H264Buffer.
void MyDeviceSource::WaitCaptureDone() {
  sem_wait(&SemaphoreCapture);
}
// Copies the staged H.264 frame from H264Buffer into the downstream
// object's buffer (fTo), stripping the leading 4 bytes (the Annex-B
// start code, which the framer does not want), then signals the
// capture thread and notifies the downstream reader.
void MyDeviceSource::deliverFrame() {
// This function is called when new frame data is available from the device.
// We deliver this data by copying it to the 'downstream' object, using the following parameters (class members):
// 'in' parameters (these should *not* be modified by this function):
// fTo: The frame data is copied to this address.
// (Note that the variable "fTo" is *not* modified.  Instead,
// the frame data is copied to the address pointed to by "fTo".)
// fMaxSize: This is the maximum number of bytes that can be copied
// (If the actual frame is larger than this, then it should
// be truncated, and "fNumTruncatedBytes" set accordingly.)
// 'out' parameters (these are modified by this function):
// fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
// fNumTruncatedBytes: Should be set iff the delivered frame would have been
// bigger than "fMaxSize", in which case it's set to the number of bytes
// that have been omitted.
// fPresentationTime: Should be set to the frame's presentation time
// (seconds, microseconds).  This time must be aligned with 'wall-clock time' - i.e., the time that you would get
// by calling "gettimeofday()".
// fDurationInMicroseconds: Should be set to the frame's duration, if known.
// If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need
// to set this variable, because - in this case - data will never arrive 'early'.
// Note the code below.
if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet
//WaitCaptureDone();
//fprintf(stderr, "Start Streaming\n");
// Skip the first 4 bytes of the staged frame (assumed to be a 4-byte
// start code — TODO confirm the encoder always emits exactly 4 bytes).
u_int8_t* newFrameDataStart = (u_int8_t*)(H264Buffer+4); //Remove 4 bytes because framer do not want them
// NOTE(review): if ImageBufferSize < 4 this unsigned subtraction wraps
// around to a huge value — confirm the capture side guarantees size >= 4.
unsigned int newFrameSize = ImageBufferSize-4; //Remove 4 bytes because framer do not want them
// Accept only the NAL header bytes this pipeline produces (per the
// commented-out diagnostics below): 0x25 = IDR, 0x21 = P, 0x27 = SPS,
// 0x28 = PPS.  Anything else is dropped.
if((newFrameDataStart[0] == 0x25) || (newFrameDataStart[0] == 0x21) || (newFrameDataStart[0] == 0x27) || (newFrameDataStart[0] == 0x28))
{
/*if(newFrameDataStart[0] == 0x25) fprintf(stderr, "I Image Type - size is %d %d\n", newFrameSize, fMaxSize);
else if(newFrameDataStart[0] == 0x21) fprintf(stderr, "P Image Type - size is %d %d\n", newFrameSize, fMaxSize);
else if(newFrameDataStart[0] == 0x27) fprintf(stderr, "SPS Image Type - size is %d %d\n", newFrameSize, fMaxSize);
else if(newFrameDataStart[0] == 0x28) fprintf(stderr, "PPS Image Type - size is %d %d\n", newFrameSize, fMaxSize);
else fprintf(stderr, "Unknown Image Type - size is %d %d\n", newFrameSize, fMaxSize);*/
//fprintf(stderr, "Frame 0x%x Size is %d %d\n", newFrameDataStart, newFrameSize, fMaxSize);
// Deliver the data here:
// Truncate if the frame exceeds what the downstream buffer can hold.
if (newFrameSize > fMaxSize) {
fprintf(stderr, "Frame Size is greater than Max Size\n");
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
} else {
fFrameSize = newFrameSize;
}
gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.
// If the device is *not* a 'live source' (e.g., it comes instead from a file or buffer), then set "fDurationInMicroseconds" here.
fDurationInMicroseconds = 0;
//printf("Deliver Frame 0x%x, with size = %d\n", newFrameDataStart, fFrameSize);
memmove(fTo, newFrameDataStart, fFrameSize);
}
else
{
// NOTE(review): on this dropped-packet path fFrameSize is NOT updated,
// yet afterGetting() is still called below — the downstream object may
// be handed a stale fFrameSize from the previous delivery.  Confirm
// whether fFrameSize should be set to 0 here.
fprintf(stderr, "Loosing Packet\n");
}
// Let the capture thread reuse H264Buffer.
StreamingDone();
// After delivering the data, inform the reader that it is now available:
FramedSource::afterGetting(this);
}
// The following code would be called to signal that a new frame of data has become available.
// This (unlike other "LIVE555 Streaming Media" library code) may be called from a separate thread.
// (Note, however, that "triggerEvent()" cannot be called with the same 'event trigger id' from different threads.
// Also, if you want to have multiple device threads, each one using a different 'event trigger id', then you will need
// to make "eventTriggerId" a non-static member variable of "MyDeviceSource".)
void signalNewFrameData() {
  TaskScheduler* ourScheduler = NULL; //%%% TO BE WRITTEN %%%
  MyDeviceSource* ourDevice = NULL; //%%% TO BE WRITTEN %%%
  // Sanity check: do nothing until the scheduler has been wired up.
  if (ourScheduler == NULL) return;
  ourScheduler->triggerEvent(MyDeviceSource::eventTriggerId, ourDevice);
}