OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2017 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 // Private API for receiving real-time media perception information. | |
6 [platforms=("chromeos")] | |
7 namespace mediaPerceptionPrivate { | |
8 enum Status { | |
9 // An error occurred. | |
10 ERROR, | |
11 | |
12 // An attempt to reach the media analytics process timed out. | |
13 TIMEOUT, | |
14 | |
15 // Media analytics process waiting to be launched. | |
16 UNINITIALIZED, | |
17 | |
18 // Analytics process running and media processing pipeline started, | |
19 // but it is not yet receiving image frames. This is a transitional state | |
20 // between SUSPENDED and RUNNING for the time it takes to warm up the | |
21 // media processing pipeline, which can take anywhere from a few seconds | |
22 // to a minute. | |
23 // Note: STARTED is the initial reply to SetState RUNNING. | |
24 STARTED, | |
25 | |
26 // Analytics process running and media processing pipeline ingesting image | |
27 // frames. At this point, MediaPerception signals should be coming over | |
28 // D-Bus. | |
29 RUNNING, | |
30 | |
31 // Analytics process is running and ready to be set to state RUNNING. | |
32 // The D-Bus communications are enabled but the media processing pipeline | |
33 // is suspended. | |
34 SUSPENDED | |
35 }; | |
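Since STARTED is only a transitional reply to a RUNNING request, a caller typically polls $(ref:getState) until warm-up finishes. Below is a minimal caller-side sketch (TypeScript, not part of this IDL); it assumes the API surfaces as `chrome.mediaPerceptionPrivate` with string-valued enums, as is usual for extension IDL bindings.

```ts
// Assumed shape of the generated chrome.* binding (hypothetical; the real
// bindings are generated by the extensions system from this IDL).
declare namespace chrome.mediaPerceptionPrivate {
  interface State { status: string; }
  function getState(callback: (state: State) => void): void;
  function setState(state: State, callback: (state: State) => void): void;
}

// Request RUNNING, then poll until the STARTED warm-up phase completes.
function waitUntilRunning(
    pollMs: number, done: (state: {status: string}) => void): void {
  chrome.mediaPerceptionPrivate.setState(
      {status: 'RUNNING'}, function poll(state) {
        if (state.status === 'RUNNING') {
          done(state);  // Pipeline is ingesting frames.
        } else if (state.status === 'STARTED') {
          // Warm-up can take anywhere from seconds to a minute; keep polling.
          setTimeout(
              () => chrome.mediaPerceptionPrivate.getState(poll), pollMs);
        } else {
          console.error('Unexpected status while starting:', state.status);
        }
      });
}
```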
36 | |
37 // The system and configuration state of the analytics process and v4lplugin. | |
38 dictionary State { | |
39 Status status; | |
40 | |
41 // Optional $(ref:setState) parameter. Specifies the video device the media | |
42 // analytics process should open while the media processing pipeline is | |
43 // starting. To set this parameter, status has to be RUNNING. | |
44 DOMString? deviceContext; | |
45 }; | |
46 | |
47 dictionary Point { | |
48 // The horizontal distance from the top left corner of the image. | |
49 double? x; | |
50 | |
51 // The vertical distance from the top left corner of the image. | |
52 double? y; | |
53 }; | |
54 | |
55 dictionary BoundingBox { | |
56 // Specifies whether the points are normalized to the size of the image. | |
57 boolean? normalized; | |
58 | |
59 // The two points that define the corners of a bounding box. | |
60 Point? topLeft; | |
61 Point? bottomRight; | |
62 }; | |
63 | |
64 enum EntityType { | |
65 UNSPECIFIED, | |
66 FACE, | |
67 PERSON | |
68 }; | |
69 | |
70 dictionary Entity { | |
71 // A unique id associated with the detected entity, which can be used to | |
72 // track the entity over time. | |
73 long? id; | |
74 | |
75 EntityType? type; | |
76 | |
77 // Minimum box which captures entire detected entity. | |
78 BoundingBox? boundingBox; | |
79 | |
80 // A value for the quality of this detection. | |
81 double? confidence; | |
82 }; | |
83 | |
84 // The set of computer vision metadata for an image frame. | |
85 dictionary FramePerception { | |
86 long? frameId; | |
87 | |
88 long? frameWidthInPx; | |
89 long? frameHeightInPx; | |
90 | |
91 // The timestamp associated with the frame (when it is received by the | |
92 // analytics process). | |
93 double? timestamp; | |
94 | |
95 // The list of entities detected in this frame. | |
96 Entity[]? entities; | |
97 }; | |
98 | |
99 dictionary MediaPerception { | |
100 // The time the media perception data was emitted by the media processing | |
101 // pipeline. This value will be greater than the timestamp stored within | |
102 // the FramePerception dictionary and the difference between them can be | |
103 // viewed as the processing time for a single frame. | |
104 double? timestamp; | |
105 | |
106 // An array of framePerceptions. | |
107 FramePerception[]? framePerceptions; | |
108 }; | |
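As a worked illustration of the relationship between the two timestamps, here is a small hypothetical helper that derives per-frame processing time; it assumes both timestamps use the same unit, which this IDL does not specify.

```ts
// Structural stand-ins for the dictionaries above (hypothetical names).
interface FramePerceptionLike { timestamp?: number; }
interface MediaPerceptionLike {
  timestamp?: number;
  framePerceptions?: FramePerceptionLike[];
}

// Processing time per frame = pipeline emission time - frame receipt time.
// (Assumes both timestamps share the same unit.)
function processingTimes(mp: MediaPerceptionLike): number[] {
  if (mp.timestamp === undefined) return [];
  const emitted = mp.timestamp;
  return (mp.framePerceptions ?? [])
      .filter(fp => fp.timestamp !== undefined)
      .map(fp => emitted - fp.timestamp!);
}
```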
109 | |
110 enum ImageFormat { | |
111 UNSPECIFIED, | |
112 RGB, | |
113 PNG, | |
114 JPEG | |
115 }; | |
116 | |
117 dictionary ImageFrame { | |
118 long? width; | |
119 long? height; | |
120 | |
121 ImageFormat? format; | |
122 | |
123 long? dataLength; | |
124 | |
125 // The bytes of the image frame. | |
126 ArrayBuffer? frame; | |
127 }; | |
128 | |
129 dictionary PerceptionSample { | |
130 // The video analytics FramePerception for the associated image frame | |
131 // data. | |
132 FramePerception? framePerception; | |
133 | |
134 // The image frame data for the associated FramePerception object. | |
135 ImageFrame? imageFrame; | |
136 }; | |
137 | |
138 dictionary Diagnostics { | |
139 // A buffer of image frames and the associated video analytics information | |
140 // that can be used to diagnose a malfunction. | |
141 PerceptionSample[]? perceptionSamples; | |
142 }; | |
143 | |
144 callback StateCallback = void(State state); | |
145 | |
146 callback DiagnosticsCallback = void(Diagnostics diagnostics); | |
147 | |
148 interface Functions { | |
149 // Get the status of the media perception process. | |
150 // |callback| : The current state of the system. | |
151 static void getState(StateCallback callback); | |
152 | |
153 // Set the desired state of the system. | |
154 // |state| : A dictionary with the desired new state. The only settable | |
155 // states are RUNNING and SUSPENDED. | |
156 // |callback| : The State of the system after setting it. Can be used to | |
157 // verify the state was set as desired. | |
158 static void setState(State state, StateCallback callback); | |
159 | |
160 // Get a diagnostics buffer out of the video analytics process. | |
161 // |callback| : Returns a Diagnostics dictionary object. | |
162 static void getDiagnostics(DiagnosticsCallback callback); | |
163 }; | |
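For illustration, a hedged usage sketch of the three functions above; the chrome.* binding shape is an assumption (declared inline), and '/dev/video0' is a placeholder deviceContext, since this IDL does not define the string's format.

```ts
// Assumed generated binding surface (hypothetical declarations).
declare namespace chrome.mediaPerceptionPrivate {
  interface State { status: string; deviceContext?: string; }
  interface Diagnostics { perceptionSamples?: unknown[]; }
  function getState(callback: (state: State) => void): void;
  function setState(state: State, callback: (state: State) => void): void;
  function getDiagnostics(callback: (d: Diagnostics) => void): void;
}

// Start the pipeline on a specific video device. Per the State dictionary,
// deviceContext is only settable together with status RUNNING.
chrome.mediaPerceptionPrivate.setState(
    {status: 'RUNNING', deviceContext: '/dev/video0'},  // Placeholder device.
    (state) => {
      // STARTED is the documented initial reply to a RUNNING request.
      console.log('Status after setState:', state.status);
    });

// Later, pull the diagnostics buffer to investigate a malfunction.
chrome.mediaPerceptionPrivate.getDiagnostics((diagnostics) => {
  console.log('Samples:', diagnostics.perceptionSamples?.length ?? 0);
});
```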
164 | |
165 interface Events { | |
166 // Fired when media perception information is received from the media | |
167 // analytics process. | |
168 // |mediaPerception| : The dictionary which contains a dump of everything | |
169 // the analytics process has detected or determined from the incoming media | |
170 // streams. | |
171 static void onMediaPerception(MediaPerception mediaPerception); | |
172 }; | |
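And a hypothetical listener sketch for the event above, assuming the conventional chrome.* event surface (onMediaPerception.addListener) that the extensions system generates for IDL events.

```ts
// Assumed event surface and minimal dictionary shapes (hypothetical).
declare namespace chrome.mediaPerceptionPrivate {
  interface Entity { type?: string; }
  interface FramePerception { frameId?: number; entities?: Entity[]; }
  interface MediaPerception { framePerceptions?: FramePerception[]; }
  const onMediaPerception: {
    addListener(callback: (mp: MediaPerception) => void): void;
  };
}

// Count detected faces in each frame as perceptions stream in.
chrome.mediaPerceptionPrivate.onMediaPerception.addListener((mp) => {
  for (const fp of mp.framePerceptions ?? []) {
    const faces = (fp.entities ?? []).filter(e => e.type === 'FACE').length;
    console.log(`Frame ${fp.frameId}: ${faces} face(s)`);
  }
});
```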
173 }; | |