OpenShot Library | libopenshot  0.4.0
CVStabilization.cpp
// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include <fstream>
#include <iomanip>
#include <iostream>

#include "CVStabilization.h"
#include "Exceptions.h"

#include "stabilizedata.pb.h"
#include <google/protobuf/util/time_util.h>

using namespace std;
using namespace openshot;
using google::protobuf::util::TimeUtil;

// Set default smoothing window value to compute stabilization
CVStabilization::CVStabilization(std::string processInfoJson, ProcessingController &processingController)
: processingController(&processingController){
    SetJson(processInfoJson);
    start = 1;
    end = 1;
}

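// Example usage (illustrative sketch only: the JSON keys match SetJsonValue() below and
// stabilizeClip()'s default arguments analyze the whole clip; the clip path, output path
// and smoothing value are hypothetical, and ProcessingController is assumed to be
// default-constructible):
//
//     ProcessingController controller;
//     CVStabilization stabilizer(
//         R"({"protobuf_data_path": "stabilization.data", "smoothing-window": 30})",
//         controller);
//     openshot::Clip clip("input.mp4");
//     stabilizer.stabilizeClip(clip);     // track features and compute smoothed transforms
//     stabilizer.SaveStabilizedData();    // write the protobuf file to protobuf_data_path
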
// Process clip and store necessary stabilization data
void CVStabilization::stabilizeClip(openshot::Clip& video, size_t _start, size_t _end, bool process_interval){

    if(error){
        return;
    }
    processingController->SetError(false, "");

    start = _start; end = _end;
    // Reset max and average transformation parameters
    avr_dx=0; avr_dy=0; avr_da=0; max_dx=0; max_dy=0; max_da=0;

    video.Open();
    // Save original video width and height
    cv::Size readerDims(video.Reader()->info.width, video.Reader()->info.height);

    size_t frame_number;
    if(!process_interval || end <= 1 || end-start == 0){
        // No valid interval given: convert the clip's trimmed start/end (in seconds)
        // into 1-based frame numbers using the reader's frame rate
        start = (int)(video.Start() * video.Reader()->info.fps.ToFloat()) + 1;
        end = (int)(video.End() * video.Reader()->info.fps.ToFloat()) + 1;
    }

    // Extract and track optical-flow features for each frame
    for (frame_number = start; frame_number <= end; frame_number++)
    {
        // Stop the feature tracker process
        if(processingController->ShouldStop()){
            return;
        }

        std::shared_ptr<openshot::Frame> f = video.GetFrame(frame_number);

        // Grab OpenCV Mat image
        cv::Mat cvimage = f->GetImageCV();
        // Resize frame to the original video width and height if they differ
        if(cvimage.size().width != readerDims.width || cvimage.size().height != readerDims.height)
            cv::resize(cvimage, cvimage, cv::Size(readerDims.width, readerDims.height));
        cv::cvtColor(cvimage, cvimage, cv::COLOR_RGB2GRAY);

        // If no features could be tracked, store a zero transform so every
        // frame transition still has an entry
        if(!TrackFrameFeatures(cvimage, frame_number)){
            prev_to_cur_transform.push_back(TransformParam(0, 0, 0));
        }

        // Update progress
        processingController->SetProgress(uint(100*(frame_number-start)/(end-start)));
    }

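    // With the per-frame deltas collected, derive the stabilizing transforms:
    // accumulate the deltas into a raw camera trajectory, smooth that trajectory with a
    // moving average, then compute corrected per-frame transforms that follow the
    // smoothed path (see SmoothTrajectory() and GenNewCamPosition() below).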
    // Calculate trajectory data
    std::vector <CamTrajectory> trajectory = ComputeFramesTrajectory();

    // Calculate and save smoothed trajectory data
    trajectoryData = SmoothTrajectory(trajectory);

    // Calculate and save transformation data
    transformationData = GenNewCamPosition(trajectoryData);

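    // Note: the values below are stored as fractions of the original frame size;
    // presumably this keeps the saved stabilization data independent of the output resolution.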
    // Normalize smoothed trajectory data
    for(auto &dataToNormalize : trajectoryData){
        dataToNormalize.second.x/=readerDims.width;
        dataToNormalize.second.y/=readerDims.height;
    }
    // Normalize transformation data
    for(auto &dataToNormalize : transformationData){
        dataToNormalize.second.dx/=readerDims.width;
        dataToNormalize.second.dy/=readerDims.height;
    }
}

// Track current frame features and find the relative transformation
bool CVStabilization::TrackFrameFeatures(cv::Mat frame, size_t frameNum){
    // Skip completely black frames
    if(cv::countNonZero(frame) < 1){
        return false;
    }

    // Initialize prev_grey on the first frame and wait for the next one
    if(prev_grey.empty()){
        prev_grey = frame;
        return true;
    }

    // Optical-flow feature vectors
    std::vector <cv::Point2f> prev_corner, cur_corner;
    std::vector <cv::Point2f> prev_corner2, cur_corner2;
    std::vector <uchar> status;
    std::vector <float> err;
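    // Detect up to 200 Shi-Tomasi corners in the previous grey frame (quality level 0.01,
    // at least 30 px apart) and track them into the current frame with pyramidal
    // Lucas-Kanade optical flow; `status` marks which corners were tracked successfully.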
    // Extract new image features
    cv::goodFeaturesToTrack(prev_grey, prev_corner, 200, 0.01, 30);
    // Track features
    cv::calcOpticalFlowPyrLK(prev_grey, frame, prev_corner, cur_corner, status, err);
    // Keep only the features that were successfully tracked
    for(size_t i=0; i < status.size(); i++) {
        if(status[i]) {
            prev_corner2.push_back(prev_corner[i]);
            cur_corner2.push_back(cur_corner[i]);
        }
    }
    // If no feature could be tracked, give up on this frame pair
    if(prev_corner2.empty() || cur_corner2.empty()){
        last_T = cv::Mat();
        // prev_grey = cv::Mat();
        return false;
    }

    // Estimate a partial affine transform: translation + rotation + uniform scale (no shearing)
    cv::Mat T = cv::estimateAffinePartial2D(prev_corner2, cur_corner2);

    double da, dx, dy;
    // If T is empty, return (probably a segment where there is nothing to stabilize)
    if(T.size().width == 0 || T.size().height == 0){
        return false;
    }
    else{
        // If no transformation was found, fall back to the last known good transform
        if(T.data == NULL){
            if(!last_T.empty())
                last_T.copyTo(T);
            else
                return false;
        }
        // Decompose T
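        // For a similarity transform, T has the form
        //     [ s*cos(a)  -s*sin(a)  dx ]
        //     [ s*sin(a)   s*cos(a)  dy ]
        // so the translation sits in the last column and the rotation angle can be
        // recovered as atan2(T(1,0), T(0,0)).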
        dx = T.at<double>(0,2);
        dy = T.at<double>(1,2);
        da = atan2(T.at<double>(1,0), T.at<double>(0,0));
    }

    // Discard implausibly large motions (in pixels / radians); these are likely
    // scene cuts or tracking failures rather than camera shake
    if(fabs(dx) > 200 || fabs(dy) > 200 || fabs(da) > 0.1){
        return false;
    }

    // Keep computing average and max transformation parameters
    avr_dx+=fabs(dx);
    avr_dy+=fabs(dy);
    avr_da+=fabs(da);
    if(fabs(dx) > max_dx)
        max_dx = fabs(dx);
    if(fabs(dy) > max_dy)
        max_dy = fabs(dy);
    if(fabs(da) > max_da)
        max_da = fabs(da);

    T.copyTo(last_T);

    prev_to_cur_transform.push_back(TransformParam(dx, dy, da));
    frame.copyTo(prev_grey);

    return true;
}

// Accumulate the frame-to-frame transforms into a global camera trajectory
std::vector<CamTrajectory> CVStabilization::ComputeFramesTrajectory(){

    // Accumulated frame to frame transform
    double a = 0;
    double x = 0;
    double y = 0;

    vector <CamTrajectory> trajectory; // trajectory at all frames

    // Compute global camera trajectory. First frame is the origin
    for(size_t i=0; i < prev_to_cur_transform.size(); i++) {
        x += prev_to_cur_transform[i].dx;
        y += prev_to_cur_transform[i].dy;
        a += prev_to_cur_transform[i].da;

        // Save trajectory data to vector
        trajectory.push_back(CamTrajectory(x,y,a));
    }
    return trajectory;
}

// Smooth the camera trajectory with a centered moving average
std::map<size_t,CamTrajectory> CVStabilization::SmoothTrajectory(std::vector <CamTrajectory> &trajectory){

    std::map <size_t,CamTrajectory> smoothed_trajectory; // smoothed trajectory at all frames

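    // Centered moving average over a window of (2*smoothingWindow + 1) frames:
    //     smoothed[i] = mean{ trajectory[i+j] : -smoothingWindow <= j <= smoothingWindow, 0 <= i+j < N }
    // Note that i+j is evaluated in unsigned (size_t) arithmetic, so a negative offset
    // wraps around to a huge value and is correctly rejected by the bounds check below.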
    for(size_t i=0; i < trajectory.size(); i++) {
        double sum_x = 0;
        double sum_y = 0;
        double sum_a = 0;
        int count = 0;

        for(int j=-smoothingWindow; j <= smoothingWindow; j++) {
            if(i+j < trajectory.size()) {
                sum_x += trajectory[i+j].x;
                sum_y += trajectory[i+j].y;
                sum_a += trajectory[i+j].a;

                count++;
            }
        }

        double avg_a = sum_a / count;
        double avg_x = sum_x / count;
        double avg_y = sum_y / count;

        // Add smoothed trajectory data to map
        smoothed_trajectory[i + start] = CamTrajectory(avg_x, avg_y, avg_a);
    }
    return smoothed_trajectory;
}

// Generate new transformation parameters for each frame to follow the smoothed trajectory
std::map<size_t,TransformParam> CVStabilization::GenNewCamPosition(std::map <size_t,CamTrajectory> &smoothed_trajectory){
    std::map <size_t,TransformParam> new_prev_to_cur_transform;

    // Accumulated frame to frame transform
    double a = 0;
    double x = 0;
    double y = 0;

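    // The corrected per-frame transform adds the gap between the smoothed and the raw
    // cumulative trajectory to the original frame-to-frame delta:
    //     new_d = d + (smoothed - raw)
    // so applying the new deltas in sequence makes the camera follow the smoothed path.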
    for(size_t i=0; i < prev_to_cur_transform.size(); i++) {
        x += prev_to_cur_transform[i].dx;
        y += prev_to_cur_transform[i].dy;
        a += prev_to_cur_transform[i].da;

        // target - current
        double diff_x = smoothed_trajectory[i + start].x - x;
        double diff_y = smoothed_trajectory[i + start].y - y;
        double diff_a = smoothed_trajectory[i + start].a - a;

        double dx = prev_to_cur_transform[i].dx + diff_x;
        double dy = prev_to_cur_transform[i].dy + diff_y;
        double da = prev_to_cur_transform[i].da + diff_a;

        // Add transformation data to map
        new_prev_to_cur_transform[i + start] = TransformParam(dx, dy, da);
    }
    return new_prev_to_cur_transform;
}

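// The pb_stabilize::Stabilization message (defined in stabilizedata.proto, here inferred
// from the accessors used below) stores one Frame entry per analyzed frame, holding the
// frame id, the smoothed trajectory (x, y, a), the corrected transform (dx, dy, da),
// and a last_updated timestamp for the whole file.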
// Save stabilization data to protobuf file
bool CVStabilization::SaveStabilizedData(){
    using std::ios;

    // Create stabilization message
    pb_stabilize::Stabilization stabilizationMessage;

    std::map<size_t,CamTrajectory>::iterator trajData = trajectoryData.begin();
    std::map<size_t,TransformParam>::iterator transData = transformationData.begin();

    // Iterate over all frame data and save it in the protobuf message
    for(; trajData != trajectoryData.end(); ++trajData, ++transData){
        AddFrameDataToProto(stabilizationMessage.add_frame(), trajData->second, transData->second, trajData->first);
    }
    // Add timestamp
    *stabilizationMessage.mutable_last_updated() = TimeUtil::SecondsToTimestamp(time(NULL));

    // Write the new message to disk.
    std::fstream output(protobuf_data_path, ios::out | ios::trunc | ios::binary);
    if (!stabilizationMessage.SerializeToOstream(&output)) {
        std::cerr << "Failed to write protobuf message." << std::endl;
        return false;
    }

    // Delete all global objects allocated by libprotobuf.
    google::protobuf::ShutdownProtobufLibrary();

    return true;
}

// Add frame stabilization data into protobuf message
void CVStabilization::AddFrameDataToProto(pb_stabilize::Frame* pbFrameData, CamTrajectory& trajData, TransformParam& transData, size_t frame_number){

    // Save frame number
    pbFrameData->set_id(frame_number);

    // Save camera trajectory data
    pbFrameData->set_a(trajData.a);
    pbFrameData->set_x(trajData.x);
    pbFrameData->set_y(trajData.y);

    // Save transformation data
    pbFrameData->set_da(transData.da);
    pbFrameData->set_dx(transData.dx);
    pbFrameData->set_dy(transData.dy);
}

// Get the transformation parameters of a given frame
TransformParam CVStabilization::GetTransformParamData(size_t frameId){

    // Check if the stabilizer info for the requested frame exists
    if ( transformationData.find(frameId) == transformationData.end() ) {

        return TransformParam();
    } else {

        return transformationData[frameId];
    }
}

// Get the camera trajectory data of a given frame
CamTrajectory CVStabilization::GetCamTrajectoryTrackedData(size_t frameId){

    // Check if the stabilizer info for the requested frame exists
    if ( trajectoryData.find(frameId) == trajectoryData.end() ) {

        return CamTrajectory();
    } else {

        return trajectoryData[frameId];
    }
}

// Load JSON string into this object
void CVStabilization::SetJson(const std::string value) {
    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match

        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw openshot::InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}

// Load Json::Value into this object
void CVStabilization::SetJsonValue(const Json::Value root) {

    // Set data from Json (if key is found)
    if (!root["protobuf_data_path"].isNull()){
        protobuf_data_path = (root["protobuf_data_path"].asString());
    }
    if (!root["smoothing-window"].isNull()){
        smoothingWindow = (root["smoothing-window"].asInt());
    }
}

/*
||||||||||||||||||||||||||||||||||||||||||||||||||
                ONLY FOR MAKE TEST
||||||||||||||||||||||||||||||||||||||||||||||||||
*/

// Load protobuf data file
bool CVStabilization::_LoadStabilizedData(){
    using std::ios;
    // Create stabilization message
    pb_stabilize::Stabilization stabilizationMessage;
    // Read the existing stabilization message.
    std::fstream input(protobuf_data_path, ios::in | ios::binary);
    if (!stabilizationMessage.ParseFromIstream(&input)) {
        std::cerr << "Failed to parse protobuf message." << std::endl;
        return false;
    }

    // Make sure the data maps are empty
    transformationData.clear();
    trajectoryData.clear();

    // Iterate over all frames of the saved message and assign to the data maps
    for (size_t i = 0; i < stabilizationMessage.frame_size(); i++) {
        const pb_stabilize::Frame& pbFrameData = stabilizationMessage.frame(i);

        // Load frame number
        size_t id = pbFrameData.id();

        // Load camera trajectory data
        float x = pbFrameData.x();
        float y = pbFrameData.y();
        float a = pbFrameData.a();

        // Assign data to trajectory map
        trajectoryData[id] = CamTrajectory(x,y,a);

        // Load transformation data
        float dx = pbFrameData.dx();
        float dy = pbFrameData.dy();
        float da = pbFrameData.da();

        // Assign data to transformation map
        transformationData[id] = TransformParam(dx,dy,da);
    }

    // Delete all global objects allocated by libprotobuf.
    google::protobuf::ShutdownProtobufLibrary();

    return true;
}