OpenShot Library | libopenshot 0.1.3
Timeline.cpp
1 /**
2  * @file
3  * @brief Source file for Timeline class
4  * @author Jonathan Thomas <jonathan@openshot.org>
5  *
6  * @section LICENSE
7  *
8  * Copyright (c) 2008-2014 OpenShot Studios, LLC
9  * <http://www.openshotstudios.com/>. This file is part of
10  * OpenShot Library (libopenshot), an open-source project dedicated to
11  * delivering high quality video editing and animation solutions to the
12  * world. For more information visit <http://www.openshot.org/>.
13  *
14  * OpenShot Library (libopenshot) is free software: you can redistribute it
15  * and/or modify it under the terms of the GNU Lesser General Public License
16  * as published by the Free Software Foundation, either version 3 of the
17  * License, or (at your option) any later version.
18  *
19  * OpenShot Library (libopenshot) is distributed in the hope that it will be
20  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22  * GNU Lesser General Public License for more details.
23  *
24  * You should have received a copy of the GNU Lesser General Public License
25  * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
26  */
27 
28 #include "../include/Timeline.h"
29 
30 using namespace openshot;
31 
32 // Default Constructor for the timeline (which sets the canvas width and height)
33 Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
34  is_open(false), auto_map_clips(true)
35 {
36  // Create CrashHandler and attach it (in case of errors)
37  CrashHandler::Instance();
38 
39  // Init viewport size (curve based, because it can be animated)
40  viewport_scale = Keyframe(100.0);
41  viewport_x = Keyframe(0.0);
42  viewport_y = Keyframe(0.0);
43 
44  // Init background color
45  color.red = Keyframe(0.0);
46  color.green = Keyframe(0.0);
47  color.blue = Keyframe(0.0);
48 
49  // Init FileInfo struct (clear all values)
50  info.width = width;
51  info.height = height;
52  info.fps = fps;
53  info.sample_rate = sample_rate;
54  info.channels = channels;
55  info.channel_layout = channel_layout;
57  info.duration = 60 * 30; // 30 minute default duration
58  info.has_audio = true;
59  info.has_video = true;
61 
62  // Init cache
63  final_cache = new CacheMemory();
64  final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
65 }
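
// Illustrative usage (not part of the original file; assumes libopenshot's
// LAYOUT_STEREO ChannelLayout constant): build a 720p, 24 fps timeline with
// 44.1 kHz stereo audio, then pull the first frame.
//
//   Timeline t(1280, 720, Fraction(24, 1), 44100, 2, LAYOUT_STEREO);
//   t.Open();
//   tr1::shared_ptr<Frame> f = t.GetFrame(1);
//   t.Close();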
66 
67 // Add an openshot::Clip to the timeline
68 void Timeline::AddClip(Clip* clip)
69 {
70  // All clips should be converted to the frame rate of this timeline
71  if (auto_map_clips)
72  // Apply framemapper (or update existing framemapper)
73  apply_mapper_to_clip(clip);
74 
75  // Add clip to list
76  clips.push_back(clip);
77 
78  // Sort clips
79  sort_clips();
80 }
81 
82 // Add an effect to the timeline
83 void Timeline::AddEffect(EffectBase* effect)
84 {
85  // Add effect to list
86  effects.push_back(effect);
87 
88  // Sort effects
89  sort_effects();
90 }
91 
92 // Remove an effect from the timeline
93 void Timeline::RemoveEffect(EffectBase* effect)
94 {
95  effects.remove(effect);
96 }
97 
98 // Remove an openshot::Clip from the timeline
99 void Timeline::RemoveClip(Clip* clip)
100 {
101  clips.remove(clip);
102 }
103 
104 // Apply a FrameMapper to a clip which matches the settings of this timeline
105 void Timeline::apply_mapper_to_clip(Clip* clip)
106 {
107  // Determine type of reader
108  ReaderBase* clip_reader = NULL;
109  if (clip->Reader()->Name() == "FrameMapper")
110  {
111  // Get the existing reader
112  clip_reader = (ReaderBase*) clip->Reader();
113 
114  } else {
115 
116  // Create a new FrameMapper to wrap the current reader
117  clip_reader = (ReaderBase*) new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
118  }
119 
120  // Update the mapping
121  FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
122  clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
123 
124  // Update clip reader
125  clip->Reader(clip_reader);
126 }
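
// Illustrative sketch (assumption, mirroring the ChangeMapping call above):
// a clip whose reader runs at a different frame rate is wrapped once in a
// FrameMapper, then re-targeted to this timeline's settings:
//
//   FrameMapper* m = (FrameMapper*) clip->Reader();
//   m->ChangeMapping(Fraction(24, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);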
127 
128 // Apply the timeline's framerate and samplerate to all clips
129 void Timeline::ApplyMapperToClips()
130 {
131  // Clear all cached frames
132  final_cache->Clear();
133 
134  // Loop through all clips
135  list<Clip*>::iterator clip_itr;
136  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
137  {
138  // Get clip object from the iterator
139  Clip *clip = (*clip_itr);
140 
141  // Apply framemapper (or update existing framemapper)
142  apply_mapper_to_clip(clip);
143  }
144 }
145 
146 // Calculate time of a frame number, based on a framerate
147 float Timeline::calculate_time(long int number, Fraction rate)
148 {
149  // Get float version of fps fraction
150  float raw_fps = rate.ToFloat();
151 
152  // Return the time (in seconds) of this frame
153  return float(number - 1) / raw_fps;
154 }
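
// Worked example (illustrative): at 24 fps, frame 1 maps to (1 - 1) / 24 = 0.0
// seconds and frame 25 maps to (25 - 1) / 24 = 1.0 seconds; frame numbers are
// 1-based while times are 0-based.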
155 
156 // Apply effects to the source frame (if any)
157 tr1::shared_ptr<Frame> Timeline::apply_effects(tr1::shared_ptr<Frame> frame, long int timeline_frame_number, int layer)
158 {
159  // Calculate time of frame
160  float requested_time = calculate_time(timeline_frame_number, info.fps);
161 
162  // Debug output
163  ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "requested_time", requested_time, "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1);
164 
165  // Find Effects at this position and layer
166  list<EffectBase*>::iterator effect_itr;
167  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
168  {
169  // Get effect object from the iterator
170  EffectBase *effect = (*effect_itr);
171 
172  // Does clip intersect the current requested time
173  float effect_duration = effect->End() - effect->Start();
174  bool does_effect_intersect = (effect->Position() <= requested_time && effect->Position() + effect_duration >= requested_time && effect->Layer() == layer);
175 
176  // Debug output
177  ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "requested_time", requested_time, "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer, "effect_duration", effect_duration);
178 
179  // Clip is visible
180  if (does_effect_intersect)
181  {
182  // Determine the frame needed for this clip (based on the position on the timeline)
183  float time_diff = (requested_time - effect->Position()) + effect->Start();
184  int effect_frame_number = round(time_diff * info.fps.ToFloat()) + 1;
185 
186  // Debug output
187  ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "time_diff", time_diff, "effect_frame_number", effect_frame_number, "effect_duration", effect_duration, "does_effect_intersect", does_effect_intersect, "", -1, "", -1);
188 
189  // Apply the effect to this frame
190  frame = effect->GetFrame(frame, effect_frame_number);
191  }
192 
193  } // end effect loop
194 
195  // Return modified frame
196  return frame;
197 }
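
// Worked example (illustrative): an effect with Position() = 2.0s, Start() =
// 0.0s and End() = 3.0s on a 24 fps timeline covers [2.0s, 5.0s]; timeline
// frame 73 (t = 3.0s) intersects, giving time_diff = (3.0 - 2.0) + 0.0 = 1.0s
// and effect_frame_number = round(1.0 * 24) + 1 = 25.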
198 
199 // Get or generate a blank frame
200 tr1::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, long int number)
201 {
202  tr1::shared_ptr<Frame> new_frame;
203 
204  // Init some basic properties about this frame
205  int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);
206 
207  try {
208  // Debug output
209  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);
210 
211  // Set max image size (used for performance optimization)
212  clip->SetMaxSize(info.width, info.height);
213 
214  // Attempt to get a frame (but this could fail if a reader has just been closed)
215  new_frame = tr1::shared_ptr<Frame>(clip->GetFrame(number));
216 
217  // Return real frame
218  return new_frame;
219 
220  } catch (const ReaderClosed & e) {
221  // ...
222  } catch (const TooManySeeks & e) {
223  // ...
224  } catch (const OutOfBoundsFrame & e) {
225  // ...
226  }
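
// Note: each exception above is swallowed deliberately, so execution falls
// through to the blank-frame path below and callers always receive a usable
// Frame with the timeline's dimensions and sample count.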
227 
228  // Debug output
229  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);
230 
231  // Create blank frame
232  new_frame = tr1::shared_ptr<Frame>(new Frame(number, info.width, info.height, "#000000", samples_in_frame, info.channels));
233  new_frame->SampleRate(info.sample_rate);
234  new_frame->ChannelsLayout(info.channel_layout);
235  return new_frame;
236 }
237 
238 // Process a new layer of video or audio
239 void Timeline::add_layer(tr1::shared_ptr<Frame> new_frame, Clip* source_clip, long int clip_frame_number, long int timeline_frame_number, bool is_top_clip)
240 {
241  // Get the clip's frame & image
242  tr1::shared_ptr<Frame> source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
243 
244  // No frame found... so bail
245  if (!source_frame)
246  return;
247 
248  // Debug output
249  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);
250 
251  /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
252  if (source_clip->Waveform())
253  {
254  // Debug output
255  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
256 
257  // Get the color of the waveform
258  int red = source_clip->wave_color.red.GetInt(clip_frame_number);
259  int green = source_clip->wave_color.green.GetInt(clip_frame_number);
260  int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
261  int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);
262 
263  // Generate Waveform Dynamically (the size of the timeline)
264  tr1::shared_ptr<QImage> source_image = source_frame->GetWaveform(info.width, info.height, red, green, blue, alpha);
265  source_frame->AddImage(tr1::shared_ptr<QImage>(source_image));
266  }
267 
268  /* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
269  * effects on the top clip. */
270  if (is_top_clip && source_frame)
271  source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
272 
273  // Declare an image to hold the source frame's image
274  tr1::shared_ptr<QImage> source_image;
275 
276  /* COPY AUDIO - with correct volume */
277  if (source_clip->Reader()->info.has_audio) {
278 
279  // Debug output
280  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);
281 
282  if (source_frame->GetAudioChannelsCount() == info.channels)
283  for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
284  {
285  float initial_volume = 1.0f;
286  float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1); // previous frame's percentage of volume (0 to 1)
287  float volume = source_clip->volume.GetValue(clip_frame_number); // percentage of volume (0 to 1)
288  int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
289  int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)
290 
291  // If channel filter enabled, check for correct channel (and skip non-matching channels)
292  if (channel_filter != -1 && channel_filter != channel)
293  continue; // skip to next channel
294 
295  // If channel mapping disabled, just use the current channel
296  if (channel_mapping == -1)
297  channel_mapping = channel;
298 
299  // If no ramp needed, set initial volume = clip's volume
300  if (isEqual(previous_volume, volume))
301  initial_volume = volume;
302 
303  // Apply ramp to source frame (if needed)
304  if (!isEqual(previous_volume, volume))
305  source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
306 
307  // TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
308  // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
309  // number of samples returned is variable... and does not match the number expected.
310  // This is a crude solution at best. =)
311  if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
312  // Force timeline frame to match the source frame
313  new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
314 
315  // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added
316  // together, so be sure to set the gains correctly: if their sum exceeds 1.0, audio distortion will occur.
317  new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume);
318 
319  }
320  else
321  // Debug output
322  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);
323 
324  }
325 
326  // Skip out if only an audio frame
327  if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
328  // Skip the rest of the image processing for performance reasons
329  return;
330 
331  // Debug output
332  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
333 
334  // Get actual frame image data
335  source_image = source_frame->GetImage();
336 
337  // Get some basic image properties
338  int source_width = source_image->width();
339  int source_height = source_image->height();
340 
341  /* ALPHA & OPACITY */
342  if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
343  {
344  float alpha = source_clip->alpha.GetValue(clip_frame_number);
345 
346  // Get source image's pixels
347  unsigned char *pixels = (unsigned char *) source_image->bits();
348 
349  // Loop through pixels
350  for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
351  {
352  // Get the alpha values from the pixel
353  int A = pixels[byte_index + 3];
354 
355  // Apply alpha to pixel
356  pixels[byte_index + 3] *= alpha;
357  }
358 
359  // Debug output
360  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
361  }
362 
363  /* RESIZE SOURCE IMAGE - based on scale type */
364  switch (source_clip->scale)
365  {
366  case (SCALE_FIT):
367  // keep aspect ratio
368  source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(info.width, info.height, Qt::KeepAspectRatio, Qt::SmoothTransformation)));
369  source_width = source_image->width();
370  source_height = source_image->height();
371 
372  // Debug output
373  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
374  break;
375 
376  case (SCALE_STRETCH):
377  // ignore aspect ratio
378  source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(info.width, info.height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation)));
379  source_width = source_image->width();
380  source_height = source_image->height();
381 
382  // Debug output
383  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
384  break;
385 
386  case (SCALE_CROP):
387  QSize width_size(info.width, round(info.width / (float(source_width) / float(source_height))));
388  QSize height_size(round(info.height / (float(source_height) / float(source_width))), info.height);
389 
390  // respect aspect ratio
391  if (width_size.width() >= info.width && width_size.height() >= info.height)
392  source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(width_size.width(), width_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)));
393  else
394  source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(height_size.width(), height_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation))); // height is larger, so resize to it
395  source_width = source_image->width();
396  source_height = source_image->height();
397 
398  // Debug output
399  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
400  break;
401  }
402 
403  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
404  float x = 0.0; // left
405  float y = 0.0; // top
406 
407  // Adjust size for scale x and scale y
408  float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
409  float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
410  float scaled_source_width = source_width * sx;
411  float scaled_source_height = source_height * sy;
412 
413  switch (source_clip->gravity)
414  {
415  case (GRAVITY_TOP):
416  x = (info.width - scaled_source_width) / 2.0; // center
417  break;
418  case (GRAVITY_TOP_RIGHT):
419  x = info.width - scaled_source_width; // right
420  break;
421  case (GRAVITY_LEFT):
422  y = (info.height - scaled_source_height) / 2.0; // center
423  break;
424  case (GRAVITY_CENTER):
425  x = (info.width - scaled_source_width) / 2.0; // center
426  y = (info.height - scaled_source_height) / 2.0; // center
427  break;
428  case (GRAVITY_RIGHT):
429  x = info.width - scaled_source_width; // right
430  y = (info.height - scaled_source_height) / 2.0; // center
431  break;
432  case (GRAVITY_BOTTOM_LEFT):
433  y = (info.height - scaled_source_height); // bottom
434  break;
435  case (GRAVITY_BOTTOM):
436  x = (info.width - scaled_source_width) / 2.0; // center
437  y = (info.height - scaled_source_height); // bottom
438  break;
439  case (GRAVITY_BOTTOM_RIGHT):
440  x = info.width - scaled_source_width; // right
441  y = (info.height - scaled_source_height); // bottom
442  break;
443  }
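
// Worked example (illustrative): on a 1920x1080 timeline with a source scaled
// to 1280x720, GRAVITY_CENTER gives x = (1920 - 1280) / 2 = 320 and
// y = (1080 - 720) / 2 = 180.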
444 
445  // Debug output
446  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "source_width", source_width, "info.height", info.height, "source_height", source_height);
447 
448  /* LOCATION, ROTATION, AND SCALE */
449  float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
450  x += (info.width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
451  y += (info.height * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
452  bool is_x_animated = source_clip->location_x.Points.size() > 1;
453  bool is_y_animated = source_clip->location_y.Points.size() > 1;
454  float shear_x = source_clip->shear_x.GetValue(clip_frame_number);
455  float shear_y = source_clip->shear_y.GetValue(clip_frame_number);
456 
457  int offset_x = -1;
458  int offset_y = -1;
459  bool transformed = false;
460  QTransform transform;
461 
462  // Transform source image (if needed)
463  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
464 
465  if (!isEqual(r, 0)) {
466  // ROTATE CLIP
467  float origin_x = x + (source_width / 2.0);
468  float origin_y = y + (source_height / 2.0);
469  transform.translate(origin_x, origin_y);
470  transform.rotate(r);
471  transform.translate(-origin_x,-origin_y);
472  transformed = true;
473  }
474 
475  if (!isEqual(x, 0) || !isEqual(y, 0)) {
476  // TRANSLATE/MOVE CLIP
477  transform.translate(x, y);
478  transformed = true;
479  }
480 
481  if (!isEqual(sx, 0) || !isEqual(sy, 0)) {
482  // SCALE CLIP
483  transform.scale(sx, sy);
484  transformed = true;
485  }
486 
487  if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
488  // SHEAR HEIGHT/WIDTH
489  transform.shear(shear_x, shear_y);
490  transformed = true;
491  }
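
// Note: the QTransform above accumulates rotation (about the clip's centre),
// translation, scale, and shear into a single matrix; QPainter applies the
// combined transform in one pass during compositing below.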
492 
493  // Debug output
494  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);
495 
496  /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
497  tr1::shared_ptr<QImage> new_image = new_frame->GetImage();
498 
499  // Load timeline's new frame image into a QPainter
500  QPainter painter(new_image.get());
501  painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
502 
503  // Apply transform (translate, rotate, scale)... if any
504  if (transformed)
505  painter.setTransform(transform);
506 
507  // Composite a new layer onto the image
508  painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
509  painter.drawImage(0, 0, *source_image);
510 
511  // Draw transform selection handles (if needed)
512  if (source_clip->handles == TRANSFORM_HANDLE_SELECTION) {
513  // Debug output
514  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Add transform selection handles)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);
515 
516  // Draw 4 corners
517  painter.fillRect(0.0, 0.0, 12.0/sx, 12.0/sy, QBrush(QColor("#53a0ed"))); // top left
518  painter.fillRect(source_width - (12.0/sx), 0, 12.0/sx, 12.0/sy, QBrush(QColor("#53a0ed"))); // top right
519  painter.fillRect(0.0, source_height - (12.0/sy), 12.0/sx, 12.0/sy, QBrush(QColor("#53a0ed"))); // bottom left
520  painter.fillRect(source_width - (12.0/sx), source_height - (12.0/sy), 12.0/sx, 12.0/sy, QBrush(QColor("#53a0ed"))); // bottom right
521 
522  // Draw 4 sides (centered)
523  painter.fillRect(0.0 + (source_width / 2.0) - (6.0/sx), 0, 12.0/sx, 12.0/sy, QBrush(QColor("#53a0ed"))); // top center
524  painter.fillRect(0.0 + (source_width / 2.0) - (6.0/sx), source_height - (6.0/sy), 12.0/sx, 12.0/sy, QBrush(QColor("#53a0ed"))); // bottom center
525  painter.fillRect(0.0, (source_height / 2.0) - (6.0/sy), 12.0/sx, 12.0/sy, QBrush(QColor("#53a0ed"))); // left center
526  painter.fillRect(source_width - (12.0/sx), (source_height / 2.0) - (6.0/sy), 12.0/sx, 12.0/sy, QBrush(QColor("#53a0ed"))); // right center
527 
528 
529  // Draw origin QPen(const QBrush &brush, qreal width, Qt::PenStyle style = Qt::SolidLine, Qt::PenCapStyle cap = Qt::SquareCap, Qt::PenJoinStyle join = Qt::BevelJoin)
530  painter.setBrush(QColor(83, 160, 237, 122));
531  painter.setPen(Qt::NoPen);
532  painter.drawEllipse((source_width / 2.0) - (25.0/sx), (source_height / 2.0) - (25.0/sy), 50.0/sx, 50.0/sy);
533  }
534 
535  painter.end();
536 
537  // Debug output
538  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);
539 }
540 
541 // Update the list of 'opened' clips
542 void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
543 {
544  ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1, "", -1);
545 
546  // is clip already in list?
547  bool clip_found = open_clips.count(clip);
548 
549  if (clip_found && !does_clip_intersect)
550  {
551  // Remove clip from 'opened' list, because it's closed now
552  open_clips.erase(clip);
553 
554  // Close clip
555  clip->Close();
556  }
557  else if (!clip_found && does_clip_intersect)
558  {
559  // Add clip to 'opened' list, because it's missing
560  open_clips[clip] = clip;
561 
562  // Open the clip
563  clip->Open();
564  }
565 
566  // Debug output
567  ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1);
568 }
569 
570 // Sort clips by position on the timeline
571 void Timeline::sort_clips()
572 {
573  // Debug output
574  ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size(), "", -1, "", -1, "", -1, "", -1, "", -1);
575 
576  // sort clips
577  clips.sort(CompareClips());
578 }
579 
580 // Sort effects by position on the timeline
581 void Timeline::sort_effects()
582 {
584  // sort effects
584  effects.sort(CompareEffects());
585 }
586 
587 // Close the reader (and any resources it was consuming)
588 void Timeline::Close()
589 {
590  ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
591 
592  // Close all open clips
593  list<Clip*>::iterator clip_itr;
594  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
595  {
596  // Get clip object from the iterator
597  Clip *clip = (*clip_itr);
598 
599  // Close this clip (passing false marks it as non-intersecting, so it gets closed)
600  update_open_clips(clip, false);
601  }
602 
603  // Mark timeline as closed
604  is_open = false;
605 
606  // Clear cache
607  final_cache->Clear();
608 }
609 
610 // Open the reader (and start consuming resources)
611 void Timeline::Open()
612 {
613  is_open = true;
614 }
615 
616 // Compare 2 floating point numbers for equality
617 bool Timeline::isEqual(double a, double b)
618 {
619  return fabs(a - b) < 0.000001;
620 }
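
// Worked example (illustrative): isEqual(0.9999999, 1.0) is true while
// isEqual(0.99, 1.0) is false; this tolerance guards the volume-ramp and
// transform checks above against floating point rounding noise.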
621 
622 // Get an openshot::Frame object for a specific frame number of this reader.
623 tr1::shared_ptr<Frame> Timeline::GetFrame(long int requested_frame) throw(ReaderClosed, OutOfBoundsFrame)
624 {
625  // Check for open reader (or throw exception)
626  if (!is_open)
627  throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.", "");
628 
629  // Adjust out of bounds frame number
630  if (requested_frame < 1)
631  requested_frame = 1;
632 
633  // Check cache
634  tr1::shared_ptr<Frame> frame = final_cache->GetFrame(requested_frame);
635  if (frame) {
636  // Debug output
637  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);
638 
639  // Return cached frame
640  return frame;
641  }
642  else
643  {
644  // Create a scoped lock, allowing only a single thread to run the following code at one time
645  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
646 
647  // Check cache again (due to locking)
648  frame = final_cache->GetFrame(requested_frame);
649  if (frame) {
650  // Debug output
651  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);
652 
653  // Return cached frame
654  return frame;
655  }
656 
657  // Minimum number of frames to process (for performance reasons)
658  int minimum_frames = OPEN_MP_NUM_PROCESSORS;
659 
660  // Get a list of clips that intersect with the requested section of timeline
661  // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
662  vector<Clip*> nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);
663 
664  omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
665  // Allow nested OpenMP sections
666  omp_set_nested(true);
667 
668  // Debug output
669  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1);
670 
671  // GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
672  // Determine all clip frames, and request them in order (to keep resampled audio in sequence)
673  for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
674  {
675  // Calculate time of timeline frame
676  float requested_time = calculate_time(frame_number, info.fps);
677  // Loop through clips
678  for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
679  {
680  // Get clip object from the iterator
681  Clip *clip = nearby_clips[clip_index];
682  bool does_clip_intersect = (clip->Position() <= requested_time && clip->Position() + clip->Duration() >= requested_time);
683  if (does_clip_intersect)
684  {
685  // Get clip frame #
686  float time_diff = (requested_time - clip->Position()) + clip->Start();
687  int clip_frame_number = round(time_diff * info.fps.ToFloat()) + 1;
688  // Cache clip object
689  clip->GetFrame(clip_frame_number);
690  }
691  }
692  }
693 
694  #pragma omp parallel
695  {
696  // Loop through all requested frames
697  #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames)
698  for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
699  {
700  // Debug output
701  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);
702 
703  // Init some basic properties about this frame
704  int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);
705 
706  // Create blank frame (which will become the requested frame)
707  tr1::shared_ptr<Frame> new_frame(tr1::shared_ptr<Frame>(new Frame(frame_number, info.width, info.height, "#000000", samples_in_frame, info.channels)));
708  new_frame->AddAudioSilence(samples_in_frame);
709  new_frame->SampleRate(info.sample_rate);
710  new_frame->ChannelsLayout(info.channel_layout);
711 
712  // Debug output
713  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);
714 
715  // Add Background Color to 1st layer (if animated or not black)
716  if ((color.red.Points.size() > 1 || color.green.Points.size() > 1 || color.blue.Points.size() > 1) ||
717  (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
718  new_frame->AddColor(info.width, info.height, color.GetColorHex(frame_number));
719 
720  // Calculate time of frame
721  float requested_time = calculate_time(frame_number, info.fps);
722 
723  // Debug output
724  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "requested_time", requested_time, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1);
725 
726  // Find Clips near this time
727  for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
728  {
729  // Get clip object from the iterator
730  Clip *clip = nearby_clips[clip_index];
731 
732  // Does clip intersect the current requested time
733  bool does_clip_intersect = (clip->Position() <= requested_time && clip->Position() + clip->Duration() >= requested_time);
734 
735  // Debug output
736  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "requested_time", requested_time, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect, "", -1);
737 
738  // Clip is visible
739  if (does_clip_intersect)
740  {
741  // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
742  bool is_top_clip = true;
743  for (int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
744  {
745  Clip *nearby_clip = nearby_clips[top_clip_index];
746  if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
747  nearby_clip->Position() <= requested_time && nearby_clip->Position() + nearby_clip->Duration() >= requested_time &&
748  nearby_clip->Position() > clip->Position()) {
749  is_top_clip = false;
750  break;
751  }
752  }
753 
754  // Determine the frame needed for this clip (based on the position on the timeline)
755  float time_diff = (requested_time - clip->Position()) + clip->Start();
756  int clip_frame_number = round(time_diff * info.fps.ToFloat()) + 1;
757 
758  // Debug output
759  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "time_diff", time_diff, "requested_time", requested_time, "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);
760 
761  // Add clip's frame as layer
762  add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip);
763 
764  } else
765  // Debug output
766  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "requested_time", requested_time, "does_clip_intersect", does_clip_intersect, "", -1, "", -1, "", -1);
767 
768  } // end clip loop
769 
770  // Debug output
771  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);
772 
773  // Set frame # on mapped frame
774  new_frame->SetFrameNumber(frame_number);
775 
776  // Add final frame to cache
777  final_cache->Add(new_frame);
778 
779  } // end frame loop
780  } // end parallel
781 
782  // Debug output
783  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);
784 
785  // Return frame (or blank frame)
786  return final_cache->GetFrame(requested_frame);
787  }
788 }
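
// Note (illustrative summary): a cache miss is handled in batches of
// OPEN_MP_NUM_PROCESSORS frames: clip frames are first requested serially (to
// keep resampled audio in sequence), then composited in parallel, so one miss
// also warms the cache for the frames that immediately follow it.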
789 
790 
791 // Find intersecting clips (or non intersecting clips)
792 vector<Clip*> Timeline::find_intersecting_clips(long int requested_frame, int number_of_frames, bool include)
793 {
794  // Find matching clips
795  vector<Clip*> matching_clips;
796 
797  // Calculate time of frame
798  float min_requested_time = calculate_time(requested_frame, info.fps);
799  float max_requested_time = calculate_time(requested_frame + (number_of_frames - 1), info.fps);
800 
801  // Re-Sort Clips (since they likely changed)
802  sort_clips();
803 
804  // Find Clips at this time
805  list<Clip*>::iterator clip_itr;
806  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
807  {
808  // Get clip object from the iterator
809  Clip *clip = (*clip_itr);
810 
811  // Does clip intersect the current requested time
812  float clip_duration = clip->End() - clip->Start();
813  bool does_clip_intersect = (clip->Position() <= min_requested_time && clip->Position() + clip_duration >= min_requested_time) ||
814  (clip->Position() > min_requested_time && clip->Position() <= max_requested_time);
815 
816  // Debug output
817  ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_time", min_requested_time, "max_requested_time", max_requested_time, "clip->Position()", clip->Position(), "clip_duration", clip_duration, "does_clip_intersect", does_clip_intersect);
818 
819  // Open (or schedule for closing) this clip, based on whether it intersects
820  #pragma omp critical (reader_lock)
821  update_open_clips(clip, does_clip_intersect);
822 
823 
824  // Clip is visible
825  if (does_clip_intersect && include)
826  // Add the intersecting clip
827  matching_clips.push_back(clip);
828 
829  else if (!does_clip_intersect && !include)
830  // Add the non-intersecting clip
831  matching_clips.push_back(clip);
832 
833  } // end clip loop
834 
835  // return list
836  return matching_clips;
837 }
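
// Worked example (illustrative): at 24 fps, requested_frame = 49 and
// number_of_frames = 8 give a window of [2.0s, ~2.29s]; a clip with
// Position() = 1.0s, Start() = 0 and End() = 1.5s covers [1.0s, 2.5s] and is
// returned when include is true.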
838 
839 // Set the cache object used by this reader
840 void Timeline::SetCache(CacheBase* new_cache) {
841  // Set new cache
842  final_cache = new_cache;
843 }
844 
845 // Generate JSON string of this object
846 string Timeline::Json() {
847 
848  // Return formatted string
849  return JsonValue().toStyledString();
850 }
851 
852 // Generate Json::JsonValue for this object
853 Json::Value Timeline::JsonValue() {
854 
855  // Create root json object
856  Json::Value root = ReaderBase::JsonValue(); // get parent properties
857  root["type"] = "Timeline";
858  root["viewport_scale"] = viewport_scale.JsonValue();
859  root["viewport_x"] = viewport_x.JsonValue();
860  root["viewport_y"] = viewport_y.JsonValue();
861  root["color"] = color.JsonValue();
862 
863  // Add array of clips
864  root["clips"] = Json::Value(Json::arrayValue);
865 
866  // Find Clips at this time
867  list<Clip*>::iterator clip_itr;
868  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
869  {
870  // Get clip object from the iterator
871  Clip *existing_clip = (*clip_itr);
872  root["clips"].append(existing_clip->JsonValue());
873  }
874 
875  // Add array of effects
876  root["effects"] = Json::Value(Json::arrayValue);
877 
878  // loop through effects
879  list<EffectBase*>::iterator effect_itr;
880  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
881  {
882  // Get clip object from the iterator
883  EffectBase *existing_effect = (*effect_itr);
884  root["effects"].append(existing_effect->JsonValue());
885  }
886 
887  // return JsonValue
888  return root;
889 }
890 
891 // Load JSON string into this object
892 void Timeline::SetJson(string value) throw(InvalidJSON) {
893 
894  // Parse JSON string into JSON objects
895  Json::Value root;
896  Json::Reader reader;
897  bool success = reader.parse( value, root );
898  if (!success)
899  // Raise exception
900  throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
901 
902  try
903  {
904  // Set all values that match
905  SetJsonValue(root);
906  }
907  catch (exception e)
908  {
909  // Error parsing JSON (or missing keys)
910  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
911  }
912 }
913 
914 // Load Json::JsonValue into this object
915 void Timeline::SetJsonValue(Json::Value root) throw(InvalidFile, ReaderClosed) {
916 
917  // Close timeline before we do anything (this also removes all open and closing clips)
918  Close();
919 
920  // Set parent data
921  ReaderBase::SetJsonValue(root);
922 
923  if (!root["clips"].isNull()) {
924  // Clear existing clips
925  clips.clear();
926 
927  // loop through clips
928  for (int x = 0; x < root["clips"].size(); x++) {
929  // Get each clip
930  Json::Value existing_clip = root["clips"][x];
931 
932  // Create Clip
933  Clip *c = new Clip();
934 
935  // Load Json into Clip
936  c->SetJsonValue(existing_clip);
937 
938  // Add Clip to Timeline
939  AddClip(c);
940  }
941  }
942 
943  if (!root["effects"].isNull()) {
944  // Clear existing effects
945  effects.clear();
946 
947  // loop through effects
948  for (int x = 0; x < root["effects"].size(); x++) {
949  // Get each effect
950  Json::Value existing_effect = root["effects"][x];
951 
952  // Create Effect
953  EffectBase *e = NULL;
954 
955  if (!existing_effect["type"].isNull()) {
956  // Create instance of effect
957  e = EffectInfo().CreateEffect(existing_effect["type"].asString());
958 
959  // Load Json into Effect
960  e->SetJsonValue(existing_effect);
961 
962  // Add Effect to Timeline
963  AddEffect(e);
964  }
965  }
966  }
967 
968  if (!root["duration"].isNull()) {
969  // Update duration of timeline
970  info.duration = root["duration"].asDouble();
972  }
973 }
974 
975 // Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
976 void Timeline::ApplyJsonDiff(string value) throw(InvalidJSON, InvalidJSONKey) {
977 
978  // Parse JSON string into JSON objects
979  Json::Value root;
980  Json::Reader reader;
981  bool success = reader.parse( value, root );
982  if (!success || !root.isArray())
983  // Raise exception
984  throw InvalidJSON("JSON could not be parsed (or is invalid).", "");
985 
986  try
987  {
988  // Process the JSON change array, loop through each item
989  for (int x = 0; x < root.size(); x++) {
990  // Get each change
991  Json::Value change = root[x];
992  string root_key = change["key"][(uint)0].asString();
993 
994  // Process each type of change
995  if (root_key == "clips")
996  // Apply to CLIPS
997  apply_json_to_clips(change);
998 
999  else if (root_key == "effects")
1000  // Apply to EFFECTS
1001  apply_json_to_effects(change);
1002 
1003  else
1004  // Apply to TIMELINE
1005  apply_json_to_timeline(change);
1006 
1007  }
1008  }
1009  catch (exception e)
1010  {
1011  // Error parsing JSON (or missing keys)
1012  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
1013  }
1014 }
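
// Illustrative change payload (ids and values are made up): the first "key"
// element routes each change to apply_json_to_clips(),
// apply_json_to_effects() or apply_json_to_timeline() below.
//
//   [{"type": "update",
//     "key": ["clips", {"id": "C1"}],
//     "value": {"id": "C1", "position": 5.0, "start": 0.0, "end": 10.0}}]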
1015 
1016 // Apply JSON diff to clips
1017 void Timeline::apply_json_to_clips(Json::Value change) throw(InvalidJSONKey) {
1018 
1019  // Get key and type of change
1020  string change_type = change["type"].asString();
1021  string clip_id = "";
1022  Clip *existing_clip = NULL;
1023 
1024  // Find id of clip (if any)
1025  for (int x = 0; x < change["key"].size(); x++) {
1026  // Get each change
1027  Json::Value key_part = change["key"][x];
1028 
1029  if (key_part.isObject()) {
1030  // Check for id
1031  if (!key_part["id"].isNull()) {
1032  // Set the id
1033  clip_id = key_part["id"].asString();
1034 
1035  // Find matching clip in timeline (if any)
1036  list<Clip*>::iterator clip_itr;
1037  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
1038  {
1039  // Get clip object from the iterator
1040  Clip *c = (*clip_itr);
1041  if (c->Id() == clip_id) {
1042  existing_clip = c;
1043  break; // clip found, exit loop
1044  }
1045  }
1046  break; // id found, exit loop
1047  }
1048  }
1049  }
1050 
1051  // Check for a more specific key (targeting this clip's effects)
1052  // For example: ["clips", {"id": "123"}, "effects", {"id": "432"}]
1053  if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
1054  {
1055  // This change actually targets a specific effect under a clip (not the clip itself)
1056  Json::Value key_part = change["key"][3];
1057 
1058  if (key_part.isObject()) {
1059  // Check for id
1060  if (!key_part["id"].isNull())
1061  {
1062  // Set the id
1063  string effect_id = key_part["id"].asString();
1064 
1065  // Find matching effect in timeline (if any)
1066  list<EffectBase*> effect_list = existing_clip->Effects();
1067  list<EffectBase*>::iterator effect_itr;
1068  for (effect_itr=effect_list.begin(); effect_itr != effect_list.end(); ++effect_itr)
1069  {
1070  // Get effect object from the iterator
1071  EffectBase *e = (*effect_itr);
1072  if (e->Id() == effect_id) {
1073  // Apply the change to the effect directly
1074  apply_json_to_effects(change, e);
1075  return; // effect found, don't update clip
1076  }
1077  }
1078  }
1079  }
1080  }
1081 
1082  // Calculate start and end frames that this impacts, and remove those frames from the cache
1083  if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
1084  long int new_starting_frame = change["value"]["position"].asDouble() * info.fps.ToDouble();
1085  long int new_ending_frame = (change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble();
1086  final_cache->Remove(new_starting_frame - 2, new_ending_frame + 2);
1087  }
1088 
1089  // Determine type of change operation
1090  if (change_type == "insert") {
1091 
1092  // Create new clip
1093  Clip *clip = new Clip();
1094  clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
1095  AddClip(clip); // Add clip to timeline
1096 
1097  } else if (change_type == "update") {
1098 
1099  // Update existing clip
1100  if (existing_clip) {
1101 
1102  // Calculate start and end frames that this impacts, and remove those frames from the cache
1103  long int old_starting_frame = existing_clip->Position() * info.fps.ToDouble();
1104  long int old_ending_frame = (existing_clip->Position() + existing_clip->End() - existing_clip->Start()) * info.fps.ToDouble();
1105  final_cache->Remove(old_starting_frame - 2, old_ending_frame + 2);
1106 
1107  // Update clip properties from JSON
1108  existing_clip->SetJsonValue(change["value"]);
1109  }
1110 
1111  } else if (change_type == "delete") {
1112 
1113  // Remove existing clip
1114  if (existing_clip) {
1115 
1116  // Calculate start and end frames that this impacts, and remove those frames from the cache
1117  long int old_starting_frame = existing_clip->Position() * info.fps.ToDouble();
1118  long int old_ending_frame = (existing_clip->Position() + existing_clip->End() - existing_clip->Start()) * info.fps.ToDouble();
1119  final_cache->Remove(old_starting_frame - 2, old_ending_frame + 2);
1120 
1121  // Remove clip from timeline
1122  RemoveClip(existing_clip);
1123  }
1124 
1125  }
1126 
1127 }
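
// Note: an "update" or "delete" purges both the clip's old placement (above)
// and, when a new position is supplied, its new placement (earlier in this
// method) from the cache, each with a 2-frame margin, so stale composites
// cannot survive a move.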
1128 
1129 // Apply JSON diff to effects
1130 void Timeline::apply_json_to_effects(Json::Value change) throw(InvalidJSONKey) {
1131 
1132  // Get key and type of change
1133  string change_type = change["type"].asString();
1134  EffectBase *existing_effect = NULL;
1135 
1136  // Find id of an effect (if any)
1137  for (int x = 0; x < change["key"].size(); x++) {
1138  // Get each change
1139  Json::Value key_part = change["key"][x];
1140 
1141  if (key_part.isObject()) {
1142  // Check for id
1143  if (!key_part["id"].isNull())
1144  {
1145  // Set the id
1146  string effect_id = key_part["id"].asString();
1147 
1148  // Find matching effect in timeline (if any)
1149  list<EffectBase*>::iterator effect_itr;
1150  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
1151  {
1152  // Get effect object from the iterator
1153  EffectBase *e = (*effect_itr);
1154  if (e->Id() == effect_id) {
1155  existing_effect = e;
1156  break; // effect found, exit loop
1157  }
1158  }
1159  break; // id found, exit loop
1160  }
1161  }
1162  }
1163 
1164  // Now that we found the effect, apply the change to it
1165  if (existing_effect || change_type == "insert")
1166  // Apply change to effect
1167  apply_json_to_effects(change, existing_effect);
1168 }
1169 
1170 // Apply JSON diff to effects (if you already know which effect needs to be updated)
1171 void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) throw(InvalidJSONKey) {
1172 
1173  // Get key and type of change
1174  string change_type = change["type"].asString();
1175 
1176  // Calculate start and end frames that this impacts, and remove those frames from the cache
1177  if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
1178  long int new_starting_frame = change["value"]["position"].asDouble() * info.fps.ToDouble();
1179  long int new_ending_frame = (change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble();
1180  final_cache->Remove(new_starting_frame - 2, new_ending_frame + 2);
1181  }
1182 
1183  // Determine type of change operation
1184  if (change_type == "insert") {
1185 
1186  // Determine type of effect
1187  string effect_type = change["value"]["type"].asString();
1188 
1189  // Create Effect
1190  EffectBase *e = NULL;
1191 
1192  // Init the matching effect object
1193  e = EffectInfo().CreateEffect(effect_type);
1194 
1195  // Load Json into Effect
1196  e->SetJsonValue(change["value"]);
1197 
1198  // Add Effect to Timeline
1199  AddEffect(e);
1200 
1201  } else if (change_type == "update") {
1202 
1203  // Update existing effect
1204  if (existing_effect) {
1205 
1206  // Calculate start and end frames that this impacts, and remove those frames from the cache
1207  long int old_starting_frame = existing_effect->Position() * info.fps.ToDouble();
1208  long int old_ending_frame = (existing_effect->Position() + existing_effect->End() - existing_effect->Start()) * info.fps.ToDouble();
1209  final_cache->Remove(old_starting_frame - 2, old_ending_frame + 2);
1210 
1211  // Update effect properties from JSON
1212  existing_effect->SetJsonValue(change["value"]);
1213  }
1214 
1215  } else if (change_type == "delete") {
1216 
1217  // Remove existing effect
1218  if (existing_effect) {
1219 
1220  // Calculate start and end frames that this impacts, and remove those frames from the cache
1221  long int old_starting_frame = existing_effect->Position() * info.fps.ToDouble();
1222  long int old_ending_frame = (existing_effect->Position() + existing_effect->End() - existing_effect->Start()) * info.fps.ToDouble();
1223  final_cache->Remove(old_starting_frame - 2, old_ending_frame + 2);
1224 
1225  // Remove effect from timeline
1226  RemoveEffect(existing_effect);
1227  }
1228 
1229  }
1230 }
1231 
1232 // Apply JSON diff to timeline properties
1233 void Timeline::apply_json_to_timeline(Json::Value change) throw(InvalidJSONKey) {
1234 
1235  // Get key and type of change
1236  string change_type = change["type"].asString();
1237  string root_key = change["key"][(uint)0].asString();
1238  string sub_key = "";
1239  if (change["key"].size() >= 2)
1240  sub_key = change["key"][(uint)1].asString();
1241 
1242  // Clear entire cache
1243  final_cache->Clear();
1244 
1245  // Determine type of change operation
1246  if (change_type == "insert" || change_type == "update") {
1247 
1248  // INSERT / UPDATE
1249  // Check for valid property
1250  if (root_key == "color")
1251  // Set color
1252  color.SetJsonValue(change["value"]);
1253  else if (root_key == "viewport_scale")
1254  // Set viewport scale
1255  viewport_scale.SetJsonValue(change["value"]);
1256  else if (root_key == "viewport_x")
1257  // Set viewport x offset
1258  viewport_x.SetJsonValue(change["value"]);
1259  else if (root_key == "viewport_y")
1260  // Set viewport y offset
1261  viewport_y.SetJsonValue(change["value"]);
1262  else if (root_key == "duration") {
1263  // Update duration of timeline
1264  info.duration = change["value"].asDouble();
1266  }
1267  else if (root_key == "width")
1268  // Set width
1269  info.width = change["value"].asInt();
1270  else if (root_key == "height")
1271  // Set height
1272  info.height = change["value"].asInt();
1273  else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
1274  // Set fps fraction
1275  if (!change["value"]["num"].isNull())
1276  info.fps.num = change["value"]["num"].asInt();
1277  if (!change["value"]["den"].isNull())
1278  info.fps.den = change["value"]["den"].asInt();
1279  }
1280  else if (root_key == "fps" && sub_key == "num")
1281  // Set fps.num
1282  info.fps.num = change["value"].asInt();
1283  else if (root_key == "fps" && sub_key == "den")
1284  // Set fps.den
1285  info.fps.den = change["value"].asInt();
1286  else if (root_key == "sample_rate")
1287  // Set sample rate
1288  info.sample_rate = change["value"].asInt();
1289  else if (root_key == "channels")
1290  // Set channels
1291  info.channels = change["value"].asInt();
1292  else if (root_key == "channel_layout")
1293  // Set channel layout
1294  info.channel_layout = (ChannelLayout) change["value"].asInt();
1295 
1296  else
1297 
1298  // Error parsing JSON (or missing keys)
1299  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1300 
1301 
1302  } else if (change["type"].asString() == "delete") {
1303 
1304  // DELETE / RESET
1305  // Reset the following properties (since we can't delete them)
1306  if (root_key == "color") {
1307  color = Color();
1308  color.red = Keyframe(0.0);
1309  color.green = Keyframe(0.0);
1310  color.blue = Keyframe(0.0);
1311  }
1312  else if (root_key == "viewport_scale")
1313  viewport_scale = Keyframe(1.0);
1314  else if (root_key == "viewport_x")
1315  viewport_x = Keyframe(0.0);
1316  else if (root_key == "viewport_y")
1317  viewport_y = Keyframe(0.0);
1318  else
1319  // Error parsing JSON (or missing keys)
1320  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1321 
1322  }
1323 
1324 }
1325 
1326 
1327 
1328 
Definition: ZmqLogger.cpp:162
This class represents a fraction.
Definition: Fraction.h:42
All cache managers in libopenshot are based on this CacheBase class.
Definition: CacheBase.h:45
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
Definition: Clip.h:256
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround...
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:40
void AddClip(Clip *clip)
Add an openshot::Clip to the timeline.
Definition: Timeline.cpp:68
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ReaderBase.cpp:106
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: EffectBase.cpp:109
void Close()
Close the timeline reader (and any resources it was consuming)
Definition: Timeline.cpp:588
Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:226
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ReaderBase.cpp:155
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:53
vector< Point > Points
Vector of all Points.
Definition: KeyFrame.h:92
ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:111
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:244
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:69
Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Definition: ReaderBase.h:76
Exception for frames that are out of bounds.
Definition: Exceptions.h:202
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:139
void Open()
Open the internal reader.
Definition: Clip.cpp:205
This class represents a color (used on the timeline and clips)
Definition: Color.h:42
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
Definition: ZmqLogger.cpp:38
int GetInt(long int index)
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:248
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:41
void Open()
Open the reader (and start consuming resources)
Definition: Timeline.cpp:611
void ApplyJsonDiff(string value)
Apply a special formatted JSON object, which represents a change to the timeline (add, update, delete) This is primarily designed to keep the timeline (and its child objects... such as clips and effects) in sync with another application... such as OpenShot Video Editor (http://www.openshot.org).
Definition: Timeline.cpp:976
This namespace is the default namespace for all code in the openshot library.
Do not apply pull-down techniques, just repeat or skip entire frames.
Definition: FrameMapper.h:64
virtual void Clear()=0
Clear the cache of all frames.
void RemoveClip(Clip *clip)
Remove an openshot::Clip from the timeline.
Definition: Timeline.cpp:99
void RemoveEffect(EffectBase *effect)
Remove an effect from the timeline.
Definition: Timeline.cpp:93
Exception for invalid JSON.
Definition: Exceptions.h:152
Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:225
Keyframe viewport_x
Curve representing the x coordinate for the viewport.
Definition: Timeline.h:247
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Color.cpp:129
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:219
string GetColorHex(long int frame_number)
Get the HEX value of a color at a specific frame.
Definition: Color.cpp:64
Color color
Background color of timeline canvas.
Definition: Timeline.h:251
virtual void Remove(long int frame_number)=0
Remove a specific frame.
Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout)
Default Constructor for the timeline (which sets the canvas width and height and FPS) ...
Definition: Timeline.cpp:33
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:45
Align clip to the top center of its parent.
Definition: Enums.h:38
void SetJson(string value)
Load JSON string into this object.
Definition: Timeline.cpp:892
int den
Denominator for the fraction.
Definition: Fraction.h:45
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:82
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Definition: KeyFrame.h:64
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:52
long int video_length
The number of frames in the video stream.
Definition: ReaderBase.h:74
void AddEffect(EffectBase *effect)
Add an effect to the timeline.
Definition: Timeline.cpp:83
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:497
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Clip.cpp:732
float Duration()
Get the length of this clip (in seconds)
Definition: ClipBase.h:87
This class is a memory-based cache manager for Frame objects.
Definition: CacheMemory.h:48
float Start()
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:85
double ToDouble()
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:46
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:81
Exception when too many seek attempts happen.
Definition: Exceptions.h:254