OpenShot Library | libopenshot  0.2.4
Timeline.cpp
1 /**
2  * @file
3  * @brief Source file for Timeline class
4  * @author Jonathan Thomas <jonathan@openshot.org>
5  *
6  * @ref License
7  */
8 
9 /* LICENSE
10  *
11  * Copyright (c) 2008-2019 OpenShot Studios, LLC
12  * <http://www.openshotstudios.com/>. This file is part of
13  * OpenShot Library (libopenshot), an open-source project dedicated to
14  * delivering high quality video editing and animation solutions to the
15  * world. For more information visit <http://www.openshot.org/>.
16  *
17  * OpenShot Library (libopenshot) is free software: you can redistribute it
18  * and/or modify it under the terms of the GNU Lesser General Public License
19  * as published by the Free Software Foundation, either version 3 of the
20  * License, or (at your option) any later version.
21  *
22  * OpenShot Library (libopenshot) is distributed in the hope that it will be
23  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
24  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25  * GNU Lesser General Public License for more details.
26  *
27  * You should have received a copy of the GNU Lesser General Public License
28  * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
29  */
30 
31 #include "../include/Timeline.h"
32 
33 using namespace openshot;
34 
35 // Default Constructor for the timeline (which sets the canvas width and height)
36 Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
37  is_open(false), auto_map_clips(true), managed_cache(true)
38 {
39  // Create CrashHandler and Attach (in case of errors)
40  CrashHandler::Instance();
41 
42  // Init viewport size (curve based, because it can be animated)
43  viewport_scale = Keyframe(100.0);
44  viewport_x = Keyframe(0.0);
45  viewport_y = Keyframe(0.0);
46 
47  // Init background color
48  color.red = Keyframe(0.0);
49  color.green = Keyframe(0.0);
50  color.blue = Keyframe(0.0);
51 
52  // Init FileInfo struct (clear all values)
53  info.width = width;
54  info.height = height;
55  info.fps = fps;
56  info.sample_rate = sample_rate;
57  info.channels = channels;
58  info.channel_layout = channel_layout;
59  info.video_timebase = fps.Reciprocal();
60  info.duration = 60 * 30; // 30 minute default duration
61  info.has_audio = true;
62  info.has_video = true;
63  info.video_length = info.fps.ToFloat() * info.duration;
64  info.display_ratio = openshot::Fraction(width, height);
65  info.display_ratio.Reduce();
66  info.pixel_ratio = openshot::Fraction(1, 1);
67 
68  // Init max image size
69  SetMaxSize(info.width, info.height);
70 
71  // Init cache
72  final_cache = new CacheMemory();
73  final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
74 }
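A minimal usage sketch for this constructor (illustrative only; the media path and clip placement are hypothetical, and error handling is omitted):

    #include "Timeline.h"
    #include "Clip.h"
    using namespace openshot;

    int main() {
        // 1920x1080 timeline at 30 fps, 44.1 kHz stereo
        Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);

        Clip c("input.mp4");   // hypothetical media file
        c.Position(0.0);       // place the clip at the start of the timeline
        t.AddClip(&c);

        t.Open();
        std::shared_ptr<Frame> f = t.GetFrame(1);  // composite frame #1
        t.Close();
        return 0;
    }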
75 
76 Timeline::~Timeline() {
77  if (is_open)
78  // Auto Close if not already
79  Close();
80 
81  // Free all allocated frame mappers
82  std::set<FrameMapper *>::iterator it;
83  for (it = allocated_frame_mappers.begin(); it != allocated_frame_mappers.end(); ) {
84  // Dereference and clean up FrameMapper object
85  FrameMapper *mapper = (*it);
86  mapper->Reader(NULL);
87  mapper->Close();
88  delete mapper;
89  // Remove reference and proceed to next element
90  it = allocated_frame_mappers.erase(it);
91  }
92 
93  // Destroy previous cache (if managed by timeline)
94  if (managed_cache && final_cache) {
95  delete final_cache;
96  final_cache = NULL;
97  }
98 }
99 
100 // Add an openshot::Clip to the timeline
101 void Timeline::AddClip(Clip* clip)
102 {
103  // All clips should be converted to the frame rate of this timeline
104  if (auto_map_clips)
105  // Apply framemapper (or update existing framemapper)
106  apply_mapper_to_clip(clip);
107 
108  // Add clip to list
109  clips.push_back(clip);
110 
111  // Sort clips
112  sort_clips();
113 }
114 
115 // Add an effect to the timeline
116 void Timeline::AddEffect(EffectBase* effect)
117 {
118  // Add effect to list
119  effects.push_back(effect);
120 
121  // Sort effects
122  sort_effects();
123 }
124 
125 // Remove an effect from the timeline
126 void Timeline::RemoveEffect(EffectBase* effect)
127 {
128  effects.remove(effect);
129 }
130 
131 // Remove an openshot::Clip from the timeline
132 void Timeline::RemoveClip(Clip* clip)
133 {
134  clips.remove(clip);
135 }
136 
137 // Apply a FrameMapper to a clip, so its output matches the settings of this timeline
138 void Timeline::apply_mapper_to_clip(Clip* clip)
139 {
140  // Get lock (prevent getting frames while this happens)
141  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
142 
143  // Determine type of reader
144  ReaderBase* clip_reader = NULL;
145  if (clip->Reader()->Name() == "FrameMapper")
146  {
147  // Get the existing reader
148  clip_reader = (ReaderBase*) clip->Reader();
149 
150  } else {
151 
152  // Create a new FrameMapper to wrap the current reader
153  FrameMapper* mapper = new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
154  allocated_frame_mappers.insert(mapper);
155  clip_reader = (ReaderBase*) mapper;
156  }
157 
158  // Update the mapping
159  FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
160  clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
161 
162  // Update clip reader
163  clip->Reader(clip_reader);
164 }
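The FrameMapper wraps the clip's reader so frame numbers and audio are translated from the clip's native frame rate and sample rate into the timeline's. A simplified sketch of the frame-number side of that mapping (an assumed helper, not the library's API; the real FrameMapper also builds field tables and resamples audio):

    #include <cmath>
    #include "Fraction.h"

    // Map a timeline frame number to the nearest source frame, given the two
    // frame rates. Illustrative only; FrameMapper::ChangeMapping() above is
    // the real API.
    int64_t map_frame(int64_t timeline_frame, openshot::Fraction timeline_fps, openshot::Fraction source_fps) {
        double ratio = source_fps.ToDouble() / timeline_fps.ToDouble();
        return (int64_t) std::round((timeline_frame - 1) * ratio) + 1;
    }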
165 
166 // Apply the timeline's framerate and samplerate to all clips
167 void Timeline::ApplyMapperToClips()
168 {
169  // Clear all cached frames
170  ClearAllCache();
171 
172  // Loop through all clips
173  for (auto clip : clips)
174  {
175  // Apply framemapper (or update existing framemapper)
176  apply_mapper_to_clip(clip);
177  }
178 }
179 
180 // Calculate time of a frame number, based on a framerate
181 double Timeline::calculate_time(int64_t number, Fraction rate)
182 {
183  // Get float version of fps fraction
184  double raw_fps = rate.ToFloat();
185 
186  // Return the time (in seconds) of this frame
187  return double(number - 1) / raw_fps;
188 }
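For example, at 30 fps the first frame starts at 0.0 seconds, and frame 31 starts exactly one second in:

    // calculate_time(1,  Fraction(30, 1)) == (1  - 1) / 30.0 == 0.0 seconds
    // calculate_time(31, Fraction(30, 1)) == (31 - 1) / 30.0 == 1.0 seconds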
189 
190 // Apply effects to the source frame (if any)
191 std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
192 {
193  // Debug output
194  ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer);
195 
196  // Find Effects at this position and layer
197  for (auto effect : effects)
198  {
199  // Does clip intersect the current requested time
200  long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
201  long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble()) + 1;
202 
203  bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);
204 
205  // Debug output
206  ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer);
207 
208  // Effect is visible
209  if (does_effect_intersect)
210  {
211  // Determine the frame needed for this clip (based on the position on the timeline)
212  long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
213  long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;
214 
215  // Debug output
216  ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "effect_frame_number", effect_frame_number, "does_effect_intersect", does_effect_intersect);
217 
218  // Apply the effect to this frame
219  frame = effect->GetFrame(frame, effect_frame_number);
220  }
221 
222  } // end effect loop
223 
224  // Return modified frame
225  return frame;
226 }
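A worked example of the intersection arithmetic above, assuming a 30 fps timeline:

    // An effect with Position() == 2.0 s and Duration() == 3.0 s covers timeline
    // frames round(2.0 * 30) + 1 = 61 through round(5.0 * 30) + 1 = 151.
    // For timeline frame 100 (with Start() == 0.0), the effect-local frame is
    // 100 - 61 + 1 = 40, which is what effect->GetFrame() receives.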
227 
228 // Get or generate a blank frame
229 std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
230 {
231  std::shared_ptr<Frame> new_frame;
232 
233  // Init some basic properties about this frame
234  int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);
235 
236  try {
237  // Debug output
238  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame);
239 
240  // Attempt to get a frame (but this could fail if a reader has just been closed)
241  #pragma omp critical (T_GetOrCreateFrame)
242  new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));
243 
244  // Return real frame
245  return new_frame;
246 
247  } catch (const ReaderClosed & e) {
248  // ...
249  } catch (const TooManySeeks & e) {
250  // ...
251  } catch (const OutOfBoundsFrame & e) {
252  // ...
253  }
254 
255  // Debug output
256  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame);
257 
258  // Create blank frame
259  new_frame = std::make_shared<Frame>(number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels);
260  #pragma omp critical (T_GetOrCreateFrame)
261  {
262  new_frame->SampleRate(info.sample_rate);
263  new_frame->ChannelsLayout(info.channel_layout);
264  }
265  return new_frame;
266 }
267 
268 // Process a new layer of video or audio
269 void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
270 {
271  // Get the clip's frame & image
272  std::shared_ptr<Frame> source_frame;
273  #pragma omp critical (T_addLayer)
274  source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
275 
276  // No frame found... so bail
277  if (!source_frame)
278  return;
279 
280  // Debug output
281  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);
282 
283  /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
284  if (source_clip->Waveform())
285  {
286  // Debug output
287  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number);
288 
289  // Get the color of the waveform
290  int red = source_clip->wave_color.red.GetInt(clip_frame_number);
291  int green = source_clip->wave_color.green.GetInt(clip_frame_number);
292  int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
293  int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);
294 
295  // Generate Waveform Dynamically (the size of the timeline)
296  std::shared_ptr<QImage> source_image;
297  #pragma omp critical (T_addLayer)
298  source_image = source_frame->GetWaveform(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, red, green, blue, alpha);
299  source_frame->AddImage(std::shared_ptr<QImage>(source_image));
300  }
301 
302  /* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
303  * effects on the top clip. */
304  if (is_top_clip && source_frame) {
305  #pragma omp critical (T_addLayer)
306  source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
307  }
308 
309  // Declare an image to hold the source frame's image
310  std::shared_ptr<QImage> source_image;
311 
312  /* COPY AUDIO - with correct volume */
313  if (source_clip->Reader()->info.has_audio) {
314  // Debug output
315  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);
316 
317  if (source_frame->GetAudioChannelsCount() == info.channels && source_clip->has_audio.GetInt(clip_frame_number) != 0)
318  for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
319  {
320  // Get volume from previous frame and this frame
321  float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1);
322  float volume = source_clip->volume.GetValue(clip_frame_number);
323  int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
324  int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)
325 
326  // Apply volume mixing strategy
327  if (source_clip->mixing == VOLUME_MIX_AVERAGE && max_volume > 1.0) {
328  // Don't allow this clip to exceed 100% (divide volume equally between all overlapping clips with volume)
329  previous_volume = previous_volume / max_volume;
330  volume = volume / max_volume;
331  }
332  else if (source_clip->mixing == VOLUME_MIX_REDUCE && max_volume > 1.0) {
333  // Reduce clip volume by a bit, hoping it will prevent exceeding 100% (but it is very possible it will)
334  previous_volume = previous_volume * 0.77;
335  volume = volume * 0.77;
336  }
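 // Worked example: two overlapping clips, each at volume 0.8, give
 // max_volume = 1.6. VOLUME_MIX_AVERAGE scales each to 0.8 / 1.6 = 0.5,
 // so the mixed sum stays at 1.0. VOLUME_MIX_REDUCE scales each to
 // 0.8 * 0.77 = 0.616, a sum of ~1.23 (which may still clip).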
337 
338  // If channel filter enabled, check for correct channel (and skip non-matching channels)
339  if (channel_filter != -1 && channel_filter != channel)
340  continue; // skip to next channel
341 
342  // If no volume on this frame or previous frame, do nothing
343  if (previous_volume == 0.0 && volume == 0.0)
344  continue; // skip to next channel
345 
346  // If channel mapping disabled, just use the current channel
347  if (channel_mapping == -1)
348  channel_mapping = channel;
349 
350  // Apply ramp to source frame (if needed)
351  if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
352  source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
353 
354  // TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
355  // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
356  // number of samples returned is variable... and does not match the number expected.
357  // This is a crude solution at best. =)
358  if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
359  // Force timeline frame to match the source frame
360  #pragma omp critical (T_addLayer)
361  new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
362 
363  // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added
364  // together, so be sure to set them correctly, or the sum may exceed 1.0 (causing audio distortion).
365  #pragma omp critical (T_addLayer)
366  new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
367 
368  }
369  else
370  // Debug output
371  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);
372 
373  }
374 
375  // Bail out if video is disabled, or this is an audio-only frame (no waveform visualization in use)
376  if (source_clip->has_video.GetInt(clip_frame_number) == 0 ||
377  (!source_clip->Waveform() && !source_clip->Reader()->info.has_video))
378  // Skip the rest of the image processing for performance reasons
379  return;
380 
381  // Debug output
382  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number);
383 
384  // Get actual frame image data
385  source_image = source_frame->GetImage();
386 
387  /* ALPHA & OPACITY */
388  if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
389  {
390  float alpha = source_clip->alpha.GetValue(clip_frame_number);
391 
392  // Get source image's pixels
393  unsigned char *pixels = (unsigned char *) source_image->bits();
394 
395  // Loop through pixels
396  for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
397  {
398  // Get the alpha value of this pixel
399  int A = pixels[byte_index + 3];
400 
401  // Scale the pixel's alpha channel by the clip's opacity
402  pixels[byte_index + 3] = A * alpha;
403  }
404 
405  // Debug output
406  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number);
407  }
408 
409  /* RESIZE SOURCE IMAGE - based on scale type */
410  QSize source_size = source_image->size();
411  switch (source_clip->scale)
412  {
413  case (SCALE_FIT): {
414  // keep aspect ratio
415  source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::KeepAspectRatio);
416 
417  // Debug output
418  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
419  break;
420  }
421  case (SCALE_STRETCH): {
422  // ignore aspect ratio
423  source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::IgnoreAspectRatio);
424 
425  // Debug output
426  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
427  break;
428  }
429  case (SCALE_CROP): {
430  QSize width_size(Settings::Instance()->MAX_WIDTH, round(Settings::Instance()->MAX_WIDTH / (float(source_size.width()) / float(source_size.height()))));
431  QSize height_size(round(Settings::Instance()->MAX_HEIGHT / (float(source_size.height()) / float(source_size.width()))), Settings::Instance()->MAX_HEIGHT);
432 
433  // respect aspect ratio
434  if (width_size.width() >= Settings::Instance()->MAX_WIDTH && width_size.height() >= Settings::Instance()->MAX_HEIGHT)
435  source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio);
436  else
437  source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio);
438 
439  // Debug output
440  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
441  break;
442  }
443  case (SCALE_NONE): {
444  // Calculate ratio of source size to project size
445  // Even with no scaling, previews need to be adjusted correctly
446  // (otherwise NONE scaling draws the frame image outside of the preview)
447  float source_width_ratio = source_size.width() / float(info.width);
448  float source_height_ratio = source_size.height() / float(info.height);
449  source_size.scale(Settings::Instance()->MAX_WIDTH * source_width_ratio, Settings::Instance()->MAX_HEIGHT * source_height_ratio, Qt::KeepAspectRatio);
450 
451  // Debug output
452  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_NONE)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
453  break;
454  }
455  }
456 
457  float crop_x = source_clip->crop_x.GetValue(clip_frame_number);
458  float crop_y = source_clip->crop_y.GetValue(clip_frame_number);
459  float crop_w = source_clip->crop_width.GetValue(clip_frame_number);
460  float crop_h = source_clip->crop_height.GetValue(clip_frame_number);
461  switch(source_clip->crop_gravity)
462  {
463  case (GRAVITY_TOP_LEFT):
464  // This is only here to prevent unused-enum warnings
465  break;
466  case (GRAVITY_TOP):
467  crop_x += 0.5;
468  break;
469  case (GRAVITY_TOP_RIGHT):
470  crop_x += 1.0;
471  break;
472  case (GRAVITY_LEFT):
473  crop_y += 0.5;
474  break;
475  case (GRAVITY_CENTER):
476  crop_x += 0.5;
477  crop_y += 0.5;
478  break;
479  case (GRAVITY_RIGHT):
480  crop_x += 1.0;
481  crop_y += 0.5;
482  break;
483  case (GRAVITY_BOTTOM_LEFT):
484  crop_y += 1.0;
485  break;
486  case (GRAVITY_BOTTOM):
487  crop_x += 0.5;
488  crop_y += 1.0;
489  break;
490  case (GRAVITY_BOTTOM_RIGHT):
491  crop_x += 1.0;
492  crop_y += 1.0;
493  break;
494  }
495 
496 
497  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
498  float x = 0.0; // left
499  float y = 0.0; // top
500 
501  // Adjust size for scale x and scale y
502  float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
503  float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
504  float scaled_source_width = source_size.width() * sx;
505  float scaled_source_height = source_size.height() * sy;
506 
507  switch (source_clip->gravity)
508  {
509  case (GRAVITY_TOP_LEFT):
510  // This is only here to prevent unused-enum warnings
511  break;
512  case (GRAVITY_TOP):
513  x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center
514  break;
515  case (GRAVITY_TOP_RIGHT):
516  x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right
517  break;
518  case (GRAVITY_LEFT):
519  y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center
520  break;
521  case (GRAVITY_CENTER):
522  x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center
523  y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center
524  break;
525  case (GRAVITY_RIGHT):
526  x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right
527  y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center
528  break;
529  case (GRAVITY_BOTTOM_LEFT):
530  y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom
531  break;
532  case (GRAVITY_BOTTOM):
533  x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center
534  y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom
535  break;
536  case (GRAVITY_BOTTOM_RIGHT):
537  x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right
538  y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom
539  break;
540  }
541 
542  // Debug output
543  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "scaled_source_width", scaled_source_width, "info.height", info.height, "scaled_source_height", scaled_source_height);
544 
545  /* LOCATION, ROTATION, AND SCALE */
546  float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
547  x += (Settings::Instance()->MAX_WIDTH * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
548  y += (Settings::Instance()->MAX_HEIGHT * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
549  float shear_x = source_clip->shear_x.GetValue(clip_frame_number);
550  float shear_y = source_clip->shear_y.GetValue(clip_frame_number);
551 
552  bool transformed = false;
553  QTransform transform;
554 
555  // Transform source image (if needed)
556  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
557 
558  if (!isEqual(r, 0)) {
559  // ROTATE CLIP
560  float origin_x = x + (scaled_source_width / 2.0);
561  float origin_y = y + (scaled_source_height / 2.0);
562  transform.translate(origin_x, origin_y);
563  transform.rotate(r);
564  transform.translate(-origin_x,-origin_y);
565  transformed = true;
566  }
567 
568  if (!isEqual(x, 0) || !isEqual(y, 0)) {
569  // TRANSLATE/MOVE CLIP
570  transform.translate(x, y);
571  transformed = true;
572  }
573 
574  // SCALE CLIP (if needed)
575  float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
576  float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
577 
578  if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
579  transform.scale(source_width_scale, source_height_scale);
580  transformed = true;
581  }
582 
583  if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
584  // SHEAR HEIGHT/WIDTH
585  transform.shear(shear_x, shear_y);
586  transformed = true;
587  }
588 
589  // Debug output
590  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed);
591 
592  /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
593  std::shared_ptr<QImage> new_image;
594  #pragma omp critical (T_addLayer)
595  new_image = new_frame->GetImage();
596 
597  // Load timeline's new frame image into a QPainter
598  QPainter painter(new_image.get());
599  painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
600 
601  // Apply transform (translate, rotate, scale)... if any
602  if (transformed)
603  painter.setTransform(transform);
604 
605  // Composite a new layer onto the image
606  painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
607  painter.drawImage(0, 0, *source_image, crop_x * source_image->width(), crop_y * source_image->height(), crop_w * source_image->width(), crop_h * source_image->height());
608 
609  // Draw frame #'s on top of image (if needed)
610  if (source_clip->display != FRAME_DISPLAY_NONE) {
611  std::stringstream frame_number_str;
612  switch (source_clip->display)
613  {
614  case (FRAME_DISPLAY_NONE):
615  // This is only here to prevent unused-enum warnings
616  break;
617 
618  case (FRAME_DISPLAY_CLIP):
619  frame_number_str << clip_frame_number;
620  break;
621 
622  case (FRAME_DISPLAY_TIMELINE):
623  frame_number_str << timeline_frame_number;
624  break;
625 
626  case (FRAME_DISPLAY_BOTH):
627  frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")";
628  break;
629  }
630 
631  // Draw frame number on top of image
632  painter.setPen(QColor("#ffffff"));
633  painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
634  }
635 
636  painter.end();
637 
638  // Debug output
639  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed);
640 }
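The QTransform above is composed in a fixed order: rotation about the clip's center, then translation, scale, and shear. The same composition in isolation (a sketch using Qt's QTransform; parameter names mirror the locals above):

    #include <QTransform>

    // Build the composite transform used when drawing a clip's image.
    // Each call multiplies onto the existing matrix, so order matters.
    QTransform build_transform(float x, float y, float r,
                               float scaled_w, float scaled_h,
                               float scale_x, float scale_y,
                               float shear_x, float shear_y) {
        QTransform t;
        float origin_x = x + (scaled_w / 2.0f);
        float origin_y = y + (scaled_h / 2.0f);
        t.translate(origin_x, origin_y);    // rotate about the clip's center
        t.rotate(r);
        t.translate(-origin_x, -origin_y);
        t.translate(x, y);                  // position on the canvas
        t.scale(scale_x, scale_y);          // resize image to the target size
        t.shear(shear_x, shear_y);
        return t;
    }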
641 
642 // Update the list of 'opened' clips
643 void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
644 {
645  ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size());
646 
647  // is clip already in list?
648  bool clip_found = open_clips.count(clip);
649 
650  if (clip_found && !does_clip_intersect)
651  {
652  // Remove clip from 'opened' list, because it's closed now
653  open_clips.erase(clip);
654 
655  // Close clip
656  clip->Close();
657  }
658  else if (!clip_found && does_clip_intersect)
659  {
660  // Add clip to 'opened' list, because it's missing
661  open_clips[clip] = clip;
662 
663  try {
664  // Open the clip
665  clip->Open();
666 
667  } catch (const InvalidFile & e) {
668  // ...
669  }
670  }
671 
672  // Debug output
673  ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size());
674 }
675 
676 // Sort clips by position on the timeline
677 void Timeline::sort_clips()
678 {
679  // Debug output
680  ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size());
681 
682  // sort clips
683  clips.sort(CompareClips());
684 }
685 
686 // Sort effects by position on the timeline
687 void Timeline::sort_effects()
688 {
689  // sort clips
690  effects.sort(CompareEffects());
691 }
692 
693 // Close the reader (and any resources it was consuming)
695 {
696  ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close");
697 
698  // Close all open clips
699  for (auto clip : clips)
700  {
701  // Open or Close this clip, based on if it's intersecting or not
702  update_open_clips(clip, false);
703  }
704 
705  // Mark timeline as closed
706  is_open = false;
707 
708  // Clear cache
709  final_cache->Clear();
710 }
711 
712 // Open the reader (and start consuming resources)
714 {
715  is_open = true;
716 }
717 
718 // Compare 2 floating point numbers for equality
719 bool Timeline::isEqual(double a, double b)
720 {
721  return fabs(a - b) < 0.000001;
722 }
723 
724 // Get an openshot::Frame object for a specific frame number of this reader.
725 std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
726 {
727  // Adjust out of bounds frame number
728  if (requested_frame < 1)
729  requested_frame = 1;
730 
731  // Check cache
732  std::shared_ptr<Frame> frame;
733  #pragma omp critical (T_GetFrame)
734  frame = final_cache->GetFrame(requested_frame);
735  if (frame) {
736  // Debug output
737  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame);
738 
739  // Return cached frame
740  return frame;
741  }
742  else
743  {
744  // Create a scoped lock, allowing only a single thread to run the following code at one time
745  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
746 
747  // Check for open reader (or throw exception)
748  if (!is_open)
749  throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.");
750 
751  // Check cache again (due to locking)
752  #pragma omp critical (T_GetFrame)
753  frame = final_cache->GetFrame(requested_frame);
754  if (frame) {
755  // Debug output
756  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame);
757 
758  // Return cached frame
759  return frame;
760  }
761 
762  // Minimum number of frames to process (for performance reasons)
763  int minimum_frames = OPEN_MP_NUM_PROCESSORS;
764 
765  // Get a list of clips that intersect with the requested section of timeline
766  // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
767  std::vector<Clip*> nearby_clips;
768  #pragma omp critical (T_GetFrame)
769  nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);
770 
771  omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
772  // Allow nested OpenMP sections
773  omp_set_nested(true);
774 
775  // Debug output
776  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS);
777 
778  // GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
779  // Determine all clip frames, and request them in order (to keep resampled audio in sequence)
780  for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
781  {
782  // Loop through clips
783  for (auto clip : nearby_clips)
784  {
785  long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
786  long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
787 
788  bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
789  if (does_clip_intersect)
790  {
791  // Get clip frame #
792  long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
793  long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
794  // Cache clip object
795  clip->GetFrame(clip_frame_number);
796  }
797  }
798  }
799 
800  #pragma omp parallel
801  {
802  // Loop through all requested frames
803  #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames) schedule(static,1)
804  for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
805  {
806  // Debug output
807  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num());
808 
809  // Init some basic properties about this frame
810  int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);
811 
812  // Create blank frame (which will become the requested frame)
813  std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels));
814  #pragma omp critical (T_GetFrame)
815  {
816  new_frame->AddAudioSilence(samples_in_frame);
817  new_frame->SampleRate(info.sample_rate);
818  new_frame->ChannelsLayout(info.channel_layout);
819  }
820 
821  // Debug output
822  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);
823 
824  // Add Background Color to 1st layer (if animated or not black)
825  if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
826  (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
827  new_frame->AddColor(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, color.GetColorHex(frame_number));
828 
829  // Debug output
830  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size());
831 
832  // Find Clips near this time
833  for (auto clip : nearby_clips)
834  {
835  long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
836  long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
837 
838  bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
839 
840  // Debug output
841  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect);
842 
843  // Clip is visible
844  if (does_clip_intersect)
845  {
846  // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
847  bool is_top_clip = true;
848  float max_volume = 0.0;
849  for (auto nearby_clip : nearby_clips)
850  {
851  long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
852  long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
853  long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
854  long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;
855 
856  // Determine if top clip
857  if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
858  nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
859  nearby_clip_start_position > clip_start_position && is_top_clip == true) {
860  is_top_clip = false;
861  }
862 
863  // Determine max volume of overlapping clips
864  if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
865  nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
866  nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
867  max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
868  }
869  }
870 
871  // Determine the frame needed for this clip (based on the position on the timeline)
872  long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
873  long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
874 
875  // Debug output
876  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);
877 
878  // Add clip's frame as layer
879  add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);
880 
881  } else
882  // Debug output
883  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect);
884 
885  } // end clip loop
886 
887  // Debug output
888  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);
889 
890  // Set frame # on mapped frame
891  #pragma omp ordered
892  {
893  new_frame->SetFrameNumber(frame_number);
894 
895  // Add final frame to cache
896  final_cache->Add(new_frame);
897  }
898 
899  } // end frame loop
900  } // end parallel
901 
902  // Debug output
903  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num());
904 
905  // Return frame (or blank frame)
906  return final_cache->GetFrame(requested_frame);
907  }
908 }
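GetFrame() uses a double-checked cache lookup: the cache is probed once before taking the lock and once again after, so threads that raced on the same request don't render it twice. The pattern in miniature (hypothetical helper names, with std::mutex standing in for the CriticalSection used here):

    #include <memory>
    #include <mutex>

    // Hypothetical stand-ins for final_cache->GetFrame() and the render path.
    std::shared_ptr<openshot::Frame> cache_lookup(int64_t n);
    std::shared_ptr<openshot::Frame> render_and_cache(int64_t n);
    std::mutex render_mutex;

    std::shared_ptr<openshot::Frame> get_cached_or_render(int64_t n) {
        if (auto f = cache_lookup(n))      // 1st check: cheap, no lock held
            return f;
        std::lock_guard<std::mutex> guard(render_mutex);
        if (auto f = cache_lookup(n))      // 2nd check: another thread may have
            return f;                      // rendered it while we waited
        return render_and_cache(n);        // only one thread renders the frame
    }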
909 
910 
911 // Find intersecting (or non-intersecting) clips
912 std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
913 {
914  // Find matching clips
915  std::vector<Clip*> matching_clips;
916 
917  // Calculate the range of requested frames
918  float min_requested_frame = requested_frame;
919  float max_requested_frame = requested_frame + (number_of_frames - 1);
920 
921  // Re-Sort Clips (since they likely changed)
922  sort_clips();
923 
924  // Find Clips at this time
925  for (auto clip : clips)
926  {
927  // Does clip intersect the current requested time
928  long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
929  long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
930 
931  bool does_clip_intersect =
932  (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
933  (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);
934 
935  // Debug output
936  ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect);
937 
938  // Open (or schedule for closing) this clip, based on if it's intersecting or not
939  #pragma omp critical (reader_lock)
940  update_open_clips(clip, does_clip_intersect);
941 
942  // Clip is visible
943  if (does_clip_intersect && include)
944  // Add the intersecting clip
945  matching_clips.push_back(clip);
946 
947  else if (!does_clip_intersect && !include)
948  // Add the non-intersecting clip
949  matching_clips.push_back(clip);
950 
951  } // end clip loop
952 
953  // return list
954  return matching_clips;
955 }
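A worked example of the range test above, at 30 fps:

    // A clip with Position() == 2.5 s and Duration() == 3.0 s occupies timeline
    // frames round(2.5 * 30) + 1 = 76 through round(5.5 * 30) + 1 = 166.
    // A request for frames 160..183 (min_requested_frame = 160,
    // max_requested_frame = 183) therefore intersects it, so the clip is
    // opened and returned when include == true.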
956 
957 // Set the cache object used by this reader
958 void Timeline::SetCache(CacheBase* new_cache) {
959  // Destroy previous cache (if managed by timeline)
960  if (managed_cache && final_cache) {
961  delete final_cache;
962  final_cache = NULL;
963  managed_cache = false;
964  }
965 
966  // Set new cache
967  final_cache = new_cache;
968 }
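A sketch of swapping in a caller-managed cache (the 20 MB limit is arbitrary; note that a cache installed via SetCache() is not deleted by the Timeline, so the caller owns its lifetime):

    // Replace the default in-memory cache with a size-limited one.
    CacheMemory* my_cache = new CacheMemory(20 * 1024 * 1024);  // ~20 MB cap
    timeline.SetCache(my_cache);
    // ... use the timeline ...
    delete my_cache;  // caller-managed lifetime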
969 
970 // Generate JSON string of this object
971 std::string Timeline::Json() {
972 
973  // Return formatted string
974  return JsonValue().toStyledString();
975 }
976 
977 // Generate Json::JsonValue for this object
978 Json::Value Timeline::JsonValue() {
979 
980  // Create root json object
981  Json::Value root = ReaderBase::JsonValue(); // get parent properties
982  root["type"] = "Timeline";
983  root["viewport_scale"] = viewport_scale.JsonValue();
984  root["viewport_x"] = viewport_x.JsonValue();
985  root["viewport_y"] = viewport_y.JsonValue();
986  root["color"] = color.JsonValue();
987 
988  // Add array of clips
989  root["clips"] = Json::Value(Json::arrayValue);
990 
991  // Find Clips at this time
992  for (auto existing_clip : clips)
993  {
994  root["clips"].append(existing_clip->JsonValue());
995  }
996 
997  // Add array of effects
998  root["effects"] = Json::Value(Json::arrayValue);
999 
1000  // loop through effects
1001  for (auto existing_effect: effects)
1002  {
1003  root["effects"].append(existing_effect->JsonValue());
1004  }
1005 
1006  // return JsonValue
1007  return root;
1008 }
1009 
1010 // Load JSON string into this object
1011 void Timeline::SetJson(std::string value) {
1012 
1013  // Get lock (prevent getting frames while this happens)
1014  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
1015 
1016  // Parse JSON string into JSON objects
1017  Json::Value root;
1018  Json::CharReaderBuilder rbuilder;
1019  Json::CharReader* reader(rbuilder.newCharReader());
1020 
1021  std::string errors;
1022  bool success = reader->parse( value.c_str(),
1023  value.c_str() + value.size(), &root, &errors );
1024  delete reader;
1025 
1026  if (!success)
1027  // Raise exception
1028  throw InvalidJSON("JSON could not be parsed (or is invalid)");
1029 
1030  try
1031  {
1032  // Set all values that match
1033  SetJsonValue(root);
1034  }
1035  catch (const std::exception& e)
1036  {
1037  // Error parsing JSON (or missing keys)
1038  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
1039  }
1040 }
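An illustrative payload for SetJson(), assuming an existing Timeline named timeline (field values are arbitrary; real projects also carry full clip and effect descriptions in the two arrays):

    std::string json = R"({
        "width": 1280,
        "height": 720,
        "fps": { "num": 30, "den": 1 },
        "duration": 60.0,
        "clips": [],
        "effects": []
    })";
    timeline.SetJson(json);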
1041 
1042 // Load Json::JsonValue into this object
1043 void Timeline::SetJsonValue(Json::Value root) {
1044 
1045  // Close timeline before we do anything (this also removes all open and closing clips)
1046  bool was_open = is_open;
1047  Close();
1048 
1049  // Set parent data
1050  ReaderBase::SetJsonValue(root);
1051 
1052  if (!root["clips"].isNull()) {
1053  // Clear existing clips
1054  clips.clear();
1055 
1056  // loop through clips
1057  for (const Json::Value existing_clip : root["clips"]) {
1058  // Create Clip
1059  Clip *c = new Clip();
1060 
1061  // Load Json into Clip
1062  c->SetJsonValue(existing_clip);
1063 
1064  // Add Clip to Timeline
1065  AddClip(c);
1066  }
1067  }
1068 
1069  if (!root["effects"].isNull()) {
1070  // Clear existing effects
1071  effects.clear();
1072 
1073  // loop through effects
1074  for (const Json::Value existing_effect : root["effects"]) {
1075  // Create Effect
1076  EffectBase *e = NULL;
1077 
1078  if (!existing_effect["type"].isNull()) {
1079  // Create instance of effect
1080  if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) ) {
1081 
1082  // Load Json into Effect
1083  e->SetJsonValue(existing_effect);
1084 
1085  // Add Effect to Timeline
1086  AddEffect(e);
1087  }
1088  }
1089  }
1090  }
1091 
1092  if (!root["duration"].isNull()) {
1093  // Update duration of timeline
1094  info.duration = root["duration"].asDouble();
1095  info.video_length = info.fps.ToFloat() * info.duration;
1096  }
1097 
1098  // Re-open if needed
1099  if (was_open)
1100  Open();
1101 }
1102 
1103 // Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
1104 void Timeline::ApplyJsonDiff(std::string value) {
1105 
1106  // Get lock (prevent getting frames while this happens)
1107  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
1108 
1109  // Parse JSON string into JSON objects
1110  Json::Value root;
1111  Json::CharReaderBuilder rbuilder;
1112  Json::CharReader* reader(rbuilder.newCharReader());
1113 
1114  std::string errors;
1115  bool success = reader->parse( value.c_str(),
1116  value.c_str() + value.size(), &root, &errors );
1117  delete reader;
1118 
1119  if (!success || !root.isArray())
1120  // Raise exception
1121  throw InvalidJSON("JSON could not be parsed (or is invalid).");
1122 
1123  try
1124  {
1125  // Process the JSON change array, loop through each item
1126  for (const Json::Value change : root) {
1127  std::string change_key = change["key"][(uint)0].asString();
1128 
1129  // Process each type of change
1130  if (change_key == "clips")
1131  // Apply to CLIPS
1132  apply_json_to_clips(change);
1133 
1134  else if (change_key == "effects")
1135  // Apply to EFFECTS
1136  apply_json_to_effects(change);
1137 
1138  else
1139  // Apply to TIMELINE
1140  apply_json_to_timeline(change);
1141 
1142  }
1143  }
1144  catch (const std::exception& e)
1145  {
1146  // Error parsing JSON (or missing keys)
1147  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
1148  }
1149 }
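Each change object carries a "type" (insert, update, or delete), a "key" path whose first element routes it to clips, effects, or a timeline property, and a "value". An illustrative diff, assuming an existing Timeline named timeline (ids and values are arbitrary):

    std::string diff = R"([
        { "type": "update",
          "key": ["clips", { "id": "CLIP-1" }],
          "value": { "id": "CLIP-1", "position": 4.5, "start": 0.0, "end": 10.0 } },
        { "type": "update",
          "key": ["duration"],
          "value": 300.0 }
    ])";
    timeline.ApplyJsonDiff(diff);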
1150 
1151 // Apply JSON diff to clips
1152 void Timeline::apply_json_to_clips(Json::Value change) {
1153 
1154  // Get key and type of change
1155  std::string change_type = change["type"].asString();
1156  std::string clip_id = "";
1157  Clip *existing_clip = NULL;
1158 
1159  // Find id of clip (if any)
1160  for (auto key_part : change["key"]) {
1161  // Get each change
1162  if (key_part.isObject()) {
1163  // Check for id
1164  if (!key_part["id"].isNull()) {
1165  // Set the id
1166  clip_id = key_part["id"].asString();
1167 
1168  // Find matching clip in timeline (if any)
1169  for (auto c : clips)
1170  {
1171  if (c->Id() == clip_id) {
1172  existing_clip = c;
1173  break; // clip found, exit loop
1174  }
1175  }
1176  break; // id found, exit loop
1177  }
1178  }
1179  }
1180 
1181  // Check for a more specific key (targeting this clip's effects)
1182  // For example: ["clips", {"id": "123"}, "effects", {"id": "432"}]
1183  if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
1184  {
1185  // This change is actually targeting a specific effect under a clip (and not the clip itself)
1186  Json::Value key_part = change["key"][3];
1187 
1188  if (key_part.isObject()) {
1189  // Check for id
1190  if (!key_part["id"].isNull())
1191  {
1192  // Set the id
1193  std::string effect_id = key_part["id"].asString();
1194 
1195  // Find matching effect in timeline (if any)
1196  std::list<EffectBase*> effect_list = existing_clip->Effects();
1197  for (auto e : effect_list)
1198  {
1199  if (e->Id() == effect_id) {
1200  // Apply the change to the effect directly
1201  apply_json_to_effects(change, e);
1202 
1203  // Calculate start and end frames that this impacts, and remove those frames from the cache
1204  int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1205  int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1206  final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1207 
1208  return; // effect found, don't update clip
1209  }
1210  }
1211  }
1212  }
1213  }
1214 
1215  // Calculate start and end frames that this impacts, and remove those frames from the cache
1216  if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
1217  int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
1218  int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
1219  final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1220  }
1221 
1222  // Determine type of change operation
1223  if (change_type == "insert") {
1224 
1225  // Create new clip
1226  Clip *clip = new Clip();
1227  clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
1228  AddClip(clip); // Add clip to timeline
1229 
1230  // Apply framemapper (or update existing framemapper)
1231  apply_mapper_to_clip(clip);
1232 
1233  } else if (change_type == "update") {
1234 
1235  // Update existing clip
1236  if (existing_clip) {
1237 
1238  // Calculate start and end frames that this impacts, and remove those frames from the cache
1239  int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1240  int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1241  final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1242 
1243  // Remove cache on clip's Reader (if found)
1244  if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
1245  existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);
1246 
1247  // Update clip properties from JSON
1248  existing_clip->SetJsonValue(change["value"]);
1249 
1250  // Apply framemapper (or update existing framemapper)
1251  apply_mapper_to_clip(existing_clip);
1252  }
1253 
1254  } else if (change_type == "delete") {
1255 
1256  // Remove existing clip
1257  if (existing_clip) {
1258 
1259  // Calculate start and end frames that this impacts, and remove those frames from the cache
1260  int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1261  int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1262  final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1263 
1264  // Remove clip from timeline
1265  RemoveClip(existing_clip);
1266  }
1267 
1268  }
1269 
1270 }
1271 
1272 // Apply JSON diff to effects
1273 void Timeline::apply_json_to_effects(Json::Value change) {
1274 
1275  // Get key and type of change
1276  std::string change_type = change["type"].asString();
1277  EffectBase *existing_effect = NULL;
1278 
1279  // Find id of an effect (if any)
1280  for (auto key_part : change["key"]) {
1281 
1282  if (key_part.isObject()) {
1283  // Check for id
1284  if (!key_part["id"].isNull())
1285  {
1286  // Set the id
1287  std::string effect_id = key_part["id"].asString();
1288 
1289  // Find matching effect in timeline (if any)
1290  for (auto e : effects)
1291  {
1292  if (e->Id() == effect_id) {
1293  existing_effect = e;
1294  break; // effect found, exit loop
1295  }
1296  }
1297  break; // id found, exit loop
1298  }
1299  }
1300  }
1301 
1302  // Now that we found the effect, apply the change to it
1303  if (existing_effect || change_type == "insert")
1304  // Apply change to effect
1305  apply_json_to_effects(change, existing_effect);
1306 }
1307 
1308 // Apply JSON diff to effects (if you already know which effect needs to be updated)
1309 void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {
1310 
1311  // Get key and type of change
1312  std::string change_type = change["type"].asString();
1313 
1314  // Calculate start and end frames that this impacts, and remove those frames from the cache
1315  if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
1316  int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
1317  int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
1318  final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1319  }
1320 
1321  // Determine type of change operation
1322  if (change_type == "insert") {
1323 
1324  // Determine type of effect
1325  std::string effect_type = change["value"]["type"].asString();
1326 
1327  // Create Effect
1328  EffectBase *e = NULL;
1329 
1330  // Init the matching effect object
1331  if ( (e = EffectInfo().CreateEffect(effect_type)) ) {
1332 
1333  // Load Json into Effect
1334  e->SetJsonValue(change["value"]);
1335 
1336  // Add Effect to Timeline
1337  AddEffect(e);
1338  }
1339 
1340  } else if (change_type == "update") {
1341 
1342  // Update existing effect
1343  if (existing_effect) {
1344 
1345  // Calculate start and end frames that this impacts, and remove those frames from the cache
1346  int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
1347  int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
1348  final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1349 
1350  // Update effect properties from JSON
1351  existing_effect->SetJsonValue(change["value"]);
1352  }
1353 
1354  } else if (change_type == "delete") {
1355 
1356  // Remove existing effect
1357  if (existing_effect) {
1358 
1359  // Calculate start and end frames that this impacts, and remove those frames from the cache
1360  int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
1361  int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
1362  final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1363 
1364  // Remove effect from timeline
1365  RemoveEffect(existing_effect);
1366  }
1367 
1368  }
1369 }
1370 
1371 // Apply JSON diff to timeline properties
1372 void Timeline::apply_json_to_timeline(Json::Value change) {
1373 
1374  // Get key and type of change
1375  std::string change_type = change["type"].asString();
1376  std::string root_key = change["key"][(uint)0].asString();
1377  std::string sub_key = "";
1378  if (change["key"].size() >= 2)
1379  sub_key = change["key"][(uint)1].asString();
1380 
1381  // Clear entire cache
1382  final_cache->Clear();
1383 
1384  // Determine type of change operation
1385  if (change_type == "insert" || change_type == "update") {
1386 
1387  // INSERT / UPDATE
1388  // Check for valid property
1389  if (root_key == "color")
1390  // Set color
1391  color.SetJsonValue(change["value"]);
1392  else if (root_key == "viewport_scale")
1393  // Set viewport scale
1394  viewport_scale.SetJsonValue(change["value"]);
1395  else if (root_key == "viewport_x")
1396  // Set viewport x offset
1397  viewport_x.SetJsonValue(change["value"]);
1398  else if (root_key == "viewport_y")
1399  // Set viewport y offset
1400  viewport_y.SetJsonValue(change["value"]);
1401  else if (root_key == "duration") {
1402  // Update duration of timeline
1403  info.duration = change["value"].asDouble();
1404  info.video_length = info.fps.ToFloat() * info.duration;
1405  }
1406  else if (root_key == "width")
1407  // Set width
1408  info.width = change["value"].asInt();
1409  else if (root_key == "height")
1410  // Set height
1411  info.height = change["value"].asInt();
1412  else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
1413  // Set fps fraction
1414  if (!change["value"]["num"].isNull())
1415  info.fps.num = change["value"]["num"].asInt();
1416  if (!change["value"]["den"].isNull())
1417  info.fps.den = change["value"]["den"].asInt();
1418  }
1419  else if (root_key == "fps" && sub_key == "num")
1420  // Set fps.num
1421  info.fps.num = change["value"].asInt();
1422  else if (root_key == "fps" && sub_key == "den")
1423  // Set fps.den
1424  info.fps.den = change["value"].asInt();
1425  else if (root_key == "display_ratio" && sub_key == "" && change["value"].isObject()) {
1426  // Set display_ratio fraction
1427  if (!change["value"]["num"].isNull())
1428  info.display_ratio.num = change["value"]["num"].asInt();
1429  if (!change["value"]["den"].isNull())
1430  info.display_ratio.den = change["value"]["den"].asInt();
1431  }
1432  else if (root_key == "display_ratio" && sub_key == "num")
1433  // Set display_ratio.num
1434  info.display_ratio.num = change["value"].asInt();
1435  else if (root_key == "display_ratio" && sub_key == "den")
1436  // Set display_ratio.den
1437  info.display_ratio.den = change["value"].asInt();
1438  else if (root_key == "pixel_ratio" && sub_key == "" && change["value"].isObject()) {
1439  // Set pixel_ratio fraction
1440  if (!change["value"]["num"].isNull())
1441  info.pixel_ratio.num = change["value"]["num"].asInt();
1442  if (!change["value"]["den"].isNull())
1443  info.pixel_ratio.den = change["value"]["den"].asInt();
1444  }
1445  else if (root_key == "pixel_ratio" && sub_key == "num")
1446  // Set pixel_ratio.num
1447  info.pixel_ratio.num = change["value"].asInt();
1448  else if (root_key == "pixel_ratio" && sub_key == "den")
1449  // Set pixel_ratio.den
1450  info.pixel_ratio.den = change["value"].asInt();
1451 
1452  else if (root_key == "sample_rate")
1453  // Set sample rate
1454  info.sample_rate = change["value"].asInt();
1455  else if (root_key == "channels")
1456  // Set channels
1457  info.channels = change["value"].asInt();
1458  else if (root_key == "channel_layout")
1459  // Set channel layout
1460  info.channel_layout = (ChannelLayout) change["value"].asInt();
1461  else
1462  // Unrecognized timeline property key
1463  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1464 
1465 
1466  } else if (change_type == "delete") {
1467 
1468  // DELETE / RESET
1469  // Reset the following properties (since we can't delete them)
1470  if (root_key == "color") {
1471  color = Color();
1472  color.red = Keyframe(0.0);
1473  color.green = Keyframe(0.0);
1474  color.blue = Keyframe(0.0);
1475  }
1476  else if (root_key == "viewport_scale")
1477  viewport_scale = Keyframe(100.0); // restore the constructor default (100 = full scale)
1478  else if (root_key == "viewport_x")
1479  viewport_x = Keyframe(0.0);
1480  else if (root_key == "viewport_y")
1481  viewport_y = Keyframe(0.0);
1482  else
1483  // Only the properties above can be reset; any other key is invalid
1484  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1485 
1486  }
1487 
1488 }
1489 
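The branches above are normally reached through the public ApplyJsonDiff() wrapper defined earlier in this file, which parses a JSON string and routes each change object to the timeline, clip, or effect handler. Below is a minimal sketch, assuming ApplyJsonDiff() accepts an array of {"type", "key", "value"} change objects (as the key/type parsing above implies); the 720p timeline setup is purely illustrative.

#include "../include/Timeline.h"

using namespace openshot;

int main() {
    Timeline t(1280, 720, Fraction(24, 1), 44100, 2, LAYOUT_STEREO);

    // Each change object names an operation ("insert", "update", "delete"),
    // a "key" path (root key plus optional sub-key), and the new "value"
    std::string diff = R"([
        {"type": "update", "key": ["fps"], "value": {"num": 30, "den": 1}},
        {"type": "update", "key": ["sample_rate"], "value": 48000},
        {"type": "delete", "key": ["viewport_x"], "value": null}
    ])";

    // Unknown keys surface as InvalidJSONKey (the final else branches above)
    t.ApplyJsonDiff(diff);

    return 0;
}
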
1490 // Clear all caches
1491 void Timeline::ClearAllCache() {
1492 
1493  // Get lock (prevent getting frames while this happens)
1494  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
1495 
1496  // Clear primary cache
1497  final_cache->Clear();
1498 
1499  // Loop through all clips
1500  for (auto clip : clips)
1501  {
1502  // Clear cache on the clip's reader (note: not all readers use a cache)
1503  if (clip->Reader()->GetCache()) clip->Reader()->GetCache()->Clear();
1504 
1505  // Clear nested Reader (if any)
1506  if (clip->Reader()->Name() == "FrameMapper") {
1507  FrameMapper* nested_reader = (FrameMapper*) clip->Reader();
1508  if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
1509  nested_reader->Reader()->GetCache()->Clear();
1510  }
1511 
1512  }
1513 }
1514 
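A short usage sketch for ClearAllCache(). The direct keyframe assignment below is an assumed scenario, chosen to show why a manual flush is needed when properties are changed outside the JSON-diff path (which, as seen above, clears the cache itself).

#include "../include/Timeline.h"

using namespace openshot;

void refresh_example() {
    Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
    // ... clips added here ...
    t.Open();

    std::shared_ptr<Frame> before = t.GetFrame(1); // rendered, then cached

    t.viewport_scale = Keyframe(50.0); // direct change: nothing invalidates the cache
    t.ClearAllCache();                 // flush timeline, clip, and mapper caches

    std::shared_ptr<Frame> after = t.GetFrame(1); // re-rendered with the new scale
    t.Close();
}
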
1515 // Set Max Image Size (used for performance optimization). Convenience function for setting
1516 // Settings::Instance()->MAX_WIDTH and Settings::Instance()->MAX_HEIGHT.
1517 void Timeline::SetMaxSize(int width, int height) {
1518  // Maintain aspect ratio regardless of what size is passed in
1519  QSize display_ratio_size = QSize(info.display_ratio.num * info.pixel_ratio.ToFloat(), info.display_ratio.den * info.pixel_ratio.ToFloat());
1520  QSize proposed_size = QSize(std::min(width, info.width), std::min(height, info.height));
1521 
1522  // Scale the display-ratio QSize to fit within the proposed size (aspect ratio preserved)
1523  display_ratio_size.scale(proposed_size, Qt::KeepAspectRatio);
1524 
1525  // Set max size
1526  Settings::Instance()->MAX_WIDTH = display_ratio_size.width();
1527  Settings::Instance()->MAX_HEIGHT = display_ratio_size.height();
1528 }
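
A worked example of the clamp above, assuming a 1920x1080 timeline (display_ratio reduced to 16:9, square pixels so pixel_ratio is 1:1) and a hypothetical 1280-pixel preview limit:

#include "../include/Timeline.h"

using namespace openshot;

void preview_limit_example() {
    Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);

    // display_ratio_size = (16 * 1.0, 9 * 1.0)                = (16, 9)
    // proposed_size      = (min(1280, 1920), min(1280, 1080)) = (1280, 1080)
    // scaled, keep AR    = (16, 9) * min(1280/16, 1080/9)     = (1280, 720)
    t.SetMaxSize(1280, 1280);

    // Settings::Instance()->MAX_WIDTH  is now 1280
    // Settings::Instance()->MAX_HEIGHT is now 720
}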