OpenShot Library | libopenshot  0.2.2
Clip.cpp
Go to the documentation of this file.
1 /**
2  * @file
3  * @brief Source file for Clip class
4  * @author Jonathan Thomas <jonathan@openshot.org>
5  *
6  * @section LICENSE
7  *
8  * Copyright (c) 2008-2014 OpenShot Studios, LLC
9  * <http://www.openshotstudios.com/>. This file is part of
10  * OpenShot Library (libopenshot), an open-source project dedicated to
11  * delivering high quality video editing and animation solutions to the
12  * world. For more information visit <http://www.openshot.org/>.
13  *
14  * OpenShot Library (libopenshot) is free software: you can redistribute it
15  * and/or modify it under the terms of the GNU Lesser General Public License
16  * as published by the Free Software Foundation, either version 3 of the
17  * License, or (at your option) any later version.
18  *
19  * OpenShot Library (libopenshot) is distributed in the hope that it will be
20  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22  * GNU Lesser General Public License for more details.
23  *
24  * You should have received a copy of the GNU Lesser General Public License
25  * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
26  */
27 
28 #include "../include/Clip.h"
29 
30 using namespace openshot;
31 
32 // Init default settings for a clip
33 void Clip::init_settings()
34 {
35  // Init clip settings
36  Position(0.0);
37  Layer(0);
38  Start(0.0);
39  End(0.0);
41  scale = SCALE_FIT;
45  waveform = false;
47 
48  // Init scale curves
49  scale_x = Keyframe(1.0);
50  scale_y = Keyframe(1.0);
51 
52  // Init location curves
53  location_x = Keyframe(0.0);
54  location_y = Keyframe(0.0);
55 
56  // Init alpha
57  alpha = Keyframe(1.0);
58 
59  // Init rotation
60  init_reader_rotation();
61 
62  // Init time & volume
63  time = Keyframe(1.0);
64  volume = Keyframe(1.0);
65 
66  // Init audio waveform color
67  wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);
68 
69  // Init crop settings
71  crop_width = Keyframe(-1.0);
72  crop_height = Keyframe(-1.0);
73  crop_x = Keyframe(0.0);
74  crop_y = Keyframe(0.0);
75 
76  // Init shear and perspective curves
77  shear_x = Keyframe(0.0);
78  shear_y = Keyframe(0.0);
79  perspective_c1_x = Keyframe(-1.0);
80  perspective_c1_y = Keyframe(-1.0);
81  perspective_c2_x = Keyframe(-1.0);
82  perspective_c2_y = Keyframe(-1.0);
83  perspective_c3_x = Keyframe(-1.0);
84  perspective_c3_y = Keyframe(-1.0);
85  perspective_c4_x = Keyframe(-1.0);
86  perspective_c4_y = Keyframe(-1.0);
87 
88  // Init audio channel filter and mappings
89  channel_filter = Keyframe(-1.0);
90  channel_mapping = Keyframe(-1.0);
91 
92  // Init audio and video overrides
93  has_audio = Keyframe(-1.0);
94  has_video = Keyframe(-1.0);
95 
96  // Default pointers
97  manage_reader = false;
98 }
99 
100 // Init reader's rotation (if any)
101 void Clip::init_reader_rotation() {
102  // Only init rotation from reader when needed
103  if (rotation.Points.size() > 1)
104  // Do nothing if more than 1 rotation Point
105  return;
106  else if (rotation.Points.size() == 1 && rotation.GetValue(1) != 0.0)
107  // Do nothing if 1 Point, and it's not the default value
108  return;
109 
110  // Init rotation
111  if (reader && reader->info.metadata.count("rotate") > 0) {
112  // Use reader metadata rotation (if any)
113  // This is typical with cell phone videos filmed in different orientations
114  try {
115  float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
116  rotation = Keyframe(rotate_metadata);
117  } catch (exception e) {}
118  }
119  else
120  // Default no rotation
121  rotation = Keyframe(0.0);
122 }
123 
124 // Default Constructor for a clip
125 Clip::Clip() : reader(NULL), resampler(NULL), audio_cache(NULL)
126 {
127  // Init all default settings
128  init_settings();
129 }
130 
131 // Constructor with reader
132 Clip::Clip(ReaderBase* new_reader) : reader(new_reader), resampler(NULL), audio_cache(NULL)
133 {
134  // Init all default settings
135  init_settings();
136 
137  // Open and Close the reader (to set the duration of the clip)
138  Open();
139  Close();
140 
141  // Update duration
142  End(reader->info.duration);
143 }
144 
145 // Constructor with filepath
146 Clip::Clip(string path) : reader(NULL), resampler(NULL), audio_cache(NULL)
147 {
148  // Init all default settings
149  init_settings();
150 
151  // Get file extension (and convert to lower case)
152  string ext = get_file_extension(path);
153  transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
154 
155  // Determine if common video formats
156  if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
157  ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
158  {
159  try
160  {
161  // Open common video format
162  reader = new FFmpegReader(path);
163 
164  } catch(...) { }
165  }
166 
167  // If no video found, try each reader
168  if (!reader)
169  {
170  try
171  {
172  // Try an image reader
173  reader = new QtImageReader(path);
174 
175  } catch(...) {
176  try
177  {
178  // Try a video reader
179  reader = new FFmpegReader(path);
180 
181  } catch(...) { }
182  }
183  }
184 
185  // Update duration
186  if (reader) {
187  End(reader->info.duration);
188  manage_reader = true;
189  init_reader_rotation();
190  }
191 }
192 
193 // Destructor
195 {
196  // Delete the reader if clip created it
197  if (manage_reader && reader) {
198  delete reader;
199  reader = NULL;
200  }
201 
202  // Close the resampler
203  if (resampler) {
204  delete resampler;
205  resampler = NULL;
206  }
207 }
208 
209 /// Set the current reader
210 void Clip::Reader(ReaderBase* new_reader)
211 {
212  // set reader pointer
213  reader = new_reader;
214 
215  // Init rotation (if any)
216  init_reader_rotation();
217 }
218 
219 /// Get the current reader
221 {
222  if (reader)
223  return reader;
224  else
225  // Throw error if reader not initialized
226  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
227 }
228 
229 // Open the internal reader
231 {
232  if (reader)
233  {
234  // Open the reader
235  reader->Open();
236 
237  // Set some clip properties from the file reader
238  if (end == 0.0)
239  End(reader->info.duration);
240  }
241  else
242  // Throw error if reader not initialized
243  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
244 }
245 
246 // Close the internal reader
248 {
249  if (reader) {
250  ZmqLogger::Instance()->AppendDebugMethod("Clip::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
251 
252  // Close the reader
253  reader->Close();
254  }
255  else
256  // Throw error if reader not initialized
257  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
258 }
259 
260 // Get end position of clip (trim end of video), which can be affected by the time curve.
261 float Clip::End()
262 {
263  // if a time curve is present, use it's length
264  if (time.Points.size() > 1)
265  {
266  // Determine the FPS fo this clip
267  float fps = 24.0;
268  if (reader)
269  // file reader
270  fps = reader->info.fps.ToFloat();
271  else
272  // Throw error if reader not initialized
273  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
274 
275  return float(time.GetLength()) / fps;
276  }
277  else
278  // just use the duration (as detected by the reader)
279  return end;
280 }
281 
282 // Get an openshot::Frame object for a specific frame number of this reader.
283 std::shared_ptr<Frame> Clip::GetFrame(int64_t requested_frame)
284 {
285  if (reader)
286  {
287  // Adjust out of bounds frame number
288  requested_frame = adjust_frame_number_minimum(requested_frame);
289 
290  // Adjust has_video and has_audio overrides
291  int enabled_audio = has_audio.GetInt(requested_frame);
292  if (enabled_audio == -1 && reader && reader->info.has_audio)
293  enabled_audio = 1;
294  else if (enabled_audio == -1 && reader && !reader->info.has_audio)
295  enabled_audio = 0;
296  int enabled_video = has_video.GetInt(requested_frame);
297  if (enabled_video == -1 && reader && reader->info.has_video)
298  enabled_video = 1;
299  else if (enabled_video == -1 && reader && !reader->info.has_audio)
300  enabled_video = 0;
301 
302  // Is a time map detected
303  int64_t new_frame_number = requested_frame;
304  int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame));
305  if (time.Values.size() > 1)
306  new_frame_number = time_mapped_number;
307 
308  // Now that we have re-mapped what frame number is needed, go and get the frame pointer
309  std::shared_ptr<Frame> original_frame;
310  #pragma omp critical (Clip_GetFrame)
311  original_frame = GetOrCreateFrame(new_frame_number);
312 
313  // Create a new frame
314  std::shared_ptr<Frame> frame(new Frame(new_frame_number, 1, 1, "#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount()));
315  #pragma omp critical (Clip_GetFrame)
316  {
317  frame->SampleRate(original_frame->SampleRate());
318  frame->ChannelsLayout(original_frame->ChannelsLayout());
319  }
320 
321  // Copy the image from the odd field
322  if (enabled_video)
323  frame->AddImage(std::shared_ptr<QImage>(new QImage(*original_frame->GetImage())));
324 
325  // Loop through each channel, add audio
326  if (enabled_audio && reader->info.has_audio)
327  for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
328  frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
329 
330  // Get time mapped frame number (used to increase speed, change direction, etc...)
331  std::shared_ptr<Frame> new_frame = get_time_mapped_frame(frame, requested_frame);
332 
333  // Apply effects to the frame (if any)
334  apply_effects(new_frame);
335 
336  // Return processed 'frame'
337  return new_frame;
338  }
339  else
340  // Throw error if reader not initialized
341  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
342 }
343 
344 // Get file extension
345 string Clip::get_file_extension(string path)
346 {
347  // return last part of path
348  return path.substr(path.find_last_of(".") + 1);
349 }
350 
351 // Reverse an audio buffer
352 void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
353 {
354  int number_of_samples = buffer->getNumSamples();
355  int channels = buffer->getNumChannels();
356 
357  // Reverse array (create new buffer to hold the reversed version)
358  AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
359  reversed->clear();
360 
361  for (int channel = 0; channel < channels; channel++)
362  {
363  int n=0;
364  for (int s = number_of_samples - 1; s >= 0; s--, n++)
365  reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
366  }
367 
368  // Copy the samples back to the original array
369  buffer->clear();
370  // Loop through channels, and get audio samples
371  for (int channel = 0; channel < channels; channel++)
372  // Get the audio samples for this channel
373  buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
374 
375  delete reversed;
376  reversed = NULL;
377 }
378 
// Adjust the audio and image of a time mapped frame.
// When this clip has a multi-point time curve, the requested frame is re-mapped
// through the curve: the image is taken from the mapped source frame, and the
// audio is resampled (stretched when slowing down, compressed when speeding up)
// and optionally reversed when the curve is decreasing.
// @param frame         the already-built frame for the requested position
// @param frame_number  the originally requested (un-mapped) frame number
// @return a new time-mapped frame, or `frame` unchanged when no time curve is set
// @throws ReaderClosed when no reader has been assigned to this clip
std::shared_ptr<Frame> Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
	// Check for valid reader
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

	// Check for a valid time map curve (a single point means "no mapping")
	if (time.Values.size() > 1)
	{
		// Serialize all time-mapping work (the resampler member is shared state)
		const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
		std::shared_ptr<Frame> new_frame;

		// create buffer and resampler (resampler is lazily created and reused across calls)
		juce::AudioSampleBuffer *samples = NULL;
		if (!resampler)
			resampler = new AudioResampler();

		// Get new frame number (the source frame the time curve maps this position to)
		int new_frame_number = adjust_frame_number_minimum(round(time.GetValue(frame_number)));

		// Create a new frame (1x1 placeholder image; real image added below)
		int samples_in_frame = Frame::GetSamplesPerFrame(new_frame_number, reader->info.fps, reader->info.sample_rate, frame->GetAudioChannelsCount());
		new_frame = std::make_shared<Frame>(new_frame_number, 1, 1, "#000000", samples_in_frame, frame->GetAudioChannelsCount());

		// Copy the image from the mapped source frame (deep copy of the QImage)
		new_frame->AddImage(std::shared_ptr<QImage>(new QImage(*GetOrCreateFrame(new_frame_number)->GetImage())));

		// Get delta (difference in previous Y value) — how many source frames
		// this single output frame advances over (>1 means speeding up)
		int delta = int(round(time.GetDelta(frame_number)));

		// Init audio vars
		// NOTE(review): sample_rate is never read below — appears to be leftover
		int sample_rate = reader->info.sample_rate;
		int channels = reader->info.channels;
		int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

		// Only resample audio if needed
		if (reader->info.has_audio) {
			// Determine if we are speeding up or slowing down
			if (time.GetRepeatFraction(frame_number).den > 1) {
				// SLOWING DOWN AUDIO: one source frame is spread over `den` output
				// frames, so stretch its samples and take the slice for `num`.
				// Resample data, and return new buffer pointer
				AudioSampleBuffer *resampled_buffer = NULL;
				// NOTE(review): resampled_buffer_size is assigned but never used
				int resampled_buffer_size = 0;

				// SLOW DOWN audio (split audio)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
									 number_of_samples, 1.0f);

				// Reverse the samples (if needed — i.e. the time curve is decreasing here)
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Resample audio to be X times slower (where X is the denominator of the repeat fraction)
				resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den);

				// Resample the data (since it's the 1st slice)
				resampled_buffer = resampler->GetResampledBuffer();

				// Get the length of the resampled buffer (if one exists)
				resampled_buffer_size = resampled_buffer->getNumSamples();

				// Just take the samples we need for the requested frame
				int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1));
				if (start > 0)
					start -= 1;
				for (int channel = 0; channel < channels; channel++)
					// Add new (slower) samples, to the frame object
					new_frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
										number_of_samples, 1.0f);

				// Clean up — only the pointer is cleared; the buffer itself is
				// presumably owned by the resampler (TODO confirm in AudioResampler)
				resampled_buffer = NULL;

			}
			else if (abs(delta) > 1 && abs(delta) < 100) {
				int start = 0;
				if (delta > 0) {
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					// First count the total samples across all frames in the delta window
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta, concatenating their audio
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer at the running offset
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}
				else {
					// SPEED UP in reverse (negative delta): same as above, but the
					// delta window is iterated backwards (descending frame numbers)
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta, concatenating their audio
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer at the running offset
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}

				// Resample audio to be X times faster (where X is the delta of the repeat fraction)
				resampler->SetBuffer(samples, float(start) / float(number_of_samples));

				// Resample data, and return new buffer pointer
				AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
				// NOTE(review): resampled_buffer_size is unused here as well
				int resampled_buffer_size = buffer->getNumSamples();

				// Add the newly resized audio samples to the current frame
				for (int channel = 0; channel < channels; channel++)
					// Add new (faster) samples, to the frame object
					new_frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

				// Clean up — pointer only; buffer presumably owned by the resampler
				buffer = NULL;
			}
			else {
				// NORMAL SPEED: use the samples on this frame (but maybe reverse them if needed)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

				// reverse the samples
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Add reversed samples to the frame object
				for (int channel = 0; channel < channels; channel++)
					new_frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);


			}

			// Free the working buffer allocated by whichever branch ran above
			delete samples;
			samples = NULL;
		}

		// Return new time mapped frame
		return new_frame;

	} else
		// Use original frame
		return frame;
}
599 
600 // Adjust frame number minimum value
601 int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
602 {
603  // Never return a frame number 0 or below
604  if (frame_number < 1)
605  return 1;
606  else
607  return frame_number;
608 
609 }
610 
// Get or generate a blank frame.
// Tries to pull frame `number` from the reader (after configuring the reader's
// max decode size for this clip's scale mode); when the reader fails (closed,
// too many seeks, out-of-bounds), returns a silent black frame instead so the
// pipeline never receives a null frame.
// @param number  1-based frame number to fetch
// @return a real frame from the reader, or a blank frame on failure
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
{
	std::shared_ptr<Frame> new_frame;

	// Init some basic properties about this frame (sample count varies per frame
	// because the sample rate is rarely an exact multiple of the FPS)
	int samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

	try {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

		// Determine the max size of this clips source image (based on the timeline's size, the scaling mode,
		// and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
		// without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
		// method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
		// the future.
		if (scale == SCALE_FIT || scale == SCALE_STRETCH) {
			// Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
			// GetMaxPoint gives the largest scale the keyframe ever reaches, so the
			// decoded image is big enough for the clip's most-zoomed moment
			float max_scale_x = scale_x.GetMaxPoint().co.Y;
			float max_scale_y = scale_y.GetMaxPoint().co.Y;
			reader->SetMaxSize(max(float(max_width), max_width * max_scale_x), max(float(max_height), max_height * max_scale_y));

		} else if (scale == SCALE_CROP) {
			// Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
			float max_scale_x = scale_x.GetMaxPoint().co.Y;
			float max_scale_y = scale_y.GetMaxPoint().co.Y;
			// Two candidate sizes: one pinned to the timeline width, one to its height
			QSize width_size(max_width * max_scale_x, round(max_width / (float(reader->info.width) / float(reader->info.height))));
			QSize height_size(round(max_height / (float(reader->info.height) / float(reader->info.width))), max_height * max_scale_y);

			// respect aspect ratio: pick whichever candidate covers the full timeline
			if (width_size.width() >= max_width && width_size.height() >= max_height)
				reader->SetMaxSize(max(max_width, width_size.width()), max(max_height, width_size.height()));
			else
				reader->SetMaxSize(max(max_width, height_size.width()), max(max_height, height_size.height()));

		} else {
			// No scaling, use original image size (slower)
			reader->SetMaxSize(0, 0);
		}

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		new_frame = reader->GetFrame(number);

		// Return real frame
		if (new_frame)
			return new_frame;

	} catch (const ReaderClosed & e) {
		// Reader was closed mid-request — fall through to the blank frame below
	} catch (const TooManySeeks & e) {
		// Reader could not seek to this frame — fall through to the blank frame below
	} catch (const OutOfBoundsFrame & e) {
		// Frame number past the reader's end — fall through to the blank frame below
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

	// Create blank frame (black image, silent audio) matching the reader's geometry
	new_frame = std::make_shared<Frame>(number, reader->info.width, reader->info.height, "#000000", samples_in_frame, reader->info.channels);
	new_frame->SampleRate(reader->info.sample_rate);
	new_frame->ChannelsLayout(reader->info.channel_layout);
	new_frame->AddAudioSilence(samples_in_frame);
	return new_frame;
}
677 
678 // Generate JSON string of this object
679 string Clip::Json() {
680 
681  // Return formatted string
682  return JsonValue().toStyledString();
683 }
684 
// Get all properties for a specific frame, formatted for a UI property editor.
// Each entry carries the value at `requested_frame`, its type, allowed range,
// and whether it is read-only; enum-style properties also list their choices.
string Clip::PropertiesJSON(int64_t requested_frame) {

	// Generate JSON properties list
	Json::Value root;
	root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
	// 30 * 60 * 60 * 48 is the max position/length in seconds (48 hours at 30 fps)
	root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
	root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
	root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
	root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
	root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
	root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
	root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);

	// Add gravity choices (dropdown style)
	root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

	// Add scale choices (dropdown style)
	root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
	root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
	root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
	root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

	// Add frame number display choices (dropdown style)
	root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
	root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
	root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
	root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

	// Add volume mixing choices (dropdown style)
	root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

	// Add waveform choices (dropdown style)
	root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
	root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

	// Keyframes — each passes the keyframe pointer so the UI can show curve data
	root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
	root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
	root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
	root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
	root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
	root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
	root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
	root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
	root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
	root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
	root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
	// -1 means "follow the reader's streams"; 0/1 force disable/enable
	root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
	root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

	// Wave color: a parent "color" entry plus one sub-entry per channel keyframe
	root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
	root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);


	// Return formatted string
	return root.toStyledString();
}
759 
760 // Generate Json::JsonValue for this object
761 Json::Value Clip::JsonValue() {
762 
763  // Create root json object
764  Json::Value root = ClipBase::JsonValue(); // get parent properties
765  root["gravity"] = gravity;
766  root["scale"] = scale;
767  root["anchor"] = anchor;
768  root["display"] = display;
769  root["mixing"] = mixing;
770  root["waveform"] = waveform;
771  root["scale_x"] = scale_x.JsonValue();
772  root["scale_y"] = scale_y.JsonValue();
773  root["location_x"] = location_x.JsonValue();
774  root["location_y"] = location_y.JsonValue();
775  root["alpha"] = alpha.JsonValue();
776  root["rotation"] = rotation.JsonValue();
777  root["time"] = time.JsonValue();
778  root["volume"] = volume.JsonValue();
779  root["wave_color"] = wave_color.JsonValue();
780  root["crop_width"] = crop_width.JsonValue();
781  root["crop_height"] = crop_height.JsonValue();
782  root["crop_x"] = crop_x.JsonValue();
783  root["crop_y"] = crop_y.JsonValue();
784  root["shear_x"] = shear_x.JsonValue();
785  root["shear_y"] = shear_y.JsonValue();
786  root["channel_filter"] = channel_filter.JsonValue();
787  root["channel_mapping"] = channel_mapping.JsonValue();
788  root["has_audio"] = has_audio.JsonValue();
789  root["has_video"] = has_video.JsonValue();
790  root["perspective_c1_x"] = perspective_c1_x.JsonValue();
791  root["perspective_c1_y"] = perspective_c1_y.JsonValue();
792  root["perspective_c2_x"] = perspective_c2_x.JsonValue();
793  root["perspective_c2_y"] = perspective_c2_y.JsonValue();
794  root["perspective_c3_x"] = perspective_c3_x.JsonValue();
795  root["perspective_c3_y"] = perspective_c3_y.JsonValue();
796  root["perspective_c4_x"] = perspective_c4_x.JsonValue();
797  root["perspective_c4_y"] = perspective_c4_y.JsonValue();
798 
799  // Add array of effects
800  root["effects"] = Json::Value(Json::arrayValue);
801 
802  // loop through effects
803  list<EffectBase*>::iterator effect_itr;
804  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
805  {
806  // Get clip object from the iterator
807  EffectBase *existing_effect = (*effect_itr);
808  root["effects"].append(existing_effect->JsonValue());
809  }
810 
811  if (reader)
812  root["reader"] = reader->JsonValue();
813 
814  // return JsonValue
815  return root;
816 }
817 
818 // Load JSON string into this object
819 void Clip::SetJson(string value) {
820 
821  // Parse JSON string into JSON objects
822  Json::Value root;
823  Json::Reader reader;
824  bool success = reader.parse( value, root );
825  if (!success)
826  // Raise exception
827  throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
828 
829  try
830  {
831  // Set all values that match
832  SetJsonValue(root);
833  }
834  catch (exception e)
835  {
836  // Error parsing JSON (or missing keys)
837  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
838  }
839 }
840 
841 // Load Json::JsonValue into this object
842 void Clip::SetJsonValue(Json::Value root) {
843 
844  // Set parent data
846 
847  // Set data from Json (if key is found)
848  if (!root["gravity"].isNull())
849  gravity = (GravityType) root["gravity"].asInt();
850  if (!root["scale"].isNull())
851  scale = (ScaleType) root["scale"].asInt();
852  if (!root["anchor"].isNull())
853  anchor = (AnchorType) root["anchor"].asInt();
854  if (!root["display"].isNull())
855  display = (FrameDisplayType) root["display"].asInt();
856  if (!root["mixing"].isNull())
857  mixing = (VolumeMixType) root["mixing"].asInt();
858  if (!root["waveform"].isNull())
859  waveform = root["waveform"].asBool();
860  if (!root["scale_x"].isNull())
861  scale_x.SetJsonValue(root["scale_x"]);
862  if (!root["scale_y"].isNull())
863  scale_y.SetJsonValue(root["scale_y"]);
864  if (!root["location_x"].isNull())
865  location_x.SetJsonValue(root["location_x"]);
866  if (!root["location_y"].isNull())
867  location_y.SetJsonValue(root["location_y"]);
868  if (!root["alpha"].isNull())
869  alpha.SetJsonValue(root["alpha"]);
870  if (!root["rotation"].isNull())
871  rotation.SetJsonValue(root["rotation"]);
872  if (!root["time"].isNull())
873  time.SetJsonValue(root["time"]);
874  if (!root["volume"].isNull())
875  volume.SetJsonValue(root["volume"]);
876  if (!root["wave_color"].isNull())
877  wave_color.SetJsonValue(root["wave_color"]);
878  if (!root["crop_width"].isNull())
879  crop_width.SetJsonValue(root["crop_width"]);
880  if (!root["crop_height"].isNull())
881  crop_height.SetJsonValue(root["crop_height"]);
882  if (!root["crop_x"].isNull())
883  crop_x.SetJsonValue(root["crop_x"]);
884  if (!root["crop_y"].isNull())
885  crop_y.SetJsonValue(root["crop_y"]);
886  if (!root["shear_x"].isNull())
887  shear_x.SetJsonValue(root["shear_x"]);
888  if (!root["shear_y"].isNull())
889  shear_y.SetJsonValue(root["shear_y"]);
890  if (!root["channel_filter"].isNull())
891  channel_filter.SetJsonValue(root["channel_filter"]);
892  if (!root["channel_mapping"].isNull())
893  channel_mapping.SetJsonValue(root["channel_mapping"]);
894  if (!root["has_audio"].isNull())
895  has_audio.SetJsonValue(root["has_audio"]);
896  if (!root["has_video"].isNull())
897  has_video.SetJsonValue(root["has_video"]);
898  if (!root["perspective_c1_x"].isNull())
899  perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
900  if (!root["perspective_c1_y"].isNull())
901  perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
902  if (!root["perspective_c2_x"].isNull())
903  perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
904  if (!root["perspective_c2_y"].isNull())
905  perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
906  if (!root["perspective_c3_x"].isNull())
907  perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
908  if (!root["perspective_c3_y"].isNull())
909  perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
910  if (!root["perspective_c4_x"].isNull())
911  perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
912  if (!root["perspective_c4_y"].isNull())
913  perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
914  if (!root["effects"].isNull()) {
915 
916  // Clear existing effects
917  effects.clear();
918 
919  // loop through effects
920  for (int x = 0; x < root["effects"].size(); x++) {
921  // Get each effect
922  Json::Value existing_effect = root["effects"][x];
923 
924  // Create Effect
925  EffectBase *e = NULL;
926 
927  if (!existing_effect["type"].isNull()) {
928  // Create instance of effect
929  if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {
930 
931  // Load Json into Effect
932  e->SetJsonValue(existing_effect);
933 
934  // Add Effect to Timeline
935  AddEffect(e);
936  }
937  }
938  }
939  }
940  if (!root["reader"].isNull()) // does Json contain a reader?
941  {
942  if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
943  {
944  // Close previous reader (if any)
945  bool already_open = false;
946  if (reader)
947  {
948  // Track if reader was open
949  already_open = reader->IsOpen();
950 
951  // Close and delete existing reader (if any)
952  reader->Close();
953  delete reader;
954  reader = NULL;
955  }
956 
957  // Create new reader (and load properties)
958  string type = root["reader"]["type"].asString();
959 
960  if (type == "FFmpegReader") {
961 
962  // Create new reader
963  reader = new FFmpegReader(root["reader"]["path"].asString(), false);
964  reader->SetJsonValue(root["reader"]);
965 
966  } else if (type == "QtImageReader") {
967 
968  // Create new reader
969  reader = new QtImageReader(root["reader"]["path"].asString(), false);
970  reader->SetJsonValue(root["reader"]);
971 
972 #ifdef USE_IMAGEMAGICK
973  } else if (type == "ImageReader") {
974 
975  // Create new reader
976  reader = new ImageReader(root["reader"]["path"].asString(), false);
977  reader->SetJsonValue(root["reader"]);
978 
979  } else if (type == "TextReader") {
980 
981  // Create new reader
982  reader = new TextReader();
983  reader->SetJsonValue(root["reader"]);
984 #endif
985 
986  } else if (type == "ChunkReader") {
987 
988  // Create new reader
989  reader = new ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
990  reader->SetJsonValue(root["reader"]);
991 
992  } else if (type == "DummyReader") {
993 
994  // Create new reader
995  reader = new DummyReader();
996  reader->SetJsonValue(root["reader"]);
997  }
998 
999  // mark as managed reader
1000  if (reader)
1001  manage_reader = true;
1002 
1003  // Re-Open reader (if needed)
1004  if (already_open)
1005  reader->Open();
1006 
1007  }
1008  }
1009 }
1010 
// Sort effects by order
void Clip::sort_effects()
{
	// Re-sort the effect list using the ordering defined by
	// CompareClipEffects (keeps effects applied in the intended order)
	effects.sort(CompareClipEffects());
}
1017 
1018 // Add an effect to the clip
1020 {
1021  // Add effect to list
1022  effects.push_back(effect);
1023 
1024  // Sort effects
1025  sort_effects();
1026 }
1027 
1028 // Remove an effect from the clip
1030 {
1031  effects.remove(effect);
1032 }
1033 
1034 // Apply effects to the source frame (if any)
1035 std::shared_ptr<Frame> Clip::apply_effects(std::shared_ptr<Frame> frame)
1036 {
1037  // Find Effects at this position and layer
1038  list<EffectBase*>::iterator effect_itr;
1039  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
1040  {
1041  // Get clip object from the iterator
1042  EffectBase *effect = (*effect_itr);
1043 
1044  // Apply the effect to this frame
1045  frame = effect->GetFrame(frame, frame->number);
1046 
1047  } // end effect loop
1048 
1049  // Return modified frame
1050  return frame;
1051 }
openshot::Clip::perspective_c4_y
Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
Definition: Clip.h:256
openshot::Keyframe::IsIncreasing
bool IsIncreasing(int index)
Get the direction of the curve at a specific index (increasing or decreasing)
Definition: KeyFrame.cpp:292
openshot::ReaderInfo::sample_rate
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:81
openshot::EffectInfo
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:45
openshot::FRAME_DISPLAY_BOTH
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
Definition: Enums.h:70
openshot::Fraction::ToFloat
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:41
openshot::Color::JsonValue
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Color.cpp:92
openshot::Coordinate::Y
double Y
The Y value of the coordinate (usually representing the value of the property being animated)
Definition: Coordinate.h:62
openshot::EffectBase
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:66
openshot::Clip::shear_y
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:248
openshot::ClipBase::max_width
int max_width
The maximum image width needed by this clip (used for optimizations)
Definition: ClipBase.h:61
openshot::ClipBase::add_property_choice_json
Json::Value add_property_choice_json(string name, int value, int selected_value)
Generate JSON choice for a property (dropdown properties)
Definition: ClipBase.cpp:101
openshot::Clip::Open
void Open()
Open the internal reader.
Definition: Clip.cpp:230
openshot::Keyframe::GetLong
int64_t GetLong(int64_t index)
Get the rounded LONG value at a specific index.
Definition: KeyFrame.cpp:270
openshot::Color::green
Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:46
openshot::Clip::perspective_c2_x
Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Definition: Clip.h:251
openshot::Clip::End
float End()
Override End() method.
Definition: Clip.cpp:261
openshot::ReaderBase::SetJsonValue
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ReaderBase.cpp:168
openshot::ChunkReader
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Definition: ChunkReader.h:104
openshot::Clip::perspective_c4_x
Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Definition: Clip.h:255
openshot::ClipBase::Id
void Id(string value)
Set basic properties.
Definition: ClipBase.h:90
openshot::Clip::Close
void Close()
Close the internal reader.
Definition: Clip.cpp:247
openshot::Clip::time
Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
Definition: Clip.h:233
openshot::FRAME_DISPLAY_CLIP
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
Definition: Enums.h:68
openshot::FRAME_DISPLAY_TIMELINE
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
Definition: Enums.h:69
openshot
This namespace is the default namespace for all code in the openshot library.
Definition: AudioBufferSource.h:45
openshot::Point::co
Coordinate co
This is the primary coordinate.
Definition: Point.h:83
openshot::Clip::scale_x
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:223
openshot::ReaderInfo::channel_layout
ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:83
openshot::ClipBase::max_height
int max_height
The maximum image height needed by this clip (used for optimizations)
Definition: ClipBase.h:62
openshot::GRAVITY_TOP_LEFT
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
Definition: Enums.h:37
openshot::Color::SetJsonValue
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Color.cpp:129
openshot::Keyframe::GetInt
int GetInt(int64_t index)
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:248
openshot::Clip::display
FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:157
openshot::Clip::SetJsonValue
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Clip.cpp:842
openshot::Keyframe::GetLength
int64_t GetLength()
Definition: KeyFrame.cpp:442
openshot::DummyReader
This class is used as a simple, dummy reader, which always returns a blank frame.
Definition: DummyReader.h:53
openshot::GRAVITY_TOP_RIGHT
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
Definition: Enums.h:39
openshot::GravityType
GravityType
This enumeration determines how clips are aligned to their parent container.
Definition: Enums.h:35
openshot::ReaderInfo::duration
float duration
Length of time (in seconds)
Definition: ReaderBase.h:64
openshot::Frame
This class represents a single frame of video (i.e. image & audio data)
Definition: Frame.h:115
openshot::Clip::crop_y
Keyframe crop_y
Curve representing Y offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:244
openshot::Clip::~Clip
~Clip()
Destructor.
Definition: Clip.cpp:194
openshot::ReaderInfo::has_video
bool has_video
Determines if this file has a video stream.
Definition: ReaderBase.h:61
openshot::ReaderBase::JsonValue
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ReaderBase.cpp:113
openshot::ClipBase::previous_properties
string previous_properties
This string contains the previous JSON properties.
Definition: ClipBase.h:60
openshot::ReaderInfo::width
int width
The width of the video (in pixels)
Definition: ReaderBase.h:67
openshot::ReaderBase::info
ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:112
openshot::EffectBase::JsonValue
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: EffectBase.cpp:81
openshot::Clip::scale_y
Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:224
openshot::TooManySeeks
Exception when too many seek attempts happen.
Definition: Exceptions.h:254
openshot::GRAVITY_RIGHT
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:42
openshot::Clip::crop_height
Keyframe crop_height
Curve representing height in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:242
openshot::FRAME_DISPLAY_NONE
@ FRAME_DISPLAY_NONE
Do not display the frame number.
Definition: Enums.h:67
openshot::CompareClipEffects
Definition: Clip.h:68
openshot::ClipBase::add_property_json
Json::Value add_property_json(string name, float value, string type, string memo, Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame)
Generate JSON for a property.
Definition: ClipBase.cpp:65
openshot::Clip::shear_x
Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:247
openshot::OutOfBoundsFrame
Exception for frames that are out of bounds.
Definition: Exceptions.h:202
openshot::GRAVITY_TOP
@ GRAVITY_TOP
Align clip to the top center of its parent.
Definition: Enums.h:38
openshot::Color
This class represents a color (used on the timeline and clips)
Definition: Color.h:42
openshot::Clip::has_video
Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:264
openshot::QtImageReader
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
Definition: QtImageReader.h:69
openshot::Clip::channel_filter
Keyframe channel_filter
Audio channel filter and mappings.
Definition: Clip.h:259
openshot::AudioResampler
This class is used to resample audio data for many sequential frames.
Definition: AudioResampler.h:53
openshot::ReaderInfo::height
int height
The height of the video (in pixels)
Definition: ReaderBase.h:66
openshot::VOLUME_MIX_REDUCE
@ VOLUME_MIX_REDUCE
Reduce volume by about %25, and then mix (louder, but could cause pops if the sum exceeds 100%)
Definition: Enums.h:78
openshot::Fraction::num
int num
Numerator for the fraction.
Definition: Fraction.h:44
openshot::Keyframe::GetValue
double GetValue(int64_t index)
Get the value at a specific index.
Definition: KeyFrame.cpp:226
openshot::Clip::wave_color
Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:237
openshot::Clip::crop_width
Keyframe crop_width
Curve representing width in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:241
openshot::EffectBase::GetFrame
virtual std::shared_ptr< Frame > GetFrame(std::shared_ptr< Frame > frame, int64_t frame_number)=0
This method is required for all derived classes of EffectBase, and returns a modified openshot::Frame...
openshot::Clip::location_x
Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
Definition: Clip.h:225
openshot::Fraction::den
int den
Denominator for the fraction.
Definition: Fraction.h:45
openshot::Clip::perspective_c1_y
Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Definition: Clip.h:250
openshot::Keyframe
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
Definition: KeyFrame.h:64
openshot::Clip::perspective_c2_y
Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
Definition: Clip.h:252
openshot::ReaderBase::Open
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
openshot::GRAVITY_BOTTOM
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
Definition: Enums.h:44
openshot::ReaderInfo::has_audio
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:62
openshot::Clip::SetJson
void SetJson(string value)
Load JSON string into this object.
Definition: Clip.cpp:819
openshot::ReaderBase::IsOpen
virtual bool IsOpen()=0
Determine if reader is open or closed.
openshot::InvalidJSON
Exception for invalid JSON.
Definition: Exceptions.h:152
openshot::Clip::GetFrame
std::shared_ptr< Frame > GetFrame(int64_t requested_frame)
Get an openshot::Frame object for a specific frame number of this timeline.
Definition: Clip.cpp:283
openshot::ImageReader
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Definition: ImageReader.h:67
openshot::Clip::mixing
VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:158
openshot::Keyframe::Values
vector< Coordinate > Values
Vector of all Values (i.e. the processed coordinates from the curve)
Definition: KeyFrame.h:93
openshot::SCALE_CROP
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
Definition: Enums.h:51
openshot::Clip::PropertiesJSON
string PropertiesJSON(int64_t requested_frame)
Definition: Clip.cpp:686
openshot::Clip::AddEffect
void AddEffect(EffectBase *effect)
Add an effect to the clip.
Definition: Clip.cpp:1019
openshot::Color::blue
Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:47
openshot::Keyframe::GetMaxPoint
Point GetMaxPoint()
Get max point (by Y coordinate)
Definition: KeyFrame.cpp:207
openshot::Keyframe::JsonValue
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: KeyFrame.cpp:321
openshot::Keyframe::GetDelta
double GetDelta(int64_t index)
Get the change in Y value (from the previous Y value)
Definition: KeyFrame.cpp:410
openshot::ClipBase::end
float end
The position in seconds to end playing (used to trim the ending of a clip)
Definition: ClipBase.h:59
openshot::Frame::GetSamplesPerFrame
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:521
openshot::FFmpegReader
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
Definition: FFmpegReader.h:92
openshot::Clip::scale
ScaleType scale
The scale determines how a clip should be resized to fit it's parent.
Definition: Clip.h:155
openshot::Clip::Json
string Json()
Get and Set JSON methods.
Definition: Clip.cpp:679
openshot::ReaderInfo::metadata
std::map< string, string > metadata
An optional map/dictionary of metadata for this reader.
Definition: ReaderBase.h:86
openshot::ZmqLogger::Instance
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition: ZmqLogger.cpp:38
openshot::ClipBase::start
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Definition: ClipBase.h:58
openshot::Clip::Reader
ReaderBase * Reader()
Get the current reader.
Definition: Clip.cpp:220
openshot::SCALE_FIT
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:52
openshot::GRAVITY_BOTTOM_LEFT
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
Definition: Enums.h:43
openshot::GRAVITY_BOTTOM_RIGHT
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
Definition: Enums.h:45
openshot::Clip::has_audio
Keyframe has_audio
Override has_video and has_audio properties of clip (and their readers)
Definition: Clip.h:263
openshot::ANCHOR_CANVAS
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
Definition: Enums.h:60
openshot::AudioResampler::SetBuffer
void SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
Definition: AudioResampler.cpp:74
openshot::Clip::getFrameCriticalSection
CriticalSection getFrameCriticalSection
Section lock for multiple threads.
Definition: Clip.h:112
openshot::Clip::crop_gravity
GravityType crop_gravity
Cropping needs to have a gravity to determine what side we are cropping.
Definition: Clip.h:240
openshot::Clip::perspective_c3_x
Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Definition: Clip.h:253
OpenShot Wipe Tests.e
e
Definition: OpenShot Wipe Tests.py:28
openshot::ClipBase::SetJsonValue
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ClipBase.cpp:49
openshot::ReaderClosed
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:234
openshot::ReaderInfo::fps
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:69
openshot::Clip::channel_mapping
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
Definition: Clip.h:260
openshot::Clip::RemoveEffect
void RemoveEffect(EffectBase *effect)
Remove an effect from the clip.
Definition: Clip.cpp:1029
openshot::GRAVITY_LEFT
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:40
openshot::Clip::crop_x
Keyframe crop_x
Curve representing X offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:243
openshot::Clip::location_y
Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
Definition: Clip.h:226
openshot::Clip::Clip
Clip()
Default Constructor.
Definition: Clip.cpp:125
openshot::ReaderBase
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:96
openshot::Clip::gravity
GravityType gravity
The gravity of a clip determines where it snaps to it's parent.
Definition: Clip.h:154
openshot::ReaderBase::GetFrame
virtual std::shared_ptr< Frame > GetFrame(int64_t number)=0
openshot::EffectInfo::CreateEffect
EffectBase * CreateEffect(string effect_type)
Definition: EffectInfo.cpp:42
openshot::ZmqLogger::AppendDebugMethod
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
Definition: ZmqLogger.cpp:162
openshot::VOLUME_MIX_AVERAGE
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
Definition: Enums.h:77
openshot::ReaderBase::Close
virtual void Close()=0
Close the reader (and any resources it was consuming)
openshot::AnchorType
AnchorType
This enumeration determines what parent a clip should be aligned to.
Definition: Enums.h:58
openshot::ScaleType
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
Definition: Enums.h:49
openshot::Color::red
Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:45
openshot::Clip::rotation
Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:230
openshot::AudioResampler::GetResampledBuffer
AudioSampleBuffer * GetResampledBuffer()
Get the resampled audio buffer.
Definition: AudioResampler.cpp:120
openshot::Clip::anchor
AnchorType anchor
The anchor determines what parent a clip should snap to.
Definition: Clip.h:156
openshot::Clip::volume
Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:234
openshot::ReaderBase::SetMaxSize
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
Definition: ReaderBase.h:144
openshot::Clip::perspective_c3_y
Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Definition: Clip.h:254
openshot::SCALE_NONE
@ SCALE_NONE
Do not scale the clip.
Definition: Enums.h:54
openshot::TextReader
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
Definition: TextReader.h:81
openshot::Keyframe::SetJsonValue
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: KeyFrame.cpp:362
openshot::GRAVITY_CENTER
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:41
openshot::Keyframe::GetRepeatFraction
Fraction GetRepeatFraction(int64_t index)
Get the fraction that represents how many times this value is repeated in the curve.
Definition: KeyFrame.cpp:388
openshot::SCALE_STRETCH
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:53
openshot::Clip::alpha
Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:229
openshot::VOLUME_MIX_NONE
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
Definition: Enums.h:76
openshot::ChunkVersion
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
Definition: ChunkReader.h:75
openshot::Clip::JsonValue
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Clip.cpp:761
openshot::ReaderInfo::channels
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:82
openshot::VolumeMixType
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
Definition: Enums.h:74
openshot::ClipBase::JsonValue
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ClipBase.cpp:33
openshot::Keyframe::Points
vector< Point > Points
Vector of all Points.
Definition: KeyFrame.h:92
openshot::Clip::perspective_c1_x
Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Definition: Clip.h:249
openshot::FrameDisplayType
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
Definition: Enums.h:65