30 #ifdef USE_IMAGEMAGICK
// Pairs a human-readable composite-mode name with its CompositeType value.
// Entries of this type populate the composite_choices[] table below, which
// backs the "composite" property dropdown exposed via PropertiesJSON.
struct CompositeChoice {
	const char* name;
	CompositeType value; };
42 const CompositeChoice composite_choices[] = {
65 const int composite_choices_count =
sizeof(composite_choices)/
sizeof(CompositeChoice);
102 wave_color =
Color((
unsigned char)0, (
unsigned char)123, (
unsigned char)255, (
unsigned char)255);
127 parentTrackedObject =
nullptr;
128 parentClipObject = NULL;
153 const auto rotate_meta = reader->
info.
metadata.find(
"rotate");
160 float rotate_angle = 0.0f;
162 rotate_angle = strtof(rotate_meta->second.c_str(),
nullptr);
163 }
catch (
const std::exception& e) {
170 auto has_default_scale = [](
const Keyframe& kf) {
171 return kf.GetCount() == 1 && fabs(kf.GetPoint(0).co.Y - 1.0) < 0.00001;
177 if (fabs(rotate_angle) < 0.0001f)
180 float w =
static_cast<float>(reader->
info.
width);
181 float h =
static_cast<float>(reader->
info.
height);
182 if (w <= 0.0f || h <= 0.0f)
185 float rad = rotate_angle *
static_cast<float>(M_PI) / 180.0f;
187 float new_width = fabs(w * cos(rad)) + fabs(h * sin(rad));
188 float new_height = fabs(w * sin(rad)) + fabs(h * cos(rad));
189 if (new_width <= 0.0f || new_height <= 0.0f)
192 float uniform_scale = std::min(w / new_width, h / new_height);
199 Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
206 Clip::Clip(
ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
225 Clip::Clip(std::string
path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
231 std::string ext = get_file_extension(
path);
232 std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
235 if (ext==
"avi" || ext==
"mov" || ext==
"mkv" || ext==
"mpg" || ext==
"mpeg" || ext==
"mp3" || ext==
"mp4" || ext==
"mts" ||
236 ext==
"ogg" || ext==
"wav" || ext==
"wmv" || ext==
"webm" || ext==
"vob" || ext==
"gif" ||
path.find(
"%") != std::string::npos)
278 allocated_reader = reader;
288 if (allocated_reader) {
289 delete allocated_reader;
290 allocated_reader = NULL;
310 if (parentTimeline) {
312 std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->
GetTrackedObject(object_id);
313 Clip* clipObject = parentTimeline->
GetClip(object_id);
318 parentClipObject = NULL;
320 else if (clipObject) {
322 parentTrackedObject =
nullptr;
329 parentTrackedObject = trackedObject;
334 parentClipObject = clipObject;
342 bool is_same_reader =
false;
343 if (new_reader && allocated_reader) {
344 if (new_reader->
Name() ==
"FrameMapper") {
347 if (allocated_reader == clip_mapped_reader->
Reader()) {
348 is_same_reader =
true;
353 if (allocated_reader && !is_same_reader) {
355 allocated_reader->
Close();
356 delete allocated_reader;
358 allocated_reader = NULL;
380 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
401 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
407 if (is_open && reader) {
432 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
458 return GetFrame(NULL, clip_frame_number, NULL);
463 std::shared_ptr<Frame>
Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
466 return GetFrame(background_frame, clip_frame_number, NULL);
474 throw ReaderClosed(
"The Clip is closed. Call Open() before calling this method.");
479 std::shared_ptr<Frame> frame = NULL;
482 frame = GetOrCreateFrame(clip_frame_number);
485 int64_t timeline_frame_number = clip_frame_number;
486 QSize timeline_size(frame->GetWidth(), frame->GetHeight());
487 if (background_frame) {
489 timeline_frame_number = background_frame->number;
490 timeline_size.setWidth(background_frame->GetWidth());
491 timeline_size.setHeight(background_frame->GetHeight());
495 apply_timemapping(frame);
498 apply_waveform(frame, timeline_size);
501 apply_effects(frame, timeline_frame_number, options,
true);
504 apply_keyframes(frame, timeline_size);
507 apply_effects(frame, timeline_frame_number, options,
false);
512 if (!background_frame) {
513 background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
514 "#00000000", frame->GetAudioSamplesCount(),
515 frame->GetAudioChannelsCount());
517 apply_background(frame, background_frame,
false);
522 if (!background_frame) {
527 auto output = std::make_shared<Frame>(*frame.get());
528 apply_background(output, background_frame,
true);
533 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
540 for (
const auto& effect : effects) {
541 if (effect->Id() ==
id) {
550 if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
554 return parentClipObject;
559 if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
563 return parentTrackedObject;
567 std::string Clip::get_file_extension(std::string
path)
570 const auto dot_pos =
path.find_last_of(
'.');
571 if (dot_pos == std::string::npos || dot_pos + 1 >=
path.size()) {
572 return std::string();
575 return path.substr(dot_pos + 1);
579 void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
584 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
589 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
591 int64_t clip_frame_number = frame->number;
592 int64_t new_frame_number = adjust_frame_number_minimum(
time.
GetLong(clip_frame_number));
609 int source_sample_count = round(target_sample_count * fabs(delta));
615 location.
frame = new_frame_number;
630 init_samples.clear();
631 resampler->
SetBuffer(&init_samples, 1.0);
639 if (source_sample_count <= 0) {
641 frame->AddAudioSilence(target_sample_count);
647 source_samples->clear();
650 int remaining_samples = source_sample_count;
652 while (remaining_samples > 0) {
653 std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.
frame,
false);
654 int frame_sample_count = source_frame->GetAudioSamplesCount() - location.
sample_start;
657 if (
auto *fm =
dynamic_cast<FrameMapper*
>(reader)) {
658 fm->SetDirectionHint(is_increasing);
660 source_frame->SetAudioDirection(is_increasing);
662 if (frame_sample_count == 0) {
672 if (remaining_samples - frame_sample_count >= 0) {
674 for (
int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
675 source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.
sample_start, frame_sample_count, 1.0f);
683 remaining_samples -= frame_sample_count;
684 source_pos += frame_sample_count;
688 for (
int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
689 source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.
sample_start, remaining_samples, 1.0f);
692 remaining_samples = 0;
693 source_pos += remaining_samples;
700 frame->AddAudioSilence(target_sample_count);
702 if (source_sample_count != target_sample_count) {
704 double resample_ratio = double(source_sample_count) / double(target_sample_count);
705 resampler->
SetBuffer(source_samples, resample_ratio);
713 frame->AddAudio(
true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
719 frame->AddAudio(
true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
724 delete source_samples;
732 int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
735 if (frame_number < 1)
743 std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number,
bool enable_time)
747 int64_t clip_frame_number = adjust_frame_number_minimum(number);
748 bool is_increasing =
true;
753 const int64_t time_frame_number = adjust_frame_number_minimum(
time.
GetLong(clip_frame_number));
754 if (
auto *fm =
dynamic_cast<FrameMapper*
>(reader)) {
756 fm->SetDirectionHint(is_increasing);
758 clip_frame_number = time_frame_number;
763 "Clip::GetOrCreateFrame (from reader)",
764 "number", number,
"clip_frame_number", clip_frame_number);
767 auto reader_frame = reader->
GetFrame(clip_frame_number);
770 reader_frame->number = number;
771 reader_frame->SetAudioDirection(is_increasing);
777 auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
780 reader_copy->AddColor(QColor(Qt::transparent));
784 reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
800 "Clip::GetOrCreateFrame (create blank)",
802 "estimated_samples_in_frame", estimated_samples_in_frame);
805 auto new_frame = std::make_shared<Frame>(
807 "#000000", estimated_samples_in_frame, reader->
info.
channels);
810 new_frame->AddAudioSilence(estimated_samples_in_frame);
826 root[
"id"] =
add_property_json(
"ID", 0.0,
"string",
Id(), NULL, -1, -1,
true, requested_frame);
827 root[
"position"] =
add_property_json(
"Position",
Position(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
829 root[
"start"] =
add_property_json(
"Start",
Start(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
830 root[
"end"] =
add_property_json(
"End",
End(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
831 root[
"duration"] =
add_property_json(
"Duration", Duration(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
true, requested_frame);
836 root[
"composite"] =
add_property_json(
"Composite",
composite,
"int",
"", NULL, 0, composite_choices_count - 1,
false, requested_frame);
837 root[
"waveform"] =
add_property_json(
"Waveform", waveform,
"int",
"", NULL, 0, 1,
false, requested_frame);
838 root[
"parentObjectId"] =
add_property_json(
"Parent", 0.0,
"string", parentObjectId, NULL, -1, -1,
false, requested_frame);
869 for (
int i = 0; i < composite_choices_count; ++i)
877 if (parentClipObject)
882 double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
885 float parentObject_location_x = parentClipObject->
location_x.
GetValue(timeline_frame_number);
886 float parentObject_location_y = parentClipObject->
location_y.
GetValue(timeline_frame_number);
887 float parentObject_scale_x = parentClipObject->
scale_x.
GetValue(timeline_frame_number);
888 float parentObject_scale_y = parentClipObject->
scale_y.
GetValue(timeline_frame_number);
889 float parentObject_shear_x = parentClipObject->
shear_x.
GetValue(timeline_frame_number);
890 float parentObject_shear_y = parentClipObject->
shear_y.
GetValue(timeline_frame_number);
891 float parentObject_rotation = parentClipObject->
rotation.
GetValue(timeline_frame_number);
894 root[
"location_x"] =
add_property_json(
"Location X", parentObject_location_x,
"float",
"", &
location_x, -1.0, 1.0,
false, requested_frame);
895 root[
"location_y"] =
add_property_json(
"Location Y", parentObject_location_y,
"float",
"", &
location_y, -1.0, 1.0,
false, requested_frame);
896 root[
"scale_x"] =
add_property_json(
"Scale X", parentObject_scale_x,
"float",
"", &
scale_x, 0.0, 1.0,
false, requested_frame);
897 root[
"scale_y"] =
add_property_json(
"Scale Y", parentObject_scale_y,
"float",
"", &
scale_y, 0.0, 1.0,
false, requested_frame);
898 root[
"rotation"] =
add_property_json(
"Rotation", parentObject_rotation,
"float",
"", &
rotation, -360, 360,
false, requested_frame);
899 root[
"shear_x"] =
add_property_json(
"Shear X", parentObject_shear_x,
"float",
"", &
shear_x, -1.0, 1.0,
false, requested_frame);
900 root[
"shear_y"] =
add_property_json(
"Shear Y", parentObject_shear_y,
"float",
"", &
shear_y, -1.0, 1.0,
false, requested_frame);
940 return root.toStyledString();
948 root[
"parentObjectId"] = parentObjectId;
950 root[
"scale"] =
scale;
955 root[
"waveform"] = waveform;
983 root[
"effects"] = Json::Value(Json::arrayValue);
986 for (
auto existing_effect : effects)
988 root[
"effects"].append(existing_effect->JsonValue());
994 root[
"reader"] = Json::Value(Json::objectValue);
1010 catch (
const std::exception& e)
1013 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
1019 auto ensure_default_keyframe = [](
Keyframe& kf,
double default_value) {
1029 if (!root[
"parentObjectId"].isNull()){
1030 parentObjectId = root[
"parentObjectId"].asString();
1031 if (parentObjectId.size() > 0 && parentObjectId !=
""){
1034 parentTrackedObject =
nullptr;
1035 parentClipObject = NULL;
1038 if (!root[
"gravity"].isNull())
1040 if (!root[
"scale"].isNull())
1042 if (!root[
"anchor"].isNull())
1044 if (!root[
"display"].isNull())
1046 if (!root[
"mixing"].isNull())
1048 if (!root[
"composite"].isNull())
1050 if (!root[
"waveform"].isNull())
1051 waveform = root[
"waveform"].asBool();
1052 if (!root[
"scale_x"].isNull())
1054 if (!root[
"scale_y"].isNull())
1056 if (!root[
"location_x"].isNull())
1058 if (!root[
"location_y"].isNull())
1060 if (!root[
"alpha"].isNull())
1062 if (!root[
"rotation"].isNull())
1064 if (!root[
"time"].isNull())
1066 if (!root[
"volume"].isNull())
1068 if (!root[
"wave_color"].isNull())
1070 if (!root[
"shear_x"].isNull())
1072 if (!root[
"shear_y"].isNull())
1074 if (!root[
"origin_x"].isNull())
1076 if (!root[
"origin_y"].isNull())
1078 if (!root[
"channel_filter"].isNull())
1080 if (!root[
"channel_mapping"].isNull())
1082 if (!root[
"has_audio"].isNull())
1084 if (!root[
"has_video"].isNull())
1086 if (!root[
"perspective_c1_x"].isNull())
1088 if (!root[
"perspective_c1_y"].isNull())
1090 if (!root[
"perspective_c2_x"].isNull())
1092 if (!root[
"perspective_c2_y"].isNull())
1094 if (!root[
"perspective_c3_x"].isNull())
1096 if (!root[
"perspective_c3_y"].isNull())
1098 if (!root[
"perspective_c4_x"].isNull())
1100 if (!root[
"perspective_c4_y"].isNull())
1105 ensure_default_keyframe(
scale_x, 1.0);
1106 ensure_default_keyframe(
scale_y, 1.0);
1109 ensure_default_keyframe(
origin_x, 0.5);
1110 ensure_default_keyframe(
origin_y, 0.5);
1111 ensure_default_keyframe(
rotation, 0.0);
1112 if (!root[
"effects"].isNull()) {
1118 for (
const auto existing_effect : root[
"effects"]) {
1120 if (existing_effect.isNull()) {
1126 if (!existing_effect[
"type"].isNull()) {
1129 if ( (e =
EffectInfo().CreateEffect(existing_effect[
"type"].asString()))) {
1140 if (!root[
"reader"].isNull())
1142 if (!root[
"reader"][
"type"].isNull())
1145 bool already_open =
false;
1149 already_open = reader->
IsOpen();
1156 std::string type = root[
"reader"][
"type"].asString();
1158 if (type ==
"FFmpegReader") {
1164 }
else if (type ==
"QtImageReader") {
1170 #ifdef USE_IMAGEMAGICK
1171 }
else if (type ==
"ImageReader") {
1174 reader =
new ImageReader(root[
"reader"][
"path"].asString(),
false);
1177 }
else if (type ==
"TextReader") {
1184 }
else if (type ==
"ChunkReader") {
1190 }
else if (type ==
"DummyReader") {
1196 }
else if (type ==
"Timeline") {
1206 allocated_reader = reader;
1217 final_cache.
Clear();
1221 void Clip::sort_effects()
1234 effects.push_back(effect);
1243 effect->ParentTimeline(parentTimeline);
1250 if (parentTimeline){
1252 effect->ParentTimeline(parentTimeline);
1258 std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);
1261 trackedObjectBBox->ParentClip(
this);
1271 final_cache.
Clear();
1277 effects.remove(effect);
1280 final_cache.
Clear();
1284 void Clip::apply_background(std::shared_ptr<openshot::Frame> frame,
1285 std::shared_ptr<openshot::Frame> background_frame,
1286 bool update_frame_image) {
1288 std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
1289 QPainter painter(background_canvas.get());
1292 painter.setCompositionMode(
static_cast<QPainter::CompositionMode
>(
composite));
1293 painter.drawImage(0, 0, *frame->GetImage());
1298 if (update_frame_image)
1299 frame->AddImage(background_canvas);
1303 void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number,
TimelineInfoStruct* options,
bool before_keyframes)
1305 for (
auto effect : effects)
1308 if (effect->info.apply_before_clip && before_keyframes) {
1309 effect->ProcessFrame(frame, frame->number);
1310 }
else if (!effect->info.apply_before_clip && !before_keyframes) {
1311 effect->ProcessFrame(frame, frame->number);
1315 if (
timeline != NULL && options != NULL) {
1324 bool Clip::isNear(
double a,
double b)
1326 return fabs(a - b) < 0.000001;
1330 void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
1332 if (!frame->has_image_data) {
1338 std::shared_ptr<QImage> source_image = frame->GetImage();
1339 std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
1340 timeline_size.height(),
1341 QImage::Format_RGBA8888_Premultiplied);
1342 background_canvas->fill(QColor(Qt::transparent));
1345 QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());
1348 QPainter painter(background_canvas.get());
1349 painter.setRenderHint(QPainter::TextAntialiasing,
true);
1350 if (!transform.isIdentity()) {
1351 painter.setRenderHint(QPainter::SmoothPixmapTransform,
true);
1354 painter.setTransform(transform);
1357 painter.setCompositionMode(
static_cast<QPainter::CompositionMode
>(
composite));
1361 if (alpha_value != 1.0f) {
1362 painter.setOpacity(alpha_value);
1363 painter.drawImage(0, 0, *source_image);
1365 painter.setOpacity(1.0);
1367 painter.drawImage(0, 0, *source_image);
1375 std::stringstream frame_number_str;
1382 frame_number_str << frame->number;
1395 painter.setPen(QColor(
"#ffffff"));
1396 painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
1402 frame->AddImage(background_canvas);
1406 void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {
1414 std::shared_ptr<QImage> source_image = frame->GetImage();
1418 "frame->number", frame->number,
1419 "Waveform()", Waveform(),
1420 "width", timeline_size.width(),
1421 "height", timeline_size.height());
1430 source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue,
alpha);
1431 frame->AddImage(source_image);
1435 QSize Clip::scale_size(QSize source_size,
ScaleType source_scale,
int target_width,
int target_height) {
1436 switch (source_scale)
1439 source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
1443 source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
1447 source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);;
1456 QTransform Clip::get_transform(std::shared_ptr<Frame> frame,
int width,
int height)
1459 std::shared_ptr<QImage> source_image = frame->GetImage();
1462 QSize source_size = scale_size(source_image->size(),
scale, width, height);
1465 float parentObject_location_x = 0.0;
1466 float parentObject_location_y = 0.0;
1467 float parentObject_scale_x = 1.0;
1468 float parentObject_scale_y = 1.0;
1469 float parentObject_shear_x = 0.0;
1470 float parentObject_shear_y = 0.0;
1471 float parentObject_rotation = 0.0;
1477 long parent_frame_number = frame->number + parent_start_offset;
1480 parentObject_location_x = parentClipObject->
location_x.
GetValue(parent_frame_number);
1481 parentObject_location_y = parentClipObject->
location_y.
GetValue(parent_frame_number);
1482 parentObject_scale_x = parentClipObject->
scale_x.
GetValue(parent_frame_number);
1483 parentObject_scale_y = parentClipObject->
scale_y.
GetValue(parent_frame_number);
1484 parentObject_shear_x = parentClipObject->
shear_x.
GetValue(parent_frame_number);
1485 parentObject_shear_y = parentClipObject->
shear_y.
GetValue(parent_frame_number);
1486 parentObject_rotation = parentClipObject->
rotation.
GetValue(parent_frame_number);
1492 Clip* parentClip = (
Clip*) parentTrackedObject->ParentClip();
1497 long parent_frame_number = frame->number + parent_start_offset;
1500 std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);
1504 parentClip->
scale, width, height);
1507 int trackedWidth = trackedObjectProperties[
"w"] * trackedObjectProperties[
"sx"] * parent_size.width() *
1509 int trackedHeight = trackedObjectProperties[
"h"] * trackedObjectProperties[
"sy"] * parent_size.height() *
1513 source_size = scale_size(source_size,
scale, trackedWidth, trackedHeight);
1516 parentObject_location_x = parentClip->
location_x.
GetValue(parent_frame_number) + ((trackedObjectProperties[
"cx"] - 0.5) * parentClip->
scale_x.
GetValue(parent_frame_number));
1517 parentObject_location_y = parentClip->
location_y.
GetValue(parent_frame_number) + ((trackedObjectProperties[
"cy"] - 0.5) * parentClip->
scale_y.
GetValue(parent_frame_number));
1518 parentObject_rotation = trackedObjectProperties[
"r"] + parentClip->
rotation.
GetValue(parent_frame_number);
1531 if(parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
1532 sx*= parentObject_scale_x;
1533 sy*= parentObject_scale_y;
1536 float scaled_source_width = source_size.width() * sx;
1537 float scaled_source_height = source_size.height() * sy;
1545 x = (width - scaled_source_width) / 2.0;
1548 x = width - scaled_source_width;
1551 y = (height - scaled_source_height) / 2.0;
1554 x = (width - scaled_source_width) / 2.0;
1555 y = (height - scaled_source_height) / 2.0;
1558 x = width - scaled_source_width;
1559 y = (height - scaled_source_height) / 2.0;
1562 y = (height - scaled_source_height);
1565 x = (width - scaled_source_width) / 2.0;
1566 y = (height - scaled_source_height);
1569 x = width - scaled_source_width;
1570 y = (height - scaled_source_height);
1576 "Clip::get_transform (Gravity)",
1577 "frame->number", frame->number,
1578 "source_clip->gravity",
gravity,
1579 "scaled_source_width", scaled_source_width,
1580 "scaled_source_height", scaled_source_height);
1582 QTransform transform;
1588 float shear_x_value =
shear_x.
GetValue(frame->number) + parentObject_shear_x;
1589 float shear_y_value =
shear_y.
GetValue(frame->number) + parentObject_shear_y;
1595 "Clip::get_transform (Build QTransform - if needed)",
1596 "frame->number", frame->number,
1599 "sx", sx,
"sy", sy);
1601 if (!isNear(x, 0) || !isNear(y, 0)) {
1603 transform.translate(x, y);
1605 if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
1607 float origin_x_offset = (scaled_source_width * origin_x_value);
1608 float origin_y_offset = (scaled_source_height * origin_y_value);
1609 transform.translate(origin_x_offset, origin_y_offset);
1610 transform.rotate(r);
1611 transform.shear(shear_x_value, shear_y_value);
1612 transform.translate(-origin_x_offset,-origin_y_offset);
1615 float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1616 float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1617 if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
1618 transform.scale(source_width_scale, source_height_scale);
1625 int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1642 int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1644 return frame_number;