diff --git a/README.md b/README.md
index f1f2b5db13..13d542b9d5 100644
--- a/README.md
+++ b/README.md
@@ -94,6 +94,8 @@ Final Cut Pro X XML Format:
 - http://www.amwa.tv/downloads/specifications/aafobjectspec-v1.1.pdf
 - http://www.amwa.tv/downloads/specifications/aafeditprotocol.pdf
 
+- set ${OTIO_AAF_PYTHON_LIB} to point at the location of the PyAAF module.
+
 Contrib Adapters
 ----------------
diff --git a/contrib/adapters/advanced_authoring_format.py b/contrib/adapters/advanced_authoring_format.py
new file mode 100644
index 0000000000..3045eaddf5
--- /dev/null
+++ b/contrib/adapters/advanced_authoring_format.py
@@ -0,0 +1,462 @@
+#
+# Copyright 2017 Pixar Animation Studios
+#
+# Licensed under the Apache License, Version 2.0 (the "Apache License")
+# with the following modification; you may not use this file except in
+# compliance with the Apache License and the following modification to it:
+# Section 6. Trademarks. is deleted and replaced with:
+#
+# 6. Trademarks. This License does not grant permission to use the trade
+#    names, trademarks, service marks, or product names of the Licensor
+#    and its affiliates, except as required to comply with Section 4(c) of
+#    the License and to reproduce the content of the NOTICE file.
+#
+# You may obtain a copy of the Apache License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the Apache License with the above modification is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the Apache License for the specific
+# language governing permissions and limitations under the Apache License.
+#
+
+"""OpenTimelineIO Advanced Authoring Format (AAF) Adapter
+
+Requires that you set the environment variable:
+    OTIO_AAF_PYTHON_LIB - should point at the PyAAF module.
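+
+For example (the path below is illustrative, not a real install location):
+
+    export OTIO_AAF_PYTHON_LIB=/path/to/pyaaf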
+""" + +import os +import sys +import re +import opentimelineio as otio + +if os.environ["OTIO_AAF_PYTHON_LIB"] not in sys.path: + sys.path += [os.environ["OTIO_AAF_PYTHON_LIB"]] + +import aaf # noqa (E402 module level import not at top of file) +import aaf.storage # noqa +import aaf.mob # noqa +import aaf.define # noqa +import aaf.component # noqa +import aaf.base # noqa + +debug = False +__names = set() + + +def _unique_name(name): + while name in __names: + m = re.search(r'(\d+)$', name) + if m: + num = int(m.group(1)) + name = re.sub(r'(\d+)$', str(num+1), name) + else: + name = name+" 2" + __names.add(name) + return name + + +def _get_name(item): + if hasattr(item, 'name'): + name = item.name + if name: + return name + if isinstance(item, aaf.component.SourceClip): + return item.resolve_ref().name or _unique_name("Untitled SourceClip") + return _unique_name(_get_class_name(item)) + + +def _get_class_name(item): + if hasattr(item, "class_name"): + return item.class_name + else: + return item.__class__.__name__ + + +def _transcribe_property(prop): + if isinstance(prop, list): + return [_transcribe_property(child) for child in prop] + + elif type(prop) in (str, unicode, int, float, bool): + return prop + + if isinstance(prop, aaf.iterator.PropValueResolveIter): + result = {} + for child in prop: + if isinstance(child, aaf.property.TaggedValue): + result[child.name] = _transcribe_property(child.value) + elif isinstance(child, aaf.mob.MasterMob): + result[child.name] = str(child.mobID) + elif isinstance(child, aaf.mob.SourceMob): + result[child.name] = str(child.mobID) + # elif hasattr(child, "name"): + # result[child.name] = _transcribe(child) + else: + if debug: + print("??? {}".format(child)) + return result + + else: + return str(prop) + + +def _add_child(parent, child, source): + if child is None: + if debug: + print("MISSING CHILD? {}".format(source)) + elif isinstance(child, otio.schema.Marker): + parent.markers.append(child) + else: + parent.append(child) + + +def _transcribe(item, parent=None, editRate=24, masterMobs=None): + result = None + metadata = {} + + metadata["Name"] = _get_name(item) + metadata["ClassName"] = _get_class_name(item) + + if isinstance(item, aaf.component.Component): + metadata["Length"] = item.length + + if isinstance(item, aaf.base.AAFObject): + for prop in item.properties(): + if hasattr(prop, 'name') and hasattr(prop, 'value'): + key = str(prop.name) + value = prop.value + # if isinstance(value, aaf.dictionary.Dictionary): + # ??? 
+ metadata[key] = _transcribe_property(value) + + if False: + pass + + # elif isinstance(item, aaf.storage.File): + # self.extendChildItems([item.header]) + + # elif isinstance(item, aaf.storage.Header): + # self.extendChildItems([item.storage()]) + # self.extendChildItems([item.dictionary()]) + + # elif isinstance(item, DummyItem): + # self.extendChildItems(item.item) + + elif isinstance(item, aaf.storage.ContentStorage): + result = otio.schema.SerializableCollection() + + # Gather all the Master Mobs, so we can find them later by MobID + # when we parse the SourceClips in the composition + if masterMobs is None: + masterMobs = {} + for mob in item.master_mobs(): + child = _transcribe(mob, parent=item) + if child is not None: + mobID = child.metadata.get("AAF", {}).get("MobID") + masterMobs[mobID] = child + + for mob in item.composition_mobs(): + child = _transcribe(mob, parent=item, masterMobs=masterMobs) + _add_child(result, child, mob) + + # for mob in item.GetSourceMobs(): + # source = _transcribe(mob, parent=item, masterMobs=masterMobs) + # result.append(source) + + elif isinstance(item, aaf.mob.Mob): + result = otio.schema.Timeline() + + for slot in item.slots(): + child = _transcribe(slot, parent=item, masterMobs=masterMobs) + _add_child(result.tracks, child, slot) + + # elif isinstance(item, aaf.dictionary.Dictionary): + # l = [] + # l.append(DummyItem(list(item.class_defs()), 'ClassDefs')) + # l.append(DummyItem(list(item.codec_defs()), 'CodecDefs')) + # l.append(DummyItem(list(item.container_defs()), 'ContainerDefs')) + # l.append(DummyItem(list(item.data_defs()), 'DataDefs')) + # l.append(DummyItem(list(item.interpolation_defs()), + # 'InterpolationDefs')) + # l.append(DummyItem(list(item.klvdata_defs()), 'KLVDataDefs')) + # l.append(DummyItem(list(item.operation_defs()), 'OperationDefs')) + # l.append(DummyItem(list(item.parameter_defs()), 'ParameterDefs')) + # l.append(DummyItem(list(item.plugin_defs()), 'PluginDefs')) + # l.append(DummyItem(list(item.taggedvalue_defs()), 'TaggedValueDefs')) + # l.append(DummyItem(list(item.type_defs()), 'TypeDefs')) + # self.extendChildItems(l) + # + # elif isinstance(item, aaf.mob.Mob): + # + # self.extendChildItems(list(item.slots())) + # + # elif isinstance(item, aaf.mob.MobSlot): + # self.extendChildItems([item.segment]) + # elif isinstance(item, aaf.component.NestedScope): + # self.extendChildItems(list(item.segments())) + # elif isinstance(item, aaf.component.Sequence): + # self.extendChildItems(list(item.components())) + # + # elif isinstance(item, aaf.component.SourceClip): + # ref = item.resolve_ref() + # name = ref.name + # if name: + # self.extendChildItems([name]) + # + # elif isinstance(item,aaf.component.OperationGroup): + # self.extendChildItems(list(item.input_segments())) + +# elif isinstance(item, pyaaf.AxSelector): +# self.extendChildItems(list(item.EnumAlternateSegments())) +# +# elif isinstance(item, pyaaf.AxScopeReference): +# #print item, item.GetRelativeScope(),item.GetRelativeSlot() +# pass +# +# elif isinstance(item, pyaaf.AxEssenceGroup): +# segments = [] +# +# for i in xrange(item.CountChoices()): +# choice = item.GetChoiceAt(i) +# segments.append(choice) +# self.extendChildItems(segments) +# +# elif isinstance(item, pyaaf.AxProperty): +# self.properties['Value'] = str(item.GetValue()) + # elif isinstance(item, (aaf.base.AAFObject,aaf.define.MetaDef)): + # pass + # + # elif isinstance(item, aaf.component.Component): + # pass + # + # else: + # self.properties['Name'] = str(item) + # self.properties['ClassName'] = 
str(type(item)) + # return + + elif isinstance(item, aaf.component.SourceClip): + result = otio.schema.Clip() + + length = item.length + startTime = int(metadata.get("StartTime", "0")) + result.source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(startTime, editRate), + otio.opentime.RationalTime(length, editRate) + ) + + mobID = metadata.get("SourceID") + if masterMobs and mobID: + masterMob = masterMobs.get(mobID) + if masterMob: + media = otio.media_reference.MissingReference() + # copy the metadata from the master into the media_reference + media.metadata["AAF"] = masterMob.metadata.get("AAF", {}) + result.media_reference = media + + elif isinstance(item, aaf.component.Transition): + result = otio.schema.Transition() + + # Does AAF support anything else? + result.transition_type = otio.schema.TransitionTypes.SMPTE_Dissolve + + in_offset = int(metadata.get("CutPoint", "0")) + out_offset = item.length - in_offset + result.in_offset = otio.opentime.RationalTime(in_offset, editRate) + result.out_offset = otio.opentime.RationalTime(out_offset, editRate) + + elif isinstance(item, aaf.component.Filler): + result = otio.schema.Gap() + + length = item.length + result.source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, editRate), + otio.opentime.RationalTime(length, editRate) + ) + + elif isinstance(item, aaf.component.NestedScope): + # TODO: Is this the right class? + result = otio.schema.Stack() + + for segment in item.segments(): + child = _transcribe(segment, parent=item, masterMobs=masterMobs) + _add_child(result, child, segment) + + elif isinstance(item, aaf.component.Sequence): + result = otio.schema.Track() + + for component in item.components(): + child = _transcribe(component, parent=item, masterMobs=masterMobs) + _add_child(result, child, component) + + elif isinstance(item, aaf.mob.TimelineMobSlot): + result = otio.schema.Track() + + child = _transcribe(item.segment, parent=item, masterMobs=masterMobs) + _add_child(result, child, item.segment) + if child is not None: + child.metadata["AAF"]["MediaKind"] = str(item.segment.media_kind) + + elif isinstance(item, aaf.mob.MobSlot): + result = otio.schema.Track() + + child = _transcribe(item.segment, parent=item, masterMobs=masterMobs) + _add_child(result, child, item.segment) + + elif isinstance(item, aaf.component.Timecode): + pass + elif isinstance(item, aaf.component.Pulldown): + pass + elif isinstance(item, aaf.component.EdgeCode): + pass + elif isinstance(item, aaf.component.ScopeReference): + # TODO: is this like FILLER? + + result = otio.schema.Gap() + + length = item.length + result.source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, editRate), + otio.opentime.RationalTime(length, editRate) + ) + + elif isinstance(item, aaf.component.DescriptiveMarker): + + # TODO: We can get markers this way, but they come in on + # a separate Track. We need to consolidate them onto the + # same track(s) as the Clips. 
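+        # One possible (untested) approach: transcribe each marker here and
+        # have a post-pass attach it to the clip whose range in the track
+        # contains marker.marked_range.start_time.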
+ # result = otio.schema.Marker() + pass + + else: + # result = otio.core.Composition() + if debug: + print("SKIPPING: {}: {} -- {}".format(type(item), item, result)) + + if result is not None: + result.name = str(metadata["Name"]) + if not result.metadata: + result.metadata = {} + result.metadata["AAF"] = metadata + + return result + + +def _fix_transitions(thing): + if isinstance(thing, otio.schema.Timeline): + _fix_transitions(thing.tracks) + elif ( + isinstance(thing, otio.core.Composition) or + isinstance(thing, otio.schema.SerializableCollection) + ): + if isinstance(thing, otio.schema.Track): + for c, child in enumerate(thing): + + # Was the item before us a Transition? + if c > 0 and isinstance( + thing[c-1], + otio.schema.Transition + ): + trans = thing[c-1] + child.source_range.start_time += trans.in_offset + child.source_range.duration -= trans.in_offset + + # Is the item after us a Transition? + if c < len(thing)-1 and isinstance( + thing[c+1], + otio.schema.Transition + ): + after = thing[c+1] + child.source_range.duration -= after.out_offset + + for c, child in enumerate(thing): + _fix_transitions(child) + + +def _simplify(thing): + if isinstance(thing, otio.schema.SerializableCollection): + if len(thing) == 1: + return _simplify(thing[0]) + else: + for c, child in enumerate(thing): + thing[c] = _simplify(child) + return thing + + elif isinstance(thing, otio.schema.Timeline): + result = _simplify(thing.tracks) + + # Only replace the Timeline's stack if the simplified result + # was also a Stack. Otherwise leave it (the contents will have + # been simplified in place). + if isinstance(result, otio.schema.Stack): + thing.tracks = result + + return thing + + elif isinstance(thing, otio.core.Composition): + # simplify our children + for c, child in enumerate(thing): + thing[c] = _simplify(child) + + # remove empty children of Stacks + if isinstance(thing, otio.schema.Stack): + for c in reversed(range(len(thing))): + child = thing[c] + if not _contains_something_valuable(child): + # TODO: We're discarding metadata... should we retain it? + del thing[c] + + # skip redundant containers + if len(thing) == 1: + # TODO: We may be discarding metadata here, should we merge it? + return thing[0] + + return thing + + +def _contains_something_valuable(thing): + if isinstance(thing, otio.core.Item): + if len(thing.effects) > 0 or len(thing.markers) > 0: + return True + + if isinstance(thing, otio.core.Composition): + + if len(thing) == 0: + # NOT valuable because it is empty + return False + + for child in thing: + if _contains_something_valuable(child): + # valuable because this child is valuable + return True + + # none of the children were valuable, so thing is NOT valuable + return False + + if isinstance(thing, otio.schema.Gap): + # TODO: Are there other valuable things we should look for on a Gap? 
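+        # For now a Gap is NOT valuable on its own; any effects or markers
+        # attached to it were already caught by the Item check above.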
+ return False + + # anything else is presumed to be valuable + return True + + +def read_from_file(filepath, simplify=True): + + f = aaf.open(filepath) + + # header = f.header + storage = f.storage + # topLevelMobs = list(storage.toplevel_mobs()) + + __names.clear() + result = _transcribe(storage) + + _fix_transitions(result) + + if simplify: + result = _simplify(result) + + return result diff --git a/contrib/adapters/contrib_adapters.plugin_manifest.json b/contrib/adapters/contrib_adapters.plugin_manifest.json index 968928eb74..28721065e2 100644 --- a/contrib/adapters/contrib_adapters.plugin_manifest.json +++ b/contrib/adapters/contrib_adapters.plugin_manifest.json @@ -35,6 +35,13 @@ "execution_scope" : "in process", "filepath" : "burnins.py", "suffixes" : [] + }, + { + "OTIO_SCHEMA" : "Adapter.1", + "name" : "AAF", + "execution_scope" : "in process", + "filepath" : "advanced_authoring_format.py", + "suffixes" : ["aaf"] } ] } diff --git a/contrib/adapters/tests/sample_data/simple.aaf b/contrib/adapters/tests/sample_data/simple.aaf new file mode 100644 index 0000000000..81a09225c9 Binary files /dev/null and b/contrib/adapters/tests/sample_data/simple.aaf differ diff --git a/contrib/adapters/tests/sample_data/transitions.aaf b/contrib/adapters/tests/sample_data/transitions.aaf new file mode 100644 index 0000000000..0b56704f06 Binary files /dev/null and b/contrib/adapters/tests/sample_data/transitions.aaf differ diff --git a/contrib/adapters/tests/sample_data/trims.aaf b/contrib/adapters/tests/sample_data/trims.aaf new file mode 100644 index 0000000000..e4953dafdf Binary files /dev/null and b/contrib/adapters/tests/sample_data/trims.aaf differ diff --git a/contrib/adapters/tests/test_aaf_adapter.py b/contrib/adapters/tests/test_aaf_adapter.py new file mode 100644 index 0000000000..34939abf34 --- /dev/null +++ b/contrib/adapters/tests/test_aaf_adapter.py @@ -0,0 +1,429 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""Test the AAF adapter.""" + +# python +import os +import unittest + +import opentimelineio as otio + +SAMPLE_DATA_DIR = os.path.join(os.path.dirname(__file__), "sample_data") +EXAMPLE_PATH = os.path.join(SAMPLE_DATA_DIR, "simple.aaf") +EXAMPLE_PATH2 = os.path.join(SAMPLE_DATA_DIR, "transitions.aaf") +EXAMPLE_PATH3 = os.path.join(SAMPLE_DATA_DIR, "trims.aaf") + + +@unittest.skipIf( + "OTIO_AAF_PYTHON_LIB" not in os.environ, + "OTIO_AAF_PYTHON_LIB not set, required for the AAF adapter" +) +class AAFAdapterTest(unittest.TestCase): + + def test_aaf_read(self): + aaf_path = EXAMPLE_PATH + timeline = otio.adapters.read_from_file(aaf_path) + self.assertEqual(timeline.name, "OTIO TEST 1.Exported.01") + fps = timeline.duration().rate + self.assertEqual(fps, 24.0) + self.assertEqual( + timeline.duration(), + otio.opentime.from_timecode("00:02:16:18", fps) + ) + + self.assertEqual(len(timeline.tracks), 1) + video_track = timeline.tracks[0] + self.assertEqual(len(video_track), 5) + + clips = list(timeline.each_clip()) + + self.assertEqual( + [clip.name for clip in clips], + [ + "tech.fux (loop)-HD.mp4", + "t-hawk (loop)-HD.mp4", + "out-b (loop)-HD.mp4", + "KOLL-HD.mp4", + "brokchrd (loop)-HD.mp4" + ] + ) + self.maxDiff = None + self.assertEqual( + [clip.source_range for clip in clips], + [ + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:00:00:00", fps), + otio.opentime.from_timecode("00:00:30:00", fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:00:00:00", fps), + otio.opentime.from_timecode("00:00:20:00", fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:00:00:00", fps), + otio.opentime.from_timecode("00:00:30:02", fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:00:00:00", fps), + otio.opentime.from_timecode("00:00:26:16", fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:00:00:00", fps), + otio.opentime.from_timecode("00:00:30:00", fps) + ) + ] + ) + + def test_aaf_simplify(self): + aaf_path = EXAMPLE_PATH + timeline = otio.adapters.read_from_file(aaf_path, simplify=True) + self.assertTrue(timeline is not None) + self.assertEqual(type(timeline), otio.schema.Timeline) + self.assertEqual(timeline.name, "OTIO TEST 1.Exported.01") + fps = timeline.duration().rate + self.assertEqual(fps, 24.0) + self.assertEqual( + timeline.duration(), + otio.opentime.from_timecode("00:02:16:18", fps) + ) + self.assertEqual(len(timeline.tracks), 1) + video_track = timeline.tracks[0] + self.assertEqual(len(video_track), 5) + + def test_aaf_no_simplify(self): + aaf_path = EXAMPLE_PATH + collection = otio.adapters.read_from_file(aaf_path, simplify=False) + self.assertTrue(collection is not None) + self.assertEqual(type(collection), otio.schema.SerializableCollection) + self.assertEqual(len(collection), 1) + + timeline = collection[0] + self.assertEqual(timeline.name, "OTIO TEST 1.Exported.01") + fps = timeline.duration().rate + self.assertEqual(fps, 24.0) + self.assertEqual( + timeline.duration(), + otio.opentime.from_timecode("00:02:16:18", fps) + ) + + self.assertEqual(len(timeline.tracks), 12) + + video_track = timeline.tracks[8][0] + self.assertEqual(len(video_track), 5) + + def test_aaf_read_trims(self): + aaf_path = EXAMPLE_PATH3 + timeline = otio.adapters.read_from_file(aaf_path) + self.assertEqual( + timeline.name, + "OTIO TEST 1.Exported.01 - trims.Exported.02" + ) + fps = timeline.duration().rate + self.assertEqual(fps, 24.0) + + self.assertEqual(len(timeline.tracks), 1) + video_track = timeline.tracks[0] + 
self.assertEqual(len(video_track), 6) + + self.assertEqual( + [type(item) for item in video_track], + [ + otio.schema.Clip, + otio.schema.Clip, + otio.schema.Clip, + otio.schema.Clip, + otio.schema.Gap, + otio.schema.Clip, + ] + ) + + clips = list(video_track.each_clip()) + + self.assertEqual( + [item.name for item in video_track], + [ + "tech.fux (loop)-HD.mp4", + "t-hawk (loop)-HD.mp4", + "out-b (loop)-HD.mp4", + "KOLL-HD.mp4", + "Filler", # Gap + "brokchrd (loop)-HD.mp4" + ] + ) + + self.maxDiff = None + desired_ranges = [ + otio.opentime.TimeRange( + otio.opentime.from_frames(0, fps), + otio.opentime.from_frames(720-0, fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_frames(121, fps), + otio.opentime.from_frames(480-121, fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_frames(123, fps), + otio.opentime.from_frames(523-123, fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_frames(0, fps), + otio.opentime.from_frames(559-0, fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_frames(69, fps), + otio.opentime.from_frames(720-69, fps) + ) + ] + for clip, desired in zip(clips, desired_ranges): + actual = clip.source_range + self.assertEqual( + actual, + desired, + "clip '{}' source_range should be {} not {}".format( + clip.name, + desired, + actual + ) + ) + + desired_ranges = [ + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:00:00:00", fps), + otio.opentime.from_timecode("00:00:30:00", fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:00:30:00", fps), + otio.opentime.from_timecode("00:00:14:23", fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:00:44:23", fps), + otio.opentime.from_timecode("00:00:16:16", fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:01:01:15", fps), + otio.opentime.from_timecode("00:00:23:07", fps) + ), + otio.opentime.TimeRange( # Gap + otio.opentime.from_timecode("00:01:24:22", fps), + otio.opentime.from_timecode("00:00:04:12", fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_timecode("00:01:29:10", fps), + otio.opentime.from_timecode("00:00:27:03", fps) + ) + ] + for item, desired in zip(video_track, desired_ranges): + actual = item.trimmed_range_in_parent() + self.assertEqual( + actual, + desired, + "item '{}' trimmed_range_in_parent should be {} not {}".format( + clip.name, + desired, + actual + ) + ) + + self.assertEqual( + timeline.duration(), + otio.opentime.from_timecode("00:01:56:13", fps) + ) + + def test_aaf_read_transitions(self): + aaf_path = EXAMPLE_PATH2 + timeline = otio.adapters.read_from_file(aaf_path) + self.assertEqual(timeline.name, "OTIO TEST - transitions.Exported.01") + fps = timeline.duration().rate + self.assertEqual(fps, 24.0) + + self.assertEqual(len(timeline.tracks), 1) + video_track = timeline.tracks[0] + self.assertEqual(len(video_track), 12) + + clips = list(timeline.each_clip()) + self.assertEqual(len(clips), 4) + + self.assertEqual( + [type(item) for item in video_track], + [ + otio.schema.Gap, + otio.schema.Transition, + otio.schema.Clip, + otio.schema.Transition, + otio.schema.Clip, + otio.schema.Transition, + otio.schema.Gap, + otio.schema.Transition, + otio.schema.Clip, + otio.schema.Clip, + otio.schema.Transition, + otio.schema.Gap, + ] + ) + + self.assertEqual( + [item.name for item in video_track], + [ + "Filler", + "Transition", + "tech.fux (loop)-HD.mp4", + "Transition 2", + "t-hawk (loop)-HD.mp4", + "Transition 3", + "Filler 2", + "Transition 4", + "KOLL-HD.mp4", + "brokchrd (loop)-HD.mp4", + "Transition 5", + 
"Filler 3" + ] + ) + + self.maxDiff = None + desired_ranges = [ + otio.opentime.TimeRange( + otio.opentime.from_frames(0, fps), + otio.opentime.from_frames(117, fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_frames(123, fps), + otio.opentime.from_frames(200-123, fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_frames(55, fps), + otio.opentime.from_frames(199-55, fps) + ), + otio.opentime.TimeRange( + otio.opentime.from_frames(0, fps), + otio.opentime.from_frames(130, fps) + ) + ] + for clip, desired in zip(clips, desired_ranges): + actual = clip.source_range + self.assertEqual( + actual, + desired, + "clip '{}' source_range should be {} not {}".format( + clip.name, + desired, + actual + ) + ) + + desired_ranges = [ + otio.opentime.TimeRange( # Gap + otio.opentime.from_timecode("00:00:00:00", fps), + otio.opentime.from_timecode("00:00:00:00", fps) + ), + otio.opentime.TimeRange( # Transition + otio.opentime.from_timecode("00:00:00:00", fps), + otio.opentime.from_timecode("00:00:00:12", fps) + ), + otio.opentime.TimeRange( # tech.fux + otio.opentime.from_timecode("00:00:00:00", fps), + otio.opentime.from_timecode("00:00:04:21", fps) + ), + otio.opentime.TimeRange( # Transition + otio.opentime.from_timecode("00:00:02:21", fps), + otio.opentime.from_timecode("00:00:02:00", fps) + ), + otio.opentime.TimeRange( # t-hawk + otio.opentime.from_timecode("00:00:04:21", fps), + otio.opentime.from_timecode("00:00:03:05", fps) + ), + otio.opentime.TimeRange( # Transition + otio.opentime.from_timecode("00:00:07:14", fps), + otio.opentime.from_timecode("00:00:01:00", fps) + ), + otio.opentime.TimeRange( # Gap + otio.opentime.from_timecode("00:00:08:02", fps), + otio.opentime.from_timecode("00:00:02:05", fps) + ), + otio.opentime.TimeRange( # Transition + otio.opentime.from_timecode("00:00:09:07", fps), + otio.opentime.from_timecode("00:00:02:00", fps) + ), + otio.opentime.TimeRange( # KOLL-HD + otio.opentime.from_timecode("00:00:10:07", fps), + otio.opentime.from_timecode("00:00:06:00", fps) + ), + otio.opentime.TimeRange( # brokchrd + otio.opentime.from_timecode("00:00:16:07", fps), + otio.opentime.from_timecode("00:00:05:10", fps) + ), + otio.opentime.TimeRange( # Transition + otio.opentime.from_timecode("00:00:19:17", fps), + otio.opentime.from_timecode("00:00:02:00", fps) + ), + otio.opentime.TimeRange( # Gap + otio.opentime.from_timecode("00:00:21:17", fps), + otio.opentime.from_timecode("00:00:00:00", fps) + ) + ] + for item, desired in zip(video_track, desired_ranges): + actual = item.trimmed_range_in_parent() + self.assertEqual( + actual, + desired, + "item '{}' trimmed_range_in_parent should be {} not {}".format( + clip.name, + desired, + actual + ) + ) + + self.assertEqual( + timeline.duration(), + otio.opentime.from_timecode("00:00:21:17", fps) + ) + + def test_aaf_user_comments(self): + aaf_path = EXAMPLE_PATH3 + timeline = otio.adapters.read_from_file(aaf_path) + self.assertTrue(timeline is not None) + self.assertEqual(type(timeline), otio.schema.Timeline) + self.assertTrue(timeline.metadata.get("AAF") is not None) + correctWords = [ + "test1", + "testing 1 2 3", + u"Eyjafjallaj\xf6kull", + "'s' \"d\" `b`", + None, # Gap + None + ] + for clip, correctWord in zip(timeline.tracks[0], correctWords): + if isinstance(clip, otio.schema.Gap): + continue + AAFmetadata = clip.media_reference.metadata.get("AAF") + self.assertTrue(AAFmetadata is not None) + self.assertTrue(AAFmetadata.get("UserComments") is not None) + self.assertEqual( + 
AAFmetadata.get("UserComments").get("CustomTest"), + correctWord + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/opentimelineio/adapters/cmx_3600.py b/opentimelineio/adapters/cmx_3600.py index 5557147b19..1cda2e312e 100644 --- a/opentimelineio/adapters/cmx_3600.py +++ b/opentimelineio/adapters/cmx_3600.py @@ -594,118 +594,232 @@ def write_to_string(input_otio, rate=None, style='avid'): # "No audio tracks are currently supported." # ) - lines = [] - - lines.append("TITLE: {}".format(input_otio.name)) # TODO: We should try to detect the frame rate and output an # appropriate "FCM: NON-DROP FRAME" etc here. - lines.append("") - - edit_number = 1 track = input_otio.tracks[0] edl_rate = rate or track.duration().rate - for i, clip in enumerate(track): - source_tc_in = otio.opentime.to_timecode( - clip.source_range.start_time, - edl_rate - ) - source_tc_out = otio.opentime.to_timecode( - clip.source_range.end_time_exclusive(), - edl_rate - ) + kind = "V" if track.kind == otio.schema.TrackKind.Video else "A" + + # Transitions in EDLs are unconventionally represented. + # + # Where a trasition might normally be visualized like: + # A |---------------------|\--| + # B |----\|-------------------| + # |-----|---| + # 57 43 + # + # In an EDL it can be thought of more like this: + # A |---------------|xxxxxxxxx| + # B |\------------------------| + # |---------| + # 100 + + edit_number = 0 + clips_and_events = [] + for prev_child, child, next_child in lookahead_and_behind_enumerate(track): + if isinstance(child, otio.schema.Transition): + continue - range_in_track = track.range_of_child_at_index(i) - record_tc_in = otio.opentime.to_timecode( - range_in_track.start_time, - edl_rate - ) - record_tc_out = otio.opentime.to_timecode( - range_in_track.end_time_exclusive(), - edl_rate - ) + edit_number += 1 - reel = "AX" - name = None - url = None + event = EventLine( + kind=kind, + edit_number=edit_number, + rate=edl_rate + ) + event.source_in = child.source_range.start_time + event.source_out = child.source_range.end_time_exclusive() + range_in_track = child.range_in_parent() + event.record_in = range_in_track.start_time + event.record_out = range_in_track.end_time_exclusive() + + if isinstance(next_child, otio.schema.Transition): + trans = next_child + a_side_event = event + + # Shorten this, the A-side + a_side_event.source_out -= trans.in_offset + a_side_event.record_out -= trans.in_offset + + if isinstance(prev_child, otio.schema.Transition): + a_side, a_side_event = clips_and_events[-1] + b_side, b_side_event = child, event + trans = prev_child + + # Add A-side cut + cut_line = EventLine( + kind=kind, + edit_number=edit_number, + rate=edl_rate + ) + cut_line.reel = a_side_event.reel + cut_line.source_in = a_side_event.source_out + cut_line.source_out = a_side_event.source_out + cut_line.record_in = a_side_event.record_out + cut_line.record_out = a_side_event.record_out + clips_and_events += [(None, cut_line)] - if clip.media_reference: - if isinstance(clip.media_reference, otio.schema.Gap): - reel = "BL" - elif hasattr(clip.media_reference, 'target_url'): - url = clip.media_reference.target_url - else: - url = clip.name + # Lengthen B-side, adding dissolve + b_side_event.source_in -= trans.in_offset + b_side_event.record_in = a_side_event.record_out + b_side_event.dissolve_length = trans.in_offset + trans.out_offset - name = clip.name + event.reel = reel_from_clip(child) - kind = "V" if track.kind == otio.schema.TrackKind.Video else "A" + clips_and_events += [(child, event)] - 
lines.append( - "{:03d} {:8} {:5} C {} {} {} {}".format( - edit_number, - reel, - kind, - source_tc_in, - source_tc_out, - record_tc_in, - record_tc_out - ) - ) + lines = [] - if name: - # Avid Media Composer outputs two spaces before the - # clip name so we match that. - lines.append("* FROM CLIP NAME: {}".format(name)) - if url and style == 'avid': - lines.append("* FROM CLIP: {}".format(url)) - if url and style == 'nucoda': - lines.append("* FROM FILE: {}".format(url)) - - cdl = clip.metadata.get('cdl') - if cdl: - asc_sop = cdl.get('asc_sop') - asc_sat = cdl.get('asc_sat') - if asc_sop: - lines.append( - "*ASC_SOP ({} {} {}) ({} {} {}) ({} {} {})".format( - asc_sop['slope'][0], - asc_sop['slope'][1], - asc_sop['slope'][2], - asc_sop['offset'][0], - asc_sop['offset'][1], - asc_sop['offset'][2], - asc_sop['power'][0], - asc_sop['power'][1], - asc_sop['power'][2] - )) - if asc_sat: - lines.append("*ASC_SAT {}".format( - asc_sat - )) + if input_otio.name: + lines += ["TITLE: {}".format(input_otio.name), ""] - # Output any markers on this clip - for marker in clip.markers: - timecode = otio.opentime.to_timecode( - marker.marked_range.start_time, - edl_rate + for clip, event in clips_and_events: + lines += [str(event)] + if clip: + lines += generate_comment_lines( + clip, + style=style, + edl_rate=edl_rate ) - color = marker.color - meta = marker.metadata.get("cmx_3600") - if not color and meta and meta.get("color"): - color = meta.get("color").upper() - comment = (marker.name or '').upper() - lines.append("* LOC: {} {:7} {}".format(timecode, color, comment)) + text = "\n".join(lines) + "\n" + return text - # If we are carrying any unhandled CMX 3600 comments on this clip - # then output them blindly. - extra_comments = clip.metadata.get('cmx_3600', {}).get('comments', []) - for comment in extra_comments: - lines.append("* {}".format(comment)) - lines.append("") - edit_number += 1 +def generate_comment_lines(clip, style, edl_rate, from_or_to='FROM'): + lines = [] + url = None + if clip.media_reference: + if hasattr(clip.media_reference, 'target_url'): + url = clip.media_reference.target_url + else: + url = clip.name + + if from_or_to not in ['FROM', 'TO']: + raise otio.exceptions.NotSupportedError( + "The clip FROM or TO value '{}' is not supported.".format( + from_or_to + ) + ) - text = "\n".join(lines) - return text + if clip.name: + # Avid Media Composer outputs two spaces before the + # clip name so we match that. 
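+        # (from_or_to is validated above; a hypothetical B-side writer would
+        # pass from_or_to='TO' here to emit '* TO CLIP NAME:' style lines.)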
+ lines.append("* {from_or_to} CLIP NAME: {name}".format( + from_or_to=from_or_to, + name=clip.name + )) + if url and style == 'avid': + lines.append("* {from_or_to} CLIP: {url}".format( + from_or_to=from_or_to, + url=url + )) + if url and style == 'nucoda': + lines.append("* {from_or_to} FILE: {url}".format( + from_or_to=from_or_to, + url=url + )) + + cdl = clip.metadata.get('cdl') + if cdl: + asc_sop = cdl.get('asc_sop') + asc_sat = cdl.get('asc_sat') + if asc_sop: + lines.append( + "*ASC_SOP ({} {} {}) ({} {} {}) ({} {} {})".format( + asc_sop['slope'][0], + asc_sop['slope'][1], + asc_sop['slope'][2], + asc_sop['offset'][0], + asc_sop['offset'][1], + asc_sop['offset'][2], + asc_sop['power'][0], + asc_sop['power'][1], + asc_sop['power'][2] + )) + if asc_sat: + lines.append("*ASC_SAT {}".format( + asc_sat + )) + + # Output any markers on this clip + for marker in clip.markers: + timecode = otio.opentime.to_timecode( + marker.marked_range.start_time, + edl_rate + ) + + color = marker.color + meta = marker.metadata.get("cmx_3600") + if not color and meta and meta.get("color"): + color = meta.get("color").upper() + comment = (marker.name or '').upper() + lines.append("* LOC: {} {:7} {}".format(timecode, color, comment)) + + # If we are carrying any unhandled CMX 3600 comments on this clip + # then output them blindly. + extra_comments = clip.metadata.get('cmx_3600', {}).get('comments', []) + for comment in extra_comments: + lines.append("* {}".format(comment)) + + return lines + + +def lookahead_and_behind_enumerate(iterable): + prv = None + iterator = iter(iterable) + cur = next(iterator) + for nxt in iterator: + yield (prv, cur, nxt) + prv, cur = cur, nxt + yield (prv, cur, None) + + +def reel_from_clip(clip): + if ( + clip.media_reference + and isinstance(clip.media_reference, otio.schema.Gap) + ): + return 'BL' + elif clip.metadata.get('cmx_3600', {}).get('reel'): + return clip.metadata.get('cmx_3600').get('reel') + return 'AX' + + +class EventLine(object): + def __init__(self, edit_number=0, reel='AX', kind='V', rate=None): + self.__rate = rate + + self.edit_number = edit_number + self.reel = reel + self.kind = kind + + self.source_in = otio.opentime.RationalTime(0.0, rate=rate) + self.source_out = otio.opentime.RationalTime(0.0, rate=rate) + self.record_in = otio.opentime.RationalTime(0.0, rate=rate) + self.record_out = otio.opentime.RationalTime(0.0, rate=rate) + + self.dissolve_length = otio.opentime.RationalTime(0.0, rate) + + def __str__(self): + ser = { + 'edit': self.edit_number, + 'reel': self.reel, + 'kind': self.kind, + 'src_in': otio.opentime.to_timecode(self.source_in, self.__rate), + 'src_out': otio.opentime.to_timecode(self.source_out, self.__rate), + 'rec_in': otio.opentime.to_timecode(self.record_in, self.__rate), + 'rec_out': otio.opentime.to_timecode(self.record_out, self.__rate), + 'diss': int(otio.opentime.to_frames( + self.dissolve_length, + self.__rate + )), + } + + if self.dissolve_length.value > 0: + return "{edit:03d} {reel:8} {kind:5} D {diss:03d} " \ + "{src_in} {src_out} {rec_in} {rec_out}".format(**ser) + else: + return "{edit:03d} {reel:8} {kind:5} C " \ + "{src_in} {src_out} {rec_in} {rec_out}".format(**ser) diff --git a/opentimelineio/algorithms/__init__.py b/opentimelineio/algorithms/__init__.py index ca70119dd2..16fa21a11d 100644 --- a/opentimelineio/algorithms/__init__.py +++ b/opentimelineio/algorithms/__init__.py @@ -26,5 +26,10 @@ # flake8: noqa from .track_algo import ( + track_trimmed_to_range, track_with_expanded_transitions ) + +from .stack_algo 
import ( + flatten_stack +) \ No newline at end of file diff --git a/opentimelineio/algorithms/stack_algo.py b/opentimelineio/algorithms/stack_algo.py new file mode 100644 index 0000000000..9b2a383089 --- /dev/null +++ b/opentimelineio/algorithms/stack_algo.py @@ -0,0 +1,67 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. +# + +__doc__ = """ Algorithms for stack objects. """ + +import copy + +from .. import ( + schema +) +from . import ( + track_algo +) + + +def flatten_stack(in_stack): + """ Flatten a Stack into a single Track. + """ + + if not isinstance(in_stack, schema.Stack): + raise ValueError("Input to flatten_stack must be a Stack") + + flat_track = schema.Track() + flat_track.name = "Flattened" + + def _get_next_item(in_stack, track_index=0, trim_range=None): + if track_index < len(in_stack): + track = in_stack[track_index] + if trim_range is not None: + track = track_algo.track_trimmed_to_range(track, trim_range) + for item in track: + if item.visible(): + yield item + else: + # TODO: in the range... + for more in _get_next_item( + in_stack, + track_index+1, + item.range_in_parent() + ): + yield more + + for item in _get_next_item(in_stack): + flat_track.append(copy.deepcopy(item)) + + return flat_track diff --git a/opentimelineio/algorithms/track_algo.py b/opentimelineio/algorithms/track_algo.py index 1c1d627328..d4af787d56 100644 --- a/opentimelineio/algorithms/track_algo.py +++ b/opentimelineio/algorithms/track_algo.py @@ -32,7 +32,45 @@ ) -def track_with_expanded_transitions(in_seq): +def track_trimmed_to_range(in_track, trim_range): + """Returns a new track that is a copy of the in_track, but with items + outside the trim_range removed and items on the ends trimmed to the + trim_range. Note that the track is never expanded, only shortened. 
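+
+    For example, given a track of three 50-frame clips (A, B, C, all at
+    24 fps), trimming to TimeRange(RationalTime(60, 24), RationalTime(90, 24))
+    drops clip A entirely and keeps only the last 40 frames of clip B, while
+    clip C is untouched.
+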
+ Please note that you could do nearly the same thing non-destructively by + just setting the Track's source_range but sometimes you want to really cut + away the stuff outside and that's what this function is meant for.""" + new_track = copy.deepcopy(in_track) + + # iterate backwards so we can delete items + for c, child in reversed(list(enumerate(new_track))): + child_range = child.range_in_parent() + if not trim_range.overlaps(child_range): + # completely outside the trim range, so we discard it + del new_track[c] + elif trim_range.contains(child_range): + # completely contained, keep the whole thing + pass + else: + # we need to clip the end(s) + child_source_range = child.trimmed_range() + + # should we trim the start? + if trim_range.start_time > child_range.start_time: + trim_amount = trim_range.start_time - child_range.start_time + child_source_range.start_time += trim_amount + child_source_range.duration -= trim_amount + + # should we trim the end? + trim_end = trim_range.end_time_exclusive() + child_end = child_range.end_time_exclusive() + if trim_end < child_end: + trim_amount = child_end - trim_end + child_source_range.duration -= trim_amount + + return new_track + + +def track_with_expanded_transitions(in_track): """Expands transitions such that neighboring clips are trimmed into regions of overlap. @@ -48,14 +86,14 @@ def track_with_expanded_transitions(in_seq): result_track = [] - seq_iter = iter(in_seq) + seq_iter = iter(in_track) prev_thing = None thing = next(seq_iter, None) next_thing = next(seq_iter, None) while thing is not None: if isinstance(thing, schema.Transition): - result_track.append(_expand_transition(thing, in_seq)) + result_track.append(_expand_transition(thing, in_track)) else: # not a transition, but might be trimmed by one before or after # in the track diff --git a/opentimelineio/schema/timeline.py b/opentimelineio/schema/timeline.py index 269adbd231..81bedef34f 100644 --- a/opentimelineio/schema/timeline.py +++ b/opentimelineio/schema/timeline.py @@ -81,7 +81,7 @@ def __repr__(self): def each_child( self, search_range=None, - descended_from_type=core.Composition + descended_from_type=core.Composable ): return self.tracks.each_child(search_range, descended_from_type) diff --git a/tests/test_cmx_3600_adapter.py b/tests/test_cmx_3600_adapter.py index 849f017761..36e2a85596 100755 --- a/tests/test_cmx_3600_adapter.py +++ b/tests/test_cmx_3600_adapter.py @@ -372,17 +372,153 @@ def test_nucoda_edl_write(self): adapter_name='cmx_3600', style='nucoda' ) + + expected = \ + 'TITLE: test_nucoda_timeline\n\n' \ + '001 AX V C ' \ + '00:00:00:00 00:00:00:05 00:00:00:00 00:00:00:05\n' \ + '* FROM CLIP NAME: test clip1\n' \ + '* FROM FILE: S:\\var\\tmp\\test.exr\n' \ + '002 AX V C ' \ + '00:00:00:00 00:00:00:05 00:00:00:05 00:00:00:10\n' \ + '* FROM CLIP NAME: test clip2\n' \ + '* FROM FILE: S:\\var\\tmp\\test.exr\n' + + self.assertEqual(result, expected) + + def test_nucoda_edl_write_with_transition(self): + track = otio.schema.Track() + tl = otio.schema.Timeline( + "CrossDissolve_Day-Night_Long_1 from CZuber", + tracks=[track] + ) + + cl = otio.schema.Clip( + metadata={'cmx_3600': {'reel': 'Reel1'}}, + source_range=otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(131.0, 24.0), + duration=otio.opentime.RationalTime(102.0, 24.0) + ) + ) + trans = otio.schema.Transition( + in_offset=otio.opentime.RationalTime(57.0, 24.0), + out_offset=otio.opentime.RationalTime(43.0, 24.0) + ) + cl2 = otio.schema.Clip( + metadata={'cmx_3600': {'reel': 'Reel2'}}, + 
source_range=otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(280.0, 24.0), + duration=otio.opentime.RationalTime(143.0, 24.0) + ) + ) + cl3 = otio.schema.Clip( + metadata={'cmx_3600': {'reel': 'Reel3'}}, + source_range=otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(0.0, 24.0), + duration=otio.opentime.RationalTime(24.0, 24.0) + ) + ) + tl.tracks[0].extend([cl, trans, cl2, cl3]) + + result = otio.adapters.write_to_string( + tl, + adapter_name='cmx_3600', + style='nucoda' + ) + + expected = \ + 'TITLE: CrossDissolve_Day-Night_Long_1 from CZuber\n\n' \ + '001 Reel1 V C 00:00:05:11 00:00:07:08 ' \ + '00:00:00:00 00:00:01:21\n' \ + '002 Reel1 V C 00:00:07:08 00:00:07:08 ' \ + '00:00:01:21 00:00:01:21\n' \ + '002 Reel2 V D 100 00:00:09:07 00:00:17:15 ' \ + '00:00:01:21 00:00:10:05\n' \ + '003 Reel3 V C 00:00:00:00 00:00:01:00 ' \ + '00:00:10:05 00:00:11:05\n' + + self.assertEqual(result, expected) + + def test_nucoda_edl_write_with_double_transition(self): + track = otio.schema.Track() + tl = otio.schema.Timeline(tracks=[track]) + + cl = otio.schema.Clip( + metadata={'cmx_3600': {'reel': 'Reel1'}}, + source_range=otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(24.0, 24.0), + duration=otio.opentime.RationalTime(24.0, 24.0) + ) + ) + trans = otio.schema.Transition( + in_offset=otio.opentime.RationalTime(6.0, 24.0), + out_offset=otio.opentime.RationalTime(6.0, 24.0) + ) + cl2 = otio.schema.Clip( + metadata={'cmx_3600': {'reel': 'Reel2'}}, + source_range=otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(24.0, 24.0), + duration=otio.opentime.RationalTime(24.0, 24.0) + ) + ) + trans2 = otio.schema.Transition( + in_offset=otio.opentime.RationalTime(6.0, 24.0), + out_offset=otio.opentime.RationalTime(6.0, 24.0) + ) + cl3 = otio.schema.Clip( + metadata={'cmx_3600': {'reel': 'Reel3'}}, + source_range=otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(24.0, 24.0), + duration=otio.opentime.RationalTime(24.0, 24.0) + ) + ) + tl.tracks[0].extend([cl, trans, cl2, trans2, cl3]) + + result = otio.adapters.write_to_string( + tl, + adapter_name='cmx_3600', + style='nucoda' + ) + + expected = \ + '001 Reel1 V C ' \ + '00:00:01:00 00:00:01:18 00:00:00:00 00:00:00:18\n' \ + '002 Reel1 V C ' \ + '00:00:01:18 00:00:01:18 00:00:00:18 00:00:00:18\n' \ + '002 Reel2 V D 012 ' \ + '00:00:00:18 00:00:01:18 00:00:00:18 00:00:01:18\n' \ + '003 Reel2 V C ' \ + '00:00:01:18 00:00:01:18 00:00:01:18 00:00:01:18\n' \ + '003 Reel3 V D 012 ' \ + '00:00:00:18 00:00:02:00 00:00:01:18 00:00:03:00\n' + + self.assertEqual(result, expected) + + def test_custom_reel_names(self): + track = otio.schema.Track() + tl = otio.schema.Timeline(tracks=[track]) + tr = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(1.0, 24.0), + duration=otio.opentime.RationalTime(24.0, 24.0) + ) + cl = otio.schema.Clip( + source_range=tr + ) + cl.metadata['cmx_3600'] = { + 'reel': 'v330_21f' + } + tl.tracks[0].append(cl) + + result = otio.adapters.write_to_string( + tl, + adapter_name='cmx_3600', + style='nucoda' + ) + self.assertEqual( result, - 'TITLE: test_nucoda_timeline\n\n' - '001 AX V C ' - '00:00:00:00 00:00:00:05 00:00:00:00 00:00:00:05\n' - '* FROM CLIP NAME: test clip1\n' - '* FROM FILE: S:\\var\\tmp\\test.exr\n\n' - '002 AX V C ' - '00:00:00:00 00:00:00:05 00:00:00:05 00:00:00:10\n' - '* FROM CLIP NAME: test clip2\n' - '* FROM FILE: S:\\var\\tmp\\test.exr\n' + '001 v330_21f V C ' + '00:00:00:01 00:00:01:01 00:00:00:00 00:00:01:00\n' ) def 
test_mixed_avid_nucoda_read_raises_exception(self): @@ -409,5 +545,22 @@ def test_invalid_edl_style_raises_exception(self): ) +class UtilsTest(unittest.TestCase): + + def test_lookahead_and_behind_enumerate(self): + result = [] + for window in cmx_3600.lookahead_and_behind_enumerate(range(0,6)): + result.append(window) + + expected = [(None, 0, 1), + (0, 1, 2), + (1, 2, 3), + (2, 3, 4), + (3, 4, 5), + (4, 5, None)] + + self.assertEqual(result, expected) + + if __name__ == '__main__': unittest.main() diff --git a/tests/test_composition.py b/tests/test_composition.py index a3c6f35c66..de5d6a478b 100755 --- a/tests/test_composition.py +++ b/tests/test_composition.py @@ -60,6 +60,70 @@ def test_parent_manip(self): co = otio.core.Composition(children=[it]) self.assertEqual(it._parent, co) + def test_each_child_recursion(self): + tl = otio.schema.Timeline(name="TL") + + tr1 = otio.schema.Track(name="tr1") + tl.tracks.append(tr1) + c1 = otio.schema.Clip(name="c1") + tr1.append(c1) + c2 = otio.schema.Clip(name="c2") + tr1.append(c2) + c3 = otio.schema.Clip(name="c3") + tr1.append(c3) + + tr2 = otio.schema.Track(name="tr2") + tl.tracks.append(tr2) + c4 = otio.schema.Clip(name="c4") + tr2.append(c4) + c5 = otio.schema.Clip(name="c5") + tr2.append(c5) + + st = otio.schema.Stack(name="st") + tr2.append(st) + c6 = otio.schema.Clip(name="c6") + st.append(c6) + tr3 = otio.schema.Track(name="tr3") + c7 = otio.schema.Clip(name="c7") + tr3.append(c7) + c8 = otio.schema.Clip(name="c8") + tr3.append(c8) + st.append(tr3) + + self.assertEqual(2, len(tl.tracks)) + self.assertEqual(3, len(tr1)) + self.assertEqual(3, len(tr2)) + self.assertEqual(2, len(st)) + self.assertEqual(2, len(tr3)) + + clips = list(tl.each_clip()) + self.assertEqual( + [c1, c2, c3, c4, c5, c6, c7, c8], + clips + ) + + all_tracks = list(tl.each_child( + descended_from_type=otio.schema.Track + )) + self.assertEqual( + [tr1, tr2, tr3], + all_tracks + ) + + all_stacks = list(tl.each_child( + descended_from_type=otio.schema.Stack + )) + self.assertEqual( + [st], + all_stacks + ) + + all_children = list(tl.each_child()) + self.assertEqual( + [tr1, c1, c2, c3, tr2, c4, c5, st, c6, tr3, c7, c8], + all_children + ) + class StackTest(unittest.TestCase): diff --git a/tests/test_stack_algo.py b/tests/test_stack_algo.py new file mode 100644 index 0000000000..08188d0257 --- /dev/null +++ b/tests/test_stack_algo.py @@ -0,0 +1,394 @@ +# +# Copyright 2017 Pixar Animation Studios +# +# Licensed under the Apache License, Version 2.0 (the "Apache License") +# with the following modification; you may not use this file except in +# compliance with the Apache License and the following modification to it: +# Section 6. Trademarks. is deleted and replaced with: +# +# 6. Trademarks. This License does not grant permission to use the trade +# names, trademarks, service marks, or product names of the Licensor +# and its affiliates, except as required to comply with Section 4(c) of +# the License and to reproduce the content of the NOTICE file. +# +# You may obtain a copy of the Apache License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the Apache License with the above modification is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the Apache License for the specific +# language governing permissions and limitations under the Apache License. 
+# + +"""Test file for the stack algorithms library.""" + +import unittest +import opentimelineio as otio + + +class StackAlgoTests(unittest.TestCase): + """ test harness for stack algo functions """ + + def setUp(self): + self.trackZ = otio.adapters.read_from_string(""" + { + "OTIO_SCHEMA": "Track.1", + "children": [ + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "Z", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 150 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + } + ], + "effects": [], + "kind": "Video", + "markers": [], + "metadata": {}, + "name": "Sequence1", + "source_range": null + } + """, "otio_json") + + self.trackABC = otio.adapters.read_from_string(""" + { + "OTIO_SCHEMA": "Track.1", + "children": [ + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "A", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + }, + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "B", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + }, + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "C", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + } + ], + "effects": [], + "kind": "Video", + "markers": [], + "metadata": {}, + "name": "Sequence1", + "source_range": null + } + """, "otio_json") + + self.trackDgE = otio.adapters.read_from_string(""" + { + "OTIO_SCHEMA": "Track.1", + "children": [ + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "D", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + }, + { + "OTIO_SCHEMA": "Gap.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "g", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + }, + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "E", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + } + ], + "effects": [], + "kind": "Video", + "markers": [], + "metadata": {}, + "name": "Sequence1", + "source_range": null + } + """, "otio_json") + + self.trackgFg = otio.adapters.read_from_string(""" + { + "OTIO_SCHEMA": "Track.1", + "children": 
[ + { + "OTIO_SCHEMA": "Gap.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "g1", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + }, + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "F", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + }, + { + "OTIO_SCHEMA": "Gap.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "g2", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + } + ], + "effects": [], + "kind": "Video", + "markers": [], + "metadata": {}, + "name": "Sequence1", + "source_range": null + } + """, "otio_json") + + def test_flatten_single_track(self): + stack = otio.schema.Stack(children=[ + self.trackABC + ]) + flat_track = otio.algorithms.flatten_stack(stack) + self.assertEqual( + flat_track[:], + self.trackABC[:] + ) + + def test_flatten_obscured_track(self): + stack = otio.schema.Stack(children=[ + self.trackABC, + self.trackZ + ]) + flat_track = otio.algorithms.flatten_stack(stack) + self.assertEqual( + flat_track[:], + self.trackABC[:] + ) + + stack = otio.schema.Stack(children=[ + self.trackZ, + self.trackABC + ]) + flat_track = otio.algorithms.flatten_stack(stack) + self.assertEqual( + flat_track[:], + self.trackZ[:] + ) + + # def assertOTIOSame(self, a, b): + # def j(o): + # return otio.adapters.write_to_string(o, "otio_json") + # self.assertEqual(j(a), j(b)) + + def test_flatten_gaps(self): + stack = otio.schema.Stack(children=[ + self.trackDgE, + self.trackABC + ]) + flat_track = otio.algorithms.flatten_stack(stack) + self.assertEqual(flat_track[0], self.trackDgE[0]) + self.assertEqual(flat_track[1], self.trackABC[1]) + self.assertEqual(flat_track[2], self.trackDgE[2]) + + stack = otio.schema.Stack(children=[ + self.trackgFg, + self.trackABC + ]) + flat_track = otio.algorithms.flatten_stack(stack) + self.assertEqual(flat_track[0], self.trackABC[0]) + self.assertEqual(flat_track[1], self.trackgFg[1]) + self.assertEqual(flat_track[2], self.trackABC[2]) + + def test_flatten_gaps_with_trims(self): + stack = otio.schema.Stack(children=[ + self.trackDgE, + self.trackZ + ]) + flat_track = otio.algorithms.flatten_stack(stack) + self.assertEqual(flat_track[0], self.trackDgE[0]) + self.assertEqual(flat_track[1].name, "Z") + self.assertEqual( + flat_track[1].source_range, + otio.opentime.TimeRange( + otio.opentime.RationalTime(50, 24), + otio.opentime.RationalTime(50, 24) + ) + ) + self.assertEqual(flat_track[2], self.trackDgE[2]) + + stack = otio.schema.Stack(children=[ + self.trackgFg, + self.trackZ + ]) + flat_track = otio.algorithms.flatten_stack(stack) + self.assertEqual(flat_track[0].name, "Z") + self.assertEqual( + flat_track[0].source_range, + otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(50, 24) + ) + ) + self.assertEqual(flat_track[1], self.trackgFg[1]) + self.assertEqual(flat_track[2].name, "Z") + self.assertEqual( + flat_track[2].source_range, + 
otio.opentime.TimeRange( + otio.opentime.RationalTime(100, 24), + otio.opentime.RationalTime(50, 24) + ) + ) diff --git a/tests/test_sequence_algo.py b/tests/test_track_algo.py similarity index 53% rename from tests/test_sequence_algo.py rename to tests/test_track_algo.py index 88e28ce7c4..7110e5651d 100644 --- a/tests/test_sequence_algo.py +++ b/tests/test_track_algo.py @@ -235,3 +235,181 @@ def DISABLED_test_expand_track(self): seq[-1].source_range, expanded_seq[-1].source_range ) + + +class TrackTrimmingTests(unittest.TestCase): + """ test harness for track trimming function """ + + def make_sample_track(self): + return otio.adapters.read_from_string(""" + { + "OTIO_SCHEMA": "Track.1", + "children": [ + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "A", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + }, + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "B", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + }, + { + "OTIO_SCHEMA": "Clip.1", + "effects": [], + "markers": [], + "media_reference": null, + "metadata": {}, + "name": "C", + "source_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 50 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 0.0 + } + } + } + ], + "effects": [], + "kind": "Video", + "markers": [], + "metadata": {}, + "name": "Sequence1", + "source_range": null + } + """, "otio_json") + + def test_trim_to_existing_range(self): + original_track = self.make_sample_track() + self.assertEqual( + original_track.trimmed_range(), + otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(0, 24), + duration=otio.opentime.RationalTime(150, 24) + ) + ) + + # trim to the exact range it already has + trimmed = otio.algorithms.track_trimmed_to_range( + original_track, + otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(0, 24), + duration=otio.opentime.RationalTime(150, 24) + ) + ) + # it shouldn't have changed at all + self.assertEqual(original_track, trimmed) + + def test_trim_to_longer_range(self): + original_track = self.make_sample_track() + # trim to a larger range + trimmed = otio.algorithms.track_trimmed_to_range( + original_track, + otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(-10, 24), + duration=otio.opentime.RationalTime(160, 24) + ) + ) + # it shouldn't have changed at all + self.assertEqual(original_track, trimmed) + + def test_trim_front(self): + original_track = self.make_sample_track() + # trim off the front (clip A and part of B) + trimmed = otio.algorithms.track_trimmed_to_range( + original_track, + otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(60, 24), + duration=otio.opentime.RationalTime(90, 24) + ) + ) + self.assertNotEqual(original_track, trimmed) + self.assertEqual(len(trimmed), 2) + self.assertEqual( + trimmed.trimmed_range(), + otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(0, 24), + duration=otio.opentime.RationalTime(90, 24) + ) + ) + # did clip B get trimmed? 
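+        # B's source covered frames 0-50; trimming 10 frames off the front
+        # of the track leaves start_time 10 and duration 40.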
+ self.assertEqual(trimmed[0].name, "B") + self.assertEqual( + trimmed[0].trimmed_range(), + otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(10, 24), + duration=otio.opentime.RationalTime(40, 24) + ) + ) + # clip C should have been left alone + self.assertEqual(trimmed[1], original_track[2]) + + def test_trim_end(self): + original_track = self.make_sample_track() + # trim off the end (clip C and part of B) + trimmed = otio.algorithms.track_trimmed_to_range( + original_track, + otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(0, 24), + duration=otio.opentime.RationalTime(90, 24) + ) + ) + self.assertNotEqual(original_track, trimmed) + self.assertEqual(len(trimmed), 2) + self.assertEqual( + trimmed.trimmed_range(), + otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(0, 24), + duration=otio.opentime.RationalTime(90, 24) + ) + ) + # clip A should have been left alone + self.assertEqual(trimmed[0], original_track[0]) + # did clip B get trimmed? + self.assertEqual(trimmed[1].name, "B") + self.assertEqual( + trimmed[1].trimmed_range(), + otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime(0, 24), + duration=otio.opentime.RationalTime(40, 24) + ) + )