Refactor the scene cut detection: make it generic and add another algorithm as an option
parent de16df05ef
commit 425c842bfd
auto_splitter.py (104 lines)
@@ -1,104 +0,0 @@
import os
import re
import subprocess

import bpy

from vse_toolbox import bl_utils


def launch_split(movie_strip, threshold, frame_start=None, frame_end=None):
    """Launch ffmpeg command to detect changing frames from a movie strip.

    Args:
        movie_strip (bpy.types.Sequence): blender sequence strip to detect changes.
        threshold (float): value of the detection factor (from 0 to 1).
        frame_start (int, optional): first frame to detect.
            Defaults to None.
        frame_end (int, optional): last frame to detect.
            Defaults to None.

    Returns:
        str: ffmpeg command log.
    """

    path = bl_utils.abspath(movie_strip.filepath)
    fps = bpy.context.scene.render.fps

    if frame_start is None:
        frame_start = 0
    if frame_end is None:
        frame_end = movie_strip.frame_duration

    frame_start = frame_start - movie_strip.frame_start

    #frame_start += movie_strip.frame_offset_start
    #frame_end -= movie_strip.frame_offset_end

    # Launch ffmpeg command to split
    ffmpeg_cmd = get_command(str(path), threshold, frame_start, frame_end, fps)

    print(ffmpeg_cmd)
    process = subprocess.Popen(
        ffmpeg_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True)

    return process


def get_command(path, threshold, frame_start, frame_end, fps):
    """Generate the ffmpeg command which detect change from a movie.

    Args:
        path (_type_): path to detect changes.
        threshold (_type_): value of the detection factor (from 0 to 1).
        frame_start (_type_): first frame to detect.
        frame_end (_type_): last frame to detect.
        fps (_type_): framerate of the movie.

    Returns:
        list: ffmpeg command as list for subprocess module.
    """

    start_time = frame_start/fps
    end_time = frame_end/fps

    return [
        'ffmpeg',
        '-i',
        str(path),
        '-vf',
        f"trim=start={start_time}:end={end_time}, select='gt(scene, {threshold})',showinfo",
        '-f',
        'null',
        '-'
    ]


def get_split_time(log, as_frame=True, fps=None):
    """Parse ffmpeg command lines to detect the timecode

    Args:
        log (str): log to parse.
        as_frame (bool, optional): if wanted the timecode as frame number.
            Defaults to True.
        fps (_type_, optional): framerate of the movie (mandatory if as_frame used).
            Defaults to None.

    Returns:
        _type_: _description_
    """
    timecodes = re.findall(r'pts_time:([\d.]+)', log)

    if not timecodes:
        return

    timecode = timecodes[0]

    if as_frame:
        # convert timecode to frame number
        return round(float(timecode) * fps)

    return timecode
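For reference, a minimal sketch of how these removed helpers were chained together, mirroring the old VSETB_OT_auto_split operator further down; the strip name 'my_movie' is a placeholder.

import bpy

from vse_toolbox import auto_splitter

# Grab an existing MOVIE strip from the sequencer ('my_movie' is a placeholder name).
movie_strip = bpy.context.scene.sequence_editor.sequences_all['my_movie']
fps = bpy.context.scene.render.fps

# launch_split spawns ffmpeg; get_split_time parses each log line for a cut.
process = auto_splitter.launch_split(movie_strip, threshold=0.6)
for line in process.stdout:
    frame = auto_splitter.get_split_time(line, as_frame=True, fps=fps)
    if frame:
        print('cut detected at movie frame', frame)
process.wait()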
@@ -460,16 +460,25 @@ class VSETB_OT_import_shots(Operator):

        conformed = False

        scn.sequence_editor_clear()
        scn.sequence_editor_create()
        if import_shots.clear:
            frame_index = 1
            scn.sequence_editor_clear()
            scn.sequence_editor_create()
        else:
            frame_index = scn.frame_end +1

        self.set_sequencer_channels([t.name for t in task_types])
        frame_index = 1


        for sequence in sequences:
            shots_data = tracker.get_shots(sequence=sequence.id)
            sequence_start = frame_index
            for shot_data in shots_data:
                frames = int(shot_data['nb_frames'])
                frames = shot_data['nb_frames']
                if not frames:
                    frames = 100
                    print(f'No nb frames on tracker for {shot_data["name"]}')
                frames = int(frames)
                frame_end = frame_index + frames

                strip = scn.sequence_editor.sequences.new_effect(

@@ -497,9 +506,13 @@ class VSETB_OT_import_shots(Operator):

                print(f'Loading Preview from {preview}')
                channel_index = get_channel_index(f'{task_type.name} Video')
                video_clip = import_movie(preview, frame_start=frame_index, frame_end=frame_end)
                video_clip = import_movie(preview, frame_start=frame_index)
                video_clip.channel = channel_index

                if strip.frame_final_end != video_clip.frame_final_end: # Conform shot duration to longest media
                    frames = video_clip.frame_final_duration
                    strip.frame_final_end = video_clip.frame_final_end

                if video_clip.frame_offset_end:
                    video_clip.color_tag = 'COLOR_01'

@@ -511,8 +524,7 @@ class VSETB_OT_import_shots(Operator):

                # Load Audio
                channel_index = get_channel_index(f'{task_type.name} Audio')
                audio_clip = import_sound(preview, frame_start=frame_index,
                    frame_end=frame_end)
                audio_clip = import_sound(preview, frame_start=frame_index)
                audio_clip.channel = channel_index
                if video_clip.frame_offset_end:
                    audio_clip.color_tag = 'COLOR_01'

@@ -632,6 +644,8 @@ class VSETB_OT_import_shots(Operator):
            row = split.row()
            row.label(icon="ERROR")
            row.label(text='Add at least one Sequence')

        layout.prop(import_shots, 'clear', text='Clear')

    def invoke(self, context, event):
        scn = context.scene

@@ -11,7 +11,7 @@ from vse_toolbox.sequencer_utils import (get_strips, rename_strips, set_channels
    get_channel_index, new_text_strip, get_strip_at, get_channel_name,
    create_shot_strip)

from vse_toolbox import auto_splitter
from vse_toolbox.scene_cut_detection import detect_scene_change
from vse_toolbox.bl_utils import get_scene_settings, get_strip_settings, get_addon_prefs
from shutil import copy2

@@ -176,30 +176,47 @@ class VSETB_OT_set_sequencer(Operator):
        return {"FINISHED"}


class VSETB_OT_auto_split(Operator):
class VSETB_OT_scene_cut_detection(Operator):
    """Launch subprocess with ffmpeg and python to find and create
    shot strips from the video source"""

    bl_idname = "vse_toolbox.auto_split"
    bl_label = "Auto Split"
    bl_description = "Generate shots strips"
    bl_idname = "vse_toolbox.scene_cut_detection"
    bl_label = "Scene Cut Detection"
    bl_description = "Detect scene change and create strips on the destination channel, use crop to restrict detection"
    bl_options = {"REGISTER", "UNDO"}

    threshold: FloatProperty(name="Threshold", default=0.6, min=0, max=1)
    frame_first: IntProperty(name='Start Split')
    frame_last: IntProperty(name='End Split')
    movie_channel_name: EnumProperty(
    animated_threshold: FloatProperty(name="Threshold", default=0.5, min=0, max=1,
        description='Probability for the current frame to introduce a new scene')
    still_threshold: FloatProperty(name="Threshold", default=0.001, min=0, max=1, precision=4,
        description="Noise tolerance, difference ratio between 0 and 1")
    frame_start: IntProperty(name='Start Split')
    frame_end: IntProperty(name='End Split')
    source_channel_name: EnumProperty(
        items=lambda self, ctx: ((c.name, c.name, '') for c in ctx.scene.sequence_editor.channels),
        name='Movie Channel')
        name='Source Channel')
    destination_channel_name: EnumProperty(
        items=lambda self, ctx: ((c.name, c.name, '') for c in ctx.scene.sequence_editor.channels),
        name='Destination Channel')
    movie_type: EnumProperty(
        items=[('ANIMATED', 'Animated', 'Use select filter from ffmpeg, best for animated frames'),
            ('STILL', 'Still', 'Use freezedetect filter from ffmpeg, best for board or text')],
        name='Movie Type')

    def invoke(self, context, event):

        self.frame_first = context.scene.frame_start
        self.frame_last = context.scene.frame_end
        # Select active channel by default
        if strip := context.active_sequence_strip:
            self.source_channel_name = get_channel_name(strip)

        if context.scene.sequence_editor.channels.get('Shots'):
            self.destination_channel_name = 'Shots'

        self.frame_start = context.scene.frame_start
        self.frame_end = context.scene.frame_end

        if context.selected_sequences:
            self.frame_first = min([s.frame_final_start for s in context.selected_sequences])
            self.frame_last = max([s.frame_final_end for s in context.selected_sequences])
            self.frame_start = min([s.frame_final_start for s in context.selected_sequences])
            self.frame_end = max([s.frame_final_end for s in context.selected_sequences])

        return context.window_manager.invoke_props_dialog(self)

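Once the add-on is registered, the renamed operator can also be triggered from a script; a minimal sketch ('INVOKE_DEFAULT' simply pops the properties dialog defined above):

import bpy

# Pop the Scene Cut Detection dialog, assuming the vse_toolbox add-on is enabled
# and the sequencer has at least one channel to pick from.
bpy.ops.vse_toolbox.scene_cut_detection('INVOKE_DEFAULT')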
@@ -210,54 +227,66 @@ class VSETB_OT_auto_split(Operator):
        col.use_property_split = True
        col.use_property_decorate = False

        col.prop(self, 'threshold')
        col.prop(self, 'movie_channel_name')
        row = col.row(align=True)
        row.prop(self, 'movie_type', expand=True)
        if self.movie_type == 'ANIMATED':
            col.prop(self, 'animated_threshold')
        else:
            col.prop(self, 'still_threshold')

        col.prop(self, 'source_channel_name')
        col.prop(self, 'destination_channel_name')

        split_col = col.column(align=True)
        split_col.prop(self, 'frame_first', text='Frame Split First')
        split_col.prop(self, 'frame_last', text='Last')
        split_col.prop(self, 'frame_start', text='Frame Split First')
        split_col.prop(self, 'frame_end', text='Last')

    def execute(self, context):
        if self.source_channel_name == self.destination_channel_name:
            self.report({"ERROR"}, 'Source and Destination cannot be the same channel')
            return {'CANCELLED'}

        context.window_manager.modal_handler_add(self)
        return {'PASS_THROUGH'}

    def modal(self, context, event):

        strips = get_strips(channel=self.movie_channel_name)
        scn = context.scene
        strips = get_strips(channel=self.source_channel_name)

        i = 1
        frame_start = self.frame_first
        frame_start = self.frame_start
        for strip in strips:

            if strip.type != 'MOVIE':
                continue

            # Skip strip outside the frame range to create shot from.
            if strip.frame_final_start >= self.frame_last or strip.frame_final_end <= self.frame_first:
            if strip.frame_final_start >= self.frame_end or strip.frame_final_end <= self.frame_start:
                continue

            threshold = self.animated_threshold if self.movie_type == 'ANIMATED' else self.still_threshold

            process = auto_splitter.launch_split(strip, self.threshold, frame_start=self.frame_first, frame_end=self.frame_last)

            for line in process.stdout:

                # Get frame split from the movie timeline (not from blender strips timeline)
                frame_end = auto_splitter.get_split_time(line, fps=24)
            params = dict(strip=strip, movie_type= self.movie_type, threshold=threshold,
                frame_start=self.frame_start, frame_end=self.frame_end,
                crop=(strip.crop.min_x, strip.crop.max_x, strip.crop.min_y, strip.crop.max_y))

            for frame_end in detect_scene_change(**params):
                if not frame_end:
                    continue

                # Convert movie frame to strips frame
                if frame_start+int(strip.frame_start) < self.frame_first:
                    frame_start = self.frame_first
                if frame_start+int(strip.frame_start) < self.frame_start:
                    frame_start = self.frame_start

                frame_end += int(strip.frame_final_start)
                if frame_end > self.frame_last:
                    frame_end = self.frame_last
                if frame_end > self.frame_end:
                    frame_end = self.frame_end

                create_shot_strip(
                    f'tmp_shot_{str(i).zfill(3)}',
                    start=frame_start,
                    end=frame_end
                    end=frame_end,
                    channel=self.destination_channel_name
                )

                i += 1

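The frame conversion and clamping done above can be restated as a small pure function; a sketch with example values only (the function name is illustrative, not part of the add-on):

# Illustrative restatement of the conversion above: a frame detected in the
# movie's own timeline is shifted by the strip's final start and clamped to
# the requested split range. The numbers below are example values only.
def movie_frame_to_timeline_frame(detected_frame, strip_final_start, range_start, range_end):
    frame = detected_frame + int(strip_final_start)
    return max(range_start, min(frame, range_end))

print(movie_frame_to_timeline_frame(42, strip_final_start=101, range_start=101, range_end=250))  # 143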
@@ -265,14 +294,15 @@ class VSETB_OT_auto_split(Operator):

            bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)

            process.wait()
            #process.wait()

            # Last strip:
            if frame_start < self.frame_last:
            if frame_start < self.frame_end:
                create_shot_strip(
                    f'tmp_shot_{str(i).zfill(3)}',
                    start=frame_start,
                    end=self.frame_last
                    end=self.frame_end,
                    channel=self.destination_channel_name
                )

        return {'FINISHED'}

@@ -705,7 +735,7 @@ class VSETB_OT_update_media(Operator):

    @classmethod
    def poll(cls, context):
        if context.active_sequence_strip .type not in ('MOVIE', 'SOUND'):
        if not (strip := context.active_sequence_strip) or strip.type not in ('MOVIE', 'SOUND'):
            cls.poll_message_set('No active AUDIO or MOVIE strips')
            return

@@ -761,7 +791,7 @@ def unregister_keymaps():
classes = (
    VSETB_OT_rename,
    VSETB_OT_set_sequencer,
    VSETB_OT_auto_split,
    VSETB_OT_scene_cut_detection,
    VSETB_OT_set_stamps,
    VSETB_OT_show_waveform,
    VSETB_OT_previous_shot,

@@ -355,7 +355,9 @@ class VSETB_OT_import_spreadsheet(Operator):
        #print(SPREADSHEET[:2])

        cell_types = project.get_cell_types()
        cell_names = {k: spreadsheet.cells[k].import_name for k in header if k}

        header_enabled = [x for x in header if spreadsheet.cells[x].enabled]
        cell_names = {x: spreadsheet.cells[x].import_name for x in header_enabled if x}

        #separator = spreadsheet.separator.replace('\\n', '\n').replace('\\t', '\t').replace('\\r', '\r')

@@ -364,10 +366,12 @@ class VSETB_OT_import_spreadsheet(Operator):
        frame_start = scn.frame_start
        for row in SPREADSHEET[1:]:
            #print(row)
            cell_data = {cell_names[k]: v for k, v in zip(header, row) if k}
            #cell_data = {k: v for k, v in zip(header, row)}
            cell_data = {cell_names[k]: v for k, v in zip(header, row) if k in cell_names}
            shot_name = cell_data['Shot']

            if not shot_name:
                raise Exception()

            #strip = next((s for s in sequencer if s.vsetb_strip_settings.source_name == shot_name), None)
            strip = next((s for s in shot_strips if s.name == shot_name), None)

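The spreadsheet import now only maps columns whose cells are enabled; a small illustration of the new comprehension with purely hypothetical header and row data:

# Hypothetical data: only 'Shot' and 'Description' are enabled columns,
# so cell_names only contains those keys.
header = ['Shot', 'Frames', 'Description']
cell_names = {'Shot': 'Shot', 'Description': 'Description'}
row = ['SH010', '48', 'Opening pan']

cell_data = {cell_names[k]: v for k, v in zip(header, row) if k in cell_names}
print(cell_data)  # {'Shot': 'SH010', 'Description': 'Opening pan'}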
@@ -0,0 +1,148 @@
import os
import re
import subprocess

import bpy

from vse_toolbox import bl_utils


def detect_scene_change(strip, movie_type="ANIMATED", threshold=0.5, frame_start=None, frame_end=None, crop=None):
    """Launch an ffmpeg subprocess to detect scene changes in a movie strip.

    Args:
        strip (bpy.types.Sequence): blender movie strip to analyse.
        movie_type (str): 'ANIMATED' uses the ffmpeg select filter,
            'STILL' uses the freezedetect filter.
        threshold (float): value of the detection factor (from 0 to 1).
        frame_start (int, optional): first frame to detect.
            Defaults to None.
        frame_end (int, optional): last frame to detect.
            Defaults to None.
        crop (tuple, optional): (min_x, max_x, min_y, max_y) crop in pixels to
            restrict the detection area. Defaults to None.

    Returns:
        iterator: frame numbers (movie timeline) where a cut is detected.
    """

    path = bl_utils.abspath(strip.filepath)
    fps = bpy.context.scene.render.fps

    if frame_start is None:
        frame_start = 0
    if frame_end is None:
        frame_end = strip.frame_duration

    frame_start = frame_start - strip.frame_start

    start_time = frame_start/fps
    end_time = frame_end/fps

    #frame_start += strip.frame_offset_start
    #frame_end -= strip.frame_offset_end

    if movie_type == 'ANIMATED':
        return select_generator(path, start_time=start_time, end_time=end_time, threshold=threshold, crop=crop)

    elif movie_type == 'STILL':
        return freeze_detect_generator(path, start_time=start_time, end_time=end_time, threshold=threshold, crop=crop)

    else:
        raise Exception(f'movie_type: {movie_type} not implemented')


def freeze_detect_generator(path, start_time, end_time, threshold=0.005, crop=None):
    """Run ffmpeg with the freezedetect filter and yield the detected cut frames.

    Best suited for mostly still footage (boards or text).

    Args:
        path (str): path of the movie to analyse.
        start_time (float): start of the analysed range, in seconds.
        end_time (float): end of the analysed range, in seconds.
        threshold (float): noise tolerance passed to freezedetect (from 0 to 1).
        crop (sequence, optional): (min_x, max_x, min_y, max_y) crop in pixels.
            Defaults to None.

    Returns:
        iterator: frame numbers where a freeze ends, i.e. the image changes.
    """

    if crop is None:
        crop = [0, 0, 0, 0]

    crop_expr = f"crop=iw-{crop[0]}-{crop[1]}:ih-{crop[2]}-{crop[3]}:{crop[0]}:{crop[-1]}"

    command = [
        'ffmpeg',
        '-nostats',
        '-i',
        str(path),
        '-ss', str(start_time),
        '-t', str(end_time),
        '-vf',
        f"{crop_expr}, freezedetect=n={threshold}:d=0.01,metadata=print",
        '-f',
        'null',
        '-'
    ]

    print(command)

    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True)

    def parse_split_time(log):
        print(log)
        timecodes = re.findall(r'freeze_end: ([\d.]+)', log)
        if timecodes:
            return round(float(timecodes[0]) * bpy.context.scene.render.fps)

    return filter(None, (parse_split_time(x) for x in process.stdout))

def select_generator(path, start_time, end_time, threshold, crop=None):
    """Run ffmpeg with the select filter and yield the detected cut frames.

    Best suited for animated footage.

    Args:
        path (str): path of the movie to analyse.
        start_time (float): start of the analysed range, in seconds.
        end_time (float): end of the analysed range, in seconds.
        threshold (float): scene change probability threshold (from 0 to 1).
        crop (sequence, optional): (min_x, max_x, min_y, max_y) crop in pixels.
            Defaults to None.

    Returns:
        iterator: frame numbers where a scene change is detected.
    """

    if crop is None:
        crop = [0, 0, 0, 0]

    crop_expr = f"crop=iw-{crop[0]}-{crop[1]}:ih-{crop[2]}-{crop[3]}:{crop[0]}:{crop[-1]}"

    command = [
        'ffmpeg',
        '-nostats',
        '-i',
        str(path),
        '-ss', str(start_time),
        '-t', str(end_time),
        '-vf',
        f"{crop_expr}, select='gt(scene, {threshold})', showinfo",
        '-f',
        'null',
        '-'
    ]

    print(command)

    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True)

    def parse_split_time(log):
        print(log)
        timecodes = re.findall(r'pts_time:([\d.]+)', log)
        if timecodes:
            return round(float(timecodes[0]) * bpy.context.scene.render.fps)

    return filter(None, (parse_split_time(x) for x in process.stdout))

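A minimal sketch of how the new generator is consumed outside the operator (same pattern as VSETB_OT_scene_cut_detection above); the strip name 'my_movie' is a placeholder:

import bpy

from vse_toolbox.scene_cut_detection import detect_scene_change

# 'my_movie' is a placeholder; any MOVIE strip in the sequencer works.
strip = bpy.context.scene.sequence_editor.sequences_all['my_movie']

# Iterate the detected cuts, expressed as frames in the movie's own timeline.
for frame in detect_scene_change(strip, movie_type='ANIMATED', threshold=0.5,
        frame_start=bpy.context.scene.frame_start,
        frame_end=bpy.context.scene.frame_end):
    print('cut detected at movie frame', frame)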
@@ -585,12 +585,12 @@ def update_text_strips(scene):
        strip.text = strip['text_pattern'].format_map(MissingKey(**format_data))


def create_shot_strip(name, start, end):
def create_shot_strip(name, start, end, channel='Shots'):

    shot_strip = bpy.context.scene.sequence_editor.sequences.new_effect(
        name,
        'COLOR',
        get_channel_index('Shots'),
        get_channel_index(channel),
        frame_start=start,
        frame_end=end
    )

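With the new channel argument the strip can land on any named channel; a quick sketch (the 'Shots' channel is assumed to already exist in the current sequencer):

from vse_toolbox.sequencer_utils import create_shot_strip

# Create a placeholder shot strip on the 'Shots' channel between frames 1 and 50.
create_shot_strip('tmp_shot_001', start=1, end=50, channel='Shots')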
@@ -179,7 +179,6 @@ class VSETB_PT_sequencer(VSETB_main, Panel):

        col = layout.column()
        col.operator('vse_toolbox.set_sequencer', text='Set-Up Sequencer', icon='SEQ_SEQUENCER')
        col.operator('vse_toolbox.auto_split', text='Auto Split Shots')
        col.operator('vse_toolbox.strips_rename', text=f'Rename {channel}', icon='SORTALPHA')
        col.operator('vse_toolbox.set_stamps', text='Set Stamps', icon='COLOR')
        col.operator("vse_toolbox.collect_files", text='Collect Files', icon='PACKAGE')

@@ -456,6 +455,7 @@ class VSETB_MT_main_menu(Menu):
        layout.operator('vse_toolbox.remove_channel', text='Remove Channel', icon='TRIA_DOWN_BAR')
        layout.separator()
        layout.operator('vse_toolbox.merge_shot_strips', text='Merge Shots')
        layout.operator('vse_toolbox.scene_cut_detection', text='Scene Cut Detection', icon='SCULPTMODE_HLT')

def draw_vse_toolbox_menu(self, context):
    self.layout.menu("VSETB_MT_main_menu")

@@ -252,6 +252,7 @@ class ImportShots(PropertyGroup):

    #shot_folder_template: StringProperty(
    # name="Shot Template", default="$PROJECT_ROOT/sequences/sq{sequence}/sh{shot}")
    clear: BoolProperty(default=True)

    video_template : StringProperty(
        name="Video Path", default="//sources/{sequence}_{shot}_{task}.{ext}")