trailing whitespaces cleanup
parent f3646e37df
commit 12cce98e41
@@ -17,7 +17,7 @@ Activate / deactivate all masks using MA layers
1.2.1

- added: bundle app_template to load "GR Render" workspace from it
- added: ui button in dopesheet to load GP render workspace if not loaded

1.2.0

@@ -91,7 +91,7 @@ Activate / deactivate all masks using MA layers

0.9.4

- feat: `Renumber files on disk` option using the number in file outputs (under advanced gp render options)
- feat: new `Check for problems` button, checks for problems in layer state, missing file outputs and broken gp modifier targets, then reports them
- added: clean nodes now also rearranges nodes inside nodegroups
- changed: `Check layers` now triggers `export layer infos` automatically.

@@ -121,7 +121,7 @@ Activate / deactivate all masks using MA layers
- feat: Select a file output node. Set active file slot path and settings to main Scene output.
  - Button in GP render panel with `Advanced` options active.
  - Or search operator label `Set Active File Output To Composite`
  - if Composite is already linked, a pop-up asks whether the link should be replaced

0.7.0

@@ -19,7 +19,7 @@ class GPEXP_OT_add_layer_to_render(bpy.types.Operator):
        if not layer:
            self.report({'ERROR'}, 'No active layer')
            return {"CANCELLED"}

        ct = 0
        # send scene ?
        hided = 0

@@ -30,7 +30,7 @@ class GPEXP_OT_add_layer_to_render(bpy.types.Operator):
                l.viewlayer_render = fn.get_view_layer('exclude').name
                continue
            gen_vlayer.get_set_viewlayer_from_gp(ob, l)

            if l.hide:
                hided += 1
            ct += 1

@@ -64,7 +64,7 @@ class GPEXP_OT_add_objects_to_render(bpy.types.Operator):
        else:
            scn = context.scene


        excludes = [] # ['MA', 'IN'] # Get list dynamically
        if self.mode == 'SELECTED':
            gen_vlayer.export_gp_objects([o for o in context.selected_objects if o.type == 'GPENCIL'], exclude_list=excludes, scene=scn)

@@ -106,7 +106,7 @@ GPEXP_OT_add_objects_to_render,
GPEXP_OT_split_to_scene,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

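Note: the module-level `classes` tuple followed by a `register()` loop, as in the hunk above, is the registration idiom used by every file touched in this commit. For readers unfamiliar with Blender add-on registration, here is a minimal, self-contained sketch of that same pattern, using a hypothetical HELLO_OT_example operator that is not part of this add-on:

import bpy

class HELLO_OT_example(bpy.types.Operator):
    """Hypothetical operator, only here to illustrate the register/unregister loop"""
    bl_idname = "wm.hello_example"
    bl_label = "Hello Example"

    def execute(self, context):
        self.report({'INFO'}, "Hello from a registered operator")
        return {'FINISHED'}

classes = (
    HELLO_OT_example,
)

def register():
    # register every class listed in the module-level tuple
    for cls in classes:
        bpy.utils.register_class(cls)

def unregister():
    # unregister in reverse order
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)
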
@@ -25,7 +25,7 @@ def batch_setup_render_scene(context=None, render_scn=None):
    for fo in render_scn.node_tree.nodes:
        if fo.type == 'OUTPUT_FILE':
            fn.renumber_keep_existing(fo)

    ## Swap to bg_cam (if any)
    # if render_scn.objects.get('bg_cam') and (not render_scn.camera or render_scn.camera.name != 'bg_cam'):
    #     print('Swap to bg cam')

@@ -66,42 +66,42 @@ class GPEXP_OT_render_auto_build(bpy.types.Operator):
    @classmethod
    def poll(cls, context):
        return context.object and context.object.type == 'GPENCIL'

    # timer : bpy.props.FloatProperty(default=0.1, options={'SKIP_SAVE'})

    excluded_prefix : bpy.props.StringProperty(
        name='Excluded Layer By Prefix', default='GP, RG, PO, MA',
        description='Exclude layers from render by prefix (comma separated list)')

    clean_name_and_visibility : bpy.props.BoolProperty(name='Clean Name And Visibility', default=True,
        description='Add object name to layer name when there is only a prefix (ex: "CO_")\
            \nEnable visibility for layers with a prefix included in Prefix Filter')

    clean_material_duplication : bpy.props.BoolProperty(name='Clean Material Duplication', default=True,
        description='Clean material stack. i.e: Replace "mat.001" in material stack if "mat" exists and has same color')

    prefix_filter : bpy.props.StringProperty(name='Prefix Filter', default='CO, CU, FX, TO', # , MA # exclude MA if masks are applied
        description='Comma separated prefixes to render. Send the other prefixes and non-prefixed layers to the excluded viewlayer')

    set_layers_colors : bpy.props.BoolProperty(name='Set Layers Colors', default=True,
        description='')

    trigger_rename_lowercase : bpy.props.BoolProperty(name='Trigger Rename Lowercase', default=True,
        description='')

    trigger_renumber_by_distance : bpy.props.BoolProperty(name='Trigger Renumber By Distance', default=True,
        description='')

    export_layer_infos : bpy.props.BoolProperty(name='Export Layer Infos', default=True,
        description='')

    group_all_adjacent_layer_type : bpy.props.BoolProperty(name='Group All Adjacent Layer Type', default=True,
        description='')

    change_to_gp_workspace : bpy.props.BoolProperty(name='Change To Gp Workspace', default=True,
        description='')

    batch_setup_render_scene : bpy.props.BoolProperty(name='Batch Setup Render Scene', default=True,
        description='')


@@ -116,7 +116,7 @@ class GPEXP_OT_render_auto_build(bpy.types.Operator):
        row = col.row()
        row.prop(self, 'prefix_filter')
        row.active = self.clean_name_and_visibility

        col.prop(self, 'clean_material_duplication')

        col.prop(self, 'set_layers_colors')

@@ -132,7 +132,7 @@ class GPEXP_OT_render_auto_build(bpy.types.Operator):
        col.prop(self, 'batch_setup_render_scene')

        # layout.prop(self, 'clear_unused_view_layers')

    def execute(self, context):
        print('-- Auto-build Render scene --\n')

@@ -146,9 +146,9 @@ class GPEXP_OT_render_auto_build(bpy.types.Operator):
        if render_scn:
            self.report({'ERROR'}, 'A "Render" scene already exists')
            return {'CANCELLED'}

        all_gp_objects = [o for o in context.scene.objects if o.type == 'GPENCIL']

        ## clean name and visibility
        if self.clean_name_and_visibility:
            for o in all_gp_objects:

@@ -159,7 +159,7 @@ class GPEXP_OT_render_auto_build(bpy.types.Operator):
                ## Clean name when layer has no name after prefix
                if re.match(r'^[A-Z]{2}_$', l.info):
                    l.info = l.info + o.name.lower()

                ## Make used prefix visible ?? (maybe some layers were intentionally hidden...)
                if (res := re.search(r'^([A-Z]{2})_', l.info)):
                    if res.group(1) in prefix_to_render and l.hide == True and not 'invisible' in l.info:

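The two regular expressions in the hunk above implement the layer naming convention: `^[A-Z]{2}_$` detects a bare two-letter prefix such as "CO_" so the object name can be appended, and `^([A-Z]{2})_` captures the prefix so it can be checked against the render filter. A small standalone illustration (plain Python, no Blender required; `prefix_to_render` here is taken from the operator's default Prefix Filter):

import re

prefix_to_render = {'CO', 'CU', 'FX', 'TO'}  # default Prefix Filter of the operator

def clean_and_filter(layer_name, object_name):
    # bare prefix like "CO_" -> append the lowercased object name
    if re.match(r'^[A-Z]{2}_$', layer_name):
        layer_name = layer_name + object_name.lower()
    # capture the two-letter prefix and test it against the filter
    res = re.search(r'^([A-Z]{2})_', layer_name)
    keep = bool(res) and res.group(1) in prefix_to_render
    return layer_name, keep

print(clean_and_filter('CO_', 'Character'))      # ('CO_character', True)
print(clean_and_filter('MA_mask', 'Character'))  # ('MA_mask', False)
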
@@ -202,7 +202,7 @@ class GPEXP_OT_render_auto_build(bpy.types.Operator):
            print('Trigger renumber by distance')
            bpy.ops.gp.auto_number_object('EXEC_DEFAULT')
            # bpy.ops.gp.auto_number_object('INVOKE_DEFAULT')

        ## Export layer infos ? (skip if json already exists)
        if self.export_layer_infos:
            print('Export layer infos (skip if json already exists)')

@@ -221,7 +221,7 @@ class GPEXP_OT_render_auto_build(bpy.types.Operator):
                return {'CANCELLED'}

        context.window.scene = render_scn

        ## Group all adjacent layer type
        if self.group_all_adjacent_layer_type:
            print('Group all adjacent layer type')

@@ -252,7 +252,7 @@ class GPEXP_OT_render_auto_build(bpy.types.Operator):

        ## Batch setup render scene
        batch_setup_render_scene(render_scn=render_scn)

        ## No need for timer anymore !
        # if batch_setup_render_scene:
        #     if self.timer > 0:

@@ -296,7 +296,7 @@ GPEXP_OT_render_auto_build,
GPEXP_OT_render_scene_setup,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

@@ -32,8 +32,8 @@ def check_layer_state(pool=None, reports=None):
            # # all masks disable
            # pass

            ## just list masks
            # state = '' if l.use_mask_layer else ' (disabled)'
            # reports.append(f'{ob.name} > {l.info} masks{state}:')
            # for ml in l.mask_layers:
            #     mlstate = ' (disabled)' if ml.hide else ''

@@ -60,7 +60,7 @@ def check_file_output_numbering(reports=None):
        if S.name == 'Scene' or not S.node_tree or not S.use_nodes:
            continue
        file_outs += [n for n in S.node_tree.nodes if n.type == 'OUTPUT_FILE']

    used = False

    if not file_outs:

@@ -84,7 +84,7 @@ class GPEXP_OT_check_render_scene(bpy.types.Operator):
    bl_description = "Auto check render scene"
    bl_options = {"REGISTER"} # , "UNDO"

    # clear_unused_view_layers : bpy.props.BoolProperty(name="Clear unused view layers",
    #     description="Delete view layer that aren't used in the nodetree anymore",
    #     default=True)

@@ -100,7 +100,7 @@ class GPEXP_OT_check_render_scene(bpy.types.Operator):
        layout = self.layout
        # layout.prop(self, 'clear_unused_view_layers')

    def execute(self, context):
        reports = []
        # check gp modifiers
        broken_mods = check_broken_modifier_target()

@@ -108,7 +108,7 @@ class GPEXP_OT_check_render_scene(bpy.types.Operator):
            reports.append('GP modifiers targets:')
            reports += broken_mods

        # check layers
        layer_state = check_layer_state()
        if layer_state:
            if reports: reports.append('')

@@ -137,7 +137,7 @@ classes=(
GPEXP_OT_check_render_scene,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

OP_clean.py (32 changed lines)
@@ -15,12 +15,12 @@ class GPEXP_OT_clean_compo_tree(bpy.types.Operator):

    # mode : bpy.props.StringProperty(default='NORMAL', options={'SKIP_SAVE'})

    def execute(self, context):
        render = bpy.data.scenes.get('Render')
        if not render:
            print('SKIP, no Render scene')
            return {"CANCELLED"}

        print('re-arranging frames')
        fn.rearrange_frames(render.node_tree)

@@ -39,7 +39,7 @@ class GPEXP_OT_clean_compo_tree(bpy.types.Operator):
                    break
            if out:
                fn.reorder_fileout(out, ng=n)


        ## clear disconnected fileout ??...
        # for fo in render.node_tree.nodes:

@@ -57,31 +57,31 @@ class GPEXP_OT_clean_compo_tree(bpy.types.Operator):
    bl_options = {"REGISTER", "UNDO"}

    # Internal prop (use when launching from python)
    use_render_scene : bpy.props.BoolProperty(name="Use Render Scene",
        description="Force the clean on the scene named Render, abort if it does not exist (not exposed)",
        default=False, options={'SKIP_SAVE'})

    clear_unused_view_layers : bpy.props.BoolProperty(name="Clear unused view layers",
        description="Delete view layers that aren't used in the nodetree anymore",
        default=True)

    arrange_rl_nodes : bpy.props.BoolProperty(name="Arrange Render Node In Frames",
        description="Re-arrange Render Layer node Y positions within each existing frame",
        default=True)

    arrange_frames : bpy.props.BoolProperty(name="Arrange Frames",
        description="Re-arrange all frames Y positions",
        default=True)

    reorder_inputs : bpy.props.BoolProperty(name="Reorder I/O Sockets",
        description="Reorder inputs/outputs of all 'NG_' nodegroups and their connected file output",
        default=True)

    clear_isolated_node_in_groups : bpy.props.BoolProperty(name="Clear Isolated Node In Groups",
        description="Clean content of 'NG_' nodegroups by deleting isolated nodes",
        default=True)

    fo_clear_disconnected : bpy.props.BoolProperty(name="Remove Disconnected Export Inputs",
        description="Clear any disconnected input of every 'file output' node",
        default=False)

@@ -104,7 +104,7 @@ class GPEXP_OT_clean_compo_tree(bpy.types.Operator):
        layout.prop(self, 'fo_clear_disconnected')
        if self.fo_clear_disconnected:
            layout.label(text='Disconnected inputs are not exported', icon='INFO')

        # box = layout.box()
        # box.prop(self, 'arrange_frames')
        # box.prop(self, 'reorder_inputs')

@@ -152,17 +152,17 @@ class GPEXP_OT_clean_compo_tree(bpy.types.Operator):
                    break
            if out:
                fn.reorder_fileout(out, ng=n)

            # Clear inputs that do not exist
            fn.clean_nodegroup_inputs(n, skip_existing_pass=True)

            fn.bridge_reconnect_nodegroup(n)

        if self.clear_isolated_node_in_groups:
            for n in nodes:
                if n.type != 'GROUP' or not n.name.startswith('NG_'):
                    continue
                fn.clear_nodegroup_content_if_disconnected(n.node_tree)

        if self.fo_clear_disconnected:
            for fo in nodes:

@@ -179,7 +179,7 @@ classes=(
GPEXP_OT_clean_compo_tree,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

OP_clear.py (10 changed lines)
@@ -14,20 +14,20 @@ class GPEXP_OT_clear_render_tree(bpy.types.Operator):
    mode : bpy.props.StringProperty(default='NORMAL', options={'SKIP_SAVE'})

    def execute(self, context):

        render = bpy.data.scenes.get('Render')
        if not render:
            print('SKIP, no Render scene')
            return {"CANCELLED"}

        # clear all nodes in frames
        if render.use_nodes:
            for i in range(len(render.node_tree.nodes))[::-1]:

                # skip frames to delete later
                if render.node_tree.nodes[i].type == 'FRAME':
                    continue

                # skip unparented nodes
                if not render.node_tree.nodes[i].parent:
                    continue

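Iterating with `range(len(render.node_tree.nodes))[::-1]` walks the collection backwards, so removing a node never shifts the indices of the elements still to be visited. A minimal list-based sketch of the same idea:

# Removing while iterating backwards keeps the remaining indices stable.
items = ['keep', 'drop', 'keep', 'drop']
for i in range(len(items))[::-1]:
    if items[i] == 'drop':
        del items[i]
print(items)  # ['keep', 'keep']
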
@@ -59,7 +59,7 @@ classes=(
GPEXP_OT_clear_render_tree,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

@@ -14,10 +14,10 @@ class GPEXP_OT_reconnect_render_layer(bpy.types.Operator):

    # mode : bpy.props.StringProperty(default='NORMAL', options={'SKIP_SAVE'})

    def execute(self, context):
        node_tree = context.scene.node_tree
        nodes = node_tree.nodes

        changed = []
        for n in nodes:
            if not n.select or not n.type == 'R_LAYERS':

@@ -25,10 +25,10 @@ class GPEXP_OT_reconnect_render_layer(bpy.types.Operator):

            if not ' / ' in n.layer:
                continue

            if n.outputs[0].is_linked: # already connected
                continue

            # get name
            obname = n.layer.split()[0]
            grp_name = f'NG_{obname}'

@@ -39,7 +39,7 @@ class GPEXP_OT_reconnect_render_layer(bpy.types.Operator):
            if not grp:
                print(f'{n.name} Node group not found : {n.layer} !-> {grp_name}')
                continue

            inp = grp.inputs.get(n.layer)
            if not inp:
                print(f'{n.name} no input named "{n.layer}" in group {grp_name}')

@@ -48,7 +48,7 @@ class GPEXP_OT_reconnect_render_layer(bpy.types.Operator):
            # reconnect
            node_tree.links.new(n.outputs[0], inp)
            changed.append(f'{n.name} ({n.layer}) to {grp_name}')

        if changed:
            self.report({'INFO'}, f'{len(changed)} nodes reconnected')
        else:

@@ -68,8 +68,8 @@ class GPEXP_OT_delete_render_layer(bpy.types.Operator):

    # mode : bpy.props.StringProperty(default='NORMAL', options={'SKIP_SAVE'})

    def execute(self, context):

        rd_scn = bpy.data.scenes.get('Render')
        if not rd_scn:
            self.report({'ERROR'}, 'Viewlayers need to be generated first!')

@@ -81,7 +81,7 @@ class GPEXP_OT_delete_render_layer(bpy.types.Operator):
        # if l.viewlayer_render and l.viewlayer_render != act.viewlayer_render and rd_scn.view_layers.get(l.viewlayer_render)]

        rlayers_nodes = [n for n in nodes if n.select and n.type == 'R_LAYERS']

        vls = [rd_scn.view_layers.get(n.layer) for n in rlayers_nodes if rd_scn.view_layers.get(n.layer)]

        vl_names = [v.name for v in vls]

@@ -115,7 +115,7 @@ class GPEXP_OT_delete_render_layer(bpy.types.Operator):
            for i in range(len(grp.inputs))[::-1]:
                if grp.inputs[i].name == sockin.name:
                    ngroup.inputs.remove(ngroup.inputs[i])

                    gp_in_socket = ngroup.nodes['Group Input'].outputs[i]
                    for lnk in gp_in_socket.links:
                        inside_nodes += fn.all_connected_forward(lnk.to_node)

@@ -125,7 +125,7 @@ class GPEXP_OT_delete_render_layer(bpy.types.Operator):
                if grp.outputs[i].name == sockout.name:
                    ngroup.outputs.remove(ngroup.outputs[i])
                    break

            for sub_n in reversed(inside_nodes):
                ngroup.nodes.remove(sub_n)

@@ -160,7 +160,7 @@ class GPEXP_OT_set_active_fileout_to_compout(bpy.types.Operator):
        if not len(self.fo.file_slots):
            self.report({'ERROR'}, 'no slots in active file output')
            return {'CANCELLED'}

        # check if active slot has a source
        if not self.fo.inputs[self.fo.active_input_index].is_linked:
            return self.execute(context)

@@ -170,11 +170,11 @@ class GPEXP_OT_set_active_fileout_to_compout(bpy.types.Operator):
        if not out or not out.inputs[0].is_linked:
            self.compo_out_from_link = ''
            return self.execute(context)

        # compo linked, pop panel to choose replace or not
        self.compo_out_from_link = out.inputs[0].links[0].from_node.name
        return context.window_manager.invoke_props_dialog(self)


    def draw(self, context):
        layout = self.layout

@@ -189,11 +189,11 @@ class GPEXP_OT_set_active_fileout_to_compout(bpy.types.Operator):
        idx = self.fo.active_input_index
        sl = self.fo.file_slots[idx]
        sk = self.fo.inputs[idx]

        if not sk.is_linked:
            self.report({'INFO'}, f'Output changed to match {sl.path} (slot was not linked)')
            return {'FINISHED'}

        ## If linked, replace links to Composite node
        if not self.relink_composite:
            return {'FINISHED'}

@@ -201,16 +201,16 @@ class GPEXP_OT_set_active_fileout_to_compout(bpy.types.Operator):
        ntree = context.scene.node_tree
        links = context.scene.node_tree.links
        nodes = context.scene.node_tree.nodes

        out = nodes.get('Composite')
        if not out:
            out = fn.create_node('COMPOSITE', tree=ntree)
            fo_loc = fn.real_loc(self.fo)
            out.location = (fo_loc.x, fo_loc.y + 160)

        # if out.inputs[0].is_linked:
        #     self.report({'WARNING'}, f'Output changed to match {sl.path} (Composite node already linked)')

        lnk = sk.links[0]
        from_sk = sk.links[0].from_socket
        links.remove(lnk)

@@ -226,7 +226,7 @@ GPEXP_OT_delete_render_layer,
GPEXP_OT_set_active_fileout_to_compout,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

@@ -46,7 +46,7 @@ GPEXP_OT_set_crop_from_selection,
GPEXP_OT_export_crop_coord_to_json,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

@@ -14,7 +14,7 @@ def Export_AE_2d_position_json_data():
    if not cam:
        print('Active camera not "anim_cam"')
        cam = scn.camera

    rd = scn.render
    rx, ry = rd.resolution_x, rd.resolution_y

@@ -49,7 +49,7 @@ def export_AE_objects_position_keys():
    result = {}
    print(f'Exporting 2d position (scene range: {scn.frame_start} - {scn.frame_end})')
    for fr in range(scn.frame_start, scn.frame_end + 1):

        print(f'frame: {fr}')
        scn.frame_set(fr)

@@ -57,17 +57,17 @@ def export_AE_objects_position_keys():
            if not result.get(o.name):
                result[o.name] = []
            proj2d = world_to_camera_view(scn, scn.camera, o.matrix_world.to_translation()) # + Vector((.5,.5,0))

            # proj2d = correct_shift(proj2d, scn.camera) # needed ?
            x = (proj2d[0]) * scn.render.resolution_x
            y = -(proj2d[1]) * scn.render.resolution_y + scn.render.resolution_y

            result[o.name].append((fr, x, y))

    for name, value in result.items():

        txt = fn.get_ae_keyframe_clipboard_header(scn)

        for v in value:
            txt += '\t%s\t%s\t%s\t0\t\n' % (v[0], v[1], v[2]) # add 0 for Z (probably not needed)

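`world_to_camera_view` returns coordinates normalized to the camera frame (0..1, origin at the bottom-left), while After Effects expects pixel coordinates with the origin at the top-left; the two lines computing `x` and `y` above perform exactly that conversion, hence the vertical flip. A Blender-free sketch of the same arithmetic:

def to_ae_pixels(proj2d, res_x, res_y):
    """proj2d: normalized (x, y) as returned by world_to_camera_view (origin bottom-left)."""
    x = proj2d[0] * res_x
    y = -proj2d[1] * res_y + res_y  # flip vertically for a top-left origin
    return x, y

# A point at the exact center of a 1920x1080 frame maps to the pixel center:
print(to_ae_pixels((0.5, 0.5), 1920, 1080))  # (960.0, 540.0)
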
@@ -76,12 +76,12 @@ def export_AE_objects_position_keys():

        blend = Path(bpy.data.filepath)
        keyfile = blend.parent / 'render' / f'pos_{name}.txt'
        keyfile.parent.mkdir(parents=False, exist_ok=True)

        print(f'exporting keys for {name} at {keyfile}')

        ## save forcing CRLF terminator (DOS style, damn windows)
        ## in case it's exported from linux
        with open(keyfile, 'w', newline='\r\n') as fd:
            fd.write(txt)

@@ -178,7 +178,7 @@ class GPEXP_OT_fix_overscan_shift(bpy.types.Operator):

    def draw(self, context):
        layout = self.layout

        if self.use_selection:
            col = layout.column()
            col.label(text=f'Camera "{self.cam_ob.name}" selected', icon='INFO')

@@ -196,7 +196,7 @@ class GPEXP_OT_fix_overscan_shift(bpy.types.Operator):

    def execute(self, context):
        cam = self.cam_ob.data

        ratio_x = self.init_rx / context.scene.render.resolution_x
        ratio_y = self.init_ry / context.scene.render.resolution_y

@@ -212,7 +212,7 @@ class GPEXP_OT_fix_overscan_shift(bpy.types.Operator):
            else:
                if cam.shift_x != 1:
                    cam.shift_x = cam.shift_x * ratio_x

        if ratio_y != 1:
            if fn.has_keyframe(cam, 'shift_y'):
                fcu = cam.animation_data.action.fcurves.find('shift_y')

@@ -260,7 +260,7 @@ GPEXP_OT_fix_overscan_shift,
GPEXP_PT_extra_gprender_func
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

@@ -274,4 +274,3 @@ def unregister():

    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)

@@ -34,7 +34,7 @@ class GPEXP_OT_number_outputs(bpy.types.Operator):
        return True

    mode : bpy.props.StringProperty(default='SELECTED', options={'SKIP_SAVE'})
    # ctrl : bpy.props.StringProperty(default=False, options={'SKIP_SAVE'}) # no need

    def invoke(self, context, event):
        self.ctrl = event.ctrl

@@ -45,7 +45,7 @@ class GPEXP_OT_number_outputs(bpy.types.Operator):
        if not render:
            print('SKIP, no Render scene')
            return {"CANCELLED"}

        ct = 0
        nodes = render.node_tree.nodes
        for fo in nodes:

@@ -95,7 +95,7 @@ class GPEXP_OT_set_output_node_format(bpy.types.Operator):
        for n in nodes:
            if n.type != 'OUTPUT_FILE' or n == ref or not n.select:
                continue

            for attr in dir(ref.format):
                if attr.startswith('__') or attr in {'rna_type', 'bl_rna', 'view_settings', 'display_settings', 'stereo_3d_format'}: # views_format
                    continue

@@ -104,10 +104,10 @@ class GPEXP_OT_set_output_node_format(bpy.types.Operator):
                except Exception as e:
                    print(f"can't set attribute : {attr}")

            # n.format.file_format = file_format
            # n.format.color_mode = color_mode
            # n.format.color_depth = color_depth
            # n.format.compression = compression

            ct += 1

@@ -157,7 +157,7 @@ class GPEXP_OT_normalize_outnames(bpy.types.Operator):
            fp_l = reslash.split(fp)
            for i, part in enumerate(fp_l):
                fp_l[1] = re.sub(r'(^\d{3}_)?([A-Z]{2}_)?(.*?)(_[A-Z]{2})?(_)?', out_norm, part)

            fs.path = '/'.join(fp_l)

            ct += 1

@@ -183,7 +183,7 @@ class GPEXP_OT_enable_all_viewlayers(bpy.types.Operator):
        vl_list = [vl for vl in scn.view_layers if not vl.use and vl.name not in {'View Layer', 'exclude'}]
        for v in vl_list:
            v.use = True

        self.report({"INFO"}, f'{len(vl_list)} ViewLayers Reactivated')
        return {"FINISHED"}

@@ -206,7 +206,7 @@ class GPEXP_OT_activate_only_selected_layers(bpy.types.Operator):
        vls = [scn.view_layers.get(n.layer) for n in rlayers_nodes if scn.view_layers.get(n.layer)]
        for v in scn.view_layers:
            v.use = v in vls

        self.report({"INFO"}, f'Now only {len(vls)} viewlayers active (/{len(scn.view_layers)})')
        return {"FINISHED"}

@@ -224,25 +224,25 @@ class GPEXP_OT_reset_render_settings(bpy.types.Operator):
            if scn.name == 'Scene':
                # don't touch original scene
                continue

            # set a unique preview output
            # - avoid possible write/sync overlap (point to tmp on linux ?)
            # - allow to monitor output of a scene and possibly use Overwrite

            if scn.render.filepath.startswith('//render/preview/'):
                scn.render.filepath = f'//render/preview/{bpy.path.clean_name(scn.name.lower())}/preview_'
                print(f'Scene {scn.name}: change output to {scn.render.filepath}')

            if not scn.use_nodes:
                continue

            # set the settings depending on merge node presence
            use_native_aa = True
            for n in scn.node_tree.nodes:
                if n.name.startswith('merge_NG_'):
                    use_native_aa = False
                    break

            if scn.use_aa != use_native_aa:
                print(f'Scene {scn.name}: changed scene AA settings, native AA = {use_native_aa}')
                fn.scene_aa(scene=scn, toggle=use_native_aa)

@@ -262,7 +262,7 @@ GPEXP_OT_reset_render_settings,
# GPEXP_OT_normalize_outnames,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

@@ -19,7 +19,7 @@ def merge_layers(rlayers, obname=None, active=None, disconnect=True, color=None)
    # get node group
    # ng = rlayers[0].outputs[0].links[0].to_node

    # sort RL descending
    rlayers.sort(key=lambda n: fn.real_loc(n).y, reverse=True)

    node_tree = rlayers[0].id_data

@@ -41,11 +41,11 @@ def merge_layers(rlayers, obname=None, active=None, disconnect=True, color=None)
        obname = lname = bpy.path.clean_name(vl_name)
    base_path = f'//render/'
    slot_name = f'{lname}/{lname}_'


    # change colors of those nodes
    disconnected_groups = []
    if not color:
        color = fn.random_color()
    for n in rlayers:
        n.use_custom_color = True

@@ -57,7 +57,7 @@ def merge_layers(rlayers, obname=None, active=None, disconnect=True, color=None)
            if lnk.to_node.name.startswith('NG_'):
                disconnected_groups.append(lnk.to_node)
            links.remove(lnk)

    disconnected_groups = list(set(disconnected_groups))

    ng_name = f'merge_NG_{obname}' # only object name

@@ -72,7 +72,7 @@ def merge_layers(rlayers, obname=None, active=None, disconnect=True, color=None)
    while bpy.data.node_groups.get(ng_name): # nodes.get(ng_name)
        if not re.search(r'(\d+)$', ng_name):
            ng_name += '_02' # if not ending with a number add _02
        ng_name = re.sub(r'(\d+)(?!.*\d)', lambda x: str(int(x.group(1))+1).zfill(len(x.group(1))), ng_name)

    # print(f'create merge nodegroup {ng_name}')
    ngroup = bpy.data.node_groups.new(ng_name, 'CompositorNodeTree')

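The while loop above keeps bumping the trailing number of the nodegroup name until the name is free: `(\d+)$` checks whether the name already ends with digits, and `(\d+)(?!.*\d)` targets the last run of digits so the lambda can increment it while preserving the zero padding. A standalone example of a single renaming step:

import re

def bump_trailing_number(name):
    # mirrors one iteration of the while loop in the hunk above
    if not re.search(r'(\d+)$', name):
        name += '_02'  # if not ending with a number, add _02
    return re.sub(r'(\d+)(?!.*\d)',
                  lambda x: str(int(x.group(1)) + 1).zfill(len(x.group(1))),
                  name)

print(bump_trailing_number('merge_NG_chair'))     # merge_NG_chair_03
print(bump_trailing_number('merge_NG_chair_09'))  # merge_NG_chair_10
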
@@ -101,7 +101,7 @@ def merge_layers(rlayers, obname=None, active=None, disconnect=True, color=None)
    out.base_path = base_path
    out.file_slots.new(slot_name)
    links.new(ng.outputs[0], out.inputs[-1])

    fn.clear_disconnected(out)
    out.update()

@@ -139,7 +139,7 @@ class GPEXP_OT_merge_viewlayers_to_active(bpy.types.Operator):
        # if not act.viewlayer_render:
        #     self.report({'ERROR'}, f'Active layer {act.info} has no viewlayer assigned')
        #     return {'CANCELLED'}

        ret = fn.merge_gplayer_viewlayers(ob, act=act, layers=layers)
        if isinstance(ret, tuple):
            self.report(*ret)

@@ -159,9 +159,9 @@ class GPEXP_OT_auto_merge_adjacent_prefix(bpy.types.Operator):
    excluded_prefix : bpy.props.StringProperty(
        name='Excluded Prefix', default='GP,RG,PO',
        description='Exclude comma separated prefixes from viewlayer merging')

    first_name : bpy.props.BoolProperty(name='Merge On Bottom Layer',
        default=True,
        description='Keep the viewlayer of the bottom layer in groups, else the upper layer')

    def invoke(self, context, event):

@@ -209,7 +209,7 @@ class GPEXP_OT_merge_selected_dopesheet_layers(bpy.types.Operator):
        render = bpy.data.scenes.get('Render')
        if render:
            nodes = render.node_tree.nodes

        clean_ob_name = bpy.path.clean_name(ob.name)
        rlayers = []
        for l in layers:

@@ -226,16 +226,16 @@ class GPEXP_OT_merge_selected_dopesheet_layers(bpy.types.Operator):
            if not rlayer:
                # send to function to generate the rlayer and connect
                _vl, rl = gen_vlayer.get_set_viewlayer_from_gp(ob, l)

            else:
                rlayer.sort(key=lambda n: n.location.y, reverse=True)
                rl = rlayer[0]

            if act == l:
                nodes.active = rl # make it active so the merge uses this one

            rlayers.append(rl)

        color = None
        if fn.has_channel_color(act): # and bpy.context.preferences.edit.use_anim_channel_group_colors
            color = act.channel_color

@@ -287,7 +287,7 @@ GPEXP_OT_merge_selected_dopesheet_layers, # unused
GPEXP_OT_merge_selected_viewlayer_nodes,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

@@ -42,15 +42,15 @@ def renumber_sequence_on_disk_from_file_slots(apply=True, active_scene_only=False):
        obj_num = prenum.search(obj_full)
        if obj_num:
            obj_num = obj_num.group(0)

        ## check if folder exists
        folder_path = None

        for d in os.scandir(render):
            if d.is_dir() and prenum.sub('', d.name) == obj:
                folder_path = render / d.name
                break

        if not folder_path:
            print(f'Could not find obj folder for: {obj}')
            continue

@@ -67,20 +67,20 @@ def renumber_sequence_on_disk_from_file_slots(apply=True, active_scene_only=False):
            continue # If no img_num there is no point in renaming sequences

        img_dir_path = None

        for img_dir in os.scandir(folder_path):
            if img_dir.is_dir() and prenum.sub('', img_dir.name) == img:
                img_dir_path = folder_path / img_dir.name
                break

        if not img_dir_path:
            print(f'Could not find img folder for: {img}')
            continue

        # if the folder exists, check if the full name is ok
        if img_full == img_dir_path.name:
            continue # name already correct (maybe not in sequence but should be good)


        # rename sequence and image folder
        for frame in os.scandir(img_dir_path):

@@ -91,7 +91,7 @@ def renumber_sequence_on_disk_from_file_slots(apply=True, active_scene_only=False):
            if apply:
                fp = Path(frame.path)
                fp.rename(fp.parent / good)

        # rename image folder
        if img_dir_path.name != img_full:
            print(f' dir:{img_dir_path.name} > {img_full}')

@@ -120,11 +120,11 @@ class GPEXP_OT_renumber_files_on_disk(bpy.types.Operator):
    def invoke(self, context, event):
        # return self.execute(context)
        return context.window_manager.invoke_props_dialog(self)

    dry_run: bpy.props.BoolProperty(name='Dry-run (no actions, prints in console only)',
        default=False,
        description='Test mode. If checked, no action is actually performed')

    active_scene_only: bpy.props.BoolProperty(name='Only Active Scene',
        default=False,
        description='Use only file outputs of the active scene instead of all scenes (skipping "Scene")')

@@ -158,7 +158,7 @@ classes=(
GPEXP_OT_renumber_files_on_disk,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

@@ -37,7 +37,7 @@ def export_all_selected_frame_as_svg():
        if ob.type != 'GPENCIL':
            continue
        frames += [f.frame_number for l in ob.data.layers if not l.hide for f in l.frames if len(f.strokes)]

    if frames:
        frames = sorted(list(set(frames)))
    else:

@@ -59,7 +59,7 @@ def export_all_selected_frame_as_svg():
        if fp.exists():
            print(f' already exists: {fp}')
            continue

        bpy.context.scene.frame_current = fnum
        bpy.ops.wm.gpencil_export_svg(filepath=str(fp),
            check_existing=True,

@@ -103,16 +103,16 @@ class GPEXP_OT_export_as_pdf(bpy.types.Operator):
    def poll(cls, context):
        return True

    def execute(self, context):
        # rd_scn = bpy.data.scenes.get('Render')
        # if not rd_scn:
        #     self.report({'ERROR'}, 'Viewlayers need to be generated first!')
        #     return {'CANCELLED'}


        ### store
        ## dict of all visible objects as keys with value: sub dict {layer: hide_bool}

        # obj_vis = [[o, o.hide_viewport, o.hide_render] for o in context.scene.objects if o.type == 'GPENCIL' and not (o.hide_get() or o.hide_viewport)]
        t0 = time()

@@ -122,7 +122,7 @@ class GPEXP_OT_export_as_pdf(bpy.types.Operator):
        selection = [o for o in context.selected_objects]

        messages = []

        ## adaptive resampling on all concerned objects
        for ob in store.keys():
            mod = ob.grease_pencil_modifiers.get('resample')

@@ -136,7 +136,7 @@ class GPEXP_OT_export_as_pdf(bpy.types.Operator):
            if ob.type != 'GPENCIL':
                continue

            mess = f'--- {ob.name}:'
            print(mess)
            messages.append(mess)

@@ -149,14 +149,14 @@ class GPEXP_OT_export_as_pdf(bpy.types.Operator):
            # for o in context.scene.objects:
            #     o.hide_viewport = True
            # ob.hide_viewport = False

            ## manage layers
            gpl = ob.data.layers
            vl_dicts = {vl_name: list(layer_grp) for vl_name, layer_grp in groupby(gpl, lambda x: x.viewlayer_render)}
            for vl_name, layer_list in vl_dicts.items():
                vl = context.scene.view_layers.get(vl_name)
                if not vl:
                    mess = f'/!\ {vl_name} viewlayer does not exist : skipped {[l.info for l in layer_list]}'
                    print(mess)
                    messages.append(mess)
                    continue

@ -164,11 +164,11 @@ class GPEXP_OT_export_as_pdf(bpy.types.Operator):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if not vl.use:
|
if not vl.use:
|
||||||
mess = f'{vl_name} viewlayer disabled'
|
mess = f'{vl_name} viewlayer disabled'
|
||||||
print(mess)
|
print(mess)
|
||||||
messages.append(mess)
|
messages.append(mess)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
|
||||||
# Case of initially masked layer !
|
# Case of initially masked layer !
|
||||||
hide_ct = 0
|
hide_ct = 0
|
||||||
|
@ -191,34 +191,34 @@ class GPEXP_OT_export_as_pdf(bpy.types.Operator):
|
||||||
ng_name = f'NG_{ob.name}'
|
ng_name = f'NG_{ob.name}'
|
||||||
ng = context.scene.node_tree.nodes.get(ng_name)
|
ng = context.scene.node_tree.nodes.get(ng_name)
|
||||||
if not ng:
|
if not ng:
|
||||||
mess = f'Skip {vl_name}: Not found nodegroup {ng_name}'
|
mess = f'Skip {vl_name}: Not found nodegroup {ng_name}'
|
||||||
print(mess)
|
print(mess)
|
||||||
messages.append(mess)
|
messages.append(mess)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
ng_socket = ng.outputs.get(vl_name)
|
ng_socket = ng.outputs.get(vl_name)
|
||||||
|
|
||||||
if not ng_socket:
|
if not ng_socket:
|
||||||
mess = f'Skip {vl_name}: Not found in nodegroup {ng_name} sockets'
|
mess = f'Skip {vl_name}: Not found in nodegroup {ng_name} sockets'
|
||||||
print(mess)
|
print(mess)
|
||||||
messages.append(mess)
|
messages.append(mess)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if not len(ng_socket.links):
|
if not len(ng_socket.links):
|
||||||
mess = f' socket is disconnected in {ng_name} nodegroup'
|
mess = f' socket is disconnected in {ng_name} nodegroup'
|
||||||
print(mess)
|
print(mess)
|
||||||
messages.append(mess)
|
messages.append(mess)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
fo_node = ng_socket.links[0].to_node
|
fo_node = ng_socket.links[0].to_node
|
||||||
fo_socket = ng_socket.links[0].to_socket
|
fo_socket = ng_socket.links[0].to_socket
|
||||||
|
|
||||||
if fo_node.type != 'OUTPUT_FILE':
|
if fo_node.type != 'OUTPUT_FILE':
|
||||||
mess = f'Skip {vl_name}: node is not an output_file {fo_node.name}'
|
mess = f'Skip {vl_name}: node is not an output_file {fo_node.name}'
|
||||||
print(mess)
|
print(mess)
|
||||||
messages.append(mess)
|
messages.append(mess)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if fo_node.mute:
|
if fo_node.mute:
|
||||||
mess = f'Skip {vl_name}: output is muted {fo_node.name}'
|
mess = f'Skip {vl_name}: output is muted {fo_node.name}'
|
||||||
print(mess)
|
print(mess)
|
||||||
|
@ -231,12 +231,12 @@ class GPEXP_OT_export_as_pdf(bpy.types.Operator):
|
||||||
subpath = fo_node.file_slots[idx].path
|
subpath = fo_node.file_slots[idx].path
|
||||||
fp = Path(fo_node.base_path.rstrip('/')) / subpath
|
fp = Path(fo_node.base_path.rstrip('/')) / subpath
|
||||||
fp = Path(bpy.path.abspath(str(fp)).rstrip("/"))
|
fp = Path(bpy.path.abspath(str(fp)).rstrip("/"))
|
||||||
|
|
||||||
print(f'render {total} layers at: {fp.parent}') #Dbg
|
print(f'render {total} layers at: {fp.parent}') #Dbg
|
||||||
|
|
||||||
# hide all layer that are: not associated with VL (not in layer_list) or hided initially (store[ob][l])
|
# hide all layer that are: not associated with VL (not in layer_list) or hided initially (store[ob][l])
|
||||||
for l in gpl:
|
for l in gpl:
|
||||||
l.hide = l not in layer_list or store[ob][l]
|
l.hide = l not in layer_list or store[ob][l]
|
||||||
|
|
||||||
for l in gpl:
|
for l in gpl:
|
||||||
if not l.hide:
|
if not l.hide:
|
||||||
|
@ -250,12 +250,12 @@ class GPEXP_OT_export_as_pdf(bpy.types.Operator):
|
||||||
# ob.hide_viewport = False # no need
|
# ob.hide_viewport = False # no need
|
||||||
for l, h in layer_dic.items():
|
for l, h in layer_dic.items():
|
||||||
l.hide = h
|
l.hide = h
|
||||||
|
|
||||||
for o in selection:
|
for o in selection:
|
||||||
o.select_set(True)
|
o.select_set(True)
|
||||||
if act:
|
if act:
|
||||||
context.view_layer.objects.active = act
|
context.view_layer.objects.active = act
|
||||||
|
|
||||||
# for oviz in obj_vis:
|
# for oviz in obj_vis:
|
||||||
# oviz[0].hide_viewport = oviz[1]
|
# oviz[0].hide_viewport = oviz[1]
|
||||||
|
|
||||||
|
@ -269,7 +269,7 @@ classes=(
|
||||||
GPEXP_OT_export_as_pdf,
|
GPEXP_OT_export_as_pdf,
|
||||||
)
|
)
|
||||||
|
|
||||||
def register():
|
def register():
|
||||||
for cls in classes:
|
for cls in classes:
|
||||||
bpy.utils.register_class(cls)
|
bpy.utils.register_class(cls)
|
||||||
|
|
||||||
|
|
|
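The `vl_dicts` comprehension above relies on `itertools.groupby`, which only groups adjacent items: layers that share a viewlayer but are not adjacent in the stack produce separate groups, and in a dict comprehension the later group silently overwrites the earlier one. A small sketch of the idea with plain stand-in objects (names are illustrative):

```python
from itertools import groupby

class FakeLayer:
    def __init__(self, info, viewlayer_render):
        self.info = info
        self.viewlayer_render = viewlayer_render

gpl = [
    FakeLayer('CO_character', 'VL_char'),
    FakeLayer('CO_character_fill', 'VL_char'),
    FakeLayer('LN_props', 'VL_props'),
]

# Adjacent grouping, as in the operator above
vl_dicts = {vl_name: list(grp)
            for vl_name, grp in groupby(gpl, key=lambda l: l.viewlayer_render)}

# Order-independent variant: sort by the same key before grouping
by_vl = sorted(gpl, key=lambda l: l.viewlayer_render)
vl_dicts_sorted = {vl_name: list(grp)
                   for vl_name, grp in groupby(by_vl, key=lambda l: l.viewlayer_render)}

for name, layers in vl_dicts_sorted.items():
    print(name, [l.info for l in layers])
```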
@@ -34,7 +34,7 @@ class GPEXP_OT_render_all_scenes(bpy.types.Operator):
            if all(x.mute for x in outfiles):
                print(f'\n -!-> Skip {scn.name}, All output file are muted')
                continue

            print(f'\n --> Rendering {scn.name}')
            # bpy.context.window.scene = scn
            bpy.ops.render.render(animation=True, scene=scn.name)

@@ -61,13 +61,13 @@ def scene_render_popup_ui(self, context):
        scn = bpy.data.scenes.get(si.name)
        # compare to existing Rlayers (overkill ?)
        # vls = [scn.view_layers.get(n.layer) for n in rlayers_nodes if scn.view_layers.get(n.layer)]

        vls = [vl for vl in scn.view_layers if vl.name != 'View Layer']

        if vls:
            exclude_count = len([vl for vl in vls if not vl.use])
            if exclude_count:
                row.label(text=f'{exclude_count}/{len(vls)} excluded viewlayers', icon='ERROR')

        if not scn.use_nodes:
            row.label(text='use_node deactivated', icon='ERROR')

@@ -77,13 +77,13 @@ def scene_render_popup_ui(self, context):
        if not outfiles:
            row.label(text='No output files nodes', icon='ERROR')
            continue

        outnum = len(outfiles)
        muted = len([x for x in outfiles if x.mute])
        if muted == outnum:
            row.label(text='All output file are muted', icon='ERROR')
            continue

        elif muted:
            row.label(text=f'{muted}/{outnum} output file muted', icon='ERROR')
            continue

@@ -174,7 +174,7 @@ class GPEXP_OT_bg_render_script_selected_scene(bpy.types.Operator):

    def draw(self, context):
        scene_render_popup_ui(self, context)

    def execute(self, context):
        d = fn.export_crop_to_json()

@@ -182,7 +182,7 @@ class GPEXP_OT_bg_render_script_selected_scene(bpy.types.Operator):
            print('No crop to export, border disabled in all scenes')

        platform = sys.platform

        blend = Path(bpy.data.filepath)

        scn_to_render = [si.name for si in context.scene.scenes_list if si.select]

@@ -210,11 +210,11 @@ class GPEXP_OT_bg_render_script_selected_scene(bpy.types.Operator):
            else: # Unix : point same for each user
                cmd = f'"{bin_path}" -b "{bpy.data.filepath}" -S "{scn_name}" -a'
            script_text.append(cmd)

        script_text.append('echo --- END BATCH ---')
        script_text.append('pause')

        with batch_file.open('w') as fd:
            fd.write('\n'.join(script_text))

@@ -230,7 +230,7 @@ GPEXP_OT_render_all_scenes,
    GPEXP_OT_bg_render_script_selected_scene,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)
    bpy.types.Scene.scenes_list = bpy.props.CollectionProperty(type=GPEXP_scene_select_prop)

@@ -238,5 +238,5 @@ def register():
def unregister():
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)

    del bpy.types.Scene.scenes_list
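The batch script above shells out to Blender's command line; the flags it composes are standard ones: `-b` runs without UI, `-S <name>` selects the scene, `-a` renders the full animation. A hedged sketch of assembling such a script with pathlib — paths and scene names are placeholders, not the addon's values:

```python
import sys
from pathlib import Path

blend_file = Path('/path/to/shot.blend')   # placeholder path
bin_path = Path('/path/to/blender')        # placeholder; inside Blender, bpy.app.binary_path holds this
scenes = ['Render_char', 'Render_props']   # placeholder scene names

lines = []
for scn_name in scenes:
    # Quote paths so spaces survive the shell
    lines.append(f'"{bin_path}" -b "{blend_file}" -S "{scn_name}" -a')
lines.append('echo --- END BATCH ---')

ext = '.bat' if sys.platform.startswith('win') else '.sh'
batch_file = blend_file.with_suffix(ext)
batch_file.write_text('\n'.join(lines))
print(f'Wrote {batch_file}')
```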
@@ -18,21 +18,21 @@ class GPEXP_OT_render_scene_switch(bpy.types.Operator):
        if len(scenes) < 2:
            self.report({'ERROR'},'No other scene to go to')
            return {"CANCELLED"}

        if context.scene.name == 'Render':
            scn = scenes.get('Scene')
            if not scn: # get the next available scene
                self.report({'WARNING'},'No scene named "Scene"')
                slist = [s.name for s in scenes]
                scn = scenes[(slist.index(bpy.context.scene.name) + 1) % len(scenes)]

        else:
            scn = scenes.get('Render')
            if not scn:
                self.report({'ERROR'},'No "Render" scene yet')
                return {"CANCELLED"}

        self.report({'INFO'},f'Switched to scene "{scn.name}"')
        bpy.context.window.scene = scn
        return {"FINISHED"}

@@ -50,7 +50,7 @@ class GPEXP_OT_swap_render_cams(bpy.types.Operator):
        if not anim_cam or not bg_cam:
            self.report({'ERROR'}, 'anim_cam or bg_cam is missing')
            return {"CANCELLED"}

        cam = context.scene.camera
        if not cam:
            context.scene.camera = anim_cam

@@ -105,7 +105,7 @@ GPEXP_OT_swap_render_cams,
    GPEXP_OT_set_gp_render_workspace,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)
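The fallback branch above picks "the next available scene" by index; the modulo keeps the lookup from running off the end of the scene list. The same trick in isolation:

```python
import bpy

def next_scene(current):
    """Return the scene after `current`, wrapping around at the end."""
    names = [s.name for s in bpy.data.scenes]
    return bpy.data.scenes[(names.index(current.name) + 1) % len(names)]

# Usage (e.g. from the Python console):
# bpy.context.window.scene = next_scene(bpy.context.scene)
```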
@@ -41,14 +41,14 @@ def check_outname(ob, l):
    if fo_node.type != 'OUTPUT_FILE':
        print(f'Skip {vl_name}: node is not an output_file {fo_node.name}')
        return

    # fo_socket.name isn't right, have to iterate in paths
    idx = [i for i in fo_node.inputs].index(fo_socket)
    subpath = fo_node.file_slots[idx].path
    # fp = Path(fo_node.base_path.rstrip('/')) / subpath
    # fp = Path(bpy.path.abspath(str(fp)).rstrip("/")) # abspath on disk
    outname = subpath.split('/')[0] # folder name on disk

    return outname

class GPEXP_OT_export_infos_for_compo(bpy.types.Operator):

@@ -93,7 +93,7 @@ class GPEXP_OT_export_infos_for_compo(bpy.types.Operator):
                # skip non rendered layers
                if l.hide:
                    continue

                if l.info.startswith('MA_'):
                    # No point in storing information of masking layers...
                    continue

@@ -101,7 +101,7 @@ class GPEXP_OT_export_infos_for_compo(bpy.types.Operator):
                ## Can't check viewlayers and final fileout name if Render scene not even created...
                """ if not l.viewlayer_render or l.viewlayer_render == 'exclude':
                    continue

                fo_name = check_outname(o, l) # get name used for output file folder (same in AE)
                if not fo_name:
                    print(f'! Could not found fileout name for {o.name} > {l.info}')

@@ -112,7 +112,7 @@ class GPEXP_OT_export_infos_for_compo(bpy.types.Operator):
                ## Check opacity, blend mode
                if l.opacity < 1.0:
                    ldic['opacity'] = l.opacity

                if l.blend_mode != 'REGULAR':
                    ldic['blend_mode'] = l.blend_mode
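`check_outname` only keeps the first segment of the file slot path, because that folder name is what the compositing side (After Effects, per the comment above) keys on. Resolving the same slot to an absolute directory on disk, as the commented-out lines hint at, would look roughly like this — node and slot lookups are illustrative:

```python
import bpy
from pathlib import Path

def slot_directory(fo_node, slot_index=0):
    """Absolute directory a File Output slot writes into (sketch)."""
    subpath = fo_node.file_slots[slot_index].path        # e.g. 'char_co/char_co_'
    raw = Path(fo_node.base_path.rstrip('/')) / subpath
    # bpy.path.abspath resolves a leading '//' relative to the .blend file
    return Path(bpy.path.abspath(str(raw))).parent

# fo = bpy.context.scene.node_tree.nodes.get('OUT_char')  # hypothetical node name
# print(slot_directory(fo))
```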
@@ -143,7 +143,7 @@ class GPEXP_OT_export_infos_for_compo(bpy.types.Operator):

                if multi_mask:
                    ldic['masks'] = multi_mask

                ## add to full dic
                if ldic:
                    # add source object ? might be usefull to pin point layer

@@ -167,25 +167,25 @@ class GPEXP_OT_layers_state(bpy.types.Operator):
    bl_description = "Display state of layer that migh need adjustement"
    bl_options = {"REGISTER"} # , "UNDO"

    # clear_unused_view_layers :BoolProperty(name="Clear unused view layers",
    # description="Delete view layer that aren't used in the nodetree anymore",
    # default=True)

    all_objects : BoolProperty(name='On All Object',
        default=True, description='On All object, else use selected objects') # , options={'SKIP_SAVE'}

    set_full_opacity : BoolProperty(name='Set Full Opacity',
        default=True, description='Check/Set full opacity') # , options={'SKIP_SAVE'}

    set_use_lights : BoolProperty(name='Disable Use Light',
        default=True, description='Check/Set use lights disabling') # , options={'SKIP_SAVE'}

    set_blend_mode : BoolProperty(name='Set Regular Blend Mode',
        default=True, description='Check/Set blend mode to regular') # , options={'SKIP_SAVE'}

    clear_frame_out_of_range : BoolProperty(name='Clear Frames Out Of Scene Range',
        default=False, description='Delete frames that before scene start and after scene end range\nWith a tolerance of one frame to avoid problem\nAffect all layers)') # , options={'SKIP_SAVE'}

    opacity_exclude_list : StringProperty(name='Skip',
        default='MA', description='Skip prefixes from this list when changing opacity\nSeparate multiple value with a comma (ex: MA,IN)') # , options={'SKIP_SAVE'}

@@ -208,7 +208,7 @@ class GPEXP_OT_layers_state(bpy.types.Operator):
        layout = self.layout
        layout.prop(self, 'all_objects')
        total = len([o for o in context.scene.objects if o.type == 'GPENCIL'])

        target_num = total if self.all_objects else len([o for o in context.selected_objects if o.type == 'GPENCIL'])
        layout.label(text=f'{target_num}/{total} targeted GP')
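The `opacity_exclude_list` string above ('MA', or 'MA,IN') is meant to be split on commas and matched against layer-name prefixes. A tiny sketch of that filter, with made-up layer names:

```python
def excluded(layer_name, exclude_list='MA,IN'):
    """True if the layer name starts with one of the excluded prefixes."""
    prefixes = [p.strip() for p in exclude_list.split(',') if p.strip()]
    return any(layer_name.startswith(f'{p}_') for p in prefixes)

print(excluded('MA_character_mask'))   # True
print(excluded('CO_character'))        # False
```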
@@ -224,7 +224,7 @@ class GPEXP_OT_layers_state(bpy.types.Operator):
        layout.prop(self, 'set_blend_mode')
        # layout.prop(self, 'clear_unused_view_layers')

    def execute(self, context):
        if self.all_objects:
            pool = [o for o in context.scene.objects if o.type == 'GPENCIL' and fn.is_valid_name(o.name)]
        else:

@@ -235,7 +235,7 @@ class GPEXP_OT_layers_state(bpy.types.Operator):
        for ob in pool:
            changes.append(f'>> {ob.name}')
            layers = ob.data.layers

            if self.clear_frame_out_of_range:
                ct = fn.clear_frame_out_of_range(ob, verbose=False)
                if ct:

@@ -243,11 +243,11 @@ class GPEXP_OT_layers_state(bpy.types.Operator):

            for l in layers:
                used = False

                ## mask check
                # if l.mask_layers:
                # print(f'-> masks')
                # state = '' if l.use_mask_layer else ' (disabled)'
                # print(f'{ob.name} > {l.info}{state}:')
                # used = True
                # for ml in l.mask_layers:

@@ -263,7 +263,7 @@ class GPEXP_OT_layers_state(bpy.types.Operator):
                        print(f'Skipped layer : {l.info}')
                    else:
                        full_opacity_state = '' if self.set_full_opacity else ' (check only)'
                        mess = f'{l.info} : opacity {l.opacity:.2f} >> 1.0{full_opacity_state}'
                        print(mess)
                        changes.append(mess)
                        if self.set_full_opacity:

@@ -319,7 +319,7 @@ class GPEXP_OT_layers_state(bpy.types.Operator):
        # if not render:
        # print('SKIP, no Render scene')
        # return {"CANCELLED"}

        return {"FINISHED"}

class GPEXP_OT_lower_layers_name(bpy.types.Operator):

@@ -334,13 +334,13 @@ class GPEXP_OT_lower_layers_name(bpy.types.Operator):

    all_objects : BoolProperty(name='On All Object',
        default=True, description='On All object, else use selected objects') # , options={'SKIP_SAVE'}

    object_name : BoolProperty(name='Normalize Object Name',
        default=True, description='Make the object name lowercase') # , options={'SKIP_SAVE'}

    layer_name : BoolProperty(name='Normalize Layers Names',
        default=True, description='Make the layers name lowercase') # , options={'SKIP_SAVE'}

    # dash_to_undescore : BoolProperty(name='Dash To Underscore',
    # default=True, description='Make the layers name lowercase') # , options={'SKIP_SAVE'}

@@ -359,7 +359,7 @@ class GPEXP_OT_lower_layers_name(bpy.types.Operator):
            gp_ct = len([o for o in context.scene.objects if o.type == 'GPENCIL' and fn.is_valid_name(o.name)])
        else:
            gp_ct = len([o for o in context.selected_objects if o.type == 'GPENCIL'])

        layout.label(text=f'{gp_ct} to lower-case')
        layout.separator()
        layout.label(text=f'Choose what to rename:')
@@ -407,10 +407,10 @@ class GPEXP_OT_auto_number_object(bpy.types.Operator):

    all_objects : BoolProperty(name='On All GP Object',
        default=True, description='On All object, else use selected Grease Pencil objects') # , options={'SKIP_SAVE'}

    rename_data : BoolProperty(name='Rename Gpencil Data',
        default=True, description='Rename Also the Grease Pencil data using same name as object') # , options={'SKIP_SAVE'}

    delete : BoolProperty(default=False, options={'SKIP_SAVE'})

    def invoke(self, context, event):

@@ -425,7 +425,7 @@ class GPEXP_OT_auto_number_object(bpy.types.Operator):
                    o.name = o.name[4:]
                    ct += 1
            self.report({'INFO'}, f'{ct}/{len(gps)} number prefix removed from object names')

            return {"FINISHED"}

        return context.window_manager.invoke_props_dialog(self)

@@ -437,7 +437,7 @@ class GPEXP_OT_auto_number_object(bpy.types.Operator):
            gp_ct = len([o for o in context.scene.objects if o.type == 'GPENCIL' and fn.is_valid_name(o.name)])
        else:
            gp_ct = len([o for o in context.selected_objects if o.type == 'GPENCIL'])

        layout.prop(self, 'rename_data')
        layout.label(text=f'{gp_ct} objects to renumber')
        if not gp_ct:

@@ -459,8 +459,8 @@ class GPEXP_OT_auto_number_object(bpy.types.Operator):

        for i, o in reversed_enumerate(pool):
            if o.show_in_front:
                fronts.append(pool.pop(i))

        cam_loc = context.scene.camera.matrix_world.to_translation()

        # filter by distance to camera object (considering origins)

@@ -473,15 +473,15 @@ class GPEXP_OT_auto_number_object(bpy.types.Operator):
        regex_num = re.compile(r'^(\d{3})_')
        for o in pool:
            renum = regex_num.search(o.name)

            if not renum:
                o.name = f'{str(ct).zfill(3)}_{o.name}'

            else:
                ## either replace or leave untouched
                # continue
                o.name = f'{str(ct).zfill(3)}_{o.name[4:]}'

            ct += 10
            if self.rename_data and o.name != o.data.name:
                o.data.name = o.name

@@ -513,17 +513,17 @@ class GPEXP_OT_check_masks(bpy.types.Operator):
                if not obj_stat in changes:
                    changes.append(obj_stat)
                    print(obj_stat)

                hide_state = ' (hided)' if l.hide else ''
                text = f' {l.info}{hide_state}:' # :masks:
                changes.append(text)
                print(text)

                has_masks = False
                for ml in l.mask_layers:
                    # 'hide', 'invert', 'name'
                    h = ' hided' if ml.hide else ''
                    i = ' (inverted)' if ml.invert else ''
                    text = f' - {ml.name}{h}{i}'
                    changes.append(text)
                    print(text)

@@ -532,7 +532,7 @@ class GPEXP_OT_check_masks(bpy.types.Operator):
                if not has_masks:
                    text = 'No masks!'
                    changes.append(text)
                    print(text)
                changes.append('')

        if changes:

@@ -601,10 +601,10 @@ class GPEXP_OT_select_layer_in_comp(bpy.types.Operator):
                print(f'{l.info} -> Select node {n.name}')
                selected.append(n.name)
                n.select = True

        if not infos and not selected:
            self.report({'ERROR'}, 'Nothing selected')
            return {"CANCELLED"}

        infos = infos + [f'-- Selected {len(selected)} nodes --'] + selected
        fn.show_message_box(_message=infos, _title="Selected viewlayer in compo", _icon='INFO')

@@ -620,7 +620,7 @@ GPEXP_OT_check_masks,
    GPEXP_OT_select_layer_in_comp,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)
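The renumbering pass above stamps a three-digit, step-of-ten prefix onto object names after considering distance to the camera (from object origins), so the numbering roughly follows depth. A compact sketch of the same idea — whether the nearest objects get the low numbers is an assumption here, and the function name is illustrative:

```python
import re
import bpy

def renumber_by_camera_distance(objects, step=10):
    cam_loc = bpy.context.scene.camera.matrix_world.to_translation()
    # Assumed ordering: closest to camera first, judged from object origins
    pool = sorted(objects, key=lambda o: (o.matrix_world.to_translation() - cam_loc).length)

    re_num = re.compile(r'^\d{3}_')
    ct = step
    for ob in pool:
        base = re_num.sub('', ob.name)        # strip an existing '000_' prefix if any
        ob.name = f'{str(ct).zfill(3)}_{base}'
        ct += step

# renumber_by_camera_distance([o for o in bpy.context.scene.objects if o.type == 'GPENCIL'])
```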
__init__.py  (12)

@@ -6,8 +6,8 @@ bl_info = {
    "blender": (2, 93, 0),
    "location": "View3D",
    "warning": "",
    "doc_url": "https://gitlab.com/autour-de-minuit/blender/gp_render",
    "tracker_url": "https://gitlab.com/autour-de-minuit/blender/gp_render/-/issues",
    "category": "Object" }

@@ -52,7 +52,7 @@ bl_modules = (
)

def update_scene_aa(context, scene):
    scene_aa(toggle=bpy.context.scene.use_aa)

import bpy

@@ -65,12 +65,12 @@ def register():

    # bpy.types.Scene.pgroup_name = bpy.props.PointerProperty(type = PROJ_PGT_settings)
    bpy.types.Scene.use_aa = bpy.props.BoolProperty(
        name='Use Native Anti Aliasing',
        default=True,
        description='\
Should be Off only if tree contains a merge_NG or alpha-over-combined renderlayers.\n\
Auto-set to Off when using node merge button\n\
Toggle: AA settings of and muting AA nested-nodegroup',
        update=update_scene_aa)

def unregister():
@@ -14,7 +14,7 @@ def setup_gp_render_workspace():
            for space in area.spaces:
                if space.type == 'VIEW_3D':
                    space.region_3d.view_perspective = 'CAMERA'

def register():
    bpy.app.handlers.load_post.append(setup_gp_render_workspace)
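`setup_gp_render_workspace` is appended to `load_post`, so it runs after each file load. Handlers that should survive loading a new file are usually tagged with `bpy.app.handlers.persistent`; a minimal sketch of that registration pattern (the handler body here is illustrative):

```python
import bpy
from bpy.app.handlers import persistent

@persistent
def example_load_post(dummy):
    # Runs after a .blend is loaded; keep it cheap and defensive
    print(f'Loaded: {bpy.data.filepath or "<unsaved>"}')

def register():
    if example_load_post not in bpy.app.handlers.load_post:
        bpy.app.handlers.load_post.append(example_load_post)

def unregister():
    if example_load_post in bpy.app.handlers.load_post:
        bpy.app.handlers.load_post.remove(example_load_post)
```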
174
fn.py
174
fn.py
|
@ -20,11 +20,11 @@ def is_valid_name(name):
|
||||||
|
|
||||||
if name.startswith('.'):
|
if name.startswith('.'):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
## FIXME: /!\ "note" as an exclude word is not good practice, temporary fix
|
## FIXME: /!\ "note" as an exclude word is not good practice, temporary fix
|
||||||
if name.lower() == 'note':
|
if name.lower() == 'note':
|
||||||
return False
|
return False
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
### -- node basic
|
### -- node basic
|
||||||
|
@ -38,7 +38,7 @@ def create_node(type, tree=None, **kargs):
|
||||||
node = tree.nodes.new(type)
|
node = tree.nodes.new(type)
|
||||||
for k,v in kargs.items():
|
for k,v in kargs.items():
|
||||||
setattr(node, k, v)
|
setattr(node, k, v)
|
||||||
|
|
||||||
return node
|
return node
|
||||||
|
|
||||||
def new_aa_node(tree, **kargs):
|
def new_aa_node(tree, **kargs):
|
||||||
|
@ -62,7 +62,7 @@ def create_aa_nodegroup(tree):
|
||||||
|
|
||||||
sep = create_node('CompositorNodeSepRGBA', tree=ngroup, location=(-150,0))
|
sep = create_node('CompositorNodeSepRGBA', tree=ngroup, location=(-150,0))
|
||||||
comb = create_node('CompositorNodeCombRGBA', tree=ngroup, location=(350,25))
|
comb = create_node('CompositorNodeCombRGBA', tree=ngroup, location=(350,25))
|
||||||
|
|
||||||
# in AA
|
# in AA
|
||||||
# ngroup.links.new(comb.outputs[0], ng_out.inputs[0]) # <- connect without out AA
|
# ngroup.links.new(comb.outputs[0], ng_out.inputs[0]) # <- connect without out AA
|
||||||
aa = new_aa_node(ngroup, location=(-400, 0))
|
aa = new_aa_node(ngroup, location=(-400, 0))
|
||||||
|
@ -73,24 +73,24 @@ def create_aa_nodegroup(tree):
|
||||||
# ngroup.links.new(ng_in.outputs[0], sep.inputs[0])
|
# ngroup.links.new(ng_in.outputs[0], sep.inputs[0])
|
||||||
for i in range(3):
|
for i in range(3):
|
||||||
ngroup.links.new(sep.outputs[i], comb.inputs[i])
|
ngroup.links.new(sep.outputs[i], comb.inputs[i])
|
||||||
|
|
||||||
# alpha AA
|
# alpha AA
|
||||||
alpha_aa = new_aa_node(ngroup, location=(100,-150))
|
alpha_aa = new_aa_node(ngroup, location=(100,-150))
|
||||||
ngroup.links.new(sep.outputs[3], alpha_aa.inputs[0])
|
ngroup.links.new(sep.outputs[3], alpha_aa.inputs[0])
|
||||||
ngroup.links.new(alpha_aa.outputs[0], comb.inputs[3])
|
ngroup.links.new(alpha_aa.outputs[0], comb.inputs[3])
|
||||||
|
|
||||||
ngroup.links.new(comb.outputs[0], ng_out.inputs[0])
|
ngroup.links.new(comb.outputs[0], ng_out.inputs[0])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
ng = create_node('CompositorNodeGroup', tree=tree)
|
ng = create_node('CompositorNodeGroup', tree=tree)
|
||||||
ng.node_tree = ngroup
|
ng.node_tree = ngroup
|
||||||
ng.name = ngroup.name
|
ng.name = ngroup.name
|
||||||
ng.hide=True
|
ng.hide=True
|
||||||
return ng
|
return ng
|
||||||
|
|
||||||
|
|
||||||
## -- object and scene settings
|
|
||||||
|
## -- object and scene settings
|
||||||
|
|
||||||
def activate_workspace(name='', context=None):
|
def activate_workspace(name='', context=None):
|
||||||
if not name:
|
if not name:
|
||||||
|
@ -105,11 +105,11 @@ def activate_workspace(name='', context=None):
|
||||||
if (wkspace := bpy.data.workspaces.get(name)):
|
if (wkspace := bpy.data.workspaces.get(name)):
|
||||||
context.window.workspace = wkspace
|
context.window.workspace = wkspace
|
||||||
return True
|
return True
|
||||||
|
|
||||||
# Same name with spaces as underscore
|
# Same name with spaces as underscore
|
||||||
dir_name = name.replace(' ', '_')
|
dir_name = name.replace(' ', '_')
|
||||||
filepath = Path(__file__).parent / 'app_templates' / dir_name / 'startup.blend'
|
filepath = Path(__file__).parent / 'app_templates' / dir_name / 'startup.blend'
|
||||||
|
|
||||||
ret = bpy.ops.workspace.append_activate(idname=name, filepath=str(filepath))
|
ret = bpy.ops.workspace.append_activate(idname=name, filepath=str(filepath))
|
||||||
if ret != {'FINISHED'}:
|
if ret != {'FINISHED'}:
|
||||||
print(f'Could not found {name} at {filepath}')
|
print(f'Could not found {name} at {filepath}')
|
||||||
|
@ -119,7 +119,7 @@ def activate_workspace(name='', context=None):
|
||||||
|
|
||||||
def copy_settings(obj_a, obj_b):
|
def copy_settings(obj_a, obj_b):
|
||||||
exclusion = ['bl_rna', 'id_data', 'identifier','name_property','rna_type','properties', 'stamp_note_text','use_stamp_note',
|
exclusion = ['bl_rna', 'id_data', 'identifier','name_property','rna_type','properties', 'stamp_note_text','use_stamp_note',
|
||||||
'settingsFilePath', 'settingsStamp', 'select', 'matrix_local', 'matrix_parent_inverse',
|
'settingsFilePath', 'settingsStamp', 'select', 'matrix_local', 'matrix_parent_inverse',
|
||||||
'matrix_basis','location','rotation_euler', 'rotation_quaternion', 'rotation_axis_angle', 'scale']
|
'matrix_basis','location','rotation_euler', 'rotation_quaternion', 'rotation_axis_angle', 'scale']
|
||||||
|
|
||||||
for attr in dir(obj_a):
|
for attr in dir(obj_a):
|
||||||
|
@ -169,7 +169,7 @@ def set_settings(scene=None, aa=True):
|
||||||
'''aa == using native AA, else disable scene AA'''
|
'''aa == using native AA, else disable scene AA'''
|
||||||
if not scene:
|
if not scene:
|
||||||
scene = bpy.context.scene
|
scene = bpy.context.scene
|
||||||
|
|
||||||
# specify scene settings for these kind of render
|
# specify scene settings for these kind of render
|
||||||
set_scene_aa_settings(scene=scene, aa=aa)
|
set_scene_aa_settings(scene=scene, aa=aa)
|
||||||
|
|
||||||
|
@ -177,7 +177,7 @@ def set_settings(scene=None, aa=True):
|
||||||
scene.render.use_compositing = True
|
scene.render.use_compositing = True
|
||||||
scene.render.use_sequencer = False
|
scene.render.use_sequencer = False
|
||||||
scene.view_settings.view_transform = 'Standard'
|
scene.view_settings.view_transform = 'Standard'
|
||||||
|
|
||||||
scene.render.resolution_percentage = 100
|
scene.render.resolution_percentage = 100
|
||||||
|
|
||||||
# output (fast write settings since this is just to delete afterwards...)
|
# output (fast write settings since this is just to delete afterwards...)
|
||||||
|
@ -192,7 +192,7 @@ def scene_aa(scene=None, toggle=True):
|
||||||
scene=bpy.context.scene
|
scene=bpy.context.scene
|
||||||
|
|
||||||
# enable/disable native anti-alias on active scene
|
# enable/disable native anti-alias on active scene
|
||||||
set_scene_aa_settings(scene=scene, aa=toggle)
|
set_scene_aa_settings(scene=scene, aa=toggle)
|
||||||
# mute/unmute AA nodegroups
|
# mute/unmute AA nodegroups
|
||||||
for n in scene.node_tree.nodes:
|
for n in scene.node_tree.nodes:
|
||||||
if n.type == 'GROUP' and n.name.startswith('NG_'):
|
if n.type == 'GROUP' and n.name.startswith('NG_'):
|
||||||
|
@ -216,7 +216,7 @@ def new_scene_from(name, src_scn=None, regen=True, crop=True, link_cam=True, lin
|
||||||
for attr in ['frame_start', 'frame_end', 'frame_current', 'camera', 'world']:
|
for attr in ['frame_start', 'frame_end', 'frame_current', 'camera', 'world']:
|
||||||
setattr(scn, attr, getattr(src_scn, attr))
|
setattr(scn, attr, getattr(src_scn, attr))
|
||||||
copy_settings(src_scn.render, scn.render)
|
copy_settings(src_scn.render, scn.render)
|
||||||
|
|
||||||
## link cameras (and lights ?)
|
## link cameras (and lights ?)
|
||||||
for ob in src_scn.objects:
|
for ob in src_scn.objects:
|
||||||
if link_cam and ob.type == 'CAMERA':
|
if link_cam and ob.type == 'CAMERA':
|
||||||
|
@ -226,7 +226,7 @@ def new_scene_from(name, src_scn=None, regen=True, crop=True, link_cam=True, lin
|
||||||
|
|
||||||
# set adapted render settings
|
# set adapted render settings
|
||||||
set_settings(scn)
|
set_settings(scn)
|
||||||
|
|
||||||
if crop:
|
if crop:
|
||||||
scn.render.use_border = True
|
scn.render.use_border = True
|
||||||
scn.render.use_crop_to_border = True
|
scn.render.use_crop_to_border = True
|
||||||
|
@ -239,7 +239,7 @@ def get_render_scene():
|
||||||
if render_scn:
|
if render_scn:
|
||||||
return render_scn
|
return render_scn
|
||||||
|
|
||||||
## -- Create render scene
|
## -- Create render scene
|
||||||
current = bpy.context.scene
|
current = bpy.context.scene
|
||||||
|
|
||||||
## With data
|
## With data
|
||||||
|
@ -256,14 +256,14 @@ def get_render_scene():
|
||||||
for attr in ['frame_start', 'frame_end', 'frame_current', 'camera', 'world']:
|
for attr in ['frame_start', 'frame_end', 'frame_current', 'camera', 'world']:
|
||||||
setattr(render_scn, attr, getattr(current, attr))
|
setattr(render_scn, attr, getattr(current, attr))
|
||||||
copy_settings(current.render, render_scn.render)
|
copy_settings(current.render, render_scn.render)
|
||||||
|
|
||||||
## link cameras (and lights ?)
|
## link cameras (and lights ?)
|
||||||
for ob in current.objects:
|
for ob in current.objects:
|
||||||
if ob.type in ('CAMERA', 'LIGHT'):
|
if ob.type in ('CAMERA', 'LIGHT'):
|
||||||
render_scn.collection.objects.link(ob)
|
render_scn.collection.objects.link(ob)
|
||||||
|
|
||||||
render_scn.use_nodes = True
|
render_scn.use_nodes = True
|
||||||
|
|
||||||
## Clear node tree (initial view layer stuff)
|
## Clear node tree (initial view layer stuff)
|
||||||
render_scn.node_tree.nodes.clear()
|
render_scn.node_tree.nodes.clear()
|
||||||
# for n in reversed(render_scn.node_tree.nodes):
|
# for n in reversed(render_scn.node_tree.nodes):
|
||||||
|
@ -326,15 +326,15 @@ def get_frame_transform(f, node_tree=None):
|
||||||
|
|
||||||
childs = [n for n in node_tree.nodes if n.parent == f]
|
childs = [n for n in node_tree.nodes if n.parent == f]
|
||||||
# real_locs = [f.location + n.location for n in childs]
|
# real_locs = [f.location + n.location for n in childs]
|
||||||
|
|
||||||
xs = [n.location.x for n in childs] + [n.location.x + n.dimensions.x for n in childs]
|
xs = [n.location.x for n in childs] + [n.location.x + n.dimensions.x for n in childs]
|
||||||
ys = [n.location.y for n in childs] + [n.location.y - n.dimensions.y for n in childs]
|
ys = [n.location.y for n in childs] + [n.location.y - n.dimensions.y for n in childs]
|
||||||
xs.sort(key=lambda loc: loc) # x val : ascending
|
xs.sort(key=lambda loc: loc) # x val : ascending
|
||||||
ys.sort(key=lambda loc: loc) # ascending # , reversed=True) # y val : descending
|
ys.sort(key=lambda loc: loc) # ascending # , reversed=True) # y val : descending
|
||||||
|
|
||||||
loc = Vector((min(xs), max(ys)))
|
loc = Vector((min(xs), max(ys)))
|
||||||
dim = Vector((max(xs) - min(xs) + 60, max(ys) - min(ys) + 60))
|
dim = Vector((max(xs) - min(xs) + 60, max(ys) - min(ys) + 60))
|
||||||
|
|
||||||
return loc, dim
|
return loc, dim
|
||||||
|
|
||||||
|
|
||||||
|
@ -359,7 +359,7 @@ def bbox(f, frames):
|
||||||
ys += [loc.y, loc.y - n.dimensions.y] # - (n.dimensions.y/get_dpi_factor())
|
ys += [loc.y, loc.y - n.dimensions.y] # - (n.dimensions.y/get_dpi_factor())
|
||||||
|
|
||||||
|
|
||||||
# margin ~= 30
|
# margin ~= 30
|
||||||
# return xs and ys
|
# return xs and ys
|
||||||
return [min(xs)-30, max(xs)+30], [min(ys)-30, max(ys)+30]
|
return [min(xs)-30, max(xs)+30], [min(ys)-30, max(ys)+30]
|
||||||
|
|
||||||
|
@ -376,15 +376,15 @@ def get_frames_bbox(node_tree):
|
||||||
continue
|
continue
|
||||||
# also contains frames
|
# also contains frames
|
||||||
frames[n.parent].append(n)
|
frames[n.parent].append(n)
|
||||||
|
|
||||||
# Dic for bbox coord
|
# Dic for bbox coord
|
||||||
for f, nodes in frames.items():
|
for f, nodes in frames.items():
|
||||||
if f.parent:
|
if f.parent:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
xs, ys = bbox(f, frames)
|
xs, ys = bbox(f, frames)
|
||||||
# xs, ys = bbox(nodes, frames)
|
# xs, ys = bbox(nodes, frames)
|
||||||
|
|
||||||
## returning: list of corner coords
|
## returning: list of corner coords
|
||||||
# coords = [
|
# coords = [
|
||||||
# Vector((xs[0], ys[1])),
|
# Vector((xs[0], ys[1])),
|
||||||
|
@ -411,7 +411,7 @@ def merge_gplayer_viewlayers(ob, act=None, layers=None):
|
||||||
rd_scn = bpy.data.scenes.get('Render')
|
rd_scn = bpy.data.scenes.get('Render')
|
||||||
if not rd_scn:
|
if not rd_scn:
|
||||||
return ({'ERROR'}, 'Viewlayers needs to be generated first!')
|
return ({'ERROR'}, 'Viewlayers needs to be generated first!')
|
||||||
|
|
||||||
if not act.viewlayer_render:
|
if not act.viewlayer_render:
|
||||||
return ({'ERROR'}, f'Active layer {act.info} has no viewlayer assigned')
|
return ({'ERROR'}, f'Active layer {act.info} has no viewlayer assigned')
|
||||||
|
|
||||||
|
@ -433,14 +433,14 @@ def merge_gplayer_viewlayers(ob, act=None, layers=None):
|
||||||
sockout = grp.outputs.get(sockin.name)
|
sockout = grp.outputs.get(sockin.name)
|
||||||
if not sockout:
|
if not sockout:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
for grplink in sockout.links:
|
for grplink in sockout.links:
|
||||||
if grplink.to_node.type != 'OUTPUT_FILE':
|
if grplink.to_node.type != 'OUTPUT_FILE':
|
||||||
continue
|
continue
|
||||||
fo_socket = grplink.to_socket
|
fo_socket = grplink.to_socket
|
||||||
fo = grplink.to_node
|
fo = grplink.to_node
|
||||||
fo.file_slots.remove(fo_socket)
|
fo.file_slots.remove(fo_socket)
|
||||||
|
|
||||||
# remove input and output from group
|
# remove input and output from group
|
||||||
# grp.inputs.remove(sockin) # do not clear inside !!
|
# grp.inputs.remove(sockin) # do not clear inside !!
|
||||||
# grp.outputs.remove(sockout) # do not clear inside !!
|
# grp.outputs.remove(sockout) # do not clear inside !!
|
||||||
|
@ -453,19 +453,19 @@ def merge_gplayer_viewlayers(ob, act=None, layers=None):
|
||||||
if grp.outputs[i].name == sockout.name:
|
if grp.outputs[i].name == sockout.name:
|
||||||
ngroup.outputs.remove(ngroup.outputs[i])
|
ngroup.outputs.remove(ngroup.outputs[i])
|
||||||
break
|
break
|
||||||
|
|
||||||
# remove render_layer node
|
# remove render_layer node
|
||||||
rd_scn.node_tree.nodes.remove(n)
|
rd_scn.node_tree.nodes.remove(n)
|
||||||
|
|
||||||
# assign view layer from active to selected
|
# assign view layer from active to selected
|
||||||
for l in layers:
|
for l in layers:
|
||||||
l.viewlayer_render = act.viewlayer_render
|
l.viewlayer_render = act.viewlayer_render
|
||||||
|
|
||||||
## delete unused_vl
|
## delete unused_vl
|
||||||
|
|
||||||
# used_vl_name = [n.layer for n in rd_scn.node_tree.nodes if n.type == 'R_LAYERS' and n.layer]
|
# used_vl_name = [n.layer for n in rd_scn.node_tree.nodes if n.type == 'R_LAYERS' and n.layer]
|
||||||
for vl in vls:
|
for vl in vls:
|
||||||
rd_scn.view_layers.remove(vl)
|
rd_scn.view_layers.remove(vl)
|
||||||
# if not vl.name in used_vl_name:
|
# if not vl.name in used_vl_name:
|
||||||
# rd_scn.view_layers.remove(vl)
|
# rd_scn.view_layers.remove(vl)
|
||||||
|
|
||||||
|
@ -478,13 +478,13 @@ def group_adjacent_layer_prefix_rlayer(ob, excluded_prefix=[], first_name=True):
|
||||||
|
|
||||||
from itertools import groupby
|
from itertools import groupby
|
||||||
re_prefix = re.compile(r'^([A-Z]{2})_')
|
re_prefix = re.compile(r'^([A-Z]{2})_')
|
||||||
|
|
||||||
if isinstance(excluded_prefix, str):
|
if isinstance(excluded_prefix, str):
|
||||||
excluded_prefix = [p.strip() for p in excluded_prefix.split(',')]
|
excluded_prefix = [p.strip() for p in excluded_prefix.split(',')]
|
||||||
|
|
||||||
## Create adjacent grp list: [('CO', [layer1, layer2]), ('LN', [layer3, layer4])]
|
## Create adjacent grp list: [('CO', [layer1, layer2]), ('LN', [layer3, layer4])]
|
||||||
adjacent_prefix_groups = [
|
adjacent_prefix_groups = [
|
||||||
(g[0], list(g[1])) for g in
|
(g[0], list(g[1])) for g in
|
||||||
groupby([l for l in ob.data.layers],
|
groupby([l for l in ob.data.layers],
|
||||||
key=lambda l: re_prefix.search(l.info).group(1) if re_prefix.search(l.info) else '')
|
key=lambda l: re_prefix.search(l.info).group(1) if re_prefix.search(l.info) else '')
|
||||||
]
|
]
|
||||||
|
@ -559,7 +559,7 @@ def rearrange_frames(node_tree):
|
||||||
## order the dict by frame.y location
|
## order the dict by frame.y location
|
||||||
frame_d = {key: value for key, value in sorted(frame_d.items(), key=lambda pair: pair[1][0].y - pair[1][1].y, reverse=True)}
|
frame_d = {key: value for key, value in sorted(frame_d.items(), key=lambda pair: pair[1][0].y - pair[1][1].y, reverse=True)}
|
||||||
frames = [[f, v[0], v[1].y] for f, v in frame_d.items()] # [frame_node, real_loc, real dimensions]
|
frames = [[f, v[0], v[1].y] for f, v in frame_d.items()] # [frame_node, real_loc, real dimensions]
|
||||||
|
|
||||||
top = frames[0][1].y # upper node location.y
|
top = frames[0][1].y # upper node location.y
|
||||||
# top = 0 #always start a 0
|
# top = 0 #always start a 0
|
||||||
offset = 0
|
offset = 0
|
||||||
|
@ -571,7 +571,7 @@ def rearrange_frames(node_tree):
|
||||||
f[0].location.y = (f[1].y - f[0].location.y) - offset # avoid offset when recalculating from 0 top
|
f[0].location.y = (f[1].y - f[0].location.y) - offset # avoid offset when recalculating from 0 top
|
||||||
# f[0].location.y = f[1].y - top - offset
|
# f[0].location.y = f[1].y - top - offset
|
||||||
offset += f[2] + 200 # gap
|
offset += f[2] + 200 # gap
|
||||||
|
|
||||||
f[0].update()
|
f[0].update()
|
||||||
|
|
||||||
def reorder_inputs(ng):
|
def reorder_inputs(ng):
|
||||||
|
@ -657,7 +657,7 @@ def all_connected_forward(n, nlist=[]):
|
||||||
else:
|
else:
|
||||||
return nlist
|
return nlist
|
||||||
else:
|
else:
|
||||||
nlist = all_connected_forward(lnk.to_node, nlist)
|
nlist = all_connected_forward(lnk.to_node, nlist)
|
||||||
if n in nlist:
|
if n in nlist:
|
||||||
return nlist
|
return nlist
|
||||||
return nlist + [n]
|
return nlist + [n]
|
||||||
|
@ -692,7 +692,7 @@ def reorder_nodegroup_content(ngroup):
|
||||||
n_thread = all_connected_forward_from_socket(out)
|
n_thread = all_connected_forward_from_socket(out)
|
||||||
if n_thread:
|
if n_thread:
|
||||||
n_threads.append(n_thread)
|
n_threads.append(n_thread)
|
||||||
|
|
||||||
level = grp_in.location.y
|
level = grp_in.location.y
|
||||||
for thread in n_threads:
|
for thread in n_threads:
|
||||||
top = max([n.location.y for n in thread])
|
top = max([n.location.y for n in thread])
|
||||||
|
@ -718,7 +718,7 @@ def clear_nodegroup_content_if_disconnected(ngroup):
|
||||||
continue
|
continue
|
||||||
if not connect_to_group_input(n) and not connect_to_group_output(n): # is disconnected from both side
|
if not connect_to_group_input(n) and not connect_to_group_output(n): # is disconnected from both side
|
||||||
ngroup.nodes.remove(n)
|
ngroup.nodes.remove(n)
|
||||||
|
|
||||||
reorder_nodegroup_content(ngroup)
|
reorder_nodegroup_content(ngroup)
|
||||||
|
|
||||||
def clean_nodegroup_inputs(ng, skip_existing_pass=True):
|
def clean_nodegroup_inputs(ng, skip_existing_pass=True):
|
||||||
|
@ -756,7 +756,7 @@ def bridge_reconnect_nodegroup(ng, socket_name=None):
|
||||||
ngroup.links.new(sockin, aa.inputs[0])
|
ngroup.links.new(sockin, aa.inputs[0])
|
||||||
ngroup.links.new(aa.outputs[0], sockout)
|
ngroup.links.new(aa.outputs[0], sockout)
|
||||||
print(f'{ng.name}: Bridged {sockin.name}')
|
print(f'{ng.name}: Bridged {sockin.name}')
|
||||||
|
|
||||||
|
|
||||||
def random_color(alpha=False):
|
def random_color(alpha=False):
|
||||||
import random
|
import random
|
||||||
|
@ -789,7 +789,7 @@ def nodegroup_merge_inputs(ngroup):
|
||||||
ao = create_node('CompositorNodeAlphaOver', tree=ngroup, location=(x,y), hide=True)
|
ao = create_node('CompositorNodeAlphaOver', tree=ngroup, location=(x,y), hide=True)
|
||||||
ngroup.links.new(prev.outputs[0], ao.inputs[1])
|
ngroup.links.new(prev.outputs[0], ao.inputs[1])
|
||||||
ngroup.links.new(inp, ao.inputs[2])
|
ngroup.links.new(inp, ao.inputs[2])
|
||||||
|
|
||||||
x += offset_x
|
x += offset_x
|
||||||
y += offset_y
|
y += offset_y
|
||||||
prev = ao
|
prev = ao
|
||||||
|
@ -798,7 +798,7 @@ def nodegroup_merge_inputs(ngroup):
|
||||||
aa = create_aa_nodegroup(ngroup) # new_aa_node(ngroup)
|
aa = create_aa_nodegroup(ngroup) # new_aa_node(ngroup)
|
||||||
aa.location = (ao.location.x + 200, ao.location.y)
|
aa.location = (ao.location.x + 200, ao.location.y)
|
||||||
ngroup.links.new(ao.outputs[0], aa.inputs[0]) # node_tree
|
ngroup.links.new(ao.outputs[0], aa.inputs[0]) # node_tree
|
||||||
|
|
||||||
# create one input and link
|
# create one input and link
|
||||||
out = ngroup.outputs.new('NodeSocketColor', ngroup.inputs[0].name)
|
out = ngroup.outputs.new('NodeSocketColor', ngroup.inputs[0].name)
|
||||||
ngroup.links.new(aa.outputs[0], ng_out.inputs[0])
|
ngroup.links.new(aa.outputs[0], ng_out.inputs[0])
|
||||||
|
@ -852,7 +852,7 @@ def delete_numbering(fo): # padding=3
|
||||||
elems = fs.path.split('/')
|
elems = fs.path.split('/')
|
||||||
for i, e in enumerate(elems):
|
for i, e in enumerate(elems):
|
||||||
elems[i] = re.sub(r'^\d{3}_', '', e)
|
elems[i] = re.sub(r'^\d{3}_', '', e)
|
||||||
|
|
||||||
new = '/'.join(elems)
|
new = '/'.join(elems)
|
||||||
fs.path = new
|
fs.path = new
|
||||||
|
|
||||||
|
@ -868,7 +868,7 @@ def renumber_keep_existing(fo, offset=10, invert=True):

    if fo.type != 'OUTPUT_FILE': return
    ct = 10

    if invert:
        reverse_fileout_inputs(fo)

@ -931,7 +931,7 @@ def renumber_keep_existing(fo, offset=10, invert=True):
                add_fileslot_number(fs, prev_num + offset)
            else:
                add_fileslot_number(fs, ct)

        else:
            if prev_num is not None:
                # iterate rename

@ -968,7 +968,7 @@ def normalize_layer_name(layer, prefix='', desc='', suffix='', lower=True, dash_
    import re

    name = layer.info

    pattern = PATTERN
    sep = '_'
    res = re.search(pattern, name.strip())

@ -993,7 +993,7 @@ def normalize_layer_name(layer, prefix='', desc='', suffix='', lower=True, dash_
    # tag2 = prefix2.upper().strip() + sep
    if desc:
        name = desc

    if suffix:
        if suffix == 'suffixkillcode':
            sfix = ''

@ -1013,7 +1013,7 @@ def normalize_layer_name(layer, prefix='', desc='', suffix='', lower=True, dash_
    old = layer.info
    print(f'{old} >> {new}')
    layer.info = new

    # Also change name string in modifier target !
    for ob in [o for o in bpy.data.objects if o.type == 'GPENCIL' and o.data == layer.id_data]:
        for m in ob.grease_pencil_modifiers:
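The loop above keeps grease pencil modifier targets in step with a renamed layer. A minimal sketch of that sync, assuming the usual `layer` filter attribute found on most GP modifiers (not every modifier type exposes it):

import bpy

def rename_gp_layer_sketch(layer, new_name):
    old = layer.info
    layer.info = new_name
    # Update modifiers that still filter on the old layer name.
    for ob in bpy.data.objects:
        if ob.type != 'GPENCIL' or ob.data != layer.id_data:
            continue
        for m in ob.grease_pencil_modifiers:
            if getattr(m, 'layer', None) == old:
                m.layer = new_name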
@ -1037,7 +1037,7 @@ def build_layers_targets_from_dopesheet(context):
    gpl = context.object.data.layers
    act = gpl.active
    dopeset = context.space_data.dopesheet


    if dopeset.show_only_selected:
        pool = [o for o in context.selected_objects if o.type == 'GPENCIL']

@ -1048,7 +1048,7 @@ def build_layers_targets_from_dopesheet(context):

    layer_pool = [l for o in pool for l in o.data.layers]
    layer_pool = list(set(layer_pool)) # remove dupli-layers from same data source with

    # apply search filter
    if dopeset.filter_text:
        layer_pool = [l for l in layer_pool if (dopeset.filter_text.lower() in l.info.lower()) ^ dopeset.use_filter_invert]

@ -1067,7 +1067,7 @@ def show_message_box(_message = "", _title = "Message Box", _icon = 'INFO'):
                self.layout.label(text=l)
            else:
                self.layout.label(text=l[0], icon=l[1])

    if isinstance(_message, str):
        _message = [_message]
    bpy.context.window_manager.popup_menu(draw, title = _title, icon = _icon)

@ -1092,7 +1092,7 @@ def show_message_box(_message = "", _title = "Message Box", _icon = 'INFO'):
            elif len(l) == 3: # ops
                self.layout.operator_context = "INVOKE_DEFAULT"
                self.layout.operator(l[0], text=l[1], icon=l[2], emboss=False) # <- highligh the entry

    if isinstance(_message, str):
        _message = [_message]
    bpy.context.window_manager.popup_menu(draw, title = _title, icon = _icon)
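Possible calls to the helper above, matching the two entry shapes its draw callback accepts (plain string, or a (text, icon) pair); the icon names are standard Blender identifiers and the messages here are illustrative:

show_message_box('Render done', _title='GP Render', _icon='INFO')
show_message_box([('Missing file output node', 'ERROR'),
                  'Check the compositor node tree'])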
@ -1122,15 +1122,15 @@ def is_render_included(o, scn):


def get_crop_pixel_coord(scn):
    # width height probably not needed. might need
    px_width = (scn.render.border_max_x - scn.render.border_min_x) * scn.render.resolution_x
    px_height = (scn.render.border_max_y - scn.render.border_min_y) * scn.render.resolution_y

    pos_x = (scn.render.border_min_x + ((scn.render.border_max_x - scn.render.border_min_x) / 2)) * scn.render.resolution_x

    ## coord y > image center coord from bottom-left (Blender)
    # pos_y = (scn.render.border_min_y + ((scn.render.border_max_y - scn.render.border_min_y) / 2)) * scn.render.resolution_y,

    ## image center coord from top-left (AE)
    pos_y = ((1 - scn.render.border_max_y) + ((scn.render.border_max_y - scn.render.border_min_y) / 2)) * scn.render.resolution_y
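A worked example of the border-to-pixel math above, with illustrative values (not taken from a production file):

res_x, res_y = 2000, 1000                            # scene resolution
min_x, max_x = 0.25, 0.75                            # normalized render border
min_y, max_y = 0.20, 0.80

px_width  = (max_x - min_x) * res_x                  # 0.50 * 2000 = 1000 px
px_height = (max_y - min_y) * res_y                  # 0.60 * 1000 = 600 px
pos_x = (min_x + (max_x - min_x) / 2) * res_x        # crop center x from left = 1000 px
pos_y = ((1 - max_y) + (max_y - min_y) / 2) * res_y  # crop center y from top (AE) = 500 px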
@ -1145,15 +1145,15 @@ def get_crop_pixel_coord(scn):
def export_crop_to_json():
    '''Export crop to json coords for AE
    '''

    blend = Path(bpy.data.filepath)
    json_path = blend.parent / 'render' / f'{blend.stem}.json' #f'{ob.name}.json'

    ## per scene : json_path = Path(bpy.data.filepath).parent / 'render' / f'{scn.name}.json'
    # json_path = Path(bpy.data.filepath).parent / 'render' / f'{scn.name}.json' #f'{ob.name}.json'

    coord_dic = {}

    for scn in bpy.data.scenes:
        # if scn.name in {'Scene', 'Render'}:
        # if scn.name == 'Scene':

@ -1178,7 +1178,7 @@ def export_crop_to_json():
    # save bbox
    with json_path.open('w') as fd:
        json.dump(coord_dic, fd, indent='\t')

    print(f'Coords saved at: {json_path}')
    return coord_dic
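A minimal sketch of the export pattern `export_crop_to_json` uses above; the exact per-scene keys the addon writes are not visible in this hunk, so the payload below is a placeholder:

import json
from pathlib import Path
import bpy

def export_per_scene_json_sketch():
    blend = Path(bpy.data.filepath)
    json_path = blend.parent / 'render' / f'{blend.stem}.json'
    json_path.parent.mkdir(parents=True, exist_ok=True)
    data = {scn.name: {'resolution': [scn.render.resolution_x, scn.render.resolution_y]}
            for scn in bpy.data.scenes}
    with json_path.open('w') as fd:
        json.dump(data, fd, indent='\t')
    return json_path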
@ -1189,13 +1189,13 @@ def set_border_region_from_coord(coords, scn=None, margin=30, export_json=True):
    '''

    scn = scn or bpy.context.scene

    coords2d_x = sorted([c[0] for c in coords])
    coords2d_y = sorted([c[1] for c in coords])

    margin_width = margin / scn.render.resolution_x
    margin_height = margin / scn.render.resolution_y

    # set crop
    scn.render.border_min_x = coords2d_x[0] - margin_width
    scn.render.border_max_x = coords2d_x[-1] + margin_width
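Hedged sketch of the whole idea behind `set_border_region_from_coord`: convert a pixel margin to normalized units, set the border from the sorted coords, and enable border rendering. Turning on `use_border` / `use_crop_to_border` here is an assumption; the original function may handle that elsewhere.

import bpy

def set_border_sketch(coords, scn=None, margin=30):
    scn = scn or bpy.context.scene
    xs = sorted(c[0] for c in coords)
    ys = sorted(c[1] for c in coords)
    mw = margin / scn.render.resolution_x
    mh = margin / scn.render.resolution_y
    scn.render.border_min_x = max(0.0, xs[0] - mw)
    scn.render.border_max_x = min(1.0, xs[-1] + mw)
    scn.render.border_min_y = max(0.0, ys[0] - mh)
    scn.render.border_max_y = min(1.0, ys[-1] + mh)
    scn.render.use_border = True
    scn.render.use_crop_to_border = True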
@ -1223,17 +1223,17 @@ def set_border_region_from_coord(coords, scn=None, margin=30, export_json=True):

def get_gp_box_all_frame(ob, cam=None):
    '''set crop to object bounding box considering whole animation. Cam should not be animated (render in bg_cam)
    return 2d bbox in pixels
    '''
    from bpy_extras.object_utils import world_to_camera_view
    coords_cam_list = []
    scn = bpy.context.scene
    cam = cam or scn.camera
    start = time()

    if ob.animation_data and ob.animation_data.action: # use frame set on all frames
        print(f'{ob.name} has anim')
        # frame_nums = sorted(list(set([f.frame_number for l in ob.data.layers if len(l.frames) for f in l.frames if len(f.strokes) and scn.frame_start <= f.frame_number <= scn.frame_end])))
        for num in range(scn.frame_start, scn.frame_end+1):
            scn.frame_set(num)
            for l in ob.data.layers:

@ -1257,7 +1257,7 @@ def get_gp_box_all_frame(ob, cam=None):
                    if len(s.points) == 1: # skip isolated points
                        continue
                    coords_cam_list += [world_to_camera_view(scn, cam, ob.matrix_world @ p.co) for p in s.points]

    print(time() - start) # Dbg-time
    return coords_cam_list
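The projection step above maps every stroke point into normalized camera space with `world_to_camera_view`. A simplified sketch that only looks at each layer's currently visible frame (the original walks the whole frame range when the object is animated):

import bpy
from bpy_extras.object_utils import world_to_camera_view

def gp_bbox_2d_current_frame(ob, scn=None, cam=None):
    scn = scn or bpy.context.scene
    cam = cam or scn.camera
    coords = []
    for l in ob.data.layers:
        if l.hide or not l.active_frame:
            continue
        for s in l.active_frame.strokes:
            if len(s.points) == 1:
                continue  # skip isolated points, as above
            coords += [world_to_camera_view(scn, cam, ob.matrix_world @ p.co) for p in s.points]
    if not coords:
        return None
    xs = sorted(c.x for c in coords)
    ys = sorted(c.y for c in coords)
    return (xs[0], ys[0]), (xs[-1], ys[-1])  # normalized (0-1) camera-frame bbox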
@ -1276,7 +1276,7 @@ def has_keyframe(ob, attr):
def get_gp_box_all_frame_selection(oblist=None, scn=None, cam=None, timeout=40):
    '''
    get points of all selection
    return 2d bbox in pixels
    return None if timeout (too long to process, better to do it visually)
    '''

@ -1289,7 +1289,7 @@ def get_gp_box_all_frame_selection(oblist=None, scn=None, cam=None, timeout=40):

    cam = cam or scn.camera
    start = time()

    if any(has_anim(ob) for ob in oblist):
        print(f'at least one is animated: {oblist}')
        for num in range(scn.frame_start, scn.frame_end+1):

@ -1304,7 +1304,7 @@ def get_gp_box_all_frame_selection(oblist=None, scn=None, cam=None, timeout=40):
                        if len(s.points) == 1: # skip isolated points
                            continue
                        coords_cam_list += [world_to_camera_view(scn, cam, ob.matrix_world @ p.co) for p in s.points]

            if time() - t0 > timeout:
                print(f'timeout (more than {timeout}s to calculate) evaluating frame position of objects {oblist}')
                return

@ -1325,16 +1325,16 @@ def get_gp_box_all_frame_selection(oblist=None, scn=None, cam=None, timeout=40):
                    if len(s.points) == 1: # skip isolated points
                        continue
                    coords_cam_list += [world_to_camera_view(scn, cam, ob.matrix_world @ p.co) for p in s.points]


    print(f'{len(coords_cam_list)} gp points listed {time() - start:.1f}s')
    return coords_cam_list

def get_bbox_2d(ob, cam=None):
    from bpy_extras.object_utils import world_to_camera_view
    scn = bpy.context.scene
    cam = cam or scn.camera
    coords2d = [world_to_camera_view(scn, cam, p) for p in get_bbox_3d(ob)]
    coords2d_x = sorted([c[0] for c in coords2d])
    coords2d_y = sorted([c[1] for c in coords2d])

@ -1348,9 +1348,9 @@ def get_bbox_2d(ob, cam=None):
    return [Vector(b) for b in bbox2d_coords]

def set_box_from_selected_objects(scn=None, cam=None, export_json=False):
    scn = scn or bpy.context.scene
    cam = cam or scn.camera

    selection = [o for o in scn.objects if o.select_get()] # selected_objects
    coords = get_gp_box_all_frame_selection(oblist=selection, scn=scn, cam=cam)
    if not coords:

@ -1445,10 +1445,10 @@ def get_collection_childs_recursive(col, cols=[], include_root=True):
        cols.append(sub)
        if len(sub.children):
            cols = get_collection_childs_recursive(sub, cols)

    if include_root and col not in cols: # add root col
        cols.append(col)

    return cols

def unlink_objects_from_scene(oblist, scn):
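Same traversal as `get_collection_childs_recursive` above, sketched without the shared mutable default argument (`cols=[]`) that the original signature carries:

def collection_childs_recursive_sketch(col, include_root=True):
    # Return `col` (optionally) plus all nested child collections, depth first.
    cols = [col] if include_root else []
    for sub in col.children:
        cols.append(sub)
        cols.extend(collection_childs_recursive_sketch(sub, include_root=False))
    return cols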
@ -1473,7 +1473,7 @@ def split_object_to_scene():
    active = bpy.context.object
    scene_name = active.name
    objs = [o for o in bpy.context.selected_objects]

    if bpy.data.scenes.get(scene_name):
        print(f'Scene "{scene_name}" Already Exists')
        raise Exception(f'Scene "{scene_name}" Already Exists')

@ -1497,7 +1497,7 @@ def split_object_to_scene():
                continue
            if sob not in objs:
                col.objects.unlink(sob)

    frame_names = [n.label for n in new.node_tree.nodes if n.type == 'FRAME' if new.objects.get(n.label)]
    remove_scene_nodes_by_obj_names(new, frame_names, negative=True)

@ -1511,7 +1511,7 @@ def split_object_to_scene():

    ## remove asset from original scene
    #src_frame_names = [n.label for n in src.node_tree.nodes if n.type == 'FRAME' and n.label in [o.name for o in objs]]
    #remove_scene_nodes_by_obj_names(src, src_frame_names)
    remove_scene_nodes_by_obj_names(src, frame_names, negative=False)

    # unlink objects ?

@ -1522,7 +1522,7 @@ def split_object_to_scene():
    coords = get_gp_box_all_frame_selection(oblist=gp_objs, scn=new, cam=new.camera)
    if not coords:
        return f'Scene "{scene_name}" created. But Border was not set (Timeout during GP analysis), should be done by hand if needed then use export crop to json'

    set_border_region_from_coord(coords, margin=30, scn=new, export_json=True)
    export_crop_to_json()

@ -1548,7 +1548,7 @@ def clear_frame_out_of_range(o, verbose=False):
                    print(f'del: obj {o.name} > layer {l.info} > frame {f.frame_number}')
                l.frames.remove(f)
                ct += 1

            # before
            elif f.frame_number < scn.frame_start - 1:
                if first:
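Hedged sketch of the unambiguous half of `clear_frame_out_of_range` above: dropping grease pencil frames past the scene end. The "before start" side needs the extra `first` bookkeeping shown in the hunk, because the last frame before the range still displays and must be kept.

import bpy

def clear_frames_after_end_sketch(ob, scn=None):
    scn = scn or bpy.context.scene
    removed = 0
    for l in ob.data.layers:
        for f in list(l.frames):          # copy, frames are removed while iterating
            if f.frame_number > scn.frame_end + 1:
                l.frames.remove(f)
                removed += 1
    return removed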
@ -1596,7 +1596,7 @@ def set_scene_output_from_active_fileout_item():

    excluded = ['file_format', 'color_mode', 'color_depth',
                'view_settings', 'views_format']

    ''' ## all attrs
    # 'cineon_black', 'cineon_gamma', 'cineon_white',
    # 'color_depth', 'color_mode', 'compression', 'display_settings',

@ -1641,7 +1641,7 @@ def set_layer_colors(skip_if_colored=False):
            continue
        print(l.info, '->', color)
        l.channel_color = color

    bpy.context.preferences.edit.use_anim_channel_group_colors = True


@ -12,10 +12,10 @@ def add_rlayer(layer_name, scene=None, location=None, color=None, node_name=None
        node_name = layer_name # 'RL_' +

    if not scene:
        scene=bpy.context.scene

    nodes = scene.node_tree.nodes

    comp = nodes.get(node_name)
    if comp:
        if comp.layer == node_name:

@ -33,7 +33,7 @@ def add_rlayer(layer_name, scene=None, location=None, color=None, node_name=None
        comp.location = location
    if color:
        comp.color = color

    if width:
        comp.width = width
    comp.show_preview = False
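Minimal sketch of the get-or-create pattern `add_rlayer` implements above (defaults and naming are assumptions; the view layer named `vl_name` must already exist for the node's `layer` enum to accept it):

import bpy

def get_or_create_rlayer_sketch(scene, vl_name, location=(0, 0)):
    scene.use_nodes = True
    nodes = scene.node_tree.nodes
    node = nodes.get(vl_name)
    if node is None:
        node = nodes.new('CompositorNodeRLayers')
        node.name = node.label = vl_name
    node.layer = vl_name          # point the node at the view layer
    node.location = location
    node.show_preview = False
    return node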
@ -51,14 +51,14 @@ def connect_render_layer(rlayer, ng=None, out=None, frame=None):
    vl_name = rlayer.layer
    if not vl_name or vl_name == 'View Layer':
        print(f'Bad layer for node {rlayer.name}')

    if not ' / ' in vl_name:
        print(f'no slash (" / ") separator in vl_name {vl_name}, should be "obj.name / layer_name"')
        return

    obname, lname = vl_name.split(' / ')
    lname = bpy.path.clean_name(lname)

    if not frame:
        if rlayer.parent:
            frame=rlayer.parent

@ -96,7 +96,7 @@ def connect_render_layer(rlayer, ng=None, out=None, frame=None):

        ng_in = fn.create_node('NodeGroupInput', tree=ngroup, location=(-600,0))
        ng_out = fn.create_node('NodeGroupOutput', tree=ngroup, location=(600,0))

    else:
        ngroup = ng.node_tree
        ng_in = ngroup.nodes.get('Group Input')

@ -113,20 +113,20 @@ def connect_render_layer(rlayer, ng=None, out=None, frame=None):

    ## get nodes from frame
    # rl_nodes = [n for n in nodes if n.type == 'R_LAYERS' and n.layer != 'View Layer' and n.parent == frame]

    # auto clean : if an input exists but is not linked and name not exists in rlayers of current frame
    for s in reversed(ng.inputs):
        if not s.is_linked: # and not any(x.layer == s.name for x in rl_nodes)
            print(f'removing grp unlinked input {s.name}')
            ng.inputs.remove(s)

    ## get nodes from linked NG inputs ??? maybe more clear...
    # rl_nodes = [s.links[0].from_node for s in ng.inputs if s.links and s.links[0].from_node and s.links[0].from_node.type == 'R_LAYERS']

    ## reorder
    fn.reorder_inputs(ng)
    ng.update()

    # CREATE NG outsocket (individual, without taking merge)
    connected = False

@ -136,7 +136,7 @@ def connect_render_layer(rlayer, ng=None, out=None, frame=None):
        if socket:
            connected = True
            groupout = ng.outputs.get(socket.name)

    ng.update()

    if not connected:

@ -151,7 +151,7 @@ def connect_render_layer(rlayer, ng=None, out=None, frame=None):
        # ng_in.outputs[vl_name]
        ngroup.links.new(ng_in.outputs[vl_name], aa.inputs[0]) # node_tree
        ngroup.links.new(aa.outputs[0], ng_out.inputs[vl_name]) # node_tree

        aa.mute = scene.use_aa # mute if native AA is used


@ -166,7 +166,7 @@ def connect_render_layer(rlayer, ng=None, out=None, frame=None):
    ng.update()
    # reorder output to match inputs
    fn.reorder_outputs(ng)

    ng.update()

    # Clear : delete orphan nodes that are not connected from ng_in

@ -180,7 +180,7 @@ def connect_render_layer(rlayer, ng=None, out=None, frame=None):
    if groupout.links and groupout.links[0].to_node.type == 'OUTPUT_FILE':
        # if already connected to outfile just skip cause user might have customised the name
        return

    slot_name = f'{lname}/{lname}_'
    out_name = f'OUT_{obname}' # or get output from frame
    if not out:

@ -194,7 +194,7 @@ def connect_render_layer(rlayer, ng=None, out=None, frame=None):
        out.base_path = f'//render/{bpy.path.clean_name(obname)}'

    ## out_input = out.inputs.get(slot_name) # ok for non-numbered outputs

    out_input = None
    out_input = fn.get_numbered_output(out, slot_name)

@ -235,9 +235,9 @@ def get_set_viewlayer_from_gp(ob, l, scene=None):
        # ob.data = ob.data.copy() # create duplicate (this will also affect the one in original scene !!!)
        scene.collection.objects.link(ob)
        ob.hide_viewport = ob.hide_render = False

    ## set object active in default viewlayer

    # if (avl := scene.view_layers.get('ViewLayer')):
    # # This select the object in source scene
    # avl.objects.active = ob

@ -269,7 +269,7 @@ def get_set_viewlayer_from_gp(ob, l, scene=None):

    if rlayer_list: # rlayer exists
        print(f'{len(rlayer_list)} nodes using {vl_name}')

        # affect only the one within an object frame
        framed_rl = [n for n in rlayer_list if n.parent and n.parent.label == ob.name]
        if framed_rl:

@ -282,7 +282,7 @@ def get_set_viewlayer_from_gp(ob, l, scene=None):
            cp.select = True # select so the user see that it existed
            return vl, cp

    # Returned if existed and OK

    if not ob.name in frame_dic.keys(): # and len(frame_dic[ob.name])
        print(f'\n{ob.name} -> {l.info} (first generation)')

@ -303,7 +303,7 @@ def get_set_viewlayer_from_gp(ob, l, scene=None):
        frame.label = ob.name
        frame.label_size = 50
        frame.location = (loc[0], loc[1] + 20)

        cp = add_rlayer(vl_name, scene=scene, location=loc)
        cp.parent = frame
        # use same color as layer

@ -326,7 +326,7 @@ def get_set_viewlayer_from_gp(ob, l, scene=None):
        if cp.layer != vl_name:
            print(f'problem with {cp}: {cp.layer} != {vl_name}')
            return

        if fn.has_channel_color(l):
            cp.use_custom_color = True
            cp.color = l.channel_color

@ -339,16 +339,16 @@ def get_set_viewlayer_from_gp(ob, l, scene=None):
            top_loc = fn.real_loc(rl_nodes[0])
        else:
            top_loc = fn.get_frame_transform(frame[1], node_tree) - 60

        # cp.location = (top_loc[0], top_loc[1] + 100) # temp location to adjust x loc

        # list of layer names in nodes order
        rl_names = [n.layer.split(' / ')[1] for n in rl_nodes] # get True layer name from rl
        # names with the right order WITH the new layer included
        names = [lay.info for lay in ob.data.layers if lay.info in rl_names or lay == l]

        rl_nodes.append(cp)

        # filter by getting index(layer_name)
        cp.parent = frame
        rl_nodes.sort(key=lambda x : names.index(x.layer.split(' / ')[1])) # Sort True layer name from rl

@ -356,13 +356,13 @@ def get_set_viewlayer_from_gp(ob, l, scene=None):
        offset = 0
        # print(f'number of nodes in frame: {len(rl_nodes)}')
        ref_node = rl_nodes[0]

        # print('ref_node: ', ref_node.name, ref_node.location)
        for n in rl_nodes:
            # set x loc from first node in list (maybe use leftmost ?)
            n.location = Vector((fn.real_loc(ref_node)[0], top_loc[1] - offset)) - n.parent.location
            offset += 180

            n.update()
        # reorder render layers nodes within frame

@ -379,12 +379,12 @@ def export_gp_objects(oblist, exclude_list=[], scene=None):
        oblist = [oblist]
    if isinstance(exclude_list, str):
        exclude_list = [p.strip() for p in exclude_list.split(',')]

    # print('exclude_list: ', exclude_list)

    for ob in oblist:
        for l in ob.data.layers:
            # if l.hide:
            # continue
            if l.hide or any(x + '_' in l.info for x in exclude_list): # exclude hided ?
                print(f'Exclude export: {ob.name} : {l.info}')

4  prefs.py

@ -7,7 +7,7 @@ class gp_render_prefs(bpy.types.AddonPreferences):
    # name='Resample on the fly',
    # description="Allow smoother stroke when using pinch\nnote that stroke using textured materials will not be resampled",
    # default=True)

    advanced : bpy.props.BoolProperty(
        name='Advanced Options', # Reproject On Guessed Plane
        description="Display advanced options",

@ -22,7 +22,7 @@ def get_addon_prefs():
    function to read current addon preferences properties
    access with : get_addon_prefs().super_special_option
    '''
    import os
    addon_name = os.path.splitext(__name__)[0]
    preferences = bpy.context.preferences
    addon_prefs = preferences.addons[addon_name].preferences
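Possible usage of `get_addon_prefs()` from elsewhere in the addon (`advanced` is the BoolProperty declared in the preferences class above):

prefs = get_addon_prefs()
if prefs.advanced:
    print('advanced GP render options enabled')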
@ -23,7 +23,7 @@ scn = bpy.context.scene
# - Import Fx3D (or render from Fx3D file... maybe easier consifering the number)


## tried to make color that fit in White theme
## (difficult for readability since this text color is not the same)


@ -54,7 +54,7 @@ def set_layer_colors():
            continue
        print(l.info, '->', color)
        l.channel_color = color

    C.preferences.edit.use_anim_channel_group_colors = True


48  ui.py

@ -17,26 +17,26 @@ class GPEXP_PT_gp_node_ui(Panel):
        advanced = prefs.advanced
        layout = self.layout
        layout.operator('gp.render_scene_switch', icon='SCENE_DATA', text='Switch Scene')

        scn = context.scene

        ## Camera swapping
        row = layout.row()
        cam = scn.camera
        if cam:
            text = f'{cam.name} : {scn.render.resolution_x}x{scn.render.resolution_y}' # Cam:
        else:
            text = f'None' # Cam:

        # if cam and cam_name == 'draw_cam':
        # cam_name = f'{cam.parent.name} > {cam_name}'
        row.operator("gp.swap_render_cams", text=text, icon='OUTLINER_OB_CAMERA')

        # Live checks
        if scn.render.resolution_percentage != 100:
            layout.label(text='Res Percentage not 100%', icon='ERROR')
            layout.prop(scn.render, 'resolution_percentage')

        exclude_count = len([vl for vl in scn.view_layers if not vl.use and vl.name not in {'View Layer', 'exclude'}])
        if exclude_count:
            # layout.label(text=f'{exclude_count} Excluded View Layers !')

@ -45,21 +45,21 @@ class GPEXP_PT_gp_node_ui(Panel):

        if not scn.use_nodes or not scn.node_tree:
            return

        disabled_output = [n for n in scn.node_tree.nodes if n.type == 'OUTPUT_FILE' and n.mute]
        if disabled_output:
            output_ct = len([n for n in scn.node_tree.nodes if n.type == 'OUTPUT_FILE'])
            layout.label(text=f'{len(disabled_output)}/{output_ct} Output Muted', icon='INFO')

        layout.separator()

        layout.label(text='View layers:')
        ct = len([n for n in context.scene.node_tree.nodes if n.type == 'R_LAYERS' and n.select])

        # col = layout.column(align=True)
        # row=col.row(align=True)
        row=layout.row(align=True)

        row1 = row.row(align=True)
        row1.operator('gp.activate_only_selected_layers', text=f'Activate Only {ct} Layer Nodes')
        row1.enabled = ct > 0

@ -76,7 +76,7 @@ class GPEXP_PT_gp_node_ui(Panel):
        col.operator('gp.merge_selected_viewlayer_nodes', icon='NODETREE', text=txt).disconnect = True
        col.operator('gp.merge_selected_viewlayer_nodes', icon='NODETREE', text='Merge (keep connect)').disconnect = False
        col.enabled = ct > 1

        layout.separator()
        col = layout.column()
        subcol = col.column()

@ -87,18 +87,18 @@ class GPEXP_PT_gp_node_ui(Panel):
            subcol.enabled = False

        subcol.operator('gp.reconnect_render_layer', icon='ANIM', text=f'Reconnect {ct} Layer Node')

        col.operator('gp.delete_render_layer', icon='TRACKING_CLEAR_FORWARDS', text=f'Delete {ct} Layer Node')

        layout.separator()

        layout.label(text='All Outputs:')
        row=layout.row()
        row.operator('gp.mute_toggle_output_nodes', icon='NODE_INSERT_ON', text='Mute').mute = True
        row.operator('gp.mute_toggle_output_nodes', icon='NODE_INSERT_OFF', text='Unmute').mute = False

        layout.separator()

        col=layout.column()
        col.label(text='Clean and updates:')
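Sketch of the effect the Mute / Unmute buttons above drive: toggling the `mute` flag on every compositor File Output node. The operator itself is not part of this diff, so this is an assumed equivalent, not its actual implementation.

import bpy

def mute_file_outputs_sketch(scn, mute=True):
    if not scn.use_nodes or not scn.node_tree:
        return 0
    outputs = [n for n in scn.node_tree.nodes if n.type == 'OUTPUT_FILE']
    for n in outputs:
        n.mute = mute
    return len(outputs)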
@ -119,7 +119,7 @@ class GPEXP_PT_gp_node_ui(Panel):
            subcol.operator('gp.number_outputs', icon='LINENUMBERS_ON', text=txt).mode = 'SELECTED'
            # subcol.operator('gp.normalize_outnames', icon='SYNTAX_OFF', text=f'Normalize Paths {ct} Selected Ouptut') # not ready
            # col.operator('gp.number_outputs', icon='LINENUMBERS_ON', text='Renumber all outputs').mode = 'ALL'

            if advanced:
                subcol.operator('gp.set_output_node_format', icon='OUTPUT', text='Copy Active Output Format')
                subcol.operator('gp.set_active_fileout_to_compout', icon='OUTPUT', text='Active Slot to Composite')

@ -204,7 +204,7 @@ class GPEXP_PT_gp_dopesheet_ui(Panel):
        # merge layers from dopesheet
        row.operator('gp.merge_viewlayers_to_active', text=txt, icon='SELECT_EXTEND')
        row.enabled= ct > 1

        col.operator('gpexp.auto_merge_adjacent_prefix', icon='SELECT_EXTEND')

        ## all and objects

@ -227,17 +227,17 @@ class GPEXP_PT_gp_dopesheet_ui(Panel):
        col.operator('gp.export_infos_for_compo', icon='FILE', text='Export Layers Infos') # Not really need, called in Check layers invoke
        col.operator('gp.layers_state', icon='CHECKMARK', text='Check layers')
        col.operator('gp.check_masks', icon='MOD_MASK', text='Has Masks')

        # row = layout.row()
        layout.prop(bpy.context.preferences.edit, 'use_anim_channel_group_colors')

        layout.separator()

        row = layout.row()
        row.operator('gp.export_as_pdf', icon='RENDER_STILL', text='Render All to PDF Sequences')
        if bpy.app.version < (3,0,0):
            row.label(text='Not Blender 3.0.0+')

        ## Append GP Render workspace (usefull for user with disabled 'load_UI')
        if not bpy.data.workspaces.get('GP Render'):
            layout.operator('gp.set_gp_render_workspace')

@ -274,7 +274,7 @@ def viewlayer_layout(layout, scn):
# bl_label = "View Layers"
# def draw(self, context):
# layout = self.layout
# viewlayer_layout(layout, context)

class GPEXP_PT_viewlayers_ui(Panel):
    bl_space_type = "NODE_EDITOR"

@ -361,7 +361,7 @@ GPEXP_PT_gp_node_ui,
GPEXP_PT_gp_dopesheet_ui,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)
    # bpy.types.DATA_PT_gpencil_layers.prepend(manager_ui)