# GP render helpers: compositor node-tree setup, render/compo scene settings,
# view-layer management and file-output slot numbering.
import bpy
|
|
import os
|
|
import re
|
|
import json
|
|
|
|
from mathutils import Vector
|
|
from pathlib import Path
|
|
from itertools import groupby
|
|
from math import isclose
|
|
from collections import defaultdict
|
|
from time import time
|
|
|
|
from . constant import RD_SCENE_NAME
|
|
|
|
### -- rules
|
|
|
|
def is_valid_name(name):
    '''Return True when name corresponds to a valid (renderable) object.

    Rejected names:
    - names starting with a dot '.'
    - the name "note" (case-insensitive)
    '''
    ## FIXME: /!\ "note" as an exclude word is not good practice, temporary fix
    return not name.startswith('.') and name.lower() != 'note'
|
|
|
|
### -- node basic
|
|
|
|
def create_node(type, tree=None, **kargs):
    '''Create a node of the given type in tree.

    :type: node bl_idname to instantiate.
    :tree: target node tree (default: active scene compositor tree).
    Extra keyword arguments are assigned as attributes on the new node.
    Return the created node.
    '''
    tree = tree or bpy.context.scene.node_tree

    node = tree.nodes.new(type)
    for attr_name, attr_value in kargs.items():
        setattr(node, attr_name, attr_value)
    return node
|
|
|
|
def new_aa_node(tree, **kargs):
    '''Create a collapsed AntiAliasing node in tree.

    Extra keyword arguments are assigned as attributes on the node.
    Return the created node.
    '''
    node = create_node('CompositorNodeAntiAliasing', tree)  # type = ANTIALIASING
    node.threshold = 1.0  # 0.5
    node.contrast_limit = 0.25  # 0.5
    node.corner_rounding = 0.25
    node.hide = True

    for attr_name, attr_value in kargs.items():
        setattr(node, attr_name, attr_value)
    return node
|
|
|
|
def create_aa_nodegroup(tree):
    '''Get or build the shared "AA" nodegroup and instance it in tree.

    The group anti-aliases RGB through one AA node, separates channels,
    anti-aliases alpha independently, then recombines.
    Return the created (hidden) group node.
    '''
    ngroup = bpy.data.node_groups.get('AA')
    if not ngroup:
        # Build the group content only on first use; later calls reuse it
        ngroup = bpy.data.node_groups.new('AA', 'CompositorNodeTree')
        ng_in = create_node('NodeGroupInput', tree=ngroup, location=(-600,0))
        ng_out = create_node('NodeGroupOutput', tree=ngroup, location=(600,0))

        sep = create_node('CompositorNodeSepRGBA', tree=ngroup, location=(-150,0))
        comb = create_node('CompositorNodeCombRGBA', tree=ngroup, location=(350,25))

        # Socket API changed in Blender 4.0 (interface items tree)
        if bpy.app.version < (4,0,0):
            ngroup.inputs.new('NodeSocketColor', 'Image')
            ngroup.outputs.new('NodeSocketColor', 'Image')
        else:
            ngroup.interface.new_socket('Image', in_out='INPUT', socket_type='NodeSocketColor')
            ngroup.interface.new_socket('Image', in_out='OUTPUT', socket_type='NodeSocketColor')

        # input -> AA -> separate RGBA
        aa = new_aa_node(ngroup, location=(-400, 0))
        ngroup.links.new(ng_in.outputs[0], aa.inputs[0])
        ngroup.links.new(aa.outputs[0], sep.inputs[0])

        # RGB channels pass straight into the combine node
        for i in range(3):
            ngroup.links.new(sep.outputs[i], comb.inputs[i])

        # alpha gets its own AA pass
        alpha_aa = new_aa_node(ngroup, location=(100,-150))
        ngroup.links.new(sep.outputs[3], alpha_aa.inputs[0])
        ngroup.links.new(alpha_aa.outputs[0], comb.inputs[3])

        ngroup.links.new(comb.outputs[0], ng_out.inputs[0])

    # Instance the (new or existing) group in the target tree
    ng = create_node('CompositorNodeGroup', tree=tree)
    ng.node_tree = ngroup
    ng.name = ngroup.name
    ng.hide=True
    return ng
|
|
|
|
|
|
## -- object and scene settings
|
|
|
|
def activate_workspace(name='', context=None):
    '''Activate the workspace called name in the current window.

    If the workspace is not in the file, try to append it from the
    bundled app_templates directory (folder named like the workspace,
    spaces replaced by underscores).

    Return the workspace (or True when just switched), False when the
    append failed, None when name is empty or already active.
    '''
    if not name:
        return
    if context is None:
        context = bpy.context

    if context.window.workspace.name == name:
        print(f'Already in {name} workspace')
        return

    # Workspace already exists in file: just switch to it
    if (wkspace := bpy.data.workspaces.get(name)):
        context.window.workspace = wkspace
        return True

    # Same name with spaces as underscore
    dir_name = name.replace(' ', '_')
    filepath = Path(__file__).parent / 'app_templates' / dir_name / 'startup.blend'

    ret = bpy.ops.workspace.append_activate(idname=name, filepath=str(filepath))
    if ret != {'FINISHED'}:
        print(f'Could not found {name} at {filepath}')
        return False

    return context.window.workspace
|
|
|
|
def copy_settings(obj_a, obj_b):
    '''Best-effort copy of every readable attribute from obj_a to obj_b.

    Dunder attributes, RNA identity attributes, transform attributes and
    a few stamp-related properties are skipped. Attributes that cannot
    be read or written are silently ignored.
    '''
    exclusion = ['bl_rna', 'id_data', 'identifier','name_property','rna_type','properties', 'stamp_note_text','use_stamp_note',
                 'settingsFilePath', 'settingsStamp', 'select', 'matrix_local', 'matrix_parent_inverse',
                 'matrix_basis','location','rotation_euler', 'rotation_quaternion', 'rotation_axis_angle', 'scale']

    for attr in dir(obj_a):
        if attr.startswith('__'):
            continue
        if attr in exclusion:
            continue
        try:
            val = getattr(obj_a, attr)
        except AttributeError:
            # Unreadable property: skip this attribute entirely.
            # (BUGFIX: previously fell through and wrote the value read
            # for the *previous* attribute under this attribute's name.)
            continue

        try:
            setattr(obj_b, attr, val)
        except Exception:
            # read-only or type-mismatched target attribute: best effort only
            pass
|
|
|
|
def set_file_output_format(fo):
    '''Apply image format settings on a file output node.

    Settings are read from the GP_RENDER_FILE_FORMAT environment
    variable (a JSON dict), falling back to 16-bit ZIP-compressed
    multilayer EXR with RGBA.
    '''
    settings = json.loads(os.environ.get('GP_RENDER_FILE_FORMAT', '{}'))
    if not settings:
        settings = {
            'file_format': 'OPEN_EXR_MULTILAYER',
            'exr_codec': 'ZIP',
            'color_depth': '16',
            'color_mode': 'RGBA',
        }

    for attr_name, value in settings.items():
        setattr(fo.format, attr_name, value)
|
|
|
|
def set_scene_aa_settings(scene=None, aa=True):
    '''Set scene sampling according to aa.

    aa True -> use native anti-aliasing (32 samples, GP threshold 1),
    aa False -> disable scene AA (1 sample, GP threshold 0).
    '''
    if not scene:
        scene = bpy.context.scene

    samples, threshold = (32, 1) if aa else (1, 0)
    scene.eevee.taa_render_samples = samples
    scene.grease_pencil_settings.antialias_threshold = threshold
|
|
|
|
def set_settings(scene=None, aa=True):
    '''Apply preview-render oriented settings on scene.

    :aa: use native scene anti-aliasing, else disable it.
    '''
    if not scene:
        scene = bpy.context.scene

    # specific sampling settings for this kind of render
    set_scene_aa_settings(scene=scene, aa=aa)

    render = scene.render
    render.film_transparent = True
    render.use_compositing = True
    render.use_sequencer = False
    scene.view_settings.view_transform = 'Standard'

    render.resolution_percentage = 100

    # output path (preview files are meant to be deleted afterwards)
    render.filepath = f'//render/preview/{scene.name}/preview_'

    ## Same format as output nodes
    im_settings = render.image_settings
    im_settings.file_format = 'OPEN_EXR'
    im_settings.color_mode = 'RGBA'
    im_settings.color_depth = '16'
    im_settings.exr_codec = 'ZIP'
|
|
|
|
def scene_aa(scene=None, toggle=True):
    '''Change scene AA settings and commute AA nodes according to toggle.

    toggle True -> native AA on, AA nodegroups muted (redundant);
    toggle False -> native AA off, AA nodegroups active.
    '''
    if not scene:
        scene=bpy.context.scene

    # Enable/disable native anti-alias on active scene
    set_scene_aa_settings(scene=scene, aa=toggle)

    ## Also set AA on the scene where objects and viewlayers live
    local_nodes = scene.node_tree.nodes
    if (group_node := next((n for n in local_nodes if n.name.startswith('NG_')), None)):
        # Get a viewlayer connected to a NG_ and check which scene is referred
        if (rlayer := next((i.links[0].from_node for i in group_node.inputs if i.links and i.links[0].from_node.type == 'R_LAYERS'), None)):
            if rlayer.scene and rlayer.scene != scene:
                print(f'Set AA to {toggle} on scene "{rlayer.scene.name}"')
                set_scene_aa_settings(scene=rlayer.scene, aa=toggle)

    ## Mute/Unmute inner AA nodegroups of every NG_ group
    for n in local_nodes:
        if n.type == 'GROUP' and n.name.startswith('NG_'):
            for gn in n.node_tree.nodes:
                if gn.type == 'GROUP' and gn.node_tree.name == 'AA':
                    gn.mute = toggle
|
|
|
|
def new_scene_from(name, src_scn=None, regen=True, crop=True, link_cam=True, link_light=True):
    '''Get / Create a scene from name and source scene to get settings from.

    :regen: when the scene already exists, delete and recreate it.
    :crop: enable render border and crop-to-border on the new scene.
    :link_cam / :link_light: link source cameras / lights into the new scene.
    Return the (new or existing) scene.
    '''
    scn = bpy.data.scenes.get(name)
    if scn and not regen:
        return scn
    elif scn and regen:
        bpy.data.scenes.remove(scn)

    src_scn = src_scn or bpy.context.scene  # given scene, or active scene
    scn = bpy.data.scenes.new(name)
    ## copy original settings over to new scene
    # (a full copy_settings on the scene itself copies too much)
    for attr in ['frame_start', 'frame_end', 'frame_current', 'camera', 'world']:
        setattr(scn, attr, getattr(src_scn, attr))
    copy_settings(src_scn.render, scn.render)

    ## link cameras (and lights)
    for ob in src_scn.objects:
        if link_cam and ob.type == 'CAMERA':
            scn.collection.objects.link(ob)
        if link_light and ob.type == 'LIGHT':
            scn.collection.objects.link(ob)

    # set adapted render settings
    set_settings(scn)

    if crop:
        scn.render.use_border = True
        scn.render.use_crop_to_border = True
    scn.use_nodes = True
    return scn
|
|
|
|
def get_compo_scene(scene_name=None, create=True):
    '''Get / Create a dedicated compositing scene to link GP.

    Use passed scene name; if no name is passed fall back to the
    node_scene property of the active scene.
    Return None if that field is empty, or if the scene does not exist
    and create is False.
    '''
    scene_name = scene_name or bpy.context.scene.gp_render_settings.node_scene
    if not scene_name:
        # no dedicated compo scene configured
        return

    scn = bpy.data.scenes.get(scene_name)
    if scn:
        return scn

    if not create:
        return

    ## -- Create compositing scene
    current = bpy.context.scene

    scn = bpy.data.scenes.new(scene_name)

    ## copy original timing settings over to new scene
    for attr in ['frame_start', 'frame_end', 'frame_current']:  # , 'camera', 'world'
        setattr(scn, attr, getattr(current, attr))
    copy_settings(current.render, scn.render)

    scn.use_nodes = True

    ## Clear initial node tree content
    scn.node_tree.nodes.clear()
    set_settings(scn)
    scn.gp_render_settings['use_aa'] = True
    # Set compo scene target in its own property as well
    scn.gp_render_settings.node_scene = scene_name
    return scn
|
|
|
|
def get_render_scene(scene_name=None, create=True):
    '''Get / Create a dedicated render scene to link GP.

    :scene_name: defaults to RD_SCENE_NAME.
    Return None when the scene is missing and create is False.
    '''
    scene_name = scene_name or RD_SCENE_NAME

    render_scn = bpy.data.scenes.get(scene_name)
    if render_scn:
        return render_scn

    if not create:
        return

    ## -- Create render scene
    current = bpy.context.scene

    render_scn = bpy.data.scenes.new(scene_name)

    ## copy original settings over to new scene
    # (a full copy_settings on the scene itself copies too much)
    for attr in ['frame_start', 'frame_end', 'frame_current', 'camera', 'world']:
        setattr(render_scn, attr, getattr(current, attr))
    copy_settings(current.render, render_scn.render)

    ## link cameras and lights
    for ob in current.objects:
        if ob.type in ('CAMERA', 'LIGHT'):
            render_scn.collection.objects.link(ob)

    ## Copy markers (keep camera binding)
    for marker in current.timeline_markers:
        new_marker = render_scn.timeline_markers.new(marker.name, frame=marker.frame)
        new_marker.camera = marker.camera

    render_scn.use_nodes = True

    ## Clear node tree (initial view layer stuff)
    render_scn.node_tree.nodes.clear()

    set_settings(render_scn)
    render_scn.gp_render_settings['use_aa'] = True
    return render_scn
|
|
|
|
def get_view_layer(name, scene=None):
    '''Return the view layer called name in scene (default: render scene).

    The view layer is created (with Z pass enabled) when missing.
    '''
    scene = scene or get_render_scene()

    layer = scene.view_layers.get(name)
    if not layer:
        layer = scene.view_layers.new(name)
        layer.use_pass_z = True
    return layer
|
|
|
|
def set_resolution_from_cam_prop(cam=None, scene=None):
    '''Set scene render resolution from the camera "resolution" custom property.

    :cam: camera to read from (default: scene's active camera).
    Return an (level, message) tuple describing the outcome.
    '''
    if scene is None:
        scene = bpy.context.scene
    cam = cam or scene.camera
    if not cam:
        return ('ERROR', 'No active camera')

    res = cam.get('resolution')
    if not res:
        return ('ERROR', 'Cam has no resolution attribute')

    rd = scene.render
    if (rd.resolution_x, rd.resolution_y) == (res[0], res[1]):
        return ('INFO', f'Resolution already at {res[0]}x{res[1]}')

    rd.resolution_x, rd.resolution_y = res[0], res[1]
    return ('INFO', f'Resolution to {res[0]}x{res[1]}')
|
|
|
|
## -- node location tweaks
|
|
|
|
def real_loc(n):
    '''Return the absolute location of a node, accumulating parent frame offsets.'''
    loc = n.location
    parent = n.parent
    while parent:
        loc = loc + parent.location
        parent = parent.parent
    return loc
|
|
|
|
def get_frame_transform(f, node_tree=None):
    '''Return real transform (location, dimensions) of a frame node.

    Only works with one level of nesting (not recursive).
    :node_tree: tree containing f (default: f's owning tree).
    Return None if f is not a frame node; (location Vector, dimensions
    Vector) otherwise, with a margin of 60 added to the dimensions.
    '''
    if not node_tree:
        node_tree = f.id_data
    if f.type != 'FRAME':
        return

    childs = [n for n in node_tree.nodes if n.parent == f]
    if not childs:
        # empty frame: its own transform is already meaningful
        return f.location, f.dimensions

    # candidate extremes: each child's corners (y goes downward from location)
    xs = [n.location.x for n in childs] + [n.location.x + n.dimensions.x for n in childs]
    ys = [n.location.y for n in childs] + [n.location.y - n.dimensions.y for n in childs]
    # min/max is all that's needed here (previous in-place sorts with an
    # identity key were redundant work)

    loc = Vector((min(xs), max(ys)))
    dim = Vector((max(xs) - min(xs) + 60, max(ys) - min(ys) + 60))

    return loc, dim
|
|
|
|
|
|
## -- get all frames with their real transform.
|
|
|
|
def bbox(f, frames):
    '''Recursively compute the bounding box of frame f.

    :frames: dict {frame_node: [direct child nodes]} for the whole tree.
    Return ([min_x, max_x], [min_y, max_y]) with a margin of 30 per side.
    '''
    xs=[]
    ys=[]
    for n in frames[f]:  # nodes of passed frame
        if n.type == 'FRAME':
            # nested frame: recurse (frames without children are not listed)
            if n not in frames.keys():
                continue
            all_xs, all_ys = bbox(n, frames)
            xs += all_xs
            ys += all_ys

        else:
            loc = real_loc(n)
            # node extends right and *down* from its location
            xs += [loc.x, loc.x + n.dimensions.x]
            ys += [loc.y, loc.y - n.dimensions.y]

    # margin ~= 30
    return [min(xs)-30, max(xs)+30], [min(ys)-30, max(ys)+30]
|
|
|
|
def get_frames_bbox(node_tree):
    '''Return a dict of all top-level frames with their real transform.

    ex: {frame_node: (location, dimension), ...}
    location is the top-left corner Vector, dimension the size Vector.
    '''
    # Map each frame to its direct children (nested frames included)
    frames = defaultdict(list)
    frames_bbox = {}
    for n in node_tree.nodes:
        if not n.parent:
            continue
        frames[n.parent].append(n)

    # Compute bbox only for top-level frames (recursion handles nesting)
    for f, nodes in frames.items():
        if f.parent:
            continue

        xs, ys = bbox(f, frames)

        ## returning: (loc vector, dimensions vector)
        frames_bbox[f] = Vector((xs[0], ys[1])), Vector((xs[1] - xs[0], ys[1] - ys[0]))

    return frames_bbox
|
|
|
|
## -- nodes helper functions
|
|
|
|
def remove_nodes_by_viewlayer(viewlayer_list, scene=None):
    '''Remove compositor nodes related to the given view layers.

    For each matching render-layer node: delete the file-output slots it
    feeds (through NG_ groups), the matching group interface sockets,
    then the render-layer node itself.

    :viewlayer_list: view layers to purge from the node tree.
    :scene: scene whose node tree is targeted (default: active scene).
    '''
    scene = scene or bpy.context.scene

    vl_names = [v.name for v in viewlayer_list]

    for n in reversed(scene.node_tree.nodes):
        if n.type == 'R_LAYERS' and n.layer in vl_names:
            # follow the render layer image output into connected NG_ groups
            for lnk in n.outputs[0].links:
                grp = lnk.to_node
                if grp.type != 'GROUP':
                    continue
                if not grp.name.startswith('NG'):
                    continue
                sockin = lnk.to_socket
                sockout = grp.outputs.get(sockin.name)
                if not sockout:
                    continue

                # remove the file-output slots fed by this group output
                for grplink in sockout.links:
                    if grplink.to_node.type != 'OUTPUT_FILE':
                        continue
                    fo_socket = grplink.to_socket
                    fo = grplink.to_node
                    fo.file_slots.remove(fo_socket)

                # remove input and output sockets from the group *tree*
                # (removing grp.inputs/outputs on the node would not
                # clear the group internals properly)
                ngroup = grp.node_tree
                if bpy.app.version < (4,0,0):
                    for i in range(len(grp.inputs))[::-1]:
                        if grp.inputs[i].name == sockin.name:
                            ngroup.inputs.remove(ngroup.inputs[i])
                            break
                    for i in range(len(grp.outputs))[::-1]:
                        if grp.outputs[i].name == sockout.name:
                            ngroup.outputs.remove(ngroup.outputs[i])
                            break
                else:
                    # 4.0+: sockets live in the interface items tree
                    g_inputs = [s for s in ngroup.interface.items_tree if s.in_out == 'INPUT']
                    g_outputs = [s for s in ngroup.interface.items_tree if s.in_out == 'OUTPUT']
                    for i in range(len(grp.inputs))[::-1]:
                        if grp.inputs[i].name == sockin.name:
                            ngroup.interface.remove(g_inputs[i])
                            break
                    for i in range(len(grp.outputs))[::-1]:
                        if grp.outputs[i].name == sockout.name:
                            ngroup.interface.remove(g_outputs[i])
                            break

            # Remove render_layer node itself
            scene.node_tree.nodes.remove(n)
|
|
|
|
def merge_gplayer_viewlayers(ob=None, act=None, layers=None, scene=None):
    '''Merge the view layers of selected GP layers into the active one.

    :ob: GP object (not needed if act and layers are passed).
    :act: reference layer whose viewlayer is kept.
    :layers: layers to reassign (default: selected layers except act).
    :scene: render scene override (default: existing render scene).
    Return an (error_set, message) tuple on failure, None on success.
    '''
    if ob is None:
        ob = bpy.context.object
    if act is None:
        act = ob.data.layers.active
    if layers is None:
        layers = [l for l in ob.data.layers if l.select and l != act]

    if act is None:
        return ({'ERROR'}, 'Active layer not found. Should be active layer on active object!')

    rd_scn = scene or get_render_scene(create=False)
    # nodes may live in a separate dedicated compositing scene
    node_scene = get_compo_scene(create=False) or rd_scn

    if not act.viewlayer_render:
        return ({'ERROR'}, f'Active layer {act.info} has no viewlayer assigned')

    # Unique viewlayers used by the other layers (excluding act's own)
    vls = [rd_scn.view_layers.get(l.viewlayer_render) for l in layers
           if l.viewlayer_render and l.viewlayer_render != act.viewlayer_render and rd_scn.view_layers.get(l.viewlayer_render)]
    # Remove duplication
    vls = list(set(vls))

    # Remove viewlayer related nodes
    # FIXME: nodes might not be searched in the right scene when launched from RenderGP
    remove_nodes_by_viewlayer(vls, node_scene)  # send compositing scene

    # Assign view layer from active to selected
    for l in layers:
        l.viewlayer_render = act.viewlayer_render

    ## Delete viewlayers no longer referenced by any render-layer node
    used_vl_name = [n.layer for n in rd_scn.node_tree.nodes if n.type == 'R_LAYERS' and n.layer]
    used_vl_name = list(set(used_vl_name))

    for vl in vls:
        if vl.name == 'exclude':
            # keep the special exclusion viewlayer
            continue
        if not vl.name in used_vl_name:
            rd_scn.view_layers.remove(vl)
|
|
|
|
def group_adjacent_layer_prefix_rlayer(ob, excluded_prefix=None, first_name=True):
    '''Set viewlayer and renderlayers by GP layer adjacent prefix.

    Calls merge_gplayer_viewlayers() for each group of adjacent layers
    sharing the same name prefix.

    :excluded_prefix: list of prefixes to exclude from merge, or a string
        with comma separated values (default: no exclusion).
    :first_name: keep the viewlayer of the bottom layer in group, else last.
    '''

    # prefix pattern is overridable through environment (default: "XX_")
    pattern = os.environ.get('GP_RENDER_MERGE_PATTERN', r'^([A-Z]{2})_')
    re_prefix = re.compile(pattern, flags=re.IGNORECASE)

    # None sentinel (previous mutable default [] was shared across calls)
    if excluded_prefix is None:
        excluded_prefix = []
    elif isinstance(excluded_prefix, str):
        excluded_prefix = [p.strip() for p in excluded_prefix.split(',')]

    ## Create adjacent grp list: [('CO', [layer1, layer2]), ('LN', [layer3, layer4])]
    adjacent_prefix_groups = [
        (g[0], list(g[1])) for g in
        groupby([l for l in ob.data.layers],
                key=lambda l: re_prefix.search(l.info).group(1) if re_prefix.search(l.info) else '')
    ]

    for prefix, layer_grp in adjacent_prefix_groups:
        ## Remove layers that are in excluded viewlayer,
        ## else None/exclusion vl can expand rest of the adjacent layers
        for l in reversed(layer_grp):
            if not l.viewlayer_render or l.viewlayer_render == 'exclude':
                print(f'prefix "{prefix}": remove "{l.info}" from grouping adjacent layers')
                layer_grp.remove(l)  # remove targets the layer directly

        if len(layer_grp) < 2:
            continue
        if not prefix or prefix in excluded_prefix:
            continue

        # reference layer whose viewlayer is propagated to the group
        ref = layer_grp[0] if first_name else layer_grp[-1]

        merge_gplayer_viewlayers(ob, act=ref, layers=layer_grp)
|
|
|
|
|
|
def clear_nodegroup(name, full_clear=False):
    '''Remove numbered duplications of a nodegroup (name.001, name.002 ...).

    Groups that still have users are kept unless full_clear is True;
    full_clear also removes the base group named exactly `name`.
    '''
    # loop-invariant pattern, compiled once (was rebuilt per iteration)
    pattern = re.compile(name + r'\.\d{3}')

    for ng in reversed(bpy.data.node_groups):
        if not full_clear and ng.users:
            continue

        # branches made exclusive: previously a removed duplicate could be
        # accessed again by the base-name test right after removal
        if pattern.search(ng.name):
            bpy.data.node_groups.remove(ng)
        elif full_clear and ng.name == name:
            bpy.data.node_groups.remove(ng)
|
|
|
|
def rearrange_rlayers_in_frames(node_tree):
    '''Stack render-layer nodes at the top of each frame in node_tree.'''
    for frame in [n for n in node_tree.nodes if n.type == 'FRAME']:
        members = [n for n in node_tree.nodes if n.parent == frame]
        rlayers = [n for n in members if n.type == 'R_LAYERS']
        if not rlayers:
            continue

        # work from the highest node downward
        members.sort(key=lambda n: n.location.y, reverse=True)
        rlayers.sort(key=lambda n: n.location.y, reverse=True)

        cursor = members[0].location.y
        for rl in rlayers:
            # move to top, stacking at equal spacing
            rl.location.y = cursor

            if rl.dimensions.y == 0:
                # newly created node: dimensions not evaluated yet,
                # assume a probable height of 180
                cursor -= 180 + 20
            else:
                cursor -= rl.dimensions.y + 20  # node height + gap of 20
|
|
|
|
|
|
def rearrange_frames(node_tree):
    '''Stack all top-level frames of node_tree vertically with a 200 gap.'''
    frame_d = get_frames_bbox(node_tree)  # dic : {frame_node:(loc vector, dimensions vector), ...}
    if not frame_d:
        print('no frame found')
        return

    ## order the dict by real frame bottom (top-most first)
    frame_d = {key: value for key, value in sorted(frame_d.items(), key=lambda pair: pair[1][0].y - pair[1][1].y, reverse=True)}
    frames = [[f, v[0], v[1].y] for f, v in frame_d.items()]  # [frame_node, real_loc, real height]

    top = frames[0][1].y  # upper node location.y
    offset = 0
    for f in frames:
        ## f[0] : frame ; f[1] : real loc Vector ; f[2] : real height

        ## move frame by delta between real_loc and "fake" loc, minus accumulated offset
        f[0].location.y = (f[1].y - f[0].location.y) - offset  # avoid offset when recalculating from 0 top
        offset += f[2] + 200  # gap

        f[0].update()
|
|
|
|
def reorder_inputs(ng):
    '''Reorder nodegroup inputs to match the vertical order of the
    render-layer nodes feeding them.
    '''
    # connected render-layer nodes, sorted top to bottom
    rl_nodes = [s.links[0].from_node for s in ng.inputs if s.is_linked and s.links and s.links[0].from_node.type == 'R_LAYERS']
    rl_nodes.sort(key=lambda x: x.location.y, reverse=True)
    names = [n.layer for n in rl_nodes]
    inputs_names = [s.name for s in ng.inputs]
    # target order, restricted to names that actually are inputs
    filtered_names = [n for n in names if n in inputs_names]

    if bpy.app.version < (4,0,0):
        for dest, name in enumerate(filtered_names):
            ## rebuild list at each iteration so indices stay valid
            inputs_names = [s.name for s in ng.inputs]
            src = inputs_names.index(name)
            # reorder on node_tree not directly on node!
            ng.node_tree.inputs.move(src, dest)

    else:
        # 4.0+: move items on the interface items tree
        n_inputs = [s for s in ng.node_tree.interface.items_tree if s.in_out == 'INPUT']
        for dest, name in enumerate(filtered_names):
            item = next((s for s in ng.node_tree.interface.items_tree if s.in_out == 'INPUT' and s.name == name), None)
            if not item:  # Dbg
                print(f'!PROBLEM with input "{name}"')
                continue
            # Need to offset index (inputs are listed after outputs in items_tree)
            dest = dest + n_inputs[0].position
            ng.node_tree.interface.move(item, dest)
        ng.node_tree.interface_update(bpy.context)
|
|
|
|
def reorder_outputs(ng):
    '''Reorder nodegroup outputs to follow the order of same-named inputs.'''
    # output names, in the order their matching inputs appear
    ordered_out_name = [nis.name for nis in ng.inputs if nis.name in [o.name for o in ng.outputs]]
    for s_name in ordered_out_name:
        all_outnames = [o.name for o in ng.outputs]
        # reorder on nodetree, not on node !
        if bpy.app.version < (4,0,0):
            ng.node_tree.outputs.move(all_outnames.index(s_name), ordered_out_name.index(s_name))
        else:
            # 4.0+: move items on the interface items tree
            item = next((s for s in ng.node_tree.interface.items_tree if s.in_out == 'OUTPUT' and s.name == s_name), None)
            if not item:  # Dbg
                print(f'!PROBLEM with output "{s_name}"')
                continue
            ng.node_tree.interface.move(item, ordered_out_name.index(s_name))
            ng.node_tree.interface_update(bpy.context)
|
|
|
|
def clear_disconnected(fo):
    '''Remove every unlinked input slot from file output node fo.'''
    for slot in reversed(fo.inputs):
        if slot.is_linked:
            continue
        print(f'Deleting unlinked fileout slot: {slot.name}')
        fo.inputs.remove(slot)
|
|
|
|
def reorder_fileout(fo, ng=None):
    '''Reorder the slots of file output node fo to match the order of
    the nodegroup outputs feeding them.

    :ng: source nodegroup node; when omitted, the first GROUP node found
        among fo's input links is used.
    '''
    if not ng:  # get connected nodegroup
        for s in fo.inputs:
            if s.is_linked and s.links and s.links[0].from_node.type == 'GROUP':
                ng = s.links[0].from_node
                break
    if not ng:
        print(f'No nodegroup to refer to filter {fo.name}')
        return

    # target slot order; fixed duplicated `o.is_linked and o.is_linked`
    # condition: second test now guards against an empty links collection
    ordered = [o.links[0].to_socket.name for o in ng.outputs if o.is_linked and o.links and o.links[0].to_node == fo]
    for s_name in ordered:
        all_outnames = [s.name for s in fo.inputs]  # same as [fs.path for fs in fo.file_slots]
        fo.inputs.move(all_outnames.index(s_name), ordered.index(s_name))
|
|
|
|
def reorganise_NG_nodegroup(ng):
    '''Refit nodegroup content vertically to avoid overlap.

    Each node fed by a connected group-input socket is stacked
    35 units below the previous one.
    '''
    tree = ng.node_tree
    group_input = tree.nodes.get('Group Input')
    step = 35
    level = 0
    for sock in group_input.outputs:
        if not sock.is_linked:
            continue
        sock.links[0].to_node.location.y = level
        level -= step
|
|
|
|
def connect_to_group_output(n):
    '''Follow output links from node n until a group output node is reached.

    Return the group-output socket the chain lands on, or False when no
    path leads to a group output. Only the first link of each socket
    is followed.
    '''
    for out_sock in n.outputs:
        if not out_sock.is_linked:
            continue
        first_link = out_sock.links[0]
        if first_link.to_node.type == 'GROUP_OUTPUT':
            return first_link.to_socket
        found = connect_to_group_output(first_link.to_node)
        if found:
            return found
    return False
|
|
|
|
def connect_to_group_input(n):
    '''Follow input links from node n back to a group input node.

    Return the group-input socket the chain comes from, or False when
    no path leads back to a group input. Only the first link of each
    socket is followed.
    '''
    for in_sock in n.inputs:
        if not in_sock.is_linked:
            continue
        first_link = in_sock.links[0]
        if first_link.from_node.type == 'GROUP_INPUT':
            return first_link.from_socket
        found = connect_to_group_input(first_link.from_node)
        if found:
            return found
    return False
|
|
|
|
def all_connected_forward(n, nlist=None):
    '''Return list of all forward-connected nodes recursively (includes the passed node).

    :nlist: accumulator used by the recursion; callers normally omit it.
    Traversal stops at group output nodes (which are not included).
    '''
    # None sentinel instead of a mutable default argument
    if nlist is None:
        nlist = []
    for o in n.outputs:
        if o.is_linked:
            for lnk in o.links:
                if lnk.to_node.type == 'GROUP_OUTPUT':
                    # reached the end of the chain: record n once
                    if n not in nlist:
                        return nlist + [n]
                    else:
                        return nlist
                else:
                    nlist = all_connected_forward(lnk.to_node, nlist)
                    if n in nlist:
                        return nlist
    return nlist + [n]
|
|
|
|
def all_connected_forward_from_socket(socket):
    '''Return a list of all nodes reachable forward from socket (unique, order kept).'''
    seen = []
    for link in socket.links:
        for node in all_connected_forward(link.to_node):
            if node not in seen:
                seen.append(node)
    return seen
|
|
|
|
def node_height(n):
    '''Effective vertical size of a node; collapsed (hidden) nodes measure 30.'''
    if n.hide:
        return 30
    return n.height
|
|
|
|
def reorder_nodegroup_content(ngroup):
    '''Vertically stack the "threads" of a nodegroup so they don't overlap.

    A thread is the set of nodes reachable forward from one group-input
    socket. Accepts either the group node or its node_tree.
    '''
    if isinstance(ngroup, bpy.types.Node):
        # a group node was passed instead of its tree
        ngroup = ngroup.node_tree

    grp_in = None
    for n in ngroup.nodes:
        if n.type == 'GROUP_INPUT':
            grp_in = n
            break
    if not grp_in:
        return

    # one node list per connected group-input socket
    n_threads = []
    for out in grp_in.outputs:
        n_thread = all_connected_forward_from_socket(out)
        if n_thread:
            n_threads.append(n_thread)

    level = grp_in.location.y
    for thread in n_threads:
        top = max([n.location.y for n in thread])
        bottom = min([n.location.y - node_height(n) for n in thread])
        thread_h = top - bottom
        # move all nodes of the thread so its top aligns with level
        diff_to_add = level - top
        for n in thread:
            n.location.y += diff_to_add
        # move level below this thread
        level -= thread_h + 2  # add a gap of two
|
|
|
|
def clear_nodegroup_content_if_disconnected(ngroup):
    '''Get a nodegroup (node or node_tree).

    Delete orphan nodes connected neither to the group input nor the
    group output, then restack the remaining content.
    '''
    if isinstance(ngroup, bpy.types.Node):
        # case where a node is sent instead of the group
        ngroup = ngroup.node_tree

    for n in reversed(ngroup.nodes):
        if n.type in ('GROUP_INPUT', 'GROUP_OUTPUT'):
            continue
        if not connect_to_group_input(n) and not connect_to_group_output(n):  # is disconnected from both side
            ngroup.nodes.remove(n)

    reorder_nodegroup_content(ngroup)
|
|
|
|
def clean_nodegroup_inputs(ng, skip_existing_pass=True):
    '''Remove unconnected inputs of the passed nodegroup node.

    :skip_existing_pass: keep an unconnected input when a render-layer
        node with the same layer name still exists in the parent tree.
    '''
    ngroup = ng.node_tree
    rl_nodes = [n.layer for n in ng.id_data.nodes if n.type == 'R_LAYERS']
    for i in range(len(ng.inputs))[::-1]:
        if not ng.inputs[i].is_linked:
            if skip_existing_pass and any(ng.inputs[i].name == x for x in rl_nodes):
                # a render layer of this name still exists
                continue
            # FIX: branch on Blender version like the other socket-removal
            # helpers in this module (ngroup.inputs is pre-4.0 API only)
            if bpy.app.version < (4, 0, 0):
                ngroup.inputs.remove(ngroup.inputs[i])
            else:
                g_inputs = [s for s in ngroup.interface.items_tree if s.in_out == 'INPUT']
                ngroup.interface.remove(g_inputs[i])
|
|
|
|
def bridge_reconnect_nodegroup(ng, socket_name=None):
    '''Reconnect group input sockets to their same-named group output
    through a new AA nodegroup when the internal chain was broken.

    :socket_name: only consider this specific socket name.
    '''
    ngroup = ng.node_tree
    ng_in = ngroup.nodes.get('Group Input')
    ng_out = ngroup.nodes.get('Group Output')
    for sockin in ng_in.outputs:
        if socket_name and sockin.name != socket_name:
            continue
        if not sockin.name:  # the trailing "virtual" output is listed too
            continue
        sockout = ng_out.inputs.get(sockin.name)
        if not sockout:
            continue
        if len(sockin.links) and connect_to_group_output(sockin.links[0].to_node):
            # chain still reaches the group output: nothing to do
            continue

        ## need reconnect: bridge through a fresh AA group
        aa = create_aa_nodegroup(ngroup)
        ngroup.links.new(sockin, aa.inputs[0])
        ngroup.links.new(aa.outputs[0], sockout)
        print(f'{ng.name}: Bridged {sockin.name}')
|
|
|
|
|
|
def random_color(alpha=False):
    '''Return a random RGB tuple with values in [0, 1].

    With alpha=True, return an RGBA tuple whose alpha is 1.
    '''
    import random
    rgb = (random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
    if alpha:
        return rgb + (1,)
    return rgb
|
|
|
|
def nodegroup_merge_inputs(ngroup, aa=True):
    '''Get a nodegroup (node_tree).

    Merge every group input with chained alpha-over nodes, then connect
    the result to a new "preview" output, through an AA group when aa.

    NOTE(review): assumes at least two connectable inputs — with fewer,
    `ao` below would be unbound; confirm callers guarantee this.
    '''

    ng_in = ngroup.nodes.get('Group Input')
    ng_out = ngroup.nodes.get('Group Output')

    x, y = ng_in.location.x + 200, 0

    offset_x, offset_y = 150, -100

    # merge all inputs in alphaover nodes
    prev = None
    for i in range(len(ng_in.outputs)-1):  # skip trailing "virtual" socket
        inp = ng_in.outputs[i]
        if not prev:
            # first socket: the group input itself is the first source
            prev = ng_in
            continue

        # chain: previous result over current input
        ao = create_node('CompositorNodeAlphaOver', tree=ngroup, location=(x,y), hide=True)
        ngroup.links.new(prev.outputs[0], ao.inputs[1])
        ngroup.links.new(inp, ao.inputs[2])

        x += offset_x
        y += offset_y
        prev = ao

    # Create one output and link (socket API changed in 4.0)
    out_name = 'preview'
    if bpy.app.version < (4,0,0):
        out = ngroup.outputs.new('NodeSocketColor', out_name)
    else:
        out = ngroup.interface.new_socket(out_name, in_out='OUTPUT', socket_type='NodeSocketColor')

    if aa:
        # route the merge result through an anti-alias group
        aa = create_aa_nodegroup(ngroup)  # new_aa_node(ngroup)
        aa.location = (ao.location.x + 200, ao.location.y)
        ngroup.links.new(ao.outputs[0], aa.inputs[0])
        ngroup.links.new(aa.outputs[0], ng_out.inputs[0])
    else:
        # link directly
        ngroup.links.new(ao.outputs[0], ng_out.inputs[0])
|
|
|
|
|
|
## -- renumbering funcs
|
|
|
|
def get_numbered_output(out, slot_name):
    '''Find an input slot on file output node `out` matching slot_name,
    ignoring any optional "NNN_" numbering prefix.

    Return the socket, or None when no slot matches.
    '''
    # optional non-capturing 3-digit prefix, then the slot name itself
    rx = re.compile(r'^(?:\d{3}_)?' + slot_name)
    return next((inp for inp in out.inputs if rx.match(inp.name)), None)
|
|
|
|
|
|
def add_fileslot_number(fs, number):
    '''Set (or replace) the 3-digit "###_" prefix on every path element of a slot.

    :fs: file slot (exposes 'path') or layer slot (exposes 'name')
    :number: integer written as a zero-padded 3-digit prefix
    return the new string value
    '''
    attr = 'name' if hasattr(fs, 'name') else 'path'
    prefix = str(number).zfill(3)

    parts = []
    for elem in getattr(fs, attr).split('/'):
        if re.match(r'^\d{3}_', elem):
            # already numbered: swap the existing 3 digits
            parts.append(prefix + elem[3:])
        else:
            parts.append(f'{prefix}_{elem}')

    renumbered = '/'.join(parts)
    setattr(fs, attr, renumbered)
    return renumbered
|
|
|
|
def renumber(fo, offset=10):
    '''Force renumber all slots of a file output node, starting at 10.

    :fo: compositor node (ignored unless it is a File Output node)
    :offset: increment between consecutive slot numbers
    '''
    if fo.type != 'OUTPUT_FILE':
        return

    multilayer = fo.format.file_format == 'OPEN_EXR_MULTILAYER'
    slots = fo.layer_slots if multilayer else fo.file_slots

    for num, fs in enumerate(slots):
        # first slot gets 10, then 10 + offset, 10 + 2*offset, ...
        add_fileslot_number(fs, 10 + num * offset)
|
|
|
|
def get_num(string) -> int:
    '''Return the leading "###_" number of a string or slot object, else None.

    :string: a str, or a slot exposing 'path' (file slot) or 'name' (layer slot)
    '''
    if not isinstance(string, str):
        string = string.path if hasattr(string, 'path') else string.name

    found = re.match(r'^(\d{3})_', string)
    return int(found.group(1)) if found else None
|
|
|
|
def delete_numbering(fo):
    '''Remove the "###_" prefix from every path element of all slots
    of the passed file output node.
    '''
    if fo.type != 'OUTPUT_FILE':
        return

    # multilayer EXR uses layer slots (named), other formats use file slots (paths)
    multilayer = fo.format.file_format == 'OPEN_EXR_MULTILAYER'
    slots = fo.layer_slots if multilayer else fo.file_slots
    field_attr = 'name' if multilayer else 'path'

    for fs in slots:
        stripped = [re.sub(r'^\d{3}_', '', e) for e in getattr(fs, field_attr).split('/')]
        setattr(fs, field_attr, '/'.join(stripped))
|
|
|
|
def reverse_fileout_inputs(fo):
    '''Reverse the order of a file output node's input sockets in place.'''
    total = len(fo.inputs)
    for target in range(total):
        # repeatedly move the current last socket toward the front
        fo.inputs.move(total - 1, target)
|
|
|
|
def renumber_keep_existing(fo, offset=10, invert=True):
    '''Renumber by keeping existing numbers and inserting new one whenever possible
    Big and ugly function that do the trick nonetheless...

    :fo: compositor File Output node
    :offset: spacing used when appending after an existing number
    :invert: process slots in reversed socket order (restored before returning)

    Fix: the "cannot insert before 0" message referenced `fsl.path` (the slot
    collection, which has no such attribute and raised AttributeError); it now
    reports the offending slot `fs.path`.
    '''

    if fo.type != 'OUTPUT_FILE': return
    ct = 10 # fallback counter when no surrounding number is usable

    if invert:
        reverse_fileout_inputs(fo)

    fsl = fo.layer_slots if fo.format.file_format == 'OPEN_EXR_MULTILAYER' else fo.file_slots

    last_idx = len(fsl) - 1
    prev = None
    prev_num = None
    for idx, fs in enumerate(fsl):
        # print('-->', idx, fs.path)

        if idx == last_idx: # handle last
            if get_num(fs) is not None:
                break
            if idx > 0:
                # number after the previous slot when it is numbered
                prev = fsl[idx-1]
                num = get_num(prev)
                if num is not None:
                    add_fileslot_number(fs, num + offset)
                else:
                    add_fileslot_number(fs, ct)
            else:
                add_fileslot_number(fs, 10) # there is only one slot (maybe don't number ?)
            break

        # update the ct with the current taken number if any
        number = get_num(fs)
        if number is not None:
            prev = fs
            ct = number + offset
            continue # skip already numbered

        # analyse all next slots until a numbered one is found
        divider = 0
        for i in range(1, len(fsl) - idx):
            next_num = get_num(fsl[idx + i])
            if next_num is not None:
                divider = i+1
                break

        if idx == 0: # handle first
            prev_num = 0
            prev = None
            if next_num is None:
                add_fileslot_number(fs, 0)
            elif next_num == 0:
                # fixed: was fsl.path (AttributeError on the slot collection)
                print(f'Cannot insert value before 0 to {fs.path}')
                continue
            else:
                # insert halfway before the next numbered slot
                add_fileslot_number(fs, int(next_num / 2))
        else:
            prev = fsl[idx-1]
            test_prev = get_num(prev)
            if test_prev is not None:
                prev_num = test_prev

            if not divider:
                # no numbered slot ahead: append after previous number
                if prev_num is not None:
                    add_fileslot_number(fs, prev_num + offset)
                else:
                    add_fileslot_number(fs, ct)

            else:
                if prev_num is not None:
                    # iterate rename: spread evenly between prev_num and next_num
                    gap_inc = int((next_num - prev_num) / divider)
                    if gap_inc < 1: # same values !
                        print(f'cannot insert a median value at {fs.path} between {prev_num} and {next_num}')
                        continue

                    ct = prev_num
                    for temp_id in range(idx, idx+i):
                        ct += gap_inc
                        add_fileslot_number(fsl[temp_id], ct)
                else:
                    print("what's going on ?\n")

        # first check if it has a number (if not bas)
        prev = fs
        ct += offset

    if invert:
        reverse_fileout_inputs(fo)
|
|
|
|
def has_channel_color(layer):
    '''Return True if gp_layer.channel_color differs from the default (0.2, 0.2, 0.2).

    Fix: the previous test `not any(isclose(...))` only reported a custom color
    when ALL components differed from 0.2, so e.g. (0.5, 0.2, 0.2) was missed.
    The docstring contract is "different than the default", i.e. `not all(...)`.
    Always returns a bool (previously returned True or None).
    '''
    return not all(isclose(c, 0.2, abs_tol=0.001) for c in layer.channel_color)
|
|
|
|
def normalize(text):
    '''Lowercase text and turn dashes into underscores.'''
    return text.replace('-', '_').lower()
|
|
|
|
# Layer name anatomy: optional "- " group marker, optional 2-letter uppercase
# tag + "_", core name, optional "_" + 2-letter uppercase suffix, optional
# ".###" duplicate increment appended by Blender
PATTERN = r'^(?P<grp>-\s)?(?P<tag>[A-Z]{2}_)?(?P<name>.*?)(?P<sfix>_[A-Z]{2})?(?P<inc>\.\d{3})?$' # numering

def normalize_layer_name(layer, prefix='', desc='', suffix='', lower=True, dash_to_underscore=True, point_to_underscore=True, get_only=False):
    '''GET a layer and argument to build and assign name

    :layer: GP layer whose name (layer.info) is parsed and rebuilt
    :prefix: replace the 2-letter tag; the literal 'prefixkillcode' removes it
    :desc: replace the core name part
    :suffix: replace the suffix; the literal 'suffixkillcode' removes it
    :lower:/:dash_to_underscore:/:point_to_underscore: cleanup toggles applied
        to the core name only
    :get_only: return the computed name without assigning it

    When the name changes, also updates grease pencil modifiers of objects
    sharing this gp data whose 'layer' target pointed to the old name.
    '''
    import re

    name = layer.info

    pattern = PATTERN
    sep = '_'
    res = re.search(pattern, name.strip())

    # each named group may be absent -> substitute empty strings
    grp = '' if res.group('grp') is None else res.group('grp')
    tag = '' if res.group('tag') is None else res.group('tag')
    # tag2 = '' if res.group('tag2') is None else res.group('tag2')
    name = '' if res.group('name') is None else res.group('name')
    sfix = '' if res.group('sfix') is None else res.group('sfix')
    # increment suffix is parsed but deliberately dropped from the rebuilt name
    inc = '' if res.group('inc') is None else res.group('inc')

    if grp:
        grp = ' ' + grp # name is strip(), so grp first spaces are gones.

    if prefix:
        if prefix == 'prefixkillcode':
            tag = ''
        else:
            tag = prefix.upper().strip() + sep
    # if prefix2:
    #     tag2 = prefix2.upper().strip() + sep
    if desc:
        name = desc

    if suffix:
        if suffix == 'suffixkillcode':
            sfix = ''
        else:
            sfix = sep + suffix.upper().strip()

    # check if name is available without the increment ending
    if lower:
        name = name.lower()
    if dash_to_underscore:
        name = name.replace('-', '_')
    if point_to_underscore:
        name = name.replace('.', '_')

    new = f'{grp}{tag}{name}{sfix}' # lower suffix ?
    if get_only:
        return new
    if new != layer.info:
        old = layer.info
        print(f'{old} >> {new}')
        layer.info = new

        # Also change name string in modifier target !
        # (modifiers reference layers by name string, not by pointer)
        for ob in [o for o in bpy.data.objects if o.type == 'GPENCIL' and o.data == layer.id_data]:
            for m in ob.grease_pencil_modifiers:
                if hasattr(m, 'layer') and m.layer:
                    if m.layer == old:
                        print(f' - updated in {ob.name} modifier {m.name}')
                        m.layer = new
|
|
|
|
# unused currently
|
|
def build_dope_gp_list(layer_list):
    '''Take a list of GP layers, return a dict pairing each gp data with its own layers.'''
    from collections import defaultdict
    by_data = defaultdict(list)
    for layer in layer_list:
        # id_data is the grease pencil datablock owning the layer
        by_data[layer.id_data].append(layer)
    return by_data
|
|
|
|
def build_layers_targets_from_dopesheet(context):
    '''Return all GP layers shown in the context GP dopesheet according to
    its selection state and filters (show_only_selected, show_hidden,
    filter_text, use_filter_invert).

    Fix: removed unused locals (ob / gpl / act) that dereferenced
    context.object.data and crashed with AttributeError when no object
    was active, even though the result never used them.
    '''
    dopeset = context.space_data.dopesheet

    if dopeset.show_only_selected:
        pool = [o for o in context.selected_objects if o.type == 'GPENCIL']
    else:
        pool = [o for o in context.scene.objects if o.type == 'GPENCIL']
    if not dopeset.show_hidden:
        pool = [o for o in pool if o.visible_get()]

    layer_pool = [l for o in pool for l in o.data.layers]
    layer_pool = list(set(layer_pool)) # remove dupli-layers from same data source

    # apply search filter (XOR with invert toggle)
    if dopeset.filter_text:
        layer_pool = [l for l in layer_pool if (dopeset.filter_text.lower() in l.info.lower()) ^ dopeset.use_filter_invert]

    return layer_pool
|
|
|
|
""" # old show message gox without operator support
|
|
def show_message_box(_message = "", _title = "Message Box", _icon = 'INFO'):
|
|
'''get a str to display or a list of [str, str]
|
|
can have an icon [[str, icon], str, [str, icon]]
|
|
'''
|
|
|
|
def draw(self, context):
|
|
for l in _message:
|
|
if isinstance(l, str):
|
|
self.layout.label(text=l)
|
|
else:
|
|
self.layout.label(text=l[0], icon=l[1])
|
|
|
|
if isinstance(_message, str):
|
|
_message = [_message]
|
|
bpy.context.window_manager.popup_menu(draw, title = _title, icon = _icon)
|
|
"""
|
|
|
|
def show_message_box(_message = "", _title = "Message Box", _icon = 'INFO'):
    '''Display a popup menu built from _message.

    _message may be a plain string or a list whose items are:
      - str : plain label
      - [text, icon] : label with icon
      - [ops_idname, text, icon] : clickable operator entry
    '''
    if isinstance(_message, str):
        entries = [_message]
    else:
        entries = _message

    def draw(self, context):
        layout = self.layout
        for entry in entries:
            if isinstance(entry, str):
                layout.label(text=entry)
            elif len(entry) == 2: # label with icon
                layout.label(text=entry[0], icon=entry[1])
            elif len(entry) == 3: # operator
                layout.operator_context = "INVOKE_DEFAULT"
                layout.operator(entry[0], text=entry[1], icon=entry[2], emboss=False) # <- highligh the entry

    bpy.context.window_manager.popup_menu(draw, title = _title, icon = _icon)
|
|
|
|
|
|
## -- camera framing and object anim checks
|
|
|
|
def get_bbox_3d(ob) -> list:
    '''Return the object's 8 bounding-box corners transformed to world space.'''
    mat = ob.matrix_world
    return [mat @ Vector(corner) for corner in ob.bound_box]
|
|
|
|
def is_render_included(o, scn) -> bool:
    '''return True if object is in at least one non-excluded collection
    in all passed scene viewlayer (and is not disabled for render)

    Fix: removed leftover debug print of every collection name, which
    spammed the console once per collection per view layer.
    '''
    if o.hide_render:
        return False
    for vl in scn.view_layers:
        all_cols = get_collection_childs_recursive(vl.layer_collection)
        for c in all_cols:
            # layer_collection wraps the real collection; exclusion is per view layer
            if o in c.collection.objects[:] and not c.exclude:
                return True
    return False
|
|
|
|
|
|
def get_crop_pixel_coord(scn) -> dict:
    '''Return render-border center position and size in pixels for scn.

    position_x is measured from the left edge; position_y from the TOP edge
    (After Effects convention — Blender's native origin is bottom-left).
    '''
    rd = scn.render
    span_x = rd.border_max_x - rd.border_min_x
    span_y = rd.border_max_y - rd.border_min_y

    px_width = span_x * rd.resolution_x
    px_height = span_y * rd.resolution_y

    # border center, x from left
    pos_x = (rd.border_min_x + span_x / 2) * rd.resolution_x
    # border center, y from top: (1 - max) is the distance from the top edge
    pos_y = ((1 - rd.border_max_y) + span_y / 2) * rd.resolution_y

    return {
        'position_x' : round(pos_x),
        'position_y' : round(pos_y),
        'width' : round(px_width),
        'height' : round(px_height),
        'scene_res_x': rd.resolution_x,
        'scene_res_y': rd.resolution_y,
    }
|
|
|
|
def export_crop_to_json() -> dict:
    '''Export crop coordinates of every cropped scene to a json file for AE.

    Saved as render/crop_infos.json next to the blend file (fixed name is
    easier to load from After Effects). Keys are render-included GP object
    names when possible, scene names otherwise.
    '''
    blend = Path(bpy.data.filepath)
    json_path = blend.parent / 'render' / 'crop_infos.json'
    ## Use blend name (to support version)
    # json_path = blend.parent / 'render' / f'{blend.stem}.json'

    coord_dic = {}
    for scn in bpy.data.scenes:
        # Only useful when the render is actually cropped to the border
        if not (scn.render.use_border and scn.render.use_crop_to_border):
            continue

        scn_border = get_crop_pixel_coord(scn)

        ## use name of first found visible GP (scene name if no visible GP)
        gps = [o for o in scn.objects if o.type == 'GPENCIL' if is_render_included(o, scn)] # o.visible_get() < only work on active window
        if gps and scn.name != 'Scene': # always export Scene with Scene name...
            for ob in gps:
                coord_dic[ob.name] = scn_border
                print(f'Added gp {ob.name} crop info')
        else:
            coord_dic[scn.name] = scn_border
            print(f'Added scene {scn.name} crop info')

    if coord_dic:
        json_path.parent.mkdir(parents=False, exist_ok=True)
        # save bbox
        with json_path.open('w') as fd:
            json.dump(coord_dic, fd, indent='\t')
        print(f'Coords saved at: {json_path}')

    return coord_dic
|
|
|
|
def set_border_region_from_coord(coords, scn=None, margin=30, export_json=True):
    '''Set the render border of a scene from camera-view-space coordinates.

    :coords: list of points in camera view space (0-1 on each axis)
    :scn: scene to modify (defaults to the context scene)
    :margin: margin in pixels added on every side
    :export_json: kept for API compatibility (json export is currently
                  done by the caller)
    return the border corners as pixel coordinates
    '''
    scn = scn or bpy.context.scene
    rd = scn.render

    xs = sorted(c[0] for c in coords)
    ys = sorted(c[1] for c in coords)

    # convert pixel margin to the border's 0-1 space
    margin_w = margin / rd.resolution_x
    margin_h = margin / rd.resolution_y

    # set crop rectangle around the extremes
    rd.border_min_x = xs[0] - margin_w
    rd.border_max_x = xs[-1] + margin_w
    rd.border_min_y = ys[0] - margin_h
    rd.border_max_y = ys[-1] + margin_h

    # read back the (possibly clamped) values and express them in pixels
    return [
        (rd.border_min_x * rd.resolution_x, rd.border_min_y * rd.resolution_y),
        (rd.border_min_x * rd.resolution_x, rd.border_max_y * rd.resolution_y),
        (rd.border_max_x * rd.resolution_x, rd.border_max_y * rd.resolution_y),
        (rd.border_max_x * rd.resolution_x, rd.border_min_y * rd.resolution_y),
    ]
|
|
|
|
def get_gp_box_all_frame(ob, cam=None):
    '''Return camera-view-space (0-1) coords of every GP point of ob over
    the scene frame range. Cam should not be animated (render in bg_cam).
    '''
    from bpy_extras.object_utils import world_to_camera_view

    coords_cam_list = []
    scn = bpy.context.scene
    cam = cam or scn.camera
    start = time()

    def collect(frame, mat):
        # project every stroke point of one gp frame into camera space
        for s in frame.strokes:
            if len(s.points) == 1: # skip isolated points
                continue
            coords_cam_list.extend(world_to_camera_view(scn, cam, mat @ p.co) for p in s.points)

    if ob.animation_data and ob.animation_data.action:
        # animated: frame_set is required so matrix_world is evaluated per frame
        print(f'{ob.name} has anim')
        for num in range(scn.frame_start, scn.frame_end+1):
            scn.frame_set(num)
            for l in ob.data.layers:
                if l.hide or l.opacity == 0.0:
                    continue
                if l.active_frame:
                    collect(l.active_frame, ob.matrix_world)
    else:
        # if object is not animated no need to frame_set to update object position
        print(f'{ob.name} no anim')
        for l in ob.data.layers:
            if l.hide or l.opacity == 0.0:
                continue
            for f in l.frames:
                if scn.frame_start <= f.frame_number <= scn.frame_end:
                    collect(f, ob.matrix_world)

    print(time() - start) # Dbg-time
    return coords_cam_list
|
|
|
|
def has_anim(ob):
    '''Return truthy when the object carries animation data with an action.

    TODO: refine — a single key per channel should arguably not count as animated.
    '''
    anim = ob.animation_data
    return anim and anim.action
|
|
|
|
def has_keyframe(ob, attr):
    '''Return True if ob's action has at least one keyframe on data path attr.'''
    anim = ob.animation_data
    if anim is None or anim.action is None:
        return False
    for fcu in anim.action.fcurves:
        if fcu.data_path == attr:
            # first matching fcurve decides
            return len(fcu.keyframe_points) > 0
    return False
|
|
|
|
def get_gp_box_all_frame_selection(oblist=None, scn=None, cam=None, timeout=40):
    '''
    get points of all selection
    return the list of GP points projected in camera view space (0-1 range)
    return None if timeout (too long to process, better to do it visually)

    :oblist: GP objects to scan (default: selected objects of scn)
    :scn: scene giving the frame range (default: context scene)
    :cam: camera used for projection (default: scene camera)
    :timeout: abort and return None after this many seconds
    '''

    from bpy_extras.object_utils import world_to_camera_view

    t0 = time()
    coords_cam_list = []
    scn = scn or bpy.context.scene
    oblist = oblist or [o for o in scn.objects if o.select_get()]

    cam = cam or scn.camera
    start = time()

    if any(has_anim(ob) for ob in oblist):
        # At least one object is animated: frame_set every frame of the
        # range so matrix_world is evaluated correctly for all objects.
        print(f'at least one is animated: {oblist}')
        for num in range(scn.frame_start, scn.frame_end+1):
            scn.frame_set(num)
            for ob in oblist:
                for l in ob.data.layers:
                    if l.hide or l.opacity == 0.0 or l.info.startswith('MA_'): # hardcoded Mask exclusion !
                        continue
                    if not l.active_frame:
                        continue
                    for s in l.active_frame.strokes:
                        if len(s.points) == 1: # skip isolated points
                            continue
                        coords_cam_list += [world_to_camera_view(scn, cam, ob.matrix_world @ p.co) for p in s.points]

            # timeout checked once per scene frame in this branch
            if time() - t0 > timeout:
                print(f'timeout (more than {timeout}s to calculate) evaluating frame position of objects {oblist}')
                return
    else:
        print(f'No anim')
        for ob in oblist:
            # if object is not animated no need to frame_set to update object position
            for l in ob.data.layers:
                if l.hide or l.opacity == 0.0 or l.info.startswith('MA_'): # hardcoded Mask exclusion !
                    continue
                for f in l.frames:
                    # timeout checked per gp frame in this branch
                    if time() - t0 > timeout:
                        print(f'timeout (more than {timeout}s to calculate) evaluating frame position of objects {oblist}')
                        return
                    if not (scn.frame_start <= f.frame_number <= scn.frame_end):
                        continue
                    for s in f.strokes:
                        if len(s.points) == 1: # skip isolated points
                            continue
                        coords_cam_list += [world_to_camera_view(scn, cam, ob.matrix_world @ p.co) for p in s.points]

    print(f'{len(coords_cam_list)} gp points listed {time() - start:.1f}s')
    return coords_cam_list
|
|
|
|
def get_bbox_2d(ob, cam=None):
    '''Return the 4 corners (camera view space, 0-1) of the object's
    2d bounding rectangle as Vectors, counter-clockwise from bottom-left.
    '''
    from bpy_extras.object_utils import world_to_camera_view

    scn = bpy.context.scene
    cam = cam or scn.camera

    projected = [world_to_camera_view(scn, cam, p) for p in get_bbox_3d(ob)]
    xs = sorted(p[0] for p in projected)
    ys = sorted(p[1] for p in projected)

    x_min, x_max = xs[0], xs[-1]
    y_min, y_max = ys[0], ys[-1]
    return [
        Vector((x_min, y_min)),
        Vector((x_min, y_max)),
        Vector((x_max, y_max)),
        Vector((x_max, y_min)),
    ]
|
|
|
|
def set_box_from_selected_objects(scn=None, cam=None, export_json=False):
    '''Set the render border around the selected GP objects over the whole frame range.

    return an error message string on analysis timeout, None otherwise
    '''
    scn = scn or bpy.context.scene
    cam = cam or scn.camera

    selection = [o for o in scn.objects if o.select_get()] # selected_objects
    coords = get_gp_box_all_frame_selection(oblist=selection, scn=scn, cam=cam)
    if not coords:
        return f'Border not set: Timeout during analysis of {len(selection)} objects'

    set_border_region_from_coord(coords, margin=30, scn=scn, export_json=export_json)
|
|
|
|
def get_cam_frame_center_world(cam):
    '''Return the world-space center of the camera frame.

    Ortho cam note: scale must be 1,1,1 (parent too) to fit right in the
    cam-frame rectangle.
    '''
    import numpy as np

    # the 4 frame corners, moved to world space
    corners = [cam.matrix_world @ v for v in cam.data.view_frame()]
    return Vector(np.sum(corners, axis=0) / 4)
|
|
|
|
def get_coord_in_cam_space(scene, cam_ob, co, ae=False):
    '''Return the pixel 2d coordinate of a world-space point seen by a camera.

    :scene: scene where camera is used (needed to get resolution)
    :cam_ob: camera object
    :co: the Vector3 world coordinate to project
    :ae: if True, use After Effects convention (origin top-left;
         Blender native is bottom-left)
    '''
    import bpy_extras
    pt = bpy_extras.object_utils.world_to_camera_view(scene, cam_ob, co)

    if ae:
        # measure y from the top instead of the bottom
        pt = Vector((pt.x, 1 - pt.y))

    ## Convert to pixel values based on scene resolution and percentage
    scale = scene.render.resolution_percentage / 100
    width = int(scene.render.resolution_x * scale)
    height = int(scene.render.resolution_y * scale)

    return (round(pt.x * width), round(pt.y * height))
|
|
|
|
|
|
## -- After effects exports
|
|
|
|
def get_ae_keyframe_clipboard_header(scn):
    '''Return the After Effects keyframe-data clipboard header.

    AE requires real tab characters; spaces break the clipboard syntax,
    so the lines are assembled with explicit \\t and joined.
    '''
    lines = [
        'Adobe After Effects 8.0 Keyframe Data',
        '',
        '\tUnits Per Second\t%s' % scn.render.fps,
        '\tSource Width\t%s' % scn.render.resolution_x,
        '\tSource Height\t%s' % scn.render.resolution_y,
        '\tSource Pixel Aspect Ratio\t1',
        '\tComp Pixel Aspect Ratio\t1',
        '',
        'Transform\tPosition',
        '\tFrame\tX pixels\tY pixels\tZ pixels\t',
        '',  # trailing newline
    ]
    return '\n'.join(lines)
|
|
|
|
## -- Collection handle
|
|
|
|
def get_collection_childs_recursive(col, cols=None, include_root=True):
    '''Return a flat list of all the sub-collections under col.

    :col: root collection to walk
    :cols: internal accumulator for recursion (leave as None)
    :include_root: also append col itself at the end

    Fix: the default was a mutable `cols=[]` — a classic shared-state trap.
    The `cols = cols or []` guard masked it, but None is the safe default.
    '''
    # force start from fresh list (otherwise same cols list is used at next call)
    cols = cols or []

    for sub in col.children:
        if sub not in cols:
            cols.append(sub)
        if len(sub.children):
            cols = get_collection_childs_recursive(sub, cols)

    if include_root and col not in cols: # add root col
        cols.append(col)

    return cols
|
|
|
|
def unlink_objects_from_scene(oblist, scn):
    '''Unlink every object of oblist from all collections of scn.'''
    for col in get_collection_childs_recursive(scn.collection):
        # reversed: we unlink while walking the collection's objects
        for ob in reversed(col.objects):
            if ob in oblist:
                col.objects.unlink(ob)
|
|
|
|
def remove_scene_nodes_by_obj_names(scn, name_list, negative=False):
    '''Remove compositor frame nodes and their framed content by label.

    :negative: when True, remove frames (and framed nodes) whose label is
               NOT in name_list instead
    '''
    nodes = scn.node_tree.nodes
    for n in reversed(nodes):
        framed_match = bool(n.parent) and (n.parent.label in name_list)
        frame_match = (n.type == 'FRAME') and (n.label in name_list)
        if negative:
            hit = (bool(n.parent) and not framed_match) or (n.type == 'FRAME' and not frame_match)
        else:
            hit = framed_match or frame_match
        if hit:
            nodes.remove(n)
|
|
|
|
def split_object_to_scene(objs=None, scene_name=None):
    '''Create a new scene from object selection

    :objs: objects to move (default: current selection)
    :scene_name: name of the new scene (default: active object name)
    raise Exception if a scene with that name already exists
    return an info string when the border could not be set (GP analysis timeout)

    Side effects: removes the moved objects and their compositor frame nodes
    from the source scene, enables cropped border render on the new scene and
    exports crop info to json.
    '''

    if not scene_name:
        active = bpy.context.object
        scene_name = active.name
    if not objs:
        objs = [o for o in bpy.context.selected_objects]

    if bpy.data.scenes.get(scene_name):
        print(f'Scene "{scene_name}" Already Exists')
        raise Exception(f'Scene "{scene_name}" Already Exists')

    src = bpy.context.scene

    # full linked copy of the current scene, then strip what we don't want
    bpy.ops.scene.new(type='LINK_COPY')
    new = bpy.context.scene
    new.name = scene_name

    ## OPT
    ## set individual output for composite or go in /tmp ? (might not be needed)
    # new.render.filepath = f'//render/preview/{bpy.path.clean_name(new.name.lower())}/preview_'
    # new.render.filepath = f'/tmp/'

    ## unlink unwanted objects from collection
    all_col = get_collection_childs_recursive(new.collection)
    for col in all_col:
        for sob in reversed(col.objects):
            if sob.type in ('CAMERA', 'LIGHT'):
                # cameras and lights are kept in the new scene
                continue
            if sob not in objs:
                col.objects.unlink(sob)

    # keep only compositor frames whose label matches a remaining object
    frame_names = [n.label for n in new.node_tree.nodes if n.type == 'FRAME' if new.objects.get(n.label)]
    remove_scene_nodes_by_obj_names(new, frame_names, negative=True)

    bpy.ops.gp.clean_compo_tree()

    # add crop
    new.render.use_border = True
    new.render.use_crop_to_border = True
    new.render.use_compositing = True
    new.render.use_sequencer = False

    ## remove asset from original scene
    #src_frame_names = [n.label for n in src.node_tree.nodes if n.type == 'FRAME' and n.label in [o.name for o in objs]]
    #remove_scene_nodes_by_obj_names(src, src_frame_names)
    remove_scene_nodes_by_obj_names(src, frame_names, negative=False)

    # unlink objects ?
    unlink_objects_from_scene(objs, src)

    # border to GP objects of the scene
    gp_objs = [o for o in new.objects if o.type == 'GPENCIL']
    coords = get_gp_box_all_frame_selection(oblist=gp_objs, scn=new, cam=new.camera)
    if not coords:
        return f'Scene "{scene_name}" created. But Border was not set (Timeout during GP analysis), should be done by hand if needed then use export crop to json'

    set_border_region_from_coord(coords, margin=30, scn=new, export_json=True)
    export_crop_to_json()
|
|
|
|
|
|
def clear_frame_out_of_range(o, verbose=False):
    '''get a GP object
    delete frame out of active scene range in all layers
    return number of deleted frame

    A one-frame tolerance is kept on both sides of the range, and the
    closest frame *before* the range is preserved (presumably because its
    drawing still holds/exposes into the range — confirm if changing).
    '''

    scn = bpy.context.scene
    ct = 0
    if o.type != 'GPENCIL':
        print(f'{o.name} not a Gpencil')
        return 0
    for l in o.data.layers:
        first = True
        # reversed: iterate frames in descending number so removal is safe
        for f in reversed(l.frames):

            # after range (frame_end + 1 kept as tolerance)
            if f.frame_number > scn.frame_end + 1:
                if verbose:
                    print(f'del: obj {o.name} > layer {l.info} > frame {f.frame_number}')
                l.frames.remove(f)
                ct += 1

            # before range (frame_start - 1 kept as tolerance);
            # `first` skips the nearest frame before the range once per layer
            elif f.frame_number < scn.frame_start - 1:
                if first:
                    first = False
                    continue
                if verbose:
                    print(f'del: obj {o.name} > layer {l.info} > frame {f.frame_number}')
                l.frames.remove(f)
                ct += 1

    # print('INFO', f'{ct} frames deleted')
    return ct
|
|
|
|
|
|
## not used
|
|
def clear_frame_out_of_range_all_object():
    '''Run clear_frame_out_of_range on every GP object of the scene.

    return the total number of deleted frames
    '''
    total = 0
    for ob in bpy.context.scene.objects:
        if ob.type != 'GPENCIL':
            continue
        deleted = clear_frame_out_of_range(ob, verbose=False)
        print(f'{ob.name}: {deleted} frames deleted')
        total += deleted
    print(f'{total} gp frames deleted')
    return total
|
|
|
|
def set_scene_output_from_active_fileout_item():
    '''Mirror the active File Output slot onto the scene render settings.

    Uses the compositor's active node (must be a File Output): sets the
    scene output filepath from base_path + slot path, then copies the
    slot's (or node's) image format settings onto the scene.

    Fix: the color_depth fallback assigned the int 8, but color_depth is
    an enum of strings ('8', '16', ...) — now falls back to '8'.
    '''
    scn = bpy.context.scene
    rd = scn.render
    ntree = scn.node_tree
    fo = ntree.nodes.active

    if fo.type != 'OUTPUT_FILE':
        return
    sl = fo.file_slots[fo.active_input_index]
    full_path = os.path.join(fo.base_path, sl.path)

    rd.filepath = full_path

    # slot may override the node-level format
    fmt = fo.format if sl.use_node_format else sl.format
    ## set those attr first to avoid error settings other attributes in next loop
    rd.image_settings.file_format = fmt.file_format
    rd.image_settings.color_mode = fmt.color_mode
    rd.image_settings.color_depth = fmt.color_depth if fmt.color_depth else '8' # Force set since Sometimes it's weirdly set to "" (not in enum choice)

    excluded = ['file_format', 'color_mode', 'color_depth',
                'view_settings', 'views_format']

    ''' ## all attrs
    # 'cineon_black', 'cineon_gamma', 'cineon_white',
    # 'color_depth', 'color_mode', 'compression', 'display_settings',
    # 'exr_codec', 'file_format', 'jpeg2k_codec', 'quality',
    # 'rna_type', 'stereo_3d_format', 'tiff_codec', 'use_cineon_log',
    # 'use_jpeg2k_cinema_48', 'use_jpeg2k_cinema_preset', 'use_jpeg2k_ycc',
    # 'use_preview', 'use_zbuffer']
    '''

    # mirror every remaining writable format attribute
    for attr in dir(fmt):
        if attr.startswith('__') or attr.startswith('bl_') or attr in excluded:
            continue
        if hasattr(scn.render.image_settings, attr) and not scn.render.image_settings.is_property_readonly(attr):
            setattr(scn.render.image_settings, attr, getattr(fmt, attr))
|
|
|
|
|
|
def set_layer_colors(skip_if_colored=False):
    '''Set hardcoded channel colors on GP layers according to name prefixes.

    :skip_if_colored: leave objects whose layers already carry any custom
                      channel color untouched
    Also enables channel group colors in the animation preferences.
    '''
    ## UW -> TO (here used fo CU): (0.015996, 0.246201, 0.246201) # Indigo
    ## invisible (close to violet light) in UW: (0.246201, 0.132868, 0.496933)
    prefix_color = {
        # 'MA_': (0.09, 0.08, 0.46), # Vivid blue
        'MA_': (0.65, 0.4, 0.6), # Pink Light
        'FX_': (0.12, 0.33, 0.58), # (0.3, 0.49, 0.63) # Blue Light
        # 'CO_': (0.35, 0.0085, 0.25),
        'CO_': (0.5,0.1,0.5), # Clear Pink
        # 'CU': (0.092070, 0.177356, 0.447959), # Blue clear
        'CU_': (0.02, 0.27, 0.27), # Indigo
    }

    for ob in bpy.context.scene.objects:
        if ob.type != 'GPENCIL':
            continue
        if skip_if_colored and any(has_channel_color(l) for l in ob.data.layers):
            continue
        for layer in ob.data.layers:
            # prefix is the first 3 chars of the layer name, e.g. 'FX_'
            color = prefix_color.get(layer.info[:3])
            if color:
                print(layer.info, '->', color)
                layer.channel_color = color

    bpy.context.preferences.edit.use_anim_channel_group_colors = True
|
|
|
|
|
|
def different_gp_mat(mata, matb):
    '''Compare two GP materials.

    return None when no difference is found (falsy),
    or a string describing the first difference (truthy)
    '''
    gp_a = mata.grease_pencil
    gp_b = matb.grease_pencil

    if gp_a.color[:] != gp_b.color[:]:
        return f'{mata.name} and {matb.name} stroke color is different'
    if gp_a.fill_color[:] != gp_b.fill_color[:]:
        return f'{mata.name} and {matb.name} fill_color color is different'
    if gp_a.show_stroke != gp_b.show_stroke:
        return f'{mata.name} and {matb.name} stroke has different state'
    if gp_a.show_fill != gp_b.show_fill:
        return f'{mata.name} and {matb.name} fill has different state'
|
|
|
|
## Clean dups
|
|
def clean_mats_duplication(ob, skip_different_materials=True):
    '''Replace "mat.###"-style duplicated materials on ob by the base "mat".

    :ob: object whose material slots are cleaned (GP objects only)
    :skip_different_materials: keep a "mat.###" slot when the base material
                               has different color settings
    '''
    if ob.type != 'GPENCIL':
        return
    if not hasattr(ob, 'material_slots'):
        return

    diff_ct = 0
    todel = []
    for i, slot in enumerate(ob.material_slots):
        mat = slot.material
        if not mat:
            continue
        stem = re.search(r'(.*)\.\d{3}$', mat.name)
        if not stem:
            continue
        basemat = bpy.data.materials.get(stem.group(1))
        if not basemat:
            continue
        if not basemat.is_grease_pencil:
            ## Skip material with matching "stem" but not being GP material
            continue

        diff = different_gp_mat(mat, basemat)
        if diff:
            print(f'! {ob.name} : {diff}')
            diff_ct += 1
            if skip_different_materials:
                continue

        if mat not in todel:
            todel.append(mat)
        slot.material = basemat
        print(f'{ob.name} : slot {i} >> replaced {mat.name}')
        mat.use_fake_user = False

    ### delete (only when using on all objects loop, else can delete another objects mat...)
    ## for m in reversed(todel):
    ##     bpy.data.materials.remove(m)

    if diff_ct:
        print(f'{diff_ct} mat skipped >> same name but different color settings!')
        # return ('INFO', f'{diff_ct} mat skipped >> same name but different color settings!')
|
|
|
def recursive_node_connect_check(l, target_node):
    '''Return True if link l eventually reaches target_node by following
    the output links downstream, False otherwise.
    '''
    if l.to_node == target_node:
        return True
    # walk every outgoing link of the node this link points to
    return any(
        recursive_node_connect_check(sub_link, target_node)
        for out in l.to_node.outputs
        for sub_link in out.links
    )
|
|
|
|
def _connect_sockets_to_file_out(sockets, node, fo, remap_names, links):
    '''Create a named slot on file output `fo` for each socket and link it.

    Sockets already connected to `fo` (directly or through intermediate
    nodes) are skipped. Slot names come from `remap_names` when a remap
    exists for this node/socket, else from the socket name (cleaned).
    '''
    for o in sockets:
        ## Skip already connected (test recursively, some setups have interconnected sockets)
        if next((l for l in o.links if recursive_node_connect_check(l, fo)), None):
            continue

        if (socket_remaps := remap_names.get(node.name)) and (custom_name := socket_remaps.get(o.name)):
            slot_name = bpy.path.clean_name(custom_name)
        else:
            slot_name = bpy.path.clean_name(o.name)

        ## Setting both file_slots and layer_slots (only one is used depending
        ## on file_format; extra disconnected slots are removed by clear_disconnected)
        fo.file_slots.new('tmp')
        fo.layer_slots.new('tmp')

        fo.layer_slots[-1].name = slot_name
        fo.file_slots[-1].path = f'{slot_name}/{slot_name}_'

        ## The newly created slot is the last input: link it to the source socket
        links.new(o, fo.inputs[-1])


def connect_to_file_output(node_list, file_out=None, base_path='', excludes=None, remap_names=None, file_format=None):
    """Connect selected nodes outputs to file output node(s)

    If a file output is selected, add inputs on it.
    Cryptomatte outputs are routed to a separate, 32bit file output node.

    Args:
        node_list (list[bpy.types.Node]): Nodes to connect.

        file_out (bpy.types.CompositorNode, optional): File output node to
            connect to instead of creating a new one.
            Defaults to None.

        base_path (str, optional): Directory of images to render.
            If not passed, will use source node layer name.
            Defaults to ''.

        file_format (dict, optional): Each key is applied as an attribute on
            the file output node format, with the corresponding value.
            Defaults to None.

        excludes (dict, optional): Output names to exclude {node_name: [outputs,]}.
            Defaults to None.

        remap_names (dict, optional): Output names to remap
            {node_name: {output_name: new_name}}. Defaults to None.

    Returns:
        None
    """

    scene = bpy.context.scene
    nodes = scene.node_tree.nodes
    links = scene.node_tree.links

    if not isinstance(node_list, list):
        node_list = [node_list]

    ## File output nodes are never sources themselves
    node_list = [n for n in node_list if n.type != 'OUTPUT_FILE']
    if not node_list:
        return

    excludes = excludes or {}
    ## FIX: default to empty dict, remap_names.get() raised AttributeError when omitted
    remap_names = remap_names or {}

    for node in node_list:
        exclusions = excludes.get(node.name) or []
        ## Split outputs: cryptomatte passes get their own file output below
        outs = [o for o in node.outputs if not o.is_unavailable and not 'crypto' in o.name.lower() and o.name not in exclusions]
        cryptout = [o for o in node.outputs if not o.is_unavailable and 'crypto' in o.name.lower() and o.name not in exclusions]

        if node.type == 'R_LAYERS':
            out_base = node.layer
        elif node.label:
            out_base = node.label
        else:
            out_base = node.name

        ## FIX: assign the cleaned name (return value was previously discarded, making the call a no-op)
        out_base = bpy.path.clean_name(out_base)
        out_name = f'OUT_{out_base}'

        if outs:
            fo = file_out
            if not fo:
                fo = nodes.get(out_name)
                if not fo:
                    # color = (0.2,0.3,0.5)
                    fo = create_node('CompositorNodeOutputFile', tree=scene.node_tree, location=(real_loc(node)[0]+500, real_loc(node)[1]+50), width=600)
                    fo.inputs.remove(fo.inputs[0])  # Remove default image input
                    if file_format:
                        for k, v in file_format.items():
                            setattr(fo.format, k, v)
                    else:
                        set_file_output_format(fo)

                fo.name = out_name
                if node.parent:
                    fo.parent = node.parent

                if base_path:
                    fo.base_path = base_path
                else:
                    if fo.format.file_format == 'OPEN_EXR_MULTILAYER':
                        fo.base_path = f'//render/{out_base}/{out_base}_'
                    else:
                        fo.base_path = f'//render/{out_base}'

            _connect_sockets_to_file_out(outs, node, fo, remap_names, links)

            clear_disconnected(fo)
            fo.update()

        ## Create separate file out for cryptos
        if cryptout:
            out_name += '_cryptos'
            fo = file_out
            if not fo:
                fo = nodes.get(out_name)
                if not fo:
                    # color = (0.2,0.3,0.5)
                    fo = create_node('CompositorNodeOutputFile', tree=scene.node_tree, location=(real_loc(node)[0]+400, real_loc(node)[1]-200), width=220)
                    fo.inputs.remove(fo.inputs[0])  # Remove default image input
                    if file_format:
                        for k, v in file_format.items():
                            setattr(fo.format, k, v)
                    else:
                        set_file_output_format(fo)  # OPEN_EXR_MULTILAYER, RGBA, ZIP
                    fo.format.color_depth = '32'  # For crypto force 32bit

                fo.name = out_name
                if node.parent:
                    fo.parent = node.parent

                if base_path:
                    fo.base_path = base_path
                else:
                    if fo.format.file_format == 'OPEN_EXR_MULTILAYER':
                        ## FIXME: find a better organization for separated crypto pass
                        fo.base_path = f'//render/{out_base}/cryptos/cryptos_'
                    else:
                        fo.base_path = f'//render/{out_base}'

            _connect_sockets_to_file_out(cryptout, node, fo, remap_names, links)

            clear_disconnected(fo)
            fo.update()
|