From 98c6739003349cbcc594c6e507640c3d73508b3a Mon Sep 17 00:00:00 2001 From: Pullusb Date: Sun, 10 Jan 2021 16:47:17 +0100 Subject: [PATCH] first public version commit --- .gitignore | 4 + GP_guided_colorize/GP_colorize.py | 78 + GP_guided_colorize/OP_create_empty_frames.py | 58 + GP_guided_colorize/OP_gmic.py | 424 +++++ GP_guided_colorize/OP_line_closer.py | 444 +++++ GP_guided_colorize/func_gmic.py | 184 ++ LICENSE.txt | 674 +++++++ OP_box_deform.py | 579 ++++++ OP_breakdowner.py | 339 ++++ OP_canvas_rotate.py | 284 +++ OP_copy_paste.py | 709 +++++++ OP_cursor_snap_canvas.py | 182 ++ OP_file_checker.py | 300 +++ OP_helpers.py | 546 ++++++ OP_palettes.py | 259 +++ OP_playblast.py | 201 ++ OP_playblast_bg.py | 384 ++++ OP_pseudo_tint.py | 129 ++ OP_render.py | 482 +++++ OP_temp_cutter.py | 166 ++ README.md | 286 +++ UI_tools.py | 391 ++++ __init__.py | 462 +++++ addon_updater.py | 1673 +++++++++++++++++ addon_updater_ops.py | 1503 +++++++++++++++ functions.py | 381 ++++ .../gp_toolbox_public_updater_status.json | 9 + keymaps.py | 45 + properties.py | 67 + utils.py | 688 +++++++ 30 files changed, 11931 insertions(+) create mode 100644 .gitignore create mode 100644 GP_guided_colorize/GP_colorize.py create mode 100644 GP_guided_colorize/OP_create_empty_frames.py create mode 100644 GP_guided_colorize/OP_gmic.py create mode 100644 GP_guided_colorize/OP_line_closer.py create mode 100644 GP_guided_colorize/func_gmic.py create mode 100644 LICENSE.txt create mode 100644 OP_box_deform.py create mode 100644 OP_breakdowner.py create mode 100644 OP_canvas_rotate.py create mode 100644 OP_copy_paste.py create mode 100644 OP_cursor_snap_canvas.py create mode 100644 OP_file_checker.py create mode 100644 OP_helpers.py create mode 100644 OP_palettes.py create mode 100644 OP_playblast.py create mode 100644 OP_playblast_bg.py create mode 100644 OP_pseudo_tint.py create mode 100644 OP_render.py create mode 100644 OP_temp_cutter.py create mode 100644 README.md create mode 100644 UI_tools.py create mode 100644 __init__.py create mode 100644 addon_updater.py create mode 100644 addon_updater_ops.py create mode 100644 functions.py create mode 100644 gp_toolbox_public_updater/gp_toolbox_public_updater_status.json create mode 100644 keymaps.py create mode 100644 properties.py create mode 100644 utils.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0f97c4c --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +__pycache__ +*.pyc +gp_toolbox_updater +gp_toolbox_updater/GP_toolbox_updater_status.json diff --git a/GP_guided_colorize/GP_colorize.py b/GP_guided_colorize/GP_colorize.py new file mode 100644 index 0000000..9f298dc --- /dev/null +++ b/GP_guided_colorize/GP_colorize.py @@ -0,0 +1,78 @@ +## Can be renamed and used as standalone __init__.py file +bl_info = { +"name": "GP guided colorize", +"description": "Blender <> G'MIC bridge for auto color", +"author": "Samuel Bernou", +"version": (0, 1, 0), +"blender": (2, 82, 0), +"location": "3D view > Gpencil > Colorize", +"warning": "WIP", +"doc_url": "",#2.8 > 2.82 : "wiki_url":"", +"category": "3D View", +} + +import bpy +from . import OP_gmic +# from .OP_gmic import (GMICOLOR_OT_propagate_spots, +# GMICOLOR_OT_clear_cam_bg_images, +# GMICOLOR_OT_open_gmic_tool_folder, +# GMICOLOR_PT_auto_color_panel,) + +from . import OP_line_closer +from . 
import OP_create_empty_frames +# from .OP_line_closer import (GPSTK_OT_extend_lines, +# GPSTK_PT_line_closer_panel, +# GPSTK_OT_comma_finder,) + +## Colorize properties +class GPCOLOR_PG_settings(bpy.types.PropertyGroup) : + res_percentage: bpy.props.IntProperty( + name="Gmic out resolution", description="Overrides resolution percentage for playblast", default = 50, min=1, max=200, soft_min=10, soft_max=100, subtype='PERCENTAGE')#, precision=0 + + # extend_layers: bpy.props.BoolProperty(name='Extend layers' default=True, description="Work on selected layers, else only active") + extend_layer_tgt : bpy.props.EnumProperty( + name="Extend layers", description="Choose which layer to target", + default='ACTIVE', + items=( + ('ACTIVE', 'Active only', 'Target active layer only', 0),#include icon name in fourth position + ('SELECTED', 'Selected', 'Target selected layers in GP dopesheet', 1), + ('ALL_VISIBLE', 'All visible', 'target all visible layers', 2), + ('ALL', 'All', 'All (even locked and hided layer)', 2), + )) + + extend_selected: bpy.props.BoolProperty(name='Extend selected', default=False, description="Work on selected stroke only if True, else All stroke") + extend_length: bpy.props.FloatProperty(name='Extend length', default=0.01, precision=3, step=0.01, description="Length for extending strokes boundary") + + deviation_tolerance : bpy.props.FloatProperty( + name="Deviation angle", description="Deviation angle tolerance of last point(s) to be considered accidental trace", + default=1.22, min=0.017, max=3.124, soft_min=0.017, soft_max=1.92, step=3, precision=2, unit='ROTATION')#, subtype='ANGLE') + + + +classes = ( +GPCOLOR_PG_settings, +# GMICOLOR_OT_propagate_spots, +# GMICOLOR_OT_clear_cam_bg_images, +# GMICOLOR_OT_open_gmic_tool_folder, +# GMICOLOR_PT_auto_color_panel, +) + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + OP_create_empty_frames.register() + OP_line_closer.register() + OP_gmic.register() + bpy.types.Scene.gpcolor_props = bpy.props.PointerProperty(type = GPCOLOR_PG_settings) + +def unregister(): + OP_gmic.unregister() + OP_line_closer.unregister() + OP_create_empty_frames.unregister() + for cls in reversed(classes): + bpy.utils.unregister_class(cls) + del bpy.types.Scene.gpcolor_props + + +if __name__ == "__main__": + register() \ No newline at end of file diff --git a/GP_guided_colorize/OP_create_empty_frames.py b/GP_guided_colorize/OP_create_empty_frames.py new file mode 100644 index 0000000..d9f37ab --- /dev/null +++ b/GP_guided_colorize/OP_create_empty_frames.py @@ -0,0 +1,58 @@ +## Create empty keyframe where keyframe exists in layers above. +import bpy + +class GP_OT_create_empty_frames(bpy.types.Operator): + bl_idname = "gp.create_empty_frames" + bl_label = "Create empty frames" + bl_description = "Create new empty frames on active layer where there is a frame in layer above\n(usefull in color layers to match line frames)" + bl_options = {'REGISTER','UNDO'} + + @classmethod + def poll(cls, context): + return context.active_object is not None and context.active_object.type == 'GPENCIL' + + def execute(self, context): + obj = context.object + gpl = obj.data.layers + gpl.active_index + + ## Only possible on 'fill' layer ?? 
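+ # (the guard kept below, commented out, would restrict this operator to layers whose name contains 'fill')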
+ # if not 'fill' in gpl.active.info.lower(): + # self.report({'ERROR'}, f"There must be 'fill' text in layer name") + # return {'CANCELLED'} + + frame_id_list = [] + for i, l in enumerate(gpl): + # don't list layer below + if i <= gpl.active_index: + continue + # print(l.info, "index:", i) + for f in l.frames: + frame_id_list.append(f.frame_number) + + frame_id_list = list(set(frame_id_list)) + frame_id_list.sort() + + current_frames = [f.frame_number for f in gpl.active.frames] + + fct = 0 + for num in frame_id_list: + if num in current_frames: + continue + #Create empty frame + gpl.active.frames.new(num, active=False) + fct += 1 + + if fct: + self.report({'INFO'}, f"{fct} frame created on layer {gpl.active.info}") + else: + self.report({'WARNING'}, f"No frames to create !") + + return {'FINISHED'} + + +def register(): + bpy.utils.register_class(GP_OT_create_empty_frames) + +def unregister(): + bpy.utils.unregister_class(GP_OT_create_empty_frames) \ No newline at end of file diff --git a/GP_guided_colorize/OP_gmic.py b/GP_guided_colorize/OP_gmic.py new file mode 100644 index 0000000..483e489 --- /dev/null +++ b/GP_guided_colorize/OP_gmic.py @@ -0,0 +1,424 @@ +from .func_gmic import * +from ..utils import get_addon_prefs, open_folder +import bpy +from os.path import join, basename, exists, dirname, abspath, splitext + +'''#decorator mod +def with_renderfile(filepath): + def with_renderfile_decorator(func): + def decorator(*args, **kwargs): + r = bpy.context.scene.render + old_filepath, r.filepath = r.filepath, filepath + try: + func(*args, **kwargs) + finally: + r.filepath = old_filepath + + return decorator + + return with_renderfile_decorator + +@with_renderfile("//myfile") +''' + +# self with implementation +def render_filepath(filepath): + class RenderFileRestorer: + + def __enter__(self): + bpy.context.scene.render.film_transparent = True + bpy.context.scene.render.filepath = filepath + bpy.context.scene.render.resolution_percentage = bpy.context.scene.gpcolor_props.res_percentage + + def __exit__(self, type, value, traceback): + bpy.context.scene.render.filepath = old_filepath + bpy.context.scene.render.film_transparent = transparent + bpy.context.scene.render.resolution_percentage = old_res + + transparent = bpy.context.scene.render.film_transparent + old_filepath = bpy.context.scene.render.filepath + old_res = bpy.context.scene.render.resolution_percentage + return RenderFileRestorer() + + +def layer_state(gp_data): + class LayerStateRestorer: + + def __enter__(self): + # mask/restore other GP object ? + self.layers_state = {l:l.hide for l in gp_data.layers} + + def __exit__(self, type, value, traceback): + for k, v in self.layers_state.items(): + k.hide = v + + return LayerStateRestorer() + +def cursor_state(): + class CursorStateRestorer: + + def __enter__(self): + ... 
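+ # progress_begin()/progress_update() are driven by the caller (see generate_seq_placeholder);
+ # this wrapper only guarantees the window-manager progress cursor is closed on exit, even on error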
+ def __exit__(self, type, value, traceback): + bpy.context.window_manager.progress_end() + + return CursorStateRestorer() + +def imtool_fp(dest='tool',name=None): + if name: + return join(bpy.path.abspath('//'), dest, name) + return join(bpy.path.abspath('//'), dest) + +def generate_seq_placeholder(): + with cursor_state(): + wm = bpy.context.window_manager + wm.progress_begin(bpy.context.scene.frame_start, bpy.context.scene.frame_end) + for i in range(bpy.context.scene.frame_start,bpy.context.scene.frame_end+1): + fp = imtool_fp(name = f'colotmp_{str(i).zfill(4)}.png') + wm.progress_update(i)#remap from frame range to 0-1000 with transfer_value(Value, OldMin, OldMax, NewMin, NewMax) + if not exists(fp): + generate_empty_image(fp) + +def set_bg_img_settings(bgimg): + bgimg.display_depth = 'FRONT' if bpy.data.version[1] < 83 else 'BACK' + bgimg.alpha = 0.6 + bgimg.frame_method = 'FIT'#'STRETCH' + + +def load_bg_image(colo_fp): + # load new image in camera background + cam = bpy.context.scene.camera.data + cam.show_background_images = True + + colo = basename(colo_fp) + colo_img = bpy.data.images.get(colo) + + # load as images + if colo_img: + colo_img.reload() + else: + if not exists(colo_fp):return + colo_img = bpy.data.images.load(colo_fp) + + bgimg = None + for bg in cam.background_images: + if bg.image == colo_img: + bgimg = bg + break + + if not bgimg: + bgimg = cam.background_images.new() + bgimg.image = colo_img + + set_bg_img_settings(bgimg) + return bgimg + +def load_bg_movieclip(): + # load new image in camera background + cam = bpy.context.scene.camera.data + cam.show_background_images = True + + ## load first image + colo = f'colotmp_{str(bpy.context.scene.frame_start).zfill(4)}.png' + colo_fp = join(imtool_fp(), colo) + if not exists(colo_fp): + try: + generate_empty_image(colo_fp) + pass + # TODO generate empty alpha image with GMIC or fast lib (numpy ?) + except Exception as identifier: + print('In load_bg_movieclip') + print(f'NOT FOUND : {colo_fp}') + return + return + + # load as movie clip + colo_img = bpy.data.movieclips.get(colo) + if colo_img: + pass# colo_img.reload()# video has no reload prop + #TODO find a way to trigger refresh automagically !! + else: + if not exists(colo_fp):return + colo_img = bpy.data.movieclips.load(colo_fp) + + bgimg = None + for bg in cam.background_images: + if bg.clip == colo_img: + bgimg = bg + break + + if not bgimg: + bgimg = cam.background_images.new() + bgimg.source = 'MOVIE_CLIP' + bgimg.clip = colo_img + + set_bg_img_settings(bgimg) + return bgimg + + +def guide_color(anim=False): + '''render lines and spots separately > gmic > feeback into cam background''' + scene = bpy.context.scene + #### other solution + ### how about conerting polylines to stroke under the hood with opencv in camera space according to layer + ### then feed generated array to gmic, might be faster (but dont take stroke thickness into account... problem or feature ?) + + # Generate temporary (local for speed) folder or render in /tmp if not specified ? + if not bpy.context.object or bpy.context.object.type != 'GPENCIL': return 1 + + gp = bpy.context.object.data + frame = str(scene.frame_current).zfill(4) + line = f"line_{frame}.png" + spot = f"spot_{frame}.png" + colo = f"colotmp_{frame}.png" + with layer_state(gp): + # show/hide layers to render lines/spots only + for l in gp.layers: + l.hide = any(x in l.info for x in ('spot', 'colo'))#keep only lines + # better hide by material namespace ? 
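+ # first pass: with only the line layers visible, render to //tool/line_####.png;
+ # a second pass below re-hides everything but the 'spot' layers and renders spot_####.png,
+ # then both files are handed to propagate_color() for the G'MIC fill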
+ + with render_filepath(f"//tool/{line}"): + bpy.ops.render.render(animation = anim, write_still=True) + + # show/hide layers to render spots only + for l in gp.layers: + l.hide = 'spot' not in l.info#keep only spots + + with render_filepath(f"//tool/{spot}"): + bpy.ops.render.render(animation = anim, write_still=True) + line_fp = join(imtool_fp(), line) + spot_fp = join(imtool_fp(), spot) + colo_fp = join(imtool_fp(), colo) + + propagate_color(line_fp, spot_fp, colo_fp) + ## ~4.6sec + + + # now try and check if openCV or gmic can smooth vectorize the stuff + + ## ! surely possible to avoid writting the file like giving it back as a numpy array and keep it within blender ! + ## >> just feed numpy array to gmic and get output ? + ## Again maybe possible to avoid writing to disk but more complicated to send to gmic (use slot 8 and 9 ?) + + # clear line/spot render if necessary... + + +class GMICOLOR_OT_propagate_spots(bpy.types.Operator): + """ + Propagate the spots with a gmic call + use shift+clic to force reload after operation. + """ + bl_idname = "bgmic.propagate_color" + bl_label = "Gmic propagate color" + + @classmethod + def poll(cls, context): + return context.active_object is not None and context.active_object.type == 'GPENCIL' + + anim : bpy.props.BoolProperty( + name="animation", description="render and propagate color for whole animation", default=False, subtype='NONE', options={'ANIMATABLE'})#HIDDEN + + ## TODO set preview mode (with low percentage) + + # subtype (string) – Enumerator in ['FILE_PATH', 'DIR_PATH', 'FILE_NAME', 'BYTE_STRING', 'PASSWORD', 'NONE']. + # options (set) – Enumerator in ['HIDDEN', 'SKIP_SAVE', 'ANIMATABLE', 'LIBRARY_EDITABLE', 'PROPORTIONAL','TEXTEDIT_UPDATE']. + mode : bpy.props.StringProperty( + name="mode", description="Set mode for operator", default="render", maxlen=0, subtype='NONE', options={'ANIMATABLE'}) + + load : bpy.props.BoolProperty(default=False) + + def execute(self, context): + ## TODO make a warning if animation mode is on and there is a consequent number of frame with big resolutions... + frame = str(context.scene.frame_current).zfill(4) + colo = f'colotmp_{frame}.png' + colo_fp = join(imtool_fp(), colo) + bgimg = None + + ## render one image or full-anim + if self.mode == 'render': + ## Or maybe use the technique of BG rendering... 
with subprocess call + bgimg = guide_color(self.anim) + + + ## re-load + if self.mode == 'load' or self.load: + """ if not exists(colo_fp): + print(f'/!\\ {frame}: color was not generated') + return {'CANCELLED'} """ + bpy.ops.bgmic.clear_cam_bg_images(real_clear=False)#clear before load + if self.anim: + bgimg = load_bg_movieclip() + generate_seq_placeholder()#generate all placeholders + else: + bgimg = load_bg_image(colo_fp) + + if not bgimg: + mess = f"Not Found, Loading {'anim' if self.anim else 'image'} has failed at {dirname(colo_fp)}" + self.report({'ERROR'}, mess)#WARNING, ERROR + return {'CANCELLED'} + + return {'FINISHED'} + + def invoke(self, context, event): + self.load = event.shift#if shift is pressed, force inclusion of load + return self.execute(context) + +class GMICOLOR_OT_clear_cam_bg_images(bpy.types.Operator): + """ + Disable and clear background images from scene camera (mask non spot imgs) + Shift+clic for a real clear (delete all refs images from camera bg images) + """ + bl_idname = "bgmic.clear_cam_bg_images" + bl_label = "Clear camera background color images" + + @classmethod + def poll(cls, context): + return context.scene.camera + + real_clear : bpy.props.BoolProperty(default=False) + + def execute(self, context): + # TODO be selective to user cam (filter on name) + # TODO need to delete only "spot" name instead of full clear + if self.real_clear: + context.scene.camera.data.background_images.clear() + else: + to_del = [] + for bgimg in context.scene.camera.data.background_images: + if bgimg.source == 'MOVIE_CLIP': + if not bgimg.clip or 'spot' in bgimg.clip.name or 'colotmp' in bgimg.clip.name: + to_del.append(bgimg) + continue + else:#just hide + bgimg.show_background_image = False + else: + if not bgimg.image or 'spot' in bgimg.image.name or 'colotmp' in bgimg.image.name: + to_del.append(bgimg) + continue + else:#just hide + bgimg.show_background_image = False + + for bimg in reversed(to_del): + context.scene.camera.data.background_images.remove(bimg) + + + context.scene.camera.data.show_background_images = False#toggle off then on to force refresh + # context.scene.camera.data.show_background_images = True + return {'FINISHED'} + + def invoke(self, context, event): + self.real_clear = event.shift#if shift is pressed, force inclusion of load + return self.execute(context) + +class GMICOLOR_OT_open_gmic_tool_folder(bpy.types.Operator): + """ + Disable and clear background images from scene camera (mask non spot imgs) + Shift+clic for a real clear (delete all refs images from camera bg images) + """ + bl_idname = "bgmic.open_gmic_tool_folder" + bl_label = "Open spot-image-tool Folder" + bl_options = {'REGISTER', 'INTERNAL'} + + def execute(self, context): + fp = join(bpy.path.abspath('//'), 'tool') + if not exists(fp): + mess = f'{fp} not found' + self.report({'WARNING'}, mess) + return {'CANCELLED'} + + open_folder(fp) + self.report({'INFO'}, 'Gmic tool folder opened') + return {'FINISHED'} + + +## base panel +class GMICOLOR_PT_auto_color_panel(bpy.types.Panel): + bl_label = "GP Colorize"# title + # bl_parent_id # If set, the panel becomes a sub-panel + + ## bl_options = {'DEFAULT_CLOSED', 'HIDE_HEADER' }# closed by default, collapse the panel and the label + ## is_popover = False # if ommited + ## bl_space_type = ['EMPTY', 'VIEW_3D', 'IMAGE_EDITOR', 'NODE_EDITOR', 'SEQUENCE_EDITOR', 'CLIP_EDITOR', 'DOPESHEET_EDITOR', 'GRAPH_EDITOR', 'NLA_EDITOR', 'TEXT_EDITOR', 'CONSOLE', 'INFO', 'TOPBAR', 'STATUSBAR', 'OUTLINER', 'PROPERTIES', 'FILE_BROWSER', 
'PREFERENCES'], default 'EMPTY' + + bl_space_type = "VIEW_3D" + bl_region_type = "UI" + bl_category = "Gpencil"#name of the tab + + + # activating on some context only + ## bl_context : object, objectmode, mesh_edit, curve_edit, surface_edit, text_edit, armature_edit, mball_edit, lattice_edit, pose_mode, imagepaint, weightpaint, vertexpaint, particlemode + #bl_context = "objectmode"#render + + #need to be in object mode + @classmethod + def poll(cls, context): + return get_addon_prefs().use_color_tools#(context.object is not None and context.object.type == 'GPENCIL') + + ## draw stuff inside the header (place before main label) + # def draw_header(self, context): + # layout = self.layout + # layout.label(text="More text in header") + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + prefs = get_addon_prefs() + if not prefs.gmic_path: + layout.label(text='Gmic path missing in addon prefs', icon='ERROR') + return + + if not [l for l in context.object.data.layers if 'spot' in l.info]: + layout.label(text='Need at least one spots layer !', icon='ERROR') + layout.label(text='("spot" in name to identify)') + return + # row = layout.row() + layout.prop(context.scene.gpcolor_props, 'res_percentage') + + ## render and load frame + ops = layout.operator("bgmic.propagate_color", icon = 'FILE_IMAGE') + ops.mode = 'render' + ops.anim = False + + ## render and load anim + ops = layout.operator("bgmic.propagate_color", text='Gmic propagate animation', icon = 'FILE_MOVIE') + ops.mode = 'render' + ops.anim = True + + layout.separator() + + ## Load frame + ops = layout.operator("bgmic.propagate_color", text='load frame', icon = 'FILE_IMAGE') + ops.mode = 'load' + ops.anim = False + + ## Load anim + ops = layout.operator("bgmic.propagate_color", text='Load animation', icon = 'FILE_MOVIE') + ops.mode = 'load' + ops.anim = True + + layout.separator() + layout.operator("bgmic.clear_cam_bg_images") + + ## Open gmic tool location + layout.operator("bgmic.open_gmic_tool_folder", icon = 'FILE_FOLDER')#, text='Open img tool folder' + + +## TODO : Add an operator to generate empty alpha image to avoid the pink flashes... +## todo : choose to overwrite or not +## Add button to delete current "frame" (delete image on disk...) 
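## ---------------------------------------------------------------------------
## Editor's sketch, not part of this patch: one way the TODO above could be
## addressed, by wrapping the existing generate_seq_placeholder() helper in a
## small operator so the transparent colotmp_####.png placeholders can be
## pre-created from the UI. It assumes it lives in OP_gmic.py (where bpy and
## generate_seq_placeholder are already in scope); the idname/label are
## hypothetical and the class is deliberately not added to the `classes`
## tuple that follows.
class GMICOLOR_OT_generate_placeholders(bpy.types.Operator):
    """Pre-create transparent placeholder images for the scene frame range"""
    bl_idname = "bgmic.generate_placeholders"  # hypothetical idname
    bl_label = "Generate placeholder images"
    bl_options = {'REGISTER', 'INTERNAL'}

    @classmethod
    def poll(cls, context):
        # imtool_fp() builds paths relative to the blend file, so it must be saved
        return bool(bpy.data.filepath)

    def execute(self, context):
        # writes a 1x1 transparent png per frame via generate_empty_image()
        generate_seq_placeholder()
        self.report({'INFO'}, 'Placeholder images generated in the tool folder')
        return {'FINISHED'}
## A button could then be exposed in GMICOLOR_PT_auto_color_panel.draw(), e.g.
## layout.operator("bgmic.generate_placeholders", icon='FILE_BLANK').
## ---------------------------------------------------------------------------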
+ + +classes = ( +GMICOLOR_OT_propagate_spots, +GMICOLOR_OT_clear_cam_bg_images, +GMICOLOR_OT_open_gmic_tool_folder, +GMICOLOR_PT_auto_color_panel, +) + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + +def unregister(): + for cls in reversed(classes): + bpy.utils.unregister_class(cls) diff --git a/GP_guided_colorize/OP_line_closer.py b/GP_guided_colorize/OP_line_closer.py new file mode 100644 index 0000000..71dbf17 --- /dev/null +++ b/GP_guided_colorize/OP_line_closer.py @@ -0,0 +1,444 @@ +from .func_gmic import * +from ..utils import (location_to_region, + region_to_location, + vector_length_2d, + vector_length, + draw_gp_stroke, + extrapolate_points_by_length, + simple_draw_gp_stroke) + +import bpy +from math import degrees +from mathutils import Vector +# from os.path import join, basename, exists, dirname, abspath, splitext + +# iterate over selected layer and all/selected frame and close gaps between line extermities with a tolerance level + +def get_closeline_mat(ob): + # ob = C.object + gp = ob.data + + # get material + closeline = bpy.data.materials.get('closeline') + if not closeline: + print('Creating line closing material in material database') + closeline = bpy.data.materials.new('closeline') + bpy.data.materials.create_gpencil_data(closeline)#make it GP + closeline.grease_pencil.color = [0.012318, 0.211757, 0.607766, 1.000000]#blue - [0.778229, 0.759283, 0.000000, 1.000000]# yellow urgh + + if not closeline.name in gp.materials: + gp.materials.append(closeline) + + # get index in list + index = None + for i, ms in enumerate(ob.material_slots): + if ms.material == closeline: + index = i + break + + if not index: + print(f'could not find material {closeline.name} in material list') + + return index + +def create_gap_stroke(f, ob, tol=10, mat_id=None): + '''Take a frame, original object, an optional tolerance value and material ID + Get all extremity points + + for each one, analyse if he is close to another in screen space + - if it's the case mark this point as used by the other in the list (avoid to redo the other way) + dic encounter[point:[list of points already encountered]] + ''' + from collections import defaultdict + encounter = defaultdict(list) + plist = [] + matrix = ob.matrix_world + for s in f.strokes:#add first and last + smat = ob.material_slots[s.material_index].material + if not smat:continue#no material on line + if smat.grease_pencil.show_fill:continue# skip fill lines -> #smat.grease_pencil.show_stroke + if len(s.points) < 2:continue#avoid 0 or 1 points + plist.append(s.points[0]) + plist.append(s.points[-1]) + # plist.extend([s.points[0], s.points[-1])# is extend faster ? 
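+ # NOTE: the nested loop below compares every stroke extremity against every other one (O(n^2));
+ # the `passed` list and the `encounter` dict ensure a close pair produces a single
+ # connecting stroke instead of two (one from each end)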
+ + # pnum = len(plist) + ctl = 0 + passed = [] + for i, p in enumerate(plist): + # print(f'{i+1}/{pnum}') + for op in plist:#other points + if p == op:# print('same point') + continue + gap2d = vector_length_2d(location_to_region(matrix @ p.co), location_to_region(matrix @ op.co)) + # print('gap2d: ', gap2d) + if gap2d > tol: + continue + if gap2d < 1:#less than one pixel no need + continue + + # print('create_boundary_stroke') + + ## dont evaluate a point twice (skip if > 1 intersection) + passed.append(op) + if p in passed: + # print('op in passed') + continue + + ## Filter to avoid same stroke to be recreated switched + pairlist = encounter.get(op) + if pairlist: + # print('is in dic')#Dbg + if p in pairlist: + # print('found it')#Dbg + #already encountered, skip + continue + + # from pprint import pprint#Dbg + # pprint(encounter)#Dbg + + # print('new line', p, op) + # not met before, mark as encountered and create line. + encounter[p].append(op) + + + simple_draw_gp_stroke([p.co, op.co], f, width = 2, mat_id = mat_id) + ctl += 1 + + print(f'{ctl} line created') + +##test_call: #create_gap_stroke(C.object.data.layers.active.active_frame, C.object, mat_id=C.object.active_material_index) + +def create_closing_line(tolerance=0.2): + for ob in bpy.context.selected_objects: + if ob.type != 'GPENCIL': + continue + + mat_id = get_closeline_mat(ob)# get a the closing material + + if not mat_id: + print(f'object {ob.name} has no line closing material and could not create one !') + continue + # can do something to delete all line already there using this material + gp = ob.data + gpl = gp.layers + if not gpl:continue#print(f'obj {ob.name} has no layers') + + for l in gpl: + ## filter on selected + if not l.select:continue# comment this line for all + # for f in l.frames:#not all for now + f = l.active_frame + ## create gap stroke + create_gap_stroke(f, ob, tol=tolerance, mat_id=mat_id) + +def is_deviating_by(s, deviation=0.75): + '''get a stroke and a deviation angle (radians, 0.75~=42 degrees) + return true if end points angle pass the threshold''' + + if len(s.points) < 3: + return + + pa = s.points[-1] + pb = s.points[-2] + pc = s.points[-3] + + a = location_to_region(pa.co) + b = location_to_region(pb.co) + c = location_to_region(pc.co) + + #cb-> compare angle with ba-> + angle = (b-c).angle(a-b) + + print('angle: ', degrees(angle)) + pa.select = angle > deviation + return angle > deviation + +def extend_stroke_tips(s,f,ob,length, mat_id): + '''extend line boundary by given length''' + for id_pair in [ [1,0], [-2,-1] ]:# start and end pair + ## 2D mode + # a = location_to_region(ob.matrix_world @ s.points[id_pair[0]].co) + # b_loc = ob.matrix_world @ s.points[id_pair[1]].co + # b = location_to_region(b_loc) + # c = extrapolate_points_by_length(a,b,length)#print(vector_length_2d(b,c)) + # c_loc = region_to_location(c, b_loc) + # simple_draw_gp_stroke([ob.matrix_world.inverted() @ b_loc, ob.matrix_world.inverted() @ c_loc], f, width=2, mat_id=mat_id) + + ## 3D + a = s.points[id_pair[0]].co# ob.matrix_world @ + b = s.points[id_pair[1]].co# ob.matrix_world @ + c = extrapolate_points_by_length(a,b,length)#print(vector_length(b,c)) + simple_draw_gp_stroke([b,c], f, width=2, mat_id=mat_id) + +def change_extension_length(ob, strokelist, length, selected=False): + mat_id = get_closeline_mat(ob) + if not mat_id: + print('could not get/set closeline mat') + return + + ct = 0 + for s in strokelist: + if s.material_index != mat_id:#is NOT a closeline + continue + if len(s.points) < 2:#not enough point to 
evaluate + continue + if selected and not s.select:# select filter + continue + + ## Change length of current length to designated + # Vector point A to point B (direction), push point B in this direction + a = s.points[-2].co + bp = s.points[-1]#end-point + b = bp.co + ab = b - a + if not ab: + continue + # new pos of B is A + new length in the AB direction + newb = a + (ab.normalized() * length) + bp.co = newb + ct += 1 + + return ct + +def extend_all_strokes_tips(ob, frame, length=10, selected=False): + '''extend all strokes boundary by calling extend_stroke_tips''' + # ob = bpy.context.object + mat_id = get_closeline_mat(ob) + if not mat_id: + print('could not get/set closeline mat') + return + + # TODO need custom filters or go in GP refine strokes... + # frame = ob.data.layers.active.active_frame + + if not frame: return + ct = 0 + #TODO need to delete previous closing lines on frame before launching + + # iterate in a copy of stroke list to avoid growing frame.strokes as we loop in ! + for s in list(frame.strokes): + if s.material_index == mat_id:#is a closeline + continue + if len(s.points) < 2:#not enough point to evaluate + continue + if selected and not s.select:#filter by selection + continue + + extend_stroke_tips(s, frame, ob, length, mat_id=mat_id) + ct += 1 + + return ct + + +class GPSTK_OT_extend_lines(bpy.types.Operator): + """ + Extend lines on stroke boundarys + """ + bl_idname = "gp.extend_close_lines" + bl_label = "Gpencil extend closing lines" + bl_options = {'REGISTER','UNDO'} + + @classmethod + def poll(cls, context): + return context.active_object is not None and context.active_object.type == 'GPENCIL' + + # mode : bpy.props.StringProperty( + # name="mode", description="Set mode for operator", default="render", maxlen=0, subtype='NONE', options={'ANIMATABLE'}) + + layer_tgt : bpy.props.EnumProperty( + name="Extend layers", description="Choose which layer to target", + default='ACTIVE', + items=( + ('ACTIVE', 'Active only', 'Target active layer only', 0),#include icon name in fourth position + ('SELECTED', 'Selected', 'Target selected layers in GP dopesheet (only visible)', 1), + ('ALL_VISIBLE', 'All visible', 'target all visible layers', 2), + ('ALL', 'All', 'All (even locked and hided layer)', 3), + )) + + selected : bpy.props.BoolProperty(name='Selected', default=False, description="Work on selected stroke only if True, else All stroke") + + length : bpy.props.FloatProperty(name='Length', default=0.2, precision=3, step=0.01, description="length of the extended strokes") + + def invoke(self, context, event): + # self.selected = event.shift#if shift is pressed, force inclusion of load + ## get init value from scene prop settings + self.selected = context.scene.gpcolor_props.extend_selected + self.length = context.scene.gpcolor_props.extend_length + self.layer_tgt = context.scene.gpcolor_props.extend_layer_tgt + return self.execute(context) + + def execute(self, context): + ob = context.object + if self.layer_tgt == 'ACTIVE': + lays = [ob.data.layers.active] + elif self.layer_tgt == 'SELECTED': + lays = [l for l in ob.data.layers if l.select and not l.hide] + elif self.layer_tgt == 'ALL_VISIBLE': + lays = [l for l in ob.data.layers if not l.hide] + else: + lays = [l for l in ob.data.layers if not any(x in l.info for x in ('spot', 'colo'))] + + fct = 0 + for l in lays: + if not l.active_frame: + print(f'{l.info} has no active frame') + continue + fct += extend_all_strokes_tips(ob, l.active_frame, length = self.length, selected = self.selected) + + if not fct: + mess 
= "No strokes extended... see console" + self.report({'WARNING'}, mess)#WARNING, ERROR + return {'CANCELLED'} + + mess = f"{fct} strokes extended with closing lines" + self.report({'INFO'}, mess)#WARNING, ERROR + return {'FINISHED'} + + +class GPSTK_OT_change_closeline_length(bpy.types.Operator): + """ + Change extended lines length + """ + bl_idname = "gp.change_close_lines_extension" + bl_label = "Change closeline length (use F9 to call redo panel)" + bl_options = {'REGISTER','UNDO'} + + @classmethod + def poll(cls, context): + return context.active_object is not None and context.active_object.type == 'GPENCIL' + + layer_tgt : bpy.props.EnumProperty( + name="Extend layers", description="Choose which layer to target", + default='ACTIVE', + items=( + ('ACTIVE', 'Active only', 'Target active layer only', 0),#include icon name in fourth position + ('SELECTED', 'Selected', 'Target selected layers in GP dopesheet (only visible)', 1), + ('ALL_VISIBLE', 'All visible', 'target all visible layers', 2), + ('ALL', 'All', 'All (even locked and hided layer)', 3), + )) + + selected : bpy.props.BoolProperty(name='Selected', default=False, description="Work on selected stroke only if True, else All stroke") + + length : bpy.props.FloatProperty(name='Length', default=0.2, precision=3, step=0.01, description="length of the extended strokes")#step=0.00, + + def invoke(self, context, event): + ## get init value from scene prop settings + self.selected = context.scene.gpcolor_props.extend_selected + self.length = context.scene.gpcolor_props.extend_length + self.layer_tgt = context.scene.gpcolor_props.extend_layer_tgt + return self.execute(context) + + def execute(self, context): + ob = context.object + if self.layer_tgt == 'ACTIVE': + lays = [ob.data.layers.active] + elif self.layer_tgt == 'SELECTED': + lays = [l for l in ob.data.layers if l.select and not l.hide] + elif self.layer_tgt == 'ALL_VISIBLE': + lays = [l for l in ob.data.layers if not l.hide] + else: + lays = [l for l in ob.data.layers if not any(x in l.info for x in ('spot', 'colo'))] + + fct = 0 + for l in lays: + if not l.active_frame: + print(f'{l.info} has no active frame') + continue + fct += change_extension_length(ob, [s for s in l.active_frame.strokes], length = self.length, selected = self.selected) + + if not fct: + mess = "No extension modified... 
see console" + self.report({'WARNING'}, mess) + return {'CANCELLED'} + + mess = f"{fct} extension tweaked" + self.report({'INFO'}, mess) + return {'FINISHED'} + + +class GPSTK_OT_comma_finder(bpy.types.Operator): + """ + Tester to identify accidental comma trace + """ + bl_idname = "gp.comma_finder" + bl_label = "Strokes comma finder" + bl_options = {'REGISTER','UNDO'} + + @classmethod + def poll(cls, context): + return context.active_object is not None and context.active_object.type == 'GPENCIL' + + def execute(self, context): + ct = 0 + ob = context.object + lays = [l for l in ob.data.layers if not l.hide and not l.lock] + for l in lays: + if not l.active_frame:continue + for s in l.active_frame.strokes: + if is_deviating_by(s, context.scene.gpcolor_props.deviation_tolerance): + ct+=1 + + self.report({'INFO'}, f'{ct} endpoint found')#WARNING, ERROR + return {'FINISHED'} + + +class GPSTK_PT_line_closer_panel(bpy.types.Panel): + bl_label = "GP line stopper"# title + ## bl_options = {'DEFAULT_CLOSED', 'HIDE_HEADER' }# closed by default, collapse the panel and the label + ## is_popover = False # if ommited + ## bl_space_type = ['EMPTY', 'VIEW_3D', 'IMAGE_EDITOR', 'NODE_EDITOR', 'SEQUENCE_EDITOR', 'CLIP_EDITOR', 'DOPESHEET_EDITOR', 'GRAPH_EDITOR', 'NLA_EDITOR', 'TEXT_EDITOR', 'CONSOLE', 'INFO', 'TOPBAR', 'STATUSBAR', 'OUTLINER', 'PROPERTIES', 'FILE_BROWSER', 'PREFERENCES'], default 'EMPTY' + + bl_space_type = "VIEW_3D" + bl_region_type = "UI" + bl_category = "Gpencil"#name of the tab + #attach in gmic colorize ? (bad idea since gmicolor not in init and may be disable) + # bl_parent_id = "GMICOLOR_PT_auto_color_panel" + + @classmethod + def poll(cls, context): + return (context.object is not None)# and context.object.type == 'GPENCIL' + + ## draw stuff inside the header (place before main label) + # def draw_header(self, context): + # layout = self.layout + # layout.label(text="More text in header") + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + # prefs = get_addon_prefs() + layout.prop(context.scene.gpcolor_props, 'extend_layer_tgt') + layout.prop(context.scene.gpcolor_props, 'extend_selected') + layout.prop(context.scene.gpcolor_props, 'extend_length') + layout.operator("gp.extend_close_lines", icon = 'SNAP_MIDPOINT') + + #diplay closeline visibility + if context.object.type == 'GPENCIL' and context.object.data.materials.get('closeline'): + row=layout.row() + row.prop(context.object.data.materials['closeline'].grease_pencil, 'hide', text='Stop lines') + row.operator("gp.change_close_lines_extension", text='Length', icon = 'DRIVER_DISTANCE') + else: + layout.label(text='-no stop lines-') + + layout.separator() + layout.prop(context.scene.gpcolor_props, 'deviation_tolerance') + layout.operator("gp.comma_finder", icon = 'INDIRECT_ONLY_OFF') + #TODO change length (on selection, on all) + #TODO remove all line (in unlocked frame, on all) + + +classes = ( +GPSTK_OT_extend_lines, +GPSTK_OT_change_closeline_length, +GPSTK_OT_comma_finder, +GPSTK_PT_line_closer_panel, +) + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + +def unregister(): + for cls in reversed(classes): + bpy.utils.unregister_class(cls) \ No newline at end of file diff --git a/GP_guided_colorize/func_gmic.py b/GP_guided_colorize/func_gmic.py new file mode 100644 index 0000000..1a91cc0 --- /dev/null +++ b/GP_guided_colorize/func_gmic.py @@ -0,0 +1,184 @@ +import os, re +from os.path import join, basename, exists, dirname, abspath, splitext +import subprocess +import 
time, datetime + +from ..utils import get_addon_prefs, transfer_value + +def get_gmic(): + prefs = get_addon_prefs() + return prefs.gmic_path + + +## globals +image_exts = ('.png', '.jpg', '.tiff', '.tga', '.jpeg',) +Rnum = re.compile(r'(\d+)(?!.*\d)') + +def is_img(fp): + if splitext(basename(fp))[1].lower() in image_exts: + return True + +def is_img_folder(d): + ct = 0 + for f in os.listdir(d): + ct += 1 + #if os.path.isfile(d) + if not is_img(f): + #print("not an image:", f)#Dbg + return 0 + + if not ct:#return false if nothing found + #print('nothing in folder') + return -1 + + ###all files has been evaluated as images + return 1 + + +def auto_colo(line, colo, out): + gmic = get_gmic() + opt = r'fx_colorize_lineart_smart 2,96,0,0,0,1,24,197,0,90.4,1,34.05,22.27,48.96,0.57,6.4,1,0,20,50.18,7.5,0.5,0' + if not exists(line) or not exists(colo): + print('one source directory not exists') + return + + line_l = [join(line, i) for i in os.listdir(line) if i.endswith(('.png', '.jpg'))] + colo_l = [join(colo, i) for i in os.listdir(colo) if i.endswith(('.png', '.jpg'))] + line_l.sort() + colo_l.sort() + + #print(line_l) + #print(colo_l) + + if len(line_l) != len(colo_l): + print('lists of line and colo files have not the same lenght') + return + + outfolder_name = basename(dirname(line)) + '_autocolo' + outfolder = join(out, outfolder_name) + if not exists(outfolder): + os.mkdir(outfolder) + + ct = 0 + for i, f in enumerate(line_l): + ct += 1 + filename = outfolder_name + '_'+ str(i+101).zfill(4) +'.png' + outfile = join(outfolder, filename) + #print('-', f,outfile) + cmd = '{} {} {} {} -o[1] {}'.format(gmic, line_l[i], colo_l[i], opt, outfile) + #note on -o[1] + #this filter output 2 img, original line and the new color, here specify only color (keep your name clean) + print(cmd) + os.system(cmd) + # if ct > 3:return#limiter + + print('Done') + + +def random_fill(line, out): + '''gmic command to convert line file to pseudo color out file''' + gmic = get_gmic() + #Todo, handle png compression, will be deleted so can be uncompressed + start_time = time.time()#timer + opt = r'fx_colorize_lineart_smart 0,100,0,0,0,80,184,0,90.5,2,0,0,0,0,4,1,2' + cmd = [gmic, line] + opt.split(' ') + ['-o[1]', out]# + ['to_rgba'] + print(cmd) + subprocess.call(cmd) + print("elapsed", time.time() - start_time)#timer + +def random_fill_folder(src, dest): + '''gmic command to convert a line folder to pseudo color output in another folder''' + # opt = r'fx_colorize_lineart_smart 0,97.2,0,0,0,0,210,213,0,86.5,1,49.41,28.67,37.26,0.29,12.7,0,0,0,0,0,0,0' + if not exists(src): + print('source directory not exists') + return + + line_l = [join(src, i) for i in os.listdir(src) if is_img(i)] + line_l.sort() + + ''' + outfolder_name = basename(dirname(src)) + '_randomcolo' + outfolder = join(dest, outfolder_name) + if not exists(outfolder): + os.mkdir(outfolder) + ''' + + #ct = 0 + for i, f in enumerate(line_l): + outfile = join(dest, 'random_fill_' + str(i+101).zfill(4) +'.png') + random_fill(f, outfile) + + print('Done') + +def generate_empty_image(fp): + ''' + Generate an empty 1x1 pixel full transparent + width,height,depth,spectrum(channels) + doc: https://gmic.eu/tutorial/_input.shtml + generate a file of ~200byte + ''' + gmic = get_gmic() + cmd = [gmic, '1,1,1,4', '-o', fp] + # cmd = [gmic, '16,9,1,4', '-o', fp] #classic ratio... 
influence nothing + subprocess.call(cmd) + +def propagate_color(line, spot, out): + gmic = get_gmic() + start_time = time.time()#timer + ## colorize + opt = ['fx_colorize_lineart', '1,3,0,0.02'] + + ## antialias + aa = ['fx_smooth_antialias', '10,10.1,1.22,0,50,50']# smooth aa + # aa = ['gcd_anti_alias', '60,0.1,0,0']# basic aa + + ## BG remove + # vert pur 0,255,0,255 for BG color to delete, rose pale : [234,139,147,255] : [0.822786, 0.258183, 0.291771, 1.000000] #EA8B93 + # 2 first value tolerance(1~3),smoothness(need0) + del_bg = ['to_rgba', 'replace_color', '3,0,1,255,1,255,255,255,255,0'] + print('gmic: ', gmic) + # gmic = f'"{gmic}"' + cmd = [gmic, line, spot] + opt + del_bg + ['-o[1]', out]# + aa + print('cmd: ', cmd) + subprocess.call(cmd) + print("elapsed", time.time() - start_time)#timer + + + +def propagate_color_folder(line_fp, spot_fp, outfolder): + print('Starting', datetime.datetime.now())# print full current date + start_time = time.time()# get start time + + + # [gmic_krita_qt]./apply/ v -99 fx_colorize_lineart 1,3,0,0.02 + if not exists(line_fp) or not exists(spot_fp) or not exists(outfolder): + print(f'''some directories not exists: + {exists(line_fp)}: {line_fp} + {exists(spot_fp)}: {spot_fp} + {exists(outfolder)}: {outfolder}''') + return 1 + + lines = sorted([f.path for f in os.scandir(line_fp) if is_img(f.name) and Rnum.search(f.name)]) + spots = sorted([f.path for f in os.scandir(spot_fp) if is_img(f.name) and Rnum.search(f.name)]) + + # if len(lines) != len(spots):#file number test...valid but disabled for tests + # print('lists of line and colo files have not the same lenght') + # return + + for l, s in zip(lines, spots): + lframe = int(Rnum.search(l).group(1)) + sframe = int(Rnum.search(s).group(1)) + if lframe != sframe: + print(f'line img has not the same number as spot img {lframe} != {sframe}') + continue + + out = join(outfolder, f'colo_{str(lframe).zfill(3)}.png') + + print(f'frame {lframe}') + propagate_color(l,s,out) + + elapsed_time = time.time() - start_time# seconds + full_time = str(datetime.timedelta(seconds=elapsed_time))# hh:mm:ss format + + print("elapsed time", elapsed_time) + print(full_time) \ No newline at end of file diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..f288702 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/OP_box_deform.py b/OP_box_deform.py new file mode 100644 index 0000000..2b99d8e --- /dev/null +++ b/OP_box_deform.py @@ -0,0 +1,579 @@ +# ##### BEGIN GPL LICENSE BLOCK ##### +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# ##### END GPL LICENSE BLOCK ##### + +''' +Based on Box_deform addon +! Standalone file ! Stripped preference, and set best default auto transform) +''' + + +import bpy +import numpy as np + +""" def get_addon_prefs(): + import os + addon_name = os.path.splitext(__name__)[0] + preferences = bpy.context.preferences + addon_prefs = preferences.addons[addon_name].preferences + return (addon_prefs) """ + +def location_to_region(worldcoords): + from bpy_extras import view3d_utils + return view3d_utils.location_3d_to_region_2d(bpy.context.region, bpy.context.space_data.region_3d, worldcoords) + +def region_to_location(viewcoords, depthcoords): + from bpy_extras import view3d_utils + return view3d_utils.region_2d_to_location_3d(bpy.context.region, bpy.context.space_data.region_3d, viewcoords, depthcoords) + +def assign_vg(obj, vg_name): + ## create vertex group + vg = obj.vertex_groups.get(vg_name) + if vg: + # remove to start clean + obj.vertex_groups.remove(vg) + vg = obj.vertex_groups.new(name=vg_name) + bpy.ops.gpencil.vertex_group_assign() + return vg + +def view_cage(obj): + + lattice_interp = 'KEY_LINEAR'#get_addon_prefs().default_deform_type + + gp = obj.data + gpl = gp.layers + + coords = [] + initial_mode = bpy.context.mode + + ## get points + if bpy.context.mode == 'EDIT_GPENCIL': + for l in gpl: + if l.lock or l.hide or not l.active_frame:#or len(l.frames) + continue + if gp.use_multiedit: + target_frames = [f for f in l.frames if f.select] + else: + target_frames = [l.active_frame] + + for f in target_frames: + for s in f.strokes: + if not s.select: + continue + for p in s.points: + if p.select: + # get real location + coords.append(obj.matrix_world @ p.co) + + elif bpy.context.mode == 'OBJECT':#object mode -> all points + for l in gpl:# if l.hide:continue# only visible ? (might break things) + if not len(l.frames): + continue#skip frameless layer + for s in l.active_frame.strokes: + for p in s.points: + coords.append(obj.matrix_world @ p.co) + + elif bpy.context.mode == 'PAINT_GPENCIL': + # get last stroke points coordinated + if not gpl.active or not gpl.active.active_frame: + return 'No frame to deform' + + if not len(gpl.active.active_frame.strokes): + return 'No stroke found to deform' + + paint_id = -1 + if bpy.context.scene.tool_settings.use_gpencil_draw_onback: + paint_id = 0 + coords = [obj.matrix_world @ p.co for p in gpl.active.active_frame.strokes[paint_id].points] + + else: + return 'Wrong mode!' + + if not coords: + ## maybe silent return instead (need special str code to manage errorless return) + return 'No points found!' + + if bpy.context.mode in ('EDIT_GPENCIL', 'PAINT_GPENCIL') and len(coords) < 2: + # Dont block object mod + return 'Less than two point selected' + + vg_name = 'lattice_cage_deform_group' + + if bpy.context.mode == 'EDIT_GPENCIL': + vg = assign_vg(obj, vg_name) + + if bpy.context.mode == 'PAINT_GPENCIL': + # points cannot be assign to API yet(ugly and slow workaround but only way) + # -> https://developer.blender.org/T56280 so, hop'in'ops ! 
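+        ## In short, the workaround below:
+        ##   1. remembers each point's select state, then deselects everything
+        ##   2. selects only the points of the freshly drawn stroke
+        ##   3. switches to edit mode and calls gpencil.vertex_group_assign()
+        ##   4. restores the original selection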
+ + # store selection and deselect all + plist = [] + for s in gpl.active.active_frame.strokes: + for p in s.points: + plist.append([p, p.select]) + p.select = False + + # select + ## foreach_set does not update + # gpl.active.active_frame.strokes[paint_id].points.foreach_set('select', [True]*len(gpl.active.active_frame.strokes[paint_id].points)) + for p in gpl.active.active_frame.strokes[paint_id].points: + p.select = True + + # assign + bpy.ops.object.mode_set(mode='EDIT_GPENCIL') + vg = assign_vg(obj, vg_name) + + # restore + for pl in plist: + pl[0].select = pl[1] + + + ## View axis Mode --- + + ## get view coordinate of all points + coords2D = [location_to_region(co) for co in coords] + + # find centroid for depth (or more economic, use obj origin...) + centroid = np.mean(coords, axis=0) + + # not a mean ! a mean of extreme ! centroid2d = np.mean(coords2D, axis=0) + all_x, all_y = np.array(coords2D)[:, 0], np.array(coords2D)[:, 1] + min_x, min_y = np.min(all_x), np.min(all_y) + max_x, max_y = np.max(all_x), np.max(all_y) + + width = (max_x - min_x) + height = (max_y - min_y) + center_x = min_x + (width/2) + center_y = min_y + (height/2) + + centroid2d = (center_x,center_y) + center = region_to_location(centroid2d, centroid) + # bpy.context.scene.cursor.location = center#Dbg + + + #corner Bottom-left to Bottom-right + x0 = region_to_location((min_x, min_y), centroid) + x1 = region_to_location((max_x, min_y), centroid) + x_worldsize = (x0 - x1).length + + #corner Bottom-left to top-left + y0 = region_to_location((min_x, min_y), centroid) + y1 = region_to_location((min_x, max_y), centroid) + y_worldsize = (y0 - y1).length + + ## in case of 3 + + lattice_name = 'lattice_cage_deform' + # cleaning + cage = bpy.data.objects.get(lattice_name) + if cage: + bpy.data.objects.remove(cage) + + lattice = bpy.data.lattices.get(lattice_name) + if lattice: + bpy.data.lattices.remove(lattice) + + # create lattice object + lattice = bpy.data.lattices.new(lattice_name) + cage = bpy.data.objects.new(lattice_name, lattice) + cage.show_in_front = True + + ## Master (root) collection + bpy.context.scene.collection.objects.link(cage) + + # spawn cage and align it to view (Again ! align something to a vector !!! 
argg) + + r3d = bpy.context.space_data.region_3d + viewmat = r3d.view_matrix + + cage.matrix_world = viewmat.inverted() + cage.scale = (x_worldsize, y_worldsize, 1) + ## Z aligned in view direction (need minus X 90 degree to be aligned FRONT) + # cage.rotation_euler.x -= radians(90) + # cage.scale = (x_worldsize, 1, y_worldsize) + cage.location = center + + lattice.points_u = 2 + lattice.points_v = 2 + lattice.points_w = 1 + + lattice.interpolation_type_u = lattice_interp#'KEY_LINEAR'-'KEY_BSPLINE' + lattice.interpolation_type_v = lattice_interp#'KEY_LINEAR'-'KEY_BSPLINE' + lattice.interpolation_type_w = lattice_interp#'KEY_LINEAR'-'KEY_BSPLINE' + + mod = obj.grease_pencil_modifiers.new('tmp_lattice', 'GP_LATTICE') + + # move to top if modifiers exists + for _ in range(len(obj.grease_pencil_modifiers)): + bpy.ops.object.gpencil_modifier_move_up(modifier='tmp_lattice') + + mod.object = cage + + if initial_mode == 'PAINT_GPENCIL': + mod.layer = gpl.active.info + + # note : if initial was Paint, changed to Edit + # so vertex attribution is valid even for paint + if bpy.context.mode == 'EDIT_GPENCIL': + mod.vertex_group = vg.name + + #Go in object mode if not already + if bpy.context.mode != 'OBJECT': + bpy.ops.object.mode_set(mode='OBJECT') + + # Store name of deformed object in case of 'revive modal' + cage.vertex_groups.new(name=obj.name) + + ## select and make cage active + # cage.select_set(True) + bpy.context.view_layer.objects.active = cage + obj.select_set(False)#deselect GP object + bpy.ops.object.mode_set(mode='EDIT')# go in lattice edit mode + bpy.ops.lattice.select_all(action='SELECT')# select all points + + ## Eventually change tool mode to tweak for direct point editing (reset after before leaving) + bpy.ops.wm.tool_set_by_id(name="builtin.select")# Tweaktoolcode + return cage + + +def back_to_obj(obj, gp_mode, org_lattice_toolset, context): + if context.mode == 'EDIT_LATTICE' and org_lattice_toolset:# Tweaktoolcode - restore the active tool used by lattice edit.. 
+ bpy.ops.wm.tool_set_by_id(name = org_lattice_toolset)# Tweaktoolcode + + # gp object active and selected + bpy.ops.object.mode_set(mode='OBJECT') + obj.select_set(True) + bpy.context.view_layer.objects.active = obj + + +def delete_cage(cage): + lattice = cage.data + bpy.data.objects.remove(cage) + bpy.data.lattices.remove(lattice) + +def apply_cage(gp_obj, cage): + mod = gp_obj.grease_pencil_modifiers.get('tmp_lattice') + if mod: + bpy.ops.object.gpencil_modifier_apply(apply_as='DATA', modifier=mod.name) + else: + print('tmp_lattice modifier not found to apply...') + + delete_cage(cage) + +def cancel_cage(gp_obj, cage): + #remove modifier + mod = gp_obj.grease_pencil_modifiers.get('tmp_lattice') + if mod: + gp_obj.grease_pencil_modifiers.remove(mod) + else: + print('tmp_lattice modifier not found to remove...') + + delete_cage(cage) + + +class GP_OT_latticeGpDeform(bpy.types.Operator): + """Create a lattice to use as quad corner transform""" + bl_idname = "gp.latticedeform" + bl_label = "Box deform" + bl_description = "Use lattice for free box transforms on grease pencil points (Ctrl+T)" + bl_options = {"REGISTER", "UNDO"} + + @classmethod + def poll(cls, context): + return context.object is not None and context.object.type in ('GPENCIL','LATTICE') + + # local variable + tab_press_ct = 0 + + def modal(self, context, event): + display_text = f"Deform Cage size: {self.lat.points_u}x{self.lat.points_v} (1-9 or ctrl + ←→↑↓]) | \ +mode (M) : {'Linear' if self.lat.interpolation_type_u == 'KEY_LINEAR' else 'Spline'} | \ +valid:Spacebar/Enter/Tab, cancel:Del/Backspace" + context.area.header_text_set(display_text) + + + ## Handle ctrl+Z + if event.type in {'Z'} and event.value == 'PRESS' and event.ctrl: + ## Disable (capture key) + return {"RUNNING_MODAL"} + ## Not found how possible to find modal start point in undo stack to + # print('ops list', context.window_manager.operators.keys()) + # if context.window_manager.operators:#can be empty + # print('\nlast name', context.window_manager.operators[-1].name) + + # Auto interpo check + if self.auto_interp: + if event.type in {'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE', 'ZERO',} and event.value == 'PRESS': + self.set_lattice_interp('KEY_BSPLINE') + if event.type in {'DOWN_ARROW', "UP_ARROW", "RIGHT_ARROW", "LEFT_ARROW"} and event.value == 'PRESS' and event.ctrl: + self.set_lattice_interp('KEY_BSPLINE') + if event.type in {'ONE'} and event.value == 'PRESS': + self.set_lattice_interp('KEY_LINEAR') + + # Single keys + if event.type in {'H'} and event.value == 'PRESS': + # self.report({'INFO'}, "Can't hide") + return {"RUNNING_MODAL"} + + if event.type in {'ONE'} and event.value == 'PRESS':# , 'NUMPAD_1' + self.lat.points_u = self.lat.points_v = 2 + return {"RUNNING_MODAL"} + + if event.type in {'TWO'} and event.value == 'PRESS':# , 'NUMPAD_2' + self.lat.points_u = self.lat.points_v = 3 + return {"RUNNING_MODAL"} + + if event.type in {'THREE'} and event.value == 'PRESS':# , 'NUMPAD_3' + self.lat.points_u = self.lat.points_v = 4 + return {"RUNNING_MODAL"} + + if event.type in {'FOUR'} and event.value == 'PRESS':# , 'NUMPAD_4' + self.lat.points_u = self.lat.points_v = 5 + return {"RUNNING_MODAL"} + + if event.type in {'FIVE'} and event.value == 'PRESS':# , 'NUMPAD_5' + self.lat.points_u = self.lat.points_v = 6 + return {"RUNNING_MODAL"} + + if event.type in {'SIX'} and event.value == 'PRESS':# , 'NUMPAD_6' + self.lat.points_u = self.lat.points_v = 7 + return {"RUNNING_MODAL"} + + if event.type in {'SEVEN'} and event.value == 'PRESS':# 
, 'NUMPAD_7' + self.lat.points_u = self.lat.points_v = 8 + return {"RUNNING_MODAL"} + + if event.type in {'EIGHT'} and event.value == 'PRESS':# , 'NUMPAD_8' + self.lat.points_u = self.lat.points_v = 9 + return {"RUNNING_MODAL"} + + if event.type in {'NINE'} and event.value == 'PRESS':# , 'NUMPAD_9' + self.lat.points_u = self.lat.points_v = 10 + return {"RUNNING_MODAL"} + + if event.type in {'ZERO'} and event.value == 'PRESS':# , 'NUMPAD_0' + self.lat.points_u = 2 + self.lat.points_v = 1 + return {"RUNNING_MODAL"} + + if event.type in {'RIGHT_ARROW'} and event.value == 'PRESS' and event.ctrl: + if self.lat.points_u < 20: + self.lat.points_u += 1 + return {"RUNNING_MODAL"} + + if event.type in {'LEFT_ARROW'} and event.value == 'PRESS' and event.ctrl: + if self.lat.points_u > 1: + self.lat.points_u -= 1 + return {"RUNNING_MODAL"} + + if event.type in {'UP_ARROW'} and event.value == 'PRESS' and event.ctrl: + if self.lat.points_v < 20: + self.lat.points_v += 1 + return {"RUNNING_MODAL"} + + if event.type in {'DOWN_ARROW'} and event.value == 'PRESS' and event.ctrl: + if self.lat.points_v > 1: + self.lat.points_v -= 1 + return {"RUNNING_MODAL"} + + + # change modes + if event.type in {'M'} and event.value == 'PRESS': + self.auto_interp = False + interp = 'KEY_BSPLINE' if self.lat.interpolation_type_u == 'KEY_LINEAR' else 'KEY_LINEAR' + self.set_lattice_interp(interp) + return {"RUNNING_MODAL"} + + # Valid + if event.type in {'RET', 'SPACE'}: + if event.value == 'PRESS': + #bpy.ops.ed.flush_edits()# TODO: find a way to get rid of undo-registered lattices tweaks + self.restore_prefs(context) + back_to_obj(self.gp_obj, self.gp_mode, self.org_lattice_toolset, context) + apply_cage(self.gp_obj, self.cage)#must be in object mode + + # back to original mode + if self.gp_mode != 'OBJECT': + bpy.ops.object.mode_set(mode=self.gp_mode) + + context.area.header_text_set(None)#reset header + + return {'FINISHED'} + + # Abort --- + # One Warning for Tab cancellation. 
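+        # A first TAB press only reports a warning; a second TAB (or Del/Backspace,
+        # or re-typing the Ctrl+T shortcut) cancels and removes the temporary cage.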
+ if event.type == 'TAB' and event.value == 'PRESS': + self.tab_press_ct += 1 + if self.tab_press_ct < 2: + self.report({'WARNING'}, "Pressing TAB again will Cancel") + return {"RUNNING_MODAL"} + + if event.type in {'T'} and event.value == 'PRESS' and event.ctrl:# Retyped same shortcut + self.cancel(context) + return {'CANCELLED'} + + if event.type in {'DEL', 'BACK_SPACE'} or self.tab_press_ct >= 2:#'ESC', + self.cancel(context) + return {'CANCELLED'} + + return {'PASS_THROUGH'} + + def set_lattice_interp(self, interp): + self.lat.interpolation_type_u = self.lat.interpolation_type_v = self.lat.interpolation_type_w = interp + + def cancel(self, context): + self.restore_prefs(context) + back_to_obj(self.gp_obj, self.gp_mode, self.org_lattice_toolset, context) + cancel_cage(self.gp_obj, self.cage) + context.area.header_text_set(None) + if self.gp_mode != 'OBJECT': + bpy.ops.object.mode_set(mode=self.gp_mode) + + def store_prefs(self, context): + # store_valierables <-< preferences + self.use_drag_immediately = context.preferences.inputs.use_drag_immediately + self.drag_threshold_mouse = context.preferences.inputs.drag_threshold_mouse + self.drag_threshold_tablet = context.preferences.inputs.drag_threshold_tablet + self.use_overlays = context.space_data.overlay.show_overlays + + def restore_prefs(self, context): + # preferences <-< store_valierables + context.preferences.inputs.use_drag_immediately = self.use_drag_immediately + context.preferences.inputs.drag_threshold_mouse = self.drag_threshold_mouse + context.preferences.inputs.drag_threshold_tablet = self.drag_threshold_tablet + context.space_data.overlay.show_overlays = self.use_overlays + + def set_prefs(self, context): + context.preferences.inputs.use_drag_immediately = True + context.preferences.inputs.drag_threshold_mouse = 1 + context.preferences.inputs.drag_threshold_tablet = 3 + context.space_data.overlay.show_overlays = True + + def invoke(self, context, event): + ## Restrict to 3D view + if context.area.type != 'VIEW_3D': + self.report({'WARNING'}, "View3D not found, cannot run operator") + return {'CANCELLED'} + + if not context.object:#do it in poll ? + self.report({'ERROR'}, "No active objects found") + return {'CANCELLED'} + + # self.prefs = get_addon_prefs()#get_prefs + self.org_lattice_toolset = None + self.gp_mode = 'EDIT_GPENCIL' + + # --- special Case of lattice revive modal, just after ctrl+Z back into lattice with modal stopped + if context.mode == 'EDIT_LATTICE' and context.object.name == 'lattice_cage_deform' and len(context.object.vertex_groups): + self.gp_obj = context.scene.objects.get(context.object.vertex_groups[0].name) + if not self.gp_obj: + self.report({'ERROR'}, "/!\\ Box Deform : Cannot find object to target") + return {'CANCELLED'} + if not self.gp_obj.grease_pencil_modifiers.get('tmp_lattice'): + self.report({'ERROR'}, "/!\\ No 'tmp_lattice' modifiers on GP object") + return {'CANCELLED'} + self.cage = context.object + self.lat = self.cage.data + self.set_prefs(context) + + context.window_manager.modal_handler_add(self) + return {'RUNNING_MODAL'} + + if context.object.type != 'GPENCIL': + # self.report({'ERROR'}, "Works only on gpencil objects") + ## silent return + return {'CANCELLED'} + + #paint need VG workaround. 
object need good shortcut + if context.mode not in ('EDIT_GPENCIL', 'OBJECT', 'PAINT_GPENCIL'): + # self.report({'WARNING'}, "Works only in following GPencil modes: edit")# ERROR + ## silent return + return {'CANCELLED'} + + # bpy.ops.ed.undo_push(message="Box deform step")#don't work as expected (+ might be obsolete) + # https://developer.blender.org/D6147 <- undo forget + + self.gp_obj = context.object + # Clean potential failed previous job (delete tmp lattice) + mod = self.gp_obj.grease_pencil_modifiers.get('tmp_lattice') + if mod: + print('Deleted remaining lattice modifiers') + self.gp_obj.grease_pencil_modifiers.remove(mod) + + phantom_obj = context.scene.objects.get('lattice_cage_deform') + if phantom_obj: + print('Deleted remaining lattice object') + delete_cage(phantom_obj) + + if [m for m in self.gp_obj.grease_pencil_modifiers if m.type == 'GP_LATTICE']: + self.report({'ERROR'}, "Grease pencil object already has a lattice modifier (can only have one)") + return {'CANCELLED'} + + + self.gp_mode = context.mode#store mode for restore + + # All good, create lattice and start modal + + # Create lattice (and switch to lattice edit) ---- + self.cage = view_cage(self.gp_obj) + if isinstance(self.cage, str):#error, cage not created, display error + self.report({'ERROR'}, self.cage) + return {'CANCELLED'} + + self.lat = self.cage.data + + ## usability toggles + + ## pref for clic drag -> if self.prefs.use_clic_drag:#Store the active tool since we will change it + self.org_lattice_toolset = bpy.context.workspace.tools.from_space_view3d_mode(bpy.context.mode, create=False).idname# Tweaktoolcode + + self.auto_interp = True#self.prefs.auto_swap_deform_type + #store (scene properties needed in case of ctrlZ revival) + self.store_prefs(context) + self.set_prefs(context) + context.window_manager.modal_handler_add(self) + return {'RUNNING_MODAL'} + +## --- KEYMAP + +addon_keymaps = [] +def register_keymaps(): + addon = bpy.context.window_manager.keyconfigs.addon + + km = addon.keymaps.new(name = "Grease Pencil", space_type = "EMPTY", region_type='WINDOW') + kmi = km.keymap_items.new("gp.latticedeform", type ='T', value = "PRESS", ctrl = True) + kmi.repeat = False + addon_keymaps.append((km, kmi)) + +def unregister_keymaps(): + for km, kmi in addon_keymaps: + km.keymap_items.remove(kmi) + addon_keymaps.clear() + +### --- REGISTER --- + +def register(): + if bpy.app.background: + return + bpy.utils.register_class(GP_OT_latticeGpDeform) + register_keymaps() + +def unregister(): + if bpy.app.background: + return + unregister_keymaps() + bpy.utils.unregister_class(GP_OT_latticeGpDeform) \ No newline at end of file diff --git a/OP_breakdowner.py b/OP_breakdowner.py new file mode 100644 index 0000000..46f91f8 --- /dev/null +++ b/OP_breakdowner.py @@ -0,0 +1,339 @@ +#Breakdowner object mode V1 +import bpy +import re +from mathutils import Vector, Matrix +from math import radians, degrees + +# exemple for future improve: https://justinsbarrett.com/tweenmachine/ + +def get_surrounding_points(fc, frame): + '''Take an Fcurve and a frame and return previous and next frames''' + if not frame: frame = bpy.context.scene.frame_current + p_pt = n_pt = None + mins = [] + maxs = [] + for pt in fc.keyframe_points: + if pt.co[0] < frame: + p_pt = pt + if pt.co[0] > frame: + n_pt = pt + break + + return p_pt, n_pt + +## unused direct breackdown func +def breakdown_keys(percentage=50, channels=('location', 'rotation_euler', 'scale'), axe=(0,1,2)): + cf = bpy.context.scene.frame_current# use operator context (may be 
unsynced timeline) + axes_name = ('x', 'y', 'z') + obj = bpy.context.object# better use self.context + if not obj: + print('no active object') + return + + anim_data = obj.animation_data + if not anim_data: + print(f'no animation data on obj: {obj.name}') + return + + action = anim_data.action + if not action: + print(f'no action on animation data of obj: {obj.name}') + return + + skipping = [] + + for fc in action.fcurves: + # if fc.data_path.split('"')[1] in bone_names_filter:# bones + # if fc.data_path.split('.')[-1] in channels and fc.array_index in axe:# bones + if fc.data_path in channels and fc.array_index in axe:# .split('.')[-1] + fc_name = f'{fc.data_path}.{axes_name[fc.array_index]}' + print(fc_name) + pkf, nkf = get_surrounding_points(fc, frame=cf) + # check previous, next keyframe (if one or both is missing, skip) + if pkf is None or nkf is None: + skipping.append(fc_name) + continue + + prv, nxt = pkf.co[1], nkf.co[1] + if prv == nxt: + nval = prv + else: + nval = ((percentage * (nxt - prv)) / 100) + prv#intermediate val + print('value:', nval) + + fc.keyframe_points.add(1) + fc.keyframe_points[-1].co[0] = cf + fc.keyframe_points[-1].co[1] = nval + fc.keyframe_points[-1].type = pkf.type# make same type ? + fc.keyframe_points[-1].interpolation = pkf.interpolation + fc.update() + # obj.keyframe_insert(fc.data_path, index=fc.array_index, ) + +### breakdown_keys(channels=('location', 'rotation_euler', 'scale')) + +class OBJ_OT_breakdown_obj_anim(bpy.types.Operator): + """Breakdown percentage between two keyframes like bone pose mode""" + bl_idname = "object.breakdown_anim" + bl_label = "breakdown object keyframe" + bl_description = "Percentage value between previous dans next keyframes, " + bl_options = {"REGISTER", "UNDO"} + + pressed_ctrl = False + pressed_shift = False + # pressed_alt = False + str_val = '' + step = 5 + + @classmethod + def poll(cls, context): + return context.mode == 'OBJECT'and context.object + + def percentage(self): + return (self.xmouse - self.xmin) / self.width * 100 + + def assign_transforms(self, percentage): + for obj, path_dic in self.obdic.items(): + for data_path, index_dic in path_dic.items(): + for index, vals in index_dic.items():# prv, nxt = vals + # exec(f'bpy.data.objects["{obj.name}"].{data_path}[{index}] = {((self.percentage() * (vals[1] - vals[0])) / 100) + vals[0]}') + getattr(obj, data_path)[index] = ((percentage * (vals[1] - vals[0])) / 100) + vals[0] + + def modal(self, context, event): + context.area.tag_redraw() + refresh = False + ### /TESTER - keycode printer (flood console but usefull to know a keycode name) + # if event.type not in {'MOUSEMOVE', 'INBETWEEN_MOUSEMOVE'}:#avoid flood of mouse move. 
+ # print('key:', event.type, 'value:', event.value) + ### TESTER/ + + ## Handle modifier keys state + if event.type in {'LEFT_SHIFT', 'RIGHT_SHIFT'}: self.pressed_shift = event.value == 'PRESS' + if event.type in {'LEFT_CTRL', 'RIGHT_CTRL'}: self.pressed_ctrl = event.value == 'PRESS' + # if event.type in {'LEFT_ALT', 'RIGHT_ALT'}: self.pressed_alt = event.value == 'PRESS' + + ### KEYBOARD SINGLE PRESS + if event.value == 'PRESS': + refresh=True + if event.type in {'NUMPAD_MINUS'}:#, 'LEFT_BRACKET', 'WHEELDOWNMOUSE' + if self.str_val.startswith('-'): + self.str_val = self.str_val.strip('-') + else: + self.str_val = '-' + self.str_val#.strip('-') + + ## number + if event.type in {'ZERO', 'NUMPAD_0'}: self.str_val += '0' + if event.type in {'ONE', 'NUMPAD_1'}: self.str_val += '1' + if event.type in {'TWO', 'NUMPAD_2'}: self.str_val += '2' + if event.type in {'THREE', 'NUMPAD_3'}: self.str_val += '3' + if event.type in {'FOUR', 'NUMPAD_4'}: self.str_val += '4' + if event.type in {'FIVE', 'NUMPAD_5'}: self.str_val += '5' + if event.type in {'SIX', 'NUMPAD_6'}: self.str_val += '6' + if event.type in {'SEVEN', 'NUMPAD_7'}: self.str_val += '7' + if event.type in {'EIGHT', 'NUMPAD_8'}: self.str_val += '8' + if event.type in {'NINE', 'NUMPAD_9'}: self.str_val += '9' + + if event.type in {'NUMPAD_PERIOD', 'COMMA'}: + if not '.' in self.str_val: self.str_val += '.' + + # remove end chars + if event.type in {'DEL', 'BACK_SPACE'}: self.str_val = self.str_val[:-1] + + # TODO lock transforms + # if event.type in {'G'}:pass# grab translate only + # if event.type in {'R'}:pass# rotation only + # if event.type in {'S'}:pass# scale only + + ## TODO need to check if self.str_val is valid and if not : display warning and return running modal + + if re.search(r'\d', self.str_val): + use_num = True + percentage = float(self.str_val) + + display_percentage = f'{percentage:.1f}' if '.' in self.str_val else f'{percentage:.0f}' + display_text = f'Breakdown: [{display_percentage}]% | manual type, erase for mouse control' + + else:# use mouse + use_num = False + percentage = self.percentage() + if self.pressed_ctrl:# round + percentage = int(percentage) + if self.pressed_shift:# by step of 5 + modulo = percentage % self.step + if modulo < self.step/2.0: + percentage = int( percentage - modulo ) + else: + percentage = int( percentage + (self.step - modulo) ) + + display_percentage = f'{percentage:.1f}' if isinstance(percentage, float) else str(percentage) + display_text = f'Breakdown: {display_percentage}% | MODES ctrl: round - shift: 5 steps' + + context.area.header_text_set(display_text) + + ## Get mouse move + if event.type in {'MOUSEMOVE'}:# , 'INBETWEEN_MOUSEMOVE' + if not use_num:#avoid compute on mouse move when manual type on + refresh = True + ## percentage of xmouse in screen + self.xmouse = event.mouse_region_x + ## assign stuff + + if refresh: + self.assign_transforms(percentage) + + + # Valid + if event.type in {'RET', 'SPACE', 'LEFTMOUSE'}: + ## 'INSERTKEY_AVAILABLE' ? ? 
filter + context.area.header_text_set(None) + context.window.cursor_set("DEFAULT") + + if context.scene.tool_settings.use_keyframe_insert_auto:# auto key OK + if context.scene.tool_settings.use_keyframe_insert_keyingset and context.scene.keying_sets_all.active: + bpy.ops.anim.keyframe_insert('INVOKE_DEFAULT')#type='DEFAULT' + else: + bpy.ops.anim.keyframe_insert('INVOKE_DEFAULT', type='Available') + # "DEFAULT" not found in ('Available', 'Location', 'Rotation', 'Scaling', 'BUILTIN_KSI_LocRot', 'LocRotScale', 'BUILTIN_KSI_LocScale', 'BUILTIN_KSI_RotScale', 'BUILTIN_KSI_DeltaLocation', 'BUILTIN_KSI_DeltaRotation', 'BUILTIN_KSI_DeltaScale', 'BUILTIN_KSI_VisualLoc', 'BUILTIN_KSI_VisualRot', 'BUILTIN_KSI_VisualScaling', 'BUILTIN_KSI_VisualLocRot', 'BUILTIN_KSI_VisualLocRotScale', 'BUILTIN_KSI_VisualLocScale', 'BUILTIN_KSI_VisualRotScale') + return {'FINISHED'} + + # Abort + if event.type in {'RIGHTMOUSE', 'ESC'}: + ## Remove draw handler (if there was any) + # bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW') + + context.scene.frame_set(self.cf)# reset object pos (update scene to re-evaluate anim) + context.area.header_text_set(None)#reset header + context.window.cursor_set("DEFAULT") + # print('Breakdown Cancelled')#Dbg + return {'CANCELLED'} + return {'RUNNING_MODAL'} + + def invoke(self, context, event): + ## cursors + ## 'DEFAULT', 'NONE', 'WAIT', 'CROSSHAIR', 'MOVE_X', 'MOVE_Y', 'KNIFE', 'TEXT', 'PAINT_BRUSH', 'PAINT_CROSS', 'DOT', 'ERASER', 'HAND', 'SCROLL_X', 'SCROLL_Y', 'SCROLL_XY', 'EYEDROPPER' + ## start checks + message = None + if context.area.type != 'VIEW_3D':message = 'View3D not found, cannot run operator' + obj = bpy.context.object# better use self.context + if not obj:message = 'no active object' + anim_data = obj.animation_data + if not anim_data:message = f'no animation data on obj: {obj.name}' + action = anim_data.action + if not action:message = f'no action on animation data of obj: {obj.name}' + if message: + self.report({'WARNING'}, message)# ERROR + return {'CANCELLED'} + + ## initiate variable to use + self.width = context.area.width# include sidebar... + ## with exclude sidebar >>> C.screen.areas[3].regions[5].width + + self.xmin = context.area.x + + self.xmouse = event.mouse_region_x + self.pressed_alt = event.alt + self.pressed_ctrl = event.ctrl + self.pressed_shift = event.shift + + self.cf = context.scene.frame_current + self.channels = ('location', 'rotation_euler', 'rotation_quaternion', 'scale') + + skipping = [] + found = 0 + same = 0 + + self.obdic = {} + + ## TODO for ob in context.selected objects, need to reduce list with upper filters... 
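+        ## Illustrative only (not used below): the breakdown of one channel is a plain
+        ## linear blend between the surrounding key values, the same formula used in
+        ## assign_transforms() and in the loop that follows.
+        def _breakdown_value(prv, nxt, percentage):
+            """e.g. _breakdown_value(2.0, 6.0, 25) -> 3.0"""
+            return prv + (percentage / 100.0) * (nxt - prv)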
+ + for fc in action.fcurves: + # if fc.data_path.split('"')[1] in bone_names_filter:# bones + # if fc.data_path.split('.')[-1] in channels and fc.array_index in axe:# bones + if fc.data_path in self.channels:# .split('.')[-1]# and fc.array_index in axe + fc_name = f'{fc.data_path}.{fc.array_index}' + pkf, nkf = get_surrounding_points(fc, frame = self.cf) + + if pkf is None or nkf is None: + # check previous, next keyframe (if one or both is missing, skip) + skipping.append(fc_name) + continue + + found +=1 + prv, nxt = pkf.co[1], nkf.co[1] + + if not obj in self.obdic: + self.obdic[obj] = {} + + if not fc.data_path in self.obdic[obj]: + self.obdic[obj][fc.data_path] = {} + + self.obdic[obj][fc.data_path][fc.array_index] = [prv, nxt] + + if prv == nxt: + same += 1 + else: + # exec(f'bpy.data.objects["{obj.name}"].{fc.data_path}[{fc.array_index}] = {((self.percentage() * (nxt - prv)) / 100) + prv}') + getattr(obj, fc.data_path)[fc.array_index] = ((self.percentage() * (nxt - prv)) / 100) + prv + + '''# debug print value dic + import pprint + print('\nDIC print: ') + pprint.pprint(self.obdic) + ''' + + if not found: + self.report({'ERROR'}, "No key pairs to breakdown found ! need to be between a key pair")# + return {'CANCELLED'} + + if found == same: + self.report({'ERROR'}, "All Key pairs found have same values")# + return {'CANCELLED'} + ## Starts the modal + context.window.cursor_set("SCROLL_X") + context.window_manager.modal_handler_add(self) + return {'RUNNING_MODAL'} + + +### --- KEYMAP --- + +breakdowner_addon_keymaps = [] +def register_keymaps(): + # pref = get_addon_prefs() + # if not pref.breakdowner_use_shortcut: + # return + + addon = bpy.context.window_manager.keyconfigs.addon + + try: + km = bpy.context.window_manager.keyconfigs.addon.keymaps["3D View"] + except Exception as e: + km = addon.keymaps.new(name = "3D View", space_type = "VIEW_3D") + pass + + ops_id = 'object.breakdown_anim' + if ops_id not in km.keymap_items: + km = addon.keymaps.new(name='3D View', space_type='VIEW_3D')#EMPTY + kmi = km.keymap_items.new(ops_id, type="E", value="PRESS", shift=True) + breakdowner_addon_keymaps.append((km, kmi)) + +def unregister_keymaps(): + for km, kmi in breakdowner_addon_keymaps: + km.keymap_items.remove(kmi) + + breakdowner_addon_keymaps.clear() + # del breakdowner_addon_keymaps[:] + +### --- REGISTER --- + +def register(): + if not bpy.app.background: + bpy.utils.register_class(OBJ_OT_breakdown_obj_anim) + register_keymaps() + +def unregister(): + if not bpy.app.background: + unregister_keymaps() + bpy.utils.unregister_class(OBJ_OT_breakdown_obj_anim) + + +if __name__ == "__main__": + register() \ No newline at end of file diff --git a/OP_canvas_rotate.py b/OP_canvas_rotate.py new file mode 100644 index 0000000..bcad0a8 --- /dev/null +++ b/OP_canvas_rotate.py @@ -0,0 +1,284 @@ +from .utils import get_addon_prefs + +## known issue: auto-perspective mess up when triggered out after rotation + +import bpy +import math +import mathutils +from bpy_extras.view3d_utils import location_3d_to_region_2d + +## draw utils +import gpu +import bgl +import blf +from gpu_extras.batch import batch_for_shader +from gpu_extras.presets import draw_circle_2d + +""" +Notes: + Samuel.B: + OpenGL drawing can be disabled by passing self.hud to False in invoke (mainly used for debugging) + + Base script by Jum, simplified and modified to work in both view and camera with rotate axis method suggested by Christophe Seux + + Jum: + Script base. 
Thanks to bigLarry and Jum + https://blender.stackexchange.com/questions/136183/rotating-camera-view-in-grease-pencil-draw-mode-in-blender-2-8 +""" + + +def draw_callback_px(self, context): + # 50% alpha, 2 pixel width line + shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR') + bgl.glEnable(bgl.GL_BLEND) + bgl.glLineWidth(2) + + # init + batch = batch_for_shader(shader, 'LINE_STRIP', {"pos": [self.center, self.initial_pos]})#self.vector_initial + shader.bind() + shader.uniform_float("color", (0.5, 0.5, 0.8, 0.6)) + batch.draw(shader) + + batch = batch_for_shader(shader, 'LINE_STRIP', {"pos": [self.center, self.pos_current]}) + shader.bind() + shader.uniform_float("color", (0.3, 0.7, 0.2, 0.5)) + batch.draw(shader) + + ## vector init vector current (substracted by center) + # batch = batch_for_shader(shader, 'LINE_STRIP', {"pos": [self.vector_initial, self.vector_current]}) + # shader.bind() + # shader.uniform_float("color", (0.5, 0.5, 0.5, 0.5)) + # batch.draw(shader) + + # restore opengl defaults + bgl.glLineWidth(1) + bgl.glDisable(bgl.GL_BLEND) + + ## text + + font_id = 0 + ## draw text debug infos + blf.position(font_id, 15, 30, 0) + blf.size(font_id, 20, 72) + blf.draw(font_id, f'angle: {math.degrees(self.angle):.1f}') + + +class RC_OT_RotateCanvas(bpy.types.Operator): + bl_idname = 'view3d.rotate_canvas' + bl_label = 'Rotate Canvas' + bl_options = {"REGISTER", "UNDO"} + + # @classmethod + # def poll(cls, context): + # return context.region_data.view_perspective == 'CAMERA' + """ + def get_center_view(self, area, cam): + ''' + https://blender.stackexchange.com/questions/6377/coordinates-of-corners-of-camera-view-border + Thanks to ideasman42 + ''' + region_3d = area.spaces[0].region_3d + for region in area.regions: + if region.type == 'WINDOW': + frame = cam.data.view_frame() + # if cam.parent: + # mat = cam.matrix_parent_inverse @ cam.matrix_world + # # mat = cam.parent.matrix_world @ cam.matrix_world# not inverse from parent + # else: + # mat = cam.matrix_world + + mat = cam.matrix_world + frame = [mat @ v for v in frame] + ## bpy.context.scene.cursor.location = frame[1]#DEBUG + frame_px = [location_3d_to_region_2d(region, region_3d, v) for v in frame] + + center_x = frame_px[2].x + (frame_px[0].x - frame_px[2].x)/2 + center_y = frame_px[1].y + (frame_px[0].y - frame_px[1].y)/2 + return mathutils.Vector((center_x, center_y)) + return None """ + + def get_center_view(self, context, cam): + ''' + https://blender.stackexchange.com/questions/6377/coordinates-of-corners-of-camera-view-border + Thanks to ideasman42 + ''' + + frame = cam.data.view_frame() + mat = cam.matrix_world + frame = [mat @ v for v in frame] + frame_px = [location_3d_to_region_2d(context.region, context.space_data.region_3d, v) for v in frame] + center_x = frame_px[2].x + (frame_px[0].x - frame_px[2].x)/2 + center_y = frame_px[1].y + (frame_px[0].y - frame_px[1].y)/2 + + return mathutils.Vector((center_x, center_y)) + + def execute(self, context): + if self.hud: + bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW') + context.area.tag_redraw() + if self.in_cam: + self.cam.rotation_mode = self.org_rotation_mode + return {'FINISHED'} + + def modal(self, context, event): + if event.type in {'MOUSEMOVE','INBETWEEN_MOUSEMOVE'}: + # Get current mouse coordination (region) + self.pos_current = mathutils.Vector((event.mouse_region_x, event.mouse_region_y)) + # Get current vector + self.vector_current = (self.pos_current - self.center).normalized() + # Calculates the angle between initial and current vectors + 
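+            # angle_signed() is only defined for 2D vectors and returns a signed angle in
+            # radians, so the view rotates with the drag direction around the pivot.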
self.angle = self.vector_initial.angle_signed(self.vector_current)#radian + # print (math.degrees(self.angle), self.vector_initial, self.vector_current) + + if self.in_cam: + self.cam.matrix_world = self.cam_matrix + # self.cam.rotation_euler = self.cam_init_euler + self.cam.rotation_euler.rotate_axis("Z", self.angle) + + else:#free view + context.space_data.region_3d.view_matrix = self.view_matrix + rot = context.space_data.region_3d.view_rotation + rot = rot.to_euler() + rot.rotate_axis("Z", self.angle) + context.space_data.region_3d.view_rotation = rot.to_quaternion() + + if event.type in {'RIGHTMOUSE', 'LEFTMOUSE', 'MIDDLEMOUSE'} and event.value == 'RELEASE': + self.execute(context) + return {'FINISHED'} + + if event.type == 'ESC':#Cancel + self.execute(context) + if self.in_cam: + self.cam.matrix_world = self.cam_matrix + else: + context.space_data.region_3d.view_matrix = self.view_matrix + return {'CANCELLED'} + + + return {'RUNNING_MODAL'} + + def invoke(self, context, event): + self.hud = False + self.angle = 0.0# for draw degub, else not needed + self.in_cam = context.region_data.view_perspective == 'CAMERA' + + if self.in_cam: + # Get camera from scene + self.cam = bpy.context.scene.camera + + ## avoid manipulating real cam or locked cams + if not 'manip_cams' in [c.name for c in self.cam.users_collection]: + self.report({'WARNING'}, 'Not in manipulation cam (draw/obj cam)') + return {'CANCELLED'} + + if self.cam.lock_rotation[:] != (False, False, False): + self.report({'WARNING'}, 'Camera rotation is locked') + return {'CANCELLED'} + + self.center = self.get_center_view(context, self.cam) + # store original rotation mode + self.org_rotation_mode = self.cam.rotation_mode + # set to euler to works with quaternions, restored at finish + self.cam.rotation_mode = 'XYZ' + # store camera matrix world + self.cam_matrix = self.cam.matrix_world.copy() + # self.cam_init_euler = self.cam.rotation_euler.copy() + + else: + self.center = mathutils.Vector((context.area.width/2, context.area.height/2)) + # store current view matrix + self.view_matrix = context.space_data.region_3d.view_matrix.copy() + + # Get current mouse coordination + self.pos_current = mathutils.Vector((event.mouse_region_x, event.mouse_region_y)) + + self.initial_pos = self.pos_current# for draw debug, else no need + # Calculate inital vector + self.vector_initial = self.pos_current - self.center + self.vector_initial.normalize() + + # Initializes the current vector with the same initial vector. 
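+        # (it is recomputed on every MOUSEMOVE; the copy just guarantees the attribute exists)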
+ self.vector_current = self.vector_initial.copy() + + args = (self, context) + if self.hud: + self._handle = bpy.types.SpaceView3D.draw_handler_add(draw_callback_px, args, 'WINDOW', 'POST_PIXEL') + context.window_manager.modal_handler_add(self) + return {'RUNNING_MODAL'} + + +class PREFS_OT_rebind(bpy.types.Operator): + """Rebind shortcuts canvas rotate shortcuts""" + bl_idname = "prefs.rebind_shortcut" + bl_label = "Rebind canvas rotate shortcut" + bl_options = {'REGISTER', 'INTERNAL'} + + def execute(self, context): + unregister_keymaps() + register_keymaps() + return{'FINISHED'} + +addon_keymaps = [] +def register_keymaps(): + pref = get_addon_prefs() + if not pref.canvas_use_shortcut: + return + addon = bpy.context.window_manager.keyconfigs.addon + + + """ ## NATIVE FREENAV BIND (left to right) + km = bpy.context.window_manager.keyconfigs.addon.keymaps.get("3D View") + if not km: + km = addon.keymaps.new(name = "3D View", space_type = "VIEW_3D") + # print('BINDING CANVAS ROTATE KEYMAPS')#Dbg + if 'view3d.view_roll' not in km.keymap_items: + # print('creating view3d.view_roll')#Dbg + # kmi = km.keymap_items.new("view3d.view_roll", type = 'MIDDLEMOUSE', value = "PRESS", ctrl=True, shift=False, alt=True)#PRESS#CLICK_DRAG + kmi = km.keymap_items.new("view3d.view_roll", type=pref.mouse_click, value = "PRESS", alt=pref.use_alt, ctrl=pref.use_ctrl, shift=pref.use_shift, any=False)#PRESS#CLICK_DRAG + kmi.properties.type = 'ANGLE' + addon_keymaps.append(km) + """ + km = bpy.context.window_manager.keyconfigs.addon.keymaps.get("3D View") + if not km: + km = addon.keymaps.new(name = "3D View", space_type = "VIEW_3D") + + if 'view3d.rotate_canvas' not in km.keymap_items: + # print('creating view3d.rotate_canvas')#Dbg + ## keymap to operator cam space (in grease pencil mode only ?) + km = addon.keymaps.new(name='3D View', space_type='VIEW_3D')#EMPTY #Grease Pencil + # kmi = km.keymap_items.new('view3d.rotate_canvas', 'MIDDLEMOUSE', 'PRESS', ctrl=True, shift=False, alt=True) + kmi = km.keymap_items.new('view3d.rotate_canvas', type=pref.mouse_click, value="PRESS", alt=pref.use_alt, ctrl=pref.use_ctrl, shift=pref.use_shift, any=False) + addon_keymaps.append((km, kmi)) + # print(addon_keymaps) + +def unregister_keymaps(): + # print('UNBIND CANVAS ROTATE KEYMAPS')#Dbg + for km, kmi in addon_keymaps: + km.keymap_items.remove(kmi) + addon_keymaps.clear() + # del addon_keymaps[:] + +canvas_classes = ( +PREFS_OT_rebind, +RC_OT_RotateCanvas, +# RC_OT_RotateCanvasFreeNav +) + +def register(): + if not bpy.app.background: + for cls in canvas_classes: + bpy.utils.register_class(cls) + register_keymaps() + # wm = bpy.context.window_manager + # km = wm.keyconfigs.addon.keymaps.new(name='Grease Pencil', space_type='EMPTY') + # kmi = km.keymap_items.new('view3d.rotate_canvas', 'MIDDLEMOUSE', 'PRESS', ctrl=True, shift=False, alt=True) + # addon_keymaps.append(km) + +def unregister(): + if not bpy.app.background: + unregister_keymaps() + for cls in reversed(canvas_classes): + bpy.utils.unregister_class(cls) + +# if __name__ == "__main__": +# register() \ No newline at end of file diff --git a/OP_copy_paste.py b/OP_copy_paste.py new file mode 100644 index 0000000..579f9aa --- /dev/null +++ b/OP_copy_paste.py @@ -0,0 +1,709 @@ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +## basec on GPclipboard 1.3.1 (without addon prefs) + +bl_info = { + "name": "GP clipboard", + "description": "Copy/Cut/Paste Grease Pencil strokes to/from OS clipboard across layers and blends", + "author": "Samuel Bernou", + "version": (1, 3, 1), + "blender": (2, 83, 0), + "location": "View3D > Toolbar > Gpencil > GP clipboard", + "warning": "", + "doc_url": "https://github.com/Pullusb/GP_clipboard", + "category": "Object" } + + +import bpy +import os +import mathutils +from mathutils import Vector +import json +from time import time +from operator import itemgetter +from itertools import groupby +# from pprint import pprint + +def convertAttr(Attr): + '''Convert given value to a Json serializable format''' + if isinstance(Attr, (mathutils.Vector,mathutils.Color)): + return Attr[:] + elif isinstance(Attr, mathutils.Matrix): + return [v[:] for v in Attr] + elif isinstance(Attr,bpy.types.bpy_prop_array): + return [Attr[i] for i in range(0,len(Attr))] + else: + return(Attr) + +def getMatrix (layer) : + matrix = mathutils.Matrix.Identity(4) + + if layer.is_parented: + if layer.parent_type == 'BONE': + object = layer.parent + bone = object.pose.bones[layer.parent_bone] + matrix = bone.matrix @ object.matrix_world + matrix = matrix.copy() @ layer.matrix_inverse + else : + matrix = layer.parent.matrix_world @ layer.matrix_inverse + + return matrix.copy() + +def dump_gp_point(p, l, obj): + '''add properties of a given points to a dic and return it''' + pdic = {} + #point_attr_list = ('co', 'pressure', 'select', 'strength') #select#'rna_type' + #for att in point_attr_list: + # pdic[att] = convertAttr(getattr(p, att)) + if l.parent: + mat = getMatrix(l) + pdic['co'] = convertAttr(obj.matrix_world @ mat @ getattr(p,'co')) + else: + pdic['co'] = convertAttr(obj.matrix_world @ getattr(p,'co')) + pdic['pressure'] = convertAttr(getattr(p,'pressure')) + # pdic['select'] = convertAttr(getattr(p,'select'))# need selection ? + pdic['strength'] = convertAttr(getattr(p,'strength')) + + ## get vertex color (long...) + if p.vertex_color[:] != (0.0, 0.0, 0.0, 0.0): + pdic['vertex_color'] = convertAttr(getattr(p,'vertex_color')) + + return pdic + + +def dump_gp_stroke_range(s, sid, l, obj): + '''Get a grease pencil stroke and return a dic with attribute + (points attribute being a dic of dics to store points and their attributes) + ''' + + sdic = {} + stroke_attr_list = ('line_width',) #'select'#read-only: 'triangles' + for att in stroke_attr_list: + sdic[att] = getattr(s, att) + + ## Dump following these value only if they are non default + if s.material_index != 0: + sdic['material_index'] = s.material_index + + if s.draw_cyclic: + sdic['draw_cyclic'] = s.draw_cyclic + + if s.uv_scale != 1.0: + sdic['uv_scale'] = s.uv_scale + + if s.uv_rotation != 0.0: + sdic['uv_rotation'] = s.uv_rotation + + if s.hardness != 1.0: + sdic['hardness'] = s.hardness + + if s.uv_translation != Vector((0.0, 0.0)): + sdic['uv_translation'] = convertAttr(s.uv_translation) + + if s.vertex_color_fill[:] != (0,0,0,0): + sdic['vertex_color_fill'] = convertAttr(s.vertex_color_fill) + + points = [] + if sid is None:#no ids, just full points... 
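+        ## sid: iterable of point indices to dump (a sub-stroke); None means dump every point of the stroke.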
+ for p in s.points: + points.append(dump_gp_point(p,l,obj)) + else: + for pid in sid: + points.append(dump_gp_point(s.points[pid],l,obj)) + sdic['points'] = points + return sdic + + + +def copycut_strokes(layers=None, copy=True, keep_empty=True):# (mayber allow filter) + ''' + copy all visibles selected strokes on active frame + layers can be None, a single layer object or list of layer object as filter + if keep_empty is False the frame is deleted when all strokes are cutted + ''' + t0 = time() + + ### must iterate in all layers ! (since all layers are selectable / visible !) + obj = bpy.context.object + gp = obj.data + gpl = gp.layers + # if not color:#get active color name + # color = gp.palettes.active.colors.active.name + if not layers: + #by default all visible layers + layers = [l for l in gpl if not l.hide and not l.lock]#[] + if not isinstance(layers, list): + #if a single layer object is send put in a list + layers = [layers] + + stroke_list = []#one stroke list for all layers. + + for l in layers: + f = l.active_frame + + if f:#active frame can be None + if not copy: + staylist = []#init part of strokes that must survive on this layer + + for s in f.strokes: + if s.select: + # separate in multiple stroke if parts of the strokes a selected. + sel = [i for i, p in enumerate(s.points) if p.select] + substrokes = []# list of list containing isolated selection + for k, g in groupby(enumerate(sel), lambda x:x[0]-x[1]):# continuity stroke have same substract result between point index and enumerator + group = list(map(itemgetter(1), g)) + substrokes.append(group) + + for ss in substrokes: + if len(ss) > 1:#avoid copy isolated points + stroke_list.append(dump_gp_stroke_range(s,ss,l,obj)) + + #Cutting operation + if not copy: + maxindex = len(s.points)-1 + if len(substrokes) == maxindex+1:#si un seul substroke, c'est le stroke entier + f.strokes.remove(s) + else: + neg = [i for i, p in enumerate(s.points) if not p.select] + + staying = [] + for k, g in groupby(enumerate(neg), lambda x:x[0]-x[1]): + group = list(map(itemgetter(1), g)) + #extend group to avoid gap when cut, a bit dirty + if group[0] > 0: + group.insert(0,group[0]-1) + if group[-1] < maxindex: + group.append(group[-1]+1) + staying.append(group) + + for ns in staying: + if len(ns) > 1: + staylist.append(dump_gp_stroke_range(s,ns,l,obj)) + #make a negative list containing all last index + + + '''#full stroke version + # if s.colorname == color: #line for future filters + stroke_list.append(dump_gp_stroke(s,l)) + #delete stroke on the fly + if not copy: + f.strokes.remove(s) + ''' + + if not copy: + # delete all selected strokes... + for s in f.strokes: + if s.select: + f.strokes.remove(s) + # ...recreate these uncutted ones + #pprint(staylist) + if staylist: + add_multiple_strokes(staylist, l) + #for ns in staylist:#weirdly recreate the stroke twice ! 
+ # add_stroke(ns, f, l) + + #if nothing left on the frame choose to leave an empty frame or delete it (let previous frame appear) + if not copy and not keep_empty:# + if not len(f.strokes): + l.frames.remove(f) + + + + print(len(stroke_list), 'strokes copied in', time()-t0, 'seconds') + #print(stroke_list) + return stroke_list + + +"""# Unused +def copy_all_strokes(layers=None): + ''' + copy all stroke, not affected by selection on active frame + layers can be None, a single layer object or list of layer object as filter + if keep_empty is False the frame is deleted when all strokes are cutted + ''' + t0 = time() + + scene = bpy.context.scene + obj = bpy.context.object + gp = obj.data + gpl = gp.layers + + if not layers: + # by default all visible layers + layers = [l for l in gpl if not l.hide and not l.lock]# include locked ? + if not isinstance(layers, list): + # if a single layer object is send put in a list + layers = [layers] + + stroke_list = []# one stroke list for all layers. + + for l in layers: + f = l.active_frame + + if not f: + continue# active frame can be None + + for s in f.strokes: + ## full stroke version + # if s.select: + stroke_list.append(dump_gp_stroke_range(s, None, l, obj)) + + print(len(stroke_list), 'strokes copied in', time()-t0, 'seconds') + #print(stroke_list) + return stroke_list +""" + +def copy_all_strokes_in_frame(frame=None, layers=None, obj=None): + ''' + copy all stroke, not affected by selection on active frame + layers can be None, a single layer object or list of layer object as filter + if keep_empty is False the frame is deleted when all strokes are cutted + ''' + t0 = time() + scene = bpy.context.scene + obj = bpy.context.object + gp = obj.data + gpl = gp.layers + + if not frame or not obj: + return + + if not layers: + # by default all visible layers + layers = [l for l in gpl if not l.hide and not l.lock]# include locked ? 
+ if not isinstance(layers, list): + # if a single layer object is send put in a list + layers = [layers] + + stroke_list = [] + + for l in layers: + f = l.active_frame + + if not f: + continue# active frame can be None + + for s in f.strokes: + ## full stroke version + # if s.select: + # send index of all points to get the whole stroke with "range" + stroke_list.append( dump_gp_stroke_range(s, [i for i in range(len(s.points))], l, obj) ) + + print(len(stroke_list), 'strokes copied in', time()-t0, 'seconds') + #print(stroke_list) + return stroke_list + +def add_stroke(s, frame, layer, obj): + '''add stroke on a given frame, (layer is for parentage setting)''' + # print(3*'-',s) + ns = frame.strokes.new() + + for att, val in s.items(): + if att not in ('points'): + setattr(ns, att, val) + pts_to_add = len(s['points']) + # print(pts_to_add, 'points')#dbg + + ns.points.add(pts_to_add) + + ob_mat_inv = obj.matrix_world.inverted() + + ## patch pressure 1 + # pressure_flat_list = [di['pressure'] for di in s['points']] #get all pressure flatened + + if layer.is_parented: + mat = getMatrix(layer).inverted() + for i, pt in enumerate(s['points']): + for k, v in pt.items(): + if k == 'co': + setattr(ns.points[i], k, v) + ns.points[i].co = ob_mat_inv @ mat @ ns.points[i].co# invert of object * invert of layer * coordinate + else: + setattr(ns.points[i], k, v) + else: + for i, pt in enumerate(s['points']): + for k, v in pt.items(): + if k == 'co': + setattr(ns.points[i], k, v) + ns.points[i].co = ob_mat_inv @ ns.points[i].co# invert of object * coordinate + else: + setattr(ns.points[i], k, v) + + + ## patch pressure 2 + # ns.points.foreach_set('pressure', pressure_flat_list) + +def add_multiple_strokes(stroke_list, layer=None, use_current_frame=True): + ''' + add a list of strokes to active frame of given layer + if no layer specified, active layer is used + if use_current_frame is True, a new frame will be created only if needed + ''' + scene = bpy.context.scene + obj = bpy.context.object + gp = obj.data + gpl = gp.layers + + #default: active + if not layer: + layer = gpl.active + + fnum = scene.frame_current + target_frame = False + act = layer.active_frame + for s in stroke_list: + if act: + if use_current_frame or act.frame_number == fnum: + #work on current frame if exists + # use current frame anyway if one key exist at this scene.frame + target_frame = act + + if not target_frame: + #no active frame + #or active exists but not aligned scene.current with use_current_frame disabled + target_frame = layer.frames.new(fnum) + + add_stroke(s, target_frame, layer, obj) + ''' + for s in stroke_data: + add_stroke(s, target_frame) + ''' + print(len(stroke_list), 'strokes pasted') + + +### OPERATORS + +class GPCLIP_OT_copy_strokes(bpy.types.Operator): + bl_idname = "gp.copy_strokes" + bl_label = "GP Copy strokes" + bl_description = "Copy strokes to str in paperclip" + bl_options = {"REGISTER"} + + #copy = bpy.props.BoolProperty(default=True) + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + def execute(self, context): + # if not context.object or not context.object.type == 'GPENCIL': + # self.report({'ERROR'},'No GP object selected') + # return {"CANCELLED"} + + t0 = time() + #ct = check_pressure() + strokelist = copycut_strokes(copy=True, keep_empty=True) + if not strokelist: + self.report({'ERROR'},'rien a copier') + return {"CANCELLED"} + bpy.context.window_manager.clipboard = json.dumps(strokelist)#copy=self.copy + #if ct: + # 
self.report({'ERROR'}, "Copie OK\n{} points ont une épaisseur supérieure a 1.0 (max = {:.2f})\nCes épaisseurs seront plafonnées à 1 au 'coller'".format(ct[0], ct[1])) + self.report({'INFO'}, f'Copied (time : {time() - t0:.4f})') + # print('copy total time:', time() - t0) + return {"FINISHED"} + + +class GPCLIP_OT_cut_strokes(bpy.types.Operator): + bl_idname = "gp.cut_strokes" + bl_label = "GP Cut strokes" + bl_description = "Cut strokes to str in paperclip" + bl_options = {"REGISTER"} + + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + def execute(self, context): + # if not context.object or not context.object.type == 'GPENCIL': + # self.report({'ERROR'},'No GP object selected') + # return {"CANCELLED"} + + t0 = time() + strokelist = copycut_strokes(copy=False, keep_empty=True)#ct = check_pressure() + if not strokelist: + self.report({'ERROR'},'Nothing to cut') + return {"CANCELLED"} + bpy.context.window_manager.clipboard = json.dumps(strokelist) + + self.report({'INFO'}, f'Cutted (time : {time() - t0:.4f})') + return {"FINISHED"} + +class GPCLIP_OT_paste_strokes(bpy.types.Operator): + bl_idname = "gp.paste_strokes" + bl_label = "GP Paste strokes" + bl_description = "paste stroke from paperclip" + bl_options = {"REGISTER"} + + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + def execute(self, context): + # if not context.object or not context.object.type == 'GPENCIL': + # self.report({'ERROR'},'No GP object selected to paste on') + # return {"CANCELLED"} + + t0 = time() + #add a validity check por the content of the paperclip (check if not data.startswith('[{') ? ) + try: + data = json.loads(bpy.context.window_manager.clipboard) + except: + mess = 'Clipboard does not contain drawing data (load error)' + self.report({'ERROR'}, mess) + return {"CANCELLED"} + + print('data loaded', time() - t0) + add_multiple_strokes(data, use_current_frame=True) + print('total_time', time() - t0) + + return {"FINISHED"} + +### --- multi copy + +class GPCLIP_OT_copy_multi_strokes(bpy.types.Operator): + bl_idname = "gp.copy_multi_strokes" + bl_label = "GP Copy multi strokes" + bl_description = "Copy multiple layers>frames>strokes (unlocked and unhided ones) to str in paperclip" + bl_options = {"REGISTER"} + + #copy = bpy.props.BoolProperty(default=True) + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + def execute(self, context): + bake_moves = True + skip_empty_frame = False + + org_frame = context.scene.frame_current + obj = context.object + gpl = obj.data.layers + t0 = time() + #ct = check_pressure() + layerdic = {} + + layerpool = [l for l in gpl if not l.hide and l.select]# and not l.lock + if not layerpool: + self.report({'ERROR'}, 'No layers selected in GP dopesheet (needs to be visible and selected to be copied)\nHint: Changing active layer reset selection to active only') + return {"CANCELLED"} + + if not bake_moves:# copy only drawed frames as is. 
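+            ## Plain mode: dump each existing GP keyframe as-is (frame_set is still called so
+            ## object/layer matrices are evaluated at that keyframe's time).
+            ## The 'else' branch below bakes motion: it also stores a frame for every scene frame
+            ## where the object's matrix_world changed, even without a new drawing.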
+ for l in layerpool: + if not l.frames: + continue# skip empty layers + + frame_dic = {} + for f in l.frames: + if skip_empty_frame and not len(f.strokes): + continue + context.scene.frame_set(f.frame_number)#use matrix of this frame + strokelist = copy_all_strokes_in_frame(frame=f, layers=l, obj=obj) + + frame_dic[f.frame_number] = strokelist + + layerdic[l.info] = frame_dic + + else:# bake position: copy frame where object as moved even if frame is unchanged + for l in layerpool: + if not l.frames: + continue# skip empty layers + + frame_dic = {} + + fnums_dic = {f.frame_number: f for f in l.frames} + + context.scene.frame_set(context.scene.frame_start) + curmat = prevmat = obj.matrix_world.copy() + + for i in range(context.scene.frame_start, context.scene.frame_end): + context.scene.frame_set(i)#use matrix of this frame + curmat = obj.matrix_world.copy() + + # if object has moved or current time is on a draw key + if prevmat != curmat or i in fnums_dic.keys(): + # get the current used frame + for j in fnums_dic.keys(): + if j >= i: + f = fnums_dic[j] + break + + ## skip empty frame if specified + if skip_empty_frame and not len(f.strokes): + continue + + strokelist = copy_all_strokes_in_frame(frame=f, layers=l, obj=obj) + frame_dic[i] = strokelist + + prevmat = curmat + layerdic[l.info] = frame_dic + + ## All to clipboard manager + bpy.context.window_manager.clipboard = json.dumps(layerdic) + + # reset original frame. + context.scene.frame_set(org_frame) + self.report({'INFO'}, f'Copied layers (time : {time() - t0:.4f})') + # print('copy total time:', time() - t0) + return {"FINISHED"} + +class GPCLIP_OT_paste_multi_strokes(bpy.types.Operator): + bl_idname = "gp.paste_multi_strokes" + bl_label = "GP paste multi strokes" + bl_description = "Paste multiple layers>frames>strokes from paperclip" + bl_options = {"REGISTER"} + + #copy = bpy.props.BoolProperty(default=True) + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + def execute(self, context): + org_frame = context.scene.frame_current + obj = context.object + gpl = obj.data.layers + t0 = time() + #add a validity check por the content of the paperclip (check if not data.startswith('[{') ? ) + try: + data = json.loads(bpy.context.window_manager.clipboard) + except: + mess = 'Clipboard does not contain drawing data (load error)' + self.report({'ERROR'}, mess) + return {"CANCELLED"} + + print('data loaded', time() - t0) + # add layers (or merge with existing names ?) + + ### structure + # {layername : + # {1: [strokelist of frame 1], 3: [strokelist of frame 3]} + # } + + for layname, allframes in data.items(): + layer = gpl.get(layname) + if not layer: + layer = gpl.new(layname) + for fnum, fstrokes in allframes.items(): + context.scene.frame_set(int(fnum))#use matrix of this frame for copying (maybe just evaluate depsgraph for object + add_multiple_strokes(fstrokes, use_current_frame=False)#create a new frame at each encoutered + + print('total_time', time() - t0) + + # reset original frame. 
+ context.scene.frame_set(org_frame) + self.report({'INFO'}, f'Copied layers (time : {time() - t0:.4f})') + # print('copy total time:', time() - t0) + return {"FINISHED"} + +##--PANEL + +class GPCLIP_PT_clipboard_ui(bpy.types.Panel): + # bl_idname = "gp_clipboard_panel" + bl_label = "GP Clipboard" + bl_space_type = "VIEW_3D" + bl_region_type = "UI" + bl_category = "Gpencil" + bl_options = {'DEFAULT_CLOSED'} + + def draw(self, context): + layout = self.layout + row = layout.row(align=True) + row.operator('gp.copy_strokes', text='Copy strokes', icon='COPYDOWN') + row.operator('gp.cut_strokes', text='Cut strokes', icon='PASTEFLIPUP') + layout.operator('gp.paste_strokes', text='Paste strokes', icon='PASTEDOWN') + layout.separator() + layout.operator('gp.copy_multi_strokes', text='Copy layers', icon='COPYDOWN') + layout.operator('gp.paste_multi_strokes', text='Paste layers', icon='PASTEDOWN') + +###---TEST zone + +""" +##main defs +def copy_strokes_to_paperclip(): + bpy.context.window_manager.clipboard = json.dumps(copycut_strokes(copy=True, keep_empty=True))#default layers are visible one + +def cut_strokes_to_paperclip(): + bpy.context.window_manager.clipboard = json.dumps(copycut_strokes(copy=False, keep_empty=True)) + +def paste_strokes_from_paperclip(): + #add condition to detect if clipboard contains loadable values + add_multiple_strokes(json.loads(bpy.context.window_manager.clipboard), use_current_frame=True)#layer= layers.active + +#copy_strokes_to_paperclip() +#paste_strokes_from_paperclip() + +#test direct +#li = copycut_strokes(copy=True) +#add_multiple_strokes(li, bpy.context.scene.grease_pencil.layers['correct']) +""" + + +#use directly operator idname in shortcut settings : +# gp.copy_strokes +# gp.cut_strokes +# gp.paste_strokes +# gp.copy_multi_strokes +# gp.paste_multi_strokes + +###---REGISTER + copy cut paste keymapping + +addon_keymaps = [] +def register_keymaps(): + addon = bpy.context.window_manager.keyconfigs.addon + km = addon.keymaps.new(name = "Grease Pencil", space_type = "EMPTY", region_type='WINDOW')# in Grease context + # km = addon.keymaps.new(name = "3D View", space_type = "VIEW_3D")# in 3D context + # km = addon.keymaps.new(name = "Window", space_type = "EMPTY")# from everywhere + + kmi = km.keymap_items.new("gp.copy_strokes", type = "C", value = "PRESS", ctrl=True, shift=True) + kmi.repeat = False + addon_keymaps.append((km, kmi)) + + kmi = km.keymap_items.new("gp.cut_strokes", type = "X", value = "PRESS", ctrl=True, shift=True) + kmi.repeat = False + addon_keymaps.append((km, kmi)) + + kmi = km.keymap_items.new("gp.paste_strokes", type = "V", value = "PRESS", ctrl=True, shift=True) + kmi.repeat = False + addon_keymaps.append((km, kmi)) + +def unregister_keymaps(): + # wm = bpy.context.window_manager + for km, kmi in addon_keymaps: + km.keymap_items.remove(kmi) + # wm.keyconfigs.addon.keymaps.remove(km) + addon_keymaps.clear() + + +classes = ( +GPCLIP_OT_copy_strokes, +GPCLIP_OT_cut_strokes, +GPCLIP_OT_paste_strokes, +GPCLIP_OT_copy_multi_strokes, +GPCLIP_OT_paste_multi_strokes, +GPCLIP_PT_clipboard_ui, +) + +def register(): + for cl in classes: + bpy.utils.register_class(cl) + + ## make scene property for empty key preservation and bake movement for layers... 
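+    ## Hedged sketch for the TODO above — not wired in, property names are placeholders:
+    # bpy.types.Scene.gpclip_keep_empty = bpy.props.BoolProperty(name="Keep empty frames", default=True)
+    # bpy.types.Scene.gpclip_bake_moves = bpy.props.BoolProperty(name="Bake object motion", default=True)
+    ## (the operators would read these instead of hardcoded keep_empty / bake_moves; del them in unregister)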
+ register_keymaps() + +def unregister(): + unregister_keymaps() + for cl in reversed(classes): + bpy.utils.unregister_class(cl) + +if __name__ == "__main__": + register() diff --git a/OP_cursor_snap_canvas.py b/OP_cursor_snap_canvas.py new file mode 100644 index 0000000..bfa910a --- /dev/null +++ b/OP_cursor_snap_canvas.py @@ -0,0 +1,182 @@ +## snap 3D cursor on active grease pencil object canvas surfaces +import bpy +import mathutils +from bpy_extras import view3d_utils +from .utils import get_gp_draw_plane, region_to_location, get_view_origin_position + +## override all sursor snap shortcut with this in keymap +class GPTB_OT_cusor_snap(bpy.types.Operator): + bl_idname = "view3d.cusor_snap" + bl_label = "Snap cursor to GP" + bl_description = "Snap 3d cursor to active GP object canvas (else use normal place)" + bl_options = {"REGISTER"}#, "INTERNAL" + + # @classmethod + # def poll(cls, context): + # return context.object and context.object.type == 'GPENCIL' + + def invoke(self, context, event): + #print('-!SNAP!-') + self.mouse_co = mathutils.Vector((event.mouse_region_x, event.mouse_region_y)) + # print('self.mouse_co: ', self.mouse_co) + self.execute(context) + return {"FINISHED"} + + def execute(self, context): + if not context.object or context.object.type != 'GPENCIL': + self.report({'INFO'}, 'Not GP, Cursor surface project') + bpy.ops.view3d.cursor3d('INVOKE_DEFAULT', use_depth=True, orientation='NONE')#'NONE', 'VIEW', 'XFORM', 'GEOM' + return {"FINISHED"} + + if context.region_data.view_perspective == 'ORTHO': + bpy.ops.view3d.cursor3d('INVOKE_DEFAULT', use_depth=True, orientation='NONE')#'NONE', 'VIEW', 'XFORM', 'GEOM' + self.report({'WARNING'}, 'Ortholinear ! not snaped to GP plane Cursor surface project)') + return {"FINISHED"} + + self.report({'INFO'}, 'Using GP picking') + settings = context.scene.tool_settings + orient = settings.gpencil_sculpt.lock_axis#'VIEW', 'AXIS_Y', 'AXIS_X', 'AXIS_Z', 'CURSOR' + loc = settings.gpencil_stroke_placement_view3d#'ORIGIN', 'CURSOR', 'SURFACE', 'STROKE' + + warning = [] + if not "AXIS" in orient: + warning.append(f'Orientation is {orient}, no depth picking') + + if loc != "ORIGIN": + warning.append(f"Location is '{loc}' not object 'ORIGIN'") + + if warning: + self.report({'WARNING'}, ', '.join(warning)) + + plane_co, plane_no = get_gp_draw_plane(context) + + if not plane_co:#default to object location + plane_co = context.object.matrix_world.to_translation()#context.object.location + + + if not plane_no:# use view depth (region_to_location instead of ) + coord = region_to_location(self.mouse_co, plane_co) + else: + #projected on given plane from view (intersect on plane with a vector from view origin) + origin = get_view_origin_position()#get view origin + region = bpy.context.region + rv3d = bpy.context.region_data + coord = mathutils.geometry.intersect_line_plane(origin, origin - view3d_utils.region_2d_to_vector_3d(region, rv3d, self.mouse_co), plane_co, plane_no) + # If no plane is crossed, intersect_line_plane return None which naturally goes to traceback... 
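+        ## (None happens when the view ray is parallel to the drawing plane, e.g. a perfectly edge-on view)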
+ + if not coord: + self.report({'WARNING'}, 'Ortholinear view, used basic cursor snap (no depth picking)') + + context.scene.cursor.location = coord + return {"FINISHED"} + + +#TODO auto-cursor (attach cursor to object) + + +''' cursor native snap +https://docs.blender.org/api/current/bpy.ops.view3d.html#bpy.ops.view3d.cursor3d +bpy.ops.view3d.cursor3d(use_depth=True, orientation='VIEW') + +Set the location of the 3D cursor +Parameters + use_depth (boolean, (optional)) – Surface Project, Project onto the surface + orientation (enum in ['NONE', 'VIEW', 'XFORM', 'GEOM'], (optional)) – + Orientation, Preset viewpoint to use + NONE None, Leave orientation unchanged. + VIEW View, Orient to the viewport. + XFORM Transform, Orient to the current transform setting. + GEOM Geometry, Match the surface normal. +''' + +def swap_keymap_by_id(org_idname, new_idname): + '''Replace id operator by another in user keymap''' + wm = bpy.context.window_manager + for cat, keymap in wm.keyconfigs.user.keymaps.items():#wm.keyconfigs.addon.keymaps.items(): + for k in keymap.keymap_items: + if k.idname != org_idname: + continue + ## Print changes + mods = ' + '.join([m for m in ('ctrl','shift','alt') if getattr(k, m)]) + val = f' ({k.value.lower()})' if k.value != 'PRESS' else '' + # ({keymap.space_type}) #VIEW_3D + print(f"Hotswap: {cat} - {k.name}: {mods + ' ' if mods else ''}{k.type}{val} : {k.idname} --> {new_idname}") + + k.idname = new_idname + + +# prev_matrix = mathutils.Matrix() +prev_matrix = None + +# @call_once(bpy.app.handlers.frame_change_post) + +def cursor_follow_update(self,context): + '''append or remove cursor_follow handler according a boolean''' + global prev_matrix + # imported in properties to register in boolprop update + if self.cursor_follow:#True + if not cursor_follow.__name__ in [hand.__name__ for hand in bpy.app.handlers.frame_change_post]: + if context.object: + prev_matrix = context.object.matrix_world + + bpy.app.handlers.frame_change_post.append(cursor_follow) + + else:#False + if cursor_follow.__name__ in [hand.__name__ for hand in bpy.app.handlers.frame_change_post]: + prev_matrix = None + bpy.app.handlers.frame_change_post.remove(cursor_follow) + + +def cursor_follow(scene): + '''Handler to make the cursor follow active object matrix changes on frame change''' + ## TODO update global prev_matrix to equal current_matrix on selection change (need another handler)... + if not bpy.context.object: + return + global prev_matrix + ob = bpy.context.object + current_matrix = ob.matrix_world + if not prev_matrix: + prev_matrix = current_matrix.copy() + return + + # debug prints : HANDLER CALLED TWICE in time line when clic (clic press, and clic release)!!! 
+ # print(scene.frame_current) + # print('prev: ', [[f'{j:.2f}' for j in i] for i in prev_matrix[:2] ]) + # print('curr: ', [[f'{j:.2f}' for j in i] for i in current_matrix[:2] ]) + + ## translation only + # scene.cursor.location += (current_matrix - prev_matrix).to_translation() + + # print('offset:', (current_matrix - prev_matrix).to_translation()) + + ## full + scene.cursor.location = current_matrix @ (prev_matrix.inverted() @ scene.cursor.location) + + # store for next use + prev_matrix = current_matrix.copy() + + +classes = ( +GPTB_OT_cusor_snap, +) + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + + # swap_keymap_by_id('view3d.cursor3d','view3d.cursor_snap')#auto swap to custom GP snap wrap + + # bpy.app.handlers.frame_change_post.append(cursor_follow) + + +def unregister(): + # bpy.app.handlers.frame_change_post.remove(cursor_follow) + + # swap_keymap_by_id('view3d.cursor_snap','view3d.cursor3d')#Restore normal snap + + for cls in reversed(classes): + bpy.utils.unregister_class(cls) + + # force remove handler if it's there at unregister + if cursor_follow.__name__ in [hand.__name__ for hand in bpy.app.handlers.frame_change_post]: + bpy.app.handlers.frame_change_post.remove(cursor_follow) \ No newline at end of file diff --git a/OP_file_checker.py b/OP_file_checker.py new file mode 100644 index 0000000..6281745 --- /dev/null +++ b/OP_file_checker.py @@ -0,0 +1,300 @@ +import bpy +import os +from pathlib import Path +from .utils import show_message_box, get_addon_prefs + +class GPTB_OT_file_checker(bpy.types.Operator): + bl_idname = "gp.file_checker" + bl_label = "File check" + bl_description = "Check / correct some aspect of the file, properties and such and report" + bl_options = {"REGISTER"} + + # @classmethod + # def poll(cls, context): + # return context.region_data.view_perspective == 'CAMERA' + + ## list of action : + # Lock main cam: + # set scene res + # set scene percentage at 100: + # set show slider and sync range + # set fps + # set cursor type + # GP use additive drawing (else creating a frame in dopesheet makes it blank...) 
+ # GP stroke placement/projection check + # Disabled animation + # Set onion skin filter to 'All type' + + def execute(self, context): + prefs = get_addon_prefs() + problems = [] + + ## Lock main cam: + if not 'layout' in Path(bpy.data.filepath).stem:#dont touch layout cameras + if context.scene.camera: + cam = context.scene.camera + if cam.name == 'draw_cam' and cam.parent: + if cam.parent.type == 'CAMERA': + cam = cam.parent + else: + cam = None + if cam: + triple = (True,True,True) + if cam.lock_location[:] != triple or cam.lock_rotation[:] != triple: + problems.append('Lock main camera') + cam.lock_location = cam.lock_rotation = triple + + ## set scene res at pref res according to addon pref + rx, ry = prefs.render_res_x, prefs.render_res_y + if context.scene.render.resolution_x != rx or context.scene.render.resolution_y != ry: + problems.append(f'Resolution {context.scene.render.resolution_x}x{context.scene.render.resolution_y} >> {rx}x{ry}') + context.scene.render.resolution_x, context.scene.render.resolution_y = rx, ry + + ## set scene percentage at 100: + if context.scene.render.resolution_percentage != 100: + problems.append('Resolution output to 100%') + context.scene.render.resolution_percentage = 100 + + ## set show slider and sync range + for window in bpy.context.window_manager.windows: + screen = window.screen + for area in screen.areas: + if area.type == 'DOPESHEET_EDITOR': + if hasattr(area.spaces[0], 'show_sliders'): + setattr(area.spaces[0], 'show_sliders', True) + + if hasattr(area.spaces[0], 'show_locked_time'): + setattr(area.spaces[0], 'show_locked_time', True) + + ## set fps according to preferences settings + if context.scene.render.fps != prefs.fps: + problems.append( (f"framerate corrected {context.scene.render.fps} >> {prefs.fps}", 'ERROR') ) + context.scene.render.fps = prefs.fps + + ## set cursor type (according to prefs ?) + if context.mode in ("EDIT_GPENCIL", "SCULPT_GPENCIL"): + tool = prefs.select_active_tool + if tool != 'none': + if bpy.context.workspace.tools.from_space_view3d_mode(bpy.context.mode, create=False).idname != tool: + bpy.ops.wm.tool_set_by_id(name=tool)# Tweaktoolcode + problems.append(f'tool changed to {tool.split(".")[1]}') + + ## GP use additive drawing (else creating a frame in dopesheet makes it blank...) 
+ if not context.scene.tool_settings.use_gpencil_draw_additive: + problems.append(f'Activated Gp additive drawing mode (snowflake)') + context.scene.tool_settings.use_gpencil_draw_additive = True + + ## GP stroke placement/projection check + if context.scene.tool_settings.gpencil_sculpt.lock_axis != 'AXIS_Y': + problems.append('/!\\ Draw axis not "Front" (Need Manual change if not Ok)') + + if bpy.context.scene.tool_settings.gpencil_stroke_placement_view3d != 'ORIGIN': + problems.append('/!\\ Draw placement not "Origin" (Need Manual change if not Ok)') + + ## Disabled animation + fcu_ct = 0 + for act in bpy.data.actions: + if not act.users: + continue + for fcu in act.fcurves: + if fcu.mute: + fcu_ct += 1 + print(f"muted: {act.name} > {fcu.data_path}") + if fcu_ct: + problems.append(f'{fcu_ct} anim channel disabled (details -> console)') + + ## Set onion skin filter to 'All type' + fix_kf_type = 0 + for gp in bpy.data.grease_pencils:#from data + if not gp.is_annotation: + if gp.onion_keyframe_type != 'ALL': + gp.onion_keyframe_type = 'ALL' + fix_kf_type += 1 + if fix_kf_type: + problems.append(f"{fix_kf_type} GP onion skin filter to 'All type'") + # for ob in context.scene.objects:#from object + # if ob.type == 'GPENCIL': + # ob.data.onion_keyframe_type = 'ALL' + + #### --- print fix/problems report + if problems: + print('===File check===') + for p in problems: + if isinstance(p, str): + print(p) + else: + print(p[0]) + # Show in viewport + show_message_box(problems, _title = "Changed Settings", _icon = 'INFO') + else: + self.report({'INFO'}, 'All good') + return {"FINISHED"} + + +""" OLD links checker with show_message_box +class GPTB_OT_links_checker(bpy.types.Operator): + bl_idname = "gp.links_checker" + bl_label = "Links check" + bl_description = "Check states of file direct links" + bl_options = {"REGISTER"} + + def execute(self, context): + all_lnks = [] + has_broken_link = False + ## check for broken links + for current, lib in zip(bpy.utils.blend_paths(local=True), bpy.utils.blend_paths(absolute=True, local=True)): + lfp = Path(lib) + realib = Path(current) + if not lfp.exists(): + has_broken_link = True + all_lnks.append( (f"Broken link: {realib.as_posix()}", 'LIBRARY_DATA_BROKEN') )#lfp.as_posix() + else: + if realib.as_posix().startswith('//'): + all_lnks.append( (f"Link: {realib.as_posix()}", 'LINKED') )#lfp.as_posix() + else: + all_lnks.append( (f"Link: {realib.as_posix()}", 'LIBRARY_DATA_INDIRECT') )#lfp.as_posix() + + all_lnks.sort(key=lambda x: x[1], reverse=True) + if all_lnks: + print('===File check===') + for p in all_lnks: + if isinstance(p, str): + print(p) + else: + print(p[0]) + # Show in viewport + show_message_box(all_lnks, _title = "Links", _icon = 'INFO') + return {"FINISHED"} """ + + +class GPTB_OT_links_checker(bpy.types.Operator): + bl_idname = "gp.links_checker" + bl_label = "Links check" + bl_description = "Check states of file direct links" + bl_options = {"REGISTER"} + + def execute(self, context): + return {"FINISHED"} + + def draw(self, context): + layout = self.layout + layout.label(text=self.title) + if self.broke_ct: + layout.label(text="You can try to scan for missing files:") + + ## How to launch directly without filebrowser ? 
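+        ## (untested idea: calling bpy.ops.file.find_missing_files(directory=...) from a script
+        ##  defaults to EXEC_DEFAULT, so it should run without opening the file browser;
+        ##  the buttons below keep the interactive flow on purpose)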
+ # in Shot folder + layout.operator('file.find_missing_files', text='in parent hierarchy').directory = Path(bpy.data.filepath).parents[1].as_posix() + if self.proj: + # In Library + layout.operator('file.find_missing_files', text='in library').directory = (Path(self.proj)/'library').as_posix() + # In all project + layout.operator('file.find_missing_files', text='in all project (last resort)').directory = self.proj + + + layout.separator() + for l in self.all_lnks: + if l[1] == 'LIBRARY_DATA_BROKEN': + layout.label(text=l[0], icon=l[1]) + else: + split=layout.split(factor=0.75) + split.label(text=l[0], icon=l[1]) + split.operator('wm.path_open', text='Open folder', icon='FILE_FOLDER').filepath = Path(bpy.path.abspath(l[0])).resolve().parent.as_posix() + split.operator('wm.path_open', text='Open file', icon='FILE_TICK').filepath = Path(bpy.path.abspath(l[0])).resolve().as_posix()#os.path.abspath(bpy.path.abspath(dirname(l[0]))) + + def invoke(self, context, event): + self.all_lnks = [] + self.title = '' + self.broke_ct = 0 + abs_ct = 0 + rel_ct = 0 + ## check for broken links + for current, lib in zip(bpy.utils.blend_paths(local=True), bpy.utils.blend_paths(absolute=True, local=True)): + lfp = Path(lib) + realib = Path(current) + if not lfp.exists(): + self.broke_ct += 1 + self.all_lnks.append( (f"{realib.as_posix()}", 'LIBRARY_DATA_BROKEN') )#lfp.as_posix() + else: + if realib.as_posix().startswith('//'): + rel_ct += 1 + self.all_lnks.append( (f"{realib.as_posix()}", 'LINKED') )#lfp.as_posix() + else: + abs_ct += 1 + self.all_lnks.append( (f"{realib.as_posix()}", 'LIBRARY_DATA_INDIRECT') )#lfp.as_posix() + + if not self.all_lnks: + self.report({'INFO'}, 'No external links in files') + return {"FINISHED"} + + bct = f"{self.broke_ct} broken " if self.broke_ct else '' + act = f"{abs_ct} absolute " if abs_ct else '' + rct = f"{rel_ct} clean " if rel_ct else '' + + self.title = f"{bct}{act}{rct}" + + self.all_lnks.sort(key=lambda x: x[1], reverse=True) + if self.all_lnks: + print('===File check===') + for p in self.all_lnks: + if isinstance(p, str): + print(p) + else: + print(p[0]) + # Show in viewport + + # if broke_ct == 0: + # show_message_box(self.all_lnks, _title = self.title, _icon = 'INFO')# Links + # return {"FINISHED"} + try: + self.proj = context.preferences.addons['pipe_sync'].preferences['local_folder'] + except: + self.proj = None + return context.window_manager.invoke_props_dialog(self, width=800) + +'''### OLD +class GPTB_OT_check_scene(bpy.types.Operator): + bl_idname = "gp.scene_check" + bl_label = "Check GP scene" + bl_description = "Check and fix scene settings" + bl_options = {"REGISTER"} + + @classmethod + def poll(cls, context): + return True + + def execute(self, context): + ## check scene resolution / 100% / framerate + context.scene.render.resolution_percentage = 100 + context.scene.render.resolution_x = 3072# define addon properties to make generic ? + context.scene.render.resolution_y = 1620# define addon properties to make generic ? + context.scene.render.fps = 24# define addon properties to make generic ? 
+ + ## check GP datas name + gp_os = [o for o in context.scene.objects if o.type == 'GPENCIL' if o.data.users == 1]#no multiple users + + for gpo in gp_os: + if gpo.data.name.startswith('Stroke'):# dont touch already renamed group + if gpo.data.name != gpo.name: + print('renaming GP data:', gpo.data.name, '-->', gpo.name) + gpo.data.name = gpo.name + + ## disable autolock + context.scene.tool_settings.lock_object_mode = False + + return {"FINISHED"} +''' + +classes = ( +# GPTB_OT_check_scene, +GPTB_OT_file_checker, +GPTB_OT_links_checker, +) + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + +def unregister(): + for cls in reversed(classes): + bpy.utils.unregister_class(cls) \ No newline at end of file diff --git a/OP_helpers.py b/OP_helpers.py new file mode 100644 index 0000000..fab7429 --- /dev/null +++ b/OP_helpers.py @@ -0,0 +1,546 @@ +import bpy +from mathutils import Vector#, Matrix +from pathlib import Path +from math import radians +from .utils import get_gp_objects, set_collection, show_message_box + +class GPTB_OT_copy_text(bpy.types.Operator): + bl_idname = "wm.copytext" + bl_label = "Copy to clipboard" + bl_description = "Insert passed text to clipboard" + bl_options = {"REGISTER", "INTERNAL"} + + text : bpy.props.StringProperty(name="cliptext", description="text to clip", default="") + + def execute(self, context): + context.window_manager.clipboard = self.text + mess = f'Clipboard: {context.window_manager.clipboard}' + self.report({'INFO'}, mess) + return {"FINISHED"} + +class GPTB_OT_flipx_view(bpy.types.Operator): + bl_idname = "gp.mirror_flipx" + bl_label = "cam mirror flipx" + bl_description = "Invert X scale on camera to flip image horizontally" + bl_options = {"REGISTER"} + + @classmethod + def poll(cls, context): + return context.region_data.view_perspective == 'CAMERA' + + def execute(self, context): + context.scene.camera.scale.x *= -1 + return {"FINISHED"} + + +class GPTB_OT_jump_gp_keyframe(bpy.types.Operator): + bl_idname = "screen.gp_keyframe_jump" + bl_label = "Jump to GPencil keyframe" + bl_description = "Jump to prev/next keyframe on active and selected layers of active grease pencil object" + bl_options = {"REGISTER"} + + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + next : bpy.props.BoolProperty( + name="Next GP keyframe", description="Go to next active GP keyframe", default=True) + + target : bpy.props.EnumProperty( + name="Target layer", description="Choose wich layer to evaluate for keyframe change", default='ACTIVE',# options={'ANIMATABLE'}, update=None, get=None, set=None, + items=( + ('ACTIVE', 'Active and selected', 'jump in keyframes of active and other selected layers ', 0), + ('VISIBLE', 'Visibles layers', 'jump in keyframes of visibles layers', 1), + ('ACCESSIBLE', 'Visible and unlocked layers', 'jump in keyframe of all layers', 2), + )) + #(key, label, descr, id[, icon]) + + def execute(self, context): + if not context.object.data.layers.active: + self.report({'ERROR'}, 'No active layer on current GPencil object') + return {"CANCELLED"} + + layer = [] + if self.target == 'ACTIVE': + gpl = [l for l in context.object.data.layers if l.select and not l.hide] + if not context.object.data.layers.active in gpl: + gpl.append(context.object.data.layers.active) + + elif self.target == 'VISIBLE': + gpl = [l for l in context.object.data.layers if not l.hide] + + elif self.target == 'ACCESSIBLE': + gpl = [l for l in context.object.data.layers if not l.hide and not l.lock] + + + 
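+        ## For every targeted layer, find the closest keyframe strictly before and strictly after
+        ## the current frame, then jump to the nearest candidate across all those layers.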
current = context.scene.frame_current + p = n = None + + mins = [] + maxs = [] + for l in gpl: + for f in l.frames: + if f.frame_number < current: + p = f.frame_number + if f.frame_number > current: + n = f.frame_number + break + mins.append(p) + maxs.append(n) + p = n = None + + mins = [i for i in mins if i is not None] + maxs = [i for i in maxs if i is not None] + + if mins: + p = max(mins) + if maxs: + n = min(maxs) + + if self.next and n is not None: + context.scene.frame_set(n) + elif not self.next and p is not None: + context.scene.frame_set(p) + else: + self.report({'INFO'}, 'No keyframe in this direction') + return {"CANCELLED"} + + return {"FINISHED"} + + +class GPTB_OT_rename_data_from_obj(bpy.types.Operator): + bl_idname = "gp.rename_data_from_obj" + bl_label = "Rename GP from object" + bl_description = "Rename the GP datablock with the same name as the object" + bl_options = {"REGISTER"} + + rename_all : bpy.props.BoolProperty(default=False) + + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + def execute(self, context): + if not self.rename_all: + obj = context.object + if obj.name == obj.data.name: + self.report({'WARNING'}, 'Nothing to rename') + return {"FINISHED"} + old = obj.data.name + obj.data.name = obj.name + self.report({'INFO'}, f'GP data renamed: {old} -> {obj.data.name}') + else: + oblist = [] + for o in context.scene.objects: + if o.type == 'GPENCIL': + if o.name == o.data.name: + continue + oblist.append(f'{o.data.name} -> {o.name}') + o.data.name = o.name + print('\nrenamed GP datablock:') + for i in oblist: + print(i) + self.report({'INFO'}, f'{len(oblist)} data renamed (see console for detail)') + + return {"FINISHED"} + +# TODO make secondary cam +# 2 solution : +# - parenting to main cam except for roll axis (drivers or simple parents) +# - Facing current "peg" (object) and parented to it to keep distance +# --> reset roll means aligning to object again (to main camera if one) or maybe align to global Z (as possible). 
+ +# other solution, button to disable all object Fcu evaluation (fix object movement while moving in timeline) + +# 1 ops to enter in manip/draw Cam (create if not exists) +# 1 ops to reset rotation +# 1 ops to swap between cam follow or object follow (toggle or two button), maybe accessible only when drawcam is active + +# hide camera that isn't used (playblast should always get main camera) + +def get_gp_alignement_vector(context): +#SETTINGS + settings = context.scene.tool_settings + orient = settings.gpencil_sculpt.lock_axis#'VIEW', 'AXIS_Y', 'AXIS_X', 'AXIS_Z', 'CURSOR' + loc = settings.gpencil_stroke_placement_view3d#'ORIGIN', 'CURSOR', 'SURFACE', 'STROKE' + + ### CHOOSE HOW TO PROJECT + + """ # -> placement + if loc == "CURSOR": + plane_co = scn.cursor.location + else:#ORIGIN (also on origin if set to 'SURFACE', 'STROKE') + plane_co = obj.location """ + + # -> orientation + if orient == 'VIEW': + #only depth is important, no need to get view vector + return None + + elif orient == 'AXIS_Y':#front (X-Z) + return Vector((0,1,0)) + + elif orient == 'AXIS_X':#side (Y-Z) + return Vector((1,0,0)) + + elif orient == 'AXIS_Z':#top (X-Y) + return Vector((0,0,1)) + + elif orient == 'CURSOR': + return Vector((0,0,1))#.rotate(context.scene.cursor.matrix) + +class GPTB_OT_draw_cam(bpy.types.Operator): + bl_idname = "gp.draw_cam_switch" + bl_label = "Draw cam switch" + bl_description = "switch between main camera and draw (manipulate) camera" + bl_options = {"REGISTER"} + + @classmethod + def poll(cls, context): + return context.scene.camera + # return context.region_data.view_perspective == 'CAMERA'# check if in camera + + cam_mode : bpy.props.StringProperty() + + def execute(self, context): + created=False + + if self.cam_mode == 'draw': + dcam_name = 'draw_cam' + else: + dcam_name = 'obj_cam' + act = context.object + if not act: + self.report({'ERROR'}, "No active object to lock on") + return {"CANCELLED"} + + if context.region_data.view_perspective == 'ORTHO': + self.report({'ERROR'}, "Can't be set in othographic view, swith to persp (numpad 5)") + return {"CANCELLED"} + + camcol_name = 'manip_cams' + if not context.scene.camera: + self.report({'ERROR'}, "No camera to return to") + return {"CANCELLED"} + + ## if already in draw_cam BACK to main camera + if context.scene.camera.name in ('draw_cam', 'obj_cam'): + drawcam = context.scene.camera + # get main cam and error if not available + if drawcam.name == 'draw_cam': + maincam = drawcam.parent + + else: + maincam = None + main_name = drawcam.get('maincam_name')# Custom prop with previous avtive cam. 
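+                ## ('maincam_name' is written on the obj_cam when it is first created, further down in this method)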
+ if main_name: + maincam = context.scene.objects.get(main_name) + + if not maincam: + cams = [ob for ob in context.scene.objects if ob.type == 'CAMERA' and not ob.name in ("draw_cam", "obj_cam")] + if not cams: + self.report({'ERROR'}, "Can't find any other camera to switch to...") + return {"CANCELLED"} + maincam = cams[0] + + # dcam_col = bpy.data.collections.get(camcol_name) + # if not dcam_col: + set_collection(drawcam, camcol_name) + + # Swap to it, unhide if necessary and hide previous + context.scene.camera = maincam + + ## hide cam object + drawcam.hide_viewport = True + maincam.hide_viewport = False + + ## if in main camera GO to drawcam + elif context.scene.camera.name not in ('draw_cam', 'obj_cam'): + # use current cam as main cam (more flexible than naming convention) + maincam = context.scene.camera + drawcam = context.scene.objects.get(dcam_name) + + if not drawcam: + created=True + drawcam = bpy.data.objects.new(dcam_name, context.scene.camera.data) + drawcam.show_name = True + set_collection(drawcam, 'manip_cams') + + if dcam_name == 'draw_cam': + drawcam.parent = maincam + if created:#set to main at creation time + drawcam.matrix_world = maincam.matrix_world + drawcam.lock_location = (True,True,True) + + else: + if created: + drawcam['maincam_name'] = context.scene.camera.name + drawcam.parent = act + drawcam.matrix_world = context.space_data.region_3d.view_matrix.inverted() + # Place cam from current view + ''' + drawcam.parent = act + vec = Vector((0,1,0)) + + if act.type == 'GPENCIL': + #change vector according to alignement + vec = get_gp_alignement_vector(context) + + vec = None #!# FORCE creation of cam at current viewpoint + if vec: + # Place drawcam at distance at standard distance from the object facing it + drawcam.location = act.matrix_world @ (vec * -6) + drawcam.rotation_euler = act.rotation_euler + drawcam.rotation_euler.x -= radians(-90) + else: + #Create cam at view point + drawcam.matrix_world = context.space_data.region_3d.view_matrix.inverted() + ''' + + ## hide cam object + context.scene.camera = drawcam + drawcam.hide_viewport = False + maincam.hide_viewport = True + + if created and drawcam.name == 'obj_cam':#Go in camera view + context.region_data.view_perspective = 'CAMERA' + # ## make active + # bpy.context.view_layer.objects.active = ob + + return {"FINISHED"} + + +class GPTB_OT_set_view_as_cam(bpy.types.Operator): + bl_idname = "gp.set_view_as_cam" + bl_label = "Cam at view" + bl_description = "Place the active camera at current viewpoint, parent to active object. (need to be out of camera)" + bl_options = {"REGISTER"} + + @classmethod + def poll(cls, context): + return context.region_data.view_perspective != 'CAMERA'# need to be out of camera + # return context.scene.camera and not context.scene.camera.name.startswith('Cam') + + def execute(self, context): + if context.region_data.view_perspective == 'ORTHO': + self.report({'ERROR'}, "Can't be set in othographic view") + return {"CANCELLED"} + ## switching to persp work in 2 times, but need update before... 
+ #context.area.tag_redraw() + #context.region_data.view_perspective = 'PERSP' + + cam = context.scene.camera + if not cam: + self.report({'ERROR'}, "No camera to set") + return {"CANCELLED"} + + obj = context.object + if obj and obj.type != 'CAMERA':# parent to object + cam.parent = obj + + if not cam.parent: + self.report({'WARNING'}, "No parents...") + + + # set view + cam.matrix_world = context.space_data.region_3d.view_matrix.inverted() + # Enter in cam view + #https://blender.stackexchange.com/questions/30643/how-to-toggle-to-camera-view-via-python + context.region_data.view_perspective = 'CAMERA' + + return {"FINISHED"} + + +class GPTB_OT_reset_cam_rot(bpy.types.Operator): + bl_idname = "gp.reset_cam_rot" + bl_label = "Reset rotation" + bl_description = "Reset rotation of the draw manipulation camera" + bl_options = {"REGISTER"} + + @classmethod + def poll(cls, context): + return context.scene.camera and not context.scene.camera.name.startswith('Cam') + # return context.region_data.view_perspective == 'CAMERA'# check if in camera + + def execute(self, context): + # dcam_name = 'draw_cam' + # camcol_name = 'manip_cams' + drawcam = context.scene.camera + if drawcam.parent.type == 'CAMERA': + ## align to parent camera + drawcam.matrix_world = drawcam.parent.matrix_world#wrong, get the parent rotation offset + # drawcam.rotation_euler = drawcam.parent.rotation_euler#wrong, get the parent rotation offset + elif drawcam.parent: + ## there is a parent, so align the Y of the camera to object's Z + # drawcam.rotation_euler.rotate(drawcam.parent.matrix_world)# wrong + pass + else: + self.report({'ERROR'}, "No parents to refer to for rotation reset") + return {"CANCELLED"} + + + return {"FINISHED"} + +class GPTB_OT_toggle_mute_animation(bpy.types.Operator): + bl_idname = "gp.toggle_mute_animation" + bl_label = "Toggle animation mute" + bl_description = "Enable/Disable animation evaluation\n(shift+clic to affect selection only)" + bl_options = {"REGISTER"} + + + mute : bpy.props.BoolProperty(default=False) + skip_gp : bpy.props.BoolProperty(default=False) + skip_obj : bpy.props.BoolProperty(default=False) + + def invoke(self, context, event): + self.selection = event.shift + return self.execute(context) + + def execute(self, context): + + if self.selection: + pool = context.selected_objects + else: + pool = context.scene.objects + + for o in pool: + if self.skip_gp and o.type == 'GPENCIL': + continue + if self.skip_obj and o.type != 'GPENCIL': + continue + + if not o.animation_data: + continue + act = o.animation_data.action + if not act: + continue + + for i, fcu in enumerate(act.fcurves): + print(i, fcu.data_path, fcu.array_index) + fcu.mute = self.mute + + return {'FINISHED'} + +class GPTB_OT_list_disabled_anims(bpy.types.Operator): + bl_idname = "gp.list_disabled_anims" + bl_label = "List disabled anims" + bl_description = "List disabled animations channels in scene. 
(shit+clic to list only on seleciton)" + bl_options = {"REGISTER"} + + mute : bpy.props.BoolProperty(default=False) + # skip_gp : bpy.props.BoolProperty(default=False) + # skip_obj : bpy.props.BoolProperty(default=False) + + def invoke(self, context, event): + self.selection = event.shift + return self.execute(context) + + def execute(self, context): + li = [] + oblist = [] + if self.selection: + pool = context.selected_objects + else: + pool = context.scene.objects + + for o in pool: + # if self.skip_gp and o.type == 'GPENCIL': + # continue + # if self.skip_obj and o.type != 'GPENCIL': + # continue + if not o.animation_data: + continue + act = o.animation_data.action + if not act: + continue + for i, fcu in enumerate(act.fcurves): + # print(i, fcu.data_path, fcu.array_index) + if fcu.mute: + if o not in oblist: + oblist.append(o) + li.append(f'{o.name} : {fcu.data_path} {fcu.array_index}') + else: + li.append(f'{" "*len(o.name)} - {fcu.data_path} {fcu.array_index}') + if li: + show_message_box(li) + else: + self.report({'INFO'}, f"No animation disabled on {'selection' if self.selection else 'scene'}") + return {'FINISHED'} + + +## TODO presets are still not used... need to make a custom preset save/remove/quickload manager to be efficient (UIlist ?) + +class GPTB_OT_overlay_presets(bpy.types.Operator): + bl_idname = "gp.overlay_presets" + bl_label = "Overlay presets" + bl_description = "Overlay save/load presets for showing only whats needed" + bl_options = {"REGISTER"} + + # @classmethod + # def poll(cls, context): + # return context.region_data.view_perspective == 'CAMERA' + val_dic = {} + + def execute(self, context): + self.zones = [bpy.context.space_data.overlay] + exclude = ( + ### add lines here to exclude specific attribute + 'bl_rna', 'identifier','name_property','rna_type','properties', 'compare', 'to_string',#basic + ) + if not self.val_dic: + ## store attribute of data_path in self.zones list. + for data_path in self.zones: + self.val_dic[data_path] = {} + for attr in dir(data_path):#iterate in attribute of given datapath + if attr not in exclude and not attr.startswith('__') and not callable(getattr(data_path, attr)) and not data_path.is_property_readonly(attr): + self.val_dic[data_path][attr] = getattr(data_path, attr) + # Do tomething with the dic (backup to a json ?) 
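+            ## (first run stores the current overlay settings in val_dic; running the operator again
+            ##  restores them in the 'else' branch below — val_dic is a class attribute, so the
+            ##  snapshot persists between calls)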
+ + else: + ## restore attribute from self.zones list + for data_path, prop_dic in self.val_dic.items(): + for attr, val in prop_dic.items(): + try: + setattr(data_path, attr, val) + except Exception as e: + print(f"/!\ Impossible to re-assign: {attr} = {val}") + print(e) + ''' + overlay = context.space_data.overlay + # still need ref + + overlay.show_extras = not val + overlay.show_outline_selected = val + overlay.show_object_origins = val + overlay.show_motion_paths = val + overlay.show_relationship_lines = val + overlay.show_bones = val + overlay.show_annotation = val + overlay.show_text = val + overlay.show_cursor = val + overlay.show_floor = val + overlay.show_axis_y = val + overlay.show_axis_x = val + ''' + + return {'FINISHED'} + +classes = ( +GPTB_OT_copy_text, +GPTB_OT_flipx_view, +GPTB_OT_jump_gp_keyframe, +GPTB_OT_rename_data_from_obj, +GPTB_OT_draw_cam, +GPTB_OT_set_view_as_cam, +GPTB_OT_reset_cam_rot, +GPTB_OT_toggle_mute_animation, +GPTB_OT_list_disabled_anims, +) + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + +def unregister(): + for cls in reversed(classes): + bpy.utils.unregister_class(cls) \ No newline at end of file diff --git a/OP_palettes.py b/OP_palettes.py new file mode 100644 index 0000000..b33e7cc --- /dev/null +++ b/OP_palettes.py @@ -0,0 +1,259 @@ +import bpy +import json +import os +from bpy_extras.io_utils import ImportHelper, ExportHelper +from pathlib import Path +from .utils import convert_attr, get_addon_prefs + + +### --- Json serialized material load/save + +def load_palette(context, filepath): + with open(filepath, 'r') as fd: + mat_dic = json.load(fd) + # from pprint import pprint + # pprint(mat_dic) + + ob = context.object + for mat_name, attrs in mat_dic.items(): + curmat = bpy.data.materials.get(mat_name) + if curmat:#exists + if curmat.is_grease_pencil: + if curmat not in ob.data.materials[:]:# add only if it's not already there + ob.data.materials.append(curmat) + continue + else: + mat_name = mat_name+'.01'#rename to avoid conflict + + ## to create a GP mat (from https://developer.blender.org/T67102) + mat = bpy.data.materials.new(name=mat_name) + bpy.data.materials.create_gpencil_data(mat)#cast to GP mat + + ob.data.materials.append(mat) + for attr, value in attrs.items(): + setattr(mat.grease_pencil, attr, value) + + +class GPTB_OT_load_default_palette(bpy.types.Operator): + bl_idname = "gp.load_default_palette" + bl_label = "Load basic palette" + bl_description = "Load a material palette on the current GP object\nif material name already exists in scene it will uses these" + bl_options = {"REGISTER", "INTERNAL"} + + # path_to_pal : bpy.props.StringProperty(name="paht to palette", description="path to the palette", default="") + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + def execute(self, context): + # Start Clean (delete unuesed sh*t) + bpy.ops.object.material_slot_remove_unused() + #Rename default solid stroke if still there + line = context.object.data.materials.get('Black') + if line: + line.name = 'line' + if not line: + line = context.object.data.materials.get('Solid Stroke') + if line: + line.name = 'line' + + # load json + pfp = Path(bpy.path.abspath(get_addon_prefs().palette_path)) + if not pfp.exists(): + self.report({'ERROR'}, f'Palette path not found') + return {"CANCELLED"} + + base = pfp / 'base.json' + if not base.exists(): + self.report({'ERROR'}, f'base.json palette not found in {pfp.as_posix()}') + return {"CANCELLED"} + + 
load_palette(context, base) + self.report({'INFO'}, f'Loaded base Palette') + + return {"FINISHED"} + + +class GPTB_OT_load_palette(bpy.types.Operator, ImportHelper): + bl_idname = "gp.load_palette" + bl_label = "Load palette" + bl_description = "Load a material palette on the current GP object\nif material name already exists in scene it will uses these" + #bl_options = {"REGISTER", "INTERNAL"} + + # path_to_pal : bpy.props.StringProperty(name="paht to palette", description="path to the palette", default="") + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + filename_ext = '.json' + + filter_glob: bpy.props.StringProperty(default='*.json', options={'HIDDEN'} )#*.jpg;*.jpeg;*.png;*.tif;*.tiff;*.bmp + + filepath : bpy.props.StringProperty( + name="File Path", + description="File path used for import", + maxlen= 1024) + + def execute(self, context): + # load json + load_palette(context, self.filepath) + self.report({'INFO'}, f'settings loaded from: {os.path.basename(self.filepath)}') + + return {"FINISHED"} + + +class GPTB_OT_save_palette(bpy.types.Operator, ExportHelper): + bl_idname = "gp.save_palette" + bl_label = "save palette" + bl_description = "Save a material palette from material on current GP object." + #bl_options = {"REGISTER", "INTERNAL"} + + # path_to_pal : bpy.props.StringProperty(name="paht to palette", description="path to the palette", default="") + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + filter_glob: bpy.props.StringProperty(default='*.json', options={'HIDDEN'})#*.jpg;*.jpeg;*.png;*.tif;*.tiff;*.bmp + + filename_ext = '.json' + + filepath : bpy.props.StringProperty( + name="File Path", + description="File path used for export", + maxlen= 1024) + + def execute(self, context): + ob = context.object + + exclusions = ('bl_rna', 'rna_type') + # save json + dic = {} + allmat=[] + for mat in ob.data.materials: + if not mat.is_grease_pencil: + continue + if mat in allmat: + continue + allmat.append(mat) + + dic[mat.name] = {} + + for attr in dir(mat.grease_pencil): + if attr.startswith('__'): + continue + if attr in exclusions: + continue + if mat.grease_pencil.bl_rna.properties[attr].is_readonly:#avoid readonly + continue + + dic[mat.name][attr] = convert_attr(getattr(mat.grease_pencil, attr)) + + if not dic: + self.report({'ERROR'}, f'No materials on this GP object') + return {"CANCELLED"} + + # export + with open(self.filepath, 'w') as fd: + json.dump(dic, fd, indent='\t') + + self.report({'INFO'}, f'Palette saved: {self.filepath}')#WARNING, ERROR + return {"FINISHED"} + + +### --- Direct material append/link from blend file + + +def load_blend_palette(context, filepath): + '''Load materials on current active object from current chosen blend''' + #from pathlib import Path + #palette_fp = C.preferences.addons['GP_toolbox'].preferences['palette_path'] + #fp = Path(palette_fp) / 'christina.blend' + print(f'-- import palette from : {filepath} --') + for ob in context.selected_objects: + if ob.type != 'GPENCIL': + print(f'{ob.name} not a GP object') + continue + + print('\n', ob.name, ':') + obj_mats = [m.name for m in ob.data.materials if m]# can found Nonetype + scene_mats = [m.name for m in bpy.data.materials] + + # Link into the blend file + with bpy.data.libraries.load(filepath, link=False) as (data_from, data_to): + for name in data_from.materials: + if name.lower() in ('bg', 'line', 'dots stroke'): + continue + + if name in obj_mats: + print(f"!- {name} 
already in object materials") + continue + + if name in scene_mats: + print(f'- {name} (found in scene)') + ob.data.materials.append(bpy.data.materials[name]) + continue + ## TODO find a way to Update color !... complex... + + data_to.materials.append(name) + + if not data_to.materials: + # print('Nothing to link/append from lib palette!') + continue + + print('From palette append:') + for mat in data_to.materials: + print(f'- {mat.name}') + ob.data.materials.append(mat) + + print(f'-- import Done --') + + ## list sources in a palette txt data block + palette_txt = bpy.data.texts.get('palettes') + if not palette_txt: + palette_txt = bpy.data.texts.new('palettes') + + lines = [l.body for l in palette_txt.lines] + if not os.path.basename(filepath) in lines: + palette_txt.write('\n' + os.path.basename(filepath)) + +class GPTB_OT_load_blend_palette(bpy.types.Operator, ImportHelper): + bl_idname = "gp.load_blend_palette" + bl_label = "Load colo palette" + bl_description = "Load a material palette from blend file on the current GP object\nif material name already exists in scene it will uses these" + #bl_options = {"REGISTER", "INTERNAL"} + + # path_to_pal : bpy.props.StringProperty(name="paht to palette", description="path to the palette", default="") + @classmethod + def poll(cls, context): + return context.object and context.object.type == 'GPENCIL' + + filename_ext = '.blend' + + filter_glob: bpy.props.StringProperty(default='*.blend', options={'HIDDEN'} )#*.jpg;*.jpeg;*.png;*.tif;*.tiff;*.bmp + + filepath : bpy.props.StringProperty( + name="File Path", + description="File path used for import", + maxlen= 1024) + + def execute(self, context): + # load json + load_blend_palette(context, self.filepath) + self.report({'INFO'}, f'materials loaded from: {os.path.basename(self.filepath)}') + + return {"FINISHED"} + +classes = ( +GPTB_OT_load_palette, +GPTB_OT_save_palette, +GPTB_OT_load_default_palette, +GPTB_OT_load_blend_palette, +) + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + +def unregister(): + for cls in reversed(classes): + bpy.utils.unregister_class(cls) \ No newline at end of file diff --git a/OP_playblast.py b/OP_playblast.py new file mode 100644 index 0000000..8b3012d --- /dev/null +++ b/OP_playblast.py @@ -0,0 +1,201 @@ +import bpy +import os +from os import listdir, scandir +from os.path import join, dirname, basename, exists, isfile, isdir, splitext +import re, fnmatch, glob +from pathlib import Path +from time import strftime +C = bpy.context +D = bpy.data + +from .utils import open_file, open_folder, get_addon_prefs + +exclude = ( +### add lines here to exclude specific attribute +'bl_rna', 'identifier','name_property','rna_type','properties', 'compare', 'to_string',#basic +) + +""" + rd_keep = [ + "resolution_percentage", + "resolution_x", + "resolution_y", + "filepath", + "use_stamp", + "stamp_font_size", + ] + im_keep = [ + 'file_format', + 'color_mode', + 'quality', + 'compression', + ] + ff_keep = [ + 'codec', + 'format', + 'constant_rate_factor', + 'ffmpeg_preset', + 'gopsize', + 'audio_codec', + 'audio_bitrate', + ] + """ +def render_with_restore(): + class RenderFileRestorer: + rd = bpy.context.scene.render + im = rd.image_settings + ff = rd.ffmpeg + # ffmpeg (ff) need to be before image_settings(im) in list + # otherwise __exit__ may try to restore settings of image mode in video mode ! 
+ # ex : "RGBA" not found in ('BW', 'RGB') (will still not stop thx to try block) + + zones = [rd, ff, im] + + val_dic = {} + cam = bpy.context.scene.camera + def __enter__(self): + ## store attribute of data_path in self.zones list. + for data_path in self.zones: + self.val_dic[data_path] = {} + for attr in dir(data_path):#iterate in attribute of given datapath + if attr not in exclude and not attr.startswith('__') and not callable(getattr(data_path, attr)) and not data_path.is_property_readonly(attr): + self.val_dic[data_path][attr] = getattr(data_path, attr) + + if self.cam and self.cam.name == 'draw_cam': + if self.cam.parent: + bpy.context.scene.camera = self.cam.parent + + def __exit__(self, type, value, traceback): + ## restore attribute from self.zones list + for data_path, prop_dic in self.val_dic.items(): + for attr, val in prop_dic.items(): + try: + setattr(data_path, attr, val) + except Exception as e: + print(f"/!\ Impossible to re-assign: {attr} = {val}") + print(e) + + if self.cam: + bpy.context.scene.camera = self.cam + + + return RenderFileRestorer() + + +def playblast(viewport = False, stamping = True): + scn = bpy.context.scene + res_factor = scn.gptoolprops.resolution_percentage + rd = scn.render + ff = rd.ffmpeg + with render_with_restore(): + ### can add propeties for personalisation as toolsetting props + + rd.resolution_percentage = res_factor + while ( rd.resolution_x * res_factor / 100 ) % 2 != 0:# rd.resolution_percentage + rd.resolution_x = rd.resolution_x + 1 + while ( rd.resolution_y * res_factor / 100 ) % 2 != 0:# rd.resolution_percentage + rd.resolution_y = rd.resolution_y + 1 + + rd.image_settings.file_format = 'FFMPEG' + ff.format = 'MPEG4' + ff.codec = 'H264' + ff.constant_rate_factor = 'HIGH'# MEDIUM + ff.ffmpeg_preset = 'REALTIME' + ff.gopsize = 10 + ff.audio_codec = 'AAC' + ff.audio_bitrate = 128 + rd.use_sequencer = False + rd.stamp_background = (0.0, 0.0, 0.0, 0.75)# blacker notes BG (default 0.25) + # rd.use_compositing + + # rd.filepath = join(dirname(bpy.data.filepath), basename(bpy.data.filepath)) + # rd.frame_path(frame=0, preview=0, view="_sauce")## give absolute render filepath with some suffix + # rd.is_movie_format# check if its movie mode + + ## set filepath + # mode incermental or just use fulldate (cannot create conflict and filter OK but long name) + blend = Path(bpy.data.filepath) + date_format = "%Y-%m-%d_%H-%M-%S" + fp = join(blend.parent, "images", f'playblast_{blend.stem}_{strftime(date_format)}.mp4') + + #may need a properties for choosing location : bpy.types.Scene.qrd_savepath = bpy.props.StringProperty(subtype='DIR_PATH', description="Export location, if not specify, create a 'quick_render' directory aside blend location")#(change defaut name in user_prefernece) + rd.filepath = fp + rd.use_stamp = stamping# toolsetting.use_stamp# True for playblast + #stamp options + rd.stamp_font_size = rd.stamp_font_size * res_factor / 100# rd.resolution_percentage + + # bpy.ops.render.render_wrap(use_view=viewport) + ### render + if viewport:## openGL + bpy.ops.render.opengl(animation=True, view_context=True)# 'INVOKE_DEFAULT', + + else:## normal render + bpy.ops.render.render(animation=True)# 'INVOKE_DEFAULT', + + # print("Playblast Done :", fp)#Dbg + return fp + + +class PBLAST_OT_playblast_anim(bpy.types.Operator): + bl_idname = "render.playblast_anim" + bl_label = "Playblast anim" + bl_description = "Launch animation playblast, use resolution percentage (Lock blender during process)" + bl_options = {"REGISTER"} + + use_view : 
bpy.props.BoolProperty(name='use_view', default=False) + + def execute(self, context): + if not bpy.data.is_saved: + self.report({'ERROR'}, 'File is not saved, Playblast cancelled') + return {"CANCELLED"} + + + fp = playblast(viewport = self.use_view, stamping = True) + if fp: + self.report({'INFO'}, f'File saved at: {fp}') + addon_prefs = get_addon_prefs() + if addon_prefs: + if addon_prefs.playblast_auto_play: + open_file(fp) + if addon_prefs.playblast_auto_open_folder: + open_folder(dirname(fp)) + + return {"FINISHED"} + +def register(): + bpy.utils.register_class(PBLAST_OT_playblast_anim) + +def unregister(): + bpy.utils.unregister_class(PBLAST_OT_playblast_anim) + +''' +## Potential cancelling method for image sequence rendering. +for cfra in range(start, end+1): + print("Baking frame " + str(cfra)) + + # update scene to new frame and bake to template image + scene.frame_set(cfra) + ret = bpy.ops.object.bake_image() + if 'CANCELLED' in ret: + return {'CANCELLED'} +''' + +""" +class PBLAST_OT_render_wrap(bpy.types.Operator): + bl_idname = "render.render_wrap" + bl_label = "Render wraped" + bl_description = "render" + bl_options = {"REGISTER"}## need hide + + use_view : bpy.props.BoolProperty(name='use_view', default=False) + + def execute(self, context): + if self.use_view:## openGL + ret = bpy.ops.render.opengl('INVOKE_DEFAULT', animation=True, view_context=True) + else:## normal render + ret = bpy.ops.render.render('INVOKE_DEFAULT', animation=True) + return {"FINISHED"} + """ + +""" if __name__ == "__main__": + register() """ \ No newline at end of file diff --git a/OP_playblast_bg.py b/OP_playblast_bg.py new file mode 100644 index 0000000..32c6e55 --- /dev/null +++ b/OP_playblast_bg.py @@ -0,0 +1,384 @@ +import bpy +import os +from os import listdir, scandir +from os.path import join, dirname, basename, exists, isfile, isdir, splitext +import re, fnmatch, glob +from pathlib import Path +from time import strftime + +import subprocess, threading + +# viewport draw +import gpu, blf +from gpu_extras.batch import batch_for_shader + +from .utils import open_file, open_folder, get_addon_prefs, detect_OS + +## based on playblaster + +exclude = ( +### add lines here to exclude specific attribute +'bl_rna', 'identifier','name_property','rna_type','properties', 'compare', 'to_string',#basic +) + +def delete_file(filepath) : + try: + if os.path.isfile(filepath) : + print('removing', filepath) + os.remove(filepath) + return True + except PermissionError: + print(f'impossible to remove {filepath}') + return False + +# render function +def render_function(cmd, total_frame, scene) : + debug = bpy.context.window_manager.pblast_debug + # launch rendering + if debug : print(cmd) + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + + frame_count = 0 + while True : + if not bpy.context.window_manager.pblast_is_rendering : + # print('!!! 
Not rendering') + if detect_OS() == 'Windows': + subprocess.Popen(f"TASKKILL /F /PID {process.pid} /T") + else: + process.kill() + break + + line = process.stdout.readline() + if line != '' : + #debug + if debug : print(line) + if b'Traceback' in line: + print('/!\\ Traceback in line return') + if b"Append frame " in line : + frame_count += 1 + try : + # print('frame_count: ', frame_count, 'total_frame: ', total_frame) + bpy.context.window_manager.pblast_completion = frame_count / total_frame * 100 + except AttributeError : + #debug + if debug : print("AttributeError avoided") + pass + + if b"Blender quit" in line : + break + else: + break + + print('ENDED') + +# launch threading +def threading_render(arguments) : + render_thread = threading.Thread(target=render_function, args=arguments) + render_thread.start() + +# callback for loading bar in 3D view +def draw_callback_px(self, context): + # get color and size of progress bar + # prefs = get_addon_prefs() + + color_bar = [1, 1, 1] # prefs.progress_bar_color + background = [0.2, 0.2, 0.2] # prefs.progress_bar_background_color + bar_thickness = 10 # prefs.progress_bar_size + + # Progress Bar + width = context.area.width + + # context.window_manager.pblast_completion + complete = self.completion / 100# context.window_manager.pblast_completion / 100 + size = int(width * complete) + + # rectangle background + vertices_2 = ( + (0, 0), (width, 0), + (0, bar_thickness + 20), (width, bar_thickness + 20)) + + indices = ( + (0, 1, 2), (2, 1, 3)) + + shader2 = gpu.shader.from_builtin('2D_UNIFORM_COLOR') + batch2 = batch_for_shader(shader2, 'TRIS', {"pos": vertices_2}, indices=indices) + + shader2.bind() + shader2.uniform_float("color", [*background, 1]) + batch2.draw(shader2) + + # rectangle 1 + vertices = ( + (0, 0), (size, 0), + (0, bar_thickness), (size, bar_thickness)) + + shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR') + batch = batch_for_shader(shader, 'TRIS', {"pos": vertices}, indices=indices) + + shader.bind() + shader.uniform_float("color", [*color_bar, 1]) + batch.draw(shader) + + # Text + # text = f'Playblast in Progress ({complete}%) - Shift + Esc to Cancel' + text = f'Playblast in Progress - Shift + Esc to Cancel' + + blf.color(0, *color_bar, 1) + blf.size(0, 12, 72) + blf.position(0, 10, bar_thickness + 5, 0) + blf.draw(0, text) + +class BGBLAST_OT_playblast_modal_check(bpy.types.Operator): + '''Modal and external render from Samy Tichadou (Tonton)''' + + bl_idname = "render.playblast_modal_check" + bl_label = "Playblast Modal Check" + bl_options = {'INTERNAL'} + + _timer = None + + blend_pb : bpy.props.StringProperty() + video_pb : bpy.props.StringProperty() + + @classmethod + def poll(cls, context): + return context.window_manager.pblast_is_rendering + + def modal(self, context, event): + self.completion = context.window_manager.pblast_completion + + # redraw area + try: + context.area.tag_redraw() + except AttributeError: + pass + + # handle cancelling + if event.type in {'ESC'} and event.shift: + self.cancel(context) + return {'CANCELLED'} + + if event.type == 'TIMER' : + if self.completion == 100 : + self.finish(context) + return {'FINISHED'} + + return {'PASS_THROUGH'} + + # def invoke(self, context, event): + def execute(self, context): + context.window_manager.pblast_completion = 0 + wm = context.window_manager + args = (self, context) + wm = context.window_manager + self.debug = wm.pblast_debug + self.completion = wm.pblast_completion + + self._timer = wm.event_timer_add(0.1, window = context.window) + self._handle = 
bpy.types.SpaceView3D.draw_handler_add(draw_callback_px, args, 'WINDOW', 'POST_PIXEL') + wm.modal_handler_add(self) + return {'RUNNING_MODAL'} + + def cancel(self, context): + print('in CANCEL') + bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW') + wm = context.window_manager + wm.event_timer_remove(self._timer) + + # turn off is_rendering + wm.pblast_is_rendering = False + wm.pblast_completion = 0 + + # delete temp file + delete_file(self.blend_pb) + self.blend_pb = "" + + # delete temp video + delete_file(self.video_pb) + self.video_pb = "" + + self.report({'WARNING'}, "Render Canceled") + + def finish(self, context): + print('in FINISH') + bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW') + wm = context.window_manager + wm.event_timer_remove(self._timer) + + # turn off is_rendering + wm.pblast_is_rendering = False + wm.pblast_completion = 0 + + # debug + if self.debug : + print("blend temp : " + self.blend_pb) + print("video temp : " + self.video_pb) + + # delete temp file + delete_file(self.blend_pb) + + # open video file and/or folder + prefs = get_addon_prefs() + if prefs.playblast_auto_play: + # open_file(self.video_pb) + bpy.ops.wm.path_open(filepath=self.video_pb) + + if prefs.playblast_auto_open_folder: + # open_folder(dirname(self.video_pb)) + bpy.ops.wm.path_open(filepath=dirname(self.video_pb)) + + wm.pblast_previous_render = self.video_pb + + self.report({'INFO'}, "Render Finished") + + +### classic sauce + +def render_with_restore(): + class RenderFileRestorer: + rd = bpy.context.scene.render + im = rd.image_settings + ff = rd.ffmpeg + # ffmpeg (ff) need to be before image_settings(im) in list + # otherwise __exit__ may try to restore settings of image mode in video mode ! + # ex : "RGBA" not found in ('BW', 'RGB') (will still not stop thx to try block) + + zones = [rd, ff, im] + + val_dic = {} + + def __enter__(self): + ## store attribute of data_path in self.zones list. 
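+            # Note: every writable, non-excluded property of scene.render,
+            # render.ffmpeg and render.image_settings is snapshotted here so
+            # that __exit__ can restore it after the background playblast.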
+ for data_path in self.zones: + self.val_dic[data_path] = {} + for attr in dir(data_path):#iterate in attribute of given datapath + if attr not in exclude and not attr.startswith('__') and not callable(getattr(data_path, attr)) and not data_path.is_property_readonly(attr): + self.val_dic[data_path][attr] = getattr(data_path, attr) + + def __exit__(self, type, value, traceback): + ## restore attribute from self.zones list + for data_path, prop_dic in self.val_dic.items(): + for attr, val in prop_dic.items(): + try: + setattr(data_path, attr, val) + except Exception as e: + print(f"/!\ Impossible to re-assign: {attr} = {val}") + print(e) + + return RenderFileRestorer() + + +def playblast(context, viewport = False, stamping = True): + scn = bpy.context.scene + res_factor = scn.gptoolprops.resolution_percentage + rd = scn.render + ff = rd.ffmpeg + + prefix = 'tempblast_' + + # delete old playblast and blend files + folder = dirname(bpy.data.filepath) + for f in os.listdir(folder): + if f.startswith(prefix): + delete_file(join(folder, f)) + + pblast_folder = join(folder, 'playblast') + if exists(pblast_folder): + for f in os.listdir(pblast_folder): + if f.startswith(prefix): + delete_file(join(pblast_folder, f)) + + tempblend = str( Path(folder) / f'{prefix}{basename(bpy.data.filepath)}' ) + + with render_with_restore(): + ### can add propeties for personalisation as toolsetting props + + rd.resolution_percentage = res_factor + while ( rd.resolution_x * res_factor / 100 ) % 2 != 0:# rd.resolution_percentage + rd.resolution_x = rd.resolution_x + 1 + while ( rd.resolution_y * res_factor / 100 ) % 2 != 0:# rd.resolution_percentage + rd.resolution_y = rd.resolution_y + 1 + + backup_img_settings = rd.image_settings.file_format + rd.image_settings.file_format = 'FFMPEG' + ff.format = 'MPEG4' + ff.codec = 'H264' + ff.constant_rate_factor = 'HIGH'# MEDIUM + ff.ffmpeg_preset = 'REALTIME' + ff.gopsize = 10 + ff.audio_codec = 'AAC' + ff.audio_bitrate = 128 + rd.use_sequencer = False + # rd.use_compositing + + # rd.filepath = join(dirname(bpy.data.filepath), basename(bpy.data.filepath)) + # rd.frame_path(frame=0, preview=0, view="_sauce")## give absolute render filepath with some suffix + # rd.is_movie_format# check if its movie mode + + ## set filepath + # mode incermental or just use fulldate (cannot create conflict and filter OK but long name) + blend = Path(bpy.data.filepath) + date_format = "%Y-%m-%d_%H-%M-%S" + fp = join(blend.parent, "playblast", f'{prefix}{blend.stem}_{strftime(date_format)}.mp4') + + #may need a properties for choosing location : bpy.types.Scene.qrd_savepath = bpy.props.StringProperty(subtype='DIR_PATH', description="Export location, if not specify, create a 'quick_render' directory aside blend location")#(change defaut name in user_prefernece) + rd.filepath = fp + rd.use_stamp = stamping# toolsetting.use_stamp# True for playblast + #stamp options + rd.stamp_font_size = rd.stamp_font_size * res_factor / 100# rd.resolution_percentage + + + # get total number of frames + total_frame = context.scene.frame_end - context.scene.frame_start + 1 + # bpy.ops.render.render_wrap(use_view=viewport) + + bpy.ops.wm.save_as_mainfile(filepath = tempblend, copy=True) + cmd = f'"{bpy.app.binary_path}" -b "{tempblend}" -a' + + # cmd = '"' + bpy.app.binary_path + '"' + " -b " + '"' + new_blend_filepath + '"' + " -E " + render_engine + " -a" + + threading_render([cmd, total_frame, context.scene]) + rd.image_settings.file_format = backup_img_settings#seems it tries to set some properties before the 
man imgs settings + + # lauch BG render modal (ovrerride ?) + context.window_manager.pblast_completion = 0 + context.window_manager.pblast_is_rendering = True + bpy.ops.render.playblast_modal_check(blend_pb=tempblend,video_pb=fp) + + # print("Playblast Done :", fp)#Dbg + return fp + + +class BGBLAST_OT_playblast_anim(bpy.types.Operator): + bl_idname = "render.thread_playblast" + bl_label = "Playblast anim" + bl_description = "Launch animation playblast in a thread (non-blocking)" + bl_options = {"REGISTER"} + + def execute(self, context): + if not bpy.data.is_saved: + self.report({'ERROR'}, 'File is not saved, Playblast cancelled') + return {"CANCELLED"} + + playblast(context, viewport = False, stamping = True) + + return {"FINISHED"} + + +def register(): + bpy.types.WindowManager.pblast_is_rendering = bpy.props.BoolProperty() + bpy.types.WindowManager.pblast_completion = bpy.props.IntProperty(min = 0, max = 100) + bpy.types.WindowManager.pblast_previous_render = bpy.props.StringProperty() + bpy.types.WindowManager.pblast_debug = bpy.props.BoolProperty('INVOKE_DEFAULT', name="Debug", default=False) + + bpy.utils.register_class(BGBLAST_OT_playblast_anim) + bpy.utils.register_class(BGBLAST_OT_playblast_modal_check) + +def unregister(): + bpy.utils.unregister_class(BGBLAST_OT_playblast_modal_check) + bpy.utils.unregister_class(BGBLAST_OT_playblast_anim) + + del bpy.types.WindowManager.pblast_is_rendering + del bpy.types.WindowManager.pblast_completion + del bpy.types.WindowManager.pblast_previous_render + del bpy.types.WindowManager.pblast_debug \ No newline at end of file diff --git a/OP_pseudo_tint.py b/OP_pseudo_tint.py new file mode 100644 index 0000000..9caef6f --- /dev/null +++ b/OP_pseudo_tint.py @@ -0,0 +1,129 @@ +from .utils import get_gp_objects, get_gp_datas, get_addon_prefs +import bpy + +def translate_range(OldValue, OldMin, OldMax, NewMax, NewMin): + return (((OldValue - OldMin) * (NewMax - NewMin)) / (OldMax - OldMin)) + NewMin + +def get_hue_by_name(name, offset=0): + ''' + Get a string and return a hue value + offsetted by int [offset] value based on a range of 255 + ''' + + val = [] + add = 0 + for i in name: + add += ord(i)*8 + #val.append(str(ord(i))) + #number = ''.join(val) + #print("number", number)#Dbg + + # print(add, "% 255 =", add % 1000)#Dbg + + moduled = (add + offset) % 1000 + + ##avoid red + hue = translate_range(moduled, 0, 1000, 0.1, 0.9) + + ##avoid pink + #hue = translate_range(moduled, 0, 255, 0.0, 0.7) + + return hue + + +class GPT_OT_auto_tint_gp_layers(bpy.types.Operator): + bl_idname = "gp.auto_tint_gp_layers" + bl_label = "Pseudo tint layers" + bl_description = "Put a tint on layers according to namespace (except background)" + bl_options = {"REGISTER", "UNDO"} + + # bpy.types.Scene.gp_autotint_offset = bpy.props.IntProperty(name="Tint hue offset", description="offset the tint by this value for better color", default=0, min=-5000, max=5000, soft_min=-999, soft_max=999, step=1)#, subtype='PERCENTAGE' + # bpy.types.Scene.gp_autotint_namespace = bpy.props.BoolProperty(name="use prefix", description="Put same color on layers unsing the same prefix (separated by '_') of full name withjout separator", default=True) + + autotint_offset : bpy.props.IntProperty(name="Tint hue offset", + default=0, min=-5000, max=5000, soft_min=-999, soft_max=999, step=1)#, subtype='PERCENTAGE' + + reset : bpy.props.BoolProperty(name="Reset GP tints", + description="Put all tint factor to 0", default=False) + + selected_GP : bpy.props.BoolProperty(name="Selected", + 
description="Work on all selected grease pencil objects, else only active one", default=True) + + def execute(self, context): + ## TODO create a scene string variable to store serialized list of pre-tinted layers + addon_prefs = get_addon_prefs() + separator = addon_prefs.separator + if not separator:separator = '_' + # Define GP object to work on + gp_datas = get_gp_datas(selection = self.selected_GP) + + if self.reset: + for gp in gp_datas: + gpl = gp.layers + for l in gpl: + l.tint_factor = 0 + # l.tint_color.hsv = (0,0,0)#Reset tint ? + # reset color stored if it was different than black on change + return {"FINISHED"} + + for gp in gp_datas: + gpl = gp.layers + layer_ct = len(gpl) + hue_offset = self.autotint_offset#context.scene.gptoolprops.autotint_offset + #context.scene.gp_autotint_offset# scene property or self property + + + # namespace_order + namespaces=[] + for l in gpl: + ns= l.info.lower().split(separator, 1)[0] + if ns not in namespaces: + namespaces.append(ns) + + ns_len =len(namespaces) + namespaces.reverse() + #print("namespaces", namespaces)#Dbg + #print("ns_len", ns_len)#Dbg + + + print('--------') + ### step from 0.1 to 0.9 + + for i, l in enumerate(gpl): + if l.info.lower() not in ('background',): + print() + print('>', l.info) + ns= l.info.lower().split(separator, 1)[0]#get namespace from separator + print("namespace", ns)#Dbg + + if context.scene.gptoolprops.autotint_namespace: + h = get_hue_by_name(ns, hue_offset)#l.info == individuels + + else: + h = translate_range((i + hue_offset/100)%layer_ct, 0, layer_ct, 0.1, 0.9) + # h = hueval + hue_offset/10 + # hueval += step + print("hue", h)#Dbg + + ## Desaturate for each color per namespace index between defined range (reperesent define depth). + # s = translate_range(namespaces.index(ns), 0, ns_len, 0.5, 0.8) + s = 0.8 + + print("index", namespaces.index(ns), '/', ns_len)#Dbg + print("sat", s)#Dbg + #v = 0.8 + v = s + l.tint_factor = 1 + l.tint_color.hsv = (h,s,v) + + return {"FINISHED"} + + + def draw(self, context): + layout = self.layout + layout.prop(self, 'autotint_offset') + # layout.prop(context.scene, 'gp_autotint_offset')#, text = "offset" + + def invoke(self, context, event): + self.autotint_offset = context.scene.gptoolprops.autotint_offset + return self.execute(context) \ No newline at end of file diff --git a/OP_render.py b/OP_render.py new file mode 100644 index 0000000..71aed48 --- /dev/null +++ b/OP_render.py @@ -0,0 +1,482 @@ +import bpy +import os +from os import listdir, scandir +from os.path import join, dirname, basename, exists, isfile, isdir, splitext +import re, fnmatch, glob +from pathlib import Path +from time import strftime +C = bpy.context +D = bpy.data + +from .utils import open_file, open_folder, get_addon_prefs + +### render the png sequences +def initial_render_checks(context=None): + if not context: + context=bpy.context + + if not bpy.data.is_saved: + return "File is not saved, render cancelled" + + cam = context.scene.camera + if not cam: + return "No active Camera" + + if cam.name == 'draw_cam': + if not cam.parent: + return "Camera is draw_cam but has no parent cam to render from..." + context.scene.camera = cam.parent + + if cam.name == 'obj_cam': + if not cam.get('maincam_name'): + return "Cannot found main camera from obj_cam. 
Set main camera manually" + + main_cam = context.scene.objects.get(cam['maincam_name']) + if not main_cam: + return f"Main camera not found with name: {cam['main_cam']}" + + context.scene.camera = main_cam + + return + + +exclude = ( +### add lines here to exclude specific attribute +'bl_rna', 'identifier','name_property','rna_type','properties', 'compare', 'to_string',#basic +) + +""" + rd_keep = [ + "resolution_percentage", + "resolution_x", + "resolution_y", + "filepath", + "use_stamp", + "stamp_font_size", + ] + im_keep = [ + 'file_format', + 'color_mode', + 'quality', + 'compression', + ] + ff_keep = [ + 'codec', + 'format', + 'constant_rate_factor', + 'ffmpeg_preset', + 'gopsize', + 'audio_codec', + 'audio_bitrate', + ] + """ + +def render_with_restore(): + class RenderFileRestorer: + rd = bpy.context.scene.render + im = rd.image_settings + ff = rd.ffmpeg + # ffmpeg (ff) need to be before image_settings(im) in list + # otherwise __exit__ may try to restore settings of image mode in video mode ! + # ex : "RGBA" not found in ('BW', 'RGB') (will still not stop thx to try block) + + zones = [rd, ff, im] + obviz = {} + # layviz = [] + # matviz = [] + closeline = False + val_dic = {} + cam = bpy.context.scene.camera + enter_context = None + + def __enter__(self): + self.enter_context = bpy.context + ## store attribute of data_path in self.zones list. + for data_path in self.zones: + self.val_dic[data_path] = {} + for attr in dir(data_path):#iterate in attribute of given datapath + if attr not in exclude and not attr.startswith('__') and not callable(getattr(data_path, attr)) and not data_path.is_property_readonly(attr): + self.val_dic[data_path][attr] = getattr(data_path, attr) + + # cam + if self.cam and self.cam.name == 'draw_cam': + if self.cam.parent: + bpy.context.scene.camera = self.cam.parent + + #case of obj cam + if self.cam.name == 'obj_cam': + bpy.context.scene.camera = bpy.context.scene.objects.get(self.cam['main_cam']) + + for ob in bpy.context.scene.objects: + self.obviz[ob.name] = ob.hide_render + + close_mat = bpy.data.materials.get('closeline') + if close_mat and not close_mat.grease_pencil.hide: + close_mat.grease_pencil.hide = True + self.closeline = True + + # for gpo in bpy.context.scene.objects: + # if gpo.type != 'GPENCIL': + # continue + # if not gpo.materials.get('closeline'): + # continue + # self.closelines[gpo] = gpo.materials['closeline'].hide_render + + def __exit__(self, type, value, traceback): + ## reset header text + # self.enter_context.area.header_text_set(None) + + ### maybe keep render settings for custom output with right mode + """ + ## restore attribute from self.zones list + for data_path, prop_dic in self.val_dic.items(): + for attr, val in prop_dic.ietms(): + try: + setattr(data_path, attr, val) + except Exception as e: + print(f"/!\ Impossible to re-assign: {attr} = {val}") + print(e) + """ + if self.cam: + bpy.context.scene.camera = self.cam + + for obname, val in self.obviz.items(): + bpy.context.scene.objects[obname].hide_render = val + + if self.closeline: + close_mat = bpy.data.materials.get('closeline') + if close_mat: + close_mat.grease_pencil.hide = False + + return RenderFileRestorer() + + +def set_render_settings(): + prefs = get_addon_prefs() + rd = bpy.context.scene.render + rd.use_sequencer = False + rd.use_compositing = False + rd.use_overwrite = True + rd.image_settings.file_format = 'PNG' + rd.image_settings.color_mode = 'RGBA' + rd.image_settings.color_depth = '16' + rd.image_settings.compression = 80 #maybe up the compression a 
bit... + rd.resolution_percentage = 100 + rd.resolution_x, rd.resolution_y = prefs.render_res_x, prefs.render_res_y + rd.use_stamp = False + rd.film_transparent = True + + +def render_invididually(context, render_list): + '''Receive a list of object to render individually isolated''' + prefs = get_addon_prefs() + scn = context.scene + rd = scn.render + error_list = [] + with render_with_restore(): + set_render_settings() + + # rd.filepath = join(dirname(bpy.data.filepath), basename(bpy.data.filepath)) + # rd.frame_path(frame=0, preview=0, view="_sauce")## give absolute render filepath with some suffix + + ## set filepath + blend = Path(bpy.data.filepath) + + ### render by object in list + for obname in render_list: + the_obj = scn.objects.get(obname) + if not the_obj: + error_list.append(f'! Could not found {obname} in scene, skipped !') + continue + + ## Kill renderability of all + for o in scn.objects: + o.hide_render = True + + the_obj.hide_render = False + + # f'{blend.stem}_' + # fp = blend.parents[1] / "compo" / "base" / obname / (obname+'_') + fp = (blend.parent / prefs.output_path.lstrip(r'\/')).resolve() / obname / (obname+'_') + + rd.filepath = str(fp) + + # Freeze so impossible to display advance + # context.area.header_text_set(f'rendering > {obname} ...') + + ### render + # bpy.ops.render.render_wrap(use_view=viewport) + bpy.ops.render.render(animation=True) + + # print("render Done :", fp)#Dbg + return error_list + +def render_grouped(context, render_list): + '''Receive a list of object to render grouped''' + + scn = context.scene + rd = scn.render + error_list = [] + + with render_with_restore(): + set_render_settings() + + ## Kill renderability of all + for o in scn.objects: + o.hide_render = True + + ### show all object of the list + for obname in render_list: + the_obj = scn.objects.get(obname) + if not the_obj: + error_list.append(f'! 
Could not found {obname} in scene, skipped !') + continue + the_obj.hide_render = False + + ## Use current file path of setup output path else following : + blend = Path(bpy.data.filepath) + outname = context.scene.gptoolprops.name_for_current_render + # fp = blend.parents[1] / "compo" / "base" / outname / (outname+'_') + fp = (blend.parent / prefs.output_path.lstrip(r'\/')).resolve() / outname / (outname+'_') + rd.filepath = str(fp) + + ### render + # bpy.ops.render.render_wrap(use_view=viewport) + bpy.ops.render.render(animation=True) + + # print("render Done :", fp)#Dbg + return error_list + + +class GPTRD_OT_render_anim(bpy.types.Operator): + bl_idname = "render.render_anim" + bl_label = "render anim" + bl_description = "Launch animation render" + bl_options = {"REGISTER"} + + # use_view : bpy.props.BoolProperty(name='use_view', default=False) + + to_render = [] + + + mode : bpy.props.StringProperty(name="render mode", + description="change render mode for list rendering", default="INDIVIDUAL") + + render_bool : bpy.props.BoolVectorProperty(name="render bools", + description="", default=tuple([True]*32), size=32, subtype='NONE') + + def invoke(self, context, event): + # prefs = get_addons_prefs_and_set() + # if not prefs.local_folder: + # self.report({'ERROR'}, f'Project local folder is not specified in addon preferences') + # return {'CANCELLED'} + if self.mode == 'GROUP' and not context.scene.gptoolprops.name_for_current_render: + self.report({'ERROR'}, 'Need to set ouput name') + return {'CANCELLED'} + + prefs = get_addon_prefs() + print('exclusions list ->', prefs.render_obj_exclusion) + exclusion_obj = [name.strip() for name in prefs.render_obj_exclusion.split(',')] + print('object exclusion list: ', exclusion_obj) + print('initial self.to_render: ', self.to_render) + self.to_render = []#reset + ## check object to render with basic filter + for ob in context.scene.objects: + if ob.type != 'GPENCIL': + continue + if any(x in ob.name.lower() for x in exclusion_obj): #('old', 'rough', 'trash', 'test') + print('Skip', ob.name) + continue + self.to_render.append(ob.name) + + if not self.to_render: + self.report({'ERROR'}, 'No GP to render') + return {'CANCELLED'} + + ## Reset at each render + # self.render_bool = tuple([True]*32)# reset all True + + ## disable for some name (ex: BG) + + wm = context.window_manager + return wm.invoke_props_dialog(self) + + def draw(self, context): + layout = self.layout + layout.label(text='Tick objects to render') + for i, name in enumerate(self.to_render): + row = layout.row() + row.prop(self, 'render_bool', index = i, text = name) + + # for i, set in enumerate(SETS): + # column.row().prop(context.scene.spritesheet, 'sets', index=i, text=set) + + def execute(self, context): + prefs = get_addon_prefs() + err = initial_render_checks(context) + if err: + self.report({'ERROR'}, err) + return {"CANCELLED"} + + render_list = [] + for i, name in enumerate(self.to_render): + if self.render_bool[i]: + render_list.append(name) + + if not render_list: + self.report({'ERROR'}, 'Nothing to render') + return {"CANCELLED"} + + # self.report({'INFO'}, f'rendering {render_list}')#Dgb + # return {"FINISHED"}#Dgb + if self.mode == 'INDIVIDUAL': + errlist = render_invididually(context, render_list) + elif self.mode == 'GROUP': + errlist = render_grouped(context, render_list) + + + blend = Path(bpy.data.filepath) + # out = blend.parents[1] / "compo" / "base" + out = (blend.parent / prefs.output_path.lstrip(r'\/')).resolve() + if out.exists(): + open_folder(str(out)) + 
else: + errlist.append('No compo/base folder created') + + if errlist: + self.report({'ERROR'}, '\n'.join(errlist)) + + return {"FINISHED"} + + +### ---- Setup render path + +class GPTRD_OT_setup_render_path(bpy.types.Operator): + bl_idname = "render.setup_render_path" + bl_label = "Setup render" + bl_description = "Setup render settings for normal render of the current state\nHint: F12 to check one frame, ctrl+F12 to render animation" + bl_options = {"REGISTER"} + + def execute(self, context): + #get name and check + prefs = get_addon_prefs() + outname = context.scene.gptoolprops.name_for_current_render + if not outname: + self.report({'ERROR'}, 'No output name has been set') + return {"CANCELLED"} + + err = initial_render_checks(context) + if err: + self.report({'ERROR'}, err) + return {"CANCELLED"} + + set_render_settings() + + blend = Path(bpy.data.filepath) + # out = blend.parents[1] / "compo" / "base" + + out = (blend.parent / prefs.output_path.lstrip(r'\/')).resolve() + fp = out / outname / (outname+'_') + context.scene.render.filepath = str(fp) + self.report({'INFO'}, f'output setup for "{outname}"') + return {"FINISHED"} + + +class GPTRD_OT_use_active_object_infos(bpy.types.Operator): + bl_idname = "render.use_active_object_name" + bl_label = "Use object Name" + bl_description = "Write active object name (active layer name with shift click on the button)" + bl_options = {"REGISTER"} + + @classmethod + def poll(cls, context): + return context.object + + def invoke(self, context, event): + # wm = context.window_manager + # return wm.invoke_props_dialog(self) + self.shift = event.shift + return self.execute(context) + + def execute(self, context): + ob = context.object + #get name and check + if self.shift: + if ob.type != "GPENCIL": + self.report({'ERROR'}, 'Not a GP, no access to layers') + return {"CANCELLED"} + lay = ob.data.layers.active + if not lay: + self.report({'ERROR'}, 'No active layer found') + return {"CANCELLED"} + context.scene.gptoolprops.name_for_current_render = lay.info + + else: + context.scene.gptoolprops.name_for_current_render = ob.name + + # self.report({'INFO'}, 'Output Name changed') + return {"FINISHED"} + + +""" class GPTRD_OT_render_as_is(bpy.types.Operator): + bl_idname = "render.render_as_is" + bl_label = "render current" + bl_description = "Launch animation render with current setup" + bl_options = {"REGISTER"} + + def execute(self, context): + err = initial_render_checks(context) + if err: + self.report({'ERROR'}, err) + return {"CANCELLED"} + + return {"FINISHED"} """ + +### --- REGISTER + +classes = ( +GPTRD_OT_render_anim, +GPTRD_OT_setup_render_path, +GPTRD_OT_use_active_object_infos, +) + +def register(): + for cl in classes: + bpy.utils.register_class(cl) + +def unregister(): + for cl in classes: + bpy.utils.unregister_class(cl) + + +''' +## Potential cancelling method for image sequence rendering. 
+for cfra in range(start, end+1): + print("Baking frame " + str(cfra)) + + # update scene to new frame and bake to template image + scene.frame_set(cfra) + ret = bpy.ops.object.bake_image() + if 'CANCELLED' in ret: + return {'CANCELLED'} +''' + +""" +class PBLAST_OT_render_wrap(bpy.types.Operator): + bl_idname = "render.render_wrap" + bl_label = "Render wraped" + bl_description = "render" + bl_options = {"REGISTER"}## need hide + + use_view : bpy.props.BoolProperty(name='use_view', default=False) + + def execute(self, context): + if self.use_view:## openGL + ret = bpy.ops.render.opengl('INVOKE_DEFAULT', animation=True, view_context=True) + else:## normal render + ret = bpy.ops.render.render('INVOKE_DEFAULT', animation=True) + return {"FINISHED"} + """ + +""" if __name__ == "__main__": + register() """ \ No newline at end of file diff --git a/OP_temp_cutter.py b/OP_temp_cutter.py new file mode 100644 index 0000000..b3f4496 --- /dev/null +++ b/OP_temp_cutter.py @@ -0,0 +1,166 @@ +import bpy + +# https://blenderartists.org/t/how-to-execute-operator-once-when-keymap-is-held-down/1166009 +class GPTB_OT_temp_cutter(bpy.types.Operator): + bl_idname = "wm.temp_cutter" + bl_label = "Temporary cutter" + bl_description = "Temporary cutter during press in GP mode" + bl_options = {'REGISTER'}#, 'UNDO' avoid register undo step + + _is_running = False# block subsequent 'PRESS' events + bpy.types.Scene.tmp_cutter_org_mode = bpy.props.StringProperty( + name="temp cutter previous mode", description="Use to store mode used before cutter", default="") + # original_mode = None + + def execute(self, context): + print('exe so cute') + bpy.ops.wm.tool_set_by_id(name='builtin.cutter') + return {'FINISHED'} + + def invoke(self, context, event): + if event.value == 'RELEASE': + __class__._is_running = False + + # if self.original_mode: + # bpy.ops.wm.tool_set_by_id(name = self.original_mode) + if context.scene.tmp_cutter_org_mode: + bpy.ops.wm.tool_set_by_id(name = context.scene.tmp_cutter_org_mode) + # self.original_mode = None + + # return 'CANCELLED' unless the code is important, + # this prevents updating the view layer unecessarily + return {'CANCELLED'} + + + elif event.value == 'PRESS': + if not self._is_running: + # self.original_mode = bpy.context.workspace.tools.from_space_view3d_mode(bpy.context.mode, create=False).idname + context.scene.tmp_cutter_org_mode = bpy.context.workspace.tools.from_space_view3d_mode(bpy.context.mode, create=False).idname + + __class__._is_running = True + return self.execute(context) + + return {'CANCELLED'} + +class GPTB_OT_sticky_cutter(bpy.types.Operator): + bl_idname = "wm.sticky_cutter" + bl_label = "Sticky cutter" + bl_description = "Sticky cutter tool" + bl_options = {'REGISTER'}#, 'UNDO' + + def execute(self, context): + # toggle code + # if self.original_mode == 'builtin.cutter':#if on cutter, return to draw + # bpy.ops.wm.tool_set_by_id(name='builtin_brush.Draw') + return {'FINISHED'} + + def modal(self, context, event): + if event.type == self.key and event.value == 'PRESS': + return {'RUNNING_MODAL'} + + elif event.type == self.key and event.value == 'RELEASE': + + if self.timeout: + # use release code in here + bpy.ops.wm.tool_set_by_id(name=self.original_mode) + print("released") + return {'FINISHED'} + + wm = context.window_manager + wm.event_timer_remove(self.handler) + # return {'FINISHED'} + return self.execute(context) + + elif event.type == 'TIMER': + self.timeout = True + wm = context.window_manager + wm.event_timer_remove(self.handler) + + if self.timeout: 
+                    pass
+                    # print("repeating holding down")
+                    # use holding down code in here
+                    # bpy.ops.wm.tool_set_by_id(name='builtin.cutter')#builtin.cutter cursor
+
+        return {'PASS_THROUGH'}
+
+    def invoke(self, context, event):
+        self.key = ''
+        #get key from keymap
+        wm = bpy.context.window_manager
+        #addons : #wm.keyconfigs.addon.keymaps.items()
+        for cat, keymap in wm.keyconfigs.user.keymaps.items():#user set
+            for k in keymap.keymap_items:
+                if k.idname == 'wm.sticky_cutter':
+                    self.key = k.type
+        if not self.key:
+            self.report({'ERROR'}, 'Could not find dedicated key in user keymap for "wm.sticky_cutter"')
+            return {'CANCELLED'}
+
+        self.original_mode = bpy.context.workspace.tools.from_space_view3d_mode(bpy.context.mode, create=False).idname
+        if event.value == 'PRESS':
+            self.timeout = False
+            wm = context.window_manager
+            wm.modal_handler_add(self)
+            bpy.ops.wm.tool_set_by_id(name='builtin.cutter')
+            self.handler = wm.event_timer_add(
+                time_step=0.2, window=context.window)
+            return {'RUNNING_MODAL'}
+        return {'CANCELLED'}
+
+
+## keymaps
+'''
+tmp_cutter_addon_keymaps = []
+def register_keymaps():
+    # pref = get_addon_prefs()
+    # if not pref.temp_cutter_use_shortcut:
+    #     return
+
+    addon = bpy.context.window_manager.keyconfigs.addon
+
+    try:
+        km = bpy.context.window_manager.keyconfigs.addon.keymaps["3D View"]# Grease Pencil
+    except Exception as e:
+        km = addon.keymaps.new(name = "3D View", space_type = "VIEW_3D") #3D View
+        pass
+
+    ops_id = 'wm.temp_cutter'# 'wm.sticky_cutter'
+    if ops_id not in km.keymap_items:
+        ## keymap to operator cam space (in grease pencil mode only ?)
+        km = addon.keymaps.new(name='3D View', space_type='VIEW_3D')#EMPTY #Grease Pencil #3D View
+
+        # use 'ANY' to map both 'PRESS' and 'RELEASE' to the operator
+        # then use the operator's invoke to deal with each articulation
+        kmi = km.keymap_items.new(ops_id, type="T", value="ANY")#, alt=pref.use_alt, ctrl=pref.use_ctrl, shift=pref.use_shift, any=False)
+        tmp_cutter_addon_keymaps.append(km)
+
+def unregister_keymaps():
+    # wm = bpy.context.window_manager
+    for km in tmp_cutter_addon_keymaps:
+        for kmi in km.keymap_items:
+            km.keymap_items.remove(kmi)
+        # wm.keyconfigs.addon.keymaps.remove(km)#dont use new km field...
+    tmp_cutter_addon_keymaps.clear()
+    # del tmp_cutter_addon_keymaps[:]
+'''
+
+def register():
+    if not bpy.app.background:
+        bpy.utils.register_class(GPTB_OT_temp_cutter)
+        bpy.utils.register_class(GPTB_OT_sticky_cutter)
+    # register_keymaps()
+
+
+def unregister():
+    if not bpy.app.background:
+        # unregister_keymaps()
+        bpy.utils.unregister_class(GPTB_OT_temp_cutter)
+        bpy.utils.unregister_class(GPTB_OT_sticky_cutter)
+
+
+if __name__ == "__main__":
+    register()
\ No newline at end of file
diff --git a/README.md b/README.md new file mode 100644 index 0000000..3aeb654 --- /dev/null +++ b/README.md @@ -0,0 +1,286 @@
+# GP toolbox
+
+Blender addon - Various tools to help with grease pencil in animation productions.
+
+**[Download latest](https://gitlab.com/autour-de-minuit/blender/GP_toolbox/-/archive/master/GP_toolbox-master.zi)**
+
+---
+
+## Description
+
+In sidebar (N) > Gpencil > Toolbox
+
+## Addon preferences
+
+Important points of the addon preferences:
+
+Set the path to the palette folder (there is a json palette IO, but you can also put a blend file there and use the blend importer)
+
+Note about palettes: for now the importers do not work with linked palettes, which is not practical for animators (there are material properties you cannot access, and the link grey-out fades the real color in the UIList preview)
+
+- Mirror flip: if in cam view, flip the camera X scale value (so you can see and draw mirrored to spot problems)
+
+- Quick access to scene camera passepartout toggle and opacity
+
+- Quick access to scene camera background images visibility, with individual reference toggles.
+
+- Basic playblast and viewport playblast:
+
+  - dedicated resolution percentage value
+
+  - can auto launch and/or auto open the folder when finished (option in addon preferences)
+
+- Jump to GP keyframe operator
+
+  - you need to set two keymap shortcuts in _windows_ or _screen(global)_ with identifier `screen.gp_keyframe_jump`
+
+- Rotate canvas with a click + modifier combo (default is `ctrl + alt + MID-mouse`), can be changed in addon preferences.
+
+- GP paint cutter tool temporary switch shortcut
+  - Map manually to a key with `wm.temp_cutter` (this one needs "Any" as press mode) or `wm.sticky_cutter` (modal sticky-key version)
+
+- Snap cursor to GP canvas operator accessible with `view3d.cusor_snap`
+  - Map manually (might be auto-replaced according to version) by replacing the entry using `view3d.cursor3d` in the 3D View category (default shortcut `Shift + Right-click`)
+
+- Follow cursor toggle: when activated, the cursor follows the active object
+
+- [breakdowner operator for object mode](https://blenderartists.org/t/pose-mode-animation-tools-for-object-mode/1221322), auto-keymap on: Shift + E
+
+- Line extender: helps close gaps between lines, with control over the layer target (-> also needs control over frame targets)
+
+- Copy/paste
+
+### Where ?
+
+Panel in sidebar: 3D view > sidebar 'N' > Gpencil
+
+---
+
+## Changelog:
+
+0.9.1:
+
+- Public release
+- prefs: added fps as part of project settings
+  - check file uses the pref fps value (previously used a hardcoded 24fps value)
+
+0.8.0:
+
+- feat: Added background_rendering playblast, derived from Tonton's playblaster
+  - stripped associated properties from properties.py and passed them as wm props.
+
+0.7.2:
+
+- fix: Palette importer bug
+
+0.7.0:
+
+- feat: auto create empty frame on color layer
+
+0.6.3:
+
+- shortcut: added 1,2,3 to change sculpt mask mode (like native edit mode shortcut)
+
+0.6.2:
+
+- feat: colorisation, option to change stop lines length
+- Change behavior of `cursor_snap` ops when a non-GP object is selected to mode: `surface project`
+- Minor refactor for submodule register
+
+0.6.1:
+
+- feat: render objects grouped, one anim render with all ticked objects using a manual output name
+
+0.6.0:
+
+- feat: Include GP clipboard's "In place" custom cut/copy/paste using the OS clipboard
+
+0.5.9:
+
+- feat: render exporter
+  - Render a selection of GP objects isolated from the rest
+  - added exclusion names for GP object listing
+  - setup settings and output according to a name
+  - open render folder
+- check file: set onion skin keyframe filter to 'All_type' on all GP datablocks
+- check file: set scene resolution to settings in prefs (default 2048x1080)
+
+0.5.8:
+
+- feat: GP material append on active object from a single blend file
+
+0.5.7:
+
+- Added warning message for cursor snapping
+
+0.5.5 - 0.5.6:
+
+- check file: added check for placement and projection mode for Gpencil.
+- add a slider to change edit_lines_opacity globally for all GP data at once
+- check file: auto-check additive drawing (to avoid empty frames with "only selected channel" in Dopesheet)
+
+0.5.4:
+
+- feat: anim manager in its own GP_toolbox submenu:
+  - button to list disabled anims (allows to quickly check the state of the scene)
+  - disable/enable all fcurves for GP objects or other objects separately while painting
+  - shift click to target selection only
+- check file: added disabled fcurve counter alert with detail in console
+
+0.5.3:
+
+- fix: broken obj cam (add custom prop on objcam to track which was the main cam)
+- check file option: change select active tool (choice added in addon preferences)
+
+0.5.2:
+
+- Revert back obj_cam operator for following object (native lock view follows only translation)
+- Changed method for canvas rotation to a more robust rotate axis.
+- Add operators on link checker to open containing folder/file of a link
+- Refactor: file checkers in their own file
+
+0.5.1:
+
+- fix: error when empty material slot on GP object.
+- fix: cursor snap on GP canvas when GP is parented
+- change: Deleted obj cam (and related set view) operator
+- change: blacker note background for playblast (stamp_background)
+- feat: Always playblast from main camera (if in draw_cam)
+- feat: Handler added to Remap relative on save (pre)
+- ops: Check for broken links with a proposition to find missing files
+- ops: Added basic hardcoded file checker
+  - Lock main cam
+  - set scene percentage at 100
+  - set show slider and sync range
+  - set fps to 24
+
+0.4.6:
+
+- feat: basic Palette manager with base material check and warning
+
+0.4.5:
+
+- open blender config folder from addon preferences
+- fix: obj cam parent on selected object
+- added wip rotate canvas axis file, still not ready to replace current canvas rotate:
+  - freeview: bug when rotating free view from cardinal views
+  - camview: potential bug when cam is parented with some specific angle (could not reproduce)
+
+0.4.4:
+
+- feat: added cursor follow handlers and UI toggle
+
+0.4.3:
+
+- change playblast out to 'images' and add playblast as name prefix
+
+0.4.2:
+
+- feat: GP canvas cursor snap with new `view3d.cusor_snap` operator
+- fix: canvas rotate works with parented camera !
+- wip: added an attempt to replicate the camera rotate modal with a view matrix, but no luck
+
+0.4.1:
+
+- feat: Alternative cameras: parent to main cam (roll without affecting the main cam), parent to active object at current view (follow current Grease Pencil object)
+
+0.4.0:
+
+- Added a standalone working version of box_deform (stripped preferences, keeping only the best configuration with autoswap)
+
+0.3.8:
+
+- UI: expose onion skin in interface
+- UI: expose autolock in interface
+- UI: put tint layers in a submenu
+- code: refactor, pushed most class registration into their own file
+- tool: tool to rename current or all grease pencil datablocks with a different name than the container object
+
+0.3.7:
+
+- UI: new interface with tabs for addon preferences
+- UI: possible to disable color panel from preferences (might be deleted if unusable)
+- docs: change readme changelog format and correct doc
+
+0.3.6:
+
+- UI: Stoplines: add a button to quickly set stoplines visibility
+
+0.3.5:
+
+- Fix: No more camera rotation undo when ctrl+Z on next stroke (canvas rotate push and undo)
+- Fix: Enter key added to validate the object-breakdown modal
+
+0.3.3:
+
+- version 1 beta (stable) of line gap closing tools for better bucket fill tool performance, with UI
+
+0.3.3:
+
+- version 1 beta of gmic colorize
+- variant of `screen.gp_keyframe_jump` through keymap settings
+
+0.3.0:
+
+- new homemade [breakdowner operator for object mode](https://blenderartists.org/t/pose-mode-animation-tools-for-object-mode/1221322) with auto keymap: Shift + E
+- GP cutter shortcut ops to map with `wm.temp_cutter` (with "Any" as press mode) or `wm.sticky_cutter` (modal sticky-key version)
+
+0.2.3:
+
+- add operator `screen.gp_keyframe_jump`
+- add shortcut to rotate canvas
+- fix duplicate class
+
+0.2.2:
+
+- separated props resolution_percentage parameter
+- playblast options for auto playback and opening the folder
+
+0.2.1:
+
+- playblast feature
+- Button to zoom to 100% or fit screen
+- display scene resolution with res indicator
+- Fix reference panel: works with video and displays in a box layout
+- close pseudo-color panel by default (plan to move it to Gpencil tab)
+
+0.2.0:
+
+- UI: Toggle camera background images from Toolbox panel
+- UI: quick access to passepartout
+- Feature: option to use namespace for pseudo color
+
+0.1.5:
+
+- added CGC auto-updater
+
+0.1.3:
+
+- flip cam x
+- initial stage of overlay toggle (needs pref/multiple prefs)
+
+0.1.2:
+
+- subpanel of GP data (instead of direct append)
+- initial commit with GP pseudo color
\ No newline at end of file
diff --git a/UI_tools.py b/UI_tools.py
new file mode 100644
index 0000000..ec586db
--- /dev/null
+++ b/UI_tools.py
@@ -0,0 +1,391 @@
+from . import addon_updater_ops
+from .utils import get_addon_prefs
+import bpy
+from pathlib import Path
+
+
+## UI in properties
+
+### dataprop_panel not used --> transferred to sidebar
+"""
+class GPTB_PT_dataprop_panel(bpy.types.Panel):
+ bl_space_type = 'PROPERTIES'
+ bl_region_type = 'WINDOW'
+ # bl_space_type = 'VIEW_3D'
+ # bl_region_type = 'UI'
+ # bl_category = "Tool"
+ # bl_idname = "ADDONID_PT_panel_name"# identifier, if omitted, takes the name of the class.
+ bl_label = "Pseudo color"# title + bl_parent_id = "DATA_PT_gpencil_layers"#subpanel of this ID + bl_options = {'DEFAULT_CLOSED'} + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + settings = context.scene.gptoolprops + + col = layout.column(align = True) + row = col.split(align=False, factor=0.63) + row.prop(settings, 'autotint_offset') + row.prop(settings, 'autotint_namespace') + + col.operator("gp.auto_tint_gp_layers", icon = "COLOR").reset = False + col.operator("gp.auto_tint_gp_layers", text = "Reset tint", icon = "COLOR").reset = True + """ + +## UI in Gpencil sidebar menu + +class GPTB_PT_sidebar_panel(bpy.types.Panel): + bl_label = "Toolbox" + bl_space_type = "VIEW_3D" + bl_region_type = "UI" + bl_category = "Gpencil" + + def draw(self, context): + layout = self.layout + # layout.use_property_split = True + rd = context.scene.render + # check for update + addon_updater_ops.check_for_update_background() + + # layout.label(text='View options:') + ## flip X cam + if context.scene.camera and context.scene.camera.scale.x < 0: + # layout.label(text='! Flipped !') + row = layout.row(align=True) + + row.operator('gp.mirror_flipx', text = 'Mirror flip', icon = 'MOD_MIRROR')# ARROW_LEFTRIGHT + row.label(text='',icon='LOOP_BACK') + else: + layout.operator('gp.mirror_flipx', text = 'Mirror flip', icon = 'MOD_MIRROR')# ARROW_LEFTRIGHT + + ## draw/manipulation camera + col = layout.column() + if context.scene.camera and context.scene.camera.name.startswith(('draw', 'obj')): + row = col.row(align=True) + row.operator('gp.draw_cam_switch', text = 'Main cam', icon = 'OUTLINER_OB_CAMERA') + row.label(text='',icon='LOOP_BACK') + if context.scene.camera.name.startswith('draw'): + col.operator('gp.reset_cam_rot', text='reset rotation')#.swapmethod ? = CAM + else: + col.operator('gp.set_view_as_cam', text='set view')#.swapmethod ? 
= CAM + + else: + row = col.row(align=True) + row.operator('gp.draw_cam_switch', text = 'Draw cam', icon = 'CON_CAMERASOLVER').cam_mode = 'draw' + row.operator('gp.draw_cam_switch', text = 'Object cam', icon = 'CON_CAMERASOLVER').cam_mode = 'object' + col.label(text='In main camera', icon = 'OUTLINER_OB_CAMERA') + + # layout.operator('gp.overlay_presets', text = 'Toggle overlays', icon = 'OVERLAY') + + if context.scene.camera: + row = layout.row(align=True)# .split(factor=0.5) + row.label(text='Passepartout') + row.prop(context.scene.camera.data, 'show_passepartout',text='', icon ='OBJECT_HIDDEN' ) + row.prop(context.scene.camera.data, 'passepartout_alpha', text='') + row = layout.row(align=True) + row.operator('view3d.zoom_camera_1_to_1', text = 'Zoom 1:1', icon = 'ZOOM_PREVIOUS')# FULLSCREEN_EXIT + row.operator('view3d.view_center_camera', text = 'Zoom fit', icon = 'FULLSCREEN_ENTER') + + ## background images/videos + if context.scene.camera.data.background_images: + layout.separator() + icon_bg = 'RESTRICT_VIEW_OFF' if context.scene.camera.data.show_background_images else 'RESTRICT_VIEW_ON'# IMAGE_BACKGROUND#IMAGE_PLANE + # icon_bg = 'TRIA_DOWN' if context.scene.camera.data.show_background_images else 'IMAGE_BACKGROUND' + box = layout.box() + box.prop(context.scene.camera.data, 'show_background_images', text='Ref in cam', icon=icon_bg) + if context.scene.camera.data.show_background_images: + # box = layout.box() + for bg_img in context.scene.camera.data.background_images: + if bg_img.source == 'IMAGE' and bg_img.image: + row = box.row(align=True) + row.label(text=bg_img.image.name, icon='IMAGE_RGB')# FILE_IMAGE + # row.prop(bg_img, 'alpha', text='')# options={'HIDDEN'} + row.prop(bg_img, 'show_background_image', text='')# options={'HIDDEN'} + if bg_img.source == 'MOVIE_CLIP' and bg_img.clip: + row = box.row(align=True) + row.label(text=bg_img.clip.name, icon='FILE_MOVIE') + # row.prop(bg_img, 'alpha', text='')# options={'HIDDEN'} + row.prop(bg_img, 'show_background_image', text='')# options={'HIDDEN'} + + ## playblast params + layout.separator() + layout.label(text = 'Playblast:') + row = layout.row(align=False)#split(factor=0.6) + row.label(text = f'{rd.resolution_x * context.scene.gptoolprops.resolution_percentage // 100} x {rd.resolution_y * context.scene.gptoolprops.resolution_percentage // 100}') + row.prop(context.scene.gptoolprops, 'resolution_percentage', text='') + # row.prop(rd, 'resolution_percentage', text='')#real percent scene percentage + + row = layout.row(align=True) + row.operator('render.thread_playblast', text = 'Playblast', icon = 'RENDER_ANIMATION')# non blocking background render playblast + # row.operator('render.playblast_anim', text = 'Playblast', icon = 'RENDER_ANIMATION').use_view = False # old (but robust) blocking playblast + row.operator('render.playblast_anim', text = 'Viewport').use_view = True + + else: + layout.label(text='No camera !', icon = 'ERROR') + + ## Options + layout.separator() + layout.label(text = 'Options:') + # row = layout.row(align=False) + ## maybe remove cursor_follow icon that look like + text, icon = ('Cursor Follow On', 'PIVOT_CURSOR') if context.scene.gptoolprops.cursor_follow else ('Cursor Follow Off', 'CURSOR') + layout.prop(context.scene.gptoolprops, 'cursor_follow', text=text, icon=icon) + layout.prop(context.space_data.overlay, 'use_gpencil_onion_skin') + + + if context.object and context.object.type == 'GPENCIL': + layout.prop(context.object.data, 'use_autolock_layers') + layout.prop(context.object, 'show_in_front', 
text='X-ray')#default text "In Front" + + ## rename datablock temporary layout + if context.object.name != context.object.data.name: + box = layout.box() + box.label(text='different name for object and data:', icon='INFO') + row = box.row(align=False) + row.operator('gp.rename_data_from_obj').rename_all = False + row.operator('gp.rename_data_from_obj', text='Rename all').rename_all = True + + ## Check base palette + if not all(x in [m.name for m in context.object.data.materials if m] for x in ("line", "invisible")): + box = layout.box() + box.label(text='Missing base material setup', icon='INFO') + box.operator('gp.load_default_palette') + + else: + layout.label(text='No GP object selected') + + + layout.prop(context.scene.gptoolprops, 'edit_lines_opacity') + + ## Create empty frame on layer (ops stored under GP_colorize... might be best to separate in another panel ) + layout.operator('gp.create_empty_frames', icon = 'DECORATE_KEYFRAME') + + ## File checker + row = layout.row(align=True) + row.operator('gp.file_checker', text = 'Check file', icon = 'SCENE_DATA') + row.operator('gp.links_checker', text = 'Check links', icon = 'UNLINKED') + + # Mention update as notice + addon_updater_ops.update_notice_box_ui(self, context) + + + # row = layout.row(align=False) + # row.label(text='arrow choice') + # row.operator("my_operator.multi_op", text='', icon='TRIA_LEFT').left = 1 + # row.operator("my_operator.multi_op", text='', icon='TRIA_RIGHT').left = 0 + +class GPTB_PT_anim_manager(bpy.types.Panel): + bl_label = "Animation manager" + bl_space_type = "VIEW_3D" + bl_region_type = "UI" + bl_category = "Gpencil" + bl_parent_id = "GPTB_PT_sidebar_panel" + bl_options = {'DEFAULT_CLOSED'} + + # def draw_header(self,context): + # self.layout.prop(context.scene.camera.data, "show_background_images", text="") + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + ## Animation enable disable anim (shift click to select) OP_helpers.GPTB_OT_toggle_mute_animation + + layout.operator('gp.list_disabled_anims') + ## Objs () + row = layout.row(align=True) + row.label(text='Obj anims:') + ops = row.operator('gp.toggle_mute_animation', text = 'ON')#, icon = 'GRAPH' + ops.skip_gp = True + ops.skip_obj = False + ops.mute = False + + ops = row.operator('gp.toggle_mute_animation', text = 'OFF')#, icon = 'GRAPH' + ops.skip_gp = True + ops.skip_obj = False + ops.mute = True + ## Gps + row = layout.row(align=True) + row.label(text='Gp anims:') + ops = row.operator('gp.toggle_mute_animation', text = 'ON')#, icon = 'GRAPH' + ops.skip_gp = False + ops.skip_obj = True + ops.mute = False + + ops = row.operator('gp.toggle_mute_animation', text = 'OFF')#, icon = 'GRAPH' + ops.skip_gp = False + ops.skip_obj = True + ops.mute = True + +class GPTB_PT_tint_layers(bpy.types.Panel): + bl_label = "Tint layers" + bl_space_type = "VIEW_3D" + bl_region_type = "UI" + bl_category = "Gpencil" + bl_parent_id = "GPTB_PT_sidebar_panel" + bl_options = {'DEFAULT_CLOSED'} + + @classmethod + def poll(cls, context): + return context.scene.camera + + # def draw_header(self,context): + # self.layout.prop(context.scene.camera.data, "show_background_images", text="") + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + ## pseudo color layers + # layout.separator() + col = layout.column(align = True) + row = col.split(align=False, factor=0.63) + row.prop(context.scene.gptoolprops, 'autotint_offset') + row.prop(context.scene.gptoolprops, 'autotint_namespace') + + 
col.operator("gp.auto_tint_gp_layers", icon = "COLOR").reset = False + col.operator("gp.auto_tint_gp_layers", text = "Reset tint", icon = "COLOR").reset = True + +class GPTB_PT_render(bpy.types.Panel): + bl_label = "Render" + bl_space_type = "VIEW_3D" + bl_region_type = "UI" + bl_category = "Gpencil" + bl_parent_id = "GPTB_PT_sidebar_panel" + bl_options = {'DEFAULT_CLOSED'} + + @classmethod + def poll(cls, context): + return context.scene.camera + + # def draw_header(self,context): + # self.layout.prop(context.scene.camera.data, "show_background_images", text="") + + def draw(self, context): + layout = self.layout + layout.operator('render.render_anim', text = 'Render invividually', icon = 'RENDERLAYERS').mode = 'INDIVIDUAL'#RENDER_STILL #RESTRICT_RENDER_OFF + layout.operator('render.render_anim', text = 'Render grouped', icon = 'IMAGE_RGB').mode = 'GROUP' + + layout.separator() + row = layout.row() + row.prop(context.scene.gptoolprops, 'name_for_current_render', text = 'Output name')#icon = 'OUTPUT' + row.operator('render.use_active_object_name', text = '', icon='OUTLINER_DATA_GP_LAYER')#icon = 'OUTPUT' + + layout.operator('render.setup_render_path', text = 'Setup output', icon = 'TOOL_SETTINGS')#SETTINGS + + blend = Path(bpy.data.filepath) + out = blend.parents[1] / "compo" / "base" + layout.operator("wm.path_open", text='Open render folder', icon='FILE_FOLDER').filepath = str(out) + + + + +""" +## unused -- (integrated in sidebar_panel) +class GPTB_PT_cam_ref_panel(bpy.types.Panel): + bl_label = "Background imgs" + bl_space_type = "VIEW_3D" + bl_region_type = "UI" + bl_category = "Gpencil" + bl_parent_id = "GPTB_PT_sidebar_panel" + + @classmethod + def poll(cls, context): + return context.scene.camera + + def draw_header(self,context): + self.layout.prop(context.scene.camera.data, "show_background_images", text="") + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + + if context.scene.camera.data.show_background_images: + for bg_img in context.scene.camera.data.background_images: + if bg_img.image: + row = layout.row(align=False) + row.label(text=bg_img.image.name, icon='IMAGE_RGB') + row.prop(bg_img, 'show_background_image', text='')# options={'HIDDEN'} + """ + + +def palette_manager_menu(self, context): + """Palette menu to append in existing menu""" + # GPENCIL_MT_material_context_menu + layout = self.layout + # {'EDIT_GPENCIL', 'PAINT_GPENCIL','SCULPT_GPENCIL','WEIGHT_GPENCIL', 'VERTEX_GPENCIL'} + layout.separator() + prefs = get_addon_prefs() + + layout.operator("gp.load_palette", text='Load json Palette', icon='IMPORT').filepath = prefs.palette_path + layout.operator("gp.save_palette", text='Save json Palette', icon='EXPORT').filepath = prefs.palette_path + layout.operator("gp.load_blend_palette", text='Load color Palette', icon='COLOR').filepath = prefs.palette_path + + +classes = ( +GPTB_PT_sidebar_panel, +GPTB_PT_anim_manager, +GPTB_PT_tint_layers, +GPTB_PT_render, +## GPTB_PT_cam_ref_panel, +) + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + bpy.types.GPENCIL_MT_material_context_menu.append(palette_manager_menu) + +def unregister(): + bpy.types.GPENCIL_MT_material_context_menu.remove(palette_manager_menu) + for cls in reversed(classes): + bpy.utils.unregister_class(cls) + + +""" +## direct panel def append (no submenu with arrow) +## need to use append and remove in register/unregister +# bpy.types.DATA_PT_gpencil_layers.append(UI_tools.GPdata_toolbox_panel) +# 
bpy.types.DATA_PT_gpencil_layers.remove(UI_tools.GPdata_toolbox_panel) + +def GPdata_toolbox_panel(self, context): + layout = self.layout + layout.use_property_split = True + settings = context.scene.gptoolprops + + col = layout.column(align = True) + col.prop(settings, 'autotint_offset') + col.operator("gp.auto_tint_gp_layers", icon = "COLOR").reset = False + col.operator("gp.auto_tint_gp_layers", text = "Reset tint", icon = "COLOR").reset = True +""" + + + +### old + +""" + col = layout.column(align = True) + col.operator("gpencil.stroke_change_color", text="Move to Color",icon = "COLOR") + col.operator("transform.shear", text="Shear") + col.operator("gpencil.stroke_cyclical_set", text="Toggle Cyclic").type = 'TOGGLE' + col.operator("gpencil.stroke_subdivide", text="Subdivide",icon = "OUTLINER_DATA_MESH") + + row = layout.row(align = True) + row.operator("gpencil.stroke_join", text="Join").type = 'JOIN' + row.operator("grease_pencil.stroke_separate", text = "Separate") + col.operator("gpencil.stroke_flip", text="Flip Direction",icon = "ARROW_LEFTRIGHT") + + col = layout.column(align = True) + col.operator("gptools.randomise",icon = 'RNDCURVE') + col.operator("gptools.thickness",icon = 'LINE_DATA') + col.operator("gptools.angle_split",icon = 'MOD_BEVEL',text='Angle Splitting') + col.operator("gptools.stroke_uniform_density",icon = 'MESH_DATA',text = 'Density') + + row = layout.row(align = True) + row.prop(settings,"extra_tools",text='',icon = "DOWNARROW_HLT" if settings.extra_tools else "RIGHTARROW",emboss = False) + row.label("Extra tools") + + if settings.extra_tools : + layout.operator_menu_enum("gpencil.stroke_arrange", text="Arrange Strokes...", property="direction") """ + + + + diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..0b28b0e --- /dev/null +++ b/__init__.py @@ -0,0 +1,462 @@ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +bl_info = { +"name": "GP toolbox", +"description": "Set of tools for Grease Pencil in animation production", +"author": "Samuel Bernou", +"version": (0, 9, 1), +"blender": (2, 91, 0), +"location": "sidebar (N menu) > Gpencil > Toolbox / Gpencil properties", +"warning": "", +"doc_url": "https://gitlab.com/autour-de-minuit/blender/gp_toolbox", +"category": "3D View", +} + +from . import addon_updater_ops + +from .utils import * +from .functions import * + +## GMIC +from .GP_guided_colorize import GP_colorize + +## direct tools +from . import OP_breakdowner +from . import OP_temp_cutter +from . import OP_canvas_rotate +from . import OP_playblast_bg +from . import OP_playblast +from . import OP_helpers +from . import OP_box_deform +from . import OP_cursor_snap_canvas +from . import OP_palettes +from . import OP_file_checker +from . import OP_render +from . import OP_copy_paste +from . import keymaps + +from .OP_pseudo_tint import GPT_OT_auto_tint_gp_layers + +from . 
import UI_tools + +from .properties import GP_PG_ToolsSettings + +from bpy.props import (FloatProperty, + BoolProperty, + EnumProperty, + StringProperty, + IntProperty) + +import bpy +from bpy.app.handlers import persistent +from pathlib import Path +# from .eyedrop import EyeDropper +# from .properties import load_icons,remove_icons + + +### prefs +# def set_palette_path(self, context): +# print('value set') +# self.palette_path = Path(bpy.path.abspath(self["palette_path"])).as_posix() + + +class GPTB_prefs(bpy.types.AddonPreferences): + bl_idname = __name__ + + ## tabs + + pref_tabs : bpy.props.EnumProperty( + items=(('PREF', "Preferences", "Change some preferences of the modal"), + ('MAN_OPS', "Operator", "Operator to add Manually"), + # ('TUTO', "Tutorial", "How to use the tool"), + # ('GMIC', "Gmic color", "Options to use gmic to colorize"), + ('UPDATE', "Update", "Check and apply updates"), + # ('KEYMAP', "Keymap", "customise the default keymap"), + ), + default='PREF') + + ## addon pref updater props + + auto_check_update : BoolProperty( + name="Auto-check for Update", + description="If enabled, auto-check for updates using an interval", + default=False, + ) + + updater_intrval_months : IntProperty( + name='Months', + description="Number of months between checking for updates", + default=0, + min=0 + ) + updater_intrval_days : IntProperty( + name='Days', + description="Number of days between checking for updates", + default=7, + min=0, + max=31 + ) + updater_intrval_hours : IntProperty( + name='Hours', + description="Number of hours between checking for updates", + default=0, + min=0, + max=23 + ) + updater_intrval_minutes : IntProperty( + name='Minutes', + description="Number of minutes between checking for updates", + default=0, + min=0, + max=59 + ) + + ## addon prefs + + ## Project preferences + # subtype (string) – Enumerator in ['FILE_PATH', 'DIR_PATH', 'FILE_NAME', 'BYTE_STRING', 'PASSWORD', 'NONE']. 
+ + ## fps + + fps : IntProperty( + name='Frame Rate', + description="Fps of the project, Used to conform the file when you use Check file operator", + default=25, + min=1, + max=10000 + ) + + ## output settings for automated renders + output_parent_level = IntProperty( + name='Parent level', + description="Go up in folder to define a render path relative to the file in upper directotys", + default=0, + min=0, + max=20 + ) + + output_path : StringProperty( + name="Output path", + description="Path relative to blend to place render", + default="//render", maxlen=0, subtype='DIR_PATH') + + separator : StringProperty( + name="Namespace separator", + description="Character delimiter to use for detecting namespace (prefix), default is '_', space if nothing specified", + default="_", maxlen=0, subtype='NONE') + + palette_path : StringProperty( + name="Palettes directory", + description="Path to palette containing palette.json files to save and load", + default="//", maxlen=0, subtype='DIR_PATH')#, update = set_palette_path + + ## Playblast prefs + playblast_auto_play : BoolProperty( + name="Playblast auto play", + description="Open rendered playblast when finished", + default=True, + ) + + playblast_auto_open_folder : BoolProperty( + name="Playblast auto open location", + description="Open folder of rendered playblast when finished", + default=False, + ) + + ## Canvas rotate + canvas_use_shortcut: BoolProperty( + name = "Use Default Shortcut", + description = "Use default shortcut: mouse double-click + modifier", + default = True) + + mouse_click : EnumProperty( + name="Mouse button", description="click on right/left/middle mouse button in combination with a modifier to trigger alignement", + default='RIGHTMOUSE', + items=( + ('RIGHTMOUSE', 'Right click', 'Use click on Right mouse button', 'MOUSE_RMB', 0), + ('LEFTMOUSE', 'Left click', 'Use click on Left mouse button', 'MOUSE_LMB', 1), + ('MIDDLEMOUSE', 'Mid click', 'Use click on Mid mouse button', 'MOUSE_MMB', 2), + )) + + use_shift: BoolProperty( + name = "combine with shift", + description = "add shift combined with double click to trigger alignement", + default = False) + + use_alt: BoolProperty( + name = "combine with alt", + description = "add alt combined with double click to trigger alignement (default)", + default = True) + + use_ctrl: BoolProperty( + name = "combine with ctrl", + description = "add ctrl combined with double click to trigger alignement", + default = True) + + ## default active tool to use + select_active_tool : EnumProperty( + name="Default selection tool", description="Active tool to set when launching check fix scene", + default='builtin.select_lasso', + items=( + ('none', 'Dont change', 'Let the current active tool without change', 0),#'MOUSE_RMB' + ('builtin.select', 'Select tweak', 'Use active select tweak active tool', 1),#'MOUSE_RMB' + ('builtin.select_box', 'Select box', 'Use active select box active tool', 2),#'MOUSE_LMB' + ('builtin.select_circle', 'Select circle', 'Use active select circle active tool', 3),#'MOUSE_MMB' + ('builtin.select_lasso', 'Select lasso', 'Use active select lasso active tool', 4),#'MOUSE_MMB' + )) + + + ## render settings + render_obj_exclusion : StringProperty( + name="GP obj exclude filter", + description="List comma separated words to exclude from render list", + default="old,rough,trash,test")#, subtype='FILE_PATH') + + render_res_x : IntProperty( + name='Resolution X', + description="Resolution on X", + default=2048, + min=1, + max=10000 + ) + render_res_y : IntProperty( + 
name='Resolution Y', + description="Resolution on Y", + default=1080, + min=1, + max=10000 + ) + + + ## GMIColor + use_color_tools : BoolProperty( + name = "Use color tools", + description = "Enable guided color tools panel", + default = False) + + #-# gmic tools not ready + gmic_path : StringProperty( + name="Path to gmic", description="Need to specify path to gmic binary to allow color pixel propagation features", + default="", subtype='FLIE_PATH') + + ## Temp cutter + # temp_cutter_use_shortcut: BoolProperty( + # name = "Use temp cutter Shortcut", + # description = "Auto assign shortcut for temp_cutter", + # default = True) + + def draw(self, context): + layout = self.layout## random color + # layout.use_property_split = True + + row= layout.row(align=True) + row.prop(self, "pref_tabs", expand=True) + + + if self.pref_tabs == 'PREF': + box = layout.box() + box.label(text='Random color options:') + box.prop(self, 'separator') + + box = layout.box() + box.label(text='Project settings') + + ## Render + # box.label(text='Render option:') + box.prop(self, 'fps') + row = box.row(align = True) + row.label(text='Render resolution') + row.prop(self, 'render_res_x', text='X') + row.prop(self, 'render_res_y', text='Y') + + ## Palette + box.label(text='Palette library folder:') + box.prop(self, 'palette_path') + + ## render output + + ## ?? maybe add an option for absolute path (not really usefull in prod) ?? + box.prop(self, 'output_path') + + ### TODO add render settings + + # layout.separator()## Playblast + box = layout.box() + box.label(text='Playblast options:') + box.prop(self, 'playblast_auto_play') + box.prop(self, 'playblast_auto_open_folder') + + # box.separator()## Canvas + box = layout.box() + box.label(text='Canvas rotate options:') + box.prop(self, "canvas_use_shortcut", text='Bind shortcuts') + + if self.canvas_use_shortcut: + row = box.row() + row.label(text="After changes, use the Bind/Rebind button")#icon="" + row.operator("prefs.rebind_shortcut", text='Bind/Rebind shortcuts', icon='FILE_REFRESH')#EVENT_SPACEKEY + row = box.row(align = True) + row.prop(self, "use_ctrl", text='Ctrl')#, expand=True + row.prop(self, "use_alt", text='Alt')#, expand=True + row.prop(self, "use_shift", text='Shift')#, expand=True + row.prop(self, "mouse_click",text='')#expand=True + + if not self.use_ctrl and not self.use_alt and not self.use_shift: + box.label(text="Choose at least one modifier to combine with click (default: Ctrl+Alt)", icon="ERROR")# INFO + + else: + box.label(text="No hotkey has been set automatically. Following operators needs to be set manually:", icon="ERROR") + box.label(text="view3d.rotate_canvas") + + # TODO get that off once proper register update from properties + if self.canvas_use_shortcut: + OP_canvas_rotate.register_keymaps() + else: + OP_canvas_rotate.unregister_keymaps() + + ## Active tool + box = layout.box() + box.label(text='Autofix check button options:') + box.prop(self, "select_active_tool", icon='RESTRICT_SELECT_OFF') + + + box.prop(self, "render_obj_exclusion", icon='FILTER')# + + + # if self.pref_tabs == 'GMIC': + + # gmic (Disabled - tool complete WIP) + box = layout.box() + box.label(text='Colorisation:') + box.prop(self, 'use_color_tools') + if self.use_color_tools: + col = box.column(align=False) + ## Delete if gmic is unused + col.prop(self, 'gmic_path') + if not self.gmic_path: + box=col.box() + box.label(text='Gmic is missing. 
(needed for pixel color tools)', icon='INFO') + row = box.row() + row.label(text='1.Download GMIC CLI (Command-line interface) here:') + row.operator("wm.url_open", text="Get gmic").url = "https://gmic.eu/download.shtml" + box.label(text='2.unzip it somewhere and point to gmic.exe') + box.label(text='(If gmic is already in your PATH, just write "gmic")') + + if self.pref_tabs == 'MAN_OPS': + # layout.separator()## notes + # layout.label(text='Notes:') + layout.label(text='Following operators ID have to be set manually :') + + ## keyframe jump + box = layout.box() + box.label(text='GP keyframe jump (consider only GP keyframe, multiple options available at setup)') + row = box.row() + row.label(text='screen.gp_keyframe_jump') + row.operator('wm.copytext', icon='COPYDOWN').text = 'screen.gp_keyframe_jump' + + # layout.separator() + ## Snap cursor to GP + box = layout.box() + box.label(text='Snap cursor to GP canvas (if not autoset)') + row = box.row() + row.label(text='Look for default 3d snap operators by searching "view3d.cursor3d"') + row.operator('wm.copytext', text='Copy "view3d.cursor3d"', icon='COPYDOWN').text = 'view3d.cursor3d' + row = box.row() + row.label(text='Replace wanted by "view3d.cusor_snap"') + row.operator('wm.copytext', text='Copy "view3d.cusor_snap"', icon='COPYDOWN').text = 'view3d.cusor_snap' + box.label(text='Or just create a new shortcut using cursor_snap') + + ## user prefs + box = layout.box() + box.label(text='Note: You can access user pref file and startup file in config folder') + box.operator("wm.path_open", text='Open config location').filepath = bpy.utils.user_resource('CONFIG') + + if self.pref_tabs == 'UPDATE': + addon_updater_ops.update_settings_ui(self, context) + + +### --- REGISTER --- + +# class GP_PG_ToolsSettings(bpy.types.PropertyGroup) : +# autotint_offset = bpy.props.IntProperty(name="Tint hue offset", description="offset the tint by this value for better color", default=0, min=-5000, max=5000, soft_min=-999, soft_max=999, step=1)#, subtype='PERCENTAGE' + + + + +@persistent +def remap_relative(dummy): + all_path = [lib for lib in bpy.utils.blend_paths(local=True)] + bpy.ops.file.make_paths_relative() + for i, lib in enumerate(bpy.utils.blend_paths(local=True)): + if all_path[i] != lib: + print('Remapped:', all_path[i], '\n>> ', lib) + +classes = ( +GPTB_prefs, +GP_PG_ToolsSettings, +GPT_OT_auto_tint_gp_layers, +) + +# register, unregister = bpy.utils.register_classes_factory(classes) + + +def register(): + addon_updater_ops.register(bl_info) + for cls in classes: + bpy.utils.register_class(cls) + OP_box_deform.register() + OP_helpers.register() + OP_file_checker.register() + OP_breakdowner.register() + OP_temp_cutter.register() + GP_colorize.register()## GP_guided_colorize. 
+ OP_playblast_bg.register() + OP_playblast.register() + OP_palettes.register() + OP_canvas_rotate.register() + OP_cursor_snap_canvas.register() + OP_render.register() + OP_copy_paste.register() + UI_tools.register() + keymaps.register() + bpy.types.Scene.gptoolprops = bpy.props.PointerProperty(type = GP_PG_ToolsSettings) + + bpy.app.handlers.save_pre.append(remap_relative) + +def unregister(): + bpy.app.handlers.save_pre.remove(remap_relative) + + keymaps.unregister() + addon_updater_ops.unregister() + for cls in reversed(classes): + bpy.utils.unregister_class(cls) + UI_tools.unregister() + OP_copy_paste.unregister() + OP_render.unregister() + OP_cursor_snap_canvas.unregister() + OP_canvas_rotate.unregister() + OP_palettes.unregister() + OP_file_checker.unregister() + OP_helpers.unregister() + OP_breakdowner.unregister() + OP_temp_cutter.unregister() + GP_colorize.unregister()## GP_guided_colorize. + OP_playblast_bg.unregister() + OP_playblast.unregister() + OP_box_deform.unregister() + del bpy.types.Scene.gptoolprops + + +if __name__ == "__main__": + register() diff --git a/addon_updater.py b/addon_updater.py new file mode 100644 index 0000000..064c269 --- /dev/null +++ b/addon_updater.py @@ -0,0 +1,1673 @@ +# ##### BEGIN GPL LICENSE BLOCK ##### +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# +# ##### END GPL LICENSE BLOCK ##### + + +""" +See documentation for usage +https://github.com/CGCookie/blender-addon-updater + +""" + +__version__ = "1.0.8" + +import errno +import platform +import ssl +import urllib.request +import urllib +import os +import json +import zipfile +import shutil +import threading +import fnmatch +from datetime import datetime, timedelta + +# blender imports, used in limited cases +import bpy +import addon_utils + +# ----------------------------------------------------------------------------- +# Define error messages/notices & hard coded globals +# ----------------------------------------------------------------------------- + +# currently not used +DEFAULT_TIMEOUT = 10 +DEFAULT_PER_PAGE = 30 + + +# ----------------------------------------------------------------------------- +# The main class +# ----------------------------------------------------------------------------- + +class Singleton_updater(object): + """ + This is the singleton class to reference a copy from, + it is the shared module level class + """ + def __init__(self): + + self._engine = GitlabEngine()#GithubEngine() + self._user = None + self._repo = None + self._website = None + self._current_version = None + self._subfolder_path = None + self._tags = [] + self._tag_latest = None + self._tag_names = [] + self._latest_release = None + self._use_releases = False + self._include_branches = False + self._include_branch_list = ['master'] + self._include_branch_autocheck = False + self._manual_only = False + self._version_min_update = None + self._version_max_update = None + + # by default, backup current addon if new is being loaded + self._backup_current = True + self._backup_ignore_patterns = None + + # set patterns for what files to overwrite on update + self._overwrite_patterns = ["*.py","*.pyc"] + self._remove_pre_update_patterns = [] + + # by default, don't auto enable/disable the addon on update + # as it is slightly less stable/won't always fully reload module + self._auto_reload_post_update = False + + # settings relating to frequency and whether to enable auto background check + self._check_interval_enable = False + self._check_interval_months = 0 + self._check_interval_days = 1 + self._check_interval_hours = 0 + self._check_interval_minutes = 0 + + # runtime variables, initial conditions + self._verbose = False + self._fake_install = False + self._async_checking = False # only true when async daemon started + self._update_ready = None + self._update_link = None + self._update_version = None + self._source_zip = None + self._check_thread = None + self._select_link = None + self.skip_tag = None + + # get from module data + self._addon = __package__.lower() + self._addon_package = __package__ # must not change + self._updater_path = os.path.join(os.path.dirname(__file__), + self._addon+"_updater") + self._addon_root = os.path.dirname(__file__) + self._json = {} + self._error = None + self._error_msg = None + self._prefiltered_tag_count = 0 + + # UI code only, ie not used within this module but still useful + # properties to have + + # to verify a valid import, in place of placeholder import + self.showpopups = True # used in UI to show or not show update popups + self.invalidupdater = False + + # pre-assign basic select-link function + def select_link_function(self, tag): + return tag["zipball_url"] + + self._select_link = select_link_function + + + # ------------------------------------------------------------------------- + # Getters and setters + # 
------------------------------------------------------------------------- + + + @property + def addon(self): + return self._addon + @addon.setter + def addon(self, value): + self._addon = str(value) + + @property + def api_url(self): + return self._engine.api_url + @api_url.setter + def api_url(self, value): + if self.check_is_url(value) == False: + raise ValueError("Not a valid URL: " + value) + self._engine.api_url = value + + @property + def async_checking(self): + return self._async_checking + + @property + def auto_reload_post_update(self): + return self._auto_reload_post_update + @auto_reload_post_update.setter + def auto_reload_post_update(self, value): + try: + self._auto_reload_post_update = bool(value) + except: + raise ValueError("Must be a boolean value") + + @property + def backup_current(self): + return self._backup_current + @backup_current.setter + def backup_current(self, value): + if value == None: + self._backup_current = False + return + else: + self._backup_current = value + + @property + def backup_ignore_patterns(self): + return self._backup_ignore_patterns + @backup_ignore_patterns.setter + def backup_ignore_patterns(self, value): + if value == None: + self._backup_ignore_patterns = None + return + elif type(value) != type(['list']): + raise ValueError("Backup pattern must be in list format") + else: + self._backup_ignore_patterns = value + + @property + def check_interval(self): + return (self._check_interval_enable, + self._check_interval_months, + self._check_interval_days, + self._check_interval_hours, + self._check_interval_minutes) + + @property + def current_version(self): + return self._current_version + @current_version.setter + def current_version(self, tuple_values): + if tuple_values==None: + self._current_version = None + return + elif type(tuple_values) is not tuple: + try: + tuple(tuple_values) + except: + raise ValueError( + "Not a tuple! current_version must be a tuple of integers") + for i in tuple_values: + if type(i) is not int: + raise ValueError( + "Not an integer! 
current_version must be a tuple of integers") + self._current_version = tuple(tuple_values) + + @property + def engine(self): + return self._engine.name + @engine.setter + def engine(self, value): + if value.lower()=="github": + self._engine = GithubEngine() + elif value.lower()=="gitlab": + self._engine = GitlabEngine() + elif value.lower()=="bitbucket": + self._engine = BitbucketEngine() + else: + raise ValueError("Invalid engine selection") + + @property + def error(self): + return self._error + + @property + def error_msg(self): + return self._error_msg + + @property + def fake_install(self): + return self._fake_install + @fake_install.setter + def fake_install(self, value): + if type(value) != type(False): + raise ValueError("fake_install must be a boolean value") + self._fake_install = bool(value) + + # not currently used + @property + def include_branch_autocheck(self): + return self._include_branch_autocheck + @include_branch_autocheck.setter + def include_branch_autocheck(self, value): + try: + self._include_branch_autocheck = bool(value) + except: + raise ValueError("include_branch_autocheck must be a boolean value") + + @property + def include_branch_list(self): + return self._include_branch_list + @include_branch_list.setter + def include_branch_list(self, value): + try: + if value == None: + self._include_branch_list = ['master'] + elif type(value) != type(['master']) or value==[]: + raise ValueError("include_branch_list should be a list of valid branches") + else: + self._include_branch_list = value + except: + raise ValueError("include_branch_list should be a list of valid branches") + + @property + def include_branches(self): + return self._include_branches + @include_branches.setter + def include_branches(self, value): + try: + self._include_branches = bool(value) + except: + raise ValueError("include_branches must be a boolean value") + + @property + def json(self): + if self._json == {}: + self.set_updater_json() + return self._json + + @property + def latest_release(self): + if self._latest_release == None: + return None + return self._latest_release + + @property + def manual_only(self): + return self._manual_only + @manual_only.setter + def manual_only(self, value): + try: + self._manual_only = bool(value) + except: + raise ValueError("manual_only must be a boolean value") + + @property + def overwrite_patterns(self): + return self._overwrite_patterns + @overwrite_patterns.setter + def overwrite_patterns(self, value): + if value == None: + self._overwrite_patterns = ["*.py","*.pyc"] + elif type(value) != type(['']): + raise ValueError("overwrite_patterns needs to be in a list format") + else: + self._overwrite_patterns = value + + @property + def private_token(self): + return self._engine.token + @private_token.setter + def private_token(self, value): + if value==None: + self._engine.token = None + else: + self._engine.token = str(value) + + @property + def remove_pre_update_patterns(self): + return self._remove_pre_update_patterns + @remove_pre_update_patterns.setter + def remove_pre_update_patterns(self, value): + if value == None: + self._remove_pre_update_patterns = [] + elif type(value) != type(['']): + raise ValueError("remove_pre_update_patterns needs to be in a list format") + else: + self._remove_pre_update_patterns = value + + @property + def repo(self): + return self._repo + @repo.setter + def repo(self, value): + try: + self._repo = str(value) + except: + raise ValueError("User must be a string") + + @property + def select_link(self): + return 
self._select_link + @select_link.setter + def select_link(self, value): + # ensure it is a function assignment, with signature: + # input self, tag; returns link name + if not hasattr(value, "__call__"): + raise ValueError("select_link must be a function") + self._select_link = value + + @property + def stage_path(self): + return self._updater_path + @stage_path.setter + def stage_path(self, value): + if value == None: + if self._verbose: print("Aborting assigning stage_path, it's null") + return + elif value != None and not os.path.exists(value): + try: + os.makedirs(value) + except: + if self._verbose: print("Error trying to staging path") + return + self._updater_path = value + + @property + def subfolder_path(self): + return self._subfolder_path + @subfolder_path.setter + def subfolder_path(self, value): + self._subfolder_path = value + + @property + def tags(self): + if self._tags == []: + return [] + tag_names = [] + for tag in self._tags: + tag_names.append(tag["name"]) + return tag_names + + @property + def tag_latest(self): + if self._tag_latest == None: + return None + return self._tag_latest["name"] + + @property + def update_link(self): + return self._update_link + + @property + def update_ready(self): + return self._update_ready + + @property + def update_version(self): + return self._update_version + + @property + def use_releases(self): + return self._use_releases + @use_releases.setter + def use_releases(self, value): + try: + self._use_releases = bool(value) + except: + raise ValueError("use_releases must be a boolean value") + + @property + def user(self): + return self._user + @user.setter + def user(self, value): + try: + self._user = str(value) + except: + raise ValueError("User must be a string value") + + @property + def verbose(self): + return self._verbose + @verbose.setter + def verbose(self, value): + try: + self._verbose = bool(value) + if self._verbose == True: + print(self._addon+" updater verbose is enabled") + except: + raise ValueError("Verbose must be a boolean value") + + @property + def version_max_update(self): + return self._version_max_update + @version_max_update.setter + def version_max_update(self, value): + if value == None: + self._version_max_update = None + return + if type(value) != type((1,2,3)): + raise ValueError("Version maximum must be a tuple") + for subvalue in value: + if type(subvalue) != int: + raise ValueError("Version elements must be integers") + self._version_max_update = value + + @property + def version_min_update(self): + return self._version_min_update + @version_min_update.setter + def version_min_update(self, value): + if value == None: + self._version_min_update = None + return + if type(value) != type((1,2,3)): + raise ValueError("Version minimum must be a tuple") + for subvalue in value: + if type(subvalue) != int: + raise ValueError("Version elements must be integers") + self._version_min_update = value + + @property + def website(self): + return self._website + @website.setter + def website(self, value): + if self.check_is_url(value) == False: + raise ValueError("Not a valid URL: " + value) + self._website = value + + + # ------------------------------------------------------------------------- + # Parameter validation related functions + # ------------------------------------------------------------------------- + + + def check_is_url(self, url): + if not ("http://" in url or "https://" in url): + return False + if "." 
not in url: + return False + return True + + def get_tag_names(self): + tag_names = [] + self.get_tags() + for tag in self._tags: + tag_names.append(tag["name"]) + return tag_names + + def set_check_interval(self,enable=False,months=0,days=14,hours=0,minutes=0): + # enabled = False, default initially will not check against frequency + # if enabled, default is then 2 weeks + + if type(enable) is not bool: + raise ValueError("Enable must be a boolean value") + if type(months) is not int: + raise ValueError("Months must be an integer value") + if type(days) is not int: + raise ValueError("Days must be an integer value") + if type(hours) is not int: + raise ValueError("Hours must be an integer value") + if type(minutes) is not int: + raise ValueError("Minutes must be an integer value") + + if enable==False: + self._check_interval_enable = False + else: + self._check_interval_enable = True + + self._check_interval_months = months + self._check_interval_days = days + self._check_interval_hours = hours + self._check_interval_minutes = minutes + + # declare how the class gets printed + + def __repr__(self): + return "".format(a=__file__) + + def __str__(self): + return "Updater, with user: {a}, repository: {b}, url: {c}".format( + a=self._user, + b=self._repo, c=self.form_repo_url()) + + + # ------------------------------------------------------------------------- + # API-related functions + # ------------------------------------------------------------------------- + + def form_repo_url(self): + return self._engine.form_repo_url(self) + + def form_tags_url(self): + return self._engine.form_tags_url(self) + + def form_branch_url(self, branch): + return self._engine.form_branch_url(branch, self) + + def get_tags(self): + request = self.form_tags_url() + if self._verbose: print("Getting tags from server") + + # get all tags, internet call + all_tags = self._engine.parse_tags(self.get_api(request), self) + if all_tags is not None: + self._prefiltered_tag_count = len(all_tags) + else: + self._prefiltered_tag_count = 0 + all_tags = [] + + # pre-process to skip tags + if self.skip_tag != None: + self._tags = [tg for tg in all_tags if self.skip_tag(self, tg)==False] + else: + self._tags = all_tags + + # get additional branches too, if needed, and place in front + # Does NO checking here whether branch is valid + if self._include_branches == True: + temp_branches = self._include_branch_list.copy() + temp_branches.reverse() + for branch in temp_branches: + request = self.form_branch_url(branch) + include = { + "name":branch.title(), + "zipball_url":request + } + self._tags = [include] + self._tags # append to front + + if self._tags == None: + # some error occurred + self._tag_latest = None + self._tags = [] + return + elif self._prefiltered_tag_count == 0 and self._include_branches == False: + self._tag_latest = None + if self._error == None: # if not None, could have had no internet + self._error = "No releases found" + self._error_msg = "No releases or tags found on this repository" + if self._verbose: print("No releases or tags found on this repository") + elif self._prefiltered_tag_count == 0 and self._include_branches == True: + if not self._error: self._tag_latest = self._tags[0] + if self._verbose: + branch = self._include_branch_list[0] + print("{} branch found, no releases".format(branch), self._tags[0]) + elif (len(self._tags)-len(self._include_branch_list)==0 and self._include_branches==True) \ + or (len(self._tags)==0 and self._include_branches==False) \ + and self._prefiltered_tag_count > 0: 
+ self._tag_latest = None + self._error = "No releases available" + self._error_msg = "No versions found within compatible version range" + if self._verbose: print("No versions found within compatible version range") + else: + if self._include_branches == False: + self._tag_latest = self._tags[0] + if self._verbose: print("Most recent tag found:",self._tags[0]['name']) + else: + # don't return branch if in list + n = len(self._include_branch_list) + self._tag_latest = self._tags[n] # guaranteed at least len()=n+1 + if self._verbose: print("Most recent tag found:",self._tags[n]['name']) + + + # all API calls to base url + def get_raw(self, url): + # print("Raw request:", url) + request = urllib.request.Request(url) + try: + context = ssl._create_unverified_context() + except: + # some blender packaged python versions don't have this, largely + # useful for local network setups otherwise minimal impact + context = None + + # setup private request headers if appropriate + if self._engine.token != None: + if self._engine.name == "gitlab": + request.add_header('PRIVATE-TOKEN',self._engine.token) + else: + if self._verbose: print("Tokens not setup for engine yet") + + # Always set user agent + request.add_header('User-Agent', "Python/"+str(platform.python_version())) + + # run the request + try: + if context: + result = urllib.request.urlopen(request, context=context) + else: + result = urllib.request.urlopen(request) + except urllib.error.HTTPError as e: + if str(e.code) == "403": + self._error = "HTTP error (access denied)" + self._error_msg = str(e.code) + " - server error response" + print(self._error, self._error_msg) + else: + self._error = "HTTP error" + self._error_msg = str(e.code) + print(self._error, self._error_msg) + self._update_ready = None + except urllib.error.URLError as e: + reason = str(e.reason) + if "TLSV1_ALERT" in reason or "SSL" in reason.upper(): + self._error = "Connection rejected, download manually" + self._error_msg = reason + print(self._error, self._error_msg) + else: + self._error = "URL error, check internet connection" + self._error_msg = reason + print(self._error, self._error_msg) + self._update_ready = None + return None + else: + result_string = result.read() + result.close() + return result_string.decode() + + + # result of all api calls, decoded into json format + def get_api(self, url): + # return the json version + get = None + get = self.get_raw(url) + if get != None: + try: + return json.JSONDecoder().decode(get) + except Exception as e: + self._error = "API response has invalid JSON format" + self._error_msg = str(e.reason) + self._update_ready = None + print(self._error, self._error_msg) + return None + else: + return None + + + # create a working directory and download the new files + def stage_repository(self, url): + + local = os.path.join(self._updater_path,"update_staging") + error = None + + # make/clear the staging folder + # ensure the folder is always "clean" + if self._verbose: print("Preparing staging folder for download:\n",local) + if os.path.isdir(local) == True: + try: + shutil.rmtree(local) + os.makedirs(local) + except: + error = "failed to remove existing staging directory" + else: + try: + os.makedirs(local) + except: + error = "failed to create staging directory" + + if error != None: + if self._verbose: print("Error: Aborting update, "+error) + self._error = "Update aborted, staging path error" + self._error_msg = "Error: {}".format(error) + return False + + if self._backup_current==True: + self.create_backup() + if 
self._verbose: print("Now retrieving the new source zip") + + self._source_zip = os.path.join(local,"source.zip") + + if self._verbose: print("Starting download update zip") + try: + request = urllib.request.Request(url) + context = ssl._create_unverified_context() + + # setup private token if appropriate + if self._engine.token != None: + if self._engine.name == "gitlab": + request.add_header('PRIVATE-TOKEN',self._engine.token) + else: + if self._verbose: print("Tokens not setup for selected engine yet") + + # Always set user agent + request.add_header('User-Agent', "Python/"+str(platform.python_version())) + + self.urlretrieve(urllib.request.urlopen(request,context=context), self._source_zip) + # add additional checks on file size being non-zero + if self._verbose: print("Successfully downloaded update zip") + return True + except Exception as e: + self._error = "Error retrieving download, bad link?" + self._error_msg = "Error: {}".format(e) + if self._verbose: + print("Error retrieving download, bad link?") + print("Error: {}".format(e)) + return False + + + def create_backup(self): + if self._verbose: print("Backing up current addon folder") + local = os.path.join(self._updater_path,"backup") + tempdest = os.path.join(self._addon_root, + os.pardir, + self._addon+"_updater_backup_temp") + + if self._verbose: print("Backup destination path: ",local) + + if os.path.isdir(local): + try: + shutil.rmtree(local) + except: + if self._verbose:print("Failed to removed previous backup folder, contininuing") + + # remove the temp folder; shouldn't exist but could if previously interrupted + if os.path.isdir(tempdest): + try: + shutil.rmtree(tempdest) + except: + if self._verbose:print("Failed to remove existing temp folder, contininuing") + # make the full addon copy, which temporarily places outside the addon folder + if self._backup_ignore_patterns != None: + shutil.copytree( + self._addon_root,tempdest, + ignore=shutil.ignore_patterns(*self._backup_ignore_patterns)) + else: + shutil.copytree(self._addon_root,tempdest) + shutil.move(tempdest,local) + + # save the date for future ref + now = datetime.now() + self._json["backup_date"] = "{m}-{d}-{yr}".format( + m=now.strftime("%B"),d=now.day,yr=now.year) + self.save_updater_json() + + def restore_backup(self): + if self._verbose: print("Restoring backup") + + if self._verbose: print("Backing up current addon folder") + backuploc = os.path.join(self._updater_path,"backup") + tempdest = os.path.join(self._addon_root, + os.pardir, + self._addon+"_updater_backup_temp") + tempdest = os.path.abspath(tempdest) + + # make the copy + shutil.move(backuploc,tempdest) + shutil.rmtree(self._addon_root) + os.rename(tempdest,self._addon_root) + + self._json["backup_date"] = "" + self._json["just_restored"] = True + self._json["just_updated"] = True + self.save_updater_json() + + self.reload_addon() + + def unpack_staged_zip(self,clean=False): + """Unzip the downloaded file, and validate contents""" + if os.path.isfile(self._source_zip) == False: + if self._verbose: print("Error, update zip not found") + self._error = "Install failed" + self._error_msg = "Downloaded zip not found" + return -1 + + # clear the existing source folder in case previous files remain + outdir = os.path.join(self._updater_path, "source") + try: + shutil.rmtree(outdir) + if self._verbose: + print("Source folder cleared") + except: + pass + + # Create parent directories if needed, would not be relevant unless + # installing addon into another location or via an addon manager + try: + 
os.mkdir(outdir) + except Exception as err: + print("Error occurred while making extract dir:") + print(str(err)) + self._error = "Install failed" + self._error_msg = "Failed to make extract directory" + return -1 + + if not os.path.isdir(outdir): + print("Failed to create source directory") + self._error = "Install failed" + self._error_msg = "Failed to create extract directory" + return -1 + + if self._verbose: + print("Begin extracting source from zip:", self._source_zip) + zfile = zipfile.ZipFile(self._source_zip, "r") + + if not zfile: + if self._verbose: + print("Resulting file is not a zip, cannot extract") + self._error = "Install failed" + self._error_msg = "Resulting file is not a zip, cannot extract" + return -1 + + # Now extract directly from the first subfolder (not root) + # this avoids adding the first subfolder to the path length, + # which can be too long if the download has the SHA in the name + zsep = '/' #os.sep # might just always be / even on windows + for name in zfile.namelist(): + if zsep not in name: + continue + top_folder = name[:name.index(zsep)+1] + if name == top_folder + zsep: + continue # skip top level folder + subpath = name[name.index(zsep)+1:] + if name.endswith(zsep): + try: + os.mkdir(os.path.join(outdir, subpath)) + if self._verbose: + print("Extract - mkdir: ", os.path.join(outdir, subpath)) + except OSError as exc: + if exc.errno != errno.EEXIST: + self._error = "Install failed" + self._error_msg = "Could not create folder from zip" + return -1 + else: + with open(os.path.join(outdir, subpath), "wb") as outfile: + data = zfile.read(name) + outfile.write(data) + if self._verbose: + print("Extract - create:", os.path.join(outdir, subpath)) + + if self._verbose: + print("Extracted source") + + unpath = os.path.join(self._updater_path, "source") + if not os.path.isdir(unpath): + self._error = "Install failed" + self._error_msg = "Extracted path does not exist" + print("Extracted path does not exist: ", unpath) + return -1 + + if self._subfolder_path: + self._subfolder_path.replace('/', os.path.sep) + self._subfolder_path.replace('\\', os.path.sep) + + # either directly in root of zip/one subfolder, or use specified path + if os.path.isfile(os.path.join(unpath,"__init__.py")) == False: + dirlist = os.listdir(unpath) + if len(dirlist)>0: + if self._subfolder_path == "" or self._subfolder_path == None: + unpath = os.path.join(unpath, dirlist[0]) + else: + unpath = os.path.join(unpath, self._subfolder_path) + + # smarter check for additional sub folders for a single folder + # containing __init__.py + if os.path.isfile(os.path.join(unpath,"__init__.py")) == False: + if self._verbose: + print("not a valid addon found") + print("Paths:") + print(dirlist) + self._error = "Install failed" + self._error_msg = "No __init__ file found in new source" + return -1 + + # merge code with running addon directory, using blender default behavior + # plus any modifiers indicated by user (e.g. 
force remove/keep) + self.deepMergeDirectory(self._addon_root, unpath, clean) + + # Now save the json state + # Change to True, to trigger the handler on other side + # if allowing reloading within same blender instance + self._json["just_updated"] = True + self.save_updater_json() + self.reload_addon() + self._update_ready = False + return 0 + + + def deepMergeDirectory(self,base,merger,clean=False): + """Merge folder 'merger' into folder 'base' without deleting existing""" + if not os.path.exists(base): + if self._verbose: + print("Base path does not exist:", base) + return -1 + elif not os.path.exists(merger): + if self._verbose: + print("Merger path does not exist") + return -1 + + # paths to be aware of and not overwrite/remove/etc + staging_path = os.path.join(self._updater_path,"update_staging") + backup_path = os.path.join(self._updater_path,"backup") + + # If clean install is enabled, clear existing files ahead of time + # note: will not delete the updater json, update folder, staging, or backup + # but will delete all other folders/files in addon directory + error = None + if clean==True: + try: + # implement clearing of all folders/files, except the + # updater folder and updater json + # Careful, this deletes entire subdirectories recursively... + # make sure that base is not a high level shared folder, but + # is dedicated just to the addon itself + if self._verbose: print("clean=True, clearing addon folder to fresh install state") + + # remove root files and folders (except update folder) + files = [f for f in os.listdir(base) if os.path.isfile(os.path.join(base,f))] + folders = [f for f in os.listdir(base) if os.path.isdir(os.path.join(base,f))] + + for f in files: + os.remove(os.path.join(base,f)) + print("Clean removing file {}".format(os.path.join(base,f))) + for f in folders: + if os.path.join(base,f)==self._updater_path: continue + shutil.rmtree(os.path.join(base,f)) + print("Clean removing folder and contents {}".format(os.path.join(base,f))) + + except Exception as err: + error = "failed to clear existing addon folder" + print(error, str(err)) + + # Walk through the base addon folder for rules on pre-removing + # but avoid removing/altering backup and updater file + for path, dirs, files in os.walk(base): + # prune ie skip updater folder + dirs[:] = [d for d in dirs if os.path.join(path,d) not in [self._updater_path]] + for file in files: + for ptrn in self.remove_pre_update_patterns: + if fnmatch.filter([file],ptrn): + try: + fl = os.path.join(path,file) + os.remove(fl) + if self._verbose: print("Pre-removed file "+file) + except OSError: + print("Failed to pre-remove "+file) + + # Walk through the temp addon sub folder for replacements + # this implements the overwrite rules, which apply after + # the above pre-removal rules. 
This also performs the + # actual file copying/replacements + for path, dirs, files in os.walk(merger): + # verify this structure works to prune updater sub folder overwriting + dirs[:] = [d for d in dirs if os.path.join(path,d) not in [self._updater_path]] + relPath = os.path.relpath(path, merger) + destPath = os.path.join(base, relPath) + if not os.path.exists(destPath): + os.makedirs(destPath) + for file in files: + # bring in additional logic around copying/replacing + # Blender default: overwrite .py's, don't overwrite the rest + destFile = os.path.join(destPath, file) + srcFile = os.path.join(path, file) + + # decide whether to replace if file already exists, and copy new over + if os.path.isfile(destFile): + # otherwise, check each file to see if matches an overwrite pattern + replaced=False + for ptrn in self._overwrite_patterns: + if fnmatch.filter([file],ptrn): + replaced=True + break + if replaced: + os.remove(destFile) + os.rename(srcFile, destFile) + if self._verbose: print("Overwrote file "+os.path.basename(destFile)) + else: + if self._verbose: print("Pattern not matched to "+os.path.basename(destFile)+", not overwritten") + else: + # file did not previously exist, simply move it over + os.rename(srcFile, destFile) + if self._verbose: print("New file "+os.path.basename(destFile)) + + # now remove the temp staging folder and downloaded zip + try: + shutil.rmtree(staging_path) + except: + error = "Error: Failed to remove existing staging directory, consider manually removing "+staging_path + if self._verbose: print(error) + + + def reload_addon(self): + # if post_update false, skip this function + # else, unload/reload addon & trigger popup + if self._auto_reload_post_update == False: + print("Restart blender to reload addon and complete update") + return + + if self._verbose: print("Reloading addon...") + addon_utils.modules(refresh=True) + bpy.utils.refresh_script_paths() + + # not allowed in restricted context, such as register module + # toggle to refresh + bpy.ops.wm.addon_disable(module=self._addon_package) + bpy.ops.wm.addon_refresh() + bpy.ops.wm.addon_enable(module=self._addon_package) + + + # ------------------------------------------------------------------------- + # Other non-api functions and setups + # ------------------------------------------------------------------------- + + def clear_state(self): + self._update_ready = None + self._update_link = None + self._update_version = None + self._source_zip = None + self._error = None + self._error_msg = None + + # custom urlretrieve implementation + def urlretrieve(self, urlfile, filepath): + chunk = 1024*8 + f = open(filepath, "wb") + while 1: + data = urlfile.read(chunk) + if not data: + #print("done.") + break + f.write(data) + #print("Read %s bytes"%len(data)) + f.close() + + + def version_tuple_from_text(self,text): + if text == None: return () + + # should go through string and remove all non-integers, + # and for any given break split into a different section + segments = [] + tmp = '' + for l in str(text): + if l.isdigit()==False: + if len(tmp)>0: + segments.append(int(tmp)) + tmp = '' + else: + tmp+=l + if len(tmp)>0: + segments.append(int(tmp)) + + if len(segments)==0: + if self._verbose: print("No version strings found text: ",text) + if self._include_branches == False: + return () + else: + return (text) + return tuple(segments) + + # called for running check in a background thread + def check_for_update_async(self, callback=None): + + if self._json != None and "update_ready" in self._json and 
self._json["version_text"]!={}: + if self._json["update_ready"] == True: + self._update_ready = True + self._update_link = self._json["version_text"]["link"] + self._update_version = str(self._json["version_text"]["version"]) + # cached update + callback(True) + return + + # do the check + if self._check_interval_enable == False: + return + elif self._async_checking == True: + if self._verbose: print("Skipping async check, already started") + return # already running the bg thread + elif self._update_ready == None: + self.start_async_check_update(False, callback) + + + def check_for_update_now(self, callback=None): + + self._error = None + self._error_msg = None + + if self._verbose: + print("Check update pressed, first getting current status") + if self._async_checking == True: + if self._verbose: print("Skipping async check, already started") + return # already running the bg thread + elif self._update_ready == None: + self.start_async_check_update(True, callback) + else: + self._update_ready = None + self.start_async_check_update(True, callback) + + + # this function is not async, will always return in sequential fashion + # but should have a parent which calls it in another thread + def check_for_update(self, now=False): + if self._verbose: print("Checking for update function") + + # clear the errors if any + self._error = None + self._error_msg = None + + # avoid running again in, just return past result if found + # but if force now check, then still do it + if self._update_ready != None and now == False: + return (self._update_ready,self._update_version,self._update_link) + + if self._current_version == None: + raise ValueError("current_version not yet defined") + if self._repo == None: + raise ValueError("repo not yet defined") + if self._user == None: + raise ValueError("username not yet defined") + + self.set_updater_json() # self._json + + if now == False and self.past_interval_timestamp()==False: + if self._verbose: + print("Aborting check for updated, check interval not reached") + return (False, None, None) + + # check if using tags or releases + # note that if called the first time, this will pull tags from online + if self._fake_install == True: + if self._verbose: + print("fake_install = True, setting fake version as ready") + self._update_ready = True + self._update_version = "(999,999,999)" + self._update_link = "http://127.0.0.1" + + return (self._update_ready, self._update_version, self._update_link) + + # primary internet call + self.get_tags() # sets self._tags and self._tag_latest + + self._json["last_check"] = str(datetime.now()) + self.save_updater_json() + + # can be () or ('master') in addition to branches, and version tag + new_version = self.version_tuple_from_text(self.tag_latest) + + if len(self._tags)==0: + self._update_ready = False + self._update_version = None + self._update_link = None + return (False, None, None) + if self._include_branches == False: + link = self.select_link(self, self._tags[0]) + else: + n = len(self._include_branch_list) + if len(self._tags)==n: + # effectively means no tags found on repo + # so provide the first one as default + link = self.select_link(self, self._tags[0]) + else: + link = self.select_link(self, self._tags[n]) + + if new_version == (): + self._update_ready = False + self._update_version = None + self._update_link = None + return (False, None, None) + elif str(new_version).lower() in self._include_branch_list: + # handle situation where master/whichever branch is included + # however, this code effectively is not 
triggered now + # as new_version will only be tag names, not branch names + if self._include_branch_autocheck == False: + # don't offer update as ready, + # but set the link for the default + # branch for installing + self._update_ready = False + self._update_version = new_version + self._update_link = link + self.save_updater_json() + return (True, new_version, link) + else: + raise ValueError("include_branch_autocheck: NOT YET DEVELOPED") + # bypass releases and look at timestamp of last update + # from a branch compared to now, see if commit values + # match or not. + + else: + # situation where branches not included + + if new_version > self._current_version: + + self._update_ready = True + self._update_version = new_version + self._update_link = link + self.save_updater_json() + return (True, new_version, link) + + # elif new_version != self._current_version: + # self._update_ready = False + # self._update_version = new_version + # self._update_link = link + # self.save_updater_json() + # return (True, new_version, link) + + # if no update, set ready to False from None + self._update_ready = False + self._update_version = None + self._update_link = None + return (False, None, None) + + + def set_tag(self, name): + """Assign the tag name and url to update to""" + tg = None + for tag in self._tags: + if name == tag["name"]: + tg = tag + break + if tg: + new_version = self.version_tuple_from_text(self.tag_latest) + self._update_version = new_version + self._update_link = self.select_link(self, tg) + elif self._include_branches and name in self._include_branch_list: + # scenario if reverting to a specific branch name instead of tag + tg = name + link = self.form_branch_url(tg) + self._update_version = name # this will break things + self._update_link = link + if not tg: + raise ValueError("Version tag not found: "+name) + + + def run_update(self,force=False,revert_tag=None,clean=False,callback=None): + """Runs an install, update, or reversion of an addon from online source + + Arguments: + force: Install assigned link, even if self.update_ready is False + revert_tag: Version to install, if none uses detected update link + clean: not used, but in future could use to totally refresh addon + callback: used to run function on update completion + """ + self._json["update_ready"] = False + self._json["ignore"] = False # clear ignore flag + self._json["version_text"] = {} + + if revert_tag != None: + self.set_tag(revert_tag) + self._update_ready = True + + # clear the errors if any + self._error = None + self._error_msg = None + + if self._verbose: print("Running update") + + if self._fake_install == True: + # change to True, to trigger the reload/"update installed" handler + if self._verbose: + print("fake_install=True") + print("Just reloading and running any handler triggers") + self._json["just_updated"] = True + self.save_updater_json() + if self._backup_current == True: + self.create_backup() + self.reload_addon() + self._update_ready = False + res = True # fake "success" zip download flag + + elif force==False: + if self._update_ready != True: + if self._verbose: + print("Update stopped, new version not ready") + if callback: + callback( + self._addon_package, + "Update stopped, new version not ready") + return "Update stopped, new version not ready" + elif self._update_link == None: + # this shouldn't happen if update is ready + if self._verbose: + print("Update stopped, update link unavailable") + if callback: + callback( + self._addon_package, + "Update stopped, update link 
unavailable") + return "Update stopped, update link unavailable" + + if self._verbose and revert_tag==None: + print("Staging update") + elif self._verbose: + print("Staging install") + + res = self.stage_repository(self._update_link) + if res !=True: + print("Error in staging repository: "+str(res)) + if callback != None: + callback(self._addon_package, self._error_msg) + return self._error_msg + res = self.unpack_staged_zip(clean) + if res<0: + if callback: + callback(self._addon_package, self._error_msg) + return res + + else: + if self._update_link == None: + if self._verbose: + print("Update stopped, could not get link") + return "Update stopped, could not get link" + if self._verbose: + print("Forcing update") + + res = self.stage_repository(self._update_link) + if res !=True: + print("Error in staging repository: "+str(res)) + if callback: + callback(self._addon_package, self._error_msg) + return self._error_msg + res = self.unpack_staged_zip(clean) + if res<0: + return res + # would need to compare against other versions held in tags + + # run the front-end's callback if provided + if callback: + callback(self._addon_package) + + # return something meaningful, 0 means it worked + return 0 + + + def past_interval_timestamp(self): + if self._check_interval_enable == False: + return True # ie this exact feature is disabled + + if "last_check" not in self._json or self._json["last_check"] == "": + return True + + now = datetime.now() + last_check = datetime.strptime(self._json["last_check"], + "%Y-%m-%d %H:%M:%S.%f") + next_check = last_check + offset = timedelta( + days=self._check_interval_days + 30*self._check_interval_months, + hours=self._check_interval_hours, + minutes=self._check_interval_minutes + ) + + delta = (now - offset) - last_check + if delta.total_seconds() > 0: + if self._verbose: + print("{} Updater: Time to check for updates!".format(self._addon)) + return True + + if self._verbose: + print("{} Updater: Determined it's not yet time to check for updates".format(self._addon)) + return False + + def get_json_path(self): + """Returns the full path to the JSON state file used by this updater. 
+ + Will also rename old file paths to addon-specific path if found + """ + json_path = os.path.join(self._updater_path, + "{}_updater_status.json".format(self._addon_package)) + old_json_path = os.path.join(self._updater_path, "updater_status.json") + + # rename old file if it exists + try: + os.rename(old_json_path, json_path) + except FileNotFoundError: + pass + except Exception as err: + print("Other OS error occurred while trying to rename old JSON") + print(err) + return json_path + + def set_updater_json(self): + """Load or initialize JSON dictionary data for updater state""" + if self._updater_path == None: + raise ValueError("updater_path is not defined") + elif os.path.isdir(self._updater_path) == False: + os.makedirs(self._updater_path) + + jpath = self.get_json_path() + if os.path.isfile(jpath): + with open(jpath) as data_file: + self._json = json.load(data_file) + if self._verbose: + print("{} Updater: Read in JSON settings from file".format( + self._addon)) + else: + # set data structure + self._json = { + "last_check":"", + "backup_date":"", + "update_ready":False, + "ignore":False, + "just_restored":False, + "just_updated":False, + "version_text":{} + } + self.save_updater_json() + + + def save_updater_json(self): + # first save the state + if self._update_ready == True: + if type(self._update_version) == type((0,0,0)): + self._json["update_ready"] = True + self._json["version_text"]["link"]=self._update_link + self._json["version_text"]["version"]=self._update_version + else: + self._json["update_ready"] = False + self._json["version_text"] = {} + else: + self._json["update_ready"] = False + self._json["version_text"] = {} + + jpath = self.get_json_path() + outf = open(jpath,'w') + data_out = json.dumps(self._json, indent=4) + outf.write(data_out) + outf.close() + if self._verbose: + print(self._addon+": Wrote out updater JSON settings to file, with the contents:") + print(self._json) + + def json_reset_postupdate(self): + self._json["just_updated"] = False + self._json["update_ready"] = False + self._json["version_text"] = {} + self.save_updater_json() + + def json_reset_restore(self): + self._json["just_restored"] = False + self._json["update_ready"] = False + self._json["version_text"] = {} + self.save_updater_json() + self._update_ready = None # reset so you could check update again + + def ignore_update(self): + self._json["ignore"] = True + self.save_updater_json() + + + # ------------------------------------------------------------------------- + # ASYNC stuff + # ------------------------------------------------------------------------- + + def start_async_check_update(self, now=False, callback=None): + """Start a background thread which will check for updates""" + if self._async_checking is True: + return + if self._verbose: + print("{} updater: Starting background checking thread".format( + self._addon)) + check_thread = threading.Thread(target=self.async_check_update, + args=(now,callback,)) + check_thread.daemon = True + self._check_thread = check_thread + check_thread.start() + + def async_check_update(self, now, callback=None): + """Perform update check, run as target of background thread""" + self._async_checking = True + if self._verbose: + print("{} BG thread: Checking for update now in background".format( + self._addon)) + + try: + self.check_for_update(now=now) + except Exception as exception: + print("Checking for update error:") + print(exception) + if not self._error: + self._update_ready = False + self._update_version = None + self._update_link = None 
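+ # failure is surfaced to the UI through _error / _error_msg below rather than re-raised from the background thread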
+ self._error = "Error occurred" + self._error_msg = "Encountered an error while checking for updates" + + self._async_checking = False + self._check_thread = None + + if self._verbose: + print("{} BG thread: Finished checking for update, doing callback".format(self._addon)) + if callback: + callback(self._update_ready) + + def stop_async_check_update(self): + """Method to give impression of stopping check for update. + + Currently does nothing but allows user to retry/stop blocking UI from + hitting a refresh button. This does not actually stop the thread, as it + will complete after the connection timeout regardless. If the thread + does complete with a successful response, this will be still displayed + on next UI refresh (ie no update, or update available). + """ + if self._check_thread != None: + if self._verbose: print("Thread will end in normal course.") + # however, "There is no direct kill method on a thread object." + # better to let it run its course + #self._check_thread.stop() + self._async_checking = False + self._error = None + self._error_msg = None + + +# ----------------------------------------------------------------------------- +# Updater Engines +# ----------------------------------------------------------------------------- + + +class BitbucketEngine(object): + """Integration to Bitbucket API for git-formatted repositories""" + + def __init__(self): + self.api_url = 'https://api.bitbucket.org' + self.token = None + self.name = "bitbucket" + + def form_repo_url(self, updater): + return self.api_url+"/2.0/repositories/"+updater.user+"/"+updater.repo + + def form_tags_url(self, updater): + return self.form_repo_url(updater) + "/refs/tags?sort=-name" + + def form_branch_url(self, branch, updater): + return self.get_zip_url(branch, updater) + + def get_zip_url(self, name, updater): + return "https://bitbucket.org/{user}/{repo}/get/{name}.zip".format( + user=updater.user, + repo=updater.repo, + name=name) + + def parse_tags(self, response, updater): + if response == None: + return [] + return [{"name": tag["name"], "zipball_url": self.get_zip_url(tag["name"], updater)} for tag in response["values"]] + + +class GithubEngine(object): + """Integration to Github API""" + + def __init__(self): + self.api_url = 'https://api.github.com' + self.token = None + self.name = "github" + + def form_repo_url(self, updater): + return "{}{}{}{}{}".format(self.api_url,"/repos/",updater.user, + "/",updater.repo) + + def form_tags_url(self, updater): + if updater.use_releases: + return "{}{}".format(self.form_repo_url(updater),"/releases") + else: + return "{}{}".format(self.form_repo_url(updater),"/tags") + + def form_branch_list_url(self, updater): + return "{}{}".format(self.form_repo_url(updater),"/branches") + + def form_branch_url(self, branch, updater): + return "{}{}{}".format(self.form_repo_url(updater), + "/zipball/",branch) + + def parse_tags(self, response, updater): + if response == None: + return [] + return response + + +class GitlabEngine(object): + """Integration to GitLab API""" + + def __init__(self): + self.api_url = 'https://gitlab.com' + self.token = None + self.name = "gitlab" + + def form_repo_url(self, updater): + return "{}{}{}".format(self.api_url,"/api/v4/projects/",updater.repo) + + def form_tags_url(self, updater): + return "{}{}".format(self.form_repo_url(updater),"/repository/tags") + + def form_branch_list_url(self, updater): + # does not validate branch name. 
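+ # note: for the GitLab v4 API, updater.repo must be the project's numeric ID or the URL-encoded 'namespace%2Fproject' path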
+ return "{}{}".format( + self.form_repo_url(updater), + "/repository/branches") + + def form_branch_url(self, branch, updater): + # Could clash with tag names and if it does, it will + # download TAG zip instead of branch zip to get + # direct path, would need. + return "{}{}{}".format( + self.form_repo_url(updater), + "/repository/archive.zip?sha=", + branch) + + def get_zip_url(self, sha, updater): + return "{base}/repository/archive.zip?sha={sha}".format( + base=self.form_repo_url(updater), + sha=sha) + + # def get_commit_zip(self, id, updater): + # return self.form_repo_url(updater)+"/repository/archive.zip?sha:"+id + + def parse_tags(self, response, updater): + if response == None: + return [] + return [{"name": tag["name"], "zipball_url": self.get_zip_url(tag["commit"]["id"], updater)} for tag in response] + + +# ----------------------------------------------------------------------------- +# The module-shared class instance, +# should be what's imported to other files +# ----------------------------------------------------------------------------- + +Updater = Singleton_updater() diff --git a/addon_updater_ops.py b/addon_updater_ops.py new file mode 100644 index 0000000..90b57ed --- /dev/null +++ b/addon_updater_ops.py @@ -0,0 +1,1503 @@ +# ##### BEGIN GPL LICENSE BLOCK ##### +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# ##### END GPL LICENSE BLOCK ##### + +"""Blender UI integrations for the addon updater. + +Implements draw calls, popups, and operators that use the addon_updater. +""" + +import os + +import bpy +from bpy.app.handlers import persistent + +# updater import, import safely +# Prevents popups for users with invalid python installs e.g. missing libraries +try: + from .addon_updater import Updater as updater +except Exception as e: + print("ERROR INITIALIZING UPDATER") + print(str(e)) + class Singleton_updater_none(object): + def __init__(self): + self.addon = None + self.verbose = False + self.invalidupdater = True # used to distinguish bad install + self.error = None + self.error_msg = None + self.async_checking = None + def clear_state(self): + self.addon = None + self.verbose = False + self.invalidupdater = True + self.error = None + self.error_msg = None + self.async_checking = None + def run_update(self): pass + def check_for_update(self): pass + updater = Singleton_updater_none() + updater.error = "Error initializing updater module" + updater.error_msg = str(e) + +# Must declare this before classes are loaded +# otherwise the bl_idname's will not match and have errors. 
+# Must be all lowercase and no spaces +updater.addon = "gp_toolbox" + + +# ----------------------------------------------------------------------------- +# Blender version utils +# ----------------------------------------------------------------------------- + + +def make_annotations(cls): + """Add annotation attribute to class fields to avoid Blender 2.8 warnings""" + if not hasattr(bpy.app, "version") or bpy.app.version < (2, 80): + return cls + bl_props = {k: v for k, v in cls.__dict__.items() if isinstance(v, tuple)} + if bl_props: + if '__annotations__' not in cls.__dict__: + setattr(cls, '__annotations__', {}) + annotations = cls.__dict__['__annotations__'] + for k, v in bl_props.items(): + annotations[k] = v + delattr(cls, k) + return cls + + +def layout_split(layout, factor=0.0, align=False): + """Intermediate method for pre and post blender 2.8 split UI function""" + if not hasattr(bpy.app, "version") or bpy.app.version < (2, 80): + return layout.split(percentage=factor, align=align) + return layout.split(factor=factor, align=align) + + +def get_user_preferences(context=None): + """Intermediate method for pre and post blender 2.8 grabbing preferences""" + if not context: + context = bpy.context + prefs = None + if hasattr(context, "user_preferences"): + prefs = context.user_preferences.addons.get(__package__, None) + elif hasattr(context, "preferences"): + prefs = context.preferences.addons.get(__package__, None) + if prefs: + return prefs.preferences + # To make the addon stable and non-exception prone, return None + # raise Exception("Could not fetch user preferences") + return None + + +# ----------------------------------------------------------------------------- +# Updater operators +# ----------------------------------------------------------------------------- + + +# simple popup for prompting checking for update & allow to install if available +class addon_updater_install_popup(bpy.types.Operator): + """Check and install update if available""" + bl_label = "Update {x} addon".format(x=updater.addon) + bl_idname = updater.addon+".updater_install_popup" + bl_description = "Popup menu to check and display current updates available" + bl_options = {'REGISTER', 'INTERNAL'} + + # if true, run clean install - ie remove all files before adding new + # equivalent to deleting the addon and reinstalling, except the + # updater folder/backup folder remains + clean_install = bpy.props.BoolProperty( + name="Clean install", + description="If enabled, completely clear the addon's folder before installing new update, creating a fresh install", + default=False, + options={'HIDDEN'} + ) + ignore_enum = bpy.props.EnumProperty( + name="Process update", + description="Decide to install, ignore, or defer new addon update", + items=[ + ("install","Update Now","Install update now"), + ("ignore","Ignore", "Ignore this update to prevent future popups"), + ("defer","Defer","Defer choice till next blender session") + ], + options={'HIDDEN'} + ) + + def check (self, context): + return True + + def invoke(self, context, event): + return context.window_manager.invoke_props_dialog(self) + + def draw(self, context): + layout = self.layout + if updater.invalidupdater == True: + layout.label(text="Updater module error") + return + elif updater.update_ready == True: + col = layout.column() + col.scale_y = 0.7 + col.label(text="Update {} ready!".format(str(updater.update_version)), + icon="LOOP_FORWARDS") + col.label(text="Choose 'Update Now' & press OK to install, ",icon="BLANK1") + col.label(text="or click 
outside window to defer",icon="BLANK1") + row = col.row() + row.prop(self,"ignore_enum",expand=True) + col.split() + elif updater.update_ready == False: + col = layout.column() + col.scale_y = 0.7 + col.label(text="No updates available") + col.label(text="Press okay to dismiss dialog") + # add option to force install + else: + # case: updater.update_ready = None + # we have not yet checked for the update + layout.label(text="Check for update now?") + + # potentially in future, could have UI for 'check to select old version' + # to revert back to. + + def execute(self,context): + + # in case of error importing updater + if updater.invalidupdater == True: + return {'CANCELLED'} + + if updater.manual_only==True: + bpy.ops.wm.url_open(url=updater.website) + elif updater.update_ready == True: + + # action based on enum selection + if self.ignore_enum=='defer': + return {'FINISHED'} + elif self.ignore_enum=='ignore': + updater.ignore_update() + return {'FINISHED'} + #else: "install update now!" + + res = updater.run_update( + force=False, + callback=post_update_callback, + clean=self.clean_install) + # should return 0, if not something happened + if updater.verbose: + if res==0: + print("Updater returned successful") + else: + print("Updater returned {}, error occurred".format(res)) + elif updater.update_ready == None: + _ = updater.check_for_update(now=True) + + # re-launch this dialog + atr = addon_updater_install_popup.bl_idname.split(".") + getattr(getattr(bpy.ops, atr[0]),atr[1])('INVOKE_DEFAULT') + else: + if updater.verbose: + print("Doing nothing, not ready for update") + return {'FINISHED'} + + +# User preference check-now operator +class addon_updater_check_now(bpy.types.Operator): + bl_label = "Check now for "+updater.addon+" update" + bl_idname = updater.addon+".updater_check_now" + bl_description = "Check now for an update to the {x} addon".format( + x=updater.addon) + bl_options = {'REGISTER', 'INTERNAL'} + + def execute(self,context): + if updater.invalidupdater == True: + return {'CANCELLED'} + + if updater.async_checking == True and updater.error == None: + # Check already happened + # Used here to just avoid constant applying settings below + # Ignoring if error, to prevent being stuck on the error screen + return {'CANCELLED'} + + # apply the UI settings + settings = get_user_preferences(context) + if not settings: + if updater.verbose: + print("Could not get {} preferences, update check skipped".format( + __package__)) + return {'CANCELLED'} + updater.set_check_interval(enable=settings.auto_check_update, + months=settings.updater_intrval_months, + days=settings.updater_intrval_days, + hours=settings.updater_intrval_hours, + minutes=settings.updater_intrval_minutes + ) # optional, if auto_check_update + + # input is an optional callback function + # this function should take a bool input, if true: update ready + # if false, no update ready + updater.check_for_update_now(ui_refresh) + + return {'FINISHED'} + + +class addon_updater_update_now(bpy.types.Operator): + bl_label = "Update "+updater.addon+" addon now" + bl_idname = updater.addon+".updater_update_now" + bl_description = "Update to the latest version of the {x} addon".format( + x=updater.addon) + bl_options = {'REGISTER', 'INTERNAL'} + + # if true, run clean install - ie remove all files before adding new + # equivalent to deleting the addon and reinstalling, except the + # updater folder/backup folder remains + clean_install = bpy.props.BoolProperty( + name="Clean install", + description="If enabled, completely clear 
the addon's folder before installing new update, creating a fresh install", + default=False, + options={'HIDDEN'} + ) + + def execute(self,context): + + # in case of error importing updater + if updater.invalidupdater == True: + return {'CANCELLED'} + + if updater.manual_only == True: + bpy.ops.wm.url_open(url=updater.website) + if updater.update_ready == True: + # if it fails, offer to open the website instead + try: + res = updater.run_update( + force=False, + callback=post_update_callback, + clean=self.clean_install) + + # should return 0, if not something happened + if updater.verbose: + if res==0: print("Updater returned successful") + else: print("Updater returned "+str(res)+", error occurred") + except Exception as e: + updater._error = "Error trying to run update" + updater._error_msg = str(e) + atr = addon_updater_install_manually.bl_idname.split(".") + getattr(getattr(bpy.ops, atr[0]),atr[1])('INVOKE_DEFAULT') + elif updater.update_ready == None: + (update_ready, version, link) = updater.check_for_update(now=True) + # re-launch this dialog + atr = addon_updater_install_popup.bl_idname.split(".") + getattr(getattr(bpy.ops, atr[0]),atr[1])('INVOKE_DEFAULT') + + elif updater.update_ready == False: + self.report({'INFO'}, "Nothing to update") + return {'CANCELLED'} + else: + self.report({'ERROR'}, "Encountered problem while trying to update") + return {'CANCELLED'} + + return {'FINISHED'} + + +class addon_updater_update_target(bpy.types.Operator): + bl_label = updater.addon+" version target" + bl_idname = updater.addon+".updater_update_target" + bl_description = "Install a targeted version of the {x} addon".format( + x=updater.addon) + bl_options = {'REGISTER', 'INTERNAL'} + + def target_version(self, context): + # in case of error importing updater + if updater.invalidupdater == True: + ret = [] + + ret = [] + i=0 + for tag in updater.tags: + ret.append( (tag,tag,"Select to install "+tag) ) + i+=1 + return ret + + target = bpy.props.EnumProperty( + name="Target version to install", + description="Select the version to install", + items=target_version + ) + + # if true, run clean install - ie remove all files before adding new + # equivalent to deleting the addon and reinstalling, except the + # updater folder/backup folder remains + clean_install = bpy.props.BoolProperty( + name="Clean install", + description="If enabled, completely clear the addon's folder before installing new update, creating a fresh install", + default=False, + options={'HIDDEN'} + ) + + @classmethod + def poll(cls, context): + if updater.invalidupdater == True: return False + return updater.update_ready != None and len(updater.tags)>0 + + def invoke(self, context, event): + return context.window_manager.invoke_props_dialog(self) + + def draw(self, context): + layout = self.layout + if updater.invalidupdater == True: + layout.label(text="Updater error") + return + split = layout_split(layout, factor=0.66) + subcol = split.column() + subcol.label(text="Select install version") + subcol = split.column() + subcol.prop(self, "target", text="") + + + def execute(self,context): + + # in case of error importing updater + if updater.invalidupdater == True: + return {'CANCELLED'} + + res = updater.run_update( + force=False, + revert_tag=self.target, + callback=post_update_callback, + clean=self.clean_install) + + # should return 0, if not something happened + if res==0: + if updater.verbose: + print("Updater returned successful") + else: + if updater.verbose: + print("Updater returned "+str(res)+", error occurred") + 
return {'CANCELLED'} + + return {'FINISHED'} + + +class addon_updater_install_manually(bpy.types.Operator): + """As a fallback, direct the user to download the addon manually""" + bl_label = "Install update manually" + bl_idname = updater.addon+".updater_install_manually" + bl_description = "Proceed to manually install update" + bl_options = {'REGISTER', 'INTERNAL'} + + error = bpy.props.StringProperty( + name="Error Occurred", + default="", + options={'HIDDEN'} + ) + + def invoke(self, context, event): + return context.window_manager.invoke_popup(self) + + def draw(self, context): + layout = self.layout + + if updater.invalidupdater == True: + layout.label(text="Updater error") + return + + # use a "failed flag"? it shows this label if the case failed. + if self.error!="": + col = layout.column() + col.scale_y = 0.7 + col.label(text="There was an issue trying to auto-install",icon="ERROR") + col.label(text="Press the download button below and install",icon="BLANK1") + col.label(text="the zip file like a normal addon.",icon="BLANK1") + else: + col = layout.column() + col.scale_y = 0.7 + col.label(text="Install the addon manually") + col.label(text="Press the download button below and install") + col.label(text="the zip file like a normal addon.") + + # if check hasn't happened, i.e. accidentally called this menu + # allow to check here + + row = layout.row() + + if updater.update_link != None: + row.operator("wm.url_open", + text="Direct download").url=updater.update_link + else: + row.operator("wm.url_open", + text="(failed to retrieve direct download)") + row.enabled = False + + if updater.website != None: + row = layout.row() + row.operator("wm.url_open",text="Open website").url=\ + updater.website + else: + row = layout.row() + row.label(text="See source website to download the update") + + def execute(self,context): + return {'FINISHED'} + + +class addon_updater_updated_successful(bpy.types.Operator): + """Addon in place, popup telling user it completed or what went wrong""" + bl_label = "Installation Report" + bl_idname = updater.addon+".updater_update_successful" + bl_description = "Update installation response" + bl_options = {'REGISTER', 'INTERNAL', 'UNDO'} + + error = bpy.props.StringProperty( + name="Error Occurred", + default="", + options={'HIDDEN'} + ) + + def invoke(self, context, event): + return context.window_manager.invoke_props_popup(self, event) + + def draw(self, context): + layout = self.layout + + if updater.invalidupdater == True: + layout.label(text="Updater error") + return + + saved = updater.json + if self.error != "": + col = layout.column() + col.scale_y = 0.7 + col.label(text="Error occurred, did not install", icon="ERROR") + if updater.error_msg: + msg = updater.error_msg + else: + msg = self.error + col.label(text=str(msg), icon="BLANK1") + rw = col.row() + rw.scale_y = 2 + rw.operator("wm.url_open", + text="Click for manual download.", + icon="BLANK1" + ).url=updater.website + # manual download button here + elif updater.auto_reload_post_update == False: + # tell user to restart blender + if "just_restored" in saved and saved["just_restored"] == True: + col = layout.column() + col.label(text="Addon restored", icon="RECOVER_LAST") + alert_row = col.row() + alert_row.alert = True + alert_row.operator( + "wm.quit_blender", + text="Restart blender to reload", + icon="BLANK1") + updater.json_reset_restore() + else: + col = layout.column() + col.label(text="Addon successfully installed", icon="FILE_TICK") + alert_row = col.row() + alert_row.alert = True + 
alert_row.operator( + "wm.quit_blender", + text="Restart blender to reload", + icon="BLANK1") + + else: + # reload addon, but still recommend they restart blender + if "just_restored" in saved and saved["just_restored"] == True: + col = layout.column() + col.scale_y = 0.7 + col.label(text="Addon restored", icon="RECOVER_LAST") + col.label(text="Consider restarting blender to fully reload.", + icon="BLANK1") + updater.json_reset_restore() + else: + col = layout.column() + col.scale_y = 0.7 + col.label(text="Addon successfully installed", icon="FILE_TICK") + col.label(text="Consider restarting blender to fully reload.", + icon="BLANK1") + + def execute(self, context): + return {'FINISHED'} + + +class addon_updater_restore_backup(bpy.types.Operator): + """Restore addon from backup""" + bl_label = "Restore backup" + bl_idname = updater.addon+".updater_restore_backup" + bl_description = "Restore addon from backup" + bl_options = {'REGISTER', 'INTERNAL'} + + @classmethod + def poll(cls, context): + try: + return os.path.isdir(os.path.join(updater.stage_path,"backup")) + except: + return False + + def execute(self, context): + # in case of error importing updater + if updater.invalidupdater == True: + return {'CANCELLED'} + updater.restore_backup() + return {'FINISHED'} + + +class addon_updater_ignore(bpy.types.Operator): + """Prevent future update notice popups""" + bl_label = "Ignore update" + bl_idname = updater.addon+".updater_ignore" + bl_description = "Ignore update to prevent future popups" + bl_options = {'REGISTER', 'INTERNAL'} + + @classmethod + def poll(cls, context): + if updater.invalidupdater == True: + return False + elif updater.update_ready == True: + return True + else: + return False + + def execute(self, context): + # in case of error importing updater + if updater.invalidupdater == True: + return {'CANCELLED'} + updater.ignore_update() + self.report({"INFO"},"Open addon preferences for updater options") + return {'FINISHED'} + + +class addon_updater_end_background(bpy.types.Operator): + """Stop checking for update in the background""" + bl_label = "End background check" + bl_idname = updater.addon+".end_background_check" + bl_description = "Stop checking for update in the background" + bl_options = {'REGISTER', 'INTERNAL'} + + # @classmethod + # def poll(cls, context): + # if updater.async_checking == True: + # return True + # else: + # return False + + def execute(self, context): + # in case of error importing updater + if updater.invalidupdater == True: + return {'CANCELLED'} + updater.stop_async_check_update() + return {'FINISHED'} + + +# ----------------------------------------------------------------------------- +# Handler related, to create popups +# ----------------------------------------------------------------------------- + + +# global vars used to prevent duplicate popup handlers +ran_autocheck_install_popup = False +ran_update_sucess_popup = False + +# global var for preventing successive calls +ran_background_check = False + +@persistent +def updater_run_success_popup_handler(scene): + global ran_update_sucess_popup + ran_update_sucess_popup = True + + # in case of error importing updater + if updater.invalidupdater == True: + return + + try: + if "scene_update_post" in dir(bpy.app.handlers): + bpy.app.handlers.scene_update_post.remove( + updater_run_success_popup_handler) + else: + bpy.app.handlers.depsgraph_update_post.remove( + updater_run_success_popup_handler) + except: + pass + + atr = addon_updater_updated_successful.bl_idname.split(".") + 
getattr(getattr(bpy.ops, atr[0]),atr[1])('INVOKE_DEFAULT') + + +@persistent +def updater_run_install_popup_handler(scene): + global ran_autocheck_install_popup + ran_autocheck_install_popup = True + + # in case of error importing updater + if updater.invalidupdater == True: + return + + try: + if "scene_update_post" in dir(bpy.app.handlers): + bpy.app.handlers.scene_update_post.remove( + updater_run_install_popup_handler) + else: + bpy.app.handlers.depsgraph_update_post.remove( + updater_run_install_popup_handler) + except: + pass + + if "ignore" in updater.json and updater.json["ignore"] == True: + return # don't do popup if ignore pressed + # elif type(updater.update_version) != type((0,0,0)): + # # likely was from master or another branch, shouldn't trigger popup + # updater.json_reset_restore() + # return + elif "version_text" in updater.json and "version" in updater.json["version_text"]: + version = updater.json["version_text"]["version"] + ver_tuple = updater.version_tuple_from_text(version) + + if ver_tuple < updater.current_version: + # user probably manually installed to get the up to date addon + # in here. Clear out the update flag using this function + if updater.verbose: + print("{} updater: appears user updated, clearing flag".format(\ + updater.addon)) + updater.json_reset_restore() + return + atr = addon_updater_install_popup.bl_idname.split(".") + getattr(getattr(bpy.ops, atr[0]),atr[1])('INVOKE_DEFAULT') + + +def background_update_callback(update_ready): + """Passed into the updater, background thread updater""" + global ran_autocheck_install_popup + + # in case of error importing updater + if updater.invalidupdater == True: + return + if updater.showpopups == False: + return + if update_ready != True: + return + + # see if we need add to the update handler to trigger the popup + handlers = [] + if "scene_update_post" in dir(bpy.app.handlers): # 2.7x + handlers = bpy.app.handlers.scene_update_post + else: # 2.8x + handlers = bpy.app.handlers.depsgraph_update_post + in_handles = updater_run_install_popup_handler in handlers + + if in_handles or ran_autocheck_install_popup: + return + + if "scene_update_post" in dir(bpy.app.handlers): # 2.7x + bpy.app.handlers.scene_update_post.append( + updater_run_install_popup_handler) + else: # 2.8x + bpy.app.handlers.depsgraph_update_post.append( + updater_run_install_popup_handler) + ran_autocheck_install_popup = True + + +def post_update_callback(module_name, res=None): + """Callback for once the run_update function has completed + + Only makes sense to use this if "auto_reload_post_update" == False, + i.e. 
don't auto-restart the addon + + Arguments: + module_name: returns the module name from updater, but unused here + res: If an error occurred, this is the detail string + """ + + # in case of error importing updater + if updater.invalidupdater == True: + return + + if res==None: + # this is the same code as in conditional at the end of the register function + # ie if "auto_reload_post_update" == True, comment out this code + if updater.verbose: + print("{} updater: Running post update callback".format(updater.addon)) + + atr = addon_updater_updated_successful.bl_idname.split(".") + getattr(getattr(bpy.ops, atr[0]),atr[1])('INVOKE_DEFAULT') + global ran_update_sucess_popup + ran_update_sucess_popup = True + else: + # some kind of error occurred and it was unable to install, + # offer manual download instead + atr = addon_updater_updated_successful.bl_idname.split(".") + getattr(getattr(bpy.ops, atr[0]),atr[1])('INVOKE_DEFAULT',error=res) + return + + +def ui_refresh(update_status): + # find a way to just re-draw self? + # callback intended for trigger by async thread + for windowManager in bpy.data.window_managers: + for window in windowManager.windows: + for area in window.screen.areas: + area.tag_redraw() + + +def check_for_update_background(): + """Function for asynchronous background check. + + *Could* be called on register, but would be bad practice. + """ + if updater.invalidupdater == True: + return + global ran_background_check + if ran_background_check == True: + # Global var ensures check only happens once + return + elif updater.update_ready != None or updater.async_checking == True: + # Check already happened + # Used here to just avoid constant applying settings below + return + + # apply the UI settings + settings = get_user_preferences(bpy.context) + if not settings: + return + updater.set_check_interval(enable=settings.auto_check_update, + months=settings.updater_intrval_months, + days=settings.updater_intrval_days, + hours=settings.updater_intrval_hours, + minutes=settings.updater_intrval_minutes + ) # optional, if auto_check_update + + # input is an optional callback function + # this function should take a bool input, if true: update ready + # if false, no update ready + if updater.verbose: + print("{} updater: Running background check for update".format(\ + updater.addon)) + updater.check_for_update_async(background_update_callback) + ran_background_check = True + + +def check_for_update_nonthreaded(self, context): + """Can be placed in front of other operators to launch when pressed""" + if updater.invalidupdater == True: + return + + # only check if it's ready, ie after the time interval specified + # should be the async wrapper call here + settings = get_user_preferences(bpy.context) + if not settings: + if updater.verbose: + print("Could not get {} preferences, update check skipped".format( + __package__)) + return + updater.set_check_interval(enable=settings.auto_check_update, + months=settings.updater_intrval_months, + days=settings.updater_intrval_days, + hours=settings.updater_intrval_hours, + minutes=settings.updater_intrval_minutes + ) # optional, if auto_check_update + + (update_ready, version, link) = updater.check_for_update(now=False) + if update_ready == True: + atr = addon_updater_install_popup.bl_idname.split(".") + getattr(getattr(bpy.ops, atr[0]),atr[1])('INVOKE_DEFAULT') + else: + if updater.verbose: print("No update ready") + self.report({'INFO'}, "No update ready") + + +def showReloadPopup(): + """For use in register only, to show popup after 
re-enabling the addon + + Must be enabled by developer + """ + if updater.invalidupdater == True: + return + saved_state = updater.json + global ran_update_sucess_popup + + has_state = saved_state != None + just_updated = "just_updated" in saved_state + updated_info = saved_state["just_updated"] + + if not (has_state and just_updated and updated_info): + return + + updater.json_reset_postupdate() # so this only runs once + + # no handlers in this case + if updater.auto_reload_post_update == False: + return + + # see if we need add to the update handler to trigger the popup + handlers = [] + if "scene_update_post" in dir(bpy.app.handlers): # 2.7x + handlers = bpy.app.handlers.scene_update_post + else: # 2.8x + handlers = bpy.app.handlers.depsgraph_update_post + in_handles = updater_run_success_popup_handler in handlers + + if in_handles or ran_update_sucess_popup is True: + return + + if "scene_update_post" in dir(bpy.app.handlers): # 2.7x + bpy.app.handlers.scene_update_post.append( + updater_run_success_popup_handler) + else: # 2.8x + bpy.app.handlers.depsgraph_update_post.append( + updater_run_success_popup_handler) + ran_update_sucess_popup = True + + +# ----------------------------------------------------------------------------- +# Example UI integrations +# ----------------------------------------------------------------------------- + + +def update_notice_box_ui(self, context): + """ Panel - Update Available for placement at end/beginning of panel + + After a check for update has occurred, this function will draw a box + saying an update is ready, and give a button for: update now, open website, + or ignore popup. Ideal to be placed at the end / beginning of a panel + """ + + if updater.invalidupdater == True: + return + + saved_state = updater.json + if updater.auto_reload_post_update == False: + if "just_updated" in saved_state and saved_state["just_updated"] == True: + layout = self.layout + box = layout.box() + col = box.column() + alert_row = col.row() + alert_row.alert = True + alert_row.operator( + "wm.quit_blender", + text="Restart blender", + icon="ERROR") + col.label(text="to complete update") + + return + + # if user pressed ignore, don't draw the box + if "ignore" in updater.json and updater.json["ignore"] == True: + return + if updater.update_ready != True: + return + + layout = self.layout + box = layout.box() + col = box.column(align=True) + col.label(text="Update ready!",icon="ERROR") + col.separator() + row = col.row(align=True) + split = row.split(align=True) + colL = split.column(align=True) + colL.scale_y = 1.5 + colL.operator(addon_updater_ignore.bl_idname,icon="X",text="Ignore") + colR = split.column(align=True) + colR.scale_y = 1.5 + if updater.manual_only==False: + colR.operator(addon_updater_update_now.bl_idname, + text="Update", icon="LOOP_FORWARDS") + col.operator("wm.url_open", text="Open website").url = updater.website + #col.operator("wm.url_open",text="Direct download").url=updater.update_link + col.operator(addon_updater_install_manually.bl_idname, + text="Install manually") + else: + #col.operator("wm.url_open",text="Direct download").url=updater.update_link + col.operator("wm.url_open", text="Get it now").url = updater.website + + +def update_settings_ui(self, context, element=None): + """Preferences - for drawing with full width inside user preferences + + Create a function that can be run inside user preferences panel for prefs UI + Place inside UI draw using: addon_updater_ops.updaterSettingsUI(self, context) + or by: 
addon_updater_ops.updaterSettingsUI(context) + """ + + # element is a UI element, such as layout, a row, column, or box + if element==None: + element = self.layout + box = element.box() + + # in case of error importing updater + if updater.invalidupdater == True: + box.label(text="Error initializing updater code:") + box.label(text=updater.error_msg) + return + settings = get_user_preferences(context) + if not settings: + box.label(text="Error getting updater preferences", icon='ERROR') + return + + # auto-update settings + box.label(text="Updater Settings") + row = box.row() + + # special case to tell user to restart blender, if set that way + if updater.auto_reload_post_update == False: + saved_state = updater.json + if "just_updated" in saved_state and saved_state["just_updated"] == True: + row.alert = True + row.operator( + "wm.quit_blender", + text="Restart blender to complete update", + icon="ERROR") + return + + split = layout_split(row, factor=0.4) + subcol = split.column() + subcol.prop(settings, "auto_check_update") + subcol = split.column() + + if settings.auto_check_update==False: + subcol.enabled = False + subrow = subcol.row() + subrow.label(text="Interval between checks") + subrow = subcol.row(align=True) + checkcol = subrow.column(align=True) + checkcol.prop(settings,"updater_intrval_months") + checkcol = subrow.column(align=True) + checkcol.prop(settings,"updater_intrval_days") + checkcol = subrow.column(align=True) + + # Consider un-commenting for local dev (e.g. to set shorter intervals) + # checkcol.prop(settings,"updater_intrval_hours") + # checkcol = subrow.column(align=True) + # checkcol.prop(settings,"updater_intrval_minutes") + + # checking / managing updates + row = box.row() + col = row.column() + if updater.error != None: + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.scale_y = 2 + if "ssl" in updater.error_msg.lower(): + split.enabled = True + split.operator(addon_updater_install_manually.bl_idname, + text=updater.error) + else: + split.enabled = False + split.operator(addon_updater_check_now.bl_idname, + text=updater.error) + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text = "", icon="FILE_REFRESH") + + elif updater.update_ready == None and updater.async_checking == False: + col.scale_y = 2 + col.operator(addon_updater_check_now.bl_idname) + elif updater.update_ready == None: # async is running + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.enabled = False + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text="Checking...") + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_end_background.bl_idname, + text = "", icon="X") + + elif updater.include_branches==True and \ + len(updater.tags)==len(updater.include_branch_list) and \ + updater.manual_only==False: + # no releases found, but still show the appropriate branch + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_update_now.bl_idname, + text="Update directly to "+str(updater.include_branch_list[0])) + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text = "", icon="FILE_REFRESH") + + elif updater.update_ready==True and updater.manual_only==False: + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.scale_y = 
2 + split.operator(addon_updater_update_now.bl_idname, + text="Update now to "+str(updater.update_version)) + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text = "", icon="FILE_REFRESH") + + elif updater.update_ready==True and updater.manual_only==True: + col.scale_y = 2 + col.operator("wm.url_open", + text="Download "+str(updater.update_version)).url=updater.website + else: # i.e. that updater.update_ready == False + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.enabled = False + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text="Addon is up to date") + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text = "", icon="FILE_REFRESH") + + if updater.manual_only == False: + col = row.column(align=True) + #col.operator(addon_updater_update_target.bl_idname, + if updater.include_branches == True and len(updater.include_branch_list)>0: + branch = updater.include_branch_list[0] + col.operator(addon_updater_update_target.bl_idname, + text="Install latest {} / old version".format(branch)) + else: + col.operator(addon_updater_update_target.bl_idname, + text="Reinstall / install old version") + lastdate = "none found" + backuppath = os.path.join(updater.stage_path,"backup") + if "backup_date" in updater.json and os.path.isdir(backuppath): + if updater.json["backup_date"] == "": + lastdate = "Date not found" + else: + lastdate = updater.json["backup_date"] + backuptext = "Restore addon backup ({})".format(lastdate) + col.operator(addon_updater_restore_backup.bl_idname, text=backuptext) + + row = box.row() + row.scale_y = 0.7 + lastcheck = updater.json["last_check"] + if updater.error != None and updater.error_msg != None: + row.label(text=updater.error_msg) + elif lastcheck != "" and lastcheck != None: + lastcheck = lastcheck[0: lastcheck.index(".") ] + row.label(text="Last update check: " + lastcheck) + else: + row.label(text="Last update check: Never") + + +def update_settings_ui_condensed(self, context, element=None): + """Preferences - Condensed drawing within preferences + + Alternate draw for user preferences or other places, does not draw a box + """ + + # element is a UI element, such as layout, a row, column, or box + if element==None: + element = self.layout + row = element.row() + + # in case of error importing updater + if updater.invalidupdater == True: + row.label(text="Error initializing updater code:") + row.label(text=updater.error_msg) + return + settings = get_user_preferences(context) + if not settings: + row.label(text="Error getting updater preferences", icon='ERROR') + return + + # special case to tell user to restart blender, if set that way + if updater.auto_reload_post_update == False: + saved_state = updater.json + if "just_updated" in saved_state and saved_state["just_updated"] == True: + row.alert = True # mark red + row.operator( + "wm.quit_blender", + text="Restart blender to complete update", + icon="ERROR") + return + + col = row.column() + if updater.error != None: + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.scale_y = 2 + if "ssl" in updater.error_msg.lower(): + split.enabled = True + split.operator(addon_updater_install_manually.bl_idname, + text=updater.error) + else: + split.enabled = False + split.operator(addon_updater_check_now.bl_idname, + text=updater.error) + split = subcol.split(align=True) + split.scale_y = 2 + 
split.operator(addon_updater_check_now.bl_idname, + text = "", icon="FILE_REFRESH") + + elif updater.update_ready == None and updater.async_checking == False: + col.scale_y = 2 + col.operator(addon_updater_check_now.bl_idname) + elif updater.update_ready == None: # async is running + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.enabled = False + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text="Checking...") + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_end_background.bl_idname, + text = "", icon="X") + + elif updater.include_branches==True and \ + len(updater.tags)==len(updater.include_branch_list) and \ + updater.manual_only==False: + # no releases found, but still show the appropriate branch + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_update_now.bl_idname, + text="Update directly to "+str(updater.include_branch_list[0])) + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text = "", icon="FILE_REFRESH") + + elif updater.update_ready==True and updater.manual_only==False: + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_update_now.bl_idname, + text="Update now to "+str(updater.update_version)) + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text = "", icon="FILE_REFRESH") + + elif updater.update_ready==True and updater.manual_only==True: + col.scale_y = 2 + col.operator("wm.url_open", + text="Download "+str(updater.update_version)).url=updater.website + else: # i.e. that updater.update_ready == False + subcol = col.row(align=True) + subcol.scale_y = 1 + split = subcol.split(align=True) + split.enabled = False + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text="Addon is up to date") + split = subcol.split(align=True) + split.scale_y = 2 + split.operator(addon_updater_check_now.bl_idname, + text = "", icon="FILE_REFRESH") + + row = element.row() + row.prop(settings, "auto_check_update") + + row = element.row() + row.scale_y = 0.7 + lastcheck = updater.json["last_check"] + if updater.error != None and updater.error_msg != None: + row.label(text=updater.error_msg) + elif lastcheck != "" and lastcheck != None: + lastcheck = lastcheck[0: lastcheck.index(".") ] + row.label(text="Last check: " + lastcheck) + else: + row.label(text="Last check: Never") + + +def skip_tag_function(self, tag): + """A global function for tag skipping + + A way to filter which tags are displayed, + e.g. to limit downgrading too far + input is a tag text, e.g. "v1.2.3" + output is True for skipping this tag number, + False if the tag is allowed (default for all) + Note: here, "self" is the acting updater shared class instance + """ + + # in case of error importing updater + if self.invalidupdater == True: + return False + + # ---- write any custom code here, return true to disallow version ---- # + # + # # Filter out e.g. if 'beta' is in name of release + # if 'beta' in tag.lower(): + # return True + # ---- write any custom code above, return true to disallow version --- # + + if self.include_branches == True: + for branch in self.include_branch_list: + if tag["name"].lower() == branch: return False + + # function converting string to tuple, ignoring e.g. 
leading 'v' + tupled = self.version_tuple_from_text(tag["name"]) + if type(tupled) != type( (1,2,3) ): return True + + # select the min tag version - change tuple accordingly + if self.version_min_update != None: + if tupled < self.version_min_update: + return True # skip if current version below this + + # select the max tag version + if self.version_max_update != None: + if tupled >= self.version_max_update: + return True # skip if current version at or above this + + # in all other cases, allow showing the tag for updating/reverting + return False + + +def select_link_function(self, tag): + """Only customize if trying to leverage "attachments" in *GitHub* releases + + A way to select from one or multiple attached donwloadable files from the + server, instead of downloading the default release/tag source code + """ + + # -- Default, universal case (and is the only option for GitLab/Bitbucket) + link = tag["zipball_url"] + + # -- Example: select the first (or only) asset instead source code -- + #if "assets" in tag and "browser_download_url" in tag["assets"][0]: + # link = tag["assets"][0]["browser_download_url"] + + # -- Example: select asset based on OS, where multiple builds exist -- + # # not tested/no error checking, modify to fit your own needs! + # # assume each release has three attached builds: + # # release_windows.zip, release_OSX.zip, release_linux.zip + # # This also would logically not be used with "branches" enabled + # if platform.system() == "Darwin": # ie OSX + # link = [asset for asset in tag["assets"] if 'OSX' in asset][0] + # elif platform.system() == "Windows": + # link = [asset for asset in tag["assets"] if 'windows' in asset][0] + # elif platform.system() == "Linux": + # link = [asset for asset in tag["assets"] if 'linux' in asset][0] + + return link + + +# ----------------------------------------------------------------------------- +# Register, should be run in the register module itself +# ----------------------------------------------------------------------------- + + +classes = ( + addon_updater_install_popup, + addon_updater_check_now, + addon_updater_update_now, + addon_updater_update_target, + addon_updater_install_manually, + addon_updater_updated_successful, + addon_updater_restore_backup, + addon_updater_ignore, + addon_updater_end_background +) + + +def register(bl_info): + """Registering the operators in this module""" + # safer failure in case of issue loading module + if updater.error: + print("Exiting updater registration, " + updater.error) + return + updater.clear_state() # clear internal vars, avoids reloading oddities + + # confirm your updater "engine" (Github is default if not specified) + # updater.engine = "Github" + updater.engine = "GitLab" + # updater.engine = "Bitbucket" + + # If using private repository, indicate the token here + # Must be set after assigning the engine. + # **WARNING** Depending on the engine, this token can act like a password!! 
+ # Only provide a token if the project is *non-public*, see readme for + # other considerations and suggestions from a security standpoint + updater.private_token = "" # "tokenstring" + + # choose your own username, must match website (not needed for GitLab) + updater.user = "" + + # choose your own repository, must match git name + updater.repo = "23569019" + + #updater.addon = # define at top of module, MUST be done first + + # Website for manual addon download, optional but recommended to set + updater.website = "https://gitlab.com/autour-de-minuit/blender/gp_toolbox" + + # Addon subfolder path + # "sample/path/to/addon" + # default is "" or None, meaning root + updater.subfolder_path = "" + + # used to check/compare versions + updater.current_version = bl_info["version"] + + # Optional, to hard-set update frequency, use this here - however, + # this demo has this set via UI properties. + # updater.set_check_interval( + # enable=False,months=0,days=0,hours=0,minutes=2) + + # Optional, consider turning off for production or allow as an option + # This will print out additional debugging info to the console + updater.verbose = True # make False for production default + + # Optional, customize where the addon updater processing subfolder is, + # essentially a staging folder used by the updater on its own + # Needs to be within the same folder as the addon itself + # Need to supply a full, absolute path to folder + # updater.updater_path = # set path of updater folder, by default: + # /addons/{__package__}/{__package__}_updater + + # auto create a backup of the addon when installing other versions + updater.backup_current = True # True by default + + # Sample ignore patterns for when creating backup of current during update + updater.backup_ignore_patterns = ["__pycache__",".gitignore",".git","*.json"] + # Alternate example patterns + # updater.backup_ignore_patterns = [".git", "__pycache__", "*.bat", ".gitignore", "*.exe"] + + # Patterns for files to actively overwrite if found in new update + # file and are also found in the currently installed addon. Note that + + # by default (ie if set to []), updates are installed in the same way as blender: + # .py files are replaced, but other file types (e.g. json, txt, blend) + # will NOT be overwritten if already present in current install. Thus + # if you want to automatically update resources/non py files, add them + # as a part of the pattern list below so they will always be overwritten by an + # update. If a pattern file is not found in new update, no action is taken + # This does NOT detele anything, only defines what is allowed to be overwritten + updater.overwrite_patterns = ["*.png","*.jpg","README.md","LICENSE.txt"] + # updater.overwrite_patterns = [] + # other examples: + # ["*"] means ALL files/folders will be overwritten by update, was the behavior pre updater v1.0.4 + # [] or ["*.py","*.pyc"] matches default blender behavior, ie same effect if user installs update manually without deleting the existing addon first + # e.g. 
if existing install and update both have a resource.blend file, the existing installed one will remain + # ["some.py"] means if some.py is found in addon update, it will overwrite any existing some.py in current addon install, if any + # ["*.json"] means all json files found in addon update will overwrite those of same name in current install + # ["*.png","README.md","LICENSE.txt"] means the readme, license, and all pngs will be overwritten by update + + # Patterns for files to actively remove prior to running update + # Useful if wanting to remove old code due to changes in filenames + # that otherwise would accumulate. Note: this runs after taking + # a backup (if enabled) but before placing in new update. If the same + # file name removed exists in the update, then it acts as if pattern + # is placed in the overwrite_patterns property. Note this is effectively + # ignored if clean=True in the run_update method + updater.remove_pre_update_patterns = ["*.py", "*.pyc"] + # Note setting ["*"] here is equivalent to always running updates with + # clean = True in the run_update method, ie the equivalent of a fresh, + # new install. This would also delete any resources or user-made/modified + # files setting ["__pycache__"] ensures the pycache folder is always removed + # The configuration of ["*.py","*.pyc"] is a safe option as this + # will ensure no old python files/caches remain in event different addon + # versions have different filenames or structures + + # Allow branches like 'master' as an option to update to, regardless + # of release or version. + # Default behavior: releases will still be used for auto check (popup), + # but the user has the option from user preferences to directly + # update to the master branch or any other branches specified using + # the "install {branch}/older version" operator. + updater.include_branches = True + + # (GitHub only) This options allows the user to use releases over tags for data, + # which enables pulling down release logs/notes, as well as specify installs from + # release-attached zips (instead of just the auto-packaged code generated with + # a release/tag). Setting has no impact on BitBucket or GitLab repos + updater.use_releases = False + # note: Releases always have a tag, but a tag may not always be a release + # Therefore, setting True above will filter out any non-annoted tags + # note 2: Using this option will also display the release name instead of + # just the tag name, bear this in mind given the skip_tag_function filtering above + + # if using "include_branches", + # updater.include_branch_list defaults to ['master'] branch if set to none + # example targeting another multiple branches allowed to pull from + # updater.include_branch_list = ['master', 'dev'] # example with two branches + updater.include_branch_list = None # None is the equivalent to setting ['master'] + + # Only allow manual install, thus prompting the user to open + # the addon's web page to download, specifically: updater.website + # Useful if only wanting to get notification of updates but not + # directly install. 
+ updater.manual_only = False + + # Used for development only, "pretend" to install an update to test + # reloading conditions + updater.fake_install = False # Set to true to test callback/reloading + + # Show popups, ie if auto-check for update is enabled or a previous + # check for update in user preferences found a new version, show a popup + # (at most once per blender session, and it provides an option to ignore + # for future sessions); default behavior is set to True + updater.showpopups = True + # note: if set to false, there will still be an "update ready" box drawn + # using the `update_notice_box_ui` panel function. + + # Override with a custom function on what tags + # to skip showing for updater; see code for function above. + # Set the min and max versions allowed to install. + # Optional, default None + # min install (>=) will install this and higher + updater.version_min_update = (0,7,1) + # updater.version_min_update = None # if not wanting to define a min + + # max install (<) will install strictly anything lower + # updater.version_max_update = (9,9,9) + updater.version_max_update = None # set to None if not wanting to set max + + # Function defined above, customize as appropriate per repository + updater.skip_tag = skip_tag_function # min and max used in this function + + # Function defined above, customize as appropriate per repository; not required + updater.select_link = select_link_function + + # The register line items for all operators/panels + # If using bpy.utils.register_module(__name__) to register elsewhere + # in the addon, delete these lines (also from unregister) + for cls in classes: + # apply annotations to remove Blender 2.8 warnings, no effect on 2.7 + make_annotations(cls) + # comment out this line if using bpy.utils.register_module(__name__) + bpy.utils.register_class(cls) + + # special situation: we just updated the addon, show a popup + # to tell the user it worked + # should be enclosed in try/catch in case other issues arise + showReloadPopup() + + +def unregister(): + for cls in reversed(classes): + # comment out this line if using bpy.utils.unregister_module(__name__) + bpy.utils.unregister_class(cls) + + # clear global vars since they may persist if not restarting blender + updater.clear_state() # clear internal vars, avoids reloading oddities + + global ran_autocheck_install_popup + ran_autocheck_install_popup = False + + global ran_update_sucess_popup + ran_update_sucess_popup = False + + global ran_background_check + ran_background_check = False diff --git a/functions.py b/functions.py new file mode 100644 index 0000000..c54155a --- /dev/null +++ b/functions.py @@ -0,0 +1,381 @@ +import bpy +from random import randint +from mathutils import Vector +from math import radians +from random import random as rand +import numpy as np +from bpy_extras.object_utils import world_to_camera_view as cam_space +import bmesh +from .utils import link_vert,gp_stroke_to_bmesh,draw_gp_stroke,remapping + + +def get_view_origin_position(): + #method 1 + from bpy_extras import view3d_utils + region = bpy.context.region + rv3d = bpy.context.region_data + view_loc = view3d_utils.region_2d_to_origin_3d(region, rv3d, (region.width/2.0, region.height/2.0)) + print("view_loc1", view_loc)#Dbg + + #method 2 + r3d = bpy.context.space_data.region_3d + view_loc2 = r3d.view_matrix.inverted().translation + print("view_loc2", view_loc2)#Dbg + if view_loc != view_loc2: print('there might be an errror when finding view coordinate') + + return view_loc + + +def 
to_bl_image(array,img): + # Write the result to Blender preview + width = len(array[0]) + height = len(array) + + + image = bpy.data.images.get(img) + if not image : + image = bpy.data.images.new(img,width,height) + + image.generated_width = width + image.generated_height = height + + output_pixels = [] + for y in range (0,height): + for x in range(0,width): + col = array[y][x] + + if not isinstance(col,list) : + col = [col]*3 + #print(col) + + output_pixels.append(col[0]) + output_pixels.append(col[1]) + output_pixels.append(col[2]) + output_pixels.append(1) + + image.pixels = output_pixels + + +def bm_angle_split(bm,angle) : + bm.verts.ensure_lookup_table() + loop = link_vert(bm.verts[0],[bm.verts[0]]) + splitted = [] + verts_to_split = [v for v in loop if len(v.link_edges) == 2 and v.calc_edge_angle() > radians(angle)] + for i,v in enumerate(verts_to_split) : + split_verts = bmesh.utils.vert_separate(v, v.link_edges) + + splitted.append(split_verts[0]) + + if i == 0 : + splitted.append(split_verts[1]) + + bm.verts.ensure_lookup_table() + + if splitted : + loops = [] + for v in splitted : + loop = link_vert(v,[v]) + + loops.append(loop) + + else : + loops = [loop] + + return loops + +def bm_uniform_density(bm,cam,max_spacing): + from bpy_extras.object_utils import world_to_camera_view as cam_space + scene = bpy.context.scene + ratio = scene.render.resolution_y/scene.render.resolution_x + for edge in bm.edges[:] : + first = Vector(cam_space(scene,cam,edge.verts[0].co)[:-1]) + last = Vector(cam_space(scene,cam,edge.verts[1].co)[:-1]) + + first[1]*= ratio + last[1]*= ratio + + length = (last-first).length + #print(length) + if length > max_spacing : + bmesh.ops.subdivide_edges(bm, edges = [edge],cuts = round(length/max_spacing)-1) + + return bm + + +def gp_stroke_angle_split (frame,strokes,angle): + strokes_info = gp_stroke_to_bmesh(strokes) + + new_strokes = [] + for stroke_info in strokes_info : + bm = stroke_info['bmesh'] + palette = stroke_info['color'] + line_width = stroke_info['line_width'] + strength = bm.verts.layers.float['strength'] + pressure = bm.verts.layers.float['pressure'] + select = bm.verts.layers.int['select'] + + splitted_loops = bm_angle_split(bm,angle) + + frame.strokes.remove(stroke_info['stroke']) + for loop in splitted_loops : + loop_info = [{'co':v.co,'strength': v[strength], 'pressure' :v[pressure],'select':v[select]} for v in loop] + new_stroke = draw_gp_stroke(loop_info,frame,palette,width = line_width) + new_strokes.append(new_stroke) + + return new_strokes + + +def gp_stroke_uniform_density(cam,frame,strokes,max_spacing): + strokes_info = gp_stroke_to_bmesh(strokes) + + new_strokes = [] + + for stroke_info in strokes_info : + bm = stroke_info['bmesh'].copy() + palette = stroke_info['color'] + line_width = stroke_info['line_width'] + strength = bm.verts.layers.float['strength'] + pressure = bm.verts.layers.float['pressure'] + select = bm.verts.layers.int['select'] + + bm_uniform_density(bm,cam,max_spacing) + + frame.strokes.remove(stroke_info['stroke']) + bm.verts.ensure_lookup_table() + + loop = link_vert(bm.verts[0],[bm.verts[0]]) + loop_info = [{'co':v.co,'strength': v[strength], 'pressure' :v[pressure],'select':v[select]} for v in loop] + + new_stroke = draw_gp_stroke(loop_info,frame,palette,width = line_width) + new_strokes.append(new_stroke) + + return new_strokes + + +def along_stroke(stroke,attr,length,min,max) : + strokelen = len(stroke.points) + for index,point in enumerate(stroke.points) : + value = getattr(point,attr) + if index < length : + remap 
= remapping(index/length,0,1,min,max) + setattr(point,attr,value*remap) + + if index > strokelen-length : + remap = remapping((strokelen-index)/length,0,1,min,max) + setattr(point,attr,value*remap) + +def randomise_points(mat,points,attr,strength) : + for point in points : + if attr is 'co' : + random_x = (rand()-0.5) + random_y = (rand()-0.5) + + x = (random_x*strength, 0.0, 0.0) + y = (0.0, random_y*strength, 0.0) + + point.co+= mat * Vector(x) - mat.to_translation() + point.co+= mat * Vector(y) - mat.to_translation() + + else : + value = getattr(point,attr) + random = (rand()-0.5) + setattr(point,attr,value+random*strength) + + + +def zoom_to_object(cam,resolution,box,margin=0.01) : + min_x= box[0] + max_x= box[1] + min_y= box[2] + max_y= box[3] + + ratio = resolution[0]/resolution[1] + + zoom_cam = cam.copy() + zoom_cam.data = zoom_cam.data.copy() + + center = ((max_x+min_x)/2,(max_y+min_y)/2) + + factor = max((max_x-min_x),(max_y-min_y))+margin + + + zoom_cam.data.shift_x += (center[0]-0.5)/factor + zoom_cam.data.shift_y += (center[1]-0.5)/factor/ratio + + + zoom_cam.data.lens /= factor + + bpy.context.scene.objects.link(zoom_cam) + + + resolution = (int(resolution[0]*factor),int(resolution[1]*factor)) + + + scene = bpy.context.scene + res_x = scene.render.resolution_x + res_y =scene.render.resolution_y + + scene.render.resolution_x = resolution[0] + scene.render.resolution_y = resolution[1] + + frame = zoom_cam.data.view_frame(scene) + frame = [zoom_cam.matrix_world * corner for corner in frame] + + modelview_matrix = zoom_cam.matrix_world.inverted().copy() + projection_matrix = zoom_cam.calc_matrix_camera(resolution[0],resolution[1],1,1).copy() + + #bpy.data.cameras.remove(zoom_cam.data) + #bpy.data.objects.remove(zoom_cam) + #bpy.context.scene.objects.link(zoom_cam) + + scene.render.resolution_x = res_x + scene.render.resolution_y = res_y + #print(matrix,resolution) + return modelview_matrix,projection_matrix,frame,resolution + + + +def set_viewport_matrix(width,height,mat): + from bgl import glViewport,glMatrixMode,GL_PROJECTION,glLoadMatrixf,Buffer,GL_FLOAT,glMatrixMode,GL_MODELVIEW,glLoadIdentity + + glViewport(0,0,width,height) + + #glLoadIdentity() + + glMatrixMode(GL_PROJECTION) + + projection = [mat[j][i] for i in range(4) for j in range(4)] + glLoadMatrixf(Buffer(GL_FLOAT, 16, projection)) + + #glMatrixMode( GL_MODELVIEW ) + #glLoadIdentity() + + + + + +# get object info +def get_object_info(mesh_groups,order_list = []) : + scene = bpy.context.scene + cam = scene.camera + #scale = scene.render.resolution_percentage / 100.0 + res_x = int(scene.render.resolution_x) + res_y = int(scene.render.resolution_y) + + scene.render.resolution_x = 1024 + scene.render.resolution_y = 1024 + + cam_coord = cam.matrix_world.to_translation() + + convert_table = {(255,255,255):-1,(0,0,0):0} + mesh_info = [] + color_index = 1 + for i,mesh_group in enumerate(mesh_groups) : + for ob in mesh_group["objects"] : + ob_info = {"object": ob, "materials" : [],"group_index" : i,'color_indexes':[]} + + namespace = mesh_group['namespace'] + ob_info['namespace'] = namespace + + l_name = ob.name + if l_name.startswith(namespace+'_') : + l_name = namespace+'_'+'COLO_'+ob.name.split('_',1)[1] + else : + l_name = namespace+'_'+'COLO_'+l_name + + ob_info['name'] = l_name + + bm = bmesh.new() + bm.from_object(ob,scene) + ob_info["bm"] = bm + + if not bm.verts : continue + + ob_info["matrix"] = ob.matrix_world + + if mesh_group.get("dupli_object") : + ob_info["matrix"] = mesh_group["dupli_object"].matrix_world * 
ob.matrix_world + + + global_bbox = [ob_info["matrix"] * Vector(v) for v in ob.bound_box] + global_bbox_center = Vector(np.mean(global_bbox,axis =0)) + + bbox_cam_space = [cam_space(scene,cam,p)[:-1] for p in global_bbox] + + sorted_x = sorted(bbox_cam_space,key = lambda x : x[0]) + sorted_y = sorted(bbox_cam_space,key = lambda x : x[1]) + + + ob_info['box_2d']=[sorted_x[0][0],sorted_x[-1][0],sorted_y[0][1],sorted_y[-1][1]] + + #print(ob_info['box_2d']) + + ''' + { + 'x' : int(sorted_x[0][0]*res_x)-1, + 'y' : int(sorted_y[0][1]*res_y)-1, + 'width' : int(sorted_x[-1][0]*res_x - sorted_x[0][0]*res_x)+1, + 'height' : int(sorted_y[-1][1]*res_y - sorted_y[0][1]*res_y)+1, + } + ''' + #bbox_depth = [Vector(p - cam_coord).length for p in global_bbox] + #ob_info["depth"] = min(bbox_depth) + ob_info["depth"] = Vector(global_bbox_center - cam_coord).length + + for slot in ob.material_slots : + mat = slot.material + mat_info = {'index' : color_index} + if mat : + color = [pow(v,1/2.2) for v in mat.diffuse_color] + name = mat.name + else : + color = [1,0,1] + name = "default" + + #seed(i) + random_color = (randint(0,255),randint(0,255),randint(0,255)) + + if name.startswith(namespace+'_') : + name = namespace+'_'+'COLO_'+ name.split('_',1)[1] + else : + name = namespace+'_'+'COLO_'+name + + mat_info["name"] = name + mat_info["color"] = color + mat_info["random_color"] = random_color + + ob_info["materials"].append(mat_info) + ob_info["color_indexes"].append(color_index) + + convert_table[random_color] = color_index + + color_index +=1 + + if not ob.material_slots : + random_color = (randint(0,255),randint(0,255),randint(0,255)) + ob_info["random_color"] = random_color + ob_info["color"] = (0.5,0.5,0.5) + ob_info["color_indexes"].append(color_index) + convert_table[random_color] = color_index + color_index +=1 + + mesh_info.append(ob_info) + + + mesh_info = sorted(mesh_info,key = lambda x : x['depth'],reverse=True) + + #print("###") + #print([i['name'] for i in mesh_info]) + + if order_list : + for name in [i['name'] for i in mesh_info] : + if name not in order_list : + order_list.append(name) + + mesh_info = sorted(mesh_info,key = lambda x : order_list.index(x['name'])) + + scene.render.resolution_x = res_x + scene.render.resolution_y = res_y + + + return mesh_info,convert_table diff --git a/gp_toolbox_public_updater/gp_toolbox_public_updater_status.json b/gp_toolbox_public_updater/gp_toolbox_public_updater_status.json new file mode 100644 index 0000000..952445e --- /dev/null +++ b/gp_toolbox_public_updater/gp_toolbox_public_updater_status.json @@ -0,0 +1,9 @@ +{ + "last_check": "", + "backup_date": "", + "update_ready": false, + "ignore": false, + "just_restored": false, + "just_updated": false, + "version_text": {} +} \ No newline at end of file diff --git a/keymaps.py b/keymaps.py new file mode 100644 index 0000000..e76e7ca --- /dev/null +++ b/keymaps.py @@ -0,0 +1,45 @@ +## Pure keymaping additions + +import bpy +addon_keymaps = [] + +def register_keymaps(): + addon = bpy.context.window_manager.keyconfigs.addon + # km = addon.keymaps.new(name = "3D View", space_type = "VIEW_3D")# in 3D context + # km = addon.keymaps.new(name = "Window", space_type = "EMPTY")# from everywhere + + ## try initiate + km = addon.keymaps.new(name = "Grease Pencil Stroke Sculpt Mode", space_type = "EMPTY", region_type='WINDOW') + + kmi = km.keymap_items.new('wm.context_toggle', type='ONE', value='PRESS') + kmi.properties.data_path='scene.tool_settings.use_gpencil_select_mask_point' + addon_keymaps.append((km, kmi)) + + 
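+ ## The two bindings below repeat the same pattern as the 'ONE' key above: bind a number key to 'wm.context_toggle', point its data_path at the matching gpencil select-mask tool setting, and store the (km, kmi) pair so unregister_keymaps() can remove it later.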
kmi = km.keymap_items.new('wm.context_toggle', type='TWO', value='PRESS') + kmi.properties.data_path='scene.tool_settings.use_gpencil_select_mask_stroke' + addon_keymaps.append((km, kmi)) + + kmi = km.keymap_items.new('wm.context_toggle', type='THREE', value='PRESS') + kmi.properties.data_path='scene.tool_settings.use_gpencil_select_mask_segment' + addon_keymaps.append((km, kmi)) + +def unregister_keymaps(): + # wm = bpy.context.window_manager + for km, kmi in addon_keymaps: + km.keymap_items.remove(kmi) + + # wm.keyconfigs.addon.keymaps.remove(km) + addon_keymaps.clear() + + + +def register(): + if not bpy.app.background: + register_keymaps() + +def unregister(): + if not bpy.app.background: + unregister_keymaps() + +if __name__ == "__main__": + register() diff --git a/properties.py b/properties.py new file mode 100644 index 0000000..5480a66 --- /dev/null +++ b/properties.py @@ -0,0 +1,67 @@ +import bpy +from bpy.props import ( + IntProperty, + BoolProperty, + StringProperty, + FloatProperty, + ) + +from .OP_cursor_snap_canvas import cursor_follow_update + +def change_edit_lines_opacity(self, context): + # for o in context.scene.objects: + # if o.type != 'GPENCIL': + # continue + # o.data.edit_line_color[3]=self.edit_lines_opacity + for gp in bpy.data.grease_pencils: + if not gp.is_annotation: + gp.edit_line_color[3]=self.edit_lines_opacity + +class GP_PG_ToolsSettings(bpy.types.PropertyGroup) : + autotint_offset : IntProperty( + name="Tint hue offset", description="Offset the tint by this value for better color", + default=0, min=-5000, max=5000, soft_min=-999, soft_max=999, step=1, + options={'HIDDEN'})#, subtype='PERCENTAGE' + + autotint_namespace : BoolProperty( + name="Use prefix", description="Put the same color on layers using the same prefix (separated by '_'), or the full name if there is no separator", + default=True, + options={'HIDDEN'}) + + resolution_percentage: IntProperty( + name="Resolution %", description="Overrides resolution percentage for playblast", + default = 50, min=1, max= 100, subtype='PERCENTAGE')#, precision=0 + + cursor_follow : BoolProperty( + name='Cursor Follow', description="3D cursor follows the active object's animation when activated", + default=False, update=cursor_follow_update) + + edit_lines_opacity : FloatProperty( + name="Edit lines opacity", description="Change edit lines opacity for all grease pencils", default=0.5, min=0.0, max=1.0, step=3, precision=2, update=change_edit_lines_opacity)#, get=None, set=None + + ## render + name_for_current_render : StringProperty( + name="Render_name", description="Name used for render current", + default="") + + + """ + reconnect_parent = bpy.props.PointerProperty(type =bpy.types.Object,poll=poll_armature) + render_settings = bpy.props.BoolProperty(default = False) + render_color = bpy.props.BoolProperty(default = True) + render_contour = bpy.props.BoolProperty(default = False) + precision = bpy.props.IntProperty(default = 75,subtype = 'PERCENTAGE',min=0,max=100) + + border_render = bpy.props.BoolProperty(default = False) + + spacialize = bpy.props.BoolProperty(default = False) + depth = bpy.props.FloatProperty(default = 2.0) + + extra_tools = bpy.props.BoolProperty(default = False) + enable_ob_filter = bpy.props.BoolProperty(default = False) + auto_cursor = bpy.props.BoolProperty(default = True) + + opacity_layers = bpy.props.FloatProperty(min=0,max=1,default = 1,update = update_layers_opacity) + + stroke_select = bpy.props.EnumProperty(items = [("POINT","Point",""),("STROKE","Stroke","")],update = update_selection_mode) + """ \
No newline at end of file diff --git a/utils.py b/utils.py new file mode 100644 index 0000000..6b34c2a --- /dev/null +++ b/utils.py @@ -0,0 +1,688 @@ +import bpy, os +import numpy as np +import bmesh +import mathutils +from mathutils import Vector +from math import sqrt +from sys import platform +import subprocess + + + +""" def get_gp_parent(layer) : + if layer.parent_type == "BONE" and layer.parent_bone : + return layer.parent.pose.bones.get(layer.parent_bone) + else : + return layer.parent + """ +def get_matrix(ob) : + '''return a copy of the world_matrix, applied object matrix if its a bone''' + if isinstance(ob, bpy.types.PoseBone) : + return ob.id_data.matrix_world @ ob.matrix.copy()# * ? + else : + return ob.matrix_world.copy() + +def set_matrix(gp_frame,mat): + for stroke in gp_frame.strokes : + for point in stroke.points : + point.co = mat @ point.co + +# get view vector location (the 2 methods work fine) +def get_view_origin_position(): + #method 1 + # from bpy_extras import view3d_utils + # region = bpy.context.region + # rv3d = bpy.context.region_data + # view_loc = view3d_utils.region_2d_to_origin_3d(region, rv3d, (region.width/2.0, region.height/2.0)) + # print("view_loc1", view_loc)#Dbg + + #method 2 + r3d = bpy.context.space_data.region_3d + view_loc2 = r3d.view_matrix.inverted().translation + # print("view_loc2", view_loc2)#Dbg + # if view_loc != view_loc2: print('Might be an error when finding view coordinate') + + return view_loc2 + +def location_to_region(worldcoords): + from bpy_extras import view3d_utils + return view3d_utils.location_3d_to_region_2d(bpy.context.region, bpy.context.space_data.region_3d, worldcoords) + +def region_to_location(viewcoords, depthcoords): + from bpy_extras import view3d_utils + return view3d_utils.region_2d_to_location_3d(bpy.context.region, bpy.context.space_data.region_3d, viewcoords, depthcoords) + +def vector_len_from_coord(a, b): + '''get either two points or world coordinates and return length''' + from mathutils import Vector + if type(a) is Vector: + return (a - b).length + else: + return (a.co - b.co).length + +def transfer_value(Value, OldMin, OldMax, NewMin, NewMax): + '''map a value from a range to another (transfer/translate value)''' + return (((Value - OldMin) * (NewMax - NewMin)) / (OldMax - OldMin)) + NewMin + +def object_derived_get(ob, scene): + if ob.dupli_type != 'NONE' : + ob.dupli_list_create(scene) + ob_matrix_pairs = [(dob.object, dob.matrix.copy()) for dob in ob.dupli_list] + ob.dupli_list_clear() + else: + ob_matrix_pairs = [(ob, ob.matrix_world.copy())] + + return ob_matrix_pairs + + +def link_vert(v,ordered_vert) : + for e in v.link_edges : + other_vert = e.other_vert(v) + if other_vert not in ordered_vert : + ordered_vert.append(other_vert) + link_vert(other_vert,ordered_vert) + + return ordered_vert + + +def find_loops(bm) : + verts = [] + loops = [] + + print([v for v in bm.verts if len(v.link_edges)==1]) + for v in [v for v in bm.verts if len(v.link_edges)==1] : + if v not in verts : + loop = link_vert(v,[v]) + loops.append(loop) + for vert in loop : + verts.append(vert) + + return loops + + +def get_perimeter(points) : + perimeter = 0 + + print('pointlen',len(points)) + for i,point in enumerate(points) : + if i != 0 : + perimeter += (Vector(point) -Vector(points[i-1])).length + + return perimeter + +def points_to_bm_face(points,depth=0) : + bm = bmesh.new() + for point in points : + bm.verts.new((point[0],point[1],depth)) + bm.faces.new(bm.verts) + + bm.faces.ensure_lookup_table() + return bm + +def 
gp_stroke_to_bmesh(strokes): + strokes_info = [] + for stroke in strokes : + + info = {'stroke' : stroke ,'color':stroke.colorname,'line_width':stroke.line_width} + bm = bmesh.new() + strength = bm.verts.layers.float.new('strength') + pressure = bm.verts.layers.float.new('pressure') + select = bm.verts.layers.int.new('select') + + verts = [] + for i,point in enumerate(stroke.points) : + v = bm.verts.new(point.co) + v[strength] = point.strength + v[pressure] = point.pressure + v[select] = point.select + + verts.append(v) + if i > 0 : + e = bm.edges.new([verts[-1],verts[-2]]) + + info['bmesh']= bm + strokes_info.append(info) + + return strokes_info + + +def simple_draw_gp_stroke(pts,frame,width = 2, mat_id = 0): + ''' + draw basic stroke by passing list of point 3D coordinate + the frame to draw on and optional width parameter (default = 2) + ''' + stroke = frame.strokes.new() + stroke.line_width = width + stroke.display_mode = '3DSPACE' + stroke.material_index = mat_id + # readonly -> stroke.is_nofill_stroke# boundary_stroke + + stroke.points.add(len(pts)) + seq = [i for vec in pts for i in vec]## foreach_set flatlist for speed + stroke.points.foreach_set('co', seq) + ## one by one + # for i, pt in enumerate(pts): + # stroke.points.add() + # dest_point = stroke.points[i] + # dest_point.co = pt + return stroke + +## OLD - need update +def draw_gp_stroke(loop_info,frame,palette,width = 2) : + stroke = frame.strokes.new(palette) + + stroke.line_width = width + stroke.display_mode = '3DSPACE'# old->draw_mode + + for i,info in enumerate(loop_info) : + stroke.points.add() + dest_point = stroke.points[i] + for attr,value in info.items() : + setattr(dest_point,attr,value) + + return stroke + +def get_camera_frame_info(cam, distance = 1): + ''' + return a list with 4 screen corner top-right first rotating CC + 4-------1 + | | + 3-------2 + ''' + cam_coord = cam.matrix_world.to_translation() + scene = bpy.context.scene + + #shift_x = cam.data.shift_x + #shift_y = cam.data.shift_y + + #cam.data.shift_x = 0 + #cam.data.shift_y = 0 + + frame = cam.data.view_frame(scene) + frame = [cam.matrix_world * corner for corner in frame] + + #frame = [corner+(corner-cam_coord).normalized()*distance for corner in frame] + + #cam.data.shift_x = shift_x + #cam.data.shift_y = shift_y + + # bpy.context.scene.cursor_location = frame[0]# test by placing cursor + + return frame + + +def id_convert(fimg,id,operation = 'EQUAL',border = True): + new_img = fimg.copy() + + width = len(new_img[0]) + + if operation == 'EQUAL' : + thresh_mask = new_img[...] == id + + elif operation == 'GREATER' : + thresh_mask = new_img[...] > id + + elif operation == 'LOWER' : + thresh_mask = new_img[...] < id + else : + return + + new_img[:] = 1.0 + new_img[thresh_mask] = 0.0 + + if border : + # Adding black around the image + new_img = np.concatenate((new_img,[[1]*width])) + new_img = np.concatenate(([[1]*width],new_img)) + + new_img = np.insert(new_img,width,1,axis = 1) + new_img = np.insert(new_img,0,1,axis = 1) + + return new_img + +def remapping(value, leftMin, leftMax, rightMin, rightMax): + # Figure out how 'wide' each range is + leftSpan = leftMax - leftMin + rightSpan = rightMax - rightMin + + # Convert the left range into a 0-1 range (float) + valueScaled = float(value - leftMin) / float(leftSpan) + + # Convert the 0-1 range into a value in the right range. 
+ return rightMin + (valueScaled * rightSpan) + +#### GP funcs + +def get_gp_draw_plane(context): + ''' return tuple with plane coordinate and normal + of the curent drawing accordign to geometry''' + + settings = context.scene.tool_settings + orient = settings.gpencil_sculpt.lock_axis#'VIEW', 'AXIS_Y', 'AXIS_X', 'AXIS_Z', 'CURSOR' + loc = settings.gpencil_stroke_placement_view3d#'ORIGIN', 'CURSOR', 'SURFACE', 'STROKE' + mat = context.object.matrix_world if context.object else None + # -> placement + if loc == "CURSOR": + plane_co = context.scene.cursor.location + else:#ORIGIN (also on origin if set to 'SURFACE', 'STROKE') + if not context.object: + plane_co = None + else: + plane_co = context.object.matrix_world.to_translation()# context.object.location + + + # -> orientation + if orient == 'VIEW': + #only depth is important, no need to get view vector + plane_no = None + + elif orient == 'AXIS_Y':#front (X-Z) + plane_no = Vector((0,1,0)) + plane_no.rotate(mat) + + elif orient == 'AXIS_X':#side (Y-Z) + plane_no = Vector((1,0,0)) + plane_no.rotate(mat) + + elif orient == 'AXIS_Z':#top (X-Y) + plane_no = Vector((0,0,1)) + plane_no.rotate(mat) + + elif orient == 'CURSOR': + plane_no = Vector((0,0,1)) + plane_no.rotate(context.scene.cursor.matrix) + + return plane_co, plane_no + +## need big update +def create_gp_palette(gp_data_block,info) : + palette = gp_data_block.palettes.active + + name = info["name"] + + if palette.colors.get(name) : + return palette.colors.get(name) + + else : + p = palette.colors.new() + for attr,value in info.items() : + setattr(p,attr,value) + + return p + + +""" def get_gp_data_block() : + scene = bpy.context.scene + if scene.tool_settings.grease_pencil_source == 'OBJECT' and ob and ob.grease_pencil: + gp_data_block = ob.grease_pencil + elif scene.grease_pencil : + gp_data_block = scene.grease_pencil + else : + gp_data_block =bpy.data.grease_pencil.new('GPencil') + scene.grease_pencil = gp_data_block + + palette = gp_data_block.palettes.active + if not palette : + palette = gp_data_block.palettes.new("palette") + + return gp_data_block,palette """ + +def get_gp_objects(selection=True): + '''return selected objects or only the active one''' + if not bpy.context.active_object or bpy.context.active_object.type != 'GPENCIL': + print('No active GP object') + return [] + + active = bpy.context.active_object + if selection: + selection = [o for o in bpy.context.selected_objects if o.type == 'GPENCIL'] + if not active in selection: + selection += [active] + return selection + + if bpy.context.active_object and bpy.context.active_object.type == 'GPENCIL': + return [active] + return [] + +def get_gp_datas(selection=True): + '''return selected objects or only the active one''' + if not bpy.context.active_object or bpy.context.active_object.type != 'GPENCIL': + print('No active GP object') + return [] + + active_data = bpy.context.active_object.data + if selection: + selected = [] + for o in bpy.context.selected_objects: + if o.type == 'GPENCIL': + if o.data not in selected: + selected.append(o.data) + # selected = [o.data for o in bpy.context.selected_objects if o.type == 'GPENCIL'] + if not active_data in selected: + selected += [active_data] + return selected + + if bpy.context.active_object and bpy.context.active_object.type == 'GPENCIL': + return [active_data] + + print('EOL. 
No active GP object') + return [] + +def get_gp_layer(gp_data_block,name) : + gp_layer = gp_data_block.layers.get(name) + if not gp_layer : + gp_layer = gp_data_block.layers.new(name) + + return gp_layer + +def get_gp_frame(layer,frame_nb = None) : + scene = bpy.context.scene + if not frame_nb : + frame_nb = scene.frame_current + + frames={} + for i,f in enumerate(layer.frames) : + frames[f.frame_number]=i + + if not scene.frame_current in frames.keys(): + dest_frame = layer.frames.new(frame_nb) + else : + dest_frame = layer.frames[frames[frame_nb]] + dest_frame.clear() + + return dest_frame + +def get_active_frame(layer_name=None): + '''Return active frame of active layer or from layer name passed''' + if layer_name: + lay = bpy.context.scene.grease_pencil.layers.get(layer_name) + if lay: + frame = lay.active_frame + if frame: + return frame + else: + print ('no active frame for layer', layer_name) + else: + print('no layers named', layer_name, 'in scene layers') + + else:#active layer + frame = bpy.context.scene.grease_pencil.layers.active.active_frame + if frame: + return frame + else: + print ('no active frame on active layer') + +def get_stroke_2D_coords(stroke): + '''return a list containing points 2D coordinates of passed gp stroke object''' + return [location_to_region(p.co) for p in stroke.points] + + '''#foreach method for retreiving multiple other attribute quickly and stack them + point_nb = len(stroke.points) + seq = [0]*(point_nb*3) + stroke.points.foreach_get("co",seq) + print("raw_list", seq)#Dbg + import numpy as np + #can use np.stack to add points infos on same index (on different line/dimension) + #https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.stack.html + ''' + +def get_all_stroke_2D_coords(frame): + '''return a list of lists with all strokes's points 2D location''' + ## using modification from get_stroke_2D_coords func' + return [get_stroke_2D_coords(s) for s in frame.strokes] + ## direct + #return[[location_to_region(p.co) for p in s.points] for s in frame.strokes] + +def selected_strokes(frame): + '''return all stroke having a point selected as a list of strokes objects''' + stlist = [] + for i, s in enumerate(frame.strokes): + if any(pt.select for pt in s.points): + stlist.append(s) + return stlist + +from math import sqrt +from mathutils import Vector + + +# ----------------- +### Vector utils 3d +# ----------------- + +def single_vector_length(v): + return sqrt((v[0] * v[0]) + (v[1] * v[1]) + (v[2] * v[2])) + +def vector_length(A,B): + ''''take two Vector3 and return length''' + return sqrt((A[0] - B[0])**2 + (A[1] - B[1])**2 + (A[2] - B[2])**2) + +def vector_length_coeff(size, A, B): + ''' + Calculate the vector lenght + return the coefficient to multiply this vector + to obtain a vector of the size given in paramerter + ''' + Vlength = sqrt((A[0] - B[0])**2 + (A[1] - B[1])**2 + (A[2] - B[2])**2) + if Vlength == 0: + print('problem Vector lenght == 0 !') + return (1) + return (size / Vlength) + + +def cross_vector_coord(foo, bar, size): + '''Return the coord in space of a cross vector between the two point with specified size''' + between = foo - bar + #create a generic Up vector (on Y or Z) + up = Vector([1.0,0,0]) + new = Vector.cross(up, between)#the cross product return a 90 degree Vector + if new == Vector([0.0000, 0.0000, 0.0000]): + #new == 0 if up vector and between are aligned ! 
(so change up vector) + up = Vector([0,-1.0,0]) + new = Vector.cross(up, between)#the cross product return a 90 degree Vector + + perpendicular = foo + new + coeff = vector_length_coeff(size, foo, perpendicular) + #position the point in space by adding the new vector multiplied by coeff value to get wanted lenght + return (foo + (new * coeff)) + + +def midpoint(p1, p2): + '''middle location between 2 vector is calculated by adding the two vector and divide by two''' + ##mid = (foo + bar) / 2 + return (Vector([(p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2, (p1[2] + p2[2]) / 2])) + + +def extrapolate_points_by_length(a,b, length): + ''' + Return a third point C from by continuing in AB direction + Length define BC distance. both vector2 and vector3 + ''' + # return b + ((b - a).normalized() * length)# one shot + ab = b - a + if not ab: return None + return b + (ab.normalized() * length) + +# ----------------- +### Vector utils 2d +# ----------------- + +def single_vector_length_2d(v): + return sqrt((v[0] * v[0]) + (v[1] * v[1])) + + +def vector_length_2d(A,B): + ''''take two Vector and return length''' + return sqrt((A[0] - B[0])**2 + (A[1] - B[1])**2) + + +def vector_length_coeff_2d(size, A, B): + ''' + Calculate the vector lenght + return the coefficient to multiply this vector + to obtain a vector of the size given in paramerter + ''' + Vlength = sqrt((A[0] - B[0])**2 + (A[1] - B[1])**2) + if Vlength == 0: + print('problem Vector lenght == 0 !') + return (1) + return (size / Vlength) + +def cross_vector_coord_2d(foo, bar, size): + '''Return the coord in space of a cross vector between the two point with specified size''' + ###middle location between 2 vector is calculated by adding the two vector and divide by two + ##mid = (foo + bar) / 2 + between = foo - bar + #create a generic Up vector (on Y or Z) + up = Vector([0,1.0]) + new = Vector.cross(up, between)#the cross product return a 90 degree Vector + if new == Vector([0.0000, 0.0000]): + #new == 0 if up vector and between are aligned ! 
(so change up vector) + up = Vector([0,-1.0,0]) + new = Vector.cross(up, between)#the cross product return a 90 degree Vector + + perpendicular = foo + new + coeff = vector_length_coeff(size, foo, perpendicular) + #position the point in space by adding the new vector multiplied by coeff value to get wanted lenght + return (foo + (new * coeff)) + + +def midpoint_2d(p1, p2): + return (Vector([(p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2])) + + +# ----------------- +### Collection management +# ----------------- + +def set_collection(ob, collection, unlink=True) : + ''' link an object in a collection and create it if necessary, if unlink object is removed from other collections''' + scn = bpy.context.scene + col = None + visible = False + linked = False + + # check if collection exist or create it + for c in bpy.data.collections : + if c.name == collection : col = c + if not col : col = bpy.data.collections.new(name=collection) + + # link the collection to the scene's collection if necessary + for c in scn.collection.children : + if c.name == col.name : visible = True + if not visible : scn.collection.children.link(col) + + # check if the object is already in the collection and link it if necessary + for o in col.objects : + if o == ob : linked = True + if not linked : col.objects.link(ob) + + # remove object from scene's collection + for o in scn.collection.objects : + if o == ob : scn.collection.objects.unlink(ob) + + # if unlink flag we remove the object from other collections + if unlink : + for c in ob.users_collection : + if c.name != collection : c.objects.unlink(ob) + + +# ----------------- +### Path utils +# ----------------- + +def get_addon_prefs(): + ''' + function to read current addon preferences properties + + access a prop like this : + prefs = get_addon_prefs() + option_state = prefs.super_special_option + + oneliner : get_addon_prefs().super_special_option + ''' + import os + addon_name = os.path.splitext(__name__)[0] + preferences = bpy.context.preferences + addon_prefs = preferences.addons[addon_name].preferences + return (addon_prefs) + + +def open_file(file_path) : + '''Open filepath with default browser''' + if platform.lower() == 'darwin': + subprocess.call(('open', file_path)) + + elif platform.lower().startswith('win'): + os.startfile(file_path) + # subprocess.call(('start', file_path)) + + else:#linux + subprocess.call(('xdg-open', file_path)) + +def open_folder(folderpath): + '''Open the folder at given path with default browser''' + myOS = platform + if myOS.startswith('linux') or myOS.startswith('freebsd'): + cmd = 'xdg-open' + elif myOS.startswith('win'): + cmd = 'explorer' + if not folderpath: + return('/') + else:#elif myOS == "darwin": + cmd = 'open' + + if not folderpath: + return('//') + + folderpath = os.path.normpath(folderpath)# to prevent bad path string + fullcmd = [cmd, folderpath] + print(fullcmd) + # subprocess.call(fullcmd) + subprocess.Popen(fullcmd) + + return ' '.join(fullcmd)#back to string to return and print + + +def detect_OS(): + """return str of os name : Linux, Windows, Mac (None if undetected)""" + myOS = platform + + if myOS.startswith('linux') or myOS.startswith('freebsd'):# linux + # print("operating system : Linux") + return ("Linux") + + elif myOS.startswith('win'):# Windows + # print("operating system : Windows") + return ("Windows") + + elif myOS == "darwin":# OS X + # print("operating system : Mac") + return ('Mac') + + else:# undetected + print("Cannot detect OS, python 'sys.platform' give :", myOS) + return None + +def 
convert_attr(Attr): + '''Convert given value to a Json serializable format''' + if isinstance(Attr, (mathutils.Vector,mathutils.Color)): + return Attr[:] + elif isinstance(Attr, mathutils.Matrix): + return [v[:] for v in Attr] + elif isinstance(Attr,bpy.types.bpy_prop_array): + return [Attr[i] for i in range(0,len(Attr))] + else: + return(Attr) + +## confirm pop-up message: +def show_message_box(_message = "", _title = "Message Box", _icon = 'INFO'): + def draw(self, context): + for l in _message: + if isinstance(l, str): + self.layout.label(text=l) + else: + self.layout.label(text=l[0], icon=l[1]) + + if isinstance(_message, str): + _message = [_message] + bpy.context.window_manager.popup_menu(draw, title = _title, icon = _icon) \ No newline at end of file
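
For reference, a minimal usage sketch of the show_message_box helper above (illustrative only, not part of the patch): its draw() loop accepts either plain strings or (text, icon) pairs, so a mixed list works. The message text, operator context, and icons used here are made up.

    # Hypothetical call, e.g. from an operator's execute() after a check fails
    show_message_box(
        _message=[
            "Playblast finished with warnings:",    # plain string -> label without icon
            ("2 layers were skipped", 'ERROR'),     # (text, icon) pair -> label with icon
        ],
        _title="GP Toolbox",
        _icon='INFO')

    # A single string is also accepted and gets wrapped into a one-line list
    show_message_box("Nothing selected", _title="GP Toolbox", _icon='ERROR')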