Discover Python API functions on RNA types (Context.copy, etc.)

Pick up Python functions defined on RNA classes that have RST docstrings
(:rtype: or :type:), indicating they are public API methods rather than
operator/panel implementation details.

Also add Iterable and mathutils imports to bpy.types stub generator
for functions that reference these types in their annotations.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Joseph HENRY 2026-03-31 12:34:19 +02:00
parent cfcb66620a
commit a30e772cf8
2 changed files with 1107 additions and 950 deletions

View File

@ -98,43 +98,21 @@ def collect_all_type_strings(module_data: ModuleData) -> list[str]:
return all_types
def collect_imports(module_data: ModuleData) -> set[str]:
"""Collect all import statements needed for the stub file."""
imports: set[str] = set()
all_types = collect_all_type_strings(module_data)
joined = " ".join(all_types)
_TOKEN_IMPORTS: tuple[tuple[str, str], ...] = (
("Sequence", "from collections.abc import Sequence"),
("Iterable", "from collections.abc import Iterable"),
("Callable", "from collections.abc import Callable"),
("Iterator", "from collections.abc import Iterator"),
("Literal", "from typing import Literal"),
("Any", "from typing import Any"),
("Generator", "from collections.abc import Generator"),
("Mapping", "from collections.abc import Mapping"),
("Collection", "from collections.abc import Collection"),
("TypeAlias", "from typing import TypeAlias"),
("Generic", "from typing import Generic, TypeVar"),
)
def _has_type(name: str) -> bool:
return bool(re.search(rf"\b{name}\b", joined))
if _has_type("Sequence"):
imports.add("from collections.abc import Sequence")
if _has_type("Iterable"):
imports.add("from collections.abc import Iterable")
if _has_type("Callable"):
imports.add("from collections.abc import Callable")
if _has_type("Iterator"):
imports.add("from collections.abc import Iterator")
if _has_type("Literal"):
imports.add("from typing import Literal")
if _has_type("Any"):
imports.add("from typing import Any")
if _has_type("Generator"):
imports.add("from collections.abc import Generator")
if _has_type("Mapping"):
imports.add("from collections.abc import Mapping")
if _has_type("Collection"):
imports.add("from collections.abc import Collection")
if "Self" in joined:
imports.add("from typing import Self")
if _has_type("TypeAlias"):
imports.add("from typing import TypeAlias")
if _has_type("Generic"):
imports.add("from typing import Generic, TypeVar")
# Detect module-qualified references (e.g. bpy.types.Object, mathutils.Vector)
# Detect qualified module references
known_modules = [
_KNOWN_TYPED_MODULES: tuple[str, ...] = (
"bpy.types",
"bpy.props",
"bpy.app",
@ -145,26 +123,62 @@ def collect_imports(module_data: ModuleData) -> set[str]:
"freestyle.types",
"bmesh.types",
"bmesh",
]
if "datetime" in joined:
imports.add("import datetime")
if "types.ModuleType" in joined:
imports.add("import types")
if "collections.OrderedDict" in joined:
imports.add("import collections")
if "collections.abc." in joined:
imports.add("import collections.abc")
for mod in known_modules:
if mod + "." in joined or re.search(rf"\b{re.escape(mod)}\b", joined):
imports.add(f"import {mod}")
)
# fixup_shadowed_builtins will add builtins.X references for structs
# with property names that shadow builtins — need the import
_SPECIAL_IMPORT_MARKERS: tuple[tuple[str, str], ...] = (
("datetime", "import datetime"),
("types.ModuleType", "import types"),
("collections.OrderedDict", "import collections"),
("collections.abc.", "import collections.abc"),
)
def _has_token(joined_types: str, token: str) -> bool:
"""Return whether a type token appears as a standalone identifier."""
return bool(re.search(rf"\b{token}\b", joined_types))
def _collect_token_imports(joined_types: str) -> set[str]:
    """Collect import lines triggered by standalone type tokens.

    Scans *joined_types* (all type strings joined into one blob) for each
    token in the module-level ``_TOKEN_IMPORTS`` table and returns the
    corresponding import statements.
    """
    needed = {
        import_line
        for token, import_line in _TOKEN_IMPORTS
        if _has_token(joined_types, token)
    }
    # "Self" is checked with a plain substring test, mirroring the table scan.
    if "Self" in joined_types:
        needed.add("from typing import Self")
    return needed
def _collect_module_reference_imports(joined_types: str) -> set[str]:
    """Collect import lines required by module-qualified type references.

    Handles two cases: special stdlib markers (``_SPECIAL_IMPORT_MARKERS``,
    matched by substring) and known typed modules (``_KNOWN_TYPED_MODULES``,
    matched either as a dotted prefix or as a standalone word).
    """
    needed: set[str] = set()
    needed.update(
        import_line
        for marker, import_line in _SPECIAL_IMPORT_MARKERS
        if marker in joined_types
    )
    for mod in _KNOWN_TYPED_MODULES:
        has_dotted_use = mod + "." in joined_types
        has_bare_use = re.search(rf"\b{re.escape(mod)}\b", joined_types) is not None
        if has_dotted_use or has_bare_use:
            needed.add(f"import {mod}")
    return needed
def _requires_builtins_import(module_data: ModuleData) -> bool:
    """Return whether builtins import is needed for builtin-name shadowing.

    True when any struct has a property whose name collides with an entry in
    the module-level ``BUILTIN_NAMES`` set.
    """
    return any(
        {prop["name"] for prop in struct["properties"]} & BUILTIN_NAMES
        for struct in module_data.get("structs", [])
    )
def collect_imports(module_data: ModuleData) -> set[str]:
    """Collect all import statements needed for the stub file.

    Joins every type string found in *module_data* into one searchable blob,
    then merges token-triggered imports with module-qualified-reference
    imports, adding ``import builtins`` when struct properties shadow
    builtin names.
    """
    all_types = collect_all_type_strings(module_data)
    joined = " ".join(all_types)
    imports = _collect_token_imports(joined) | _collect_module_reference_imports(joined)
    if _requires_builtins_import(module_data):
        imports.add("import builtins")
        # NOTE(review): removed a stray ``break`` that followed this line —
        # there is no enclosing loop, so it was a SyntaxError
        # ("'break' outside loop").
    return imports
@ -216,9 +230,16 @@ def format_docstring(doc: str, indent: str = " ") -> str:
return result
def generate_function_stub(func: FunctionData) -> str:
"""Generate a stub for a single function."""
# Build parameter list, separating by kind and default presence
def _build_signature_params(
params: list[ParamData],
leading_params: list[str] | None = None,
skip_names: set[str] | None = None,
) -> str:
"""Build a function signature parameter list string from ParamData entries."""
filtered_params = [
param for param in params if param["name"] not in (skip_names or set())
]
positional_only_no_default: list[str] = []
positional_only_with_default: list[str] = []
positional_no_default: list[str] = []
@ -226,7 +247,7 @@ def generate_function_stub(func: FunctionData) -> str:
keyword_params: list[str] = []
has_positional_only = False
for param in func["params"]:
for param in filtered_params:
formatted = format_param(param, force_type=True)
kind = param.get("kind", "POSITIONAL_OR_KEYWORD")
if kind == "POSITIONAL_ONLY":
@ -248,7 +269,7 @@ def generate_function_stub(func: FunctionData) -> str:
else:
positional_no_default.append(formatted)
all_params: list[str] = []
all_params: list[str] = list(leading_params or [])
if has_positional_only and not (
positional_only_with_default and positional_no_default
):
@ -260,21 +281,30 @@ def generate_function_stub(func: FunctionData) -> str:
# Merge positional-only into regular params when / would be invalid
positional_no_default = positional_only_no_default + positional_no_default
positional_with_default = positional_only_with_default + positional_with_default
# Non-default positional params must come before default ones
all_params.extend(positional_no_default)
all_params.extend(positional_with_default)
if keyword_params:
# Insert * separator if there are keyword-only args and no VAR_POSITIONAL.
# Don't insert * if the only keyword params are **kwargs (already captures all).
has_var_positional = any(
p.get("kind") == "VAR_POSITIONAL" for p in func["params"]
p.get("kind") == "VAR_POSITIONAL" for p in filtered_params
)
has_named_keyword = any(
p.get("kind") == "KEYWORD_ONLY" for p in filtered_params
)
has_named_keyword = any(p.get("kind") == "KEYWORD_ONLY" for p in func["params"])
if not has_var_positional and has_named_keyword:
all_params.append("*")
all_params.extend(keyword_params)
params_str = ", ".join(all_params)
return ", ".join(all_params)
def generate_function_stub(func: FunctionData) -> str:
"""Generate a stub for a single function."""
params_str = _build_signature_params(func["params"])
# Return type
ret = f" -> {func['return_type']}" if func["return_type"] else " -> None"
@ -303,63 +333,9 @@ def generate_method_stub(
"""Generate a stub for a method inside a class."""
is_cls = func.get("is_classmethod", False)
first_param = "cls" if is_cls else "self"
# Categorize params by kind, keeping positional ordering correct
positional_only_no_default: list[str] = []
positional_only_with_default: list[str] = []
positional_no_default: list[str] = []
positional_with_default: list[str] = []
keyword_params: list[str] = []
has_positional_only = False
for param in func["params"]:
if param["name"] in ("cls", "self"):
continue
formatted = format_param(param, force_type=True)
kind = param.get("kind", "POSITIONAL_OR_KEYWORD")
if kind == "POSITIONAL_ONLY":
has_positional_only = True
if param["default"] is not None:
positional_only_with_default.append(formatted)
else:
positional_only_no_default.append(formatted)
elif kind == "KEYWORD_ONLY":
keyword_params.append(formatted)
elif kind == "VAR_POSITIONAL":
type_ann = f": {param['type']}" if param["type"] else ": object"
positional_no_default.append(f"*{param['name']}{type_ann}")
elif kind == "VAR_KEYWORD":
type_ann = f": {param['type']}" if param["type"] else ": object"
keyword_params.append(f"**{param['name']}{type_ann}")
elif param["default"] is not None:
positional_with_default.append(formatted)
else:
positional_no_default.append(formatted)
all_params: list[str] = [first_param]
if has_positional_only and not (
positional_only_with_default and positional_no_default
):
# Only emit / when it won't cause "non-default follows default" errors
all_params.extend(positional_only_no_default)
all_params.extend(positional_only_with_default)
all_params.append("/")
else:
# Merge positional-only into regular params when / would be invalid
positional_no_default = positional_only_no_default + positional_no_default
positional_with_default = positional_only_with_default + positional_with_default
all_params.extend(positional_no_default)
all_params.extend(positional_with_default)
if keyword_params:
has_var_positional = any(
p.get("kind") == "VAR_POSITIONAL" for p in func["params"]
params_str = _build_signature_params(
func["params"], leading_params=[first_param], skip_names={"cls", "self"}
)
has_named_keyword = any(p.get("kind") == "KEYWORD_ONLY" for p in func["params"])
if not has_var_positional and has_named_keyword:
all_params.append("*")
all_params.extend(keyword_params)
params_str = ", ".join(all_params)
ret = f" -> {func['return_type']}" if func["return_type"] else " -> None"
decorators = ""
if is_override:
@ -583,6 +559,8 @@ def generate_types_stub(
abc_imports.append("Callable")
if "MutableSequence[" in all_type_strs:
abc_imports.append("MutableSequence")
if re.search(r"(?<!\.)\bIterable\[", all_type_strs):
abc_imports.append("Iterable")
# Don't import Sequence directly — bpy.types.Sequence (video sequencer strip)
# shadows it. Always use collections.abc.Sequence via the qualified import.
@ -602,6 +580,8 @@ def generate_types_stub(
imports.append("import collections")
imports.append("import collections.abc")
imports.append("from collections.abc import Sequence as _Sequence")
if "mathutils." in all_type_strs:
imports.append("import mathutils")
parts: list[str] = []
if doc:
@ -697,6 +677,105 @@ def strip_self_module_prefix(
return content
def _apply_generic_bases(module_data: ModuleData) -> None:
    """Add Generic[_T] bases when classes are used as parameterized generics.

    Mutates *module_data* in place: any struct whose name appears subscripted
    (e.g. ``BMElemSeq[BMVert]``) in the collected type strings gains a
    ``Generic[_T]`` base unless it already has one.
    """
    joined_types = " ".join(collect_all_type_strings(module_data))
    structs = module_data.get("structs", [])
    struct_names = {entry["name"] for entry in structs}
    for struct in structs:
        name = struct["name"]
        # Membership check preserved from the original (always true here,
        # since struct_names is built from the same list).
        if name not in struct_names:
            continue
        if not re.search(rf"\b{re.escape(name)}\[", joined_types):
            continue
        current_base = struct.get("base") or ""
        if "Generic[" in current_base:
            continue
        struct["base"] = (
            f"{current_base}, Generic[_T]" if current_base else "Generic[_T]"
        )
def _prepare_module_imports(
    module_data: ModuleData,
    module_name: str,
    submodule_names: list[str],
) -> tuple[set[str], set[str], set[str]]:
    """Collect, filter, and classify imports for a generated module stub.

    Returns ``(usable_imports, clashing_imports, class_names)`` where
    clashing imports are those that would import a name already defined as a
    class in this module, and the exact self-import of *module_name* has
    been dropped.
    """
    imports = collect_imports(module_data)
    # Re-export submodules ("X as X" marks an intentional re-export in stubs).
    imports.update(f"from . import {sub} as {sub}" for sub in submodule_names)
    self_import = f"import {module_name}"
    imports = {line for line in imports if line != self_import}
    class_names = {struct["name"] for struct in module_data.get("structs", [])}
    clashing = {
        line
        for line in imports
        if any(f"import {name}" in line for name in class_names)
    }
    return imports - clashing, clashing, class_names
def _append_module_members(
    parts: list[str],
    module_data: ModuleData,
    submodule_names: list[str],
) -> None:
    """Append variables, functions, and classes to a module stub body.

    Mutates *parts* in place. Variables whose names clash with submodule
    re-exports (*submodule_names*) are skipped.
    """
    shadowed_by_submodules = set(submodule_names)
    if module_data["variables"]:
        parts.append("")
        parts.extend(
            generate_variable_stub(var)
            for var in module_data["variables"]
            if var["name"] not in shadowed_by_submodules
        )
    for func in module_data["functions"]:
        parts.extend(("", generate_function_stub(func)))
    for struct in module_data.get("structs", []):
        parts.extend(("", generate_struct_stub(struct)))
def _qualify_shadowed_builtin_annotations(
    result: str,
    module_data: ModuleData,
) -> str:
    """Qualify builtin type names when module-level variables shadow them.

    If a module variable is named after a builtin used as an annotation
    (e.g. a variable called ``object``), rewrite ``: object`` annotations to
    ``: builtins.object`` and prepend ``import builtins`` if missing.
    """
    shadow_candidates = {"object", "type", "int", "str", "float", "bool", "set"}
    module_vars = {entry["name"] for entry in module_data.get("variables", [])}
    collisions = module_vars & shadow_candidates
    if not collisions:
        return result
    for builtin_name in collisions:
        # Lookbehind restricts the rewrite to annotation positions (": name").
        result = re.sub(
            rf"(?<=: ){builtin_name}\b",
            f"builtins.{builtin_name}",
            result,
        )
    if "import builtins" not in result:
        result = "import builtins\n" + result
    return result
def _qualify_clashing_type_names(
result: str,
clashing_imports: set[str],
class_names: set[str],
) -> str:
"""Qualify type references when imported names clash with local class names."""
for clash in clashing_imports:
match = re.search(r"from ([\w.]+) import (\w+)", clash)
if match:
full_module = match.group(1)
name = match.group(2)
if name in class_names:
result = re.sub(
rf"(?<!\w){name}\[",
f"{full_module}.{name}[",
result,
)
if f"import {full_module}" not in result:
result = f"import {full_module}\n" + result
return result
def generate_module_stub(
module_data: ModuleData,
python_version: str = "3.11",
@ -711,41 +790,17 @@ def generate_module_stub(
module_data["structs"], python_version, module_data["doc"]
)
# Detect classes used as generics (e.g. BMElemSeq[BMVert]) and add
# Generic[_T] as base if they don't already have one.
# Must happen before collect_imports so Generic/TypeVar get imported.
all_types = collect_all_type_strings(module_data)
joined_types = " ".join(all_types)
struct_names = {s["name"] for s in module_data.get("structs", [])}
for struct in module_data.get("structs", []):
name = struct["name"]
if name in struct_names and re.search(rf"\b{re.escape(name)}\[", joined_types):
base = struct.get("base") or ""
if "Generic[" not in base:
struct["base"] = f"{base}, Generic[_T]" if base else "Generic[_T]"
# Detect classes used as generics and add Generic[_T] base when needed.
_apply_generic_bases(module_data)
parts: list[str] = []
# Module docstring
if module_data["doc"]:
parts.append(f'"""{module_data["doc"]}"""\n')
# Imports
imports = collect_imports(module_data)
# Re-export submodules so type checkers can resolve e.g. bpy.utils.
# Use "X as X" pattern to mark as intentional re-export in stubs.
for sub in submodule_names or []:
imports.add(f"from . import {sub} as {sub}")
# Remove exact self-imports (but keep submodule imports like bmesh.types for bmesh)
imports = {i for i in imports if i != f"import {module_name}"}
# Remove imports that clash with class names in this module
class_names = {s["name"] for s in module_data.get("structs", [])}
clashing_imports: set[str] = set()
for i in imports:
for name in class_names:
if f"import {name}" in i:
clashing_imports.add(i)
imports -= clashing_imports
imports, clashing_imports, class_names = _prepare_module_imports(
module_data, module_name, submodule_names or []
)
if imports:
parts.append("")
for imp in sorted(imports):
@ -759,57 +814,12 @@ def generate_module_stub(
parts.append('_T = TypeVar("_T")')
parts.append("")
# Variables (skip those that clash with submodule re-exports)
sub_names = set(submodule_names or [])
if module_data["variables"]:
parts.append("")
for var in module_data["variables"]:
if var["name"] not in sub_names:
parts.append(generate_variable_stub(var))
# Functions
for func in module_data["functions"]:
parts.append("")
parts.append(generate_function_stub(func))
# Classes
for struct in module_data.get("structs", []):
parts.append("")
parts.append(generate_struct_stub(struct))
_append_module_members(parts, module_data, submodule_names or [])
result = "\n".join(parts)
class_names = {s["name"] for s in module_data.get("structs", [])}
result = strip_self_module_prefix(result, module_name, class_names)
# If any variable/class name shadows a Python builtin used as a type
# annotation (e.g. bpy.ops has a variable named "object"), qualify it.
_BUILTINS_USED_AS_TYPES = {"object", "type", "int", "str", "float", "bool", "set"}
var_names = {v["name"] for v in module_data.get("variables", [])}
shadowed = var_names & _BUILTINS_USED_AS_TYPES
if shadowed:
for name in shadowed:
result = re.sub(
rf"(?<=: ){name}\b",
f"builtins.{name}",
result,
)
if "import builtins" not in result:
result = "import builtins\n" + result
# Qualify type names that clash with class names in the same module
for clash in clashing_imports:
match = re.search(r"from ([\w.]+) import (\w+)", clash)
if match:
full_module = match.group(1)
name = match.group(2)
if name in class_names:
result = re.sub(
rf"(?<!\w){name}\[",
f"{full_module}.{name}[",
result,
)
if f"import {full_module}" not in result:
result = f"import {full_module}\n" + result
result = _qualify_shadowed_builtin_annotations(result, module_data)
result = _qualify_clashing_type_names(result, clashing_imports, class_names)
return prune_unused_imports(result)

File diff suppressed because it is too large Load Diff