2023-06-15 13:09:04 +10:00
|
|
|
|
# SPDX-FileCopyrightText: 2013-2023 Blender Foundation
|
|
|
|
|
#
|
2022-02-11 09:07:11 +11:00
|
|
|
|
# SPDX-License-Identifier: GPL-2.0-or-later
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
|
|
|
|
# Populate a template file (POT format currently) from Blender RNA/py/C data.
|
|
|
|
|
# XXX: This script is meant to be used from inside Blender!
|
|
|
|
|
# You should not directly use this script, rather use update_msg.py!
|
|
|
|
|
|
|
|
|
|
import datetime
|
|
|
|
|
import os
|
|
|
|
|
import re
|
|
|
|
|
import sys
|
2022-08-23 11:43:39 +02:00
|
|
|
|
import glob
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
|
|
|
|
# XXX Relative import does not work here when used from Blender...
|
2013-03-19 08:33:24 +00:00
|
|
|
|
from bl_i18n_utils import settings as settings_i18n, utils
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
|
|
|
|
import bpy
|
|
|
|
|
|
|
|
|
|
##### Utils #####
|
|
|
|
|
|
|
|
|
|
# Filter out "messages" that contain no translatable words: strings made only of
# math/format characters, e.g. "+%f°", "%d x %d", "...".
# The character class covers punctuation/digits/degree sign; the alternatives
# cover common printf-style placeholders; `\s` allows any whitespace.
ignore_reg = re.compile(r"^(?:[-*.()/\\+%°0-9]|%d|%f|%s|%r|\s)*$")
# Truthy (a match object) when the msgid should be SKIPPED, falsy (None) otherwise.
filter_message = ignore_reg.match
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def init_spell_check(settings, lang="en_US"):
    """
    Create and return a spell checker for the given language, if possible.

    :param settings: i18n settings object, forwarded to the SpellChecker.
    :param lang: locale code of the dictionary to load (default ``"en_US"``).
    :return: a ``utils_spell_check.SpellChecker`` instance, or None when
             creation fails — spell checking is then silently disabled.
    """
    try:
        from bl_i18n_utils import utils_spell_check
        return utils_spell_check.SpellChecker(settings, lang)
    except BaseException as ex:
        # Broad catch is deliberate: any failure (missing module, dictionary
        # backend error, ...) must only disable spell checking, never abort
        # the whole message extraction.
        print("Failed to import utils_spell_check ({})".format(str(ex)))
        return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _gen_check_ctxt(settings):
    """
    Build a fresh "check context" dict used by :func:`check`.

    One empty set per warning category, plus the spell checker instance and
    its (message key -> errors) results dict.
    """
    check_ctxt = {categ: set() for categ in (
        "multi_rnatip",
        "multi_lines",
        "py_in_rna",
        "not_capitalized",
        "end_point",
        "undoc_ops",
    )}
    check_ctxt["spell_checker"] = init_spell_check(settings)
    check_ctxt["spell_errors"] = {}
    return check_ctxt
|
|
|
|
|
|
2015-01-29 15:35:06 +11:00
|
|
|
|
|
2013-04-09 08:56:35 +00:00
|
|
|
|
def _diff_check_ctxt(check_ctxt, minus_check_ctxt):
|
2015-05-29 11:08:26 +02:00
|
|
|
|
"""Removes minus_check_ctxt from check_ctxt"""
|
2013-04-09 08:56:35 +00:00
|
|
|
|
for key in check_ctxt:
|
|
|
|
|
if isinstance(check_ctxt[key], set):
|
|
|
|
|
for warning in minus_check_ctxt[key]:
|
|
|
|
|
if warning in check_ctxt[key]:
|
|
|
|
|
check_ctxt[key].remove(warning)
|
|
|
|
|
elif isinstance(check_ctxt[key], dict):
|
|
|
|
|
for warning in minus_check_ctxt[key]:
|
|
|
|
|
if warning in check_ctxt[key]:
|
|
|
|
|
del check_ctxt[key][warning]
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
2015-01-29 15:35:06 +11:00
|
|
|
|
|
2013-02-24 08:50:55 +00:00
|
|
|
|
def _gen_reports(check_ctxt):
|
|
|
|
|
return {
|
|
|
|
|
"check_ctxt": check_ctxt,
|
|
|
|
|
"rna_structs": [],
|
|
|
|
|
"rna_structs_skipped": [],
|
|
|
|
|
"rna_props": [],
|
|
|
|
|
"rna_props_skipped": [],
|
|
|
|
|
"py_messages": [],
|
|
|
|
|
"py_messages_skipped": [],
|
|
|
|
|
"src_messages": [],
|
|
|
|
|
"src_messages_skipped": [],
|
|
|
|
|
"messages_skipped": set(),
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def check(check_ctxt, msgs, key, msgsrc, settings):
    """
    Performs a set of checks over the given key (context, message)...

    Each enabled check accumulates offending keys into the matching container
    of ``check_ctxt``; a check whose container is absent (``.get()`` -> None)
    is simply skipped.

    :param check_ctxt: dict of warning containers (see ``_gen_check_ctxt``), or None.
    :param msgs: the messages dict collected so far (used to detect duplicates).
    :param key: the (msgctxt, msgid) tuple under scrutiny.
    :param msgsrc: source location string of the message (currently unused here).
    :param settings: i18n settings (whitelists and special strings).
    """
    if check_ctxt is None:
        return
    multi_rnatip = check_ctxt.get("multi_rnatip")
    multi_lines = check_ctxt.get("multi_lines")
    py_in_rna = check_ctxt.get("py_in_rna")
    not_capitalized = check_ctxt.get("not_capitalized")
    end_point = check_ctxt.get("end_point")
    undoc_ops = check_ctxt.get("undoc_ops")
    spell_checker = check_ctxt.get("spell_checker")
    spell_errors = check_ctxt.get("spell_errors")

    # Same key seen more than once (e.g. one tooltip reused by several RNA items).
    if multi_rnatip is not None:
        if key in msgs and key not in multi_rnatip:
            multi_rnatip.add(key)
    # Message spanning several lines.
    if multi_lines is not None:
        if '\n' in key[1]:
            multi_lines.add(key)
    # NOTE(review): here `py_in_rna` is indexed as a (found, py_messages) pair,
    # unlike the plain set created by `_gen_check_ctxt` — presumably the py-code
    # extraction pass replaces it with such a pair before RNA checks run; confirm.
    if py_in_rna is not None:
        if key in py_in_rna[1]:
            py_in_rna[0].add(key)
    # First alphabetic character not uppercase (unless whitelisted).
    if not_capitalized is not None:
        if (key[1] not in settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED and
                key[1][0].isalpha() and not key[1][0].isupper()):
            not_capitalized.add(key)
    # Ends with a single period — ellipses ("...") are fine, as are whitelisted messages.
    if end_point is not None:
        if (
                key[1].strip().endswith('.') and
                (not key[1].strip().endswith('...')) and
                key[1] not in settings.WARN_MSGID_END_POINT_ALLOWED
        ):
            end_point.add(key)
    # Operator still carrying the placeholder "undocumented" tooltip.
    if undoc_ops is not None:
        if key[1] == settings.UNDOC_OPS_STR:
            undoc_ops.add(key)
    # Spell-check the msgid; errors are keyed by the full (ctxt, msgid) tuple.
    if spell_checker is not None and spell_errors is not None:
        err = spell_checker.check(key[1])
        if err:
            spell_errors[key] = err
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def print_info(reports, pot):
    """
    Print extraction statistics and accumulated warnings to stderr.

    :param reports: the reports dict filled during extraction (see ``_gen_reports``).
    :param pot: the generated POT messages container (must support
                ``update_info()``, ``print_info()`` and expose ``msgs``).
    """
    def _print(*args, **kwargs):
        # Everything goes to stderr so stdout stays clean for the POT itself.
        kwargs["file"] = sys.stderr
        print(*args, **kwargs)

    pot.update_info()

    _print("{} RNA structs were processed (among which {} were skipped), containing {} RNA properties "
           "(among which {} were skipped).".format(len(reports["rna_structs"]), len(reports["rna_structs_skipped"]),
                                                   len(reports["rna_props"]), len(reports["rna_props_skipped"])))
    _print("{} messages were extracted from Python UI code (among which {} were skipped), and {} from C source code "
           "(among which {} were skipped).".format(len(reports["py_messages"]), len(reports["py_messages_skipped"]),
                                                   len(reports["src_messages"]), len(reports["src_messages_skipped"])))
    _print("{} messages were rejected.".format(len(reports["messages_skipped"])))
    _print("\n")
    _print("Current POT stats:")
    pot.print_info(prefix="\t", output=_print)
    _print("\n")

    check_ctxt = reports["check_ctxt"]
    if check_ctxt is None:
        return
    multi_rnatip = check_ctxt.get("multi_rnatip")
    multi_lines = check_ctxt.get("multi_lines")
    py_in_rna = check_ctxt.get("py_in_rna")
    not_capitalized = check_ctxt.get("not_capitalized")
    end_point = check_ctxt.get("end_point")
    undoc_ops = check_ctxt.get("undoc_ops")
    spell_errors = check_ctxt.get("spell_errors")

    # XXX Temp, no multi_rnatip nor py_in_rna, see below.
    # Also, multi-lines tooltips are valid now.
    keys = not_capitalized | end_point | undoc_ops | spell_errors.keys()
    if keys:
        _print("WARNINGS:")
        for key in keys:
            if undoc_ops and key in undoc_ops:
                _print("\tThe following operators are undocumented!")
            else:
                _print("\t“{}”|“{}”:".format(*key))
                # We support multi-lines tooltips now...
                # ~ if multi_lines and key in multi_lines:
                # ~ _print("\t\t-> newline in this message!")
                if not_capitalized and key in not_capitalized:
                    _print("\t\t-> message not capitalized!")
                if end_point and key in end_point:
                    _print("\t\t-> message with endpoint!")
                # XXX Hide this one for now, too much false positives.
                # if multi_rnatip and key in multi_rnatip:
                #     _print("\t\t-> tip used in several RNA items")
                # if py_in_rna and key in py_in_rna:
                #     _print("\t\t-> RNA message also used in py UI code!")
                if spell_errors and spell_errors.get(key):
                    lines = [
                        "\t\t-> {}: misspelled, suggestions are ({})".format(w, "'" + "', '".join(errs) + "'")
                        for w, errs in spell_errors[key]
                    ]
                    _print("\n".join(lines))
            # Always list where the offending message comes from.
            _print("\t\t{}".format("\n\t\t".join(pot.msgs[key].sources)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt, settings):
    """
    Register a single message into ``msgs``, running checks on the way.

    Messages made only of format/math characters are rejected (recorded into
    ``reports["messages_skipped"]``). Keys are always stored unescaped; a new
    key gets a fresh I18nMessage, an existing one just gains a source comment.

    :param msgs: dict of (msgctxt, msgid) -> I18nMessage, updated in place.
    :param msgctxt: translation context; falsy values map to the default context.
    :param msgid: the message itself.
    :param msgsrc: source location string, stored as a custom PO comment.
    :param reports: reports dict (see ``_gen_reports``).
    :param check_ctxt: check context (see ``_gen_check_ctxt``), or None to skip checks.
    :param settings: i18n settings.
    """
    if filter_message(msgid):
        reports["messages_skipped"].add((msgid, msgsrc))
        return
    if not msgctxt:
        # We do *not* want any "" context!
        msgctxt = settings.DEFAULT_CONTEXT
    # Always unescape keys!
    msgctxt = utils.I18nMessage.do_unescape(msgctxt)
    msgid = utils.I18nMessage.do_unescape(msgid)
    key = (msgctxt, msgid)
    check(check_ctxt, msgs, key, msgsrc, settings)
    msgsrc = settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + msgsrc
    if key not in msgs:
        msgs[key] = utils.I18nMessage([msgctxt], [msgid], [], [msgsrc], settings=settings)
    else:
        msgs[key].comment_lines.append(msgsrc)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
##### RNA #####
|
2013-04-12 12:19:50 +00:00
|
|
|
|
def dump_rna_messages(msgs, reports, settings, verbose=False):
    """
    Dump into messages dict all RNA-defined UI messages (labels and tooltips).

    Walks the whole RNA class hierarchy (from the bpy_struct base), plus keymap
    preset preferences and the keymap hierarchy, feeding every translatable
    string through :func:`process_msg`.

    :param msgs: dict of (msgctxt, msgid) -> I18nMessage, updated in place.
    :param reports: reports dict (see ``_gen_reports``), updated in place.
    :param settings: i18n settings.
    :param verbose: when True, print each processed class list/class.
    """
    def class_blacklist():
        # Classes whose messages must not be extracted (internal/core types).
        blacklist_rna_class = {getattr(bpy.types, cls_id) for cls_id in (
            # core classes
            "Context", "Event", "Function", "UILayout", "UnknownType", "Struct",
            # registerable classes
            "Panel", "Menu", "Header", "RenderEngine", "Operator", "OperatorMacro", "Macro", "KeyingSetInfo",
        )
        }

        # More builtin classes we don't need to parse.
        blacklist_rna_class |= {cls for cls in bpy.types.Property.__subclasses__()}

        return blacklist_rna_class

    check_ctxt_rna = check_ctxt_rna_tip = None
    check_ctxt = reports["check_ctxt"]
    if check_ctxt:
        # RNA labels get all checks except 'multi_rnatip'; tooltips get it too.
        # NOTE(review): `check_ctxt_rna_tip` aliases `check_ctxt_rna` (same dict),
        # so adding "multi_rnatip" below affects both names — presumably intended;
        # confirm before relying on any label/tooltip check distinction here.
        check_ctxt_rna = {
            "multi_lines": check_ctxt.get("multi_lines"),
            "not_capitalized": check_ctxt.get("not_capitalized"),
            "end_point": check_ctxt.get("end_point"),
            "undoc_ops": check_ctxt.get("undoc_ops"),
            "spell_checker": check_ctxt.get("spell_checker"),
            "spell_errors": check_ctxt.get("spell_errors"),
        }
        check_ctxt_rna_tip = check_ctxt_rna
        check_ctxt_rna_tip["multi_rnatip"] = check_ctxt.get("multi_rnatip")

    default_context = settings.DEFAULT_CONTEXT

    # Function definitions
    def walk_properties(cls):
        # Extract name/description (and enum item names/descriptions) of all
        # properties defined directly on `cls` (not inherited ones).

        # This handles properties whose name is the same as their identifier.
        # Usually, it means that those are internal properties not exposed in the UI, however there are some cases
        # where the UI label is actually defined and same as the identifier (color spaces e.g., `RGB` etc.).
        # So we only exclude those properties in case they belong to an operator for now.
        def prop_name_validate(cls, prop_name, prop_identifier):
            if prop_name != prop_identifier:
                return True
            # Heuristic: A lot of operator's HIDDEN properties have no UI label/description.
            # While this is not ideal (for API doc purposes, description should always be provided),
            # for now skip those properties.
            # NOTE: keep in sync with C code in ui_searchbox_region_draw_cb__operator().
            if issubclass(cls, bpy.types.OperatorProperties) and "_OT_" in cls.__name__:
                return False
            # Heuristic: If UI label is not capitalized, it is likely a private (undocumented) property,
            # that can be skipped.
            if prop_name and not prop_name[0].isupper():
                return False
            return True

        bl_rna = cls.bl_rna
        # Get our parents' properties, to not export them multiple times.
        bl_rna_base = bl_rna.base
        bl_rna_base_props = set()
        if bl_rna_base:
            bl_rna_base_props |= set(bl_rna_base.properties.values())
        if hasattr(cls, "__bases__"):
            for cls_base in cls.__bases__:
                bl_rna_base = getattr(cls_base, "bl_rna", None)
                if not bl_rna_base:
                    continue
                bl_rna_base_props |= set(bl_rna_base.properties.values())

        # Sort for a stable POT output across runs.
        props = sorted(bl_rna.properties, key=lambda p: p.identifier)
        for prop in props:
            # Only write this property if our parent hasn't got it.
            if prop in bl_rna_base_props:
                continue
            if prop.identifier in {"rna_type", "bl_icon", "icon"}:
                continue
            reports["rna_props"].append((cls, prop))

            msgsrc = "bpy.types.{}.{}".format(bl_rna.identifier, prop.identifier)
            msgctxt = prop.translation_context or default_context

            if prop.name and prop_name_validate(cls, prop.name, prop.identifier):
                process_msg(msgs, msgctxt, prop.name, msgsrc, reports, check_ctxt_rna, settings)
            if prop.description:
                process_msg(msgs, default_context, prop.description, msgsrc, reports, check_ctxt_rna_tip, settings)

            if isinstance(prop, bpy.types.EnumProperty):
                # Process dynamic items first, then the static ones not already seen.
                done_items = set()
                for item in prop.enum_items:
                    msgsrc = "bpy.types.{}.{}:'{}'".format(bl_rna.identifier, prop.identifier, item.identifier)
                    done_items.add(item.identifier)
                    if item.name and prop_name_validate(cls, item.name, item.identifier):
                        process_msg(msgs, msgctxt, item.name, msgsrc, reports, check_ctxt_rna, settings)
                    if item.description:
                        process_msg(msgs, default_context, item.description, msgsrc, reports, check_ctxt_rna_tip,
                                    settings)
                for item in prop.enum_items_static:
                    if item.identifier in done_items:
                        continue
                    msgsrc = "bpy.types.{}.{}:'{}'".format(bl_rna.identifier, prop.identifier, item.identifier)
                    done_items.add(item.identifier)
                    if item.name and prop_name_validate(cls, item.name, item.identifier):
                        process_msg(msgs, msgctxt, item.name, msgsrc, reports, check_ctxt_rna, settings)
                    if item.description:
                        process_msg(msgs, default_context, item.description, msgsrc, reports, check_ctxt_rna_tip,
                                    settings)

    def walk_tools_definitions(cls):
        # Extract labels/descriptions of the toolbar ToolDef entries of `cls`.
        from bl_ui.space_toolsystem_common import ToolDef

        bl_rna = cls.bl_rna
        op_default_context = bpy.app.translations.contexts.operator_default

        def process_tooldef(tool_context, tool):
            if not isinstance(tool, ToolDef):
                # A callable generating ToolDefs: expand it recursively.
                if callable(tool):
                    for t in tool(None):
                        process_tooldef(tool_context, t)
                return
            msgsrc = "bpy.types.{} Tools: '{}', '{}'".format(bl_rna.identifier, tool_context, tool.idname)
            if tool.label:
                process_msg(msgs, op_default_context, tool.label, msgsrc, reports, check_ctxt_rna, settings)
            # Callable (function) descriptions must handle their translations themselves.
            if tool.description and not callable(tool.description):
                process_msg(msgs, default_context, tool.description, msgsrc, reports, check_ctxt_rna_tip, settings)

        for tool_context, tools_defs in cls.tools_all():
            for tools_group in tools_defs:
                if tools_group is None:
                    continue
                elif isinstance(tools_group, tuple) and not isinstance(tools_group, ToolDef):
                    for tool in tools_group:
                        process_tooldef(tool_context, tool)
                else:
                    process_tooldef(tool_context, tools_group)

    blacklist_rna_class = class_blacklist()

    def walk_class(cls):
        # Extract the class-level messages (name, description, label, category,
        # tool definitions), then recurse into its properties.
        bl_rna = cls.bl_rna
        msgsrc = "bpy.types." + bl_rna.identifier
        msgctxt = bl_rna.translation_context or default_context

        if bl_rna.name and (bl_rna.name != bl_rna.identifier or
                            (msgctxt != default_context and not hasattr(bl_rna, "bl_label"))):
            process_msg(msgs, msgctxt, bl_rna.name, msgsrc, reports, check_ctxt_rna, settings)

        if bl_rna.description:
            process_msg(msgs, default_context, bl_rna.description, msgsrc, reports, check_ctxt_rna_tip, settings)
        elif cls.__doc__:  # XXX Some classes (like KeyingSetInfo subclasses) have void description... :(
            process_msg(msgs, default_context, cls.__doc__, msgsrc, reports, check_ctxt_rna_tip, settings)

        # Panels' "tabs" system.
        if hasattr(bl_rna, "bl_category") and bl_rna.bl_category:
            process_msg(msgs, default_context, bl_rna.bl_category, msgsrc, reports, check_ctxt_rna, settings)

        if hasattr(bl_rna, "bl_label") and bl_rna.bl_label:
            process_msg(msgs, msgctxt, bl_rna.bl_label, msgsrc, reports, check_ctxt_rna, settings)

        # Tools Panels definitions.
        if hasattr(bl_rna, "tools_all") and bl_rna.tools_all:
            walk_tools_definitions(cls)

        walk_properties(cls)

    def walk_keymap_modal_events(keyconfigs, keymap_name, msgsrc_prev, km_i18n_context):
        # Extract names/descriptions of modal events of the named keymap,
        # across all given key configurations.
        for keyconfig in keyconfigs:
            keymap = keyconfig.keymaps.get(keymap_name, None)
            if keymap and keymap.is_modal:
                for modal_event in keymap.modal_event_values:
                    msgsrc = msgsrc_prev + ":'{}'".format(modal_event.identifier)
                    if modal_event.name:
                        process_msg(msgs, km_i18n_context, modal_event.name, msgsrc, reports, None, settings)
                    if modal_event.description:
                        process_msg(msgs, default_context, modal_event.description, msgsrc, reports, None, settings)

    def walk_keymap_hierarchy(hier, msgsrc_prev):
        # Recursively extract keymap names (and their modal events) from the
        # nested (name, ..., children) keymap hierarchy tuples.
        km_i18n_context = bpy.app.translations.contexts.id_windowmanager
        for lvl in hier:
            msgsrc = msgsrc_prev + "." + lvl[1]
            if isinstance(lvl[0], str):  # Can be a function too, now, with tool system...
                keymap_name = lvl[0]
                process_msg(msgs, km_i18n_context, keymap_name, msgsrc, reports, None, settings)
                walk_keymap_modal_events(bpy.data.window_managers[0].keyconfigs, keymap_name, msgsrc, km_i18n_context)
            if lvl[3]:
                walk_keymap_hierarchy(lvl[3], msgsrc)

    # Dump Messages

    def process_cls_list(cls_list):
        if not cls_list:
            return

        def full_class_id(cls):
            """Gives us 'ID.Light.AreaLight' which is best for sorting."""
            # Always the same issue, some classes listed in blacklist should actually no more exist (they have been
            # unregistered), but are still listed by __subclasses__() calls... :/
            if cls in blacklist_rna_class:
                return cls.__name__
            cls_id = ""
            bl_rna = getattr(cls, "bl_rna", None)
            # It seems that py-defined 'wrappers' RNA classes (like `MeshEdge` in `bpy_types.py`) need to be accessed
            # once from `bpy.types` before they have a valid `bl_rna` member.
            # Weirdly enough, this is only triggered on release builds, debug builds somehow do not have that issue.
            if bl_rna is None:
                if getattr(bpy.types, cls.__name__, None) is not None:
                    bl_rna = getattr(cls, "bl_rna", None)
                if bl_rna is None:
                    raise TypeError("Unknown RNA class")
            while bl_rna:
                cls_id = bl_rna.identifier + "." + cls_id
                bl_rna = bl_rna.base
            return cls_id

        if verbose:
            print(cls_list)
        cls_list.sort(key=full_class_id)
        for cls in cls_list:
            if verbose:
                print(cls)
            reports["rna_structs"].append(cls)
            # Ignore those Operator sub-classes (anyway, will get the same from OperatorProperties sub-classes!)...
            if (cls in blacklist_rna_class) or issubclass(cls, bpy.types.Operator):
                reports["rna_structs_skipped"].append(cls)
            else:
                walk_class(cls)
            # Recursively process subclasses.
            process_cls_list(cls.__subclasses__())

    # FIXME Workaround weird new (blender 3.2) issue where some classes (like `bpy.types.Modifier`)
    # are not listed by `bpy.types.ID.__base__.__subclasses__()` until they are accessed from
    # `bpy.types` (eg just executing `bpy.types.Modifier`).
    cls_dir = dir(bpy.types)
    for cls_name in cls_dir:
        getattr(bpy.types, cls_name)

    # Parse everything (recursively parsing from bpy_struct "class"...).
    process_cls_list(bpy.types.ID.__base__.__subclasses__())

    # Parse keymap preset preferences
    for preset_filename in sorted(
            os.listdir(os.path.join(settings.PRESETS_DIR, "keyconfig"))):
        preset_path = os.path.join(settings.PRESETS_DIR, "keyconfig", preset_filename)
        if not (os.path.isfile(preset_path) and preset_filename.endswith(".py")):
            continue
        preset_name, _ = os.path.splitext(preset_filename)

        bpy.utils.keyconfig_set(preset_path)
        preset = bpy.data.window_managers[0].keyconfigs[preset_name]
        if preset.preferences is not None:
            walk_properties(preset.preferences)

    # And parse keymaps!
    from bl_keymap_utils import keymap_hierarchy
    walk_keymap_hierarchy(keymap_hierarchy.generate(), "KM_HIERARCHY")
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
##### Python source code #####
|
|
|
|
|
def dump_py_messages_from_files(msgs, reports, files, settings):
|
|
|
|
|
"""
|
2023-04-18 10:42:00 +10:00
|
|
|
|
Dump text inlined in the python files given, e.g. "My Name" in:
|
2013-02-24 08:50:55 +00:00
|
|
|
|
layout.prop("someprop", text="My Name")
|
|
|
|
|
"""
|
|
|
|
|
import ast
|
|
|
|
|
|
|
|
|
|
bpy_struct = bpy.types.ID.__base__
|
2013-03-28 15:03:47 +00:00
|
|
|
|
i18n_contexts = bpy.app.translations.contexts
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
2013-02-27 16:24:20 +00:00
|
|
|
|
root_paths = tuple(bpy.utils.resource_path(t) for t in ('USER', 'LOCAL', 'SYSTEM'))
|
2018-07-07 08:48:35 +02:00
|
|
|
|
|
2013-02-27 16:24:20 +00:00
|
|
|
|
def make_rel(path):
|
|
|
|
|
for rp in root_paths:
|
|
|
|
|
if path.startswith(rp):
|
2013-08-18 15:17:33 +00:00
|
|
|
|
try: # can't always find the relative path (between drive letters on windows)
|
|
|
|
|
return os.path.relpath(path, rp)
|
|
|
|
|
except ValueError:
|
|
|
|
|
return path
|
2013-02-27 16:24:20 +00:00
|
|
|
|
# Use binary's dir as fallback...
|
2013-08-18 15:17:33 +00:00
|
|
|
|
try: # can't always find the relative path (between drive letters on windows)
|
|
|
|
|
return os.path.relpath(path, os.path.dirname(bpy.app.binary_path))
|
|
|
|
|
except ValueError:
|
|
|
|
|
return path
|
2013-02-27 16:24:20 +00:00
|
|
|
|
|
2013-02-24 08:50:55 +00:00
|
|
|
|
# Helper function
|
|
|
|
|
def extract_strings_ex(node, is_split=False):
|
|
|
|
|
"""
|
|
|
|
|
Recursively get strings, needed in case we have "Blah" + "Blah", passed as an argument in that case it won't
|
|
|
|
|
evaluate to a string. However, break on some kind of stopper nodes, like e.g. Subscript.
|
|
|
|
|
"""
|
2023-03-03 21:36:43 +01:00
|
|
|
|
if type(node) == ast.Constant:
|
2013-02-24 08:50:55 +00:00
|
|
|
|
eval_str = ast.literal_eval(node)
|
2020-09-13 19:50:08 +02:00
|
|
|
|
if eval_str and type(eval_str) == str:
|
2013-02-24 08:50:55 +00:00
|
|
|
|
yield (is_split, eval_str, (node,))
|
|
|
|
|
else:
|
|
|
|
|
is_split = (type(node) in separate_nodes)
|
|
|
|
|
for nd in ast.iter_child_nodes(node):
|
|
|
|
|
if type(nd) not in stopper_nodes:
|
|
|
|
|
yield from extract_strings_ex(nd, is_split=is_split)
|
|
|
|
|
|
|
|
|
|
def _extract_string_merge(estr_ls, nds_ls):
|
|
|
|
|
return "".join(s for s in estr_ls if s is not None), tuple(n for n in nds_ls if n is not None)
|
|
|
|
|
|
|
|
|
|
def extract_strings(node):
|
|
|
|
|
estr_ls = []
|
|
|
|
|
nds_ls = []
|
|
|
|
|
for is_split, estr, nds in extract_strings_ex(node):
|
|
|
|
|
estr_ls.append(estr)
|
|
|
|
|
nds_ls.extend(nds)
|
|
|
|
|
ret = _extract_string_merge(estr_ls, nds_ls)
|
|
|
|
|
return ret
|
2013-03-28 19:33:14 +00:00
|
|
|
|
|
2013-02-24 08:50:55 +00:00
|
|
|
|
def extract_strings_split(node):
|
|
|
|
|
"""
|
2015-09-28 20:19:54 +02:00
|
|
|
|
Returns a list args as returned by 'extract_strings()', but split into groups based on separate_nodes, this way
|
2018-09-03 16:49:08 +02:00
|
|
|
|
expressions like ("A" if test else "B") won't be merged but "A" + "B" will.
|
2013-02-24 08:50:55 +00:00
|
|
|
|
"""
|
|
|
|
|
estr_ls = []
|
|
|
|
|
nds_ls = []
|
|
|
|
|
bag = []
|
|
|
|
|
for is_split, estr, nds in extract_strings_ex(node):
|
|
|
|
|
if is_split:
|
|
|
|
|
bag.append((estr_ls, nds_ls))
|
|
|
|
|
estr_ls = []
|
|
|
|
|
nds_ls = []
|
|
|
|
|
|
|
|
|
|
estr_ls.append(estr)
|
|
|
|
|
nds_ls.extend(nds)
|
|
|
|
|
|
|
|
|
|
bag.append((estr_ls, nds_ls))
|
|
|
|
|
|
|
|
|
|
return [_extract_string_merge(estr_ls, nds_ls) for estr_ls, nds_ls in bag]
|
|
|
|
|
|
2013-03-28 15:03:47 +00:00
|
|
|
|
i18n_ctxt_ids = {v for v in bpy.app.translations.contexts_C_to_py.values()}
|
2018-07-07 08:48:35 +02:00
|
|
|
|
|
2013-02-24 08:50:55 +00:00
|
|
|
|
def _ctxt_to_ctxt(node):
|
2013-03-28 15:03:47 +00:00
|
|
|
|
# We must try, to some extend, to get contexts from vars instead of only literal strings...
|
|
|
|
|
ctxt = extract_strings(node)[0]
|
|
|
|
|
if ctxt:
|
|
|
|
|
return ctxt
|
|
|
|
|
# Basically, we search for attributes matching py context names, for now.
|
|
|
|
|
# So non-literal contexts should be used that way:
|
|
|
|
|
# i18n_ctxt = bpy.app.translations.contexts
|
|
|
|
|
# foobar(text="Foo", text_ctxt=i18n_ctxt.id_object)
|
|
|
|
|
if type(node) == ast.Attribute:
|
|
|
|
|
if node.attr in i18n_ctxt_ids:
|
2023-03-01 22:12:18 +11:00
|
|
|
|
# print(node, node.attr, getattr(i18n_contexts, node.attr))
|
2013-03-28 15:03:47 +00:00
|
|
|
|
return getattr(i18n_contexts, node.attr)
|
|
|
|
|
return i18n_contexts.default
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
|
|
|
|
def _op_to_ctxt(node):
    """Return the i18n context of the operator referenced by the given AST node."""
    # Some smart coders like things like:
    # >>> row.operator("preferences.addon_disable" if is_enabled else "preferences.addon_enable", ...)
    # We only take first arg into account here!
    opname = extract_strings_split(node)[0][0]
    if not opname:
        return i18n_contexts.operator_default

    # Resolve e.g. "wm.open_mainfile" into the `bpy.ops.wm.open_mainfile` callable.
    op = bpy.ops
    for part in opname.split('.'):
        op = getattr(op, part)

    try:
        return op.get_rna_type().translation_context
    except BaseException as ex:
        default_op_context = i18n_contexts.operator_default
        print("ERROR: ", str(ex))
        print("       Assuming default operator context '{}'".format(default_op_context))
        return default_op_context
|
|
|
|
|
|
|
|
|
|
# Gather function names.
|
|
|
|
|
# In addition of UI func, also parse pgettext ones...
|
|
|
|
|
# Tuples of (module name, (short names, ...)).
|
|
|
|
|
pgettext_variants = (
|
|
|
|
|
("pgettext", ("_",)),
|
|
|
|
|
("pgettext_iface", ("iface_",)),
|
2013-03-28 15:03:47 +00:00
|
|
|
|
("pgettext_tip", ("tip_",)),
|
|
|
|
|
("pgettext_data", ("data_",)),
|
2013-02-24 08:50:55 +00:00
|
|
|
|
)
|
|
|
|
|
pgettext_variants_args = {"msgid": (0, {"msgctxt": 1})}
|
|
|
|
|
|
|
|
|
|
# key: msgid keywords.
|
|
|
|
|
# val: tuples of ((keywords,), context_getter_func) to get a context for that msgid.
|
|
|
|
|
# Note: order is important, first one wins!
|
|
|
|
|
translate_kw = {
|
|
|
|
|
"text": ((("text_ctxt",), _ctxt_to_ctxt),
|
|
|
|
|
(("operator",), _op_to_ctxt),
|
|
|
|
|
),
|
2018-07-07 08:48:35 +02:00
|
|
|
|
"msgid": ((("msgctxt",), _ctxt_to_ctxt),
|
|
|
|
|
),
|
2013-03-10 16:14:29 +00:00
|
|
|
|
"message": (),
|
2020-12-21 11:45:59 +01:00
|
|
|
|
"heading": (),
|
2013-02-24 08:50:55 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
context_kw_set = {}
|
|
|
|
|
for k, ctxts in translate_kw.items():
|
|
|
|
|
s = set()
|
|
|
|
|
for c, _ in ctxts:
|
|
|
|
|
s |= set(c)
|
|
|
|
|
context_kw_set[k] = s
|
|
|
|
|
|
|
|
|
|
# {func_id: {msgid: (arg_pos,
|
|
|
|
|
# {msgctxt: arg_pos,
|
|
|
|
|
# ...
|
|
|
|
|
# }
|
|
|
|
|
# ),
|
|
|
|
|
# ...
|
|
|
|
|
# },
|
|
|
|
|
# ...
|
|
|
|
|
# }
|
|
|
|
|
func_translate_args = {}
|
|
|
|
|
|
|
|
|
|
# First, functions from UILayout
|
|
|
|
|
# First loop is for msgid args, second one is for msgctxt args.
|
|
|
|
|
for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
|
|
|
|
|
# check it has one or more arguments as defined in translate_kw
|
|
|
|
|
for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
|
|
|
|
|
if ((arg_kw in translate_kw) and (not arg.is_output) and (arg.type == 'STRING')):
|
|
|
|
|
func_translate_args.setdefault(func_id, {})[arg_kw] = (arg_pos, {})
|
|
|
|
|
for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
|
|
|
|
|
if func_id not in func_translate_args:
|
|
|
|
|
continue
|
|
|
|
|
for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
|
|
|
|
|
if (not arg.is_output) and (arg.type == 'STRING'):
|
|
|
|
|
for msgid, msgctxts in context_kw_set.items():
|
|
|
|
|
if arg_kw in msgctxts:
|
|
|
|
|
func_translate_args[func_id][msgid][1][arg_kw] = arg_pos
|
2013-03-10 16:14:29 +00:00
|
|
|
|
# The report() func of operators.
|
|
|
|
|
for func_id, func in bpy.types.Operator.bl_rna.functions.items():
|
|
|
|
|
# check it has one or more arguments as defined in translate_kw
|
|
|
|
|
for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
|
|
|
|
|
if ((arg_kw in translate_kw) and (not arg.is_output) and (arg.type == 'STRING')):
|
|
|
|
|
func_translate_args.setdefault(func_id, {})[arg_kw] = (arg_pos, {})
|
2013-02-24 08:50:55 +00:00
|
|
|
|
# We manually add funcs from bpy.app.translations
|
|
|
|
|
for func_id, func_ids in pgettext_variants:
|
|
|
|
|
func_translate_args[func_id] = pgettext_variants_args
|
2020-01-30 11:45:36 +11:00
|
|
|
|
for sub_func_id in func_ids:
|
|
|
|
|
func_translate_args[sub_func_id] = pgettext_variants_args
|
2018-07-07 08:48:35 +02:00
|
|
|
|
# print(func_translate_args)
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
|
|
|
|
# Break recursive nodes look up on some kind of nodes.
|
2015-05-25 21:17:49 +02:00
|
|
|
|
# E.g. we don't want to get strings inside subscripts (blah["foo"])!
|
|
|
|
|
# we don't want to get strings from comparisons (foo.type == 'BAR').
|
|
|
|
|
stopper_nodes = {ast.Subscript, ast.Compare}
|
2013-02-24 08:50:55 +00:00
|
|
|
|
# Consider strings separate: ("a" if test else "b")
|
|
|
|
|
separate_nodes = {ast.IfExp}
|
|
|
|
|
|
|
|
|
|
check_ctxt_py = None
|
|
|
|
|
if reports["check_ctxt"]:
|
|
|
|
|
check_ctxt = reports["check_ctxt"]
|
|
|
|
|
check_ctxt_py = {
|
|
|
|
|
"py_in_rna": (check_ctxt.get("py_in_rna"), set(msgs.keys())),
|
|
|
|
|
"multi_lines": check_ctxt.get("multi_lines"),
|
|
|
|
|
"not_capitalized": check_ctxt.get("not_capitalized"),
|
|
|
|
|
"end_point": check_ctxt.get("end_point"),
|
|
|
|
|
"spell_checker": check_ctxt.get("spell_checker"),
|
|
|
|
|
"spell_errors": check_ctxt.get("spell_errors"),
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for fp in files:
|
2020-09-13 19:50:08 +02:00
|
|
|
|
# ~ print("Checking File ", fp)
|
2013-02-24 08:50:55 +00:00
|
|
|
|
with open(fp, 'r', encoding="utf8") as filedata:
|
|
|
|
|
root_node = ast.parse(filedata.read(), fp, 'exec')
|
|
|
|
|
|
2013-02-27 16:24:20 +00:00
|
|
|
|
fp_rel = make_rel(fp)
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
|
|
|
|
for node in ast.walk(root_node):
|
|
|
|
|
if type(node) == ast.Call:
|
2020-09-13 19:50:08 +02:00
|
|
|
|
# ~ print("found function at")
|
|
|
|
|
# ~ print("%s:%d" % (fp, node.lineno))
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
|
|
|
|
# We can't skip such situations! from blah import foo\nfoo("bar") would also be an ast.Name func!
|
|
|
|
|
if type(node.func) == ast.Name:
|
|
|
|
|
func_id = node.func.id
|
|
|
|
|
elif hasattr(node.func, "attr"):
|
|
|
|
|
func_id = node.func.attr
|
|
|
|
|
# Ugly things like getattr(self, con.type)(context, box, con)
|
|
|
|
|
else:
|
|
|
|
|
continue
|
|
|
|
|
|
2023-03-03 21:39:06 +01:00
|
|
|
|
# Skip function if it's marked as not translatable.
|
|
|
|
|
do_translate = True
|
|
|
|
|
for kw in node.keywords:
|
|
|
|
|
if kw.arg == "translate" and not kw.value.value:
|
|
|
|
|
do_translate = False
|
|
|
|
|
break
|
|
|
|
|
if not do_translate:
|
|
|
|
|
continue
|
|
|
|
|
|
2013-02-24 08:50:55 +00:00
|
|
|
|
func_args = func_translate_args.get(func_id, {})
|
|
|
|
|
|
|
|
|
|
# First try to get i18n contexts, for every possible msgid id.
|
|
|
|
|
msgctxts = dict.fromkeys(func_args.keys(), "")
|
|
|
|
|
for msgid, (_, context_args) in func_args.items():
|
|
|
|
|
context_elements = {}
|
|
|
|
|
for arg_kw, arg_pos in context_args.items():
|
|
|
|
|
if arg_pos < len(node.args):
|
|
|
|
|
context_elements[arg_kw] = node.args[arg_pos]
|
|
|
|
|
else:
|
|
|
|
|
for kw in node.keywords:
|
|
|
|
|
if kw.arg == arg_kw:
|
|
|
|
|
context_elements[arg_kw] = kw.value
|
|
|
|
|
break
|
2020-09-13 19:50:08 +02:00
|
|
|
|
# ~ print(context_elements)
|
2013-02-24 08:50:55 +00:00
|
|
|
|
for kws, proc in translate_kw[msgid]:
|
|
|
|
|
if set(kws) <= context_elements.keys():
|
|
|
|
|
args = tuple(context_elements[k] for k in kws)
|
2020-09-13 19:50:08 +02:00
|
|
|
|
# ~ print("running ", proc, " with ", args)
|
2013-02-24 08:50:55 +00:00
|
|
|
|
ctxt = proc(*args)
|
|
|
|
|
if ctxt:
|
|
|
|
|
msgctxts[msgid] = ctxt
|
|
|
|
|
break
|
|
|
|
|
|
2020-09-13 19:50:08 +02:00
|
|
|
|
# ~ print(func_args)
|
2013-02-24 08:50:55 +00:00
|
|
|
|
# do nothing if not found
|
|
|
|
|
for arg_kw, (arg_pos, _) in func_args.items():
|
|
|
|
|
msgctxt = msgctxts[arg_kw]
|
|
|
|
|
estr_lst = [(None, ())]
|
|
|
|
|
if arg_pos < len(node.args):
|
|
|
|
|
estr_lst = extract_strings_split(node.args[arg_pos])
|
|
|
|
|
else:
|
|
|
|
|
for kw in node.keywords:
|
|
|
|
|
if kw.arg == arg_kw:
|
2020-09-13 19:50:08 +02:00
|
|
|
|
# ~ print(kw.arg, kw.value)
|
2013-02-24 08:50:55 +00:00
|
|
|
|
estr_lst = extract_strings_split(kw.value)
|
|
|
|
|
break
|
|
|
|
|
for estr, nds in estr_lst:
|
2020-09-13 19:50:08 +02:00
|
|
|
|
# ~ print(estr, nds)
|
2013-02-24 08:50:55 +00:00
|
|
|
|
if estr:
|
|
|
|
|
if nds:
|
|
|
|
|
msgsrc = "{}:{}".format(fp_rel, sorted({nd.lineno for nd in nds})[0])
|
|
|
|
|
else:
|
|
|
|
|
msgsrc = "{}:???".format(fp_rel)
|
|
|
|
|
process_msg(msgs, msgctxt, estr, msgsrc, reports, check_ctxt_py, settings)
|
|
|
|
|
reports["py_messages"].append((msgctxt, estr, msgsrc))
|
|
|
|
|
|
|
|
|
|
|
2013-02-27 16:24:20 +00:00
|
|
|
|
def dump_py_messages(msgs, reports, addons, settings, addons_only=False):
    """Extract UI messages from relevant py files (custom UI files and/or the given add-on modules)."""
    def _get_files(path):
        # All py files to parse under `path`: package __init__.py files, plus
        # non-"private" modules (those not starting with an underscore).
        if not os.path.exists(path):
            return []
        if not os.path.isdir(path):
            return [path]
        found = []
        for dpath, _, fnames in os.walk(path):
            for fn in fnames:
                if not fn.endswith(".py"):
                    continue
                if fn == "__init__.py" or not fn.startswith("_"):
                    found.append(os.path.join(dpath, fn))
        return found

    files = []
    if not addons_only:
        for path in settings.CUSTOM_PY_UI_FILES:
            for resource_type in ('USER', 'LOCAL', 'SYSTEM'):
                root = bpy.utils.resource_path(resource_type)
                files += _get_files(os.path.join(root, path))

    # Add all given addons.
    for mod in addons:
        mod_file = mod.__file__
        if os.path.basename(mod_file) == "__init__.py":
            # A package: scan its whole directory.
            files += _get_files(os.path.dirname(mod_file))
        else:
            files.append(mod_file)

    dump_py_messages_from_files(msgs, reports, sorted(files), settings)
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
##### C source code #####
|
|
|
|
|
def dump_src_messages(msgs, reports, settings):
    """Extract messages from C/C++ (and other allowed) source files, using the PYGETTEXT_KEYWORDS regexes."""
    def get_contexts():
        """Return a mapping {C_CTXT_NAME: ctxt_value}."""
        return {k: getattr(bpy.app.translations.contexts, n) for k, n in bpy.app.translations.contexts_C_to_py.items()}

    contexts = get_contexts()

    # Build regexes to extract messages (with optional contexts) from C source.
    pygettexts = tuple(re.compile(r).search for r in settings.PYGETTEXT_KEYWORDS)

    # Matches the "clean" parts of a raw extracted string (quotes, line-continuations etc. stripped).
    _clean_str = re.compile(settings.str_clean_re).finditer

    def clean_str(s):
        """Return the actual string content of a raw C string literal `s`."""
        # The encode/decode to/from 'raw_unicode_escape' allows to transform the C-type unicode hexadecimal escapes
        # (like '\u2715' for the '×' symbol) back into a proper unicode character.
        return "".join(
            m.group("clean") for m in _clean_str(s)
        ).encode('raw_unicode_escape').decode('raw_unicode_escape')

    def dump_src_file(path, rel_path, msgs, reports, settings):
        """Extract all messages from one source file, adding them to `msgs` and `reports`."""
        def process_entry(_msgctxt, _msgid):
            # Resolve one raw (context, message) pair into clean strings.
            # Context.
            msgctxt = settings.DEFAULT_CONTEXT
            if _msgctxt:
                if _msgctxt in contexts:
                    # A known C context macro name.
                    msgctxt = contexts[_msgctxt]
                elif '"' in _msgctxt or "'" in _msgctxt:
                    # A literal string.
                    msgctxt = clean_str(_msgctxt)
                else:
                    print("WARNING: raw context “{}” couldn’t be resolved!".format(_msgctxt))
            # Message.
            msgid = ""
            if _msgid:
                if '"' in _msgid or "'" in _msgid:
                    msgid = clean_str(_msgid)
                else:
                    print("WARNING: raw message “{}” couldn’t be resolved!".format(_msgid))
            return msgctxt, msgid

        check_ctxt_src = None
        if reports["check_ctxt"]:
            check_ctxt = reports["check_ctxt"]
            check_ctxt_src = {
                "multi_lines": check_ctxt.get("multi_lines"),
                "not_capitalized": check_ctxt.get("not_capitalized"),
                "end_point": check_ctxt.get("end_point"),
                "spell_checker": check_ctxt.get("spell_checker"),
                "spell_errors": check_ctxt.get("spell_errors"),
            }

        data = ""
        with open(path, 'r', encoding="utf8") as f:
            data = f.read()
        # Scan the whole file once per extraction regex, tracking line numbers manually.
        for srch in pygettexts:
            m = srch(data)
            line = pos = 0
            while m:
                d = m.groupdict()
                # Line.
                line += data[pos:m.start()].count('\n')
                msgsrc = rel_path + ":" + str(line)
                _msgid = d.get("msg_raw")
                # Skip empty string literals.
                if _msgid not in {'""', "''"}:
                    # First, try the "multi-contexts" stuff!
                    _msgctxts = tuple(d.get("ctxt_raw{}".format(i)) for i in range(settings.PYGETTEXT_MAX_MULTI_CTXT))
                    if _msgctxts[0]:
                        # One message registered once per given context.
                        for _msgctxt in _msgctxts:
                            if not _msgctxt:
                                break
                            msgctxt, msgid = process_entry(_msgctxt, _msgid)
                            process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt_src, settings)
                            reports["src_messages"].append((msgctxt, msgid, msgsrc))
                    else:
                        # Single (possibly absent) context.
                        _msgctxt = d.get("ctxt_raw")
                        msgctxt, msgid = process_entry(_msgctxt, _msgid)
                        process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt_src, settings)
                        reports["src_messages"].append((msgctxt, msgid, msgsrc))

                # Continue searching after the current match.
                pos = m.end()
                line += data[m.start():pos].count('\n')
                m = srch(data, pos)

    # Build the set of files to parse: POTFILES list gives forced ("path") and
    # forbidden ("-path") entries; everything else found on disk is added too.
    forbidden = set()
    forced = set()
    if os.path.isfile(settings.SRC_POTFILES):
        with open(settings.SRC_POTFILES, 'r', encoding="utf8") as src:
            for l in src:
                if l[0] == '-':
                    forbidden.add(l[1:].rstrip('\n'))
                elif l[0] != '#':
                    forced.add(l.rstrip('\n'))
    for root, dirs, files in os.walk(settings.POTFILES_SOURCE_DIR):
        if "/.svn" in root:
            continue
        for fname in files:
            if os.path.splitext(fname)[1] not in settings.PYGETTEXT_ALLOWED_EXTS:
                continue
            path = os.path.join(root, fname)
            try:  # can't always find the relative path (between drive letters on windows)
                rel_path = os.path.relpath(path, settings.SOURCE_DIR)
            except ValueError:
                rel_path = path
            if rel_path in forbidden:
                continue
            elif rel_path not in forced:
                forced.add(rel_path)
    for rel_path in sorted(forced):
        path = os.path.join(settings.SOURCE_DIR, rel_path)
        if os.path.exists(path):
            dump_src_file(path, rel_path, msgs, reports, settings)
|
|
|
|
|
|
|
|
|
|
|
2022-08-01 14:09:41 +02:00
|
|
|
|
def dump_preset_messages(msgs, reports, settings):
    """Extract messages from the names of the preset files found under `settings.PRESETS_DIR`."""
    rel_paths = []
    for dpath, _, fnames in os.walk(settings.PRESETS_DIR):
        for fname in fnames:
            # Skip "private" modules and anything that is not a python preset.
            if fname.startswith("_") or not fname.endswith(".py"):
                continue
            fpath = os.path.join(dpath, fname)
            try:  # can't always find the relative path (between drive letters on windows)
                rel_paths.append(os.path.relpath(fpath, settings.PRESETS_DIR))
            except ValueError:
                rel_paths.append(fpath)
    for rel_path in sorted(rel_paths):
        dirname, basename = os.path.split(rel_path)
        msgsrc = "Preset from " + dirname
        msgid = bpy.path.display_name(basename, title_case=False)
        process_msg(msgs, settings.DEFAULT_CONTEXT, msgid, msgsrc, reports, None, settings)
|
|
|
|
|
|
|
|
|
|
|
2022-08-23 11:43:39 +02:00
|
|
|
|
def dump_template_messages(msgs, reports, settings):
    """Extract workspace names from startup templates (loads each template's startup file in turn)."""
    bfiles = [""]  # General template, no name needed.
    bfiles += glob.glob(settings.TEMPLATES_DIR + "/**/*.blend", recursive=True)

    # {workspace name: [names of the templates defining it, ...]}
    workspace_names = {}

    for bfile in bfiles:
        template = os.path.basename(os.path.dirname(bfile))
        bpy.ops.wm.read_homefile(use_factory_startup=True, app_template=template)
        for ws in bpy.data.workspaces:
            workspace_names.setdefault(ws.name, []).append(template or "General")

    from bpy.app.translations import contexts as i18n_contexts
    msgctxt = i18n_contexts.id_workspace
    for workspace_name in sorted(workspace_names):
        for template_name in sorted(workspace_names[workspace_name]):
            msgsrc = "Workspace from template " + template_name
            process_msg(msgs, msgctxt, workspace_name, msgsrc,
                        reports, None, settings)
|
|
|
|
|
|
|
|
|
|
|
2023-03-13 22:41:11 +01:00
|
|
|
|
def dump_asset_messages(msgs, reports, settings):
    """Extract messages from bundled assets: catalog names, asset names and descriptions."""
    # Where to search for assets, relative to the local user resources.
    assets_dir = os.path.join(bpy.utils.resource_path('LOCAL'), "datafiles", "assets")

    # Parse the catalog sidecar file
    catalog_file = os.path.join(assets_dir, settings.ASSET_CATALOG_FILE)
    with open(catalog_file, encoding="utf8") as f:
        catalog_lines = f.readlines()

    catalogs = set()
    for line in catalog_lines:
        # Skip blank lines, the VERSION header and comments.
        if line == "\n" or line.startswith(("VERSION", "#")):
            continue
        # Each entry is "UUID:catalog/path:simple name"; every path component is translatable.
        _uuid, catalog_path, _simple_catalog_name = line.split(":")
        catalogs.update(catalog_path.split("/"))

    msgsrc = "Asset catalog from " + settings.ASSET_CATALOG_FILE
    for catalog in sorted(catalogs):
        process_msg(msgs, settings.DEFAULT_CONTEXT, catalog, msgsrc,
                    reports, None, settings)

    # Parse the asset blend files
    asset_files = {}

    for bfile in glob.glob(assets_dir + "/**/*.blend", recursive=True):
        basename = os.path.basename(bfile)
        bpy.ops.wm.open_mainfile(filepath=bfile)
        # For now, only parse node groups.
        # Perhaps some other assets will need to be extracted later?
        for asset_type in ("node_groups",):
            for asset in getattr(bpy.data, asset_type):
                if asset.asset_data is None:  # Not an asset
                    continue
                asset_files.setdefault(basename, []).append(
                    (asset.name, asset.asset_data.description))

    for asset_file in sorted(asset_files):
        for name, description in sorted(asset_files[asset_file]):
            msgsrc = "Asset name from file " + asset_file
            process_msg(msgs, settings.DEFAULT_CONTEXT, name, msgsrc,
                        reports, None, settings)
            msgsrc = "Asset description from file " + asset_file
            process_msg(msgs, settings.DEFAULT_CONTEXT, description, msgsrc,
                        reports, None, settings)
|
|
|
|
|
|
|
|
|
|
|
2022-08-29 14:02:24 +02:00
|
|
|
|
def dump_addon_bl_info(msgs, reports, module, settings):
    """Extract the translatable `bl_info` fields ('name', 'location', 'description', 'warning') of an add-on."""
    for prop in ('name', 'location', 'description', 'warning'):
        # Some of these fields are optional in `bl_info` ('location', 'warning'...);
        # skip missing ones instead of raising a KeyError.
        if prop not in module.bl_info:
            continue
        process_msg(
            msgs,
            settings.DEFAULT_CONTEXT,
            module.bl_info[prop],
            "Add-on " +
            module.bl_info['name'] +
            " info: " +
            prop,
            reports,
            None,
            settings,
        )
|
2022-08-29 14:02:24 +02:00
|
|
|
|
|
|
|
|
|
|
2013-02-24 08:50:55 +00:00
|
|
|
|
##### Main functions! #####
|
|
|
|
|
def dump_messages(do_messages, do_checks, settings):
    """
    Extract all UI messages from Blender (RNA, py UI code, C source, presets, templates,
    add-ons' bl_info and categories, i18n menu labels, assets) into a POT.

    :param do_messages: When True, write the resulting POT to `settings.FILE_NAME_POT`.
    :param do_checks: When True, run the various QA checks over extracted messages.
    :return: The generated `utils.I18nMessages` POT object.
    """
    # Header data for the generated POT (Blender version/hash/build date).
    bl_ver = "Blender " + bpy.app.version_string
    bl_hash = bpy.app.build_hash
    bl_date = datetime.datetime.strptime(bpy.app.build_date.decode() + "T" + bpy.app.build_time.decode(),
                                         "%Y-%m-%dT%H:%M:%S")
    pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, bl_ver, bl_hash, bl_date, bl_date.year,
                                                settings=settings)
    msgs = pot.msgs

    # Enable all wanted addons.
    # For now, enable all official addons, before extracting msgids.
    addons = utils.enable_addons(support={"OFFICIAL"})
    # Note this is not needed if we have been started with factory settings, but just in case...
    # XXX This is not working well, spent a whole day trying to understand *why* we still have references of
    # those removed classes in things like `bpy.types.OperatorProperties.__subclasses__()`
    # (could not even reproduce it from regular py console in Blender with UI...).
    # For some reasons, cleanup does not happen properly, *and* we have no way to tell which class is valid
    # and which has been unregistered. So for now, just go for the dirty, easy way: do not disable add-ons. :(
    # ~ utils.enable_addons(support={"COMMUNITY", "TESTING"}, disable=True)

    reports = _gen_reports(_gen_check_ctxt(settings) if do_checks else None)

    # Get strings from RNA.
    dump_rna_messages(msgs, reports, settings)

    # Get strings from UI layout definitions text="..." args.
    dump_py_messages(msgs, reports, addons, settings)

    # Get strings from C source code.
    dump_src_messages(msgs, reports, settings)

    # Get strings from presets.
    dump_preset_messages(msgs, reports, settings)

    # Get strings from startup templates.
    # This loads each startup blend file in turn.
    dump_template_messages(msgs, reports, settings)

    # Get strings from addons' bl_info.
    import addon_utils
    for module in addon_utils.modules():
        # Only process official add-ons, i.e. those marked as 'OFFICIAL' and
        # existing in the system add-ons directory (not user-installed ones).
        if (module.bl_info['support'] != 'OFFICIAL'
                or not bpy.path.is_subdir(module.__file__, bpy.utils.system_resource('SCRIPTS'))):
            continue
        dump_addon_bl_info(msgs, reports, module, settings)

    # Get strings from addons' categories.
    system_categories = set()
    for module in addon_utils.modules():
        if bpy.path.is_subdir(module.__file__, bpy.utils.system_resource('SCRIPTS')):
            system_categories.add(module.bl_info['category'])
    for uid, label, tip in bpy.types.WindowManager.addon_filter.keywords['items'](
            bpy.context.window_manager,
            bpy.context,
    ):
        if label in system_categories:
            # Only process add-on if it a system one (i.e shipped with Blender). Also,
            # we do want to translate official categories, even if they have no official add-ons,
            # hence the different test than below.
            process_msg(msgs, settings.DEFAULT_CONTEXT, label, "Add-ons' categories", reports, None, settings)
        elif tip:
            # Only special categories get a tip (All and User).
            process_msg(msgs, settings.DEFAULT_CONTEXT, label, "Add-ons' categories", reports, None, settings)
            process_msg(msgs, settings.DEFAULT_CONTEXT, tip, "Add-ons' categories", reports, None, settings)

    # Get strings specific to translations' menu.
    for lng in settings.LANGUAGES:
        process_msg(msgs, settings.DEFAULT_CONTEXT, lng[1], "Languages’ labels from bl_i18n_utils/settings.py",
                    reports, None, settings)
    for cat in settings.LANGUAGES_CATEGORIES:
        process_msg(msgs, settings.DEFAULT_CONTEXT, cat[1],
                    "Language categories’ labels from bl_i18n_utils/settings.py", reports, None, settings)

    # Get strings from asset catalogs and blend files.
    # This loads each asset blend file in turn.
    dump_asset_messages(msgs, reports, settings)

    # pot.check()
    pot.unescape()  # Strings gathered in py/C source code may contain escaped chars...
    print_info(reports, pot)
    # pot.check()

    if do_messages:
        print("Writing messages…")
        pot.write('PO', settings.FILE_NAME_POT)

    print("Finished extracting UI messages!")

    return pot  # Not used currently, but may be useful later (and to be consistent with dump_addon_messages!).
|
|
|
|
|
|
2013-02-24 08:50:55 +00:00
|
|
|
|
|
2013-04-09 08:56:35 +00:00
|
|
|
|
def dump_addon_messages(module_name, do_checks, settings):
    """
    Extract the messages belonging to a single add-on, by diffing RNA messages with the
    add-on enabled against RNA messages with it disabled, then adding its py UI strings
    and `bl_info` fields.

    :param module_name: Name of the add-on module to process.
    :param do_checks: When True, run the QA checks over extracted messages.
    :return: The generated `utils.I18nMessages` POT object for that add-on.
    """
    import addon_utils

    # Get current addon state (loaded or not):
    was_loaded = addon_utils.check(module_name)[1]

    # Enable our addon.
    addon = utils.enable_addons(addons={module_name})[0]

    addon_info = addon_utils.module_bl_info(addon)
    ver = addon_info["name"] + " " + ".".join(str(v) for v in addon_info["version"])
    rev = 0
    date = datetime.datetime.now()
    pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, ver, rev, date, date.year,
                                                settings=settings)
    msgs = pot.msgs

    # `minus_pot` collects messages present *without* the add-on, to be subtracted below.
    minus_pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, ver, rev, date, date.year,
                                                      settings=settings)
    minus_msgs = minus_pot.msgs

    check_ctxt = _gen_check_ctxt(settings) if do_checks else None
    minus_check_ctxt = _gen_check_ctxt(settings) if do_checks else None

    # Get strings from RNA, our addon being enabled.
    # XXX Bare "A".."F" prints look like leftover debug progress markers.
    print("A")
    reports = _gen_reports(check_ctxt)
    print("B")
    dump_rna_messages(msgs, reports, settings)
    print("C")

    # Now disable our addon, and rescan RNA.
    utils.enable_addons(addons={module_name}, disable=True)
    print("D")
    reports["check_ctxt"] = minus_check_ctxt
    print("E")
    dump_rna_messages(minus_msgs, reports, settings)
    print("F")

    # Restore previous state if needed!
    if was_loaded:
        utils.enable_addons(addons={module_name})

    # and make the diff!
    for key in minus_msgs:
        if key != settings.PO_HEADER_KEY:
            if key in msgs:
                del msgs[key]
            else:
                # This should not happen, but some messages seem to have
                # leaked on add-on unregister and register?
                print(f"Key not found in msgs: {key}")

    if check_ctxt:
        _diff_check_ctxt(check_ctxt, minus_check_ctxt)

    # and we are done with those!
    del minus_pot
    del minus_msgs
    del minus_check_ctxt

    # get strings from UI layout definitions text="..." args
    reports["check_ctxt"] = check_ctxt
    dump_py_messages(msgs, reports, {addon}, settings, addons_only=True)

    # Get strings from the addon's bl_info
    dump_addon_bl_info(msgs, reports, addon, settings)

    pot.unescape()  # Strings gathered in py/C source code may contain escaped chars...
    print_info(reports, pot)

    print("Finished extracting UI messages!")

    return pot
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Entry point: parse the command line (everything after Blender's "--") and dump UI messages."""
    try:
        import bpy
    except ImportError:
        print("This script must run from inside blender")
        return

    # NOTE: `sys` is already imported at module level; the redundant local import was removed.
    import argparse

    # Get rid of Blender args!
    argv = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []

    parser = argparse.ArgumentParser(description="Process UI messages from inside Blender.")
    parser.add_argument('-c', '--no_checks', default=True, action="store_false", help="No checks over UI messages.")
    parser.add_argument('-m', '--no_messages', default=True, action="store_false", help="No export of UI messages.")
    parser.add_argument('-o', '--output', default=None, help="Output POT file path.")
    parser.add_argument('-s', '--settings', default=None,
                        help="Override (some) default settings. Either a JSon file name, or a JSon string.")
    args = parser.parse_args(argv)

    settings = settings_i18n.I18nSettings()
    settings.load(args.settings)

    if args.output:
        settings.FILE_NAME_POT = args.output

    dump_messages(do_messages=args.no_messages, do_checks=args.no_checks, settings=settings)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Announce which script is running before doing any work.
    banner = "\n\n *** Running {} *** \n"
    print(banner.format(__file__))
    main()
|