# SPDX-FileCopyrightText: 2012-2023 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later

# Some misc utilities...

__all__ = (
    "I18n",
    "I18nMessage",
    "I18nMessages",
    "enable_addons",
    "find_best_isocode_matches",
    "get_po_files_from_dir",
    "list_po_dir",
)

import collections
import os
import platform
import re
import struct
import tempfile
import time

from bl_i18n_utils import (
    settings,
    utils_rtl,
)


##### Misc Utils #####

_valid_po_path_re = re.compile(r"^\S+:[0-9]+$")


def is_valid_po_path(path):
    return bool(_valid_po_path_re.match(path))
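
# Example (illustrative only): the regex accepts the typical "file:line" form of
# po source comments, and rejects free-form notes:
#     is_valid_po_path("source/blender/editors/interface/interface.c:1234")  # -> True
#     is_valid_po_path("a free-form custom note")                            # -> False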


def get_best_similar(data):
    import difflib
    key, use_similar, similar_pool = data

    # try to find some close key in existing messages...
    # Optimized code inspired by difflib.get_close_matches (as we only need the best match).
    # We also consider to never make a match when len differs more than -len_key / 2, +len_key * 2 (which is valid
    # as long as use_similar is not below ~0.7).
    # Gives an overall ~20% of improvement!

    # tmp = difflib.get_close_matches(key[1], similar_pool, n=1, cutoff=use_similar)
    # if tmp:
    #     tmp = tmp[0]
    tmp = None
    s = difflib.SequenceMatcher()
    s.set_seq2(key[1])
    len_key = len(key[1])
    min_len = len_key // 2
    max_len = len_key * 2
    for x in similar_pool:
        if min_len < len(x) < max_len:
            s.set_seq1(x)
            if s.real_quick_ratio() >= use_similar and s.quick_ratio() >= use_similar:
                sratio = s.ratio()
                if sratio >= use_similar:
                    tmp = x
                    use_similar = sratio
    return key, tmp
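
# Example (illustrative; names are hypothetical): `data` packs all arguments in one
# tuple so the function can also be fed to `multiprocessing.Pool.map`:
#     key = ("Operator", "Translate")
#     pool = ("Translation", "Rotate", "Scale")
#     get_best_similar((key, 0.75, pool))  # -> (key, "Translation"), ratio ~0.8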


_locale_explode_re = re.compile(r"^([a-z]{2,})(?:_([A-Za-z]{2,}))?(?:@([a-z]{2,}))?$")


def locale_explode(locale):
    """Copies behavior of `BLT_lang_locale_explode`, keep them in sync."""
    ret = (None, None, None, None, None)
    m = _locale_explode_re.match(locale)
    if m:
        lang, country, variant = m.groups()
        return (
            lang, country, variant,
            "{:s}_{:s}".format(lang, country) if country else None,
            "{:s}@{:s}".format(lang, variant) if variant else None
        )
    try:
        import bpy.app.translations as bpy_translations
        assert ret == bpy_translations.locale_explode(locale)
    except ModuleNotFoundError:
        pass

    return ret
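
# Example (illustrative): the five-tuple is (language, country, variant,
# language_country, language@variant):
#     locale_explode("sr_RS@latin")  # -> ("sr", "RS", "latin", "sr_RS", "sr@latin")
#     locale_explode("fr")           # -> ("fr", None, None, None, None)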


def locale_match(loc1, loc2):
    """
    Return:
        -n if loc1 is a subtype of loc2 (e.g. ``fr_FR`` is a subtype of ``fr``).
        +n if loc2 is a subtype of loc1.
        n becomes smaller when both locales are more similar
            (e.g. (``sr, sr_SR``) are more similar than (``sr, sr_SR@latin``)).
        0 if they are exactly the same.
        ... (Ellipsis) if they cannot match!
    Note: We consider that ``sr_SR@latin`` is a subtype of ``sr@latin``, ``sr_SR`` and ``sr``,
          but ``sr_SR`` and ``sr@latin`` won't match (will return ...)!
    Note: About similarity, differences in variants matter more than differences in countries;
          currently the cases are:
            (``sr, sr_SR``) -> 1
            (``sr@latin, sr_SR@latin``) -> 1
            (``sr, sr@latin``) -> 2
            (``sr_SR, sr_SR@latin``) -> 2
            (``sr, sr_SR@latin``) -> 3
    """
    if loc1 == loc2:
        return 0
    l1, c1, v1, *_1 = locale_explode(loc1)
    l2, c2, v2, *_2 = locale_explode(loc2)

    if l1 == l2:
        if c1 == c2:
            if v1 == v2:
                return 0
            elif v2 is None:
                return -2
            elif v1 is None:
                return 2
            return ...
        elif c2 is None:
            if v1 == v2:
                return -1
            elif v2 is None:
                return -3
            return ...
        elif c1 is None:
            if v1 == v2:
                return 1
            elif v1 is None:
                return 3
            return ...
    return ...
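
# Example (illustrative): the sign gives the subtype direction, the magnitude the distance:
#     locale_match("fr_FR", "fr")        # -> -1 (fr_FR is a subtype of fr)
#     locale_match("fr", "fr_FR")        # ->  1
#     locale_match("sr_SR@latin", "sr")  # -> -3
#     locale_match("sr_SR", "sr@latin")  # -> ... (Ellipsis: no possible match)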


def find_best_isocode_matches(uid, iso_codes):
    """
    Return an ordered tuple of elements in iso_codes that can match the given uid, from most to least similar.
    """
    tmp = ((e, locale_match(e, uid)) for e in iso_codes)
    return tuple(e[0] for e in sorted((e for e in tmp if e[1] is not ... and e[1] >= 0), key=lambda e: e[1]))
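
# Example (illustrative): exact matches come first, then increasingly generic locales;
# incompatible ones are dropped:
#     find_best_isocode_matches("sr_RS", ("sr", "de", "sr_RS", "sr_RS@latin"))
#     # -> ("sr_RS", "sr")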


def get_po_files_from_dir(root_dir, langs=set()):
    """
    Yield tuples (uid, po_path) of translations for each po file found in the given directory, which should be either
    a directory containing po files using language uids as names (e.g. fr.po, es_ES.po, etc.), or
    a directory containing directories whose names are language uids, each containing a po file of the same name.
    """
    found_uids = set()
    for p in os.listdir(root_dir):
        uid = None
        po_file = os.path.join(root_dir, p)
        # print(p)
        if p.endswith(".po") and os.path.isfile(po_file):
            uid = p[:-3]
            if langs and uid not in langs:
                continue
        elif os.path.isdir(po_file):
            uid = p
            if langs and uid not in langs:
                continue
            po_file = os.path.join(root_dir, p, p + ".po")
            if not os.path.isfile(po_file):
                continue
        else:
            continue
        if uid in found_uids:
            print("WARNING! {} id has been found more than once! Only the first one has been loaded!".format(uid))
            continue
        found_uids.add(uid)
        yield uid, po_file
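
# Example (illustrative), assuming a hypothetical "/i18n/po" tree, laid out either
# flat (fr.po, es.po, ...) or nested (fr/fr.po, es/es.po, ...):
#     for uid, po_path in get_po_files_from_dir("/i18n/po", langs={"fr", "es"}):
#         print(uid, "->", po_path)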


def list_po_dir(root_path, settings):
    """
    Generator. List the given directory (expecting one sub-directory per language)
    and return all files matching languages listed in settings.

    Yield tuples (can_use, uid, num_id, name, isocode, po_path)

    Note that po_path may not actually exist.
    """
    isocodes = ((e, os.path.join(root_path, e, e + ".po")) for e in os.listdir(root_path))
    isocodes = dict(e for e in isocodes if os.path.isfile(e[1]))
    for num_id, name, uid in settings.LANGUAGES[2:]:  # Skip "default" and "en" languages!
        best_po = find_best_isocode_matches(uid, isocodes)
        # print(uid, "->", best_po)
        if best_po:
            isocode = best_po[0]
            yield (True, uid, num_id, name, isocode, isocodes[isocode])
        else:
            yielded = False
            language, _1, _2, language_country, language_variant = locale_explode(uid)
            for isocode in (language, language_variant, language_country, uid):
                p = os.path.join(root_path, isocode, isocode + ".po")
                if not os.path.exists(p):
                    yield (True, uid, num_id, name, isocode, p)
                    yielded = True
                    break
            if not yielded:
                yield (False, uid, num_id, name, None, None)
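
# Example (illustrative), with the default i18n settings and a hypothetical po root:
#     from bl_i18n_utils import settings as i18n_settings
#     for can_use, uid, num_id, name, isocode, po_path in list_po_dir("/i18n/po", i18n_settings):
#         if can_use:
#             print("{} ({}): {}".format(name, uid, po_path))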


def enable_addons(addons=None, support=None, disable=False, check_only=False):
    """
    Enable (or disable) addons based either on a set of names, or a set of 'support' types.
    Returns the list of all affected addons (as fake modules)!
    If "check_only" is set, no addon will be enabled or disabled.
    """
    import addon_utils

    try:
        import bpy
    except ModuleNotFoundError:
        print("Could not import bpy, enable_addons must be run from within Blender.")
        return

    if addons is None:
        addons = {}
    if support is None:
        support = {}

    prefs = bpy.context.preferences
    used_addon_module_names = {addon.module for addon in prefs.addons}
    # In case we need to blacklist some add-ons...
    black_list = {}

    ret = [
        mod for mod in addon_utils.modules()
        if (((addons and mod.__name__ in addons) or
             (not addons and addon_utils.module_bl_info(mod)["support"] in support)) and
            (mod.__name__ not in black_list))
    ]

    if not check_only:
        for mod in ret:
            try:
                addon_module_name = mod.__name__
                if disable:
                    if addon_module_name not in used_addon_module_names:
                        continue
                    print("    Disabling module ", addon_module_name)
                    bpy.ops.preferences.addon_disable(module=addon_module_name)
                else:
                    if addon_module_name in used_addon_module_names:
                        continue
                    print("    Enabling module ", addon_module_name)
                    bpy.ops.preferences.addon_enable(module=addon_module_name)
            except Exception as ex:  # XXX TEMP WORKAROUND
                print(ex)

        # XXX There are currently some problems with bpy/rna...
        #     *Very* tricky to solve!
        #     So this is a hack to make all newly added operator visible by
        #     bpy.types.OperatorProperties.__subclasses__()
        for cat in dir(bpy.ops):
            cat = getattr(bpy.ops, cat)
            for op in dir(cat):
                getattr(cat, op).get_rna_type()
    return ret
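
# Example (illustrative; must run inside Blender): check which community add-ons
# would be affected, without actually toggling anything:
#     affected = enable_addons(support={"COMMUNITY"}, check_only=True)
#     print(sorted(mod.__name__ for mod in affected))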


##### Main Classes #####

class I18nMessage:
    """
    Internal representation of a message.
    """
    __slots__ = ("msgctxt_lines", "msgid_lines", "msgstr_lines", "comment_lines", "is_fuzzy", "is_commented",
                 "settings")

    def __init__(self, msgctxt_lines=None, msgid_lines=None, msgstr_lines=None, comment_lines=None,
                 is_commented=False, is_fuzzy=False, settings=settings):
        self.settings = settings
        self.msgctxt_lines = msgctxt_lines or []
        self.msgid_lines = msgid_lines or []
        self.msgstr_lines = msgstr_lines or []
        self.comment_lines = comment_lines or []
        self.is_fuzzy = is_fuzzy
        self.is_commented = is_commented

    # ~ def __getstate__(self):
    # ~     return {key: getattr(self, key) for key in self.__slots__}

    def _get_msgctxt(self):
        return "".join(self.msgctxt_lines)

    def _set_msgctxt(self, ctxt):
        self.msgctxt_lines = [ctxt]
    msgctxt = property(_get_msgctxt, _set_msgctxt)

    def _get_msgid(self):
        return "".join(self.msgid_lines)

    def _set_msgid(self, msgid):
        self.msgid_lines = [msgid]
    msgid = property(_get_msgid, _set_msgid)

    def _get_msgstr(self):
        return "".join(self.msgstr_lines)

    def _set_msgstr(self, msgstr):
        self.msgstr_lines = [msgstr]
    msgstr = property(_get_msgstr, _set_msgstr)

    def _get_sources(self):
        lstrip1 = len(self.settings.PO_COMMENT_PREFIX_SOURCE)
        lstrip2 = len(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)
        return ([l[lstrip1:] for l in self.comment_lines if l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE)] +
                [l[lstrip2:] for l in self.comment_lines
                 if l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)])

    def _set_sources(self, sources):
        cmmlines = self.comment_lines.copy()
        for l in cmmlines:
            if (
                    l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE) or
                    l.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)
            ):
                self.comment_lines.remove(l)
        lines_src = []
        lines_src_custom = []
        for src in sources:
            if is_valid_po_path(src):
                lines_src.append(self.settings.PO_COMMENT_PREFIX_SOURCE + src)
            else:
                lines_src_custom.append(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + src)
        self.comment_lines += lines_src_custom + lines_src
    sources = property(_get_sources, _set_sources)

    def _get_is_tooltip(self):
        # XXX For now, we assume that all messages > 30 chars are tool-tips!
        return len(self.msgid) > 30
    is_tooltip = property(_get_is_tooltip)

    def copy(self):
        # Deep-copy everything but the settings!
        return self.__class__(msgctxt_lines=self.msgctxt_lines[:], msgid_lines=self.msgid_lines[:],
                              msgstr_lines=self.msgstr_lines[:], comment_lines=self.comment_lines[:],
                              is_commented=self.is_commented, is_fuzzy=self.is_fuzzy, settings=self.settings)

    def normalize(self, max_len=80):
        """
        Normalize this message; call this before exporting it...
        Currently normalizes msgctxt, msgid and msgstr lines to the given max_len (if below 1, makes them single line).
        """
        max_len -= 2  # The two quotes!

        def _splitlines(text):
            lns = text.splitlines()
            return [l + "\n" for l in lns[:-1]] + lns[-1:]

        # We do not need the full power of text-wrap... We just split first at escaped new lines, then into each line
        # if needed... No word splitting, nor fancy spaces handling!
        def _wrap(text, max_len, init_len):
            if len(text) + init_len < max_len:
                return [text]
            lines = _splitlines(text)
            ret = []
            for l in lines:
                tmp = []
                cur_len = 0
                words = l.split(' ')
                for w in words:
                    cur_len += len(w) + 1
                    if cur_len > (max_len - 1) and tmp:
                        ret.append(" ".join(tmp) + " ")
                        del tmp[:]
                        cur_len = len(w) + 1
                    tmp.append(w)
                if tmp:
                    ret.append(" ".join(tmp))
            return ret

        if max_len < 1:
            self.msgctxt_lines = _splitlines(self.msgctxt)
            self.msgid_lines = _splitlines(self.msgid)
            self.msgstr_lines = _splitlines(self.msgstr)
        else:
            init_len = len(self.settings.PO_MSGCTXT) + 1
            if self.is_commented:
                init_len += len(self.settings.PO_COMMENT_PREFIX_MSG)
            self.msgctxt_lines = _wrap(self.msgctxt, max_len, init_len)

            init_len = len(self.settings.PO_MSGID) + 1
            if self.is_commented:
                init_len += len(self.settings.PO_COMMENT_PREFIX_MSG)
            self.msgid_lines = _wrap(self.msgid, max_len, init_len)

            init_len = len(self.settings.PO_MSGSTR) + 1
            if self.is_commented:
                init_len += len(self.settings.PO_COMMENT_PREFIX_MSG)
            self.msgstr_lines = _wrap(self.msgstr, max_len, init_len)

        # Be sure comment lines are not duplicated (can happen with sources...).
        tmp = []
        for l in self.comment_lines:
            if l not in tmp:
                tmp.append(l)
        self.comment_lines = tmp
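
    # Example (illustrative): wrapping a long message into po-style continuation lines:
    #     msg = I18nMessage(msgid_lines=["A rather long tooltip message that will not fit on a single line"])
    #     msg.normalize(max_len=40)
    #     # msg.msgid_lines now holds several short chunks, each roughly under 38 chars.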

    # Quotes not preceded by a (non-escaped) backslash are the ones needing (un)escaping.
    _esc_quotes = re.compile(r'(?<!\\)((?:\\\\)*)"')
    _unesc_quotes = re.compile(r'(?<!\\)((?:\\\\)*)\\"')
    _esc_names = ("msgctxt_lines", "msgid_lines", "msgstr_lines")
    _esc_names_all = _esc_names + ("comment_lines",)

    @classmethod
    def do_escape(cls, txt):
        """Replace some chars by their escaped versions!"""
        if "\n" in txt:
            txt = txt.replace("\n", r"\n")
        if "\t" in txt:
            txt = txt.replace("\t", r"\t")
        if '"' in txt:
            txt = cls._esc_quotes.sub(r'\1\"', txt)
        return txt

    @classmethod
    def do_unescape(cls, txt):
        """Replace escaped chars by real ones!"""
        if r"\n" in txt:
            txt = txt.replace(r"\n", "\n")
        if r"\t" in txt:
            txt = txt.replace(r"\t", "\t")
        if r'\"' in txt:
            txt = cls._unesc_quotes.sub(r'\1"', txt)
        return txt
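
    # Example (illustrative): escaping is meant to round-trip with unescaping:
    #     s = 'A "quoted" word\tand a\nnew line'
    #     I18nMessage.do_unescape(I18nMessage.do_escape(s)) == s  # -> True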

    def escape(self, do_all=False):
        names = self._esc_names_all if do_all else self._esc_names
        for name in names:
            setattr(self, name, [self.do_escape(l) for l in getattr(self, name)])

    def unescape(self, do_all=True):
        names = self._esc_names_all if do_all else self._esc_names
        for name in names:
            setattr(self, name, [self.do_unescape(l) for l in getattr(self, name)])


class I18nMessages:
    """
    Internal representation of messages for one language (iso code), with additional stats info.
    """

    # Avoid parsing again!
    # Keys should be (pseudo) file-names, values are tuples (hash, I18nMessages)
    # Note: only used by po parser currently!
    # _parser_cache = {}

    def __init__(self, uid=None, kind=None, key=None, src=None, settings=settings):
        self.settings = settings
        self.uid = uid if uid is not None else settings.PARSER_TEMPLATE_ID
        self.msgs = self._new_messages()
        self.trans_msgs = set()
        self.fuzzy_msgs = set()
        self.comm_msgs = set()
        self.ttip_msgs = set()
        self.contexts = set()
        self.nbr_msgs = 0
        self.nbr_trans_msgs = 0
        self.nbr_ttips = 0
        self.nbr_trans_ttips = 0
        self.nbr_comm_msgs = 0
        self.nbr_signs = 0
        self.nbr_trans_signs = 0
        self.parsing_errors = []
        if kind and src:
            self.parse(kind, key, src)
        self.update_info()

        self._reverse_cache = None

    def __getstate__(self):
        return (self.settings, self.uid, self.msgs, self.parsing_errors)

    def __setstate__(self, data):
        self.__init__()
        self.settings, self.uid, self.msgs, self.parsing_errors = data
        self.update_info()

    @staticmethod
    def _new_messages():
        return getattr(collections, "OrderedDict", dict)()

    @classmethod
    def gen_empty_messages(cls, uid, blender_ver, blender_hash, bl_time, default_copyright=True, settings=settings):
        """Generate an empty I18nMessages object (only header is present!)."""
        fmt = settings.PO_HEADER_MSGSTR
        msgstr = fmt.format(
            blender_ver=str(blender_ver),
            blender_hash=blender_hash,
            time=time.strftime("%Y-%m-%d %H:%M%z", bl_time),
            uid=str(uid))
        comment = ""
        if default_copyright:
            comment = settings.PO_HEADER_COMMENT_COPYRIGHT.format(year=str(time.gmtime().tm_year))
        comment = comment + settings.PO_HEADER_COMMENT

        msgs = cls(uid=uid, settings=settings)
        key = settings.PO_HEADER_KEY
        msgs.msgs[key] = I18nMessage([key[0]], [key[1]], msgstr.split("\n"), comment.split("\n"),
                                     False, False, settings=settings)
        msgs.update_info()

        return msgs
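
    # Example (illustrative; version/hash values are hypothetical): creating a fresh,
    # header-only template for French:
    #     msgs = I18nMessages.gen_empty_messages("fr_FR", "4.2.0", "abc123", time.gmtime())
    #     # msgs now only holds the po header entry, ready to be filled and written out.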

    def normalize(self, max_len=80):
        for msg in self.msgs.values():
            msg.normalize(max_len)

    def escape(self, do_all=False):
        for msg in self.msgs.values():
            msg.escape(do_all)

    def unescape(self, do_all=True):
        for msg in self.msgs.values():
            msg.unescape(do_all)

    def check(self, fix=False):
        """
        Check consistency between messages and their keys!
        Check that messages using printf-style format codes are consistent between msgid and msgstr!
        If fix is True, tries to fix the issues.
        Return a list of found errors (empty if everything went OK!).
        """
        ret = []
        default_context = self.settings.DEFAULT_CONTEXT
        _format = re.compile(self.settings.CHECK_PRINTF_FORMAT).findall
        done_keys = set()
        rem = set()
        tmp = {}
        for key, msg in self.msgs.items():
            msgctxt, msgid, msgstr = msg.msgctxt, msg.msgid, msg.msgstr
            real_key = (msgctxt or default_context, msgid)
            if key != real_key:
                ret.append("Error! msg's context/message do not match its key ({} / {})".format(real_key, key))
                if real_key in self.msgs:
                    ret.append("Error! msg's real_key already used!")
                    if fix:
                        rem.add(real_key)
                elif fix:
                    tmp[real_key] = msg
            done_keys.add(key)
            if '%' in msgid and msgstr and _format(msgid) != _format(msgstr):
                if not msg.is_fuzzy:
                    ret.append("Error! msg's format entities are not matched in msgid and msgstr ({} / \"{}\")"
                               "".format(real_key, msgstr))
                if fix:
                    msg.msgstr = ""
        for k in rem:
            del self.msgs[k]
        self.msgs.update(tmp)
        return ret
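
    # Example (illustrative, assuming the default "*" context): a translation dropping
    # a printf placeholder is reported, and cleared when fix is True:
    #     msgs.msgs[("*", "Frame: %d")] = I18nMessage(msgid_lines=["Frame: %d"], msgstr_lines=["Image :"])
    #     errors = msgs.check(fix=True)  # One error; the bad msgstr is reset to "".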

    def clean_commented(self):
        self.update_info()
        nbr = len(self.comm_msgs)
        for k in self.comm_msgs:
            del self.msgs[k]
        return nbr

    def rtl_process(self):
        keys = []
        trans = []
        for k, m in self.msgs.items():
            keys.append(k)
            trans.append(m.msgstr)
        trans = utils_rtl.log2vis(trans, self.settings)
        for k, t in zip(keys, trans):
            self.msgs[k].msgstr = t

    def merge(self, msgs, replace=False):
        """
        Merge translations from msgs into self, following those rules:
        * If a msg is in self and not in msgs, keep self untouched.
        * If a msg is in msgs and not in self, skip it.
        * Else (msg both in self and msgs):
            * If self is not translated and msgs is translated or fuzzy, replace by msgs.
            * If self is fuzzy, and msgs is translated, replace by msgs.
            * If self is fuzzy, and msgs is fuzzy, and replace is True, replace by msgs.
            * If self is translated, and msgs is translated, and replace is True, replace by msgs.
            * Else, skip it!
        """
        for k, m in msgs.msgs.items():
            if k not in self.msgs:
                continue
            sm = self.msgs[k]
            if (sm.is_commented or m.is_commented or not m.msgstr):
                continue
            if (not sm.msgstr or replace or (sm.is_fuzzy and (not m.is_fuzzy or replace))):
                sm.msgstr = m.msgstr
                sm.is_fuzzy = m.is_fuzzy
                sm.comment_lines = m.comment_lines
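
    # Example (illustrative; names are hypothetical): pull translations from another
    # I18nMessages object; existing non-fuzzy translations are only overridden when
    # `replace` is set:
    #     lang_msgs.merge(other_msgs)                # Fill gaps and fuzzy entries only.
    #     lang_msgs.merge(other_msgs, replace=True)  # Prefer `other_msgs` everywhere.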

    def update(self, ref, use_similar=None, keep_old_commented=True):
        """
        Update this I18nMessages object from the given ref one. Translations from ref are never used. Source comments
        from ref completely replace current ones. If use_similar is not 0.0, it will try to match new messages in ref
        with an existing one. Messages no longer found in ref will be marked as commented if keep_old_commented is
        True, or removed otherwise.
        """
        if use_similar is None:
            use_similar = self.settings.SIMILAR_MSGID_THRESHOLD

        similar_pool = {}
        if use_similar > 0.0:
            for key, msg in self.msgs.items():
                if msg.msgstr:  # No need to waste time with void translations!
                    similar_pool.setdefault(key[1], set()).add(key)

        msgs = self._new_messages().fromkeys(ref.msgs.keys())
        ref_keys = set(ref.msgs.keys())
        org_keys = set(self.msgs.keys())
        new_keys = ref_keys - org_keys
        removed_keys = org_keys - ref_keys

        # First process keys present in both org and ref messages.
        for key in ref_keys - new_keys:
            msg, refmsg = self.msgs[key], ref.msgs[key]
            msg.sources = refmsg.sources
            msg.is_commented = refmsg.is_commented
            msgs[key] = msg

        # Next process new keys.
        if use_similar > 0.0:
            for key, msgid in map(get_best_similar,
                                  tuple((nk, use_similar, tuple(similar_pool.keys())) for nk in new_keys)):
                if msgid:
                    # Try to get the same context, else just get one...
                    skey = (key[0], msgid)
                    if skey not in similar_pool[msgid]:
                        skey = tuple(similar_pool[msgid])[0]
                    # We keep org translation and comments, and mark message as fuzzy.
                    msg, refmsg = self.msgs[skey].copy(), ref.msgs[key].copy()
                    msg.msgctxt = refmsg.msgctxt
                    msg.msgid = refmsg.msgid
                    msg.sources = refmsg.sources
                    msg.is_fuzzy = True
                    msg.is_commented = refmsg.is_commented
                    msgs[key] = msg
                else:
                    msgs[key] = ref.msgs[key].copy()
        else:
            for key in new_keys:
                msgs[key] = ref.msgs[key].copy()

        # Add back all "old" and already commented messages as commented ones, if required
        # (and translation was not void!).
        if keep_old_commented:
            for key in removed_keys:
                msgs[key] = self.msgs[key]
                msgs[key].is_commented = True
                msgs[key].sources = []

        # Special 'meta' message, change project ID version and pot creation date...
        key = self.settings.PO_HEADER_KEY
        rep = []
        markers = ("Project-Id-Version:", "POT-Creation-Date:")
        for mrk in markers:
            for rl in ref.msgs[key].msgstr_lines:
                if rl.startswith(mrk):
                    for idx, ml in enumerate(msgs[key].msgstr_lines):
                        if ml.startswith(mrk):
                            rep.append((idx, rl))
        for idx, txt in rep:
            msgs[key].msgstr_lines[idx] = txt

        # And finalize the update!
        self.msgs = msgs
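
    # Example (illustrative; names are hypothetical): refresh a language against a
    # freshly extracted POT template; entries no longer in the template are kept as
    # commented ones by default:
    #     lang_msgs.update(pot_msgs)
    #     lang_msgs.update_info()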

    def update_info(self):
        self.trans_msgs.clear()
        self.fuzzy_msgs.clear()
        self.comm_msgs.clear()
        self.ttip_msgs.clear()
        self.contexts.clear()
        self.nbr_signs = 0
        self.nbr_trans_signs = 0
        for key, msg in self.msgs.items():
            if key == self.settings.PO_HEADER_KEY:
                continue
            if msg.is_commented:
                self.comm_msgs.add(key)
            else:
                if msg.msgstr:
                    self.trans_msgs.add(key)
                if msg.is_fuzzy:
                    self.fuzzy_msgs.add(key)
                if msg.is_tooltip:
                    self.ttip_msgs.add(key)
                self.contexts.add(key[0])
                self.nbr_signs += len(msg.msgid)
                self.nbr_trans_signs += len(msg.msgstr)
        self.nbr_msgs = len(self.msgs)
        self.nbr_trans_msgs = len(self.trans_msgs - self.fuzzy_msgs)
        self.nbr_ttips = len(self.ttip_msgs)
        self.nbr_trans_ttips = len(self.ttip_msgs & (self.trans_msgs - self.fuzzy_msgs))
        self.nbr_comm_msgs = len(self.comm_msgs)

    def print_info(self, prefix="", output=print, print_stats=True, print_errors=True):
        """
        Print out some info about an I18nMessages object.
        """
        lvl = 0.0
        lvl_ttips = 0.0
        lvl_comm = 0.0
        lvl_trans_ttips = 0.0
        lvl_ttips_in_trans = 0.0
        if self.nbr_msgs > 0:
            lvl = float(self.nbr_trans_msgs) / float(self.nbr_msgs)
            lvl_ttips = float(self.nbr_ttips) / float(self.nbr_msgs)
            lvl_comm = float(self.nbr_comm_msgs) / float(self.nbr_msgs + self.nbr_comm_msgs)
        if self.nbr_ttips > 0:
            lvl_trans_ttips = float(self.nbr_trans_ttips) / float(self.nbr_ttips)
        if self.nbr_trans_msgs > 0:
            lvl_ttips_in_trans = float(self.nbr_trans_ttips) / float(self.nbr_trans_msgs)

        lines = []
        if print_stats:
            lines += [
                "",
                "{:>6.1%} done! ({} translated messages over {}).\n"
                "".format(lvl, self.nbr_trans_msgs, self.nbr_msgs),
                "{:>6.1%} of messages are tooltips ({} over {}).\n"
                "".format(lvl_ttips, self.nbr_ttips, self.nbr_msgs),
                "{:>6.1%} of tooltips are translated ({} over {}).\n"
                "".format(lvl_trans_ttips, self.nbr_trans_ttips, self.nbr_ttips),
                "{:>6.1%} of translated messages are tooltips ({} over {}).\n"
                "".format(lvl_ttips_in_trans, self.nbr_trans_ttips, self.nbr_trans_msgs),
                "{:>6.1%} of messages are commented ({} over {}).\n"
                "".format(lvl_comm, self.nbr_comm_msgs, self.nbr_comm_msgs + self.nbr_msgs),
                "This translation is currently made of {} signs.\n".format(self.nbr_trans_signs)
            ]
        if print_errors and self.parsing_errors:
            lines += ["WARNING! Errors during parsing:\n"]
            lines += ["    Around line {}: {}\n".format(line, error) for line, error in self.parsing_errors]
        output(prefix.join(lines))

    def invalidate_reverse_cache(self, rebuild_now=False):
        """
        Invalidate the reverse cache used by find_best_messages_matches.
        """
        self._reverse_cache = None
        if rebuild_now:
            src_to_msg, ctxt_to_msg, msgid_to_msg, msgstr_to_msg = {}, {}, {}, {}
            ctxt_to_msg.setdefault(self.settings.DEFAULT_CONTEXT, set())
            for key, msg in self.msgs.items():
                if msg.is_commented:
                    continue
                ctxt, msgid = key
                ctxt_to_msg.setdefault(ctxt, set()).add(key)
                msgid_to_msg.setdefault(msgid, set()).add(key)
                msgstr_to_msg.setdefault(msg.msgstr, set()).add(key)
                for src in msg.sources:
                    src_to_msg.setdefault(src, set()).add(key)
            self._reverse_cache = (src_to_msg, ctxt_to_msg, msgid_to_msg, msgstr_to_msg)

    def find_best_messages_matches(self, msgs, msgmap, rna_ctxt, rna_struct_name, rna_prop_name, rna_enum_name):
        """
        Try to find the best I18nMessages (i.e. context/msgid pairs) for the given UI messages:
            msgs: an object containing properties listed in msgmap's values.
            msgmap: a dict of various messages to use for search:
                        {"but_label": subdict, "rna_label": subdict, "enum_label": subdict,
                         "but_tip": subdict, "rna_tip": subdict, "enum_tip": subdict}
                    each subdict being like that:
                        {"msgstr": id, "msgid": id, "msg_flags": id, "key": set()}
                    where msgstr and msgid are identifiers of string props in msgs (resp. translated and org message),
                    msg_flags is not used here, and key is a set of matching (msgctxt, msgid) keys for the item.
        The other parameters are about the RNA element from which the strings come:
            rna_ctxt: the labels' i18n context.
            rna_struct_name, rna_prop_name, rna_enum_name: should be self-explanatory!
        """
        try:
            import bpy
        except ModuleNotFoundError:
            print("Could not import bpy, find_best_messages_matches must be run from within Blender.")
            return

        # Build helper mappings.
        # Note it's the user's responsibility to know when to invalidate (and hence force rebuild) this cache!
        if self._reverse_cache is None:
            self.invalidate_reverse_cache(True)
        src_to_msg, ctxt_to_msg, msgid_to_msg, msgstr_to_msg = self._reverse_cache

        # print(len(src_to_msg), len(ctxt_to_msg), len(msgid_to_msg), len(msgstr_to_msg))

        # Build RNA key.
        src, src_rna, src_enum = bpy.utils.make_rna_paths(rna_struct_name, rna_prop_name, rna_enum_name)
        print("src: ", src_rna, src_enum)

        # Labels.
        elbl = getattr(msgs, msgmap["enum_label"]["msgstr"])
        if elbl:
            # Enum items' labels have no i18n context...
            k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
            if elbl in msgid_to_msg:
                k &= msgid_to_msg[elbl]
            elif elbl in msgstr_to_msg:
                k &= msgstr_to_msg[elbl]
            else:
                k = set()
            # We assume if we already have only one key, it's the good one!
            if len(k) > 1 and src_enum in src_to_msg:
                k &= src_to_msg[src_enum]
            msgmap["enum_label"]["key"] = k
        rlbl = getattr(msgs, msgmap["rna_label"]["msgstr"])
        # print("rna label: " + rlbl, rlbl in msgid_to_msg, rlbl in msgstr_to_msg)
        if rlbl:
            k = ctxt_to_msg.get(rna_ctxt, set()).copy()
            if k and rlbl in msgid_to_msg:
                k &= msgid_to_msg[rlbl]
            elif k and rlbl in msgstr_to_msg:
                k &= msgstr_to_msg[rlbl]
            else:
                k = set()
            # We assume if we already have only one key, it's the good one!
            if len(k) > 1 and src_rna in src_to_msg:
                k &= src_to_msg[src_rna]
            msgmap["rna_label"]["key"] = k
        blbl = getattr(msgs, msgmap["but_label"]["msgstr"])
        blbls = [blbl]
        if blbl.endswith(self.settings.NUM_BUTTON_SUFFIX):
            # Num buttons report their label with a trailing ': '...
            blbls.append(blbl[:-len(self.settings.NUM_BUTTON_SUFFIX)])
        print("button label: " + blbl)
        if blbl and elbl not in blbls and (rlbl not in blbls or rna_ctxt != self.settings.DEFAULT_CONTEXT):
            # Always Default context for button label :/
            k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
            found = False
            for bl in blbls:
                if bl in msgid_to_msg:
                    k &= msgid_to_msg[bl]
                    found = True
                    break
                elif bl in msgstr_to_msg:
                    k &= msgstr_to_msg[bl]
                    found = True
                    break
            if not found:
                k = set()
            # XXX No need to check against RNA path here, if blabel is different
            #     from rlabel, should not match anyway!
            msgmap["but_label"]["key"] = k

        # Tips (they never have a specific context).
        etip = getattr(msgs, msgmap["enum_tip"]["msgstr"])
        # print("enum tip: " + etip)
        if etip:
            k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
            if etip in msgid_to_msg:
                k &= msgid_to_msg[etip]
            elif etip in msgstr_to_msg:
                k &= msgstr_to_msg[etip]
            else:
                k = set()
            # We assume if we already have only one key, it's the good one!
            if len(k) > 1 and src_enum in src_to_msg:
                k &= src_to_msg[src_enum]
            msgmap["enum_tip"]["key"] = k
        rtip = getattr(msgs, msgmap["rna_tip"]["msgstr"])
        # print("rna tip: " + rtip)
        if rtip:
            k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
            if k and rtip in msgid_to_msg:
                k &= msgid_to_msg[rtip]
            elif k and rtip in msgstr_to_msg:
                k &= msgstr_to_msg[rtip]
            else:
                k = set()
            # We assume if we already have only one key, it's the good one!
            if len(k) > 1 and src_rna in src_to_msg:
                k &= src_to_msg[src_rna]
            msgmap["rna_tip"]["key"] = k
            # print(k)
        btip = getattr(msgs, msgmap["but_tip"]["msgstr"])
        # print("button tip: " + btip)
        if btip and btip not in {rtip, etip}:
            k = ctxt_to_msg[self.settings.DEFAULT_CONTEXT].copy()
            if btip in msgid_to_msg:
                k &= msgid_to_msg[btip]
            elif btip in msgstr_to_msg:
                k &= msgstr_to_msg[btip]
            else:
                k = set()
            # XXX No need to check against RNA path here, if btip is different from rtip, should not match anyway!
            msgmap["but_tip"]["key"] = k

    def parse(self, kind, key, src):
        del self.parsing_errors[:]
        self.parsers[kind](self, src, key)
        if self.parsing_errors:
            print("{} ({}):".format(key, src))
            self.print_info(print_stats=False)
            print("The parser solved them as well as it could...")
        self.update_info()

    def parse_messages_from_po(self, src, key=None):
        """
        Parse a po file.
        Note: This function will silently "arrange" mis-formatted entries, so calling write_messages() afterward
              should always produce a po-valid file, though its content may not be correct!
        """
        reading_msgid = False
        reading_msgstr = False
        reading_msgctxt = False
        reading_comment = False
        is_commented = False
        is_fuzzy = False
        msgctxt_lines = []
        msgid_lines = []
        msgstr_lines = []
        comment_lines = []

        default_context = self.settings.DEFAULT_CONTEXT

        # Helper function
        def finalize_message(self, line_nr):
            nonlocal reading_msgid, reading_msgstr, reading_msgctxt, reading_comment
            nonlocal is_commented, is_fuzzy, msgid_lines, msgstr_lines, msgctxt_lines, comment_lines

            msgid = I18nMessage.do_unescape("".join(msgid_lines))
            msgctxt = I18nMessage.do_unescape("".join(msgctxt_lines))
            msgkey = (msgctxt or default_context, msgid)

            # Never allow overriding existing msgid/msgctxt pairs!
            if msgkey in self.msgs:
                self.parsing_errors.append((line_nr, "{} context/msgid is already in current messages!".format(msgkey)))
                return

            self.msgs[msgkey] = I18nMessage(msgctxt_lines, msgid_lines, msgstr_lines, comment_lines,
                                            is_commented, is_fuzzy, settings=self.settings)

            # Let's clean up and get ready for next message!
            reading_msgid = reading_msgstr = reading_msgctxt = reading_comment = False
            is_commented = is_fuzzy = False
            msgctxt_lines = []
            msgid_lines = []
            msgstr_lines = []
            comment_lines = []

        # try to use src as file name...
        if os.path.isfile(src):
            if os.stat(src).st_size > self.settings.PARSER_MAX_FILE_SIZE:
                # Security, else we could read arbitrary huge files!
                print("WARNING: skipping file {}, too huge!".format(src))
                return
            if not key:
                key = src
            with open(src, 'r', encoding="utf-8") as f:
                src = f.read()

        _msgctxt = self.settings.PO_MSGCTXT
        _comm_msgctxt = self.settings.PO_COMMENT_PREFIX_MSG + _msgctxt
        _len_msgctxt = len(_msgctxt + '"')
        _len_comm_msgctxt = len(_comm_msgctxt + '"')
        _msgid = self.settings.PO_MSGID
        _comm_msgid = self.settings.PO_COMMENT_PREFIX_MSG + _msgid
        _len_msgid = len(_msgid + '"')
        _len_comm_msgid = len(_comm_msgid + '"')
        _msgstr = self.settings.PO_MSGSTR
        _comm_msgstr = self.settings.PO_COMMENT_PREFIX_MSG + _msgstr
        _len_msgstr = len(_msgstr + '"')
        _len_comm_msgstr = len(_comm_msgstr + '"')
        _comm_str = self.settings.PO_COMMENT_PREFIX_MSG
        _comm_fuzzy = self.settings.PO_COMMENT_FUZZY
        _len_comm_str = len(_comm_str + '"')
|
2013-01-12 16:49:06 +00:00
|
|
|
|
|
|
|
        # Main loop over all lines in src...
        for line_nr, line in enumerate(src.splitlines()):
            if line == "":
                if reading_msgstr:
                    finalize_message(self, line_nr)
                continue

            elif line.startswith(_msgctxt) or line.startswith(_comm_msgctxt):
                reading_comment = False
                reading_msgctxt = True
                if line.startswith(_comm_str):
                    is_commented = True
                    line = line[_len_comm_msgctxt:-1]
                else:
                    line = line[_len_msgctxt:-1]
                msgctxt_lines.append(line)

            elif line.startswith(_msgid) or line.startswith(_comm_msgid):
                reading_comment = False
                reading_msgid = True
                if line.startswith(_comm_str):
                    if not is_commented and reading_msgctxt:
                        self.parsing_errors.append((line_nr, "commented msgid following regular msgctxt"))
                    is_commented = True
                    line = line[_len_comm_msgid:-1]
                else:
                    line = line[_len_msgid:-1]
                    reading_msgctxt = False
                msgid_lines.append(line)

            elif line.startswith(_msgstr) or line.startswith(_comm_msgstr):
                if not reading_msgid:
                    self.parsing_errors.append((line_nr, "msgstr without a prior msgid"))
                else:
                    reading_msgid = False
                    reading_msgstr = True
                    if line.startswith(_comm_str):
                        line = line[_len_comm_msgstr:-1]
                        if not is_commented:
                            self.parsing_errors.append((line_nr, "commented msgstr following regular msgid"))
                    else:
                        line = line[_len_msgstr:-1]
                        if is_commented:
                            self.parsing_errors.append((line_nr, "regular msgstr following commented msgid"))
                    msgstr_lines.append(line)

            elif line.startswith(_comm_str[0]):
                if line.startswith(_comm_str):
                    if reading_msgctxt:
                        if is_commented:
                            msgctxt_lines.append(line[_len_comm_str:-1])
                        else:
                            msgctxt_lines.append(line)
                            self.parsing_errors.append((line_nr, "commented string while reading regular msgctxt"))
                    elif reading_msgid:
                        if is_commented:
                            msgid_lines.append(line[_len_comm_str:-1])
                        else:
                            msgid_lines.append(line)
                            self.parsing_errors.append((line_nr, "commented string while reading regular msgid"))
                    elif reading_msgstr:
                        if is_commented:
                            msgstr_lines.append(line[_len_comm_str:-1])
                        else:
                            msgstr_lines.append(line)
                            self.parsing_errors.append((line_nr, "commented string while reading regular msgstr"))
                else:
                    if reading_msgctxt or reading_msgid or reading_msgstr:
                        self.parsing_errors.append((line_nr,
                                                    "commented string within msgctxt, msgid or msgstr scope, ignored"))
                    elif line.startswith(_comm_fuzzy):
                        is_fuzzy = True
                    else:
                        comment_lines.append(line)
                    reading_comment = True

            else:
                if reading_msgctxt:
                    msgctxt_lines.append(line[1:-1])
                elif reading_msgid:
                    msgid_lines.append(line[1:-1])
                elif reading_msgstr:
                    line = line[1:-1]
                    msgstr_lines.append(line)
                else:
                    self.parsing_errors.append((line_nr, "regular string outside msgctxt, msgid or msgstr scope"))
                    # self.parsing_errors.append((
                    #     line_nr, "regular string outside msgctxt, msgid or msgstr scope:\n\t\t{}"
                    #     "\n\tComments:{}\n\tContext:{}\n\tKey:{}\n\tMessage:{}".format(
                    #         line, comment_lines, msgctxt_lines, msgid_lines, msgstr_lines)))

        # If there is no final empty line, the last message is not finalized!
        if reading_msgstr:
            finalize_message(self, line_nr)

        self.unescape()

    def write(self, kind, dest):
        return self.writers[kind](self, dest)

    def write_messages_to_po(self, fname, compact=False):
        """
        Write messages into the PO file `fname`.
        """
        default_context = self.settings.DEFAULT_CONTEXT

        def _write(self, f, compact):
            _msgctxt = self.settings.PO_MSGCTXT
            _msgid = self.settings.PO_MSGID
            _msgstr = self.settings.PO_MSGSTR
            _comm = self.settings.PO_COMMENT_PREFIX_MSG

            self.escape()

            for msg in self.msgs.values():
                if compact and (msg.is_commented or msg.is_fuzzy or not msg.msgstr_lines):
                    continue
                if not compact:
                    f.write("\n".join(msg.comment_lines))
                # Only mark as fuzzy if msgstr is not empty!
                if msg.is_fuzzy and msg.msgstr_lines:
                    f.write("\n" + self.settings.PO_COMMENT_FUZZY)
                _p = _comm if msg.is_commented else ""
                chunks = []
                if msg.msgctxt and msg.msgctxt != default_context:
                    if len(msg.msgctxt_lines) > 1:
                        chunks += [
                            "\n" + _p + _msgctxt + "\"\"\n" + _p + "\"",
                            ("\"\n" + _p + "\"").join(msg.msgctxt_lines),
                            "\"",
                        ]
                    else:
                        chunks += ["\n" + _p + _msgctxt + "\"" + msg.msgctxt + "\""]
                if len(msg.msgid_lines) > 1:
                    chunks += [
                        "\n" + _p + _msgid + "\"\"\n" + _p + "\"",
                        ("\"\n" + _p + "\"").join(msg.msgid_lines),
                        "\"",
                    ]
                else:
                    chunks += ["\n" + _p + _msgid + "\"" + msg.msgid + "\""]
                if len(msg.msgstr_lines) > 1:
                    chunks += [
                        "\n" + _p + _msgstr + "\"\"\n" + _p + "\"",
                        ("\"\n" + _p + "\"").join(msg.msgstr_lines),
                        "\"",
                    ]
                else:
                    chunks += ["\n" + _p + _msgstr + "\"" + msg.msgstr + "\""]
                chunks += ["\n\n"]
                f.write("".join(chunks))

            self.unescape()

        self.normalize(max_len=0)  # No wrapping for now...
        if isinstance(fname, str):
            with open(fname, 'w', encoding="utf-8") as f:
                _write(self, f, compact)
        # Else assume fname is already a file(-like) object!
        else:
            _write(self, fname, compact)

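    # Example usage (a sketch; `msgs` stands for any I18nMessages instance):
    #     msgs.write_messages_to_po("fr.po")                # Full dump, comments included.
    #     msgs.write_messages_to_po("fr.po", compact=True)  # Skip commented, fuzzy and untranslated entries.
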
    def write_messages_to_mo(self, fname):
        """
        Write messages into the MO file `fname`.
        """
        # XXX Temp solution, until I can make my own mo generator work...
        import subprocess
        with tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8") as tmp_po_f:
            os.makedirs(os.path.dirname(fname), exist_ok=True)
            self.write_messages_to_po(tmp_po_f)
            cmd = (
                self.settings.GETTEXT_MSGFMT_EXECUTABLE,
                "--statistics",  # Show stats.
                tmp_po_f.name,
                "-o",
                fname,
            )
            ret = subprocess.run(cmd, capture_output=True)
            return ret

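        # For reference, the MO layout targeted below (gettext manual notation,
        # no hash table) is, for N messages:
        #     offset 0:           magic number and format revision,
        #     offset O == 32:     N pairs of (length, offset) for the msgids,
        #     offset T == O + 8N: N pairs of (length, offset) for the msgstrs,
        #     offset H == T + 8N: start of the NUL-terminated string data.
        # E.g. with N == 2: O == 32, T == 48, H == 64.
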
        # XXX Code below is currently broken (it seems to generate corrupted mo files)!
        # Using http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html notation.
        # Not generating the hash table!
        # Only translated, unfuzzy messages are taken into account!
        default_context = self.settings.DEFAULT_CONTEXT
        msgs = tuple(v for v in self.msgs.values() if not (v.is_fuzzy or v.is_commented) and v.msgstr and v.msgid)
        msgs = sorted(msgs,
                      key=lambda e: (e.msgctxt + e.msgid) if (e.msgctxt and e.msgctxt != default_context) else e.msgid)
        magic_nbr = 0x950412de
        format_rev = 0
        N = len(msgs)
        O = 32
        T = O + N * 8
        S = 0
        H = T + N * 8
        # Prepare our data! We need the key (optional context and msgid), the translation,
        # and the offset and length of both.
        # Offsets are relative to the start of their own list.
        EOT = b"\x04"  # The EOT byte, used to concatenate context and msgid.
        _msgid_offset = 0
        _msgstr_offset = 0

        def _gen(v):
            nonlocal _msgid_offset, _msgstr_offset
            msgid = v.msgid.encode("utf-8")
            msgstr = v.msgstr.encode("utf-8")
            if v.msgctxt and v.msgctxt != default_context:
                msgctxt = v.msgctxt.encode("utf-8")
                msgid = msgctxt + EOT + msgid
            # Don't forget the final NULL char!
            _msgid_len = len(msgid) + 1
            _msgstr_len = len(msgstr) + 1
            ret = ((msgid, _msgid_len, _msgid_offset), (msgstr, _msgstr_len, _msgstr_offset))
            _msgid_offset += _msgid_len
            _msgstr_offset += _msgstr_len
            return ret
        msgs = tuple(_gen(v) for v in msgs)
        msgid_start = H
        msgstr_start = msgid_start + _msgid_offset
        print(N, msgstr_start + _msgstr_offset)
        print(msgs)

        with open(fname, 'wb') as f:
            # Header...
            f.write(struct.pack("=8I", magic_nbr, format_rev, N, O, T, S, H, 0))
            # Msgids' lengths and offsets.
            f.write(b"".join(struct.pack("=2I", length, msgid_start + offset) for (_1, length, offset), _2 in msgs))
            # Msgstrs' lengths and offsets.
            f.write(b"".join(struct.pack("=2I", length, msgstr_start + offset) for _1, (_2, length, offset) in msgs))
            # No hash table!
            # Msgids.
            f.write(b"\0".join(msgid for (msgid, _1, _2), _3 in msgs) + b"\0")
            # Msgstrs.
            f.write(b"\0".join(msgstr for _1, (msgstr, _2, _3) in msgs) + b"\0")

    parsers = {
        "PO": parse_messages_from_po,
    }

    writers = {
        "PO": write_messages_to_po,
        "PO_COMPACT": lambda s, fn: s.write_messages_to_po(fn, True),
        "MO": write_messages_to_mo,
    }

    @classmethod
    def update_from_pot_callback(cls, pot, lng, settings):
        """
        Update or create a single PO file (specified by a filepath) from the given POT `I18nMessages` data.

        Callback usable in a context where Blender specific modules (like ``bpy``) are not available.
        """
        import sys
        sys.stdout.reconfigure(encoding="utf-8")
        sys.stderr.reconfigure(encoding="utf-8")
        if not lng['use']:
            return
        if os.path.isfile(lng['po_path']):
            po = cls(uid=lng['uid'], kind='PO', src=lng['po_path'], settings=settings)
            po.update(pot)
        else:
            po = pot
        po.write(kind="PO", dest=lng['po_path'])
        print("{} PO written!".format(lng['uid']))

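    # This callback is meant to be dispatched over many languages at once; an
    # illustrative (hypothetical) driver using a multiprocessing pool:
    #     from functools import partial
    #     pool.map(partial(I18nMessages.update_from_pot_callback, pot, settings=i18n_settings), languages)
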
    @classmethod
    def cleanup_callback(cls, lng, settings):
        """
        Clean up a single PO file (specified by a filepath).

        Callback usable in a context where Blender specific modules (like ``bpy``) are not available.
        """
        import sys
        sys.stdout.reconfigure(encoding="utf-8")
        sys.stderr.reconfigure(encoding="utf-8")
        if not lng['use']:
            return
        po = cls(uid=lng['uid'], kind='PO', src=lng['po_path'], settings=settings)
        errs = po.check(fix=True)
        cleanedup_commented = po.clean_commented()
        po.write(kind="PO", dest=lng['po_path'])
        print("Processing {} language ({}).\n"
              "Cleaned up {} commented messages.\n".format(lng['name'], lng['uid'], cleanedup_commented) +
              ("Errors in this po, solved as best as possible!\n\t" + "\n\t".join(errs) if errs else "") + "\n")

    @classmethod
    def update_to_blender_repo_callback(cls, lng, settings):
        """
        Clean up and write a single PO file (specified by a filepath) into the relevant
        Blender source 'compact' PO file.

        Callback usable in a context where Blender specific modules (like ``bpy``) are not available.
        """
        import sys
        sys.stdout.reconfigure(encoding="utf-8")
        sys.stderr.reconfigure(encoding="utf-8")
        reports = []
        if lng['uid'] in settings.IMPORT_LANGUAGES_SKIP:
            reports.append(
                "Skipping {} language ({}), edit settings if you want to enable it.".format(
                    lng['name'], lng['uid']))
            return lng['uid'], 0.0, reports
        if not lng['use']:
            reports.append("Skipping {} language ({}).".format(lng['name'], lng['uid']))
            return lng['uid'], 0.0, reports
        po = cls(uid=lng['uid'], kind='PO', src=lng['po_path'], settings=settings)
        errs = po.check(fix=True)
        reports.append("Processing {} language ({}).\n"
                       "Cleaned up {} commented messages.\n".format(lng['name'], lng['uid'], po.clean_commented()) +
                       ("Errors in this po, solved as best as possible!\n\t" + "\n\t".join(errs) if errs else ""))
        if lng['uid'] in settings.IMPORT_LANGUAGES_RTL:
            if platform.system() not in {'Linux'}:
                reports.append("Skipping RTL processing of {} language ({}), "
                               "this is currently only supported on Linux.".format(lng['name'], lng['uid']))
            else:
                po.rtl_process()
        po.write(kind="PO_COMPACT", dest=lng['po_path_blender'])
        po.update_info()
        return lng['uid'], po.nbr_trans_msgs / po.nbr_msgs, reports


class I18n:
    """
    Internal representation of a whole translation set.
    """

    @staticmethod
    def _parser_check_file(path, maxsize=settings.PARSER_MAX_FILE_SIZE,
                           _begin_marker=settings.PARSER_PY_MARKER_BEGIN,
                           _end_marker=settings.PARSER_PY_MARKER_END):
        if os.stat(path).st_size > maxsize:
            # Security, else we could read arbitrarily huge files!
            print("WARNING: skipping file {}, too huge!".format(path))
            return None, None, None, False
        txt = ""
        with open(path, encoding="utf8") as f:
            txt = f.read()
        _in = 0
        _out = len(txt)
        if _begin_marker:
            _in = None
            if _begin_marker in txt:
                _in = txt.index(_begin_marker) + len(_begin_marker)
        if _end_marker:
            _out = None
            if _end_marker in txt:
                _out = txt.index(_end_marker)
        if _in is not None and _out is not None:
            in_txt, txt, out_txt = txt[:_in], txt[_in:_out], txt[_out:]
        elif _in is not None:
            in_txt, txt, out_txt = txt[:_in], txt[_in:], None
        elif _out is not None:
            in_txt, txt, out_txt = None, txt[:_out], txt[_out:]
        else:
            in_txt, txt, out_txt = None, txt, None
        return in_txt, txt, out_txt, ("translations_tuple" in txt)

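    # Sketch of the split performed above (marker values are illustrative):
    #     txt == "head\n# BEGIN\nbody\n# END\ntail"
    # with _begin_marker == "# BEGIN" and _end_marker == "# END" yields:
    #     in_txt  == "head\n# BEGIN"  (up to and including the begin marker),
    #     txt     == "\nbody\n"       (the payload between the markers),
    #     out_txt == "# END\ntail"    (the end marker and everything after it).
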
    @staticmethod
    def _dst(self, path, uid, kind):
        if isinstance(path, str):
            if kind == 'PO':
                if uid == self.settings.PARSER_TEMPLATE_ID:
                    if not path.endswith(".pot"):
                        return os.path.join(os.path.dirname(path), "blender.pot")
                if not path.endswith(".po"):
                    return os.path.join(os.path.dirname(path), uid + ".po")
            elif kind == 'PY':
                if not path.endswith(".py"):
                    if os.path.isdir(path):
                        return os.path.join(path, "translations.py")
                    return os.path.join(os.path.dirname(path), "translations.py")
        return path

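    # Example behavior (hypothetical paths):
    #     _dst(self, "/i18n/fr.txt", "fr", 'PO')  -> "/i18n/fr.po"
    #     _dst(self, "/i18n/fr.po", "fr", 'PO')   -> "/i18n/fr.po" (returned unchanged)
    #     _dst(self, "/my_addon", uid, 'PY')      -> "/my_addon/translations.py" (when path is a directory)
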
    def __init__(self, kind=None, src=None, langs=set(), settings=settings):
        self.settings = settings
        self.trans: dict[str, I18nMessages] = {}
        self.src = {}  # Should have the same keys as self.trans (plus PARSER_PY_ID for the py file)!
        self.dst = self._dst  # A callable that transforms src_path into dst_path!
        if kind and src:
            self.parse(kind, src, langs)
        self.update_info()

    def _py_file_get(self):
        return self.src.get(self.settings.PARSER_PY_ID)

    def _py_file_set(self, value):
        self.src[self.settings.PARSER_PY_ID] = value
    py_file = property(_py_file_get, _py_file_set)

    def escape(self, do_all=False):
        for trans in self.trans.values():
            trans.escape(do_all)

    def unescape(self, do_all=True):
        for trans in self.trans.values():
            trans.unescape(do_all)

    def update_info(self):
        self.nbr_trans = 0
        self.lvl = 0.0
        self.lvl_ttips = 0.0
        self.lvl_trans_ttips = 0.0
        self.lvl_ttips_in_trans = 0.0
        self.lvl_comm = 0.0
        self.nbr_signs = 0
        self.nbr_trans_signs = 0
        self.contexts = set()

        if self.settings.PARSER_TEMPLATE_ID in self.trans:
            self.nbr_trans = len(self.trans) - 1
            self.nbr_signs = self.trans[self.settings.PARSER_TEMPLATE_ID].nbr_signs
        else:
            self.nbr_trans = len(self.trans)
        for msgs in self.trans.values():
            msgs.update_info()
            if msgs.nbr_msgs > 0:
                self.lvl += float(msgs.nbr_trans_msgs) / float(msgs.nbr_msgs)
                self.lvl_ttips += float(msgs.nbr_ttips) / float(msgs.nbr_msgs)
                self.lvl_comm += float(msgs.nbr_comm_msgs) / float(msgs.nbr_msgs + msgs.nbr_comm_msgs)
                if msgs.nbr_ttips > 0:
                    self.lvl_trans_ttips = float(msgs.nbr_trans_ttips) / float(msgs.nbr_ttips)
                if msgs.nbr_trans_msgs > 0:
                    self.lvl_ttips_in_trans = float(msgs.nbr_trans_ttips) / float(msgs.nbr_trans_msgs)
            if self.nbr_signs == 0:
                self.nbr_signs = msgs.nbr_signs
            self.nbr_trans_signs += msgs.nbr_trans_signs
            self.contexts |= msgs.contexts

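    # For instance (a sketch, no template loaded): two languages translated at
    # 50% and 100% leave self.lvl == 1.5, which print_stats() below reports as
    # the 1.5 / 2 == 75% average.
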
    def print_stats(self, prefix="", print_msgs=True):
        """
        Print out some stats about an I18n object.
        If print_msgs is True, it will also print all its translations' stats.
        """
        if print_msgs:
            msgs_prefix = prefix + "    "
            for key, msgs in self.trans.items():
                if key == self.settings.PARSER_TEMPLATE_ID:
                    continue
                print(prefix + key + ":")
                msgs.print_stats(prefix=msgs_prefix)
                print(prefix)

        nbr_contexts = len(self.contexts - {self.settings.DEFAULT_CONTEXT})
        if nbr_contexts != 1:
            if nbr_contexts == 0:
                nbr_contexts = "No"
            _ctx_txt = "s are"
        else:
            _ctx_txt = " is"
        lines = ((
            "",
            "Average stats for all {} translations:\n".format(self.nbr_trans),
            "    {:>6.1%} done!\n".format(self.lvl / self.nbr_trans),
            "    {:>6.1%} of messages are tooltips.\n".format(self.lvl_ttips / self.nbr_trans),
            "    {:>6.1%} of tooltips are translated.\n".format(self.lvl_trans_ttips / self.nbr_trans),
            "    {:>6.1%} of translated messages are tooltips.\n".format(self.lvl_ttips_in_trans / self.nbr_trans),
            "    {:>6.1%} of messages are commented.\n".format(self.lvl_comm / self.nbr_trans),
            "    The org msgids are currently made of {} signs.\n".format(self.nbr_signs),
            "    All processed translations are currently made of {} signs.\n".format(self.nbr_trans_signs),
            "    {} specific context{} present:\n".format(nbr_contexts, _ctx_txt)) +
            tuple("        " + c + "\n" for c in self.contexts - {self.settings.DEFAULT_CONTEXT}) +
            ("\n",)
        )
        print(prefix.join(lines))

    @classmethod
    def check_py_module_has_translations(cls, src, settings=settings):
        """
        Check whether a given src (a py module, either a directory or a py file) has some i18n translation data,
        and return a tuple (src_file, translations_tuple) if yes, else (None, None).
        """
        txts = []
        if os.path.isdir(src):
            for root, _dnames, fnames in os.walk(src):
                for fname in fnames:
                    if not fname.endswith(".py"):
                        continue
                    path = os.path.join(root, fname)
                    _1, txt, _2, has_trans = cls._parser_check_file(path)
                    if has_trans:
                        txts.append((path, txt))
        elif src.endswith(".py") and os.path.isfile(src):
            _1, txt, _2, has_trans = cls._parser_check_file(src)
            if has_trans:
                txts.append((src, txt))
        for path, txt in txts:
            tuple_id = "translations_tuple"
            env = globals().copy()
            exec(txt, env)
            if tuple_id in env:
                return path, env[tuple_id]
        return None, None  # No data...

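    # Example usage (a sketch, hypothetical add-on path):
    #     path, tuples = I18n.check_py_module_has_translations("/addons/my_addon")
    #     if path is not None:
    #         ...  # `tuples` holds the module's `translations_tuple` data.
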
    def parse(self, kind, src, langs=set()):
        self.parsers[kind](self, src, langs)

    def parse_from_po(self, src, langs=set()):
        """
        src must be a tuple (dir_of_pos, pot_file), where:
            * dir_of_pos may contain iso_CODE.po files and/or iso_CODE/iso_CODE.po files.
            * pot_file may be None (in which case there will be no ref messages).
        If the langs set is empty, all languages found are loaded.
        """
        root_dir, pot_file = src
        if pot_file and os.path.isfile(pot_file):
            self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(self.settings.PARSER_TEMPLATE_ID, 'PO',
                                                                        pot_file, pot_file, settings=self.settings)
            self.src[self.settings.PARSER_TEMPLATE_ID] = pot_file

        for uid, po_file in get_po_files_from_dir(root_dir, langs):
            self.trans[uid] = I18nMessages(uid, 'PO', po_file, po_file, settings=self.settings)
            self.src[uid] = po_file

    def parse_from_py(self, src, langs=set()):
        """
        src must be a valid path, either a py file or a module directory (in which case all py files inside it
        will be checked, and the first matching file wins!).
        If the langs set is empty, all languages found are loaded.
        """
        default_context = self.settings.DEFAULT_CONTEXT
        self.py_file, msgs = self.check_py_module_has_translations(src, self.settings)
        if msgs is None:
            self.py_file = src
            msgs = ()
        for key, (sources, gen_comments), *translations in msgs:
            if self.settings.PARSER_TEMPLATE_ID not in self.trans:
                self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(self.settings.PARSER_TEMPLATE_ID,
                                                                            settings=self.settings)
                self.src[self.settings.PARSER_TEMPLATE_ID] = self.py_file
            if key in self.trans[self.settings.PARSER_TEMPLATE_ID].msgs:
                print("ERROR! key {} is defined more than once! Skipping re-definitions!".format(key))
                continue
            custom_src = [c for c in sources if c.startswith("bpy.")]
            src = [c for c in sources if not c.startswith("bpy.")]
            common_comment_lines = [self.settings.PO_COMMENT_PREFIX_GENERATED + c for c in gen_comments] + \
                                   [self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + c for c in custom_src] + \
                                   [self.settings.PO_COMMENT_PREFIX_SOURCE + c for c in src]
            ctxt = [key[0]] if key[0] else [default_context]
            self.trans[self.settings.PARSER_TEMPLATE_ID].msgs[key] = I18nMessage(ctxt, [key[1]], [""],
                                                                                 common_comment_lines, False, False,
                                                                                 settings=self.settings)
            for uid, msgstr, (is_fuzzy, user_comments) in translations:
                if uid not in self.trans:
                    self.trans[uid] = I18nMessages(uid, settings=self.settings)
                    self.src[uid] = self.py_file
                comment_lines = [self.settings.PO_COMMENT_PREFIX + c for c in user_comments] + common_comment_lines
                self.trans[uid].msgs[key] = I18nMessage(ctxt, [key[1]], [msgstr], comment_lines, False, is_fuzzy,
                                                        settings=self.settings)
        # key = self.settings.PO_HEADER_KEY
        # for uid, trans in self.trans.items():
        #     if key not in trans.msgs:
        #         trans.msgs[key]
        self.unescape()

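    # Minimal sketch of the data `parse_from_py` consumes (shape follows the
    # "Tuple of tuples" format documented in _gen_py below; values are illustrative):
    #     translations_tuple = (
    #         (("*", "Hello"),
    #          (("bpy.types.Scene.my_prop",), ()),
    #          ("fr_FR", "Bonjour",
    #           (False, ())),
    #          ),
    #     )
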
    def write(self, kind, langs=set()):
        return self.writers[kind](self, langs)

    def write_to_po(self, langs=set()):
        """
        Write all translations into po files. By default they are written into the same files (or dir)
        as the source; specify a custom self.dst callable to write somewhere else!
        Note: If langs is set and you want to export the pot template as well, langs must contain
        PARSER_TEMPLATE_ID ({} currently).
        """.format(self.settings.PARSER_TEMPLATE_ID)
        keys = self.trans.keys()
        if langs:
            keys &= langs
        for uid in keys:
            dst = self.dst(self, self.src.get(uid, ""), uid, 'PO')
            self.trans[uid].write('PO', dst)

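    # Example usage (a sketch, hypothetical paths):
    #     i18n = I18n(kind='PO', src=("/i18n/po_dir", "/i18n/blender.pot"))
    #     i18n.write_to_po()                 # Rewrite every PO file in place.
    #     i18n.write_to_po(langs={"fr_FR"})  # Only the French one.
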
    def write_to_py(self, langs=set()):
        """
        Write all translations as python code, either in a "translations.py" file under the same dir
        as the source(s), or in the specified file if self.py_file is set (as usual, this can be
        customized with the self.dst callable!).
        Note: If langs is set and you want to export the pot template as well, langs must contain
        PARSER_TEMPLATE_ID ({} currently).
        """.format(self.settings.PARSER_TEMPLATE_ID)
        default_context = self.settings.DEFAULT_CONTEXT

        def _gen_py(self, langs, tab="    "):
            _lencomm = len(self.settings.PO_COMMENT_PREFIX)
            _lengen = len(self.settings.PO_COMMENT_PREFIX_GENERATED)
            _lensrc = len(self.settings.PO_COMMENT_PREFIX_SOURCE)
            _lencsrc = len(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM)
            ret = [
                "# NOTE: You can safely move around this auto-generated block (with the begin/end markers!),",
                "#       and edit the translations by hand.",
                "#       Just carefully respect the format of the tuple!",
                "",
                "# Tuple of tuples:",
                "# ((msgctxt, msgid), (sources, gen_comments), (lang, translation, (is_fuzzy, comments)), ...)",
                "translations_tuple = (",
            ]
            # First gather all keys (msgctxt, msgid) - theoretically, all translations should share the same ones, but...
            # Note: using an ordered dict if possible (stupid sets cannot be ordered :/ ).
            keys = I18nMessages._new_messages()
            for trans in self.trans.values():
                keys.update(trans.msgs)
            # Get the ref translation (ideally the PARSER_TEMPLATE_ID one, else the first one that pops up).
            # The ref translation will be used to generate the sources "comments".
            ref = self.trans.get(self.settings.PARSER_TEMPLATE_ID) or self.trans[list(self.trans.keys())[0]]
            # Get all languages (uids) and sort them (PARSER_TEMPLATE_ID and PARSER_PY_ID excluded!).
            translations = self.trans.keys() - {self.settings.PARSER_TEMPLATE_ID, self.settings.PARSER_PY_ID}
            if langs:
                translations &= langs
            translations = [('"' + lng + '"', self.trans[lng]) for lng in sorted(translations)]
            print(f"Translated {len(keys)} keys saved to .py file:")
            print(*(k for k in keys.keys()), sep="\n")
            for key in keys.keys():
                if ref.msgs[key].is_commented:
                    continue
                # Key (context + msgid).
                msgctxt, msgid = ref.msgs[key].msgctxt, ref.msgs[key].msgid
                if not msgctxt:
                    msgctxt = default_context
                ret.append(tab + "(({}, \"{}\"),".format('"' + msgctxt + '"' if msgctxt else "None", msgid))
                # Common comments (mostly sources!).
                sources = []
                gen_comments = []
                for comment in ref.msgs[key].comment_lines:
                    if comment.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM):
                        sources.append(comment[_lencsrc:])
                    elif comment.startswith(self.settings.PO_COMMENT_PREFIX_SOURCE):
                        sources.append(comment[_lensrc:])
                    elif comment.startswith(self.settings.PO_COMMENT_PREFIX_GENERATED):
                        gen_comments.append(comment[_lengen:])
                if not (sources or gen_comments):
                    ret.append(tab + " ((), ()),")
                else:
                    if len(sources) > 1:
                        ret.append(tab + ' (("' + sources[0] + '",')
                        ret += [tab + '   "' + s + '",' for s in sources[1:-1]]
                        ret.append(tab + '   "' + sources[-1] + '"),')
                    else:
                        ret.append(tab + " ((" + ('"' + sources[0] + '",' if sources else "") + "),")
                    if len(gen_comments) > 1:
                        ret.append(tab + '  ("' + gen_comments[0] + '",')
                        ret += [tab + '   "' + s + '",' for s in gen_comments[1:-1]]
                        ret.append(tab + '   "' + gen_comments[-1] + '")),')
                    else:
                        ret.append(tab + "  (" + ('"' + gen_comments[0] + '",' if gen_comments else "") + ")),")
                # All languages.
                for lngstr, trans in translations:
                    if key not in trans.msgs:
                        continue  # Happens if no language is enabled in the translation UI and new messages are generated.
                    if trans.msgs[key].is_commented:
                        continue
                    # Language code and translation.
                    ret.append(tab + " (" + lngstr + ', "' + trans.msgs[key].msgstr + '",')
                    # User comments and fuzzy state.
                    comments = []
                    for comment in trans.msgs[key].comment_lines:
                        if comment.startswith(self.settings.PO_COMMENT_PREFIX):
                            comments.append(comment[_lencomm:])
                    ret.append(tab + "  (" + ("True" if trans.msgs[key].is_fuzzy else "False") + ",")
                    if len(comments) > 1:
ret.append(tab + ' ("' + comments[0] + '",')
|
|
|
|
ret += [tab * 2 + '"' + s + '",' for s in comments[1:-1]]
|
|
|
|
ret.append(tab * 2 + '"' + comments[-1] + '"))),')
|
2013-02-24 08:50:55 +00:00
|
|
|
else:
|
2013-04-12 12:19:50 +00:00
|
|
|
ret[-1] = ret[-1] + " (" + (('"' + comments[0] + '",') if comments else "") + "))),"
|
2014-02-13 08:51:33 +11:00
|
|
|
|
ret.append(" " + "),")
|
2013-02-24 08:50:55 +00:00
|
|
|
ret += [
|
|
|
|
")",
|
|
|
|
"",
|
|
|
|
"translations_dict = {}",
|
|
|
|
"for msg in translations_tuple:",
|
|
|
|
tab + "key = msg[0]",
|
|
|
|
tab + "for lang, trans, (is_fuzzy, comments) in msg[2:]:",
|
|
|
|
tab * 2 + "if trans and not is_fuzzy:",
|
|
|
|
tab * 3 + "translations_dict.setdefault(lang, {})[key] = trans",
|
|
|
|
"",
|
|
|
|
]
|
|
|
|
return ret
|
2013-01-12 16:49:06 +00:00
|
|
|
|
2013-02-24 08:50:55 +00:00
|
|
|
        self.escape(True)
        dst = self.dst(self, self.src.get(self.settings.PARSER_PY_ID, ""), self.settings.PARSER_PY_ID, 'PY')
        print(dst)
        prev = txt = nxt = ""
        if os.path.exists(dst):
            if not os.path.isfile(dst):
                print("WARNING: trying to write as python code into {}, which is not a file! Aborting.".format(dst))
                return
            prev, txt, nxt, _has_trans = self._parser_check_file(dst)
            if prev is None and nxt is None:
                print("WARNING: Looks like the given python file {} has no auto-generated translations yet; they will "
                      "be added at the end of the file, you can move that section later if needed...".format(dst))
                txt = ([txt, "", self.settings.PARSER_PY_MARKER_BEGIN] +
                       _gen_py(self, langs) +
                       ["", self.settings.PARSER_PY_MARKER_END])
            else:
                # We completely replace the text found between the start and end markers...
                txt = _gen_py(self, langs)
        else:
            print("Creating python file {} containing translations.".format(dst))
            txt = [
                "# SPDX-License-Identifier: GPL-2.0-or-later",
                "",
                self.settings.PARSER_PY_MARKER_BEGIN,
                "",
            ]
            txt += _gen_py(self, langs)
            txt += [
                "",
                self.settings.PARSER_PY_MARKER_END,
            ]
        with open(dst, 'w', encoding="utf8") as f:
            f.write((prev or "") + "\n".join(txt) + (nxt or ""))
        self.unescape()

    parsers = {
        "PO": parse_from_po,
        "PY": parse_from_py,
    }

    writers = {
        "PO": write_to_po,
        "PY": write_to_py,
    }
|