From 1a890e26c1c55528a041e335ce6265ba3ccc2e9e Mon Sep 17 00:00:00 2001
From: Mars Liu
Date: Wed, 29 Dec 2021 20:44:31 +0800
Subject: [PATCH] remove tree.py; use skill tree parser package

---
 main.py                             |   2 +-
 requirements.txt                    |   3 +-
 src/__pycache__/tree.cpython-38.pyc | Bin 11712 -> 0 bytes
 src/tree.py                         | 424 ----------------------------
 4 files changed, 3 insertions(+), 426 deletions(-)
 delete mode 100644 src/__pycache__/tree.cpython-38.pyc
 delete mode 100644 src/tree.py

diff --git a/main.py b/main.py
index baa9b41..c6cbc97 100644
--- a/main.py
+++ b/main.py
@@ -1,4 +1,4 @@
-from src.tree import TreeWalker
+from skill_tree.tree import TreeWalker
 
 if __name__ == '__main__':
     walker = TreeWalker("data", "pg", "PostgreSQL")
diff --git a/requirements.txt b/requirements.txt
index f060081..6441e60 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
 pre_commit~=2.16.0
-GitPython~=3.1.24
\ No newline at end of file
+GitPython~=3.1.24
+skill-tree-parser~=0.0.1
diff --git a/src/__pycache__/tree.cpython-38.pyc b/src/__pycache__/tree.cpython-38.pyc
deleted file mode 100644
index 4893bda9d7a0b6ad640dc983506ab9fdcfc6d070..0000000000000000000000000000000000000000
Binary files a/src/__pycache__/tree.cpython-38.pyc and /dev/null differ
zE*OLEkTG*L7tW_4L^vP_VZox>+}_ezYh2Pam&L?}1hk?A z?hlUy4-tBVgF@$XfYYt@Cu0vm`;(ArQ#LMgiwnYEubTH9AH=#~+-|~`rh3h5sA?EQ zni87WHKo#!zSzpg*o(5-1HgTaR`FWFS?IfpE3RLqAXy5VZ|6v6b&P3coc*N#LEI<) z_02ul?JWG}&g}JUnkm>31497NC;nbkKxo-Vz;3jDSZguU@T}m3VoS=(5FIvO;77sfJb~bVV?^F6@QVp-XBwRY@?=iI#DkX{ z1cVuq@}9yb@7oQ9K2LZjvyCg+xr62V7`{NT(I(mS+E<>x`r|LmJ-SJU{?y#dfBvP zG|nn3XqzYLODGu7VvGeyB*5Hbb#57Ej#hf_z>OOhfP|dAGT&-Qs|KCjW4k}CP~|9d*DVh zE=YVAGoHA9#yy=&xgS|o@8#_^17byC{;;X7=0W?P()xZIYH zqcRr!p9Od}Fh4gNTY2e0FDeh@ROr{ZRmv@rTwI9#cw)zf>{;-b9MW)WGS0`7t2w<7 zGMZN|M%;(Zk^;tnj4BUOKS??tr3Yo}pGFhCadC);E8r<_TXx@jBf`^D+1HClhj&ax zlkpU=SrPtOaW!|@3=44){rw#?np*X+(Q`p`NmA0m_iGqGdjc6jqHcW5)jh{tR z-Cews9UJ zFQ;c1;ug(7J(KjXO7H2gx)$Kv%|7x8wQ=f=_W`y39NV5@@&zVeWb#KyvMr8Ef3HEd z5&8xxF#}CGuh@J&6%97z+nc(MO^`L7;Ysp*Z@Sey%Rz}EG=&yt&9coDJPdF@#=W0F zmlNdPqbN{b8M&dQ@}ZcB(bG82jNB;z!xyuYJdRD3MJJEe4LE*8aitDc@6BPIcC7cy zqrgiP*UKo37!kPsRQDOEhau75;ch<+4SQYynD7|bQ;yW11sl?cO>MIsEX_~@`h8O& zNT^%{0CA8HJIFs%-cV0E9T>UyR+WQu=S4+`nTKO}KAAsbMg3*WH<|A3BV&(*`^x&O zZ1vLJW^5#%Uk|VcMW3rJwuWbHzJ@OP&zP{Y-eJPQj+dv$ZPsDlJILf#COoU7=a`H# zUQ$xQQVm-Z(7Wljww%RE_om6Ry)Z$S&&pecwZzW-DcTZ8nEiGXD>*>Wh%tjP{(2TS z&yv61h64R-obv$3%pfPbx2y5Mm z0+te_U(IW5Wvl>oAcfZmY(}>v+X8oLvD0N{@%j8I^@`B9Ewh8;>!NARkPbfW<<#7@FJHL&^mkr)=J~6C z`NuDQ|AlK;UH}q$`CH$6@o%4h>6stA^7O@PKYsqDFMjI9AAa}h@BPWur@ztfEI12j z=K&0pDo0Uqf{p~^+`}95NV3f@eTEHBFd5^BD%yA+Av^cPQ!WMYz#g-6>kuxu@dh!@ zD&e$O^hz5YrbM-Q?h%v3bEQGj37d-?!ha|aV@5IM>wXSw7Ulg#Bt mgu+qTbPbPzF^rH3EY%79)TU(NR1{MDZBWiFO!)=m#s30!a$(*8 diff --git a/src/tree.py b/src/tree.py deleted file mode 100644 index 0c58d32..0000000 --- a/src/tree.py +++ /dev/null @@ -1,425 +0,0 @@ -import json -import logging -import os -import re -import subprocess -import sys -import uuid -import re - -id_set = set() -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -handler = logging.StreamHandler(sys.stdout) -formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') -handler.setFormatter(formatter) -logger.addHandler(handler) - - -def search_author(author_dict, username): - for key in author_dict: - names = author_dict[key] - if username in names: - return key - return username - - -def user_name(md_file, author_dict): - ret = subprocess.Popen([ - "git", "log", md_file - ], stdout=subprocess.PIPE) - lines = list(map(lambda l: l.decode(), ret.stdout.readlines())) - author_lines = [] - for line in lines: - if line.startswith('Author'): - author_lines.append(line.split(' ')[1]) - author_nick_name = author_lines[-1] - return search_author(author_dict, author_nick_name) - - -def load_json(p): - with open(p, 'r', encoding="utf-8") as f: - return json.loads(f.read()) - - -def dump_json(p, j, exist_ok=False, override=False): - if os.path.exists(p): - if exist_ok: - if not override: - return - else: - logger.error(f"{p} already exist") - sys.exit(0) - - with open(p, 'w+', encoding="utf8") as f: - f.write(json.dumps(j, indent=2, ensure_ascii=False)) - - -def ensure_config(path): - config_path = os.path.join(path, "config.json") - if not os.path.exists(config_path): - node = {"keywords": []} - dump_json(config_path, node, exist_ok=True, override=False) - return node - else: - return load_json(config_path) - - -def parse_no_name(d): - p = r'(\d+)\.(.*)' - m = re.search(p, d) - - try: - no = int(m.group(1)) - dir_name = m.group(2) - except: - sys.exit(0) - - return no, dir_name - - -def check_export(base, cfg): - flag = False - exports = [] - for export in cfg.get('export', []): - ecfg_path = os.path.join(base, export) - if os.path.exists(ecfg_path): - 
-            exports.append(export)
-        else:
-            flag = True
-    if flag:
-        cfg["export"] = exports
-    return flag
-
-
-class TreeWalker:
-    def __init__(
-            self, root,
-            tree_name,
-            title=None,
-            log=None,
-            authors=None,
-            enable_notebook=None,
-            ignore_keywords=False
-    ):
-        self.ignore_keywords = ignore_keywords
-        self.authors = authors if authors else {}
-        self.enable_notebook = enable_notebook
-        self.name = tree_name
-        self.root = root
-        self.title = tree_name if title is None else title
-        self.tree = {}
-        self.logger = logger if log is None else log
-
-    def walk(self):
-        root = self.load_root()
-        root_node = {
-            "node_id": root["node_id"],
-            "keywords": root["keywords"],
-            "children": [],
-            "keywords_must": root["keywords_must"],
-            "keywords_forbid": root["keywords_forbid"]
-        }
-        self.tree[root["tree_name"]] = root_node
-        self.load_levels(root_node)
-        self.load_chapters(self.root, root_node)
-        for index, level in enumerate(root_node["children"]):
-            level_title = list(level.keys())[0]
-            level_node = list(level.values())[0]
-            level_path = os.path.join(self.root, f"{index + 1}.{level_title}")
-            self.load_chapters(level_path, level_node)
-            for index, chapter in enumerate(level_node["children"]):
-                chapter_title = list(chapter.keys())[0]
-                chapter_node = list(chapter.values())[0]
-                chapter_path = os.path.join(
-                    level_path, f"{index + 1}.{chapter_title}")
-                self.load_sections(chapter_path, chapter_node)
-                for index, section_node in enumerate(chapter_node["children"]):
-                    section_title = list(section_node.keys())[0]
-                    full_path = os.path.join(
-                        chapter_path, f"{index + 1}.{section_title}")
-                    if os.path.isdir(full_path):
-                        self.check_section_keywords(full_path)
-                        self.ensure_exercises(full_path)
-
-        tree_path = os.path.join(self.root, "tree.json")
-        dump_json(tree_path, self.tree, exist_ok=True, override=True)
-        return self.tree
-
-    def sort_dir_list(self, dirs):
-        result = [self.extract_node_env(dir) for dir in dirs]
-        result.sort(key=lambda item: item[0])
-        return result
-
-    def load_levels(self, root_node):
-        levels = []
-        for level in os.listdir(self.root):
-            level_path = os.path.join(self.root, level)
-            if not os.path.isdir(level_path):
-                continue
-            num, config = self.load_level_node(level_path)
-            levels.append((num, config))
-
-        levels = self.resort_children(self.root, levels)
-        root_node["children"] = [item[1] for item in levels]
-        return root_node
-
-    def load_level_node(self, level_path):
-        config = self.ensure_level_config(level_path)
-        num, name = self.extract_node_env(level_path)
-
-        result = {
-            name: {
-                "node_id": config["node_id"],
-                "keywords": config["keywords"],
-                "children": [],
-                "keywords_must": config["keywords_must"],
-                "keywords_forbid": config["keywords_forbid"]
-            }
-        }
-
-        return num, result
-
-    def load_chapters(self, base, level_node):
-        chapters = []
-        for name in os.listdir(base):
-            full_name = os.path.join(base, name)
-            if os.path.isdir(full_name):
-                num, chapter = self.load_chapter_node(full_name)
-                chapters.append((num, chapter))
-
-        chapters = self.resort_children(base, chapters)
-        level_node["children"] = [item[1] for item in chapters]
-        return level_node
-
-    def load_sections(self, base, chapter_node):
-        sections = []
-        for name in os.listdir(base):
-            full_name = os.path.join(base, name)
-            if os.path.isdir(full_name):
-                num, section = self.load_section_node(full_name)
-                sections.append((num, section))
-
-        sections = self.resort_children(base, sections)
-        chapter_node["children"] = [item[1] for item in sections]
-        return chapter_node
-
-    def resort_children(self, base, children):
-        children.sort(key=lambda item: item[0])
-        for index, [number, element] in enumerate(children):
-            title = list(element.keys())[0]
-            origin = os.path.join(base, f"{number}.{title}")
-            posted = os.path.join(base, f"{index + 1}.{title}")
-            if origin != posted:
-                self.logger.info(f"rename [{origin}] to [{posted}]")
-                os.rename(origin, posted)
-        return children
-
-    def ensure_chapters(self):
-        for subdir in os.listdir(self.root):
-            self.ensure_level_config(subdir)
-
-    def load_root(self):
-        config_path = os.path.join(self.root, "config.json")
-        if not os.path.exists(config_path):
-            config = {
-                "tree_name": self.name,
-                "keywords": [],
-                "node_id": self.gen_node_id(),
-                "keywords_must": [],
-                "keywords_forbid": []
-            }
-            dump_json(config_path, config, exist_ok=True, override=True)
-        else:
-            config = load_json(config_path)
-            flag, result = self.ensure_node_id(config)
-            if flag:
-                dump_json(config_path, result, exist_ok=True, override=True)
-
-        return config
-
-    def ensure_level_config(self, path):
-        config_path = os.path.join(path, "config.json")
-        if not os.path.exists(config_path):
-            config = {
-                "node_id": self.gen_node_id()
-            }
-            dump_json(config_path, config, exist_ok=True, override=True)
-        else:
-            config = load_json(config_path)
-            flag, result = self.ensure_node_id(config)
-            if flag:
-                dump_json(config_path, config, exist_ok=True, override=True)
-        return config
-
-    def ensure_chapter_config(self, path):
-        config_path = os.path.join(path, "config.json")
-        if not os.path.exists(config_path):
-            config = {
-                "node_id": self.gen_node_id(),
-                "keywords": [],
-                "keywords_must": [],
-                "keywords_forbid": []
-            }
-            dump_json(config_path, config, exist_ok=True, override=True)
-        else:
-            config = load_json(config_path)
-            flag, result = self.ensure_node_id(config)
-            if flag:
-                dump_json(config_path, config, exist_ok=True, override=True)
-        return config
-
-    def ensure_section_config(self, path):
-        config_path = os.path.join(path, "config.json")
-        if not os.path.exists(config_path):
-            config = {
-                "node_id": self.gen_node_id(),
-                "keywords": [],
-                "children": [],
-                "export": []
-            }
-            dump_json(config_path, config, exist_ok=True, override=True)
-        else:
-            config = load_json(config_path)
-            flag, result = self.ensure_node_id(config)
-            if flag:
-                dump_json(config_path, result, exist_ok=True, override=True)
-        return config
-
-    def ensure_node_id(self, config):
-        flag = False
-        if "node_id" not in config or \
-                not config["node_id"].startswith(f"{self.name}-") or \
-                config["node_id"] in id_set:
-            new_id = self.gen_node_id()
-            id_set.add(new_id)
-            config["node_id"] = new_id
-            flag = True
-
-        for child in config.get("children", []):
-            child_node = list(child.values())[0]
-            f, _ = self.ensure_node_id(child_node)
-            flag = flag or f
-
-        return flag, config
-
-    def gen_node_id(self):
-        return f"{self.name}-{uuid.uuid4().hex}"
-
-    def extract_node_env(self, path):
-        try:
-            _, dir = os.path.split(path)
-            self.logger.info(path)
-            number, title = dir.split(".", 1)
-            return int(number), title
-        except Exception as error:
-            self.logger.error(f"Failed to parse directory [{path}]: invalid structure, the number prefix may be missing")
-            # sys.exit(1)
-            raise error
-
-    def load_chapter_node(self, full_name):
-        config = self.ensure_chapter_config(full_name)
-        num, name = self.extract_node_env(full_name)
-        result = {
-            name: {
-                "node_id": config["node_id"],
-                "keywords": config["keywords"],
-                "children": [],
-                "keywords_must": config["keywords_must"],
-                "keywords_forbid": config["keywords_forbid"]
-            }
-        }
-        return num, result
-
-    def load_section_node(self, full_name):
-        config = self.ensure_section_config(full_name)
-        num, name = self.extract_node_env(full_name)
-        result = {
-            name: {
-                "node_id": config["node_id"],
-                "keywords": config["keywords"],
-                "children": config.get("children", []),
-                "keywords_must": config["keywords_must"],
-                "keywords_forbid": config["keywords_forbid"]
-            }
-        }
-        # if "children" in config:
-        #     result["children"] = config["children"]
-        return num, result
-
-    def ensure_exercises(self, section_path):
-        config = self.ensure_section_config(section_path)
-        flag = False
-        for e in os.listdir(section_path):
-            base, ext = os.path.splitext(e)
-            _, source = os.path.split(e)
-            if ext != ".md":
-                continue
-            mfile = base + ".json"
-            meta_path = os.path.join(section_path, mfile)
-            md_file = os.path.join(section_path, e)
-            self.ensure_exercises_meta(meta_path, source, md_file)
-            export = config.get("export", [])
-            if mfile not in export and self.name != "algorithm":
-                export.append(mfile)
-                flag = True
-                config["export"] = export
-
-        if flag:
-            dump_json(os.path.join(section_path, "config.json"),
-                      config, True, True)
-
-        for e in config.get("export", []):
-            full_name = os.path.join(section_path, e)
-            exercise = load_json(full_name)
-            if "exercise_id" not in exercise or exercise.get("exercise_id") in id_set:
-                eid = uuid.uuid4().hex
-                exercise["exercise_id"] = eid
-                dump_json(full_name, exercise, True, True)
-            else:
-                id_set.add(exercise["exercise_id"])
-
-    def ensure_exercises_meta(self, meta_path, source, md_file):
-        _, mfile = os.path.split(meta_path)
-        meta = None
-        if os.path.exists(meta_path):
-            with open(meta_path) as f:
-                content = f.read()
-                if content:
-                    meta = json.loads(content)
-                    if "exercise_id" not in meta:
-                        meta["exercise_id"] = uuid.uuid4().hex
-                    if "notebook_enable" not in meta:
-                        meta["notebook_enable"] = self.default_notebook()
-                    if "source" not in meta:
-                        meta["source"] = source
-                    if "author" not in meta:
-                        meta["author"] = user_name(md_file, self.authors)
-                    if "type" not in meta:
-                        meta["type"] = "code_options"
-
-        if meta is None:
-            meta = {
-                "type": "code_options",
-                "author": user_name(md_file, self.authors),
-                "source": source,
-                "notebook_enable": self.default_notebook(),
-                "exercise_id": uuid.uuid4().hex
-            }
-        dump_json(meta_path, meta, True, True)
-
-    def default_notebook(self):
-        if self.enable_notebook is not None:
-            return self.enable_notebook
-        if self.name in ["python", "java", "c"]:
-            return True
-        else:
-            return False
-
-    def check_section_keywords(self, full_path):
-        if self.ignore_keywords:
-            return
-        config = self.ensure_section_config(full_path)
-        if not config.get("keywords", []):
-            self.logger.error(f"Node [{full_path}] has no keywords; please add keywords to its config file")
-            sys.exit(1)
--
GitLab
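
Usage after this patch, as a minimal sketch (not part of the commit): the
walker is now resolved from the published skill-tree-parser package instead
of the vendored src/tree.py. This sketch assumes the package keeps the same
TreeWalker constructor and walk() entry point as the removed module, which
is what the updated main.py relies on:

    # Assumed install command for the pinned dependency from requirements.txt:
    #   pip install "skill-tree-parser~=0.0.1"
    from skill_tree.tree import TreeWalker

    if __name__ == '__main__':
        # Same arguments as the repository's main.py:
        # data directory, tree name, and display title.
        walker = TreeWalker("data", "pg", "PostgreSQL")
        # In the removed src/tree.py, walk() renumbered the
        # level/chapter/section directories, normalized each config.json,
        # and wrote the aggregated tree.json under the data directory;
        # the packaged version is assumed to behave the same way.
        walker.walk()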