Skip to content

Commit

Permalink
example text changed
Browse files — browse the repository at this point in the history
  • Branch information: (not captured — page was saved while still loading)
KoichiYasuoka committed Jul 4, 2024
1 parent 469bacc commit ef5c03d
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 6 deletions.
6 changes: 3 additions & 3 deletions demo/2024-07-05/ja-gpt2-upos.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
"!pip install transformers deplacy\n",
"from transformers import pipeline\n",
"nlp=pipeline(\"token-classification\",\"KoichiYasuoka/gpt2-small-japanese-upos\")\n",
"txt=\"国境の長いトンネルを抜けると雪国であった\"\n",
"txt=\"どこの村でも当然だった\"\n",
"doc=\"\\n\".join(\"\\t\".join([str(i+1),txt[t[\"start\"]:t[\"end\"]],\"_\",t[\"entity\"].split(\"|\")[0]]+[\"_\"]*5+[\"SpaceAfter=No\"]) for i,t in enumerate(nlp(txt)))+\"\\n\\n\"\n",
"import deplacy\n",
"deplacy.serve(doc,port=None)"
Expand All @@ -38,7 +38,7 @@
"metadata":{ "colab_type":"code" },
"source": [
"nlp=pipeline(\"upos\",\"KoichiYasuoka/gpt2-small-japanese-upos\",trust_remote_code=True)\n",
"txt=\"国境の長いトンネルを抜けると雪国であった\"\n",
"txt=\"どこの村でも当然だった\"\n",
"doc=\"\\n\".join(\"\\t\".join([str(i+1),txt[t[\"start\"]:t[\"end\"]],\"_\",t[\"entity\"].split(\"|\")[0]]+[\"_\"]*5+[\"SpaceAfter=No\"]) for i,t in enumerate(nlp(txt)))+\"\\n\\n\"\n",
"import deplacy\n",
"deplacy.serve(doc,port=None)"
Expand All @@ -58,7 +58,7 @@
"!pip install esupar\n",
"import esupar\n",
"nlp=esupar.load(\"KoichiYasuoka/gpt2-small-japanese-upos\")\n",
"txt=\"国境の長いトンネルを抜けると雪国であった\"\n",
"txt=\"どこの村でも当然だった\"\n",
"doc=nlp(txt)\n",
"import deplacy\n",
"deplacy.serve(doc,port=None)"
Expand Down
6 changes: 3 additions & 3 deletions demo/2024-07-05/ja-swallow-upos.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
" def check_model_type(self,supported_models):\n",
" pass\n",
"nlp=TCP(model=mdl,tokenizer=tkz)\n",
"txt=\"どこの村でも作っていた\"\n",
"txt=\"どこの村でも当然だった\"\n",
"doc=\"\\n\".join(\"\\t\".join([str(i+1),txt[t[\"start\"]:t[\"end\"]],\"_\",t[\"entity\"].split(\"|\")[0]]+[\"_\"]*5+[\"SpaceAfter=No\"]) for i,t in enumerate(nlp(txt)))+\"\\n\\n\"\n",
"import deplacy\n",
"deplacy.serve(doc,port=None)"
Expand All @@ -45,7 +45,7 @@
"source": [
"tkz=AutoTokenizer.from_pretrained(\"KoichiYasuoka/Swallow-MS-7b-char-upos\")\n",
"nlp=TCP(model=mdl,tokenizer=tkz)\n",
"txt=\"どこの村でも作っていた\"\n",
"txt=\"どこの村でも当然だった\"\n",
"doc=\"\\n\".join(\"\\t\".join([str(i+1),txt[t[\"start\"]:t[\"end\"]],\"_\",t[\"entity\"].split(\"|\")[0]]+[\"_\"]*5+[\"SpaceAfter=No\"]) for i,t in enumerate(nlp(txt)))+\"\\n\\n\"\n",
"deplacy.serve(doc,port=None)"
]
Expand Down Expand Up @@ -100,7 +100,7 @@
" t[\"text\"]=model_outputs[\"sentence\"][t[\"start\"]:t[\"end\"]]\n",
" return w\n",
"nlp=BFP(model=mdl,tokenizer=tkz)\n",
"txt=\"どこの村でも作っていた\"\n",
"txt=\"どこの村でも当然だった\"\n",
"doc=\"\\n\".join(\"\\t\".join([str(i+1),txt[t[\"start\"]:t[\"end\"]],\"_\",t[\"entity\"].split(\"|\")[0]]+[\"_\"]*5+[\"SpaceAfter=No\"]) for i,t in enumerate(nlp(txt)))+\"\\n\\n\"\n",
"deplacy.serve(doc,port=None)"
]
Expand Down

0 comments on commit ef5c03d

Please sign in to comment.