-
Notifications
You must be signed in to change notification settings - Fork 93
/
tokenize_chinese.py
32 lines (27 loc) · 1.21 KB
/
tokenize_chinese.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
"""Chinese text tokenization using jieba package - https://github.com/fxsjy/jieba"""
from typing import Union, List
from h2oaicore.data import CustomData
import datatable as dt
import numpy as np
import pandas as pd
# Names of text columns to tokenize; when left empty, the recipe falls back
# to tokenizing the first column of the incoming frame (see create_data).
cols_to_tokenize = []
# Extra pip packages the recipe runtime installs before running this recipe.
_global_modules_needed_by_name = ["jieba==0.42.1"]
class TokenizeChiense(CustomData):
    """Data recipe that tokenizes Chinese text columns with jieba.

    Columns listed in the module-level ``cols_to_tokenize`` are tokenized;
    if that list is empty, the first column of the input frame is used.

    NOTE(review): the class name keeps the original "Chiense" spelling
    (likely a typo for "Chinese") to preserve the recipe's external name.
    """

    @staticmethod
    def create_data(X: dt.Frame = None) -> Union[str, List[str],
                                                 dt.Frame, List[dt.Frame],
                                                 np.ndarray, List[np.ndarray],
                                                 pd.DataFrame, List[pd.DataFrame]]:
        """Replace each target column with its whitespace-joined jieba tokens.

        :param X: input frame; ``None`` when the recipe is invoked as a data
                  upload rather than a data modify.
        :return: ``[]`` when ``X`` is None, otherwise a ``dt.Frame`` with the
                 tokenized columns.
        """
        # Exit gracefully if method is called as a data upload rather than data modify
        if X is None:
            return []
        # Deferred import: jieba is only installed on demand via
        # _global_modules_needed_by_name, so don't import it at module load.
        import jieba
        X = dt.Frame(X).to_pandas()
        # Work on a local copy so repeated calls do not accumulate stale
        # column names in the module-level list (the original appended the
        # first call's fallback column to the shared global).
        cols = list(cols_to_tokenize) or [X.columns[0]]
        for col in cols:
            # fillna BEFORE the string cast: casting first turns NaN into the
            # literal string 'nan', so the original fillna(u'NA') never fired.
            # astype(str) replaces astype('unicode'), whose dtype alias was
            # removed in NumPy 2.0; the resulting values are identical.
            X[col] = X[col].fillna(u'NA').astype(str)
            X[col] = X[col].apply(lambda x: " ".join(tok[0] for tok in jieba.tokenize(x)))
        return dt.Frame(X)