-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmultiprocess_skill_extraction.py
56 lines (49 loc) · 2.06 KB
/
multiprocess_skill_extraction.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import pandas as pd
import numpy as np
# skill extraction stack: spaCy pipeline + skillNer's phrase-matching extractor
import spacy
from spacy.matcher import PhraseMatcher
from skillNer.general_params import SKILL_DB
from skillNer.skill_extractor_class import SkillExtractor
# silence warnings (skillNer/pandas are noisy); NOTE(review): this also hides
# deprecation warnings that would flag the removed DataFrame.append API
import warnings
warnings.filterwarnings('ignore')
# parallel processing
from multiprocessing import Pool
# load the large English model once per process; each Pool worker re-imports
# this module and therefore pays this load cost once at startup
nlp = spacy.load("en_core_web_lg")
# init skill extractor backed by skillNer's bundled skill database
skill_extractor = SkillExtractor(nlp, SKILL_DB, PhraseMatcher)
def skill_extraction(df):
    """Extract skills from the 'Descriptions' column of *df*.

    Runs the module-level ``skill_extractor`` over every row and collects
    the results.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: (full matches, ngram-scored
        matches), each with columns ['skill_id', 'doc_node_value'].
    """
    full_frames = []
    ngram_frames = []
    n_rows = len(df)
    for i in range(n_rows):
        print("Progress: [{0:50s}] {1:.1f}%".format('#' * int(i / n_rows * 50), i / n_rows * 100), end='\r')
        try:
            # .iloc is required: chunks produced by np.array_split keep their
            # original (non-zero-based) index labels, so df['Descriptions'][i]
            # raised KeyError on every chunk except the first — and the old
            # bare `except: pass` silently discarded all of those rows.
            annotations = skill_extractor.annotate(df['Descriptions'].iloc[i])
        except Exception:
            # Best-effort: skip rows the extractor cannot annotate
            # (e.g. NaN / non-string descriptions).
            continue
        full_frames.append(pd.DataFrame(annotations['results']['full_matches'],
                                        columns=['skill_id', 'doc_node_value']))
        # BUG FIX: the original did `skills_df_ngram = skills_df.append(...)`,
        # appending ngram results onto the full-match frame instead of the
        # ngram frame, corrupting both outputs.
        ngram_frames.append(pd.DataFrame(annotations['results']['ngram_scored'],
                                         columns=['skill_id', 'doc_node_value']))
    # Concatenate once at the end: DataFrame.append was removed in pandas 2.0
    # and appending per-row is quadratic.
    empty = pd.DataFrame(columns=['skill_id', 'doc_node_value'])
    skills_df = pd.concat(full_frames, ignore_index=True) if full_frames else empty
    skills_df_ngram = pd.concat(ngram_frames, ignore_index=True) if ngram_frames else empty.copy()
    return skills_df, skills_df_ngram
# run function in parallel
if __name__ == '__main__':
    # load scraped job descriptions (must contain a 'Descriptions' column)
    df = pd.read_csv('webscraping_results_assignment3.csv')
    pool_size = 12
    # split data into one chunk per worker
    df_split = np.array_split(df, pool_size)
    # run skill extraction in parallel; each worker process re-imports this
    # module, so spacy / SkillExtractor are initialised once per process
    with Pool(pool_size) as p:
        results = p.map(skill_extraction, df_split)
    # combine per-chunk results in a single concat each — DataFrame.append
    # was removed in pandas 2.0 and per-chunk appends were quadratic
    full_match_df = pd.concat([full for full, _ in results], ignore_index=True)
    ngram_df = pd.concat([ngram for _, ngram in results], ignore_index=True)
    # save results
    full_match_df.to_csv('full_match_df.csv', index=False)
    ngram_df.to_csv('ngram_df.csv', index=False)