main_tplclosest.py
# main_tplclosest.py
# Reads a DBLP skill/expert graph (GML), keeps the skills that have enough experts,
# and then solves each task in a task file with Algorithms.tfs. For every task it
# records the chosen team in a "*-teams2.txt" file and, every `runs` tasks, appends
# a row of averaged quality metrics (time, cardinality, diameter, distances) to a
# "*-results2.txt" file.
import time

from tqdm import tqdm

import Algorithms
import main_rarestfirst
import utilities


def main_run(algori):
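    """Run the experiment for one algorithm label.

    `algori` is only used to name the output files; the teams themselves are
    always built by Algorithms.tfs. For each network the function loads the GML
    graph of experts and skills, selects the set `cs` of usable skills, solves
    every admissible task from the "<network>-<tot_tasks>-0.txt" task file, and
    writes the teams and averaged metrics to "*-teams2.txt" / "*-results2.txt".
    """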
    import networkx as nx
    year = "2015"
    # for network in ["db"]:
    results = main_rarestfirst.Results()
    networks = ["dblp"]
    # networks = ["vldb", "sigmod", "icde", "icdt", "edbt", "pods", "www", "kdd", "sdm", "pkdd", "icdm", "icml",
    #             "ecml", "colt", "uai", "soda", "focs", "stoc", "stacs", "db", "dm", "ai", "th", "dblp"]
    # , "sigmod", "icde", "icdt", "edbt", "pods"
    cs = set()
    for network in tqdm(networks):
        # vldb = nx.read_gml("../dblp-" + year + "/vldb.gml")
        graph = nx.read_gml("../dblp-" + year + "/" + network + ".gml")
        skill_freq = dict()
        total = 0
        skill_experts = utilities.get_skill_experts_dict(graph)
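        # Count, for every skill, how many experts hold it: skill_freq histograms
        # skills by expert count, and total sums all (skill, expert) assignments so
        # the average number of experts per skill can be computed below.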
        for skill in skill_experts:
            if len(skill_experts[skill]) in skill_freq:  # skill with same number of experts
                skill_freq[len(skill_experts[skill])] += 1
            else:
                skill_freq[len(skill_experts[skill])] = 1
            total += len(skill_experts[skill])
        experts_per_skill = round(total / len(skill_experts), 2)
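        # A skill goes into cs if it has at least the average number of experts;
        # below-average ("rare") skills are still kept when they have more than
        # 3 experts, and dropped otherwise.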
        for skill in skill_experts:
            if len(skill_experts[skill]) >= experts_per_skill:
                cs.add(skill)
            else:
                if len(skill_experts[skill]) <= 3:
                    # rare_skills.add(skill)
                    pass
                else:
                    cs.add(skill)  # rare skills
        print(network)
        graph = nx.read_gml("../dblp-" + year + "/" + network + ".gml")
        # skills_name_id_dict = dict()
        # with open("../dblp-" + year + "/" + network + "-titles.txt") as file:
        runs = 10
        tot_tasks = 170
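        # runs: number of tasks averaged into each record of the results file;
        # tot_tasks: identifies which "<network>-<tot_tasks>-0.txt" task file is read.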
open("../dblp-" + year + "/" + network + "-" + str(tot_tasks) + "-0-" + algori + "-results2.txt", "w").close()
heading = results.get_heading()
open("../dblp-" + year + "/" + network + "-" + str(tot_tasks) + "-0-" + algori + "-results2.txt", "a").write(
heading + "\n")
open("../dblp-" + year + "/" + network + "-" + str(tot_tasks) + "-0-" + algori + "-teams2.txt", "w").close()
with open("../dblp-" + year + "/" + network + "-" + str(tot_tasks) + "-0.txt", "r") as file:
n_lines = utilities.get_num_lines("../dblp-" + year +"/" + network + "-" + str(tot_tasks) + "-0.txt")
crun = 0 # cu
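            # Each line of the task file is one task: a whitespace-separated list
            # of required skills. Tasks that need any skill outside cs are skipped.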
            for line in tqdm(file, total=n_lines):
                crun += 1
                # task = dblp_data.get_task_from_title_graph(graph, line.strip("\n").split("\t")[1])
                task = line.strip("\n").split()
                # print(task)
                if len(set(task).intersection(cs)) < len(task):
                    # print(task)
                    continue
                record = ""
                start_time = time.time()
                team = Algorithms.tfs(graph, task, 2, 2)
                end_time = time.time()
                tg = team.get_team_graph(graph)
                # show_graph(tg)
                results.task_size += len(task)
                results.tot_time += end_time - start_time
                results.cardinality += team.cardinality()
                # results.radius += team.radius(tg)
                results.radius += 0
                results.diameter += team.diameter(tg)
                results.leader_distance += team.leader_distance(tg)
                results.leader_skill_distance += team.leader_skill_distance(tg, task)
                results.sum_distance += team.sum_distance(tg, task)
                # results.shannon_task_diversity += team.shannon_task_diversity(graph)
                # results.shannon_team_diversity += team.shannon_team_diversity(graph)
                # results.simpson_task_diversity += team.simpson_diversity(graph, False)  # task diversity
                # results.simpson_team_diversity += team.simpson_diversity(graph, True)
                # results.gini_simpson_task_diversity += team.gini_simpson_diversity(graph, False)  # task diversity
                # results.gini_simpson_team_diversity += team.gini_simpson_diversity(graph, True)
                open("../dblp-" + year + "/" + network + "-" + str(tot_tasks) + "-0-" + algori +
                     "-teams2.txt", "a").write(",".join(sorted(team.experts)) + "\n")
                if crun % runs == 0:
                    record += str(results.task_size / runs)
                    record += "\t" + str(round(results.tot_time / runs, 3))
                    record += "\t" + str(results.cardinality / runs)
                    record += "\t" + str(results.radius / runs)
                    record += "\t" + str(results.diameter / runs)
                    record += "\t" + str(results.leader_distance / runs)
                    record += "\t" + str(results.leader_skill_distance / runs)
                    record += "\t" + str(results.sum_distance / runs)
                    # record += "\t" + str(results.shannon_task_diversity / runs)
                    # record += "\t" + str(results.shannon_team_diversity / runs)
                    # # record += "\t" + str(team.simpson_task_density(graph))
                    # # record += "\t" + str(team.simpson_team_density(graph))
                    # record += "\t" + str(results.simpson_task_diversity / runs)  # task diversity
                    # record += "\t" + str(results.simpson_team_diversity / runs)
                    # record += "\t" + str(results.gini_simpson_task_diversity / runs)  # task diversity
                    # record += "\t" + str(results.gini_simpson_team_diversity / runs)
                    open("../dblp-" + year + "/" + network + "-" + str(tot_tasks) + "-0-" + algori + "-results2.txt",
                         "a").write(record + "\n")
                    results.clean_it()


def multiprocessing_func(algo):
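    """Wrapper so main_run can be used as a multiprocessing.Process target."""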
    main_run(algo)


def print_hi(name):
    # Use a breakpoint in the code line below to debug your script.
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    begin_time = time.time()
    main_run("tfs")
    # processes = []
    # for alg in ["rfs"]:
    #     p = multiprocessing.Process(target=multiprocessing_func, args=(alg,))
    #     processes.append(p)
    #     p.start()
    # for process in processes:
    #     process.join()
    tqdm.write('Time taken = {} seconds'.format(time.time() - begin_time))