run_extract_landmarks.py
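
"""Extract landmark phrases from navigation instructions with an LLM.

Loads map2seq or Touchdown instances, fills a few-shot prompt template
from llm/prompts/{dataset_name}/landmarks/ with each instance's
navigation text, and writes the raw ("unfiltered") landmark lists to
{exp_name}_unfiltered.json in the output directory.

Example invocation (the API key value is a placeholder):
    python run_extract_landmarks.py --dataset_name map2seq --split dev \
        --scenario unseen --prompt_file 5shot.txt --api_key YOUR_KEY
"""
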
import json
import os
import argparse

from llm.query_llm import OpenAI_LLM
from vln.utils import load_dataset

parser = argparse.ArgumentParser(description='Define experiment parameters')
parser.add_argument('--datasets_dir', default='./datasets', type=str)
parser.add_argument('--dataset_name', default='map2seq', type=str)
parser.add_argument('--split', default='dev', type=str)
parser.add_argument('--scenario', default='unseen', type=str)
parser.add_argument('--exp_name', default='5shot', type=str) # used to name the output file
parser.add_argument('--model_name', default='openai/text-davinci-003', type=str)
parser.add_argument('--api_key', default='', type=str) # OpenAI API key
parser.add_argument('--num_instances', default=-1, type=int) # -1 for all instances
parser.add_argument('--max_tokens', default=1024, type=int) # api parameter
parser.add_argument('--prompt_file', default='5shot.txt', type=str) # filename in llm/prompts/{dataset_name}/landmarks/
parser.add_argument('--output_dir', default='./outputs', type=str)
opts = parser.parse_args()

# settings
exp_name = opts.exp_name
split = opts.split
scenario = opts.scenario
dataset_name = opts.dataset_name
max_tokens = opts.max_tokens
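# '--model_name' has the form '<provider>/<model>',
# e.g. 'openai/text-davinci-003' -> ('openai', 'text-davinci-003')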
model = tuple(opts.model_name.split('/'))
prompt_file = opts.prompt_file
num_instances = opts.num_instances
api_key = opts.api_key


def main():
    dataset_dir = os.path.join(opts.datasets_dir, dataset_name + '_' + scenario)
    output_dir = os.path.join(opts.output_dir, dataset_name, 'landmarks', model[1], exp_name)
    os.makedirs(output_dir, exist_ok=True)
    results_file = os.path.join(output_dir, f'{exp_name}_unfiltered.json')
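
    # OpenAI_LLM caches completions on disk (cache_name='landmarks');
    # listing 'length' as an accepted finish reason keeps truncated
    # completions instead of discarding them.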
    llm = OpenAI_LLM(max_tokens=opts.max_tokens,
                     model_name=model[-1],
                     api_key=opts.api_key,
                     cache_name='landmarks',
                     finish_reasons=['stop', 'length'])

    prompt_template = get_prompt_template(dataset_name, prompt_file)

    results = dict()
    results['model'] = model
    results['prompt_template'] = prompt_template
    results['max_tokens'] = max_tokens
    results['instances'] = dict()
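    # Resume support: an existing results file replaces the dict built
    # above, so already-processed instances are carried over.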
    if os.path.isfile(results_file):
        with open(results_file) as f:
            results = json.load(f)
    print('results so far: ', len(results['instances']))

    data = load_dataset(split, dataset_dir)
    if num_instances > 0:
        data = data[:num_instances]
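
    # One LLM query per instance; ids already in the results file are
    # skipped, so an interrupted run can simply be restarted.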
    for i, instance in enumerate(data):
        print('results so far: ', len(results['instances']))
        if dataset_name == 'touchdown':
            instance['id'] = instance['route_id']
        idx = str(instance['id'])
        if idx in results['instances']:
            print('skip')
            continue
        print(i, 'number of instances processed')
        print('idx', instance['id'])
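        # Save the response cache before exiting on Ctrl-C so completed
        # queries are not sent (and paid for) again on the next run.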
        try:
            result = get_landmarks(instance, llm, prompt_template)
        except KeyboardInterrupt:
            llm.save_cache()
            exit()
        idx = str(result['id'])
        results['instances'][idx] = result
        print(len(results['instances']))

    llm.save_cache()
    with open(results_file, 'w') as f:
        json.dump(results, f, indent=1)
    print('queried_tokens', llm.queried_tokens)
    print('wrote results to: ', results_file)


def get_prompt_template(dataset_name, prompt_file):
    path = os.path.join('llm', 'prompts', dataset_name, 'landmarks', prompt_file)
    with open(path) as f:
        return f.read()
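

# The prompt template is expected to contain a single '{}' placeholder
# that the instance's navigation instructions are formatted into.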
def get_landmarks(instance, llm, prompt_template):
    if dataset_name == 'map2seq':
        idx = instance["id"]
        instance_idx = 'map2seq_' + str(instance["id"])
        instructions_id = instance['instructions_id']
    else:
        idx = instance["route_id"]
        instance_idx = 'touchdown_' + str(instance["route_id"])
        instructions_id = instance["route_id"]

    instructions = instance['navigation_text']
    prompt = prompt_template.format(instructions)
    print('instance_idx', instance_idx)
    print('instructions', instructions)
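
    # get_sequence returns prompt and completion as one string; splitting
    # on the prompt isolates the model's completion.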
    sequence = llm.get_sequence(prompt, instance_idx)
    output = sequence.split(prompt)[1]
    unfiltered = get_unfiltered(output)
    print('output', output)
    print('unfiltered', unfiltered)

    result = dict(id=idx,
                  instructions_id=instructions_id,
                  instructions=instructions,
                  unfiltered=unfiltered)
    print('queried_tokens', llm.queried_tokens)
    print('')
    return result
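

# The model is expected to answer with a numbered list ("1. <landmark>"
# per line). Only lines whose leading number matches the running counter
# are kept, which drops any free-form text after the list.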
def get_unfiltered(sequence):
    sequence = sequence.split('\n')[1:]
    elements = list()
    if len(sequence) == 1 and sequence[0] is None:
        return [None]
    expected_num = 1
    for element in sequence:
        num = element.split('.')[0]
        try:
            num = int(num)
        except ValueError:
            pass
        if num == expected_num:
            element = ' '.join(element.split(str(num) + '.')[1:]).strip()
            elements.append(element)
            expected_num += 1
    return elements
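
# Illustrative call (hypothetical model output):
#   get_unfiltered('Landmarks:\n1. bank\n2. church\nDone.')  ->  ['bank', 'church']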


if __name__ == '__main__':
    main()