-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathpicogen.py
559 lines (502 loc) · 25.4 KB
/
picogen.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
#!/usr/bin/env python3
# Copyright (c) 2020-2021
# Marián Mižik <[email protected]>, Martin Hlavňa <[email protected]>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import glob
import argparse
import shutil
import re
import json
import unidecode
from enum import Enum
from datetime import datetime, timezone
class Log:
    """Colored console output helpers using universal ANSI escape sequences."""

    @staticmethod
    def _emit(stream, color_code, message):
        # Wrap the stripped message in '\x1b[1;<code>m ... \x1b[0m' and append a newline.
        stream.write('\x1b[1;' + color_code + 'm' + message.strip() + '\x1b[0m' + '\n')

    @staticmethod
    def err(message):
        """Print 'message' in red to stderr."""
        Log._emit(sys.stderr, '31', message)

    @staticmethod
    def ok(message):
        """Print 'message' in green to stdout."""
        Log._emit(sys.stdout, '32', message)

    @staticmethod
    def warn(message):
        """Print 'message' in yellow to stderr."""
        Log._emit(sys.stderr, '33', message)

    @staticmethod
    def info(message):
        """Print 'message' in the default color to stdout."""
        Log._emit(sys.stdout, '0', message)
class Protocol(Enum):
    """Supported output protocols plus helpers for name lookup, file suffix and URL scheme."""
    HTTP = 'http'
    GEMINI = 'gemini'

    @staticmethod
    def from_name(name):
        """Map a protocol name to its member; log an error (and return None) when unknown."""
        for member in Protocol:
            if member.value == name:
                return member
        Log.err(f'Protocol name detection failed: Unknown protocol: {name}')

    def file_suffix(self):
        """Return the file extension of the protocol's native document format."""
        suffixes = {Protocol.HTTP: 'html', Protocol.GEMINI: 'gmi'}
        if self in suffixes:
            return suffixes[self]
        Log.err(f'File suffix detection failed: Unknown protocol: {self}')

    def scheme(self, is_ssl):
        """Return the URL scheme; 'is_ssl' only matters for HTTP ('https' vs 'http')."""
        if self is Protocol.GEMINI:
            return 'gemini'
        if self is Protocol.HTTP:
            return 'https' if is_ssl else 'http'
        Log.err(f'Scheme detection failed: Unknown protocol: {self}')
def normalize_string(value):
    """Normalize a string: strip accents, lowercase it, and turn spaces into hyphens."""
    ascii_value = unidecode.unidecode(value)
    return ascii_value.lower().replace(" ", "-")
def read_file(path):
    """Return the full text content of the file at 'path'."""
    with open(path, 'r') as handle:
        return handle.read()
def write_to_file(path, value):
    """Create the parent directory structure if needed and write 'value' to file 'path'.

    Fixes: the original called os.makedirs(basedir) unguarded, which raised
    FileNotFoundError when 'path' had no directory component (basedir == '')
    and could race if the directory appeared between the isdir check and the
    makedirs call; 'exist_ok=True' makes creation idempotent.
    """
    basedir = os.path.dirname(path)
    if basedir:
        os.makedirs(basedir, exist_ok=True)
    with open(path, 'w') as f:
        f.write(value)
def parse_trailer(name, body, body_format):
    """Return the first paragraph from the post 'body' (shown in the indexes).

    Fixes over the original gemini branch:
    - no longer prepends a stray leading newline to the returned summary
      (the old code seeded the join with an empty string)
    - a paragraph that runs to the end of the body without a trailing blank
      line is now returned instead of implicitly yielding None
    The unknown-format fallback now returns '' instead of None so the summary
    never renders as the literal string "None" in templates.
    """
    if body_format is Protocol.GEMINI:
        # for gemini, skip leading blank lines, then collect until a blank line occurs
        paragraph = []
        for line in body.splitlines():
            if paragraph and not line:
                break
            if line:
                paragraph.append(line)
        return "\n".join(paragraph)
    elif body_format is Protocol.HTTP:
        # for html, get the first <p> tag value
        try:
            from bs4 import BeautifulSoup
            return BeautifulSoup(body, 'html.parser').find_all('p')[0].text
        except (ImportError, IndexError) as e:
            Log.err(f'Summarize failed for {name}. Reason: {e}')
            return ''
    else:
        Log.err(f"Summarize failed for {name}. Reason: Unknown body format: {body_format}")
        return ''
def fill(string_with_placeholders, **variables):
    """Replace every {{ x }} placeholder with str(variables['x']); unknown names stay as-is."""
    def substitute(match):
        key = match.group(1)
        if key in variables:
            return str(variables[key])
        # no matching variable: leave the placeholder text untouched
        return match.group(0)

    return re.sub(r'{{\s*([^}\s]+)\s*}}', substitute, string_with_placeholders)
def assemble_file_descriptor(path, cfg):
    """Assemble and return the file descriptor dictionary for the supplied 'path'.

    The descriptor holds the parsed '<!-- key : value -->' header annotations,
    the remaining document 'body', the file name/extension and date fields
    (including an RFC 3339 variant used by the atom feed).
    """
    descriptor = dict()
    # parse all annotations from the file beginning and add them to descriptor;
    # the alternation '|.+' matches the first non-annotation line, whose
    # group(1) is None and therefore ends the header scan
    f = read_file(path)
    for m in re.finditer(r'\s*<!--\s*(.+?)\s*:\s*(.+?)\s*-->\s*|.+', f):
        if not m.group(1):
            break
        descriptor[m.group(1)] = m.group(2)
    # the rest is document body
    # NOTE(review): assumes each annotation occupies exactly one leading line, so
    # skipping len(descriptor) newlines lands at the body — confirm with content format
    descriptor['body'] = f.split("\n", len(descriptor))[len(descriptor)]
    # add file name and extension
    # NOTE(review): split('.')[1] takes the FIRST dot-separated segment after the
    # name, so multi-dot filenames yield a partial extension — verify intent
    descriptor['file_name'] = os.path.basename(path).split('.')[0]
    descriptor['file_ext'] = os.path.basename(path).split('.')[1]
    # add creation date if not declared and its rfc3339 variant (for atom feed)
    descriptor['date'] = descriptor['date'] if 'date' in descriptor else '1970-01-01'
    date_object = datetime.strptime(descriptor['date'], '%Y-%m-%d').astimezone()
    if 'custom_date_format' in cfg:
        descriptor['formatted_date'] = date_object.strftime(cfg['custom_date_format'])
    descriptor['rfc3339_date'] = date_object.isoformat()
    return descriptor
def convert(descriptor, protocol):
    """Convert the descriptor 'body' to the protocol format when the file is markdown.

    Non-markdown descriptors are returned unchanged. Converter libraries are
    imported lazily so a page set without markdown content does not need them.

    Fix: the original imported commonmark/md2gemini OUTSIDE the try block, so
    the 'except ImportError' could never fire and a missing converter crashed
    the run instead of being logged; the imports now sit inside the try.
    """
    if descriptor['file_ext'] in ['markdown', 'md']:
        name = f"{descriptor['file_name']}.{descriptor['file_ext']}"
        if protocol == Protocol.HTTP:
            try:
                import commonmark
                descriptor['body'] = commonmark.commonmark(descriptor['body'])
            except ImportError as e:
                Log.err(f"Convert md => html for {name} failed. {e}")
        elif protocol == Protocol.GEMINI:
            try:
                import md2gemini
                descriptor['body'] = md2gemini.md2gemini(descriptor['body'], links='newline')
            except ImportError as e:
                Log.err(f"Convert md => gemini for {name} failed. {e}")
        else:
            suffix = protocol.file_suffix()
            Log.err(f"Convert md => {suffix} for {name} failed: Unsupported format.")
    return descriptor
def fill_taxonomy_value_post_index(protocol, config, t_value, t_cfg, variables, templates, i_cfg, descriptors):
    """Fill a taxonomy value posts index (tvpi): the list of posts carrying one taxonomy value.

    Renders one 'item_template' per descriptor (sorted and limited per 'i_cfg'),
    joins the results into 'body', applies optional user-defined custom
    variables, and returns the filled 'template'.
    """
    # variables specific to this taxonomy value, available inside the templates
    t_value_variables = {
        'taxonomy_value': t_value,
        'taxonomy_value_lower': t_value.lower(),
        'taxonomy_value_normalized': normalize_string(t_value),
        'title': f"{t_cfg['title']} {t_value}",
    }
    item_outputs = []
    # sort and limit taxonomy values: default is by 'date', newest first
    order_by = i_cfg['order_by'] if 'order_by' in i_cfg else 'date'
    reverse = False if 'order_direction' in i_cfg and i_cfg['order_direction'] == 'asc' else True
    descriptors = sorted(descriptors, reverse=reverse, key=lambda descriptor: descriptor[order_by])
    limit = int(i_cfg['limit']) if 'limit' in i_cfg else len(descriptors)
    # fill, summarize and save index item templates
    for d in descriptors[0:limit]:
        # later dicts win on key collisions: descriptor values override config/variables
        v = {**config, **variables, **t_value_variables, **d}
        v['body'] = fill(d['body'], **v)
        name = f"{d['file_name']}.{d['file_ext']}"
        v['summary'] = parse_trailer(name, v['body'], protocol)
        item_outputs.append(fill(templates[i_cfg['item_template']], **v))
    # merge index item outputs
    t_value_variables['body'] = ''.join(item_outputs)
    # fill user defined custom_variables (they may reference the merged body)
    v = {**config, **variables, **t_value_variables}
    if 'custom_variables' in i_cfg:
        for variable in i_cfg['custom_variables']:
            t_value_variables[variable] = fill(i_cfg['custom_variables'][variable], **v)
    # rebuild the merged view so custom variables are visible to the final template
    v = {**config, **variables, **t_value_variables}
    return fill(templates[i_cfg['template']], **v)
def fill_taxonomy_value_index(config, t_cfg, templates, i_config, descriptors_by_taxonomy,
                              dynamic_vars, output_variables):
    """Fill a taxonomy value index (tvi): the list of all values of one taxonomy.

    Renders one 'item_template' per taxonomy value (optionally sorted and
    limited per 'i_config'), joins the bodies, and returns the filled
    'template'. When 'inlined_index_id' is configured, a previously generated
    index from 'output_variables' is inlined under every value.
    """
    # taxonomy specific output variables to use in template
    t_id = t_cfg['id']
    t_values = list(descriptors_by_taxonomy[t_id].keys())
    t_variables = {
        'taxonomy_id': t_id,
        'taxonomy_title': t_cfg['title'],
        'title': t_cfg['title'],
    }
    # NOTE(review): sorting only happens when 'order_direction' is configured —
    # an 'order_by' without 'order_direction' is silently ignored; confirm intent
    if 'order_direction' in i_config:
        # sort index based on config preference (alphabetically vs. count) in specified direction
        reverse = False if 'order_direction' in i_config and i_config['order_direction'] == 'asc' else True
        order_by_count = True if 'order_by' in i_config and i_config['order_by'] == 'count' else False
        t_values = sorted(
            t_values,
            reverse=reverse,
            key=lambda t_value: len(descriptors_by_taxonomy[t_id][t_value]) if order_by_count else t_value
        )
    limit = i_config['limit'] if 'limit' in i_config else len(t_values)
    tv_bodies = []
    # generate body for every taxonomy value
    for tv in t_values[0:int(limit)]:
        t_value_variables = {
            'taxonomy_value': tv,
            'taxonomy_value_lower': tv.lower(),
            'taxonomy_value_normalized': normalize_string(tv),
            'taxonomy_value_posts_count': len(descriptors_by_taxonomy[t_id][tv])
        }
        # add whole another generated index of choice if user configured to inline some index under every taxonomy value
        if 'inlined_index_id' in i_config:
            variable_name = f"{t_id}_{i_config['inlined_index_id']}" f"_{normalize_string(tv)}"
            t_value_variables['taxonomy_value_posts_index'] = output_variables[variable_name]
        v = {**config, **t_variables, **t_value_variables, **dynamic_vars}
        tv_bodies.append(fill(templates[i_config['item_template']], **v))
    # join all generated bodies to one final result
    t_variables['body'] = ''.join(tv_bodies)
    v = {**config, **t_variables, **dynamic_vars}
    output = fill(templates[i_config['template']], **v)
    return output
def main():
    """Command line entry point: dispatch --init, --serve and --generate modes."""
    # declare input arguments
    parser = argparse.ArgumentParser(usage="""
    To initialize with functional demo page data for both http and gemini run:
    %(prog)s --init http gemini
    To serve target locally using target/[FORMAT] as root dir run:
    %(prog)s --serve [FORMAT]
    """)
    # --init / --serve / --generate are mutually exclusive modes
    mutex = parser.add_mutually_exclusive_group()
    mutex.add_argument(
        '-i',
        '--init',
        action='store_true',
        help="""
        Create directory/file structure with config and full demo page
        (process includes automatic archive download)
        """
    )
    mutex.add_argument(
        '-s',
        '--serve',
        type=str,
        nargs=1,
        metavar='PROTOCOL',
        choices=['gemini', 'http'],
        help="Run local server and serve requested format. (http or gemini)"
    )
    # --port is a plain argument so it can be combined with --serve
    parser.add_argument(
        '-p',
        '--port',
        type=int,
        help="Specify custom port for server to bind on (default is 8000)"
    )
    mutex.add_argument(
        '-g',
        '--generate',
        type=str,
        nargs='+',
        metavar='PROTOCOL',
        choices=['gemini', 'http'],
        help="""
        Generate pages to requested format and save to 'target/[FORMAT]'
        directory. (http and/or gemini)
        """
    )
    # parse arguments
    args = parser.parse_args()
    # if no arguments given, show help
    if len(sys.argv) == 1:
        parser.print_help()
        exit(0)
    # business logic for --init
    if args.init:
        # lazy imports: only needed for initialization
        import urllib.request as ur
        from zipfile import ZipFile
        Log.info('Downloading archive with initialization files')
        ur.urlretrieve('https://mizik.eu/picogen/init.zip', 'init.zip')
        with ZipFile('init.zip', 'r') as zipFile:
            Log.info(f'Unpacking in current directory ({os.path.abspath(os.getcwd())})')
            zipFile.extractall()
        Log.ok('Initialization successful')
        os.remove('init.zip')
    # business logic for --serve
    if args.serve:
        port = args.port or 8000
        if 'http' in args.serve:
            import http.server
            import socketserver as ss
            # allow quick restarts without "address already in use" errors
            ss.TCPServer.allow_reuse_address = True
            os.chdir('target/html')
            with ss.TCPServer(("", port), http.server.SimpleHTTPRequestHandler) as httpd:
                Log.ok(f'serving data from target/html on port {port}')
                httpd.serve_forever()
        if 'gemini' in args.serve:
            # jetforce is an optional dependency, imported only for gemini serving
            from jetforce import GeminiServer, StaticDirectoryApplication
            from jetforce.app.composite import CompositeApplication
            app = CompositeApplication({"localhost": StaticDirectoryApplication(root_directory="target/gmi")})
            Log.ok('serving data from target/gmi')
            GeminiServer(app, port=port).run()
    # business logic for --generate
    if args.generate:
        cfg = None
        try:
            # clean target directory
            if os.path.isdir('target'):
                shutil.rmtree('target')
            os.makedirs('target')
            # load configuration
            cfg = json.loads(read_file('config.json'))
        except FileNotFoundError as e:
            Log.err(f"Initial filesystem check failed: {e}")
            Log.err(f"Run picogen with --init to execute filesystem structure setup with demo data")
            exit(1)
        # run business logic for --generate for every type (http, gemini)
        # NOTE variable prefix "t_" stands for "taxonomy_"
        for item in args.generate:
            # STEP 1. INITIALIZE
            # define generation type specific variables
            protocol = Protocol.from_name(item)
            suffix = protocol.file_suffix()
            scheme = protocol.scheme(cfg['ssl_enabled'])
            # copy static files to target directory
            shutil.copytree(f'static/{suffix}', f'target/{suffix}')
            # load templates
            templates = dict()
            for filepath in glob.glob(f'templates/{suffix}/*.*'):
                filename = os.path.basename(filepath).split('.')[0]
                templates[filename] = read_file(filepath)
            # if template has parent, merge them together (1 level deep for now):
            # a template named "child_parent" is rendered into parent's {{ body }}
            # and stored back under the name "child"
            for template_name in list(templates):
                if '_' in template_name:
                    child_and_parent = template_name.split('_')
                    parent_value = templates[child_and_parent[1]]
                    child_value = templates.pop(template_name)
                    templates[child_and_parent[0]] = fill(parent_value, body=child_value)
            # declare initial dynamic variables (generated by picogen)
            dynamic_vars = dict()
            dynamic_vars['scheme'] = scheme
            dynamic_vars['current_year'] = datetime.now().year
            dynamic_vars['rfc3339_now'] = datetime.now(timezone.utc).astimezone().isoformat()
            # optionally load page views for posts if file available (file format is file:views on every line)
            page_views = dict()
            if 'page_views_file' in cfg:
                for line in read_file(cfg['page_views_file']).splitlines():
                    parts = line.split(sep=":")
                    page_views[parts[0]] = int(parts[1])
            # STEP 2. ASSEMBLE FILE DESCRIPTORS (FILES IN CONTENT DIR PARSED TO DICTS)
            # array to store all generated file descriptors for later template filling
            descriptors = []
            # dict where descriptor is copied under all values of all taxonomies declared in the file headers
            # it will be used for later taxonomy value indexes and taxonomy value posts indexes generation
            descriptors_by_t_value = dict()
            for root, dirs, files in os.walk('content'):
                # recursively traverse content folder. assemble and save descriptor for every file
                for f in files:
                    # assemble descriptor
                    d = assemble_file_descriptor(os.path.join(root, f), cfg)
                    # don't process this file further if marked as draft
                    if 'draft' in d:
                        continue
                    # don't process if it isn't markdown nor protocol native file
                    if d['file_ext'] not in ['markdown', 'md', suffix]:
                        continue
                    # assemble correct target path and relative path
                    # root[8:] strips the leading 'content/' prefix from the walked path
                    if d['file_name'] == 'index':
                        d['target_path'] = os.path.join(f'target/{suffix}', root[8:], f'index.{suffix}')
                        d['relative_path'] = os.path.join(cfg['base_path'], root[8:], f'index.{suffix}')
                        d['relative_dir_path'] = os.path.join(cfg['base_path'], root[8:])
                    else:
                        d['target_path'] = os.path.join(f'target/{suffix}', root[8:], d['file_name'], f'index.{suffix}')
                        d['relative_path'] = os.path.join(cfg['base_path'], root[8:], d['file_name'], f'index.{suffix}')
                        d['relative_dir_path'] = os.path.join(cfg['base_path'], root[8:], d['file_name'])
                    d['page_views'] = page_views[d['relative_dir_path']] if d['relative_dir_path'] in page_views else 0
                    # save descriptor under every value of every declared taxonomy
                    t_template = None
                    if 'taxonomies' in cfg:
                        for t in cfg['taxonomies']:
                            t_id = t['id']
                            if t_id not in descriptors_by_t_value:
                                descriptors_by_t_value[t_id] = dict()
                            if t_id in d:
                                if 'document_template' in t:
                                    if t_template:
                                        # NOTE(review): message lacks the f-prefix and no 'file_path'
                                        # is in scope, so the placeholder prints literally — verify
                                        Log.warn('Multiple applicable taxonomy templates found for {file_path} ')
                                    t_template = t['document_template']
                                # taxonomy header values are comma-separated
                                for t_value in d[t_id].split(','):
                                    t_value = t_value.strip()
                                    if t_value not in descriptors_by_t_value[t_id]:
                                        descriptors_by_t_value[t_id][t_value] = []
                                    descriptors_by_t_value[t_id][t_value].append(d)
                    # choose correct template
                    if 'template' not in d:
                        if t_template:
                            d['template'] = t_template
                        else:
                            Log.warn(f"No template specified for {d['target_path']}. Using default")
                            d['template'] = cfg['default_template']
                    # convert body from markdown to protocol specific format
                    d = convert(d, protocol)
                    # save descriptor for later file generation
                    descriptors.append(d)
            # STEP 3. ASSEMBLE INDEX DESCRIPTORS
            # generation configs for those taxonomy value indexes (tvpi) which are provided as generated variables
            tvpi_as_variable_cfgs = []
            # generation configs for those taxonomy value posts indexes (tvi) which are provided as generated variables
            tvi_as_variable_cfgs = []
            # generation configs for taxonomy value indexes (tvpi) which are exported to files
            tvpi_as_file_cfgs = []
            # generation configs for taxonomy value posts indexes (tvi) that are exported to files
            tvi_as_file_cfgs = []
            if 'taxonomies' in cfg:
                for t in cfg['taxonomies']:
                    if 'value_posts_indexes' in t:
                        # inspect declared indexes for taxonomy
                        for vpi in t['value_posts_indexes']:
                            i_cfg = {'taxonomy_cfg': t, 'index_cfg': vpi}
                            if 'output_type' in vpi and vpi['output_type'] == 'file':
                                tvpi_as_file_cfgs.append(i_cfg)
                            elif 'output_type' in vpi and vpi['output_type'] == 'variable':
                                tvpi_as_variable_cfgs.append(i_cfg)
                    if 'value_indexes' in t:
                        # inspect declared value lists for taxonomy
                        for vi in t['value_indexes']:
                            i_cfg = {'taxonomy_cfg': t, 'index_cfg': vi}
                            if 'output_type' in vi and vi['output_type'] == 'file':
                                tvi_as_file_cfgs.append(i_cfg)
                            elif 'output_type' in vi and vi['output_type'] == 'variable':
                                tvi_as_variable_cfgs.append(i_cfg)
            # STEP 4. GENERATE INDEXES WHICH ARE PROVIDED AS GENERATED VARIABLES IN STEP 5 AND STEP 6
            generated_variables = dict()
            for tvpi_cfg in tvpi_as_variable_cfgs:
                t_cfg = tvpi_cfg['taxonomy_cfg']
                t_id = t_cfg['id']
                i_cfg = tvpi_cfg['index_cfg']
                # taxonomy specific output variables to use in template
                t_variables = {'taxonomy_id': t_id, 'taxonomy_title': t_cfg['title'], 'title': t_cfg['title'], }
                # iterate over taxonomy values
                for t_value in descriptors_by_t_value[t_id]:
                    output = fill_taxonomy_value_post_index(protocol, cfg, t_value, t_cfg,
                                                            {**t_variables, **generated_variables, **dynamic_vars},
                                                            templates, i_cfg, descriptors_by_t_value[t_id][t_value])
                    variable_name = f"{t_id}_{i_cfg['id']}_{normalize_string(t_value)}"
                    generated_variables[variable_name] = output
                    Log.ok(f"Generated {variable_name} taxonomy index variable")
            for tvi_cfg in tvi_as_variable_cfgs:
                t_cfg = tvi_cfg['taxonomy_cfg']
                i_cfg = tvi_cfg['index_cfg']
                output = fill_taxonomy_value_index(cfg, t_cfg, templates, i_cfg, descriptors_by_t_value,
                                                   dynamic_vars, generated_variables)
                variable_name = f"{t_cfg['id']}_{i_cfg['id']}"
                generated_variables[variable_name] = output
                Log.ok(f"Generated {variable_name} taxonomy value list variable")
            # STEP 5. GENERATE INDEXES WHICH ARE EXPORTED TO FILES
            for tvpi_cfg in tvpi_as_file_cfgs:
                t_cfg = tvpi_cfg['taxonomy_cfg']
                t_id = t_cfg['id']
                i_cfg = tvpi_cfg['index_cfg']
                # taxonomy specific output variables to use in template
                t_variables = {
                    'taxonomy_id': t_id,
                    'taxonomy_title': t_cfg['title'],
                    'title': t_cfg['title'],
                }
                # iterate over taxonomy values
                for t_value in descriptors_by_t_value[t_id]:
                    output = fill_taxonomy_value_post_index(protocol, cfg, t_value, t_cfg,
                                                            {**t_variables, **dynamic_vars, **generated_variables},
                                                            templates, i_cfg, descriptors_by_t_value[t_id][t_value])
                    target_path = os.path.join(
                        f"target/{suffix}/{t_cfg['id']}",
                        normalize_string(t_value) if t_value else '',
                        f'{i_cfg["id"]}.{i_cfg["output_suffix"] if "output_suffix" in i_cfg else suffix}'
                    )
                    write_to_file(target_path, output)
                    Log.ok(f"Generated {t_cfg['id']} {i_cfg['id']} index => {target_path}")
            for tvi_cfg in tvi_as_file_cfgs:
                t_cfg = tvi_cfg['taxonomy_cfg']
                t_id = t_cfg['id']
                i_cfg = tvi_cfg['index_cfg']
                output = fill_taxonomy_value_index(cfg, t_cfg, templates, i_cfg, descriptors_by_t_value,
                                                   dynamic_vars, generated_variables)
                target_path = os.path.join(
                    f"target/{suffix}/{t_id}",
                    f'{i_cfg["id"]}.{i_cfg["output_suffix"] if "output_suffix" in i_cfg else suffix}'
                )
                write_to_file(target_path, output)
                Log.ok(f"Generated {t_cfg['id']} {i_cfg['id']} value_list => {target_path}")
            # STEP 6. GENERATE STANDARD FILES FROM CONTENT DIRECTORY
            for d in descriptors:
                # NOTE(review): assumes d, cfg, dynamic_vars and generated_variables have
                # disjoint keys — a duplicate key in a double-star call raises TypeError
                d['body'] = fill(d['body'], **d, **cfg, **dynamic_vars, **generated_variables)
                name = f"{d['file_name']}.{d['file_ext']}"
                d['summary'] = parse_trailer(name, d['body'], protocol)
                template = templates[d['template']]
                write_to_file(d['target_path'], fill(template, **d, **cfg, **dynamic_vars, **generated_variables))
                Log.ok(f"Generated {d['file_name']}.{d['file_ext']} => {d['target_path']}")
# run the CLI only when executed as a script, not when imported
if __name__ == '__main__':
    main()