|
| 1 | +import os |
| 2 | +from pathlib import Path |
| 3 | +import itertools |
| 4 | +import functools |
| 5 | +from typing import List |
| 6 | + |
# Read the puzzle input (a file named 'input' in the current working
# directory) and split it into lines: line 0 is the polymer template,
# line 1 is blank, lines 2+ are the pair-insertion rules.
input_p = Path(os.getcwd()) / 'input'
text = input_p.read_text()
lines = text.splitlines()
| 10 | + |
def line_to_rule(l):
    """Parse one pair-insertion rule line 'AB -> C' into the tuple ('AB', 'C')."""
    fields = l.split(" -> ")
    return (fields[0], fields[1])
| 14 | + |
# Line 0 is the polymer template; the rules start after the blank line 1.
start = lines[0]
rules = list(map(line_to_rule, lines[2:]))
rules_map = dict(rules)  # pair -> inserted character
| 18 | + |
| 19 | +#part 1 |
def do_step(polymer, rules=None):
    """Apply one pair-insertion step to *polymer* and return the new string.

    For every adjacent pair AB with a rule AB -> C, C is inserted between
    A and B; pairs without a rule are left unchanged.

    polymer: the current polymer string.
    rules:   pair -> inserted-char mapping; defaults to the module-level
             rules_map parsed from the input (backward compatible).
    """
    if rules is None:
        rules = rules_map
    # Bug fix: a polymer shorter than 2 chars has no pairs; the old code
    # joined an empty pair list and silently dropped a 1-char polymer.
    if len(polymer) < 2:
        return polymer
    # Keep each pair's left char plus any inserted char; the right char of
    # each pair is the left char of the next, so append the final char once.
    grown = "".join(
        polymer[i] + rules.get(polymer[i:i + 2], "")
        for i in range(len(polymer) - 1)
    )
    return grown + polymer[-1]
| 27 | + |
# Part 1: small step count, so just expand the polymer string literally.
polymer = start
for _ in range(10):
    polymer = do_step(polymer)

# Tally each distinct character in the final polymer.
counts = {ch: polymer.count(ch) for ch in set(polymer)}

print(max(counts.values()) - min(counts.values()))
print("####################")
| 38 | + |
# Part 2: 40 steps grow the polymer exponentially, so track pair
# frequencies instead of the full string.
polymer = start

pair_counts = {}   # pair (2-char string) -> number of occurrences
n_duplicates = {}  # char -> how many times it is double-counted across pairs

# Build the initial pair counts.  Adjacent pairs overlap by one character:
# every pair after the FIRST shares its first character with the previous
# pair's second character, so record that character as a duplicate.
pairs = [polymer[i:i + 2] for i in range(len(polymer) - 1)]
for idx, p in enumerate(pairs):
    pair_counts[p] = pair_counts.get(p, 0) + 1
    # Bug fix: compare by POSITION, not value.  The old check `p != pairs[0]`
    # skipped every later occurrence of a pair equal to the first one
    # (e.g. template "ABAB": the second "AB" pair was never counted).
    if idx != 0:
        n_duplicates[p[0]] = n_duplicates.get(p[0], 0) + 1
| 51 | + |
| 52 | + |
# Run 40 insertion steps on the pair-frequency representation.  A pair AB
# with rule AB -> C spawns pairs AC and CB and retires the AB pairs; the
# inserted C is shared by the two new pairs, so note it in n_duplicates.
for _ in range(40):
    delta = {}  # net change to pair_counts accumulated for this step

    for pair, count in pair_counts.items():
        inserted = rules_map.get(pair)
        if inserted is None:
            continue  # no rule: this pair survives unchanged

        n_duplicates[inserted] = n_duplicates.get(inserted, 0) + count

        left = pair[0] + inserted
        right = inserted + pair[1]
        delta[left] = delta.get(left, 0) + count
        delta[right] = delta.get(right, 0) + count
        delta[pair] = delta.get(pair, 0) - count

    for pair, change in delta.items():
        pair_counts[pair] = pair_counts.get(pair, 0) + change
| 76 | + |
| 77 | + |
# Recover per-character totals from the final pair counts.  Summing a
# character over both slots of every pair over-counts it by exactly the
# amounts recorded in n_duplicates.
chars = set()
for pair, count in pair_counts.items():
    if count != 0:
        chars.update(pair)  # adds both characters of the 2-char pair

counts = {}
for c in chars:
    # pair.count(c) is 2 for doubled pairs like "BB", 1 if c appears once,
    # and 0 otherwise -- this replaces the explicit c + c special case.
    total = sum(count * pair.count(c) for pair, count in pair_counts.items())
    counts[c] = total - n_duplicates.get(c, 0)

print(max(counts.values()) - min(counts.values()))
0 commit comments