# Copyright (c) 2020 kamyu. All rights reserved.
#
# Google Code Jam 2020 Virtual World Finals - Problem E. Replace All
# https://codingcompetitions.withgoogle.com/codejam/round/000000000019ff31/00000000003b4bc4
#
# Time: O(A^3)
# Space: O(A^2)
#
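# What the code below does, at a glance: treat each of the 62 symbols
# [0-9a-zA-Z] as a node, add an edge A->B for every replacement pair, and take
# the transitive closure with Floyd-Warshall.  Sinks are the characters absent
# from S; additionally, for every strongly connected component whose members
# all occur in S and which has an outgoing edge, its smallest-index node is
# used as both a source and a sink.  A Hopcroft-Karp maximum matching between
# sources and the sinks they can reach then gives the answer as
# (number of distinct characters in S) - (number of unmatched sources).
# (The file is written for Python 2: raw_input, xrange, print statement.)
#
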
from collections import defaultdict
from functools import partial
# Time: O(E * sqrt(V))
# Space: O(V)
# Source code from http://code.activestate.com/recipes/123641-hopcroft-karp-bipartite-matching/
# Hopcroft-Karp bipartite max-cardinality matching and max independent set
# David Eppstein, UC Irvine, 27 Apr 2002
def bipartiteMatch(graph):
    '''Find maximum cardinality matching of a bipartite graph (U,V,E).
    The input format is a dictionary mapping members of U to a list
    of their neighbors in V. The output is a triple (M,A,B) where M is a
    dictionary mapping members of V to their matches in U, A is the part
    of the maximum independent set in U, and B is the part of the MIS in V.
    The same object may occur in both U and V, and is treated as two
    distinct vertices if this happens.'''
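
    # Illustrative example (not part of the original recipe):
    #   bipartiteMatch({'a': [0], 'b': [0, 1]}) returns
    #   ({0: 'a', 1: 'b'}, [], [0, 1]), i.e. a maximum matching of size 2.
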
    # initialize greedy matching (redundant, but faster than full search)
    matching = {}
    for u in graph:
        for v in graph[u]:
            if v not in matching:
                matching[v] = u
                break

    while 1:
        # structure residual graph into layers
        # pred[u] gives the neighbor in the previous layer for u in U
        # preds[v] gives a list of neighbors in the previous layer for v in V
        # unmatched gives a list of unmatched vertices in final layer of V,
        # and is also used as a flag value for pred[u] when u is in the first layer
        preds = {}
        unmatched = []
        pred = dict([(u,unmatched) for u in graph])
        for v in matching:
            del pred[matching[v]]
        layer = list(pred)

        # repeatedly extend layering structure by another pair of layers
        while layer and not unmatched:
            newLayer = {}
            for u in layer:
                for v in graph[u]:
                    if v not in preds:
                        newLayer.setdefault(v,[]).append(u)
            layer = []
            for v in newLayer:
                preds[v] = newLayer[v]
                if v in matching:
                    layer.append(matching[v])
                    pred[matching[v]] = v
                else:
                    unmatched.append(v)

        # did we finish layering without finding any alternating paths?
        if not unmatched:
            unlayered = {}
            for u in graph:
                for v in graph[u]:
                    if v not in preds:
                        unlayered[v] = None
            return (matching,list(pred),list(unlayered))

        # recursively search backward through layers to find alternating paths
        # recursion returns true if found path, false otherwise
        def recurse(v):
            if v in preds:
                L = preds[v]
                del preds[v]
                for u in L:
                    if u in pred:
                        pu = pred[u]
                        del pred[u]
                        if pu is unmatched or recurse(pu):
                            matching[v] = u
                            return 1
            return 0
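
        # recurse_iter below is an explicit-stack rewrite of recurse above
        # (the work is pushed onto stk as functools.partial callbacks), so long
        # alternating paths cannot exceed Python's recursion limit.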
        def recurse_iter(v):
            def divide(v):
                if v not in preds:
                    return
                L = preds[v]
                del preds[v]
                for u in L:
                    if u in pred and pred[u] is unmatched:  # early return
                        del pred[u]
                        matching[v] = u
                        ret[0] = True
                        return
                stk.append(partial(conquer, v, iter(L)))

            def conquer(v, it):
                for u in it:
                    if u not in pred:
                        continue
                    pu = pred[u]
                    del pred[u]
                    stk.append(partial(postprocess, v, u, it))
                    stk.append(partial(divide, pu))
                    return

            def postprocess(v, u, it):
                if not ret[0]:
                    stk.append(partial(conquer, v, it))
                    return
                matching[v] = u

            ret, stk = [False], []
            stk.append(partial(divide, v))
            while stk:
                stk.pop()()
            return ret[0]

        for v in unmatched: recurse_iter(v)
def floydWarshall(adj): # Time: O(N^3)
    for k in xrange(len(adj[0])):
        for i in xrange(len(adj)):
            for j in xrange(len(adj[i])):
                if adj[i][k] and adj[k][j]:
                    adj[i][j] = 1
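
# Note: floydWarshall above treats adj as a 0/1 reachability matrix and
# computes its transitive closure in place, e.g. [[1,1,0],[0,1,1],[0,0,1]]
# becomes [[1,1,1],[0,1,1],[0,0,1]].
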
def char_to_num(c):
    if c.isdigit():
        return (ord(c)-ord('0'))
    if c.islower():
        return (ord(c)-ord('a')) + 10
    if c.isupper():
        return (ord(c)-ord('A')) + 36
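
# char_to_num maps '0'-'9' to 0-9, 'a'-'z' to 10-35 and 'A'-'Z' to 36-61,
# e.g. char_to_num('7') == 7, char_to_num('c') == 12, char_to_num('C') == 38.
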
def replace_all():
    S, N = raw_input().strip().split()
    has_alpha = [0]*ALPHABET_SIZE
    for c in S:
        has_alpha[char_to_num(c)] = 1
    adj = [[int(i == j) for j in xrange(ALPHABET_SIZE)] for i in xrange(ALPHABET_SIZE)]
    for A, B in raw_input().strip().split():
        adj[char_to_num(A)][char_to_num(B)] = 1
    floydWarshall(adj)  # Time: O(A^3)
    sources, sinks = [], [i for i in xrange(ALPHABET_SIZE) if not has_alpha[i]]
    for i in xrange(ALPHABET_SIZE):
        if any(adj[i][j] and adj[j][i] for j in xrange(i)):
            continue  # not the representative of its scc (the root, i.e. the smallest index in the scc)
        if any(not has_alpha[j] for j in xrange(i, ALPHABET_SIZE) if adj[i][j] and adj[j][i]):
            continue  # not all nodes in the scc are in S
        if not any(adj[i][j] for j in xrange(ALPHABET_SIZE) if i != j):
            continue  # zero out-degree
        # found an scc whose nodes are all in S and whose root has out-degree at least 1,
        # take the root as both a source and a sink
        sources.append(i)
        sinks.append(i)
    E = defaultdict(list)
    for i in xrange(len(sources)):
        for j in xrange(len(sinks)):
            if sources[i] != sinks[j] and adj[sources[i]][sinks[j]]:
                E[j].append(i)
    return sum(has_alpha)-(len(sources)-len(bipartiteMatch(E)[0]))  # Time: O(A^2 * sqrt(A))
ALPHABET_SIZE = 62
for case in xrange(input()):
    print 'Case #%d: %s' % (case+1, replace_all())