function.py
import cv2
import torch
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
from torch.utils import data


def RecurrentSample(n):
    '''Recurrent sampling: yield indices endlessly from repeated random permutations of range(n).'''
    i = n - 1
    order = np.random.permutation(n)
    while True:
        yield order[i]
        i += 1
        if i >= n:
            # Reseed from OS entropy and draw a fresh permutation for the next pass.
            np.random.seed()
            order = np.random.permutation(n)
            i = 0


class RecurrentSampler(data.sampler.Sampler):
    '''Recurrent sampler: lets a DataLoader keep drawing samples indefinitely via RecurrentSample.'''

    def __init__(self, data_source):
        self.num_samples = len(data_source)

    def __iter__(self):
        return iter(RecurrentSample(self.num_samples))

    def __len__(self):
        return 2 ** 31
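

# Usage sketch (not part of the original file; names here are assumptions): RecurrentSampler
# is presumably meant to be handed to a torch DataLoader so training can draw batches without
# ever exhausting an epoch. `some_dataset` is a hypothetical torch.utils.data.Dataset instance.
#
#   loader = data.DataLoader(some_dataset, batch_size=8,
#                            sampler=RecurrentSampler(some_dataset))
#   data_iter = iter(loader)
#   batch = next(data_iter)   # can be called indefinitely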


def get_mean_std(feature, epsilon=1e-5):
    '''Compute the per-channel mean and standard deviation of a feature map.'''
    # epsilon guards against division by zero
    N, C = feature.size()[:2]
    feature_var = feature.view(N, C, -1).var(dim=2) + epsilon  # per-channel variance
    feature_std = feature_var.sqrt().view(N, C, 1, 1)          # per-channel standard deviation
    feature_mean = feature.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
    return feature_mean, feature_std
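

# Shape check (illustrative only, not from the original file): for a feature map of shape
# (N, C, H, W), both returned tensors have shape (N, C, 1, 1), so they broadcast cleanly
# over the spatial dimensions.
#
#   feat = torch.randn(4, 512, 32, 32)
#   mean, std = get_mean_std(feat)   # mean.shape == std.shape == (4, 512, 1, 1)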


def AdaIN(content_features, style_features, epsilon=1e-5):
    '''Adaptive instance normalization: the core layer that re-styles content features
    with the style features' per-channel statistics.'''
    assert content_features.size()[:2] == style_features.size()[:2]
    content_mean, content_std = get_mean_std(content_features)
    style_mean, style_std = get_mean_std(style_features)
    size = content_features.size()
    normalized_features = (content_features - content_mean.expand(size)) / content_std.expand(size)
    normalized_features = (normalized_features * style_std.expand(size)) + style_mean.expand(size)
    return normalized_features
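

# Minimal AdaIN sketch (assumed usage, not part of the original file): content and style
# feature maps only need to agree in batch size and channel count; the output keeps the
# content's shape but carries the style's per-channel mean and std.
#
#   content = torch.randn(1, 512, 32, 32)
#   style = torch.randn(1, 512, 64, 64)
#   stylized = AdaIN(content, style)   # shape (1, 512, 32, 32)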


def _calc_feat_flatten_mean_std(feat):
    # Takes a 3D feat (C, H, W); returns the flattened feature plus its per-channel mean and std.
    feat_flatten = feat.view(3, -1)
    mean = feat_flatten.mean(dim=-1, keepdim=True)
    std = feat_flatten.std(dim=-1, keepdim=True)
    return feat_flatten, mean, std


def _mat_sqrt(x):
    # Matrix square root via SVD: x = U D V^T  =>  x^(1/2) = U D^(1/2) V^T
    U, D, V = torch.svd(x)
    return torch.mm(torch.mm(U, D.pow(0.5).diag()), V.t())


def coral(source, target):
    # Color alignment via second-order statistics; assume both source and target are 3D arrays (C, H, W).
    # Note: flatten -> f
    source_f, source_f_mean, source_f_std = _calc_feat_flatten_mean_std(source)
    source_f_norm = (source_f - source_f_mean.expand_as(source_f)) / source_f_std.expand_as(source_f)
    source_f_cov_eye = torch.mm(source_f_norm, source_f_norm.t()) + torch.eye(3)

    target_f, target_f_mean, target_f_std = _calc_feat_flatten_mean_std(target)
    target_f_norm = (target_f - target_f_mean.expand_as(target_f)) / target_f_std.expand_as(target_f)
    target_f_cov_eye = torch.mm(target_f_norm, target_f_norm.t()) + torch.eye(3)

    # Whiten the source with its own covariance, then re-color it with the target covariance.
    source_f_norm_transfer = torch.mm(
        _mat_sqrt(target_f_cov_eye),
        torch.mm(torch.inverse(_mat_sqrt(source_f_cov_eye)), source_f_norm)
    )
    # Restore the target's per-channel scale and mean.
    source_f_transfer = source_f_norm_transfer * \
        target_f_std.expand_as(source_f_norm) + \
        target_f_mean.expand_as(source_f_norm)
    return source_f_transfer.view(source.size())
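

# Usage sketch for coral (assumption, not part of the original file): both arguments are
# 3 x H x W tensors in [0, 1], e.g. images loaded with transforms.ToTensor(); the result has
# the source's content with the target's color statistics. The file names are hypothetical.
#
#   src = transforms.ToTensor()(Image.open('content.jpg'))
#   tgt = transforms.ToTensor()(Image.open('style.jpg'))
#   recolored = coral(src, tgt)   # same shape as src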


def change_color(source, target):
    '''Transfer target's hue and saturation onto source while keeping source's brightness.'''
    transform = transforms.Resize(size=(512, 512))
    detransform = transforms.Resize(size=(source.size[1], source.size[0]))
    # np.array (not np.asarray) so the resized images become writable copies.
    source = np.array(transform(source))
    target = np.array(transform(target))
    source = cv2.cvtColor(source, cv2.COLOR_RGB2HSV)
    target = cv2.cvtColor(target, cv2.COLOR_RGB2HSV)
    # Copy the H and S channels from target; keep source's V channel.
    source[:, :, 0] = target[:, :, 0]
    source[:, :, 1] = target[:, :, 1]
    # Convert back to RGB (the input came in as RGB, so HSV2RGB keeps PIL's channel order).
    source = cv2.cvtColor(source, cv2.COLOR_HSV2RGB)
    source = Image.fromarray(source)
    source = detransform(source)
    return source
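

# Small self-test (added as an assumption about intended usage, not part of the original
# file): exercises AdaIN and coral on random data so the module can be smoke-tested without
# any image files on disk.
if __name__ == '__main__':
    content_feat = torch.randn(2, 64, 16, 16)
    style_feat = torch.randn(2, 64, 16, 16)
    out = AdaIN(content_feat, style_feat)
    print('AdaIN output shape:', tuple(out.shape))

    src_img = torch.rand(3, 32, 32)
    tgt_img = torch.rand(3, 32, 32)
    print('coral output shape:', tuple(coral(src_img, tgt_img).shape))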