MindSpore 25-Day Study Camp, Day 23 | CycleGAN Image Style Transfer
Table of Contents
- MindSpore 25-Day Study Camp, Day 23 | CycleGAN Image Style Transfer
- CycleGAN Model
- Model Structure
- Cycle Consistency Loss

- Dataset
- Data Download
- Creating the Dataset

- Network Construction
- Generator
- Discriminator
- Loss Functions and Optimizers
- Forward Computation
- Gradient Computation and Backpropagation

- Summary
- Check-in

CycleGAN Model
An important application of the Cycle Generative Adversarial Network (CycleGAN) is domain transfer, i.e. image style transfer.
CycleGAN learns to translate images from a source domain X to a target domain Y without paired examples, making it an unsupervised image-to-image translation network.
Model Structure
The CycleGAN network consists of two mirror-symmetric GAN networks:
 
In the figure, $G$ is the generator that maps images from $X$ to the style of $Y$, $F$ is the generator that maps images from $Y$ to the style of $X$, and $D_X$ and $D_Y$ are the corresponding discriminators.
The model ultimately outputs the weights of the two generators, each transferring one image style to the other to generate new images.
Cycle Consistency Loss
The cycle consistency loss is computed as follows:

An image $x\in X$ is passed through generator $G$ to obtain $\hat Y$, an image in the style of $Y$; $\hat Y$ is then fed into generator $F$ to produce a new $X$-style image $\hat x$; finally, the cycle consistency loss is computed from $\hat x$ and $x$.
Dataset
The experiment uses the apple and orange images from ImageNet, resized to $256\times 256$.
Data Download
from download import download
url = "https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/models/application/CycleGAN_apple2orange.zip"
download(url, ".", kind="zip", replace=True)
Creating the Dataset
from mindspore.dataset import MindDataset
# Read data in MindRecord format
name_mr = "./CycleGAN_apple2orange/apple2orange_train.mindrecord"
data = MindDataset(dataset_files=name_mr)
print("Datasize: ", data.get_dataset_size())
batch_size = 1
dataset = data.batch(batch_size)
datasize = dataset.get_dataset_size()
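As an optional sanity check (an addition of mine, not part of the original tutorial), the record columns and the shape of the first batch can be inspected before building the networks; the exact column names depend on how the MindRecord file was written.
# Inspect the record columns and the first batch
print(dataset.get_col_names())
for item in dataset.create_dict_iterator(num_epochs=1):
    for name, value in item.items():
        print(name, value.shape, value.dtype)
    break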
Network Construction
Generator

import mindspore.nn as nn
import mindspore.ops as ops
from mindspore.common.initializer import Normal
weight_init = Normal(sigma=0.02)
class ConvNormReLU(nn.Cell):
    def __init__(self, input_channel, out_planes, kernel_size=4, stride=2, alpha=0.2, norm_mode='instance',
                 pad_mode='CONSTANT', use_relu=True, padding=None, transpose=False):
        super(ConvNormReLU, self).__init__()
        norm = nn.BatchNorm2d(out_planes)
        if norm_mode == 'instance':
            norm = nn.BatchNorm2d(out_planes, affine=False)
        has_bias = (norm_mode == 'instance')
        if padding is None:
            padding = (kernel_size - 1) // 2
        if pad_mode == 'CONSTANT':
            if transpose:
                conv = nn.Conv2dTranspose(input_channel, out_planes, kernel_size, stride, pad_mode='same',
                                          has_bias=has_bias, weight_init=weight_init)
            else:
                conv = nn.Conv2d(input_channel, out_planes, kernel_size, stride, pad_mode='pad',
                                 has_bias=has_bias, padding=padding, weight_init=weight_init)
            layers = [conv, norm]
        else:
            paddings = ((0, 0), (0, 0), (padding, padding), (padding, padding))
            pad = nn.Pad(paddings=paddings, mode=pad_mode)
            if transpose:
                conv = nn.Conv2dTranspose(input_channel, out_planes, kernel_size, stride, pad_mode='pad',
                                          has_bias=has_bias, weight_init=weight_init)
            else:
                conv = nn.Conv2d(input_channel, out_planes, kernel_size, stride, pad_mode='pad',
                                 has_bias=has_bias, weight_init=weight_init)
            layers = [pad, conv, norm]
        if use_relu:
            relu = nn.ReLU()
            if alpha > 0:
                relu = nn.LeakyReLU(alpha)
            layers.append(relu)
        self.features = nn.SequentialCell(layers)
    def construct(self, x):
        output = self.features(x)
        return output
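A quick, hedged shape check (my addition, not in the tutorial): with the default kernel_size=4 and stride=2, a ConvNormReLU block halves the spatial resolution.
# Minimal shape check for ConvNormReLU (assumes an NCHW float32 input)
import numpy as np
import mindspore as ms
_block = ConvNormReLU(3, 64)
_x = ms.Tensor(np.random.randn(1, 3, 256, 256), ms.float32)
print(_block(_x).shape)  # expected: (1, 64, 128, 128)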
class ResidualBlock(nn.Cell):
    def __init__(self, dim, norm_mode='instance', dropout=False, pad_mode="CONSTANT"):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvNormReLU(dim, dim, 3, 1, 0, norm_mode, pad_mode)
        self.conv2 = ConvNormReLU(dim, dim, 3, 1, 0, norm_mode, pad_mode, use_relu=False)
        self.dropout = dropout
        if dropout:
            self.dropout = nn.Dropout(p=0.5)
    def construct(self, x):
        out = self.conv1(x)
        if self.dropout:
            out = self.dropout(out)
        out = self.conv2(out)
        return x + out
class ResNetGenerator(nn.Cell):
    def __init__(self, input_channel=3, output_channel=64, n_layers=9, alpha=0.2, norm_mode='instance', dropout=False,
                 pad_mode="CONSTANT"):
        super(ResNetGenerator, self).__init__()
        self.conv_in = ConvNormReLU(input_channel, output_channel, 7, 1, alpha, norm_mode, pad_mode=pad_mode)
        self.down_1 = ConvNormReLU(output_channel, output_channel * 2, 3, 2, alpha, norm_mode)
        self.down_2 = ConvNormReLU(output_channel * 2, output_channel * 4, 3, 2, alpha, norm_mode)
        layers = [ResidualBlock(output_channel * 4, norm_mode, dropout=dropout, pad_mode=pad_mode)] * n_layers
        self.residuals = nn.SequentialCell(layers)
        self.up_2 = ConvNormReLU(output_channel * 4, output_channel * 2, 3, 2, alpha, norm_mode, transpose=True)
        self.up_1 = ConvNormReLU(output_channel * 2, output_channel, 3, 2, alpha, norm_mode, transpose=True)
        if pad_mode == "CONSTANT":
            self.conv_out = nn.Conv2d(output_channel, 3, kernel_size=7, stride=1, pad_mode='pad',
                                      padding=3, weight_init=weight_init)
        else:
            pad = nn.Pad(paddings=((0, 0), (0, 0), (3, 3), (3, 3)), mode=pad_mode)
            conv = nn.Conv2d(output_channel, 3, kernel_size=7, stride=1, pad_mode='pad', weight_init=weight_init)
            self.conv_out = nn.SequentialCell([pad, conv])
    def construct(self, x):
        x = self.conv_in(x)
        x = self.down_1(x)
        x = self.down_2(x)
        x = self.residuals(x)
        x = self.up_2(x)
        x = self.up_1(x)
        output = self.conv_out(x)
        return ops.tanh(output)
# Instantiate the generators
net_rg_a = ResNetGenerator()
net_rg_a.update_parameters_name('net_rg_a.')
net_rg_b = ResNetGenerator()
net_rg_b.update_parameters_name('net_rg_b.')
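As a hedged smoke test (not part of the tutorial), a random 256×256 image can be pushed through one generator to confirm that the output keeps the input resolution and lies in (-1, 1) because of the final tanh.
# Smoke test for the generator (assumes a 256x256 RGB input)
import numpy as np
import mindspore as ms
_dummy = ms.Tensor(np.random.randn(1, 3, 256, 256), ms.float32)
print(net_rg_a(_dummy).shape)  # expected: (1, 3, 256, 256)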
Discriminator
# Define the discriminator
class Discriminator(nn.Cell):
    def __init__(self, input_channel=3, output_channel=64, n_layers=3, alpha=0.2, norm_mode='instance'):
        super(Discriminator, self).__init__()
        kernel_size = 4
        layers = [nn.Conv2d(input_channel, output_channel, kernel_size, 2, pad_mode='pad', padding=1, weight_init=weight_init),
                  nn.LeakyReLU(alpha)]
        nf_mult = output_channel
        for i in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** i, 8) * output_channel
            layers.append(ConvNormReLU(nf_mult_prev, nf_mult, kernel_size, 2, alpha, norm_mode, padding=1))
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8) * output_channel
        layers.append(ConvNormReLU(nf_mult_prev, nf_mult, kernel_size, 1, alpha, norm_mode, padding=1))
        layers.append(nn.Conv2d(nf_mult, 1, kernel_size, 1, pad_mode='pad', padding=1, weight_init=weight_init))
        self.features = nn.SequentialCell(layers)
    def construct(self, x):
        output = self.features(x)
        return output
# Instantiate the discriminators
net_d_a = Discriminator()
net_d_a.update_parameters_name('net_d_a.')
net_d_b = Discriminator()
net_d_b.update_parameters_name('net_d_b.')
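The discriminator is a PatchGAN-style classifier: instead of a single scalar it outputs a grid of patch-level real/fake scores. A hedged shape check (my addition) for a 256×256 input:
# Smoke test for the discriminator (assumes a 256x256 RGB input)
import numpy as np
import mindspore as ms
_dummy = ms.Tensor(np.random.randn(1, 3, 256, 256), ms.float32)
print(net_d_a(_dummy).shape)  # expected: (1, 1, 30, 30), one score per image patch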
Loss Functions and Optimizers
For the generator $G$ and its corresponding discriminator $D_Y$, the adversarial objective is defined as:
$$L_{GAN}(G,D_Y,X,Y)=E_{y\sim p_{data}(y)}[\log D_Y(y)] + E_{x\sim p_{data}(x)}[\log(1-D_Y(G(x)))]$$
The generator tries to minimize this objective while the discriminator tries to maximize it:
$$\min_G\max_{D_Y}L_{GAN}(G,D_Y,X,Y)$$
For every image $x$ in $X$, the translation cycle should be able to bring $x$ back to the original image; this is called forward cycle consistency, i.e. $x\to G(x)\to F(G(x))\approx x$.
The cycle consistency loss is defined as:
$$L_{cyc}(G,F)=E_{x\sim p_{data}(x)}[\|F(G(x))-x\|_1]+E_{y\sim p_{data}(y)}[\|G(F(y))-y\|_1]$$
The cycle consistency loss keeps the reconstructed image $F(G(x))$ close to the input image $x$.
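For reference, the overall generator objective optimized by the code below combines the two adversarial terms, the cycle consistency term weighted by $\lambda$, and an additional identity term weighted by $\lambda\cdot\lambda_{idt}$ (this matches lambda_a, lambda_b and lambda_idt in the code; the identity term comes from the original CycleGAN paper and is not shown in the formulas above):
$$L(G,F,D_X,D_Y)=L_{GAN}(G,D_Y,X,Y)+L_{GAN}(F,D_X,Y,X)+\lambda L_{cyc}(G,F)+\lambda\,\lambda_{idt}\,L_{identity}(G,F)$$
where $L_{identity}(G,F)=E_{y\sim p_{data}(y)}[\|G(y)-y\|_1]+E_{x\sim p_{data}(x)}[\|F(x)-x\|_1]$.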
# Build the optimizers for the generators and discriminators
optimizer_rg_a = nn.Adam(net_rg_a.trainable_params(), learning_rate=0.0002, beta1=0.5)
optimizer_rg_b = nn.Adam(net_rg_b.trainable_params(), learning_rate=0.0002, beta1=0.5)
optimizer_d_a = nn.Adam(net_d_a.trainable_params(), learning_rate=0.0002, beta1=0.5)
optimizer_d_b = nn.Adam(net_d_b.trainable_params(), learning_rate=0.0002, beta1=0.5)
# GAN loss; the discriminator's last layer has no sigmoid, so a least-squares (MSE) loss is used
loss_fn = nn.MSELoss(reduction='mean')
l1_loss = nn.L1Loss("mean")
def gan_loss(predict, target):
    target = ops.ones_like(predict) * target
    loss = loss_fn(predict, target)
    return loss
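A quick illustration of gan_loss (my addition): the boolean target is broadcast to a tensor of ones (True) or zeros (False), so a prediction of all ones scored against True gives zero loss, while all zeros gives a loss of 1.0 under MSE.
# Illustrative check of gan_loss (the 30x30 shape assumes the PatchGAN output used later)
import numpy as np
from mindspore import Tensor, dtype
print(gan_loss(Tensor(np.ones((1, 1, 30, 30)), dtype.float32), Tensor(True, dtype.bool_)))   # ~0.0
print(gan_loss(Tensor(np.zeros((1, 1, 30, 30)), dtype.float32), Tensor(True, dtype.bool_)))  # ~1.0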
Forward Computation
import random
import mindspore as ms
from mindspore import Tensor, dtype
# Forward computation
def generator(img_a, img_b):
    fake_a = net_rg_b(img_b)
    fake_b = net_rg_a(img_a)
    rec_a = net_rg_b(fake_b)
    rec_b = net_rg_a(fake_a)
    identity_a = net_rg_b(img_a)
    identity_b = net_rg_a(img_b)
    return fake_a, fake_b, rec_a, rec_b, identity_a, identity_b
lambda_a = 10.0
lambda_b = 10.0
lambda_idt = 0.5
def generator_forward(img_a, img_b):
    true = Tensor(True, dtype.bool_)
    fake_a, fake_b, rec_a, rec_b, identity_a, identity_b = generator(img_a, img_b)
    loss_g_a = gan_loss(net_d_b(fake_b), true)
    loss_g_b = gan_loss(net_d_a(fake_a), true)
    loss_c_a = l1_loss(rec_a, img_a) * lambda_a
    loss_c_b = l1_loss(rec_b, img_b) * lambda_b
    loss_idt_a = l1_loss(identity_a, img_a) * lambda_a * lambda_idt
    loss_idt_b = l1_loss(identity_b, img_b) * lambda_b * lambda_idt
    loss_g = loss_g_a + loss_g_b + loss_c_a + loss_c_b + loss_idt_a + loss_idt_b
    return fake_a, fake_b, loss_g, loss_g_a, loss_g_b, loss_c_a, loss_c_b, loss_idt_a, loss_idt_b
def generator_forward_grad(img_a, img_b):
    _, _, loss_g, _, _, _, _, _, _ = generator_forward(img_a, img_b)
    return loss_g
def discriminator_forward(img_a, img_b, fake_a, fake_b):
    false = Tensor(False, dtype.bool_)
    true = Tensor(True, dtype.bool_)
    d_fake_a = net_d_a(fake_a)
    d_img_a = net_d_a(img_a)
    d_fake_b = net_d_b(fake_b)
    d_img_b = net_d_b(img_b)
    loss_d_a = gan_loss(d_fake_a, false) + gan_loss(d_img_a, true)
    loss_d_b = gan_loss(d_fake_b, false) + gan_loss(d_img_b, true)
    loss_d = (loss_d_a + loss_d_b) * 0.5
    return loss_d
def discriminator_forward_a(img_a, fake_a):
    false = Tensor(False, dtype.bool_)
    true = Tensor(True, dtype.bool_)
    d_fake_a = net_d_a(fake_a)
    d_img_a = net_d_a(img_a)
    loss_d_a = gan_loss(d_fake_a, false) + gan_loss(d_img_a, true)
    return loss_d_a
def discriminator_forward_b(img_b, fake_b):
    false = Tensor(False, dtype.bool_)
    true = Tensor(True, dtype.bool_)
    d_fake_b = net_d_b(fake_b)
    d_img_b = net_d_b(img_b)
    loss_d_b = gan_loss(d_fake_b, false) + gan_loss(d_img_b, true)
    return loss_d_b
# Keep an image buffer that stores up to 50 previously generated images
pool_size = 50
def image_pool(images):
    num_imgs = 0
    image1 = []
    if isinstance(images, Tensor):
        images = images.asnumpy()
    return_images = []
    for image in images:
        if num_imgs < pool_size:
            num_imgs = num_imgs + 1
            image1.append(image)
            return_images.append(image)
        else:
            if random.uniform(0, 1) > 0.5:
                random_id = random.randint(0, pool_size - 1)
                tmp = image1[random_id].copy()
                image1[random_id] = image
                return_images.append(tmp)
            else:
                return_images.append(image)
    output = Tensor(return_images, ms.float32)
    if output.ndim != 4:
        raise ValueError("img should be 4d, but get shape {}".format(output.shape))
    return output
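Note that num_imgs and image1 are re-initialized on every call, so with batch_size=1 the buffer above only ever sees the current batch; a pool that actually keeps a 50-image history across training steps, as the comment describes, needs persistent state. A minimal sketch of such a stateful pool (my own sketch, not the tutorial's implementation):
# Hypothetical stateful image pool: mixes each new fake image with earlier ones
class ImagePool:
    def __init__(self, pool_size=50):
        self.pool_size = pool_size
        self.images = []
    def __call__(self, image):
        # image: Tensor of shape (N, C, H, W)
        if len(self.images) < self.pool_size:
            self.images.append(image)
            return image
        if random.uniform(0, 1) > 0.5:
            idx = random.randint(0, self.pool_size - 1)
            old = self.images[idx]
            self.images[idx] = image
            return old
        return image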
Gradient Computation and Backpropagation
from mindspore import value_and_grad
# Instantiate the gradient functions
grad_g_a = value_and_grad(generator_forward_grad, None, net_rg_a.trainable_params())
grad_g_b = value_and_grad(generator_forward_grad, None, net_rg_b.trainable_params())
grad_d_a = value_and_grad(discriminator_forward_a, None, net_d_a.trainable_params())
grad_d_b = value_and_grad(discriminator_forward_b, None, net_d_b.trainable_params())
# Compute the generator gradients and update the parameters via backpropagation
def train_step_g(img_a, img_b):
    net_d_a.set_grad(False)
    net_d_b.set_grad(False)
    fake_a, fake_b, lg, lga, lgb, lca, lcb, lia, lib = generator_forward(img_a, img_b)
    _, grads_g_a = grad_g_a(img_a, img_b)
    _, grads_g_b = grad_g_b(img_a, img_b)
    optimizer_rg_a(grads_g_a)
    optimizer_rg_b(grads_g_b)
    return fake_a, fake_b, lg, lga, lgb, lca, lcb, lia, lib
# Compute the discriminator gradients and update the parameters via backpropagation
def train_step_d(img_a, img_b, fake_a, fake_b):
    net_d_a.set_grad(True)
    net_d_b.set_grad(True)
    loss_d_a, grads_d_a = grad_d_a(img_a, fake_a)
    loss_d_b, grads_d_b = grad_d_b(img_b, fake_b)
    loss_d = (loss_d_a + loss_d_b) * 0.5
    optimizer_d_a(grads_d_a)
    optimizer_d_b(grads_d_b)
    return loss_d
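The pieces above can be wired into a training loop. The sketch below is mine, not the tutorial's: the MindRecord column names are an assumption (check dataset.get_col_names()), and a real run would train for many more epochs and periodically save checkpoints.
# Minimal training-loop sketch (hypothetical column names "image_ap"/"image_op")
epochs = 1
for epoch in range(epochs):
    for step, item in enumerate(dataset.create_dict_iterator()):
        img_a, img_b = item["image_ap"], item["image_op"]
        fake_a, fake_b, loss_g, *_ = train_step_g(img_a, img_b)
        # mix the current fakes with earlier ones before updating the discriminators
        loss_d = train_step_d(img_a, img_b, image_pool(fake_a), image_pool(fake_b))
        if step % 100 == 0:
            print(f"epoch {epoch} step {step}: "
                  f"loss_g={float(loss_g.asnumpy()):.4f} loss_d={float(loss_d.asnumpy()):.4f}")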
Summary
This section introduced CycleGAN, which transfers image style from a source domain $X$ to a target domain $Y$. CycleGAN consists of two mirror-symmetric GAN networks, each with its own generator (to translate styles in the two directions) and its own discriminator. A cycle consistency loss is also introduced to capture the relationship between the generated image and the input image.
Check-in