Deep Learning 2.0 - 35. ResNet-18 in Practice


1. Implementing the Basic Block

The basic block stacks two 3x3 conv-BN layers and adds the input back through a shortcut; when the block changes the spatial resolution, a 1x1 convolution on the shortcut keeps the two branches addable.


import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers,optimizers,Sequential

# The residual building block: conv-bn-relu, conv-bn, plus a shortcut
class BasicBlock(layers.Layer):
    def __init__(self,filter_num,stride=1):
        super(BasicBlock, self).__init__()

        # with strides=1 the output would otherwise be slightly smaller than the input,
        # so padding='same' is set to keep the two sizes equal
        # convolution layer = conv + bn + relu
        self.conv1 = layers.Conv2D(filter_num,kernel_size=(3,3),strides=stride,padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')   # the relu layer has no parameters, so it can be reused

        # convolution layer = conv + bn; the second conv always keeps stride 1
        self.conv2 = layers.Conv2D(filter_num,kernel_size=(3,3),strides=1,padding='same')
        self.bn2 = layers.BatchNormalization()

        # shortcut branch: a 1x1 conv matches the spatial size when the block downsamples
        if stride != 1:
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num,kernel_size=(1,1),strides=stride))
        else:
            self.downsample = lambda x:x

    # forward pass
    def call(self,inputs,training=None):
        # inputs:[b,h,w,c]
        out = self.conv1(inputs)
        out = self.bn1(out)
        out = self.relu(out)
        
        out = self.conv2(out)
        out = self.bn2(out)
        
        identity = self.downsample(inputs)
        
        output = layers.add([out,identity])
        output = self.relu(output)
        # output = tf.nn.relu(output)
        
        return output
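As a quick sanity check (my own sketch, not from the original post; the tensor shape and variable names are purely illustrative), the block can be applied to a random batch to confirm that the residual branch and the shortcut end up with matching shapes:

x = tf.random.normal([4, 32, 32, 64])    # dummy batch: [b, h, w, c]
block = BasicBlock(128, stride=2)        # downsampling block, so the shortcut uses the 1x1 conv
print(block(x).shape)                    # expected: (4, 16, 16, 128)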

2. Implementing the Res Block

A res block is a stack of basic blocks; only the first block in the stack is allowed to downsample (stride != 1), while the remaining blocks keep the resolution.


    def build_resblock(self,filter_num,blocks,stride=1):
        res_blocks = Sequential()
        # only the first block may downsample
        res_blocks.add(BasicBlock(filter_num,stride))

        for _ in range(1,blocks):
            res_blocks.add(BasicBlock(filter_num,stride=1))

        return res_blocks
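For illustration (again my own sketch, with made-up shapes), build_resblock(128, 2, stride=2) behaves like the following stack, where only the first block halves the feature map:

blocks = Sequential([BasicBlock(128, stride=2),    # 32x32 -> 16x16, channels -> 128
                     BasicBlock(128, stride=1)])   # resolution unchanged
print(blocks(tf.random.normal([4, 32, 32, 64])).shape)   # expected: (4, 16, 16, 128)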

3. Implementing ResNet

The ResNet class chains the stem, four res-block stages, global average pooling, and a fully connected classifier.

class ResNet(keras.Model):
    # layer_dims, e.g. [2,2,2,2]: 4 res blocks, each containing two BasicBlocks
    # num_classes=100: 100 output classes
    def __init__(self,layer_dims,num_classes=100):

        super(ResNet, self).__init__()
        # stem (pre-processing) layers
        self.stem = Sequential([layers.Conv2D(64,kernel_size=(3,3),strides=(1,1)),
                                layers.BatchNormalization(),
                                layers.Activation('relu'),
                                layers.MaxPool2D(pool_size=(2,2),strides=(1,1),padding='same')  # pooling layer
                                ])

        # by convention the channel count grows while the feature map shrinks
        self.layer1 = self.build_resblock(64,layer_dims[0])
        self.layer2 = self.build_resblock(128,layer_dims[1],stride=2)
        self.layer3 = self.build_resblock(256,layer_dims[2],stride=2)
        self.layer4 = self.build_resblock(512,layer_dims[3],stride=2)

        # output: [b,h,w,512]; h and w are not fixed in advance, so an adaptive (global) pooling layer is used
        # it averages each of the 512 feature channels over its spatial positions, giving one value per channel
        self.avgpool = layers.GlobalAveragePooling2D()
        # fully connected layer for classification
        self.fc = layers.Dense(num_classes)

    def call(self,inputs,training=None):

        x = self.stem(inputs)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # [b,c]
        x = self.avgpool(x)
        # [b,c]=>[b,100]
        x = self.fc(x)

        return x

    def build_resblock(self,filter_num,blocks,stride=1):
        res_blocks = Sequential()
        # only the first block may downsample
        res_blocks.add(BasicBlock(filter_num,stride))

        for _ in range(1,blocks):
            res_blocks.add(BasicBlock(filter_num,stride=1))

        return res_blocks
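To make the "adaptive pooling" comment concrete, here is a rough shape trace for a 32x32x3 CIFAR image through the network as defined above (the stem conv uses the default 'valid' padding, so it trims one pixel on each border), plus a small check that global average pooling is simply a mean over the spatial axes. This is my own illustration; the variable feat is hypothetical:

# [b, 32, 32, 3] -> stem              -> [b, 30, 30, 64]
#                -> layer1 (stride 1) -> [b, 30, 30, 64]
#                -> layer2 (stride 2) -> [b, 15, 15, 128]
#                -> layer3 (stride 2) -> [b, 8, 8, 256]
#                -> layer4 (stride 2) -> [b, 4, 4, 512]
#                -> avgpool           -> [b, 512]
#                -> fc                -> [b, 100]
feat = tf.random.normal([2, 4, 4, 512])
print(tf.reduce_mean(feat, axis=[1, 2]).shape)   # (2, 512), same result as GlobalAveragePooling2D()(feat)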

4. Implementing ResNet-18

The complete module, which can be saved as resnet.py so the training script in the next section can import it:

import  tensorflow as tf
from    tensorflow import keras
from    tensorflow.keras import layers, Sequential



class BasicBlock(layers.Layer):

    def __init__(self, filter_num, stride=1):
        super(BasicBlock, self).__init__()

        self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')

        self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()

        if stride != 1:
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
        else:
            self.downsample = lambda x:x

    def call(self, inputs, training=None):

        # [b, h, w, c]
        out = self.conv1(inputs)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        identity = self.downsample(inputs)

        output = layers.add([out, identity])
        output = tf.nn.relu(output)

        return output


class ResNet(keras.Model):


    def __init__(self, layer_dims, num_classes=100): # [2, 2, 2, 2]
        super(ResNet, self).__init__()

        self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
                                layers.BatchNormalization(),
                                layers.Activation('relu'),
                                layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
                                ])

        self.layer1 = self.build_resblock(64,  layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(256, layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], stride=2)

        # output: [b, h, w, 512]
        self.avgpool = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(num_classes)

    def call(self, inputs, training=None):

        x = self.stem(inputs)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # [b, c]
        x = self.avgpool(x)
        # [b, 100]
        x = self.fc(x)

        return x

    def build_resblock(self, filter_num, blocks, stride=1):

        res_blocks = Sequential()
        # may down sample
        res_blocks.add(BasicBlock(filter_num, stride))

        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))

        return res_blocks


def resnet18():
    return ResNet([2, 2, 2, 2])


def resnet34():
    return ResNet([3, 4, 6, 3])
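A quick count (my own accounting) explains the names: with layer_dims = [2, 2, 2, 2] there are 2 + 2 + 2 + 2 = 8 basic blocks of two 3x3 convolutions each, so the weighted layers are 1 (stem conv) + 16 (residual convs) + 1 (final Dense) = 18, hence resnet18; the 1x1 shortcut convolutions are conventionally left out of the count. The same arithmetic for [3, 4, 6, 3] gives 1 + 32 + 1 = 34 for resnet34.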

5. Training ResNet-18 on CIFAR-100

The training script imports the model from resnet.py, normalizes the CIFAR-100 images to [-0.5, 0.5], and trains with Adam on a cross-entropy loss computed from logits.

import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets, Sequential
from resnet import resnet18

tf.random.set_seed(2345)

def preprocess(x, y):
    # scale pixels to [-0.5, 0.5]
    x = tf.cast(x, dtype=tf.float32) / 255. - 0.5
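    # e.g. a pixel value of 0 maps to 0/255. - 0.5 = -0.5 and 255 maps to 255/255. - 0.5 = 0.5,
    # which matches the min/max printed for `sample` below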
    y = tf.cast(y, dtype=tf.int32)
    return x, y

(x, y), (x_test, y_test) = datasets.cifar100.load_data()
y = tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test, axis=1)
print(x.shape, y.shape, x_test.shape, y_test.shape)

train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(128)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(128)

sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))


def main():

    model = resnet18()
    model.build(input_shape=(None, 32, 32, 3))
    # print the model structure and parameter counts
    model.summary()
    optimizer = optimizers.Adam(learning_rate=1e-3)

    for epoch in range(500):

        for step, (x, y) in enumerate(train_db):

            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 100]
                logits = model(x)
                # [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)

            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if step % 50 == 0:
                print(epoch, step, 'loss:', float(loss))

        total_num = 0
        total_correct = 0
        for x, y in test_db:
            logits = model(x)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)


if __name__ == '__main__':
    main()
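One detail worth noting: the loops above call model(x) without an explicit training flag, so the BatchNormalization layers fall back to Keras's default handling of that flag. To be explicit about when batch statistics are used and when the moving averages are used, a small adjustment (my suggestion, not part of the original script) is to pass the flag at both call sites:

# in the training step (inside the GradientTape): use batch statistics
logits = model(x, training=True)

# in the evaluation loop over test_db: use the accumulated moving averages
logits = model(x, training=False)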

Part of the training output is shown below:

(50000, 32, 32, 3) (50000,) (10000, 32, 32, 3) (10000,)
sample: (256, 32, 32, 3) (256,) tf.Tensor(-0.5, shape=(), dtype=float32) tf.Tensor(0.5, shape=(), dtype=float32)
Model: "res_net"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
sequential (Sequential)      multiple                  2048      
_________________________________________________________________
sequential_1 (Sequential)    multiple                  148736    
_________________________________________________________________
sequential_2 (Sequential)    multiple                  526976    
_________________________________________________________________
sequential_4 (Sequential)    multiple                  2102528   
_________________________________________________________________
sequential_6 (Sequential)    multiple                  8399360   
_________________________________________________________________
global_average_pooling2d (Gl multiple                  0         
_________________________________________________________________
dense (Dense)                multiple                  51300     
=================================================================
Total params: 11,230,948
Trainable params: 11,223,140
Non-trainable params: 7,808
_________________________________________________________________
0 0 loss: 4.604719638824463
0 50 loss: 4.561609745025635
0 100 loss: 4.337265491485596
0 150 loss: 4.3709611892700195
0 acc: 0.0803
1 0 loss: 4.024875164031982
1 50 loss: 3.8826417922973633
1 100 loss: 3.5792930126190186
1 150 loss: 3.672839641571045
1 acc: 0.1549
2 0 loss: 3.5927116870880127
2 50 loss: 3.357438564300537
2 100 loss: 3.4201531410217285
2 150 loss: 3.187776565551758
2 acc: 0.2268
3 0 loss: 3.1957569122314453
3 50 loss: 3.1121461391448975
3 100 loss: 2.817192316055298
3 150 loss: 2.813638210296631
3 acc: 0.2721
4 0 loss: 3.081834316253662

