用PaddlePaddle实现图像分类-MobileNet-v2

原创
2020/04/20 15:56
阅读数 1.3K

项目简介

本项目使用paddle实现图像分类模型 MobileNet-V2网络的训练和预测。MobileNet-V2是对 MobileNet-V1 的改进,同样是一个轻量级卷积神经网络。关于深度可分离卷积等MobileNet-V1相关的基础理论可以查看:图像分类-MobileNet-V1 建议使用GPU运行。动态图版本请查看:用PaddlePaddle实现图像分类-MobileNet-v2(动态图版)

下载安装命令

## CPU版本安装命令
pip install -f https://paddlepaddle.org.cn/pip/oschina/cpu paddlepaddle

## GPU版本安装命令
pip install -f https://paddlepaddle.org.cn/pip/oschina/gpu paddlepaddle-gpu

模型结构

MobileNet-V1的缺陷

  1. 结构问题: MobileNet-V1使用了传统的直筒结构,然而后续的ResNet等网络结构证明了通过Residual Connection等操作复用图像特征的融合操作可以给网络带来更大的性能提升。
  2. Depthwise Convolution的问题: 虽然理论上N x N的DepthWise+1 x 1的PointWise在性能方面能够接近N x N的普通卷积,但是由于DepthWise的每个kernel维度较小,在ReLU激活函数的影响下,神经元的输出很容易变为0,从而使DepthWise部分的卷积过于稀疏,并且这个问题在定点化低精度训练的时候会进一步放大。

MobileNet-V2的改进

  1. Inverted Residual Block 添加了Residual Connection,由于DepthWise Convolution的参数量较小,所以作者并没有采用类似于ResNet中的先降维、再卷积、再升维的沙漏型Residual Block。而是改为先升维、再卷积、再降维的柳叶型Residual Block,并起名为Inverted Residual Block。具体结构如下图所示,Residual Connection并未在图中画出。
  2. ReLU6 在MobileNet-V1中DepthWise Convolution中最后一个1*1的conv后面接着ReLU6激活函数,而Xception已经证明了在DepthWise卷积后再加ReLU效果会变差,所以V2中去掉了最后的ReLU激活函数,改为线性的输出,具体结构如下图所示,V2中的Residual Connection并未画出。

MobileNet-V2结构图

参考链接

MobileNet V2 论文初读
MobileNetV2: Inverted Residuals and Linear Bottlenecks

数据介绍

使用公开的鲜花数据集,数据集压缩包里包含五个文件夹,每个文件夹一种花卉。分别是雏菊,蒲公英,玫瑰,向日葵,郁金香。每种各690-890张不等。

In[1]
# 解压花朵数据集  
!cd data/data2815 && unzip -q flower_photos.zip
In[2]
# 解压预训练模型参数
!cd data/data6592 && unzip -q MobileNetV2_pretrained.zip
 

预处理数据,将其转化为需要的格式

In[3]
# Preprocess the raw flower dataset into a standard layout and split it into a
# training set and an evaluation set so accuracy can be estimated later.
import codecs
import os
import random
import shutil
from PIL import Image

# fraction of images that go to the training split (80/20 random split)
train_ratio = 4.0 / 5

all_file_dir = 'data/data2815'
# one sub-directory per flower class; skip previously generated "*Set" dirs and hidden dirs
class_list = [c for c in os.listdir(all_file_dir) if os.path.isdir(os.path.join(all_file_dir, c)) and not c.endswith('Set') and not c.startswith('.')]
class_list.sort()
print(class_list)
train_image_dir = os.path.join(all_file_dir, "trainImageSet")
if not os.path.exists(train_image_dir):
    os.makedirs(train_image_dir)

eval_image_dir = os.path.join(all_file_dir, "evalImageSet")
if not os.path.exists(eval_image_dir):
    os.makedirs(eval_image_dir)

# index files: one "<image_path>\t<label_id>" line per image
train_file = codecs.open(os.path.join(all_file_dir, "train.txt"), 'w')
eval_file = codecs.open(os.path.join(all_file_dir, "eval.txt"), 'w')

with codecs.open(os.path.join(all_file_dir, "label_list.txt"), "w") as label_list:
    label_id = 0
    for class_dir in class_list:
        label_list.write("{0}\t{1}\n".format(label_id, class_dir))
        image_path_pre = os.path.join(all_file_dir, class_dir)
        for file in os.listdir(image_path_pre):
            try:
                # Image.open doubles as a cheap validity check for the file
                img = Image.open(os.path.join(image_path_pre, file))
                if random.uniform(0, 1) <= train_ratio:
                    shutil.copyfile(os.path.join(image_path_pre, file), os.path.join(train_image_dir, file))
                    train_file.write("{0}\t{1}\n".format(os.path.join(train_image_dir, file), label_id))
                else:
                    shutil.copyfile(os.path.join(image_path_pre, file), os.path.join(eval_image_dir, file))
                    eval_file.write("{0}\t{1}\n".format(os.path.join(eval_image_dir, file), label_id))
            except Exception as e:
                pass
                # some files cannot be opened; deliberately skip them (light data cleaning)
        label_id += 1

train_file.close()
eval_file.close()
['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
 

模型训练主体

In[4]
# -*- coding: UTF-8 -*-  
"""  
训练常用视觉基础网络,用于分类任务  
需要将训练图片,类别文件 label_list.txt 放置在同一个文件夹下  
程序会先读取 train.txt 文件获取类别数和图片数量  
"""  
from __future__ import absolute_import  
from __future__ import division  
from __future__ import print_function  
import os  
import numpy as np  
import time  
import math  
import paddle  
import paddle.fluid as fluid  
import codecs  
import logging  
  
from paddle.fluid.initializer import MSRA  
from paddle.fluid.initializer import Uniform  
from paddle.fluid.param_attr import ParamAttr  
from PIL import Image  
from PIL import ImageEnhance  
  
# Global training configuration. class_dim / image_count / label_dict are
# placeholders here and are filled in by init_train_parameters().
train_parameters = {
    "input_size": [3, 224, 224],
    "class_dim": -1,  # number of classes, resolved when the custom reader is initialized
    "image_count": -1,  # number of training images, resolved when the custom reader is initialized
    "label_dict": {},
    "data_dir": "data/data2815",  # where the training data lives
    "train_file_list": "train.txt",
    "label_file": "label_list.txt",
    "save_freeze_dir": "./freeze-model",
    "save_persistable_dir": "./check_points/",
    "save_persistable_name": "persistable-params",
    "continue_train": True,        # resume from the last saved checkpoint; takes priority over the pretrained model
    "pretrained": False,            # whether to start from the pretrained model
    "pretrained_dir": "data/data6592/MobileNetV2_pretrained",
    "mode": "train",
    "num_epochs": 10,
    "train_batch_size": 30,
    "mean_rgb": [127.5, 127.5, 127.5],  # per-channel mean; ideally computed from the data, mid-range value used here
    "use_gpu": True,
    "dropout_seed": None,
    "image_enhance_strategy": {  # image-augmentation strategy
        "need_distort": True,  # enable color jitter
        "need_rotate": True,   # enable random rotation
        "need_crop": True,      # enable random crop
        "need_flip": True,      # enable random horizontal flip
        "hue_prob": 0.5,
        "hue_delta": 18,
        "contrast_prob": 0.5,
        "contrast_delta": 0.5,
        "saturation_prob": 0.5,
        "saturation_delta": 0.5,
        "brightness_prob": 0.5,
        "brightness_delta": 0.125
    },
    "early_stop": {
        "sample_frequency": 50,
        "successive_limit": 3,
        "good_acc1": 0.92
    },
    "rsm_strategy": {
        "learning_rate": 0.001,
        "lr_epochs": [20, 40, 60, 80, 100],
        "lr_decay": [1, 0.5, 0.25, 0.1, 0.01, 0.002]
    },
    "momentum_strategy": {
        "learning_rate": 0.001,
        "lr_epochs": [20, 40, 60, 80, 100],
        "lr_decay": [1, 0.5, 0.25, 0.1, 0.01, 0.002]
    },
    "sgd_strategy": {
        "learning_rate": 0.001,
        "lr_epochs": [20, 40, 60, 80, 100],
        "lr_decay": [1, 0.5, 0.25, 0.1, 0.01, 0.002]
    },
    "adam_strategy": {
        "learning_rate": 0.002
    }
}
  
  
class MobileNetV2():
    """MobileNet-V2 classification backbone built with paddle.fluid static-graph ops.

    Structure: conv1 -> 7 groups of inverted-residual bottlenecks -> 1x1 conv
    -> global average pool -> softmax FC, following the paper
    "MobileNetV2: Inverted Residuals and Linear Bottlenecks".
    """

    def __init__(self):
        pass

    def net(self, input, class_dim=1000, scale=1.0):
        """Build the forward graph.

        :param input: NCHW image tensor variable.
        :param class_dim: number of output classes for the final FC layer.
        :param scale: width multiplier applied to channel counts.
        :return: softmax output variable of shape [N, class_dim].
        """

        # Each tuple: (t expansion factor, c output channels, n repeats, s stride of first block)
        bottleneck_params_list = [
            (1, 16, 1, 1),
            (6, 24, 2, 2),
            (6, 32, 3, 2),
            (6, 64, 4, 2),
            (6, 96, 3, 1),
            (6, 160, 3, 2),
            (6, 320, 1, 1),
        ]

        # conv1: stem 3x3 conv, stride 2
        input = self.conv_bn_layer(
            input,
            num_filters=int(32 * scale),
            filter_size=3,
            stride=2,
            padding=1,
            if_act=True,
            name='conv1_1')

        # bottleneck sequences (conv2 .. conv8)
        i = 1
        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            i += 1
            input = self.invresi_blocks(
                input=input,
                in_c=in_c,
                t=t,
                c=int(c * scale),
                n=n,
                s=s,
                name='conv' + str(i))
            in_c = int(c * scale)
        # last 1x1 conv; channel count is never scaled below 1280
        input = self.conv_bn_layer(
            input=input,
            num_filters=int(1280 * scale) if scale > 1.0 else 1280,
            filter_size=1,
            stride=1,
            padding=0,
            if_act=True,
            name='conv9')

        # global average pooling down to 1x1 spatial
        input = fluid.layers.pool2d(
            input=input,
            pool_size=7,
            pool_stride=1,
            pool_type='avg',
            global_pooling=True)

        output = fluid.layers.fc(input=input,
                                 size=class_dim,
                                 act="softmax",
                                 param_attr=ParamAttr(name='fc10_weights'),
                                 bias_attr=ParamAttr(name='fc10_offset'))
        return output

    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      channels=None,
                      num_groups=1,
                      if_act=True,
                      name=None,
                      use_cudnn=True):
        """conv2d (bias-free) + batch_norm, optionally followed by ReLU6.

        Parameter names are derived from *name* so pretrained weights load
        by exact name match — do not change the naming scheme.
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(name=name + '_weights'),
            bias_attr=False)
        bn_name = name + '_bn'
        bn = fluid.layers.batch_norm(
            input=conv,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
        if if_act:
            # ReLU6 clips activations to [0, 6] (friendlier to low-precision inference)
            return fluid.layers.relu6(bn)
        else:
            return bn

    def shortcut(self, input, data_residual):
        """Element-wise residual addition (shapes must already match)."""
        return fluid.layers.elementwise_add(input, data_residual)

    def inverted_residual_unit(self,
                               input,
                               num_in_filter,
                               num_filters,
                               ifshortcut,
                               stride,
                               filter_size,
                               padding,
                               expansion_factor,
                               name=None):
        """One inverted-residual unit: 1x1 expand -> depthwise -> 1x1 linear.

        The last 1x1 projection is linear (no ReLU6), per the V2 paper; the
        shortcut is only added when *ifshortcut* is True.
        """
        num_expfilter = int(round(num_in_filter * expansion_factor))

        channel_expand = self.conv_bn_layer(
            input=input,
            num_filters=num_expfilter,
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1,
            if_act=True,
            name=name + '_expand')

        # depthwise conv: groups == channels; cudnn disabled for grouped depthwise
        bottleneck_conv = self.conv_bn_layer(
            input=channel_expand,
            num_filters=num_expfilter,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            num_groups=num_expfilter,
            if_act=True,
            name=name + '_dwise',
            use_cudnn=False)

        linear_out = self.conv_bn_layer(
            input=bottleneck_conv,
            num_filters=num_filters,
            filter_size=1,
            stride=1,
            padding=0,
            num_groups=1,
            if_act=False,
            name=name + '_linear')
        if ifshortcut:
            out = self.shortcut(input=input, data_residual=linear_out)
            return out
        else:
            return linear_out

    def invresi_blocks(self, input, in_c, t, c, n, s, name=None):
        """A group of *n* inverted-residual units.

        Only the first unit may downsample (stride *s*) and change channels,
        so it gets no shortcut; the remaining units are stride-1 with shortcuts.
        """
        first_block = self.inverted_residual_unit(
            input=input,
            num_in_filter=in_c,
            num_filters=c,
            ifshortcut=False,
            stride=s,
            filter_size=3,
            padding=1,
            expansion_factor=t,
            name=name + '_1')

        last_residual_block = first_block
        last_c = c

        for i in range(1, n):
            last_residual_block = self.inverted_residual_unit(
                input=last_residual_block,
                num_in_filter=last_c,
                num_filters=c,
                ifshortcut=True,
                stride=1,
                filter_size=3,
                padding=1,
                expansion_factor=t,
                name=name + '_' + str(i + 1))
        return last_residual_block
  
  
def init_log_config():
    """Configure the root logger with console and file output.

    Messages go both to the console and to logs/train.log (truncated on
    every run); the file handler records down to DEBUG level.
    """
    global logger
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_dir = os.path.join(os.getcwd(), 'logs')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    fmt = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)
    file_handler = logging.FileHandler(os.path.join(log_dir, 'train.log'), mode='w')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(fmt)
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
  
  
def init_train_parameters():
    """Fill in the data-dependent fields of train_parameters.

    Reads label_list.txt to build label_dict and class_dim, and counts the
    lines of train.txt to obtain image_count.
    """
    data_dir = train_parameters['data_dir']
    label_path = os.path.join(data_dir, train_parameters['label_file'])
    train_list_path = os.path.join(data_dir, train_parameters['train_file_list'])

    with codecs.open(label_path, encoding='utf-8') as label_file:
        num_classes = 0
        for raw_line in label_file:
            parts = raw_line.strip().split()
            # each line: "<label_id>\t<class_name>"
            train_parameters['label_dict'][parts[1]] = int(parts[0])
            num_classes += 1
        train_parameters['class_dim'] = num_classes

    with codecs.open(train_list_path, encoding='utf-8') as train_list:
        train_parameters['image_count'] = len([ln for ln in train_list])
  
  
def resize_img(img, target_size):
    """Force-resize an image to the configured input size.

    :param img: PIL.Image to resize.
    :param target_size: [channels, height, width], e.g. [3, 224, 224];
        the resize uses (target_size[1], target_size[2]).
    :return: the resized PIL.Image (bilinear interpolation).
    """
    # BUG FIX: the original overwrote the parameter with an undefined global
    # (`target_size = input_size`), raising NameError whenever this was
    # called (test-mode path of the reader). Use the passed-in argument.
    img = img.resize((target_size[1], target_size[2]), Image.BILINEAR)
    return img
  
  
def random_crop(img, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.]):
    """Inception-style random resized crop.

    Samples a random aspect ratio from *ratio* and a random area fraction
    from *scale*, crops a box of that size at a random position, then
    resizes the crop to the configured network input size.

    NOTE(review): the mutable default args are never mutated here, so the
    shared-default pitfall does not bite.
    NOTE(review): if the sampled box exceeds the image in one dimension,
    np.random.randint would raise — presumably the source photos are always
    large enough; verify against the dataset.
    """
    aspect_ratio = math.sqrt(np.random.uniform(*ratio))
    w = 1. * aspect_ratio
    h = 1. / aspect_ratio

    # clamp the area fraction so the crop box can fit inside the image
    bound = min((float(img.size[0]) / img.size[1]) / (w**2),
                (float(img.size[1]) / img.size[0]) / (h**2))
    scale_max = min(scale[1], bound)
    scale_min = min(scale[0], bound)

    target_area = img.size[0] * img.size[1] * np.random.uniform(scale_min,
                                                                scale_max)
    target_size = math.sqrt(target_area)
    w = int(target_size * w)
    h = int(target_size * h)

    # random top-left corner of the crop box
    i = np.random.randint(0, img.size[0] - w + 1)
    j = np.random.randint(0, img.size[1] - h + 1)

    img = img.crop((i, j, i + w, j + h))
    img = img.resize((train_parameters['input_size'][1], train_parameters['input_size'][2]), Image.BILINEAR)
    return img
  
  
def rotate_image(img):
    """Data augmentation: rotate the image by a random angle in [-14, 14] degrees."""
    return img.rotate(np.random.randint(-14, 15))
  
  
def random_brightness(img):
    """Data augmentation: randomly adjust brightness.

    With probability `brightness_prob`, scales brightness by a factor drawn
    uniformly from [1 - brightness_delta, 1 + brightness_delta].
    """
    strategy = train_parameters['image_enhance_strategy']
    if np.random.uniform(0, 1) < strategy['brightness_prob']:
        limit = strategy['brightness_delta']
        factor = 1 + np.random.uniform(-limit, limit)
        img = ImageEnhance.Brightness(img).enhance(factor)
    return img
  
  
def random_contrast(img):
    """Data augmentation: randomly adjust contrast.

    With probability `contrast_prob`, scales contrast by a factor drawn
    uniformly from [1 - contrast_delta, 1 + contrast_delta].
    """
    strategy = train_parameters['image_enhance_strategy']
    if np.random.uniform(0, 1) < strategy['contrast_prob']:
        limit = strategy['contrast_delta']
        factor = 1 + np.random.uniform(-limit, limit)
        img = ImageEnhance.Contrast(img).enhance(factor)
    return img
  
  
def random_saturation(img):
    """Data augmentation: randomly adjust color saturation.

    With probability `saturation_prob`, scales saturation by a factor drawn
    uniformly from [1 - saturation_delta, 1 + saturation_delta].
    """
    strategy = train_parameters['image_enhance_strategy']
    if np.random.uniform(0, 1) < strategy['saturation_prob']:
        limit = strategy['saturation_delta']
        factor = 1 + np.random.uniform(-limit, limit)
        img = ImageEnhance.Color(img).enhance(factor)
    return img
  
  
def random_hue(img):
    """Data augmentation: randomly shift the hue.

    With probability `hue_prob`, adds a uniform offset in
    [-hue_delta, hue_delta] to the H channel in HSV space (uint8
    arithmetic wraps around, matching hue's circular nature).
    """
    strategy = train_parameters['image_enhance_strategy']
    if np.random.uniform(0, 1) < strategy['hue_prob']:
        limit = strategy['hue_delta']
        offset = np.random.uniform(-limit, limit)
        hsv = np.array(img.convert('HSV'))
        hsv[:, :, 0] = hsv[:, :, 0] + offset
        img = Image.fromarray(hsv, mode='HSV').convert('RGB')
    return img
  
  
def distort_color(img):
    """Probabilistic color jitter.

    With ~35% probability applies brightness/contrast/saturation/hue in one
    order, with ~35% probability in another order, otherwise leaves the
    image untouched.
    """
    roll = np.random.uniform(0, 1)
    if roll < 0.35:
        ops = (random_brightness, random_contrast, random_saturation, random_hue)
    elif roll < 0.7:
        ops = (random_brightness, random_saturation, random_hue, random_contrast)
    else:
        ops = ()
    for op in ops:
        img = op(img)
    return img
  
  
def custom_image_reader(file_list, data_dir, mode):
    """Create a custom image reader over an index file.

    In 'train'/'val' mode the returned generator yields (image, label)
    pairs, where the image is augmented, mean-subtracted, converted to CHW
    float32 and scaled by 0.007843 (~1/127.5). In 'test' mode it yields
    only the preprocessed image.

    :param file_list: path to the index file ("<img_path>\\t<label>" per line).
    :param data_dir: base directory used to resolve paths in 'test' mode.
    :param mode: 'train', 'val' or 'test'.
    :return: the reader generator function.
    """
    with codecs.open(file_list) as flist:
        lines = [line.strip() for line in flist]

    def reader():
        # reshuffle sample order on every epoch
        np.random.shuffle(lines)
        for line in lines:
            if mode == 'train' or mode == 'val':
                img_path, label = line.split()
                img = Image.open(img_path)
                try:
                    if img.mode != 'RGB':
                        img = img.convert('RGB')
                    if train_parameters['image_enhance_strategy']['need_distort'] == True:
                        img = distort_color(img)
                    if train_parameters['image_enhance_strategy']['need_rotate'] == True:
                        img = rotate_image(img)
                    if train_parameters['image_enhance_strategy']['need_crop'] == True:
                        img = random_crop(img, train_parameters['input_size'])
                    if train_parameters['image_enhance_strategy']['need_flip'] == True:
                        mirror = int(np.random.uniform(0, 2))
                        if mirror == 1:
                            img = img.transpose(Image.FLIP_LEFT_RIGHT)
                    # HWC--->CHW && normalized
                    img = np.array(img).astype('float32')
                    img -= train_parameters['mean_rgb']
                    img = img.transpose((2, 0, 1))  # HWC to CHW
                    img *= 0.007843                 # scale pixel values to roughly [-1, 1]
                    yield img, int(label)
                except Exception as e:
                    pass                            # some images may fail to load/process; skip them
            elif mode == 'test':
                img_path = os.path.join(data_dir, line)
                img = Image.open(img_path)
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                img = resize_img(img, train_parameters['input_size'])
                # HWC--->CHW && normalized
                img = np.array(img).astype('float32')
                img -= train_parameters['mean_rgb']
                img = img.transpose((2, 0, 1))  # HWC to CHW
                img *= 0.007843  # scale pixel values to roughly [-1, 1]
                yield img

    return reader
  
  
def optimizer_momentum_setting():
    """Momentum optimizer with a piecewise step-decay learning rate.

    Step decay suits relatively large training sets.
    """
    strategy = train_parameters['momentum_strategy']
    steps_per_epoch = train_parameters["image_count"] // train_parameters["train_batch_size"]
    base_lr = strategy['learning_rate']

    boundaries = [epoch * steps_per_epoch for epoch in strategy["lr_epochs"]]
    values = [decay * base_lr for decay in strategy["lr_decay"]]
    decayed_lr = fluid.layers.piecewise_decay(boundaries, values)
    return fluid.optimizer.MomentumOptimizer(learning_rate=decayed_lr, momentum=0.9)
  
  
def optimizer_rms_setting():
    """RMSProp optimizer with a piecewise step-decay learning rate.

    Step decay suits relatively large training sets.
    """
    strategy = train_parameters['rsm_strategy']
    steps_per_epoch = train_parameters["image_count"] // train_parameters["train_batch_size"]
    base_lr = strategy['learning_rate']

    boundaries = [epoch * steps_per_epoch for epoch in strategy["lr_epochs"]]
    values = [decay * base_lr for decay in strategy["lr_decay"]]

    return fluid.optimizer.RMSProp(
        learning_rate=fluid.layers.piecewise_decay(boundaries, values))
  
  
def optimizer_sgd_setting():
    """SGD optimizer with a piecewise step-decay learning rate.

    Loss drops relatively slowly but the final result is good; step decay
    suits relatively large training sets.
    """
    strategy = train_parameters['sgd_strategy']
    steps_per_epoch = train_parameters["image_count"] // train_parameters["train_batch_size"]
    base_lr = strategy['learning_rate']

    boundaries = [epoch * steps_per_epoch for epoch in strategy["lr_epochs"]]
    values = [decay * base_lr for decay in strategy["lr_decay"]]
    decayed_lr = fluid.layers.piecewise_decay(boundaries, values)
    return fluid.optimizer.SGD(learning_rate=decayed_lr)
  
  
def optimizer_adam_setting():
    """Adam optimizer with a fixed learning rate.

    Drops the loss quickly early on but tends to plateau later.
    """
    return fluid.optimizer.Adam(
        learning_rate=train_parameters['adam_strategy']['learning_rate'])
  
  
def load_params(exe, program):
    """Restore model parameters before training starts.

    Priority: a previously saved checkpoint (continue_train) wins over the
    pretrained weights (pretrained); otherwise training starts from scratch.
    """
    checkpoint = os.path.join(train_parameters['save_persistable_dir'],
                              train_parameters['save_persistable_name'])
    if train_parameters['continue_train'] and os.path.exists(checkpoint):
        logger.info('load params from retrain model')
        fluid.io.load(program, checkpoint)
    elif train_parameters['pretrained'] and os.path.exists(train_parameters['pretrained_dir']):
        logger.info('load params from pretrained model')

        def if_exist(var):
            # only load variables that have a matching file in the pretrained dir
            return os.path.exists(os.path.join(train_parameters['pretrained_dir'], var.name))

        fluid.io.load_vars(exe, train_parameters['pretrained_dir'],
                           main_program=program, predicate=if_exist)
  
  
def train():
    """Top-level training loop.

    Builds the batched reader, the MobileNet-V2 graph, loss/accuracy and the
    optimizer; optionally restores parameters; then trains for num_epochs
    with a simple early-stop rule, periodic persistable checkpoints and
    inference-model exports.
    """
    train_prog = fluid.Program()
    train_startup = fluid.Program()
    logger.info("create prog success")
    logger.info("train config: %s", str(train_parameters))
    logger.info("build input custom reader and data feeder")
    file_list = os.path.join(train_parameters['data_dir'], "train.txt")
    mode = train_parameters['mode']
    batch_reader = paddle.batch(custom_image_reader(file_list, train_parameters['data_dir'], mode),
                                batch_size=train_parameters['train_batch_size'],
                                drop_last=False)
    batch_reader = paddle.reader.shuffle(batch_reader, train_parameters['train_batch_size'])
    place = fluid.CUDAPlace(0) if train_parameters['use_gpu'] else fluid.CPUPlace()
    # placeholders for the input image batch and the integer labels
    img = fluid.data(name='img', shape=[-1] + train_parameters['input_size'], dtype='float32')
    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)

    # build the network
    logger.info("build newwork")
    model = MobileNetV2()
    out = model.net(input=img, class_dim=train_parameters['class_dim'])
    cost = fluid.layers.cross_entropy(out, label)
    avg_cost = fluid.layers.mean(x=cost)
    acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
    # pick one of the optimizers (RMSProp is the active choice)
    optimizer = optimizer_rms_setting()
    # optimizer = optimizer_momentum_setting()
    # optimizer = optimizer_sgd_setting()
    # optimizer = optimizer_adam_setting()
    optimizer.minimize(avg_cost)
    exe = fluid.Executor(place)

    main_program = fluid.default_main_program()
    exe.run(fluid.default_startup_program())
    train_fetch_list = [avg_cost.name, acc_top1.name, out.name]

    if not os.path.exists(train_parameters['save_persistable_dir']):
        os.mkdir(train_parameters['save_persistable_dir'])
    save_persistable = os.path.join(train_parameters['save_persistable_dir'], train_parameters['save_persistable_name'])
    load_params(exe, main_program)

    # main training loop
    stop_strategy = train_parameters['early_stop']
    successive_limit = stop_strategy['successive_limit']
    sample_freq = stop_strategy['sample_frequency']
    good_acc1 = stop_strategy['good_acc1']
    successive_count = 0
    stop_train = False
    total_batch_count = 0
    for pass_id in range(train_parameters["num_epochs"]):
        logger.info("current pass: %d, start read image", pass_id)
        batch_id = 0
        for step_id, data in enumerate(batch_reader()):
            t1 = time.time()
            # logger.info("data size:{0}".format(len(data)))
            loss, acc1, pred_ot = exe.run(main_program,
                                          feed=feeder.feed(data),
                                          fetch_list=train_fetch_list)
            t2 = time.time()
            batch_id += 1
            total_batch_count += 1
            period = t2 - t1
            loss = np.mean(np.array(loss))
            acc1 = np.mean(np.array(acc1))
            if batch_id % 10 == 0:
                logger.info("Pass {0}, trainbatch {1}, loss {2}, acc1 {3}, time {4}".format(pass_id, batch_id, loss, acc1,
                                                                                            "%2.2f sec" % period))
            # simple early stop: enough consecutive batches above the target accuracy
            if acc1 >= good_acc1:
                successive_count += 1
                logger.info("current acc1 {0} meets good {1}, successive count {2}".format(acc1, good_acc1, successive_count))
                fluid.io.save_inference_model(dirname=train_parameters['save_freeze_dir'],
                                              feeded_var_names=['img'],
                                              target_vars=[out],
                                              main_program=main_program,
                                              executor=exe)
                if successive_count >= successive_limit:
                    logger.info("end training")
                    stop_train = True
                    break
            else:
                successive_count = 0

            # periodic checkpoint to limit losses from unexpected interruption
            if total_batch_count % sample_freq == 0:
                logger.info("temp save {0} batch train result, current acc1 {1}".format(total_batch_count, acc1))
                fluid.io.save(main_program, save_persistable)
        if stop_train:
            break
    logger.info("training till last epcho, end training")
    fluid.io.save(main_program, save_persistable)
    fluid.io.save_inference_model(dirname=train_parameters['save_freeze_dir'],
                                              feeded_var_names=['img'],
                                              target_vars=[out],
                                              main_program=main_program,
                                              executor=exe)
  
  
if __name__ == '__main__':
    # Entry point: set up logging, derive dataset stats from the index files,
    # then run the training loop.
    init_log_config()
    init_train_parameters()
    train()
2020-02-13 16:46:58,678-INFO: create prog success
2020-02-13 16:46:58,678 - <ipython-input-4-4703896c7ee9>[line:562] - INFO: create prog success
2020-02-13 16:46:58,681-INFO: train config: {'input_size': [3, 224, 224], 'class_dim': 5, 'image_count': 2961, 'label_dict': {'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflowers': 3, 'tulips': 4}, 'data_dir': 'data/data2815', 'train_file_list': 'train.txt', 'label_file': 'label_list.txt', 'save_freeze_dir': './freeze-model', 'save_persistable_dir': './check_points/', 'save_persistable_name': 'persistable-params', 'continue_train': True, 'pretrained': False, 'pretrained_dir': 'data/data6592/MobileNetV2_pretrained', 'mode': 'train', 'num_epochs': 10, 'train_batch_size': 30, 'mean_rgb': [127.5, 127.5, 127.5], 'use_gpu': True, 'dropout_seed': None, 'image_enhance_strategy': {'need_distort': True, 'need_rotate': True, 'need_crop': True, 'need_flip': True, 'hue_prob': 0.5, 'hue_delta': 18, 'contrast_prob': 0.5, 'contrast_delta': 0.5, 'saturation_prob': 0.5, 'saturation_delta': 0.5, 'brightness_prob': 0.5, 'brightness_delta': 0.125}, 'early_stop': {'sample_frequency': 50, 'successive_limit': 3, 'good_acc1': 0.92}, 'rsm_strategy': {'learning_rate': 0.001, 'lr_epochs': [20, 40, 60, 80, 100], 'lr_decay': [1, 0.5, 0.25, 0.1, 0.01, 0.002]}, 'momentum_strategy': {'learning_rate': 0.001, 'lr_epochs': [20, 40, 60, 80, 100], 'lr_decay': [1, 0.5, 0.25, 0.1, 0.01, 0.002]}, 'sgd_strategy': {'learning_rate': 0.001, 'lr_epochs': [20, 40, 60, 80, 100], 'lr_decay': [1, 0.5, 0.25, 0.1, 0.01, 0.002]}, 'adam_strategy': {'learning_rate': 0.002}}
2020-02-13 16:46:58,681 - <ipython-input-4-4703896c7ee9>[line:563] - INFO: train config: {'input_size': [3, 224, 224], 'class_dim': 5, 'image_count': 2961, 'label_dict': {'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflowers': 3, 'tulips': 4}, 'data_dir': 'data/data2815', 'train_file_list': 'train.txt', 'label_file': 'label_list.txt', 'save_freeze_dir': './freeze-model', 'save_persistable_dir': './check_points/', 'save_persistable_name': 'persistable-params', 'continue_train': True, 'pretrained': False, 'pretrained_dir': 'data/data6592/MobileNetV2_pretrained', 'mode': 'train', 'num_epochs': 10, 'train_batch_size': 30, 'mean_rgb': [127.5, 127.5, 127.5], 'use_gpu': True, 'dropout_seed': None, 'image_enhance_strategy': {'need_distort': True, 'need_rotate': True, 'need_crop': True, 'need_flip': True, 'hue_prob': 0.5, 'hue_delta': 18, 'contrast_prob': 0.5, 'contrast_delta': 0.5, 'saturation_prob': 0.5, 'saturation_delta': 0.5, 'brightness_prob': 0.5, 'brightness_delta': 0.125}, 'early_stop': {'sample_frequency': 50, 'successive_limit': 3, 'good_acc1': 0.92}, 'rsm_strategy': {'learning_rate': 0.001, 'lr_epochs': [20, 40, 60, 80, 100], 'lr_decay': [1, 0.5, 0.25, 0.1, 0.01, 0.002]}, 'momentum_strategy': {'learning_rate': 0.001, 'lr_epochs': [20, 40, 60, 80, 100], 'lr_decay': [1, 0.5, 0.25, 0.1, 0.01, 0.002]}, 'sgd_strategy': {'learning_rate': 0.001, 'lr_epochs': [20, 40, 60, 80, 100], 'lr_decay': [1, 0.5, 0.25, 0.1, 0.01, 0.002]}, 'adam_strategy': {'learning_rate': 0.002}}
2020-02-13 16:46:58,682-INFO: build input custom reader and data feeder
2020-02-13 16:46:58,682 - <ipython-input-4-4703896c7ee9>[line:564] - INFO: build input custom reader and data feeder
2020-02-13 16:46:58,685-INFO: build newwork
2020-02-13 16:46:58,685 - <ipython-input-4-4703896c7ee9>[line:578] - INFO: build newwork
2020-02-13 16:47:02,448-INFO: current pass: 0, start read image
2020-02-13 16:47:02,448 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 0, start read image
2020-02-13 16:47:14,224-INFO: Pass 0, trainbatch 10, loss 1.8577364683151245, acc1 0.2666666805744171, time 0.11 sec
2020-02-13 16:47:14,224 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 0, trainbatch 10, loss 1.8577364683151245, acc1 0.2666666805744171, time 0.11 sec
2020-02-13 16:47:15,428-INFO: Pass 0, trainbatch 20, loss 1.7803314924240112, acc1 0.23333333432674408, time 0.12 sec
2020-02-13 16:47:15,428 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 0, trainbatch 20, loss 1.7803314924240112, acc1 0.23333333432674408, time 0.12 sec
2020-02-13 16:47:16,512-INFO: Pass 0, trainbatch 30, loss 1.5155632495880127, acc1 0.23333333432674408, time 0.11 sec
2020-02-13 16:47:16,512 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 0, trainbatch 30, loss 1.5155632495880127, acc1 0.23333333432674408, time 0.11 sec
2020-02-13 16:47:28,427-INFO: Pass 0, trainbatch 40, loss 1.2129517793655396, acc1 0.46666666865348816, time 0.10 sec
2020-02-13 16:47:28,427 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 0, trainbatch 40, loss 1.2129517793655396, acc1 0.46666666865348816, time 0.10 sec
2020-02-13 16:47:29,505-INFO: Pass 0, trainbatch 50, loss 1.5497139692306519, acc1 0.36666667461395264, time 0.11 sec
2020-02-13 16:47:29,505 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 0, trainbatch 50, loss 1.5497139692306519, acc1 0.36666667461395264, time 0.11 sec
2020-02-13 16:47:29,507-INFO: temp save 50 batch train result, current acc1 0.36666667461395264
2020-02-13 16:47:29,507 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 50 batch train result, current acc1 0.36666667461395264
2020-02-13 16:47:30,975-INFO: Pass 0, trainbatch 60, loss 1.3815921545028687, acc1 0.4000000059604645, time 0.11 sec
2020-02-13 16:47:30,975 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 0, trainbatch 60, loss 1.3815921545028687, acc1 0.4000000059604645, time 0.11 sec
2020-02-13 16:47:41,551-INFO: Pass 0, trainbatch 70, loss 1.4694125652313232, acc1 0.46666666865348816, time 0.12 sec
2020-02-13 16:47:41,551 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 0, trainbatch 70, loss 1.4694125652313232, acc1 0.46666666865348816, time 0.12 sec
2020-02-13 16:47:42,726-INFO: Pass 0, trainbatch 80, loss 1.4448015689849854, acc1 0.3333333432674408, time 0.11 sec
2020-02-13 16:47:42,726 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 0, trainbatch 80, loss 1.4448015689849854, acc1 0.3333333432674408, time 0.11 sec
2020-02-13 16:47:44,081-INFO: Pass 0, trainbatch 90, loss 1.4323869943618774, acc1 0.36666667461395264, time 0.12 sec
2020-02-13 16:47:44,081 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 0, trainbatch 90, loss 1.4323869943618774, acc1 0.36666667461395264, time 0.12 sec
2020-02-13 16:47:48,013-INFO: current pass: 1, start read image
2020-02-13 16:47:48,013 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 1, start read image
2020-02-13 16:47:58,185-INFO: temp save 100 batch train result, current acc1 0.30000001192092896
2020-02-13 16:47:58,185 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 100 batch train result, current acc1 0.30000001192092896
2020-02-13 16:47:59,746-INFO: Pass 1, trainbatch 10, loss 1.2042444944381714, acc1 0.5333333611488342, time 0.11 sec
2020-02-13 16:47:59,746 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 1, trainbatch 10, loss 1.2042444944381714, acc1 0.5333333611488342, time 0.11 sec
2020-02-13 16:48:00,933-INFO: Pass 1, trainbatch 20, loss 1.5015184879302979, acc1 0.46666666865348816, time 0.12 sec
2020-02-13 16:48:00,933 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 1, trainbatch 20, loss 1.5015184879302979, acc1 0.46666666865348816, time 0.12 sec
2020-02-13 16:48:02,298-INFO: Pass 1, trainbatch 30, loss 1.352638840675354, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:48:02,298 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 1, trainbatch 30, loss 1.352638840675354, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:48:13,163-INFO: Pass 1, trainbatch 40, loss 1.4644893407821655, acc1 0.36666667461395264, time 0.11 sec
2020-02-13 16:48:13,163 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 1, trainbatch 40, loss 1.4644893407821655, acc1 0.36666667461395264, time 0.11 sec
2020-02-13 16:48:14,331-INFO: Pass 1, trainbatch 50, loss 1.2125543355941772, acc1 0.4333333373069763, time 0.12 sec
2020-02-13 16:48:14,331 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 1, trainbatch 50, loss 1.2125543355941772, acc1 0.4333333373069763, time 0.12 sec
2020-02-13 16:48:14,444-INFO: temp save 150 batch train result, current acc1 0.5333333611488342
2020-02-13 16:48:14,444 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 150 batch train result, current acc1 0.5333333611488342
2020-02-13 16:48:16,020-INFO: Pass 1, trainbatch 60, loss 1.4655354022979736, acc1 0.5333333611488342, time 0.12 sec
2020-02-13 16:48:16,020 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 1, trainbatch 60, loss 1.4655354022979736, acc1 0.5333333611488342, time 0.12 sec
2020-02-13 16:48:27,783-INFO: Pass 1, trainbatch 70, loss 1.3001445531845093, acc1 0.4333333373069763, time 0.11 sec
2020-02-13 16:48:27,783 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 1, trainbatch 70, loss 1.3001445531845093, acc1 0.4333333373069763, time 0.11 sec
2020-02-13 16:48:29,143-INFO: Pass 1, trainbatch 80, loss 0.9637636542320251, acc1 0.5333333611488342, time 0.11 sec
2020-02-13 16:48:29,143 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 1, trainbatch 80, loss 0.9637636542320251, acc1 0.5333333611488342, time 0.11 sec
2020-02-13 16:48:30,327-INFO: Pass 1, trainbatch 90, loss 1.1228325366973877, acc1 0.5333333611488342, time 0.12 sec
2020-02-13 16:48:30,327 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 1, trainbatch 90, loss 1.1228325366973877, acc1 0.5333333611488342, time 0.12 sec
2020-02-13 16:48:34,350-INFO: current pass: 2, start read image
2020-02-13 16:48:34,350 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 2, start read image
2020-02-13 16:48:44,738-INFO: temp save 200 batch train result, current acc1 0.6666666865348816
2020-02-13 16:48:44,738 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 200 batch train result, current acc1 0.6666666865348816
2020-02-13 16:48:45,964-INFO: Pass 2, trainbatch 10, loss 0.954548180103302, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:48:45,964 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 2, trainbatch 10, loss 0.954548180103302, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:48:47,132-INFO: Pass 2, trainbatch 20, loss 1.4676710367202759, acc1 0.36666667461395264, time 0.12 sec
2020-02-13 16:48:47,132 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 2, trainbatch 20, loss 1.4676710367202759, acc1 0.36666667461395264, time 0.12 sec
2020-02-13 16:48:48,481-INFO: Pass 2, trainbatch 30, loss 1.2924731969833374, acc1 0.4000000059604645, time 0.11 sec
2020-02-13 16:48:48,481 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 2, trainbatch 30, loss 1.2924731969833374, acc1 0.4000000059604645, time 0.11 sec
2020-02-13 16:49:00,403-INFO: Pass 2, trainbatch 40, loss 1.4888590574264526, acc1 0.4000000059604645, time 0.13 sec
2020-02-13 16:49:00,403 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 2, trainbatch 40, loss 1.4888590574264526, acc1 0.4000000059604645, time 0.13 sec
2020-02-13 16:49:01,760-INFO: Pass 2, trainbatch 50, loss 1.5290966033935547, acc1 0.4333333373069763, time 0.13 sec
2020-02-13 16:49:01,760 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 2, trainbatch 50, loss 1.5290966033935547, acc1 0.4333333373069763, time 0.13 sec
2020-02-13 16:49:01,983-INFO: temp save 250 batch train result, current acc1 0.5
2020-02-13 16:49:01,983 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 250 batch train result, current acc1 0.5
2020-02-13 16:49:03,257-INFO: Pass 2, trainbatch 60, loss 1.0284888744354248, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:49:03,257 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 2, trainbatch 60, loss 1.0284888744354248, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:49:14,522-INFO: Pass 2, trainbatch 70, loss 1.1727396249771118, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:49:14,522 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 2, trainbatch 70, loss 1.1727396249771118, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:49:15,688-INFO: Pass 2, trainbatch 80, loss 1.1851352453231812, acc1 0.5333333611488342, time 0.11 sec
2020-02-13 16:49:15,688 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 2, trainbatch 80, loss 1.1851352453231812, acc1 0.5333333611488342, time 0.11 sec
2020-02-13 16:49:16,861-INFO: Pass 2, trainbatch 90, loss 0.8734394907951355, acc1 0.5666666626930237, time 0.12 sec
2020-02-13 16:49:16,861 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 2, trainbatch 90, loss 0.8734394907951355, acc1 0.5666666626930237, time 0.12 sec
2020-02-13 16:49:21,417-INFO: current pass: 3, start read image
2020-02-13 16:49:21,417 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 3, start read image
2020-02-13 16:49:31,785-INFO: temp save 300 batch train result, current acc1 0.6333333253860474
2020-02-13 16:49:31,785 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 300 batch train result, current acc1 0.6333333253860474
2020-02-13 16:49:32,872-INFO: Pass 3, trainbatch 10, loss 1.1105053424835205, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:49:32,872 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 3, trainbatch 10, loss 1.1105053424835205, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:49:34,248-INFO: Pass 3, trainbatch 20, loss 0.8933258056640625, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:49:34,248 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 3, trainbatch 20, loss 0.8933258056640625, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:49:35,431-INFO: Pass 3, trainbatch 30, loss 0.776534378528595, acc1 0.699999988079071, time 0.11 sec
2020-02-13 16:49:35,431 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 3, trainbatch 30, loss 0.776534378528595, acc1 0.699999988079071, time 0.11 sec
2020-02-13 16:49:47,030-INFO: Pass 3, trainbatch 40, loss 1.288002610206604, acc1 0.5333333611488342, time 0.32 sec
2020-02-13 16:49:47,030 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 3, trainbatch 40, loss 1.288002610206604, acc1 0.5333333611488342, time 0.32 sec
2020-02-13 16:49:48,199-INFO: Pass 3, trainbatch 50, loss 1.0243092775344849, acc1 0.5666666626930237, time 0.11 sec
2020-02-13 16:49:48,199 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 3, trainbatch 50, loss 1.0243092775344849, acc1 0.5666666626930237, time 0.11 sec
2020-02-13 16:49:48,580-INFO: temp save 350 batch train result, current acc1 0.6333333253860474
2020-02-13 16:49:48,580 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 350 batch train result, current acc1 0.6333333253860474
2020-02-13 16:49:49,712-INFO: Pass 3, trainbatch 60, loss 1.0591466426849365, acc1 0.46666666865348816, time 0.13 sec
2020-02-13 16:49:49,712 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 3, trainbatch 60, loss 1.0591466426849365, acc1 0.46666666865348816, time 0.13 sec
2020-02-13 16:50:01,582-INFO: Pass 3, trainbatch 70, loss 1.0065597295761108, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:50:01,582 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 3, trainbatch 70, loss 1.0065597295761108, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:50:02,766-INFO: Pass 3, trainbatch 80, loss 1.3617016077041626, acc1 0.4333333373069763, time 0.13 sec
2020-02-13 16:50:02,766 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 3, trainbatch 80, loss 1.3617016077041626, acc1 0.4333333373069763, time 0.13 sec
2020-02-13 16:50:04,143-INFO: Pass 3, trainbatch 90, loss 1.0526816844940186, acc1 0.5333333611488342, time 0.12 sec
2020-02-13 16:50:04,143 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 3, trainbatch 90, loss 1.0526816844940186, acc1 0.5333333611488342, time 0.12 sec
2020-02-13 16:50:08,154-INFO: current pass: 4, start read image
2020-02-13 16:50:08,154 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 4, start read image
2020-02-13 16:50:18,630-INFO: temp save 400 batch train result, current acc1 0.699999988079071
2020-02-13 16:50:18,630 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 400 batch train result, current acc1 0.699999988079071
2020-02-13 16:50:19,833-INFO: Pass 4, trainbatch 10, loss 0.7672765851020813, acc1 0.6333333253860474, time 0.31 sec
2020-02-13 16:50:19,833 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 4, trainbatch 10, loss 0.7672765851020813, acc1 0.6333333253860474, time 0.31 sec
2020-02-13 16:50:21,043-INFO: Pass 4, trainbatch 20, loss 0.9565847516059875, acc1 0.6666666865348816, time 0.13 sec
2020-02-13 16:50:21,043 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 4, trainbatch 20, loss 0.9565847516059875, acc1 0.6666666865348816, time 0.13 sec
2020-02-13 16:50:22,249-INFO: Pass 4, trainbatch 30, loss 1.2698613405227661, acc1 0.5666666626930237, time 0.11 sec
2020-02-13 16:50:22,249 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 4, trainbatch 30, loss 1.2698613405227661, acc1 0.5666666626930237, time 0.11 sec
2020-02-13 16:50:34,282-INFO: Pass 4, trainbatch 40, loss 0.8866437077522278, acc1 0.5333333611488342, time 0.12 sec
2020-02-13 16:50:34,282 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 4, trainbatch 40, loss 0.8866437077522278, acc1 0.5333333611488342, time 0.12 sec
2020-02-13 16:50:35,461-INFO: Pass 4, trainbatch 50, loss 0.8511383533477783, acc1 0.5666666626930237, time 0.12 sec
2020-02-13 16:50:35,461 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 4, trainbatch 50, loss 0.8511383533477783, acc1 0.5666666626930237, time 0.12 sec
2020-02-13 16:50:35,948-INFO: temp save 450 batch train result, current acc1 0.5333333611488342
2020-02-13 16:50:35,948 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 450 batch train result, current acc1 0.5333333611488342
2020-02-13 16:50:37,187-INFO: Pass 4, trainbatch 60, loss 1.01409113407135, acc1 0.6000000238418579, time 0.12 sec
2020-02-13 16:50:37,187 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 4, trainbatch 60, loss 1.01409113407135, acc1 0.6000000238418579, time 0.12 sec
2020-02-13 16:50:48,260-INFO: Pass 4, trainbatch 70, loss 0.6784738302230835, acc1 0.699999988079071, time 0.11 sec
2020-02-13 16:50:48,260 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 4, trainbatch 70, loss 0.6784738302230835, acc1 0.699999988079071, time 0.11 sec
2020-02-13 16:50:49,653-INFO: Pass 4, trainbatch 80, loss 0.6537202000617981, acc1 0.8333333134651184, time 0.36 sec
2020-02-13 16:50:49,653 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 4, trainbatch 80, loss 0.6537202000617981, acc1 0.8333333134651184, time 0.36 sec
2020-02-13 16:50:50,865-INFO: Pass 4, trainbatch 90, loss 0.8890798687934875, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:50:50,865 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 4, trainbatch 90, loss 0.8890798687934875, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:50:54,877-INFO: current pass: 5, start read image
2020-02-13 16:50:54,877 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 5, start read image
2020-02-13 16:51:06,055-INFO: temp save 500 batch train result, current acc1 0.5333333611488342
2020-02-13 16:51:06,055 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 500 batch train result, current acc1 0.5333333611488342
2020-02-13 16:51:06,995-INFO: Pass 5, trainbatch 10, loss 0.9425557851791382, acc1 0.5666666626930237, time 0.12 sec
2020-02-13 16:51:06,995 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 5, trainbatch 10, loss 0.9425557851791382, acc1 0.5666666626930237, time 0.12 sec
2020-02-13 16:51:08,154-INFO: Pass 5, trainbatch 20, loss 1.0944534540176392, acc1 0.5666666626930237, time 0.11 sec
2020-02-13 16:51:08,154 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 5, trainbatch 20, loss 1.0944534540176392, acc1 0.5666666626930237, time 0.11 sec
2020-02-13 16:51:09,568-INFO: Pass 5, trainbatch 30, loss 1.0179704427719116, acc1 0.5333333611488342, time 0.14 sec
2020-02-13 16:51:09,568 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 5, trainbatch 30, loss 1.0179704427719116, acc1 0.5333333611488342, time 0.14 sec
2020-02-13 16:51:21,096-INFO: Pass 5, trainbatch 40, loss 0.6842880845069885, acc1 0.6666666865348816, time 0.13 sec
2020-02-13 16:51:21,096 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 5, trainbatch 40, loss 0.6842880845069885, acc1 0.6666666865348816, time 0.13 sec
2020-02-13 16:51:22,376-INFO: Pass 5, trainbatch 50, loss 1.1201632022857666, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:51:22,376 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 5, trainbatch 50, loss 1.1201632022857666, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:51:23,190-INFO: temp save 550 batch train result, current acc1 0.7666666507720947
2020-02-13 16:51:23,190 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 550 batch train result, current acc1 0.7666666507720947
2020-02-13 16:51:24,145-INFO: Pass 5, trainbatch 60, loss 0.6772313714027405, acc1 0.800000011920929, time 0.11 sec
2020-02-13 16:51:24,145 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 5, trainbatch 60, loss 0.6772313714027405, acc1 0.800000011920929, time 0.11 sec
2020-02-13 16:51:35,626-INFO: Pass 5, trainbatch 70, loss 0.9270073771476746, acc1 0.699999988079071, time 0.12 sec
2020-02-13 16:51:35,626 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 5, trainbatch 70, loss 0.9270073771476746, acc1 0.699999988079071, time 0.12 sec
2020-02-13 16:51:37,014-INFO: Pass 5, trainbatch 80, loss 0.8206346035003662, acc1 0.6333333253860474, time 0.13 sec
2020-02-13 16:51:37,014 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 5, trainbatch 80, loss 0.8206346035003662, acc1 0.6333333253860474, time 0.13 sec
2020-02-13 16:51:38,194-INFO: Pass 5, trainbatch 90, loss 1.0289485454559326, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:51:38,194 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 5, trainbatch 90, loss 1.0289485454559326, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:51:42,329-INFO: current pass: 6, start read image
2020-02-13 16:51:42,329 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 6, start read image
2020-02-13 16:51:54,293-INFO: temp save 600 batch train result, current acc1 0.699999988079071
2020-02-13 16:51:54,293 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 600 batch train result, current acc1 0.699999988079071
2020-02-13 16:51:55,113-INFO: Pass 6, trainbatch 10, loss 0.715276837348938, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:51:55,113 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 6, trainbatch 10, loss 0.715276837348938, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:51:56,289-INFO: Pass 6, trainbatch 20, loss 0.7567002773284912, acc1 0.7666666507720947, time 0.11 sec
2020-02-13 16:51:56,289 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 6, trainbatch 20, loss 0.7567002773284912, acc1 0.7666666507720947, time 0.11 sec
2020-02-13 16:51:57,691-INFO: Pass 6, trainbatch 30, loss 0.9267144799232483, acc1 0.7666666507720947, time 0.13 sec
2020-02-13 16:51:57,691 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 6, trainbatch 30, loss 0.9267144799232483, acc1 0.7666666507720947, time 0.13 sec
2020-02-13 16:52:09,141-INFO: Pass 6, trainbatch 40, loss 1.0969451665878296, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:52:09,141 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 6, trainbatch 40, loss 1.0969451665878296, acc1 0.6333333253860474, time 0.12 sec
2020-02-13 16:52:10,496-INFO: Pass 6, trainbatch 50, loss 1.012272596359253, acc1 0.699999988079071, time 0.11 sec
2020-02-13 16:52:10,496 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 6, trainbatch 50, loss 1.012272596359253, acc1 0.699999988079071, time 0.11 sec
2020-02-13 16:52:11,216-INFO: temp save 650 batch train result, current acc1 0.699999988079071
2020-02-13 16:52:11,216 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 650 batch train result, current acc1 0.699999988079071
2020-02-13 16:52:12,000-INFO: Pass 6, trainbatch 60, loss 1.1281439065933228, acc1 0.5333333611488342, time 0.11 sec
2020-02-13 16:52:12,000 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 6, trainbatch 60, loss 1.1281439065933228, acc1 0.5333333611488342, time 0.11 sec
2020-02-13 16:52:23,464-INFO: Pass 6, trainbatch 70, loss 1.1626155376434326, acc1 0.699999988079071, time 0.14 sec
2020-02-13 16:52:23,464 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 6, trainbatch 70, loss 1.1626155376434326, acc1 0.699999988079071, time 0.14 sec
2020-02-13 16:52:24,653-INFO: Pass 6, trainbatch 80, loss 1.231780767440796, acc1 0.6333333253860474, time 0.13 sec
2020-02-13 16:52:24,653 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 6, trainbatch 80, loss 1.231780767440796, acc1 0.6333333253860474, time 0.13 sec
2020-02-13 16:52:25,847-INFO: Pass 6, trainbatch 90, loss 0.5708886981010437, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:52:25,847 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 6, trainbatch 90, loss 0.5708886981010437, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:52:30,296-INFO: current pass: 7, start read image
2020-02-13 16:52:30,296 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 7, start read image
2020-02-13 16:52:40,815-INFO: temp save 700 batch train result, current acc1 0.8333333134651184
2020-02-13 16:52:40,815 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 700 batch train result, current acc1 0.8333333134651184
2020-02-13 16:52:41,469-INFO: Pass 7, trainbatch 10, loss 0.6180769801139832, acc1 0.7666666507720947, time 0.12 sec
2020-02-13 16:52:41,469 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 7, trainbatch 10, loss 0.6180769801139832, acc1 0.7666666507720947, time 0.12 sec
2020-02-13 16:52:42,862-INFO: Pass 7, trainbatch 20, loss 0.7773688435554504, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:52:42,862 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 7, trainbatch 20, loss 0.7773688435554504, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:52:44,053-INFO: Pass 7, trainbatch 30, loss 0.5643612742424011, acc1 0.7333333492279053, time 0.12 sec
2020-02-13 16:52:44,053 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 7, trainbatch 30, loss 0.5643612742424011, acc1 0.7333333492279053, time 0.12 sec
2020-02-13 16:52:56,045-INFO: Pass 7, trainbatch 40, loss 1.2130895853042603, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:52:56,045 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 7, trainbatch 40, loss 1.2130895853042603, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:52:57,231-INFO: Pass 7, trainbatch 50, loss 0.9061844944953918, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:52:57,231 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 7, trainbatch 50, loss 0.9061844944953918, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:52:58,071-INFO: temp save 750 batch train result, current acc1 0.7333333492279053
2020-02-13 16:52:58,071 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 750 batch train result, current acc1 0.7333333492279053
2020-02-13 16:52:58,757-INFO: Pass 7, trainbatch 60, loss 0.682985246181488, acc1 0.800000011920929, time 0.11 sec
2020-02-13 16:52:58,757 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 7, trainbatch 60, loss 0.682985246181488, acc1 0.800000011920929, time 0.11 sec
2020-02-13 16:53:10,244-INFO: Pass 7, trainbatch 70, loss 1.0602455139160156, acc1 0.5, time 0.13 sec
2020-02-13 16:53:10,244 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 7, trainbatch 70, loss 1.0602455139160156, acc1 0.5, time 0.13 sec
2020-02-13 16:53:11,397-INFO: Pass 7, trainbatch 80, loss 0.6763803958892822, acc1 0.7333333492279053, time 0.12 sec
2020-02-13 16:53:11,397 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 7, trainbatch 80, loss 0.6763803958892822, acc1 0.7333333492279053, time 0.12 sec
2020-02-13 16:53:12,752-INFO: Pass 7, trainbatch 90, loss 0.9885975122451782, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:53:12,752 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 7, trainbatch 90, loss 0.9885975122451782, acc1 0.6000000238418579, time 0.11 sec
2020-02-13 16:53:16,626-INFO: current pass: 8, start read image
2020-02-13 16:53:16,626 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 8, start read image
2020-02-13 16:53:27,520-INFO: temp save 800 batch train result, current acc1 0.7333333492279053
2020-02-13 16:53:27,520 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 800 batch train result, current acc1 0.7333333492279053
2020-02-13 16:53:28,254-INFO: Pass 8, trainbatch 10, loss 0.8577665090560913, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:53:28,254 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 8, trainbatch 10, loss 0.8577665090560913, acc1 0.6333333253860474, time 0.11 sec
2020-02-13 16:53:29,412-INFO: Pass 8, trainbatch 20, loss 0.7546260356903076, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:53:29,412 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 8, trainbatch 20, loss 0.7546260356903076, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:53:30,565-INFO: Pass 8, trainbatch 30, loss 1.004921317100525, acc1 0.5, time 0.12 sec
2020-02-13 16:53:30,565 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 8, trainbatch 30, loss 1.004921317100525, acc1 0.5, time 0.12 sec
2020-02-13 16:53:41,818-INFO: Pass 8, trainbatch 40, loss 0.7440248131752014, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:53:41,818 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 8, trainbatch 40, loss 0.7440248131752014, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:53:42,976-INFO: Pass 8, trainbatch 50, loss 1.13462233543396, acc1 0.5333333611488342, time 0.13 sec
2020-02-13 16:53:42,976 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 8, trainbatch 50, loss 1.13462233543396, acc1 0.5333333611488342, time 0.13 sec
2020-02-13 16:53:44,087-INFO: temp save 850 batch train result, current acc1 0.7666666507720947
2020-02-13 16:53:44,087 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 850 batch train result, current acc1 0.7666666507720947
2020-02-13 16:53:44,660-INFO: Pass 8, trainbatch 60, loss 1.0528401136398315, acc1 0.6666666865348816, time 0.11 sec
2020-02-13 16:53:44,660 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 8, trainbatch 60, loss 1.0528401136398315, acc1 0.6666666865348816, time 0.11 sec
2020-02-13 16:53:56,118-INFO: Pass 8, trainbatch 70, loss 0.707714855670929, acc1 0.7666666507720947, time 0.13 sec
2020-02-13 16:53:56,118 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 8, trainbatch 70, loss 0.707714855670929, acc1 0.7666666507720947, time 0.13 sec
2020-02-13 16:53:57,494-INFO: Pass 8, trainbatch 80, loss 0.730656623840332, acc1 0.7666666507720947, time 0.11 sec
2020-02-13 16:53:57,494 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 8, trainbatch 80, loss 0.730656623840332, acc1 0.7666666507720947, time 0.11 sec
2020-02-13 16:53:58,662-INFO: Pass 8, trainbatch 90, loss 0.5127027630805969, acc1 0.800000011920929, time 0.13 sec
2020-02-13 16:53:58,662 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 8, trainbatch 90, loss 0.5127027630805969, acc1 0.800000011920929, time 0.13 sec
2020-02-13 16:54:02,694-INFO: current pass: 9, start read image
2020-02-13 16:54:02,694 - <ipython-input-4-4703896c7ee9>[line:610] - INFO: current pass: 9, start read image
2020-02-13 16:54:14,146-INFO: temp save 900 batch train result, current acc1 0.800000011920929
2020-02-13 16:54:14,146 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 900 batch train result, current acc1 0.800000011920929
2020-02-13 16:54:14,546-INFO: Pass 9, trainbatch 10, loss 0.7061624526977539, acc1 0.7666666507720947, time 0.12 sec
2020-02-13 16:54:14,546 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 9, trainbatch 10, loss 0.7061624526977539, acc1 0.7666666507720947, time 0.12 sec
2020-02-13 16:54:15,715-INFO: Pass 9, trainbatch 20, loss 0.7127918004989624, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:54:15,715 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 9, trainbatch 20, loss 0.7127918004989624, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:54:17,105-INFO: Pass 9, trainbatch 30, loss 0.5467488765716553, acc1 0.7666666507720947, time 0.12 sec
2020-02-13 16:54:17,105 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 9, trainbatch 30, loss 0.5467488765716553, acc1 0.7666666507720947, time 0.12 sec
2020-02-13 16:54:28,065-INFO: Pass 9, trainbatch 40, loss 0.4181240200996399, acc1 0.8333333134651184, time 0.11 sec
2020-02-13 16:54:28,065 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 9, trainbatch 40, loss 0.4181240200996399, acc1 0.8333333134651184, time 0.11 sec
2020-02-13 16:54:29,444-INFO: Pass 9, trainbatch 50, loss 1.0870100259780884, acc1 0.5666666626930237, time 0.32 sec
2020-02-13 16:54:29,444 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 9, trainbatch 50, loss 1.0870100259780884, acc1 0.5666666626930237, time 0.32 sec
2020-02-13 16:54:30,509-INFO: temp save 950 batch train result, current acc1 0.800000011920929
2020-02-13 16:54:30,509 - <ipython-input-4-4703896c7ee9>[line:645] - INFO: temp save 950 batch train result, current acc1 0.800000011920929
2020-02-13 16:54:30,966-INFO: Pass 9, trainbatch 60, loss 0.6866453886032104, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:54:30,966 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 9, trainbatch 60, loss 0.6866453886032104, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:54:42,221-INFO: Pass 9, trainbatch 70, loss 0.6243331432342529, acc1 0.7666666507720947, time 0.12 sec
2020-02-13 16:54:42,221 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 9, trainbatch 70, loss 0.6243331432342529, acc1 0.7666666507720947, time 0.12 sec
2020-02-13 16:54:43,574-INFO: Pass 9, trainbatch 80, loss 0.5491384267807007, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:54:43,574 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 9, trainbatch 80, loss 0.5491384267807007, acc1 0.7333333492279053, time 0.11 sec
2020-02-13 16:54:44,754-INFO: Pass 9, trainbatch 90, loss 0.7589463591575623, acc1 0.800000011920929, time 0.13 sec
2020-02-13 16:54:44,754 - <ipython-input-4-4703896c7ee9>[line:626] - INFO: Pass 9, trainbatch 90, loss 0.7589463591575623, acc1 0.800000011920929, time 0.13 sec
2020-02-13 16:54:49,023-INFO: training till last epcho, end training
2020-02-13 16:54:49,023 - <ipython-input-4-4703896c7ee9>[line:649] - INFO: training till last epcho, end training
 

加载训练保存的模型,验证效果

In[5]
from __future__ import absolute_import    
from __future__ import division    
from __future__ import print_function    
    
import os    
import numpy as np    
import random    
import time    
import codecs    
import sys    
import functools    
import math    
import paddle    
import paddle.fluid as fluid    
from paddle.fluid import core    
from paddle.fluid.param_attr import ParamAttr    
from PIL import Image, ImageEnhance    
    
# Inference-time configuration: must mirror the preprocessing used in training.
target_size = [3, 224, 224]    # model input shape, CHW
mean_rgb = [127.5, 127.5, 127.5]    # per-channel mean subtracted before scaling
data_dir = "data/data2815"    # root dir holding images and eval.txt
eval_file = "eval.txt"    # one "<image_path> <label_index>" pair per line
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
save_freeze_dir = "./freeze-model"    # dir produced by save_inference_model during training
# Load the frozen inference program; returns the program itself, the names of
# the feed variables, and the output variables to fetch at run time.
[inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(dirname=save_freeze_dir, executor=exe)
# print(fetch_targets)
    
    
def crop_image(img, target_size):
    """Center-crop ``img`` to ``target_size[2]`` x ``target_size[1]`` (W x H).

    Args:
        img: a PIL Image (anything exposing ``.size`` and ``.crop``).
        target_size: ``[channels, height, width]`` list, matching the model input.

    Returns:
        The cropped PIL Image.

    Note:
        Integer floor division is used so the crop box is integral; the
        original code produced float coordinates (true division) and relied
        on Pillow truncating them internally. If the image is smaller than
        the target, the box extends past the border and Pillow pads with
        black — same as before.
    """
    width, height = img.size
    w_start = (width - target_size[2]) // 2
    h_start = (height - target_size[1]) // 2
    w_end = w_start + target_size[2]
    h_end = h_start + target_size[1]
    return img.crop((w_start, h_start, w_end, h_end))
    
    
def resize_img(img, target_size):
    """Bilinearly resize ``img`` to ``target_size[1]`` x ``target_size[2]``.

    NOTE(review): this helper is defined but never called by ``read_image``
    below, which crops the raw image directly — confirm whether a resize
    step was intended before the crop.
    """
    new_size = (target_size[1], target_size[2])
    return img.resize(new_size, Image.BILINEAR)
    
    
def read_image(img_path):
    """Load one image file and preprocess it into a (1, 3, H, W) float32 batch.

    Mirrors the training-time pipeline: force RGB, center-crop to
    ``target_size``, subtract ``mean_rgb``, transpose HWC -> CHW, and scale
    by 0.007843 (~= 1/127.5).
    """
    img = Image.open(img_path)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    img = crop_image(img, target_size)
    arr = np.array(img).astype('float32')
    arr -= mean_rgb
    arr = arr.transpose((2, 0, 1))  # HWC to CHW
    arr *= 0.007843
    return arr[np.newaxis, :]
    
    
def infer(image_path):
    """Run the frozen model on a single image and return the predicted class id."""
    batch = read_image(image_path)
    feed_dict = {feed_target_names[0]: batch}
    outputs = exe.run(inference_program, feed=feed_dict, fetch_list=fetch_targets)
    return np.argmax(outputs)
    
    
def eval_all():
    """Evaluate the frozen model over every sample listed in the eval file.

    Each non-empty line of ``eval_file`` is ``<image_path> <label_index>``.
    Prints the sample count, wall-clock time and top-1 accuracy.

    Robustness fixes vs. the original: blank lines are skipped (they used to
    raise IndexError on ``parts[0]``), and an empty eval set no longer raises
    ZeroDivisionError.
    """
    eval_file_path = os.path.join(data_dir, eval_file)
    total_count = 0
    right_count = 0
    with codecs.open(eval_file_path, encoding='utf-8') as flist:
        lines = [line.strip() for line in flist]
    t1 = time.time()
    for line in lines:
        if not line:
            # Skip blank lines instead of crashing on parts[0]/parts[1].
            continue
        total_count += 1
        parts = line.split()
        result = infer(parts[0])
        # print("infer result:{0} answer:{1}".format(result, parts[1]))
        if str(result) == parts[1]:
            right_count += 1
    period = time.time() - t1
    accuracy = right_count / total_count if total_count else 0.0
    print("total eval count:{0} cost time:{1} predict accuracy:{2}".format(total_count, "%2.2f sec" % period, accuracy))
    
    
# Script entry point: evaluate the frozen model over the whole eval split.
if __name__ == '__main__':    
    eval_all()  
total eval count:709 cost time:49.18 sec predict accuracy:0.7009873060648801
In[  ]
!tar -cf mbnet-v2.tar freeze-model/

 点击链接,使用AI Studio一键上手实践项目吧:https://aistudio.baidu.com/aistudio/projectdetail/169398

下载安装命令

## CPU版本安装命令
pip install -f https://paddlepaddle.org.cn/pip/oschina/cpu paddlepaddle

## GPU版本安装命令
pip install -f https://paddlepaddle.org.cn/pip/oschina/gpu paddlepaddle-gpu

>> 访问 PaddlePaddle 官网,了解更多相关内容

展开阅读全文
打赏
0
0 收藏
分享
加载中
更多评论
打赏
0 评论
0 收藏
0
分享
返回顶部
顶部