Accuracy reported by caffe and pycaffe are different


Question

Below is the train.prototxt file that is used to fine-tune a pretrained model.

    name: "TempWLDNET"
    layer {
      name: "data"
      type: "ImageData"
      top: "data"
      top: "label"
      include {
        phase: TRAIN
      }
      transform_param {
        mirror: true
        crop_size: 224 
        mean_file: "mean.binaryproto"
      }
      image_data_param {
        source: "train.txt"
        batch_size: 25
        new_height: 256 
        new_width: 256 
      }
    }
    layer {
      name: "data"
      type: "ImageData"
      top: "data"
      top: "label"
      include {
        phase: TEST
      }
      transform_param {
        mirror: false
        crop_size: 224 
        mean_file: "painmean.binaryproto"
      }
      image_data_param {
        source: "test.txt"
        batch_size: 25
        new_height: 256 
        new_width: 256 
      }
    }
    layer {
      name: "conv1"
      type: "Convolution"
      bottom: "data"
      top: "conv1"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 96
        kernel_size: 7
        stride: 2
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layer {
      name: "relu1"
      type: "ReLU"
      bottom: "conv1"
      top: "conv1"
    }
    layer {
      name: "norm1"
      type: "LRN"
      bottom: "conv1"
      top: "norm1"
      lrn_param {
        local_size: 5
        alpha: 0.0005
        beta: 0.75
      }
    }
    layer {
      name: "pool1"
      type: "Pooling"
      bottom: "norm1"
      top: "pool1"
      pooling_param {
        pool: MAX
        kernel_size: 3
        stride: 3
      }
    }
    layer {
      name: "conv2"
      type: "Convolution"
      bottom: "pool1"
      top: "conv2"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 256
        pad: 2
        kernel_size: 5
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layer {
      name: "relu2"
      type: "ReLU"
      bottom: "conv2"
      top: "conv2"
    }
    layer {
      name: "pool2"
      type: "Pooling"
      bottom: "conv2"
      top: "pool2"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layer {
      name: "conv3"
      type: "Convolution"
      bottom: "pool2"
      top: "conv3"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layer {
      name: "relu3"
      type: "ReLU"
      bottom: "conv3"
      top: "conv3"
    }
    layer {
      name: "conv4"
      type: "Convolution"
      bottom: "conv3"
      top: "conv4"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layer {
      name: "relu4"
      type: "ReLU"
      bottom: "conv4"
      top: "conv4"
    }
    layer {
      name: "conv5"
      type: "Convolution"
      bottom: "conv4"
      top: "conv5"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layer {
      name: "relu5"
      type: "ReLU"
      bottom: "conv5"
      top: "conv5"
    }
    layer {
      name: "pool5"
      type: "Pooling"
      bottom: "conv5"
      top: "pool5"
      pooling_param {
        pool: MAX
        kernel_size: 3
        stride: 3
      }
    }
    layer {
      name: "fc6"
      type: "InnerProduct"
      bottom: "pool5"
      top: "fc6"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      inner_product_param {
        num_output: 4048
        weight_filler {
          type: "gaussian"
          std: 0.005
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layer {
      name: "relu6"
      type: "ReLU"
      bottom: "fc6"
      top: "fc6"
    }
    layer {
      name: "drop6"
      type: "Dropout"
      bottom: "fc6"
      top: "fc6"
      dropout_param {
        dropout_ratio: 0.5
      }
    }
    layer {
      name: "fc7"
      type: "InnerProduct"
      bottom: "fc6"
      top: "fc7"
      # Note that lr_mult can be set to 0 to disable any fine-tuning of this, and any other, layer
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      inner_product_param {
        num_output: 4048
        weight_filler {
          type: "gaussian"
          std: 0.005
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layer {
      name: "relu7"
      type: "ReLU"
      bottom: "fc7"
      top: "fc7"
    }
    layer {
      name: "drop7"
      type: "Dropout"
      bottom: "fc7"
      top: "fc7"
      dropout_param {
        dropout_ratio: 0.5
      }
    }
    layer {
      name: "fc8_temp"
      type: "InnerProduct"
      bottom: "fc7"
      top: "fc8_temp"
      # lr_mult is set to higher than for other layers, because this layer is starting from random while the others are already trained
      param {
        lr_mult: 10
        decay_mult: 1
      }
      param {
        lr_mult: 20
        decay_mult: 0
      }
      inner_product_param {
        num_output: 16
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layer {
      name: "accuracy"
      type: "Accuracy"
      bottom: "fc8_temp"
      bottom: "label"
      top: "accuracy"
      include {
        phase: TEST
      }
    }
    layer {
      name: "loss"
      type: "SoftmaxWithLoss"
      bottom: "fc8_temp"
      bottom: "label"
      top: "loss"
    }

Using the above prototxt file, the accuracy reported for the test set at the end of training is 92%. For more details please see How to evaluate the accuracy and loss of a trained model is good or not in caffe?
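
For reference, the 92% above comes from the built-in TEST phase during training; an equivalent standalone check with the caffe command-line tool would look roughly like the line below. The -iterations value here is an assumption and should be chosen so that iterations × batch_size (25 here) covers the whole test set.

    caffe test -model train.prototxt -weights models/model_iter_13000.caffemodel -iterations 40 -gpu 0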

I took the model snapshot at the end of 13,000 iterations and, using the Python script below, tried to construct the confusion matrix. The accuracy reported is 74%.

    #!/usr/bin/python
    # -*- coding: utf-8 -*-

    import sys
    import caffe
    import numpy as np
    import argparse
    from collections import defaultdict

    TRAIN_DATA_ROOT='/Images/test/'

    if __name__ == "__main__":
            parser = argparse.ArgumentParser()
            parser.add_argument('--proto', type=str, required=True)
            parser.add_argument('--model', type=str, required=True)
            parser.add_argument('--meanfile', type=str, required=True)
            parser.add_argument('--labelfile', type=str, required=True)
            args = parser.parse_args()

            proto_data = open(args.meanfile, 'rb').read()
            a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
            mean  = caffe.io.blobproto_to_array(a)[0]


            caffe.set_mode_gpu()

            count = 0
            correct = 0
            matrix = defaultdict(int) # (real,pred) -> int
            labels_set = set()

            net = caffe.Net(args.proto, args.model, caffe.TEST)
            # load input and configure preprocessing    
            transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
            transformer.set_mean('data', mean)
            transformer.set_transpose('data', (2,0,1))
            transformer.set_channel_swap('data', (2,1,0))
            transformer.set_raw_scale('data', 1)


            #note we can change the batch size on-the-fly
            #since we classify only one image, we change batch size from 10 to 1
            net.blobs['data'].reshape(1,3,224,224)

            #load the image in the data layer
            f = open(args.labelfile, "r")
            for line in f.readlines():
                    parts = line.split()
                    example_image = parts[0]
                    label = int(parts[1])
                    im = caffe.io.load_image(TRAIN_DATA_ROOT + example_image)
                    print(im.shape)
                    net.blobs['data'].data[...] = transformer.preprocess('data', im)
                    out = net.forward()
                    plabel = int(out['prob'][0].argmax(axis=0))
                    count += 1
                    iscorrect = label == plabel
                    correct += (1 if iscorrect else 0)
                    matrix[(label, plabel)] += 1
                    labels_set.update([label, plabel])
                    if not iscorrect:
                            print("
Error: expected %i but predicted %i" 
                                        % (label, plabel))

                    sys.stdout.write("
Accuracy: %.1f%%" % (100.*correct/count))
                    sys.stdout.flush()

            print(", %i/%i corrects" % (correct, count))

            print ("")
            print ("Confusion matrix:")
            print ("(r , p) | count")
            for l in labels_set:
                    for pl in labels_set:
                            print ("(%i , %i) | %i" % (l, pl, matrix[(l,pl)])) 

I am using the following deploy.prototxt:

    name: "CaffeNet"
    input: "data"
    input_shape {
      dim: 1
      dim: 3
      dim: 224
      dim: 224
    }
    layers {
      name: "conv1"
      type: CONVOLUTION
      bottom: "data"
      top: "conv1"

        blobs_lr: 1
        weight_decay: 1

        blobs_lr: 2
        weight_decay: 0


      convolution_param {
        num_output: 96
        kernel_size: 7
        stride: 2
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layers {
      name: "relu1"
      type: RELU
      bottom: "conv1"
      top: "conv1"
    }
    layers {
      name: "norm1"
      type: LRN
      bottom: "conv1"
      top: "norm1"
      lrn_param {
        local_size: 5
        alpha: 0.0005
        beta: 0.75
      }
    }
    layers {
      name: "pool1"
      type: POOLING
      bottom: "norm1"
      top: "pool1"
      pooling_param {
        pool: MAX
        kernel_size: 3
        stride: 3
      }
    }
    layers {
      name: "conv2"
      type: CONVOLUTION
      bottom: "pool1"
      top: "conv2"

        blobs_lr: 1
        weight_decay: 1


        blobs_lr: 2
        weight_decay: 0

      convolution_param {
        num_output: 256
        pad: 2
        kernel_size: 5
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layers {
      name: "relu2"
      type: RELU
      bottom: "conv2"
      top: "conv2"
    }
    layers {
      name: "pool2"
      type: POOLING
      bottom: "conv2"
      top: "pool2"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layers {
      name: "conv3"
      type: CONVOLUTION
      bottom: "pool2"
      top: "conv3"

        blobs_lr: 1
        weight_decay: 1

        blobs_lr: 2
        weight_decay: 0

      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layers {
      name: "relu3"
      type: RELU
      bottom: "conv3"
      top: "conv3"
    }
    layers {
      name: "conv4"
      type: CONVOLUTION
      bottom: "conv3"
      top: "conv4"

        blobs_lr: 1
        weight_decay: 1


        blobs_lr: 2
        weight_decay: 0

      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layers {
      name: "relu4"
      type: RELU
      bottom: "conv4"
      top: "conv4"
    }
    layers {
      name: "conv5"
      type: CONVOLUTION
      bottom: "conv4"
      top: "conv5"

        blobs_lr: 1
        weight_decay: 1


        blobs_lr: 2
        weight_decay: 0

      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layers {
      name: "relu5"
      type: RELU
      bottom: "conv5"
      top: "conv5"
    }
    layers {
      name: "pool5"
      type: POOLING
      bottom: "conv5"
      top: "pool5"
      pooling_param {
        pool: MAX
        kernel_size: 3
        stride: 3
      }
    }
    layers {
      name: "fc6"
      type: INNER_PRODUCT
      bottom: "pool5"
      top: "fc6"

        blobs_lr: 1
        weight_decay: 1

        blobs_lr: 2
        weight_decay: 0

      inner_product_param {
        num_output: 4048
        weight_filler {
          type: "gaussian"
          std: 0.005
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layers {
      name: "relu6"
      type: RELU
      bottom: "fc6"
      top: "fc6"
    }
    layers {
      name: "drop6"
      type: DROPOUT
      bottom: "fc6"
      top: "fc6"
      dropout_param {
        dropout_ratio: 0.5
      }
    }
    layers {
      name: "fc7"
      type: INNER_PRODUCT
      bottom: "fc6"
      top: "fc7"
      # Note that blobs_lr can be set to 0 to disable any fine-tuning of this, and any other, layers

        blobs_lr: 1
        weight_decay: 1

        blobs_lr: 2
        weight_decay: 0

      inner_product_param {
        num_output: 4048
        weight_filler {
          type: "gaussian"
          std: 0.005
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layers {
      name: "relu7"
      type: RELU
      bottom: "fc7"
      top: "fc7"
    }
    layers {
      name: "drop7"
      type: DROPOUT
      bottom: "fc7"
      top: "fc7"
      dropout_param {
        dropout_ratio: 0.5
      }
    }
    layers {
      name: "fc8_temp"
      type: INNER_PRODUCT
      bottom: "fc7"
      top: "fc8_temp"
      # blobs_lr is set to higher than for other layers, because this layers is starting from random while the others are already trained
        blobs_lr: 10
        weight_decay: 1

        blobs_lr: 20
        weight_decay: 0

      inner_product_param {
        num_output: 16
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layers {
      name: "prob"
      type: SOFTMAX
      bottom: "fc8_temp"
      top: "prob"
    }

The command used to run the script is

    python confusion.py --proto deploy.prototxt --model models/model_iter_13000.caffemodel --meanfile mean.binaryproto --labelfile NamesTest.txt

My doubt is why there is a difference in accuracy when I am using the same model and the same test set. Am I doing anything wrong? Thank you in advance.

Answer

There are differences between your validation step (TEST phase) and the Python code you are running:

  1. You are using a different mean file for train and test (!): for phase: TRAIN you are using mean_file: "mean.binaryproto" while for phase: TEST you are using mean_file: "painmean.binaryproto". Your Python evaluation code uses the training mean file and not the validation one.
     It is not good practice to have different settings for train/validation.

  2. Your input images have new_height: 256 and crop_size: 224. This setting means caffe reads the image, scales it to 256x256, and then crops the center to size 224x224. Your Python code seems to only scale the input to 224x224 without cropping: you feed your net with different inputs. See the preprocessing sketch after this list.
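
As an illustration only (not the asker's code): a minimal sketch of evaluation preprocessing aligned with the TEST phase, assuming the file names used above. It loads the TEST mean file and replicates the resize-to-256 / center-crop-224 behaviour of the ImageData layer; the Transformer is assumed to be configured as in the script above, except constructed with this mean.

    import caffe

    # Point 1: use the TEST-phase mean, not the training mean.
    blob = caffe.io.caffe_pb2.BlobProto.FromString(
            open('painmean.binaryproto', 'rb').read())
    test_mean = caffe.io.blobproto_to_array(blob)[0]

    def preprocess_like_test_phase(path, transformer):
        # Point 2: mimic the ImageData layer -- scale to
        # new_height x new_width (256x256), then take the central
        # crop_size x crop_size (224x224) patch.
        im = caffe.io.load_image(path)              # HxWx3, RGB, floats in [0, 1]
        im = caffe.io.resize_image(im, (256, 256))  # match new_height/new_width
        off = (256 - 224) // 2                      # = 16, the center-crop offset
        return transformer.preprocess('data', im[off:off + 224, off:off + 224, :])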

Please verify that you do not have any other differences between your training prototxt and your deploy prototxt. A quick way to sanity-check the shared layers is sketched below.
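
For example (a hypothetical check, reusing the file names from the question, and assuming train.txt/test.txt are still reachable since the data layers are instantiated on load), loading the same snapshot through both definitions and comparing parameter shapes layer by layer will surface any mismatch:

    import caffe

    caffe.set_mode_cpu()
    # The same weights loaded through both network definitions.
    net_train = caffe.Net('train.prototxt', 'models/model_iter_13000.caffemodel', caffe.TEST)
    net_deploy = caffe.Net('deploy.prototxt', 'models/model_iter_13000.caffemodel', caffe.TEST)

    # A layer that is missing, or whose parameter shapes disagree, is suspect.
    for name, params in net_deploy.params.items():
        deploy_shapes = [p.data.shape for p in params]
        if name in net_train.params:
            train_shapes = [p.data.shape for p in net_train.params[name]]
        else:
            train_shapes = None
        print('%s: deploy=%s train=%s' % (name, deploy_shapes, train_shapes))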
