Error parsing text-format caffe.NetParameter: 54:17: Message type "caffe.ConvolutionParameter" has no field named "sparse_ratio"


Problem description

I hope you are doing well. I tried to run a Python script that I downloaded from here: https://github.com/may0324/DeepCompression-caffe/tree/master/examples/mnist

I am using Ubuntu 16.04 with Python 2.7 and 3.5. The script is:

import sys
import os



sparse_ratio_vec = [0.33, 0.8, 0.9, 0.8] #sparse ratio of each layer
iters = [500, 1000, 10500, 11000, 500] #max iteration of each stage

def generate_data_layer():
    data_layer_str = '''
name: "LeNet"
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    scale: 0.00390625
  }
  data_param {
    source: "examples/mnist/mnist_train_lmdb"
    batch_size: 64
    backend: LMDB
  }
}
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  transform_param {
    scale: 0.00390625
  }
  data_param {
    source: "examples/mnist/mnist_test_lmdb"
    batch_size: 100 
    backend: LMDB
  }
}

    '''

    return data_layer_str

def generate_cmp_conv_layer(kernel_size, kernel_num, stride,  layer_name, bottom, top, filler="xavier", sparse_ratio=0, class_num=256, quantize_term="false"):
    tmp = ''
    if filler == 'gaussian':
      tmp = '''    std: 0.01
      '''
    conv_layer_str = '''
layer {
  name: "%s"
  type: "CmpConvolution"
  bottom: "%s"
  top: "%s"
  param {
    lr_mult: 1
  }
  param {
     lr_mult: 2
  }
  convolution_param {
    num_output: %d
    kernel_size: %d
    stride: %d
    sparse_ratio: %f
    class_num: %d
    quantize_term: %s
    weight_filler {
      type: "%s"
    ''' %(layer_name, bottom, top, kernel_num, kernel_size, stride, sparse_ratio, class_num, quantize_term, filler) + tmp + '''
    }
    bias_filler {
      type: "constant"
    }
  }
}
    '''
    return conv_layer_str
def generate_cmp_fc_layer(kernel_num, layer_name, bottom, top, filler="xavier", sparse_ratio=0, class_num=256, quantize_term="false"):
    tmp = ''
    if filler == 'gaussian':
      tmp = '''    std: 0.01
      '''
    fc_layer_str = '''
layer {
  name: "%s"
  type: "CmpInnerProduct"
  bottom: "%s"
  top: "%s"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: %d
    sparse_ratio: %f
    class_num: %d
    quantize_term: %s
    weight_filler {
      type: "%s"
    ''' %(layer_name, bottom, top, kernel_num, sparse_ratio, class_num, quantize_term, filler ) + tmp + '''
    }
    bias_filler {
      type: "constant"
    }
  }
}'''
    return fc_layer_str
def generate_pooling_layer(kernel_size, stride, pool_type, layer_name, bottom, top):
    pool_layer_str = '''
layer {
  name: "%s"
  type: "Pooling"
  bottom: "%s"
  top: "%s"
  pooling_param {
    pool: %s
    kernel_size: %d
    stride: %d
  }
}'''%(layer_name, bottom, top, pool_type, kernel_size, stride)
    return pool_layer_str


def generate_activation_layer(layer_name, bottom, top, act_type="ReLU"):
    act_layer_str = '''
layer {
  name: "%s"
  type: "%s"
  bottom: "%s"
  top: "%s"
}'''%(layer_name, act_type, bottom, top)
    return act_layer_str

def generate_softmax_loss(bottom):
    softmax_loss_str = '''
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "%s"
  bottom: "label"
  top: "loss"
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "%s"
  bottom: "label"
  top: "accuracy"
  include {
     phase: TEST
  }
}

'''%(bottom, bottom)
    return softmax_loss_str


def generate_lenet(stage):
    if stage<1:
       return ''
    network_str = generate_data_layer()
    if stage == 5: #last stage do weight quantization
      quantize_term = "true"
    else:
      quantize_term = "false"

    ratio = sparse_ratio_vec[0]
    network_str += generate_cmp_conv_layer(5,20,1,"conv1","data","conv1","xavier",ratio,256,quantize_term)
    network_str += generate_pooling_layer(2,2,"MAX","pool1","conv1","pool1")
    if stage >= 2:
      ratio = sparse_ratio_vec[1]
    else:
      ratio = 0
    network_str += generate_cmp_conv_layer(5,50,1,"conv2","pool1","conv2","xavier",ratio,256,quantize_term)
    network_str += generate_pooling_layer(2,2,"MAX","pool2","conv2","pool2")
    if stage >= 3:
      ratio = sparse_ratio_vec[2]
    else:
      ratio = 0
    network_str += generate_cmp_fc_layer(500,"fc1","pool2","fc1","xavier",ratio,32,quantize_term)
    network_str += generate_activation_layer("relu1", "fc1", "fc1", "ReLU")
    if stage >= 4:
      ratio = sparse_ratio_vec[3]
    else:
      ratio = 0
    network_str += generate_cmp_fc_layer(10,"fc2","fc1","fc2","xavier",ratio,32,quantize_term)
    network_str += generate_softmax_loss("fc2")

    protoname = 'examples/mnist/lenet_train_test_compress_stage%d.prototxt' %stage
    fp = open(protoname, 'w')
    fp.write(network_str)
    fp.close()

def generate_solver(stage, max_iter):
    solver_str = '''
# The train/test net protocol buffer definition
net: "examples/mnist/lenet_train_test_compress_stage%d.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100 
# Carry out testing every 500 training iterations.
test_interval: 500 
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.001#0.01
momentum: 0.9 
weight_decay: 0.0005
# The learning rate policy
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# Display every 100 iterations
display: 100 
# The maximum number of iterations
max_iter: %d 
# snapshot intermediate results
snapshot: 500 
snapshot_prefix: "examples/mnist/lenet_finetune_stage%d" 
# solver mode: CPU or GPU
solver_mode: CPU 

''' %(stage, max_iter, stage ) 
    protoname = 'examples/mnist/lenet_solver_stage%d.prototxt' %stage
    fp = open(protoname,'w')
    fp.write(solver_str)
    fp.close()

if __name__ == '__main__':

    max_stage = 5 
    for s in range(0,max_stage):
      generate_lenet(s+1)
      generate_solver(s+1,iters[s])

      if s==0:
        modelfile = "lenet_iter_10000.caffemodel" #initial model
      else:
        modelfile = "lenet_finetune_stage%d_iter_%d.caffemodel" %(s, iters[s-1]) #model of last stage
      cmd = "./build/tools/caffe train --solver=examples/mnist/lenet_solver_stage%d.prototxt --weights=examples/mnist/%s " %(s+1, modelfile)
      #print(cmd)
      os.system(cmd)      

When I tried to run the above code, I got this error:

[libprotobuf ERROR google/protobuf/text_format.cc:274] Error parsing text-format caffe.NetParameter: 54:17: Message type "caffe.ConvolutionParameter" has no field named "sparse_ratio".
F0213 15:05:57.959002  4726 upgrade_proto.cpp:79] Check failed: ReadProtoFromTextFile(param_file, param) Failed to parse NetParameter file: 
examples/mnist/lenet_train_test_compress_stage5.prototxt

I would like to find out what the problem is, or how to solve it. I am using this code for pruning LeNet-5.
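
For what it's worth, the failure happens inside protobuf's text-format parser, so it can be reproduced from Python without running the caffe binary at all. This is a minimal sketch, assuming pycaffe is importable and the stage-5 prototxt has already been generated by the script above:

from google.protobuf import text_format
from caffe.proto import caffe_pb2

net = caffe_pb2.NetParameter()
with open('examples/mnist/lenet_train_test_compress_stage5.prototxt') as f:
    # With stock Caffe this raises text_format.ParseError:
    # 54:17 : Message type "caffe.ConvolutionParameter" has no field named "sparse_ratio".
    text_format.Merge(f.read(), net)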

Recommended answer

Your version of Caffe does not have sparse_ratio as part of its inner_product_param or its convolution_param (see these definitions in caffe.proto).
It seems that the specific code you are trying to run requires its own version of Caffe, one with a slightly different caffe.proto definition.
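
A quick way to confirm this against your own build is to inspect the message descriptors that pycaffe was compiled with. This is a sketch, assuming the caffe Python module on your PYTHONPATH belongs to the same build as the caffe binary you are running:

from caffe.proto import caffe_pb2

# Collect the field names each message type actually defines.
conv_fields = {f.name for f in caffe_pb2.ConvolutionParameter.DESCRIPTOR.fields}
ip_fields = {f.name for f in caffe_pb2.InnerProductParameter.DESCRIPTOR.fields}

# Both print False on stock Caffe; a build whose caffe.proto defines
# sparse_ratio (along with class_num and quantize_term) would print True.
print('sparse_ratio' in conv_fields)
print('sparse_ratio' in ip_fields)

If both print False, the fix is to build and run against the Caffe tree that ships with the linked DeepCompression-caffe repository (it appears to be a full Caffe fork, not just the example scripts), rather than a stock Caffe installation.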
