- Caffe reference ImageNet model
- AlexNet
Note that one can upgrade the weights locally by running
`upgrade_net_proto_binary.bin` to avoid re-downloading them.
input_dim: 227
input_dim: 227
layers {
- layer {
- name: "conv1"
- type: "conv"
+ name: "conv1"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 96
- kernelsize: 11
+ kernel_size: 11
stride: 4
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "data"
top: "conv1"
}
layers {
- layer {
- name: "relu1"
- type: "relu"
- }
+ name: "relu1"
+ type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
- layer {
- name: "norm1"
- type: "lrn"
+ name: "norm1"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
+ }
bottom: "conv1"
top: "norm1"
}
layers {
- layer {
- name: "pool1"
- type: "pool"
+ name: "pool1"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm1"
top: "pool1"
}
layers {
- layer {
- name: "conv2"
- type: "conv"
+ name: "conv2"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 5
pad: 2
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
+ kernel_size: 5
+ group: 2
}
bottom: "pool1"
top: "conv2"
}
layers {
- layer {
- name: "relu2"
- type: "relu"
- }
+ name: "relu2"
+ type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
- layer {
- name: "norm2"
- type: "lrn"
+ name: "norm2"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
+ }
bottom: "conv2"
top: "norm2"
}
layers {
- layer {
- name: "pool2"
- type: "pool"
+ name: "pool2"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm2"
top: "pool2"
}
layers {
- layer {
- name: "conv3"
- type: "conv"
+ name: "conv3"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 384
- kernelsize: 3
pad: 1
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
+ kernel_size: 3
}
bottom: "pool2"
top: "conv3"
}
layers {
- layer {
- name: "relu3"
- type: "relu"
- }
+ name: "relu3"
+ type: RELU
bottom: "conv3"
top: "conv3"
}
layers {
- layer {
- name: "conv4"
- type: "conv"
+ name: "conv4"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 384
- group: 2
- kernelsize: 3
pad: 1
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
+ kernel_size: 3
+ group: 2
}
bottom: "conv3"
top: "conv4"
}
layers {
- layer {
- name: "relu4"
- type: "relu"
- }
+ name: "relu4"
+ type: RELU
bottom: "conv4"
top: "conv4"
}
layers {
- layer {
- name: "conv5"
- type: "conv"
+ name: "conv5"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 3
pad: 1
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
+ kernel_size: 3
+ group: 2
}
bottom: "conv4"
top: "conv5"
}
layers {
- layer {
- name: "relu5"
- type: "relu"
- }
+ name: "relu5"
+ type: RELU
bottom: "conv5"
top: "conv5"
}
layers {
- layer {
- name: "pool5"
- type: "pool"
- kernelsize: 3
+ name: "pool5"
+ type: POOLING
+ pooling_param {
pool: MAX
+ kernel_size: 3
stride: 2
}
bottom: "conv5"
top: "pool5"
}
layers {
- layer {
- name: "fc6"
- type: "innerproduct"
+ name: "fc6"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 4096
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "pool5"
top: "fc6"
}
layers {
- layer {
- name: "relu6"
- type: "relu"
- }
+ name: "relu6"
+ type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "drop6"
- type: "dropout"
+ name: "drop6"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "fc7"
- type: "innerproduct"
+ name: "fc7"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 4096
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "fc6"
top: "fc7"
}
layers {
- layer {
- name: "relu7"
- type: "relu"
- }
+ name: "relu7"
+ type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "drop7"
- type: "dropout"
+ name: "drop7"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "fc8"
- type: "innerproduct"
+ name: "fc8"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 1000
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "fc7"
top: "fc8"
}
layers {
- layer {
- name: "prob"
- type: "softmax"
- }
+ name: "prob"
+ type: SOFTMAX
bottom: "fc8"
top: "prob"
}
name: "AlexNet"
layers {
- layer {
- name: "data"
- type: "data"
+ name: "data"
+ type: DATA
+ data_param {
source: "ilsvrc12_train_leveldb"
- meanfile: "../../data/ilsvrc12/imagenet_mean.binaryproto"
- batchsize: 256
- cropsize: 227
+ mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
+ batch_size: 256
+ crop_size: 227
mirror: true
}
top: "data"
top: "label"
}
layers {
- layer {
- name: "conv1"
- type: "conv"
+ name: "conv1"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 96
- kernelsize: 11
+ kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
}
bias_filler {
type: "constant"
- value: 0.
+ value: 0
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "data"
top: "conv1"
}
layers {
- layer {
- name: "relu1"
- type: "relu"
- }
+ name: "relu1"
+ type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
- layer {
- name: "norm1"
- type: "lrn"
+ name: "norm1"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
+ }
bottom: "conv1"
top: "norm1"
}
layers {
- layer {
- name: "pool1"
- type: "pool"
+ name: "pool1"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm1"
top: "pool1"
}
layers {
- layer {
- name: "conv2"
- type: "conv"
+ name: "conv2"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 5
pad: 2
+ kernel_size: 5
+ group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "pool1"
top: "conv2"
}
layers {
- layer {
- name: "relu2"
- type: "relu"
- }
+ name: "relu2"
+ type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
- layer {
- name: "norm2"
- type: "lrn"
+ name: "norm2"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
+ }
bottom: "conv2"
top: "norm2"
}
layers {
- layer {
- name: "pool2"
- type: "pool"
+ name: "pool2"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm2"
top: "pool2"
}
layers {
- layer {
- name: "conv3"
- type: "conv"
+ name: "conv3"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 384
- kernelsize: 3
pad: 1
+ kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
- value: 0.
+ value: 0
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "pool2"
top: "conv3"
}
layers {
- layer {
- name: "relu3"
- type: "relu"
- }
+ name: "relu3"
+ type: RELU
bottom: "conv3"
top: "conv3"
}
layers {
- layer {
- name: "conv4"
- type: "conv"
+ name: "conv4"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 384
- group: 2
- kernelsize: 3
pad: 1
+ kernel_size: 3
+ group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "conv3"
top: "conv4"
}
layers {
- layer {
- name: "relu4"
- type: "relu"
- }
+ name: "relu4"
+ type: RELU
bottom: "conv4"
top: "conv4"
}
layers {
- layer {
- name: "conv5"
- type: "conv"
+ name: "conv5"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 3
pad: 1
+ kernel_size: 3
+ group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "conv4"
top: "conv5"
}
layers {
- layer {
- name: "relu5"
- type: "relu"
- }
+ name: "relu5"
+ type: RELU
bottom: "conv5"
top: "conv5"
}
layers {
- layer {
- name: "pool5"
- type: "pool"
- kernelsize: 3
+ name: "pool5"
+ type: POOLING
+ pooling_param {
pool: MAX
+ kernel_size: 3
stride: 2
}
bottom: "conv5"
top: "pool5"
}
layers {
- layer {
- name: "fc6"
- type: "innerproduct"
+ name: "fc6"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
}
bias_filler {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "pool5"
top: "fc6"
}
layers {
- layer {
- name: "relu6"
- type: "relu"
- }
+ name: "relu6"
+ type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "drop6"
- type: "dropout"
+ name: "drop6"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "fc7"
- type: "innerproduct"
+ name: "fc7"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
}
bias_filler {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "fc6"
top: "fc7"
}
layers {
- layer {
- name: "relu7"
- type: "relu"
- }
+ name: "relu7"
+ type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "drop7"
- type: "dropout"
+ name: "drop7"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "fc8"
- type: "innerproduct"
+ name: "fc8"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 1000
weight_filler {
type: "gaussian"
}
bias_filler {
type: "constant"
- value: 0.
+ value: 0
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "fc7"
top: "fc8"
}
layers {
- layer {
- name: "loss"
- type: "softmax_loss"
- }
+ name: "loss"
+ type: SOFTMAX_LOSS
bottom: "fc8"
bottom: "label"
}
name: "AlexNet"
layers {
- layer {
- name: "data"
- type: "data"
+ name: "data"
+ type: DATA
+ data_param {
source: "ilsvrc12_val_leveldb"
- meanfile: "../../data/ilsvrc12/imagenet_mean.binaryproto"
- batchsize: 50
- cropsize: 227
+ mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
+ batch_size: 50
+ crop_size: 227
mirror: false
}
top: "data"
top: "label"
}
layers {
- layer {
- name: "conv1"
- type: "conv"
+ name: "conv1"
+ type: CONVOLUTION
+ convolution_param {
num_output: 96
- kernelsize: 11
+ kernel_size: 11
stride: 4
}
bottom: "data"
top: "conv1"
}
layers {
- layer {
- name: "relu1"
- type: "relu"
- }
+ name: "relu1"
+ type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
- layer {
- name: "norm1"
- type: "lrn"
+ name: "norm1"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
+ }
bottom: "conv1"
top: "norm1"
}
layers {
- layer {
- name: "pool1"
- type: "pool"
+ name: "pool1"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm1"
top: "pool1"
}
layers {
- layer {
- name: "conv2"
- type: "conv"
+ name: "conv2"
+ type: CONVOLUTION
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 5
pad: 2
+ kernel_size: 5
+ group: 2
}
bottom: "pool1"
top: "conv2"
}
layers {
- layer {
- name: "relu2"
- type: "relu"
- }
+ name: "relu2"
+ type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
- layer {
- name: "norm2"
- type: "lrn"
+ name: "norm2"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
+ }
bottom: "conv2"
top: "norm2"
}
layers {
- layer {
- name: "pool2"
- type: "pool"
+ name: "pool2"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm2"
top: "pool2"
}
layers {
- layer {
- name: "conv3"
- type: "conv"
+ name: "conv3"
+ type: CONVOLUTION
+ convolution_param {
num_output: 384
- kernelsize: 3
pad: 1
+ kernel_size: 3
}
bottom: "pool2"
top: "conv3"
}
layers {
- layer {
- name: "relu3"
- type: "relu"
- }
+ name: "relu3"
+ type: RELU
bottom: "conv3"
top: "conv3"
}
layers {
- layer {
- name: "conv4"
- type: "conv"
+ name: "conv4"
+ type: CONVOLUTION
+ convolution_param {
num_output: 384
- group: 2
- kernelsize: 3
pad: 1
+ kernel_size: 3
+ group: 2
}
bottom: "conv3"
top: "conv4"
}
layers {
- layer {
- name: "relu4"
- type: "relu"
- }
+ name: "relu4"
+ type: RELU
bottom: "conv4"
top: "conv4"
}
layers {
- layer {
- name: "conv5"
- type: "conv"
+ name: "conv5"
+ type: CONVOLUTION
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 3
pad: 1
+ kernel_size: 3
+ group: 2
}
bottom: "conv4"
top: "conv5"
}
layers {
- layer {
- name: "relu5"
- type: "relu"
- }
+ name: "relu5"
+ type: RELU
bottom: "conv5"
top: "conv5"
}
layers {
- layer {
- name: "pool5"
- type: "pool"
- kernelsize: 3
+ name: "pool5"
+ type: POOLING
+ pooling_param {
pool: MAX
+ kernel_size: 3
stride: 2
}
bottom: "conv5"
top: "pool5"
}
layers {
- layer {
- name: "fc6"
- type: "innerproduct"
+ name: "fc6"
+ type: INNER_PRODUCT
+ inner_product_param {
num_output: 4096
}
bottom: "pool5"
top: "fc6"
}
layers {
- layer {
- name: "relu6"
- type: "relu"
- }
+ name: "relu6"
+ type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "drop6"
- type: "dropout"
+ name: "drop6"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "fc7"
- type: "innerproduct"
+ name: "fc7"
+ type: INNER_PRODUCT
+ inner_product_param {
num_output: 4096
}
bottom: "fc6"
top: "fc7"
}
layers {
- layer {
- name: "relu7"
- type: "relu"
- }
+ name: "relu7"
+ type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "drop7"
- type: "dropout"
+ name: "drop7"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "fc8"
- type: "innerproduct"
+ name: "fc8"
+ type: INNER_PRODUCT
+ inner_product_param {
num_output: 1000
}
bottom: "fc7"
top: "fc8"
}
layers {
- layer {
- name: "prob"
- type: "softmax"
- }
+ name: "prob"
+ type: SOFTMAX
bottom: "fc8"
top: "prob"
}
layers {
- layer {
- name: "accuracy"
- type: "accuracy"
- }
+ top: "accuracy"
+ name: "accuracy"
+ type: ACCURACY
bottom: "prob"
bottom: "label"
- top: "accuracy"
}
# for ilsvrc image classification and deep feature extraction
MODEL=caffe_alexnet_model
-CHECKSUM=91df0e19290ef78324de9eecb258a77f
+CHECKSUM=29eb495b11613825c1900382f5286963
if [ -f $MODEL ]; then
echo "Model already exists. Checking md5..."
# for ilsvrc image classification and deep feature extraction
MODEL=caffe_reference_imagenet_model
-CHECKSUM=bf44bac4a59aa7792b296962fe483f2b
+CHECKSUM=af678f0bd3cdd2437e35679d88665170
if [ -f $MODEL ]; then
echo "Model already exists. Checking md5..."
echo "Downloading..."
-wget --no-check-certificate https://www.dropbox.com/s/n3jups0gr7uj0dv/$MODEL
+wget --no-check-certificate https://www.dropbox.com/s/7qkokvr7x0esljl/$MODEL
echo "Done. Please run this command again to verify that checksum = $CHECKSUM."