Skip to content
Snippets Groups Projects
Commit bb46c698 authored by Ross Girshick's avatar Ross Girshick
Browse files

removing VGG_CNN_S

parent a2b35f2a
No related branches found
No related tags found
No related merge requests found
# Deploy/test network for Fast R-CNN with a VGG_CNN_S backbone.
name: "VGG_CNN_S"
# Image blob: 1 x 3 x 224 x 224. NOTE(review): H/W are presumably reshaped
# at runtime to the scaled test image size, as is usual for Fast R-CNN
# deploy nets -- confirm against the caller.
input: "data"
input_dim: 1
input_dim: 3
input_dim: 224
input_dim: 224
# Region-of-interest blob: one row per proposal, 5 values each.
input: "rois"
input_dim: 1 # to be changed on-the-fly to num ROIs
input_dim: 5 # [batch ind, x1, y1, x2, y2] zero-based indexing
input_dim: 1
input_dim: 1
# conv1: 96 filters of 7x7, stride 2, applied to the input image.
# Both blobs_lr entries are 0 (weights and biases), so this layer is
# frozen -- it keeps its pretrained values and is not fine-tuned.
layers {
bottom: "data"
top: "conv1"
name: "conv1"
type: CONVOLUTION
convolution_param {
num_output: 96
kernel_size: 7
stride: 2
}
# Learning parameters
blobs_lr: 0
blobs_lr: 0
weight_decay: 0
weight_decay: 0
}
# In-place ReLU nonlinearity after conv1.
layers {
bottom: "conv1"
top: "conv1"
name: "relu1"
type: RELU
}
# Local response normalization over conv1 activations
# (window 5, alpha 5e-4, beta 0.75, k 2).
layers {
bottom: "conv1"
top: "norm1"
name: "norm1"
type: LRN
lrn_param {
local_size: 5
alpha: 0.0005
beta: 0.75
k: 2
}
}
# 3x3 max pooling with stride 3.
layers {
bottom: "norm1"
top: "pool1"
name: "pool1"
type: POOLING
pooling_param {
pool: MAX
kernel_size: 3
stride: 3
}
}
# conv2: 256 filters of 5x5 (default stride 1, no padding).
# Per Caffe v1 convention the two blobs_lr / weight_decay entries apply to
# the weight blob and bias blob respectively: weights train at 1x the base
# LR with decay, biases at 2x the base LR without decay.
layers {
bottom: "pool1"
top: "conv2"
name: "conv2"
type: CONVOLUTION
convolution_param {
num_output: 256
kernel_size: 5
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
# In-place ReLU after conv2.
layers {
bottom: "conv2"
top: "conv2"
name: "relu2"
type: RELU
}
# 2x2 max pooling with stride 2.
layers {
bottom: "conv2"
top: "pool2"
name: "pool2"
type: POOLING
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
# conv3-conv5: three identical 512-filter 3x3 convolutions with pad 1
# (spatial size preserved), each followed by an in-place ReLU.
# All three are fine-tuned (weights at 1x LR with decay, biases at 2x LR
# without decay).
layers {
bottom: "pool2"
top: "conv3"
name: "conv3"
type: CONVOLUTION
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "conv3"
top: "conv3"
name: "relu3"
type: RELU
}
layers {
bottom: "conv3"
top: "conv4"
name: "conv4"
type: CONVOLUTION
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "conv4"
top: "conv4"
name: "relu4"
type: RELU
}
# conv5 is the last conv feature map; ROI pooling reads from it below.
layers {
bottom: "conv4"
top: "conv5"
name: "conv5"
type: CONVOLUTION
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "conv5"
top: "conv5"
name: "relu5"
type: RELU
}
# ROI pooling: crops each region in "rois" out of the conv5 feature map
# and max-pools it to a fixed 6x6 grid, producing one fixed-size feature
# per region for the fully-connected layers.
layers {
name: "roi_pool5"
type: ROI_POOLING
bottom: "conv5"
bottom: "rois"
top: "pool5"
roi_pooling_param {
pooled_w: 6
pooled_h: 6
}
}
# fc6: first fully-connected layer, 4096 outputs, followed by in-place
# ReLU and dropout (ratio 0.5).
layers {
bottom: "pool5"
top: "fc6"
name: "fc6"
type: INNER_PRODUCT
inner_product_param {
num_output: 4096
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "fc6"
top: "fc6"
name: "relu6"
type: RELU
}
layers {
bottom: "fc6"
top: "fc6"
name: "drop6"
type: DROPOUT
dropout_param {
dropout_ratio: 0.5
}
}
# fc7: second fully-connected layer, 4096 outputs, again with in-place
# ReLU and dropout (ratio 0.5).
layers {
bottom: "fc6"
top: "fc7"
name: "fc7"
type: INNER_PRODUCT
inner_product_param {
num_output: 4096
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "fc7"
top: "fc7"
name: "relu7"
type: RELU
}
layers {
bottom: "fc7"
top: "fc7"
name: "drop7"
type: DROPOUT
dropout_param {
dropout_ratio: 0.5
}
}
# Classification head: 21 outputs -- presumably the 20 PASCAL VOC classes
# plus background (the train net's comments give K = 21).
# Weights initialized from N(0, 0.01), biases at 0.
layers {
name: "fc8_pascal"
type: INNER_PRODUCT
bottom: "fc7"
top: "fc8_pascal"
inner_product_param {
num_output: 21
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
# Bounding-box regression head: 84 = 4 box deltas x 21 classes.
# Smaller init std (0.001) than the classification head.
layers {
name: "fc8_pascal_bbox"
type: INNER_PRODUCT
bottom: "fc7"
top: "fc8_pascal_bbox"
inner_product_param {
num_output: 84
weight_filler {
type: "gaussian"
std: 0.001
}
bias_filler {
type: "constant"
value: 0
}
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
# Softmax over class scores -> per-ROI class probabilities (test time only;
# the train net replaces this with SOFTMAX_LOSS).
layers {
name: "prob"
type: SOFTMAX
bottom: "fc8_pascal"
top: "prob"
}
# ---- Solver configuration (separate prototxt from the nets above) ----
# SGD with momentum; LR starts at 1e-3 and is multiplied by gamma=0.1
# every 30k iterations ("step" policy).
train_net: "models/VGG_CNN_S/train.prototxt"
base_lr: 0.001
lr_policy: "step"
gamma: 0.1
stepsize: 30000
display: 20
average_loss: 100
max_iter: 500000
iter_size: 1
momentum: 0.9
weight_decay: 0.0005
# We disable standard caffe solver snapshotting and implement our own snapshot
# function
snapshot: 0
# We still use the snapshot prefix, though
snapshot_prefix: "snapshots/vgg_cnn_s_fast_rcnn"
#debug_info: true
# ---- Training network (separate prototxt from the deploy net above) ----
# Same backbone as the deploy net, but with label / regression-target
# inputs and loss layers instead of a softmax output.
name: "VGG_CNN_S"
# Image blob: 1 x 3 x 224 x 224 (reshaped on-the-fly like the ROI blobs).
input: "data"
input_dim: 1
input_dim: 3
input_dim: 224
input_dim: 224
# Region proposals: one row per ROI, 5 values each.
input: "rois"
input_dim: 1 # to be changed on-the-fly to num ROIs
input_dim: 5 # [batch ind, x1, y1, x2, y2] zero-based indexing
input_dim: 1
input_dim: 1
# Ground-truth class label per ROI.
input: "labels"
input_dim: 1 # to be changed on-the-fly to match num ROIs
input_dim: 1
input_dim: 1
input_dim: 1
# Per-ROI bbox regression targets, 4 values for each of the 21 classes.
input: "bbox_targets"
input_dim: 1 # to be changed on-the-fly to match num ROIs
input_dim: 84 # 4 * K (=21) classes
input_dim: 1
input_dim: 1
# Per-target weights; zero entries mask out targets from the bbox loss.
input: "bbox_loss_weights"
input_dim: 1 # to be changed on-the-fly to match num ROIs
input_dim: 84 # 4 * K (=21) classes
input_dim: 1
input_dim: 1
# Convolutional feature extractor (identical to the deploy net):
# conv1 (96@7x7/2, frozen) -> ReLU -> LRN -> 3x3/3 max pool ->
# conv2 (256@5x5) -> ReLU -> 2x2/2 max pool ->
# conv3/conv4/conv5 (512@3x3 pad 1) each followed by ReLU.
# conv1 has blobs_lr 0/0 (frozen); all other conv layers fine-tune with
# weights at 1x LR (with decay) and biases at 2x LR (no decay).
layers {
bottom: "data"
top: "conv1"
name: "conv1"
type: CONVOLUTION
convolution_param {
num_output: 96
kernel_size: 7
stride: 2
}
# Learning parameters
blobs_lr: 0
blobs_lr: 0
weight_decay: 0
weight_decay: 0
}
layers {
bottom: "conv1"
top: "conv1"
name: "relu1"
type: RELU
}
layers {
bottom: "conv1"
top: "norm1"
name: "norm1"
type: LRN
lrn_param {
local_size: 5
alpha: 0.0005
beta: 0.75
k: 2
}
}
layers {
bottom: "norm1"
top: "pool1"
name: "pool1"
type: POOLING
pooling_param {
pool: MAX
kernel_size: 3
stride: 3
}
}
layers {
bottom: "pool1"
top: "conv2"
name: "conv2"
type: CONVOLUTION
convolution_param {
num_output: 256
kernel_size: 5
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "conv2"
top: "conv2"
name: "relu2"
type: RELU
}
layers {
bottom: "conv2"
top: "pool2"
name: "pool2"
type: POOLING
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layers {
bottom: "pool2"
top: "conv3"
name: "conv3"
type: CONVOLUTION
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "conv3"
top: "conv3"
name: "relu3"
type: RELU
}
layers {
bottom: "conv3"
top: "conv4"
name: "conv4"
type: CONVOLUTION
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "conv4"
top: "conv4"
name: "relu4"
type: RELU
}
layers {
bottom: "conv4"
top: "conv5"
name: "conv5"
type: CONVOLUTION
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "conv5"
top: "conv5"
name: "relu5"
type: RELU
}
# ROI pooling: extracts each proposal from the conv5 feature map and
# max-pools it to a fixed 6x6 output, matching the deploy net.
layers {
name: "roi_pool5"
type: ROI_POOLING
bottom: "conv5"
bottom: "rois"
top: "pool5"
roi_pooling_param {
pooled_w: 6
pooled_h: 6
}
}
# fc6 and fc7: two 4096-output fully-connected layers, each with in-place
# ReLU and dropout (ratio 0.5, active during training).
layers {
bottom: "pool5"
top: "fc6"
name: "fc6"
type: INNER_PRODUCT
inner_product_param {
num_output: 4096
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "fc6"
top: "fc6"
name: "relu6"
type: RELU
}
layers {
bottom: "fc6"
top: "fc6"
name: "drop6"
type: DROPOUT
dropout_param {
dropout_ratio: 0.5
}
}
layers {
bottom: "fc6"
top: "fc7"
name: "fc7"
type: INNER_PRODUCT
inner_product_param {
num_output: 4096
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
layers {
bottom: "fc7"
top: "fc7"
name: "relu7"
type: RELU
}
layers {
bottom: "fc7"
top: "fc7"
name: "drop7"
type: DROPOUT
dropout_param {
dropout_ratio: 0.5
}
}
# Classification head: 21 class scores (K = 21 per the input comments
# above: 20 PASCAL VOC classes + background). Gaussian init, std 0.01.
layers {
name: "fc8_pascal"
type: INNER_PRODUCT
bottom: "fc7"
top: "fc8_pascal"
inner_product_param {
num_output: 21
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
# Bounding-box regression head: 84 = 4 deltas x 21 classes.
# Smaller init std (0.001) than the classification head.
layers {
name: "fc8_pascal_bbox"
type: INNER_PRODUCT
bottom: "fc7"
top: "fc8_pascal_bbox"
inner_product_param {
num_output: 84
weight_filler {
type: "gaussian"
std: 0.001
}
bias_filler {
type: "constant"
value: 0
}
}
# Learning parameters
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
}
# Multinomial logistic (softmax) loss over the class scores vs. labels.
layers {
name: "loss"
type: SOFTMAX_LOSS
bottom: "fc8_pascal"
bottom: "labels"
top: "loss"
}
# Smooth L1 loss on the bbox deltas; bbox_loss_weights masks which
# targets contribute (zero weight = ignored). Weighted 1:1 with the
# classification loss.
layers {
name: "loss_bbox"
type: SMOOTH_L1_LOSS
bottom: "fc8_pascal_bbox"
bottom: "bbox_targets"
bottom: "bbox_loss_weights"
top: "loss_bbox"
loss_weight: 1
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment