// Specifies the shape (dimensions) of a Blob.
message BlobShape {
  repeated int64 dim = 1 [packed = true];
}

message BlobProto {
  optional BlobShape shape = 7;
  repeated float data = 5 [packed = true];
  repeated float diff = 6 [packed = true];
  repeated double double_data = 8 [packed = true];
  repeated double double_diff = 9 [packed = true];

  // 4D dimensions -- deprecated. Use "shape" instead.
  optional int32 num = 1 [default = 0];
  optional int32 channels = 2 [default = 0];
  optional int32 height = 3 [default = 0];
  optional int32 width = 4 [default = 0];
}

// The BlobProtoVector is simply a way to pass multiple blobproto instances
// around.
message BlobProtoVector {
  repeated BlobProto blobs = 1;
}

message Datum {
  optional int32 channels = 1;
  optional int32 height = 2;
  optional int32 width = 3;
  // the actual image data, in bytes
  optional bytes data = 4;
  optional int32 label = 5;
  // Optionally, the datum could also hold float data.
  repeated float float_data = 6;
  // If true, data contains an encoded image that needs to be decoded.
  optional bool encoded = 7 [default = false];
}

message FillerParameter {
  // The filler type.
  optional string type = 1 [default = 'constant'];
  optional float value = 2 [default = 0]; // the value in constant filler
  optional float min = 3 [default = 0]; // the min value in uniform filler
  optional float max = 4 [default = 1]; // the max value in uniform filler
  optional float mean = 5 [default = 0]; // the mean value in Gaussian filler
  optional float std = 6 [default = 1]; // the std value in Gaussian filler
  // The expected number of non-zero output weights for a given input in
  // Gaussian filler -- the default -1 means don't perform sparsification.
  optional int32 sparse = 7 [default = -1];
  // Normalize the filler variance by fan_in, fan_out, or their average.
  // Applies to 'xavier' and 'msra' fillers.
  enum VarianceNorm {
    FAN_IN = 0;
    FAN_OUT = 1;
    AVERAGE = 2;
  }
  optional VarianceNorm variance_norm = 8 [default = FAN_IN];
}
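
// For illustration only (not part of the schema): in a layer's prototxt, a
// filler might be configured as, e.g.,
//
//   weight_filler { type: "gaussian" std: 0.01 }
//   weight_filler { type: "xavier" }   # variance normalized by fan_in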

message NetParameter {
  optional string name = 1; // consider giving the network a name
  // DEPRECATED. See InputParameter. The input blobs to the network.
  repeated string input = 3;
  // DEPRECATED. See InputParameter. The shape of the input blobs.
  repeated BlobShape input_shape = 8;

  // 4D input dimensions -- deprecated. Use "input_shape" instead.
  // If specified, for each input blob there should be four
  // values specifying the num, channels, height and width of the input blob.
  // Thus, there should be a total of (4 * #input) numbers.
  repeated int32 input_dim = 4;

  // Whether the network will force every layer to carry out backward operation.
  // If set to false, whether to carry out backward is determined
  // automatically according to the net structure and learning rates.
  optional bool force_backward = 5 [default = false];
  // The current "state" of the network, including the phase, level, and stage.
  // Some layers may be included/excluded depending on this state and the states
  // specified in the layers' include and exclude fields.
  optional NetState state = 6;

  // Print debugging information about results while running Net::Forward,
  // Net::Backward, and Net::Update.
  optional bool debug_info = 7 [default = false];

  // The layers that make up the net. Each of their configurations, including
  // connectivity and behavior, is specified as a LayerParameter.
  repeated LayerParameter layer = 100; // ID 100 so layers are printed last.

  // DEPRECATED: use 'layer' instead.
  repeated V1LayerParameter layers = 2;
}
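
// For illustration only (not part of the schema): a minimal prototxt net
// (names and shapes made up) might look like
//
//   name: "toy_net"
//   layer {
//     name: "data" type: "Input" top: "data"
//     input_param { shape { dim: 1 dim: 3 dim: 28 dim: 28 } }
//   }
//   layer {
//     name: "fc1" type: "InnerProduct" bottom: "data" top: "fc1"
//     inner_product_param { num_output: 10 }
//   }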

// NOTE
// Update the next available ID when you add a new SolverParameter field.
//
// SolverParameter next available ID: 43 (last added: weights)
message SolverParameter {
  //////////////////////////////////////////////////////////////////////////////
  // Specifying the train and test networks
  //
  // Exactly one train net must be specified using one of the following fields:
  //     train_net_param, train_net, net_param, net
  // One or more test nets may be specified using any of the following fields:
  //     test_net_param, test_net, net_param, net
  // If more than one test net field is specified (e.g., both net and
  // test_net are specified), they will be evaluated in the field order given
  // above: (1) test_net_param, (2) test_net, (3) net_param/net.
  // A test_iter must be specified for each test_net.
  // A test_level and/or a test_stage may also be specified for each test_net.
  //////////////////////////////////////////////////////////////////////////////

  // Proto filename for the train net, possibly combined with one or more
  // test nets.
  optional string net = 24;
  // Inline train net param, possibly combined with one or more test nets.
  optional NetParameter net_param = 25;

  optional string train_net = 1; // Proto filename for the train net.
  repeated string test_net = 2; // Proto filenames for the test nets.
  optional NetParameter train_net_param = 21; // Inline train net params.
  repeated NetParameter test_net_param = 22; // Inline test net params.

  // The states for the train/test nets. Must be unspecified or
  // specified once per net.
  //
  // By default, train_state will have phase = TRAIN,
  // and all test_state's will have phase = TEST.
  // Other defaults are set according to the NetState defaults.
  optional NetState train_state = 26;
  repeated NetState test_state = 27;

  // The number of iterations for each test net.
  repeated int32 test_iter = 3;

  // The number of iterations between two testing phases.
  optional int32 test_interval = 4 [default = 0];
  optional bool test_compute_loss = 19 [default = false];
  // If true, run an initial test pass before the first iteration,
  // ensuring memory availability and printing the starting value of the loss.
  optional bool test_initialization = 32 [default = true];
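
  // For illustration only (not part of the schema): a solver prototxt might
  // configure testing as, e.g. (file name made up),
  //
  //   net: "train_val.prototxt"
  //   test_iter: 100        # 100 test-net batches per test pass
  //   test_interval: 1000   # test every 1000 training iterations
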
  optional float base_lr = 5; // The base learning rate
  // the number of iterations between displaying info. If display = 0, no info
  // will be displayed.
  optional int32 display = 6;
  // Display the loss averaged over the last average_loss iterations
  optional int32 average_loss = 33 [default = 1];
  optional int32 max_iter = 7; // the maximum number of iterations
  // accumulate gradients over `iter_size` x `batch_size` instances
  optional int32 iter_size = 36 [default = 1];

  // The learning rate decay policy. The currently implemented learning rate
  // policies are as follows:
  //    - fixed: always return base_lr.
  //    - step: return base_lr * gamma ^ (floor(iter / step))
  //    - exp: return base_lr * gamma ^ iter
  //    - inv: return base_lr * (1 + gamma * iter) ^ (- power)
  //    - multistep: similar to step but it allows non-uniform steps defined by
  //      stepvalue
  //    - poly: the effective learning rate follows a polynomial decay, to be
  //      zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power)
  //    - sigmoid: the effective learning rate follows a sigmoid decay
  //      return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize))))
  //
  // where base_lr, max_iter, gamma, step, stepvalue and power are defined
  // in the solver parameter protocol buffer, and iter is the current iteration.
  optional string lr_policy = 8;
  optional float gamma = 9; // The parameter to compute the learning rate.
  optional float power = 10; // The parameter to compute the learning rate.
  optional float momentum = 11; // The momentum value.
  optional float weight_decay = 12; // The weight decay.
  // regularization types supported: L1 and L2
  // controlled by weight_decay
  optional string regularization_type = 29 [default = "L2"];
  // the stepsize for learning rate policy "step"
  optional int32 stepsize = 13;
  // the stepsize for learning rate policy "multistep"
  repeated int32 stepvalue = 34;
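
  // For illustration only (not part of the schema): a "multistep" schedule
  // might be configured as, e.g.,
  //
  //   base_lr: 0.01
  //   lr_policy: "multistep"
  //   gamma: 0.1          # multiply the rate by 0.1 at each step
  //   stepvalue: 10000
  //   stepvalue: 20000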

  // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm,
  // whenever their actual L2 norm is larger.
  optional float clip_gradients = 35 [default = -1];

  optional int32 snapshot = 14 [default = 0]; // The snapshot interval
  // The prefix for the snapshot.
  // If not set, it defaults to the prototxt file path without the extension.
  // If set to a directory, it is augmented with the prototxt file name
  // without the extension.
  optional string snapshot_prefix = 15;
  // whether to snapshot diff in the results or not. Snapshotting diff will help
  // debugging but the final protocol buffer size will be much larger.
  optional bool snapshot_diff = 16 [default = false];
  enum SnapshotFormat {
    HDF5 = 0;
    BINARYPROTO = 1;
  }
  optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO];
  // the mode the solver will use: 0 for CPU and 1 for GPU. GPU is the default.
  enum SolverMode {
    CPU = 0;
    GPU = 1;
  }
  optional SolverMode solver_mode = 17 [default = GPU];
  // the device_id that will be used in GPU mode. Defaults to device_id = 0.
  optional int32 device_id = 18 [default = 0];
  // If non-negative, the seed with which the Solver will initialize the Caffe
  // random number generator -- useful for reproducible results. Otherwise,
  // (and by default) initialize using a seed derived from the system clock.
  optional int64 random_seed = 20 [default = -1];

  // type of the solver
  optional string type = 40 [default = "SGD"];

  // numerical stability for RMSProp, AdaGrad, AdaDelta, and Adam
  optional float delta = 31 [default = 1e-8];
  // parameters for the Adam solver
  optional float momentum2 = 39 [default = 0.999];

  // RMSProp decay value
  // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t)
  optional float rms_decay = 38 [default = 0.99];

  // If true, print information about the state of the net that may help with
  // debugging learning problems.
  optional bool debug_info = 23 [default = false];

  // If false, don't save a snapshot after training finishes.
  optional bool snapshot_after_train = 28 [default = true];

  // DEPRECATED: old solver enum types, use string instead
  enum SolverType {
    SGD = 0;
    NESTEROV = 1;
    ADAGRAD = 2;
    RMSPROP = 3;
    ADADELTA = 4;
    ADAM = 5;
  }
  // DEPRECATED: use type instead of solver_type
  optional SolverType solver_type = 30 [default = SGD];

  // Overlap compute and communication for data parallel training
  optional bool layer_wise_reduce = 41 [default = true];

  // Path to caffemodel file(s) with pretrained weights to initialize finetuning.
  // The same as the command-line --weights parameter of the caffe train command.
  // If the command-line --weights parameter is specified, it takes priority and
  // overwrites this setting.
  // If the --snapshot command-line parameter is specified, these weights are
  // ignored.
  // If several model files are expected, they can be listed in a single
  // weights parameter separated by ',' (as on the command line) or
  // in repeated weights parameters separately.
  repeated string weights = 42;
}

// A message that stores the solver snapshots
message SolverState {
  optional int32 iter = 1; // The current iteration
  optional string learned_net = 2; // The file that stores the learned net.
  repeated BlobProto history = 3; // The history for sgd solvers
  optional int32 current_step = 4 [default = 0]; // The current step for learning rate
}

enum Phase {
   TRAIN = 0;
   TEST = 1;
}

message NetState {
  optional Phase phase = 1 [default = TEST];
  optional int32 level = 2 [default = 0];
  repeated string stage = 3;
}

message NetStateRule {
  // Set phase to require the NetState have a particular phase (TRAIN or TEST)
  // to meet this rule.
  optional Phase phase = 1;

  // Set the minimum and/or maximum levels in which the layer should be used.
  // Leave undefined to meet the rule regardless of level.
  optional int32 min_level = 2;
  optional int32 max_level = 3;

  // Customizable sets of stages to include or exclude.
  // The net must have ALL of the specified stages and NONE of the specified
  // "not_stage"s to meet the rule.
  // (Use multiple NetStateRules to specify conjunctions of stages.)
  repeated string stage = 4;
  repeated string not_stage = 5;
}
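
// For illustration only (not part of the schema): a layer that should exist
// only in the training phase might carry, e.g.,
//
//   include { phase: TRAIN }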

// Specifies training parameters (multipliers on global learning constants,
// and the name and other settings used for weight sharing).
message ParamSpec {
  // The names of the parameter blobs -- useful for sharing parameters among
  // layers, but never required otherwise. To share a parameter between two
  // layers, give it a (non-empty) name.
  optional string name = 1;

  // Whether to require shared weights to have the same shape, or just the same
  // count -- defaults to STRICT if unspecified.
  optional DimCheckMode share_mode = 2;
  enum DimCheckMode {
    // STRICT (default) requires that num, channels, height, width each match.
    STRICT = 0;
    // PERMISSIVE requires only the count (num*channels*height*width) to match.
    PERMISSIVE = 1;
  }

  // The multiplier on the global learning rate for this parameter.
  optional float lr_mult = 3 [default = 1.0];

  // The multiplier on the global weight decay for this parameter.
  optional float decay_mult = 4 [default = 1.0];
}
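
// For illustration only (not part of the schema): freezing a layer's weights
// while leaving its bias learnable might look like, e.g.,
//
//   param { lr_mult: 0 decay_mult: 0 }   # weights: never updated
//   param { lr_mult: 1 }                 # bias: standard learning rate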

// NOTE
// Update the next available ID when you add a new LayerParameter field.
//
// LayerParameter next available layer-specific ID: 147 (last added: recurrent_param)
message LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the layer type
  repeated string bottom = 3; // the name of each bottom blob
  repeated string top = 4; // the name of each top blob

  // The train / test phase for computation.
  optional Phase phase = 10;

  // The amount of weight to assign each top blob in the objective.
  // Each layer assigns a default value, usually of either 0 or 1,
  // to each top blob.
  repeated float loss_weight = 5;

  // Specifies training parameters (multipliers on global learning constants,
  // and the name and other settings used for weight sharing).
  repeated ParamSpec param = 6;

  // The blobs containing the numeric parameters of the layer.
  repeated BlobProto blobs = 7;

  // Specifies whether to backpropagate to each bottom. If unspecified,
  // Caffe will automatically infer whether each input needs backpropagation
  // to compute parameter gradients. If set to true for some inputs,
  // backpropagation to those inputs is forced; if set false for some inputs,
  // backpropagation to those inputs is skipped.
  //
  // The size must be either 0 or equal to the number of bottoms.
  repeated bool propagate_down = 11;

  // Rules controlling whether and when a layer is included in the network,
  // based on the current NetState. You may specify a non-zero number of rules
  // to include OR exclude, but not both. If no include or exclude rules are
  // specified, the layer is always included. If the current NetState meets
  // ANY (i.e., one or more) of the specified rules, the layer is
  // included/excluded.
  repeated NetStateRule include = 8;
  repeated NetStateRule exclude = 9;

  // Parameters for data pre-processing.
  optional TransformationParameter transform_param = 100;

  // Parameters shared by loss layers.
  optional LossParameter loss_param = 101;

  // Layer type-specific parameters.
  //
  // Note: certain layers may have more than one computational engine
  // for their implementation. These layers include an Engine type and
  // engine parameter for selecting the implementation.
  // The default for the engine is set by the ENGINE switch at compile-time.
  optional AccuracyParameter accuracy_param = 102;
  optional ArgMaxParameter argmax_param = 103;
  optional BatchNormParameter batch_norm_param = 139;
  optional BiasParameter bias_param = 141;
  optional ConcatParameter concat_param = 104;
  optional ContrastiveLossParameter contrastive_loss_param = 105;
  optional ConvolutionParameter convolution_param = 106;
  optional CropParameter crop_param = 144;
  optional DataParameter data_param = 107;
  optional DropoutParameter dropout_param = 108;
  optional DummyDataParameter dummy_data_param = 109;
  optional EltwiseParameter eltwise_param = 110;
  optional ELUParameter elu_param = 140;
  optional EmbedParameter embed_param = 137;
  optional ExpParameter exp_param = 111;
  optional FlattenParameter flatten_param = 135;
  optional HDF5DataParameter hdf5_data_param = 112;
  optional HDF5OutputParameter hdf5_output_param = 113;
  optional HingeLossParameter hinge_loss_param = 114;
  optional ImageDataParameter image_data_param = 115;
  optional InfogainLossParameter infogain_loss_param = 116;
  optional InnerProductParameter inner_product_param = 117;
  optional InputParameter input_param = 143;
  optional LogParameter log_param = 134;
  optional LRNParameter lrn_param = 118;
  optional MemoryDataParameter memory_data_param = 119;
  optional MVNParameter mvn_param = 120;
  optional ParameterParameter parameter_param = 145;
  optional PoolingParameter pooling_param = 121;
  optional PowerParameter power_param = 122;
  optional PReLUParameter prelu_param = 131;
  optional PythonParameter python_param = 130;
  optional RecurrentParameter recurrent_param = 146;
  optional ReductionParameter reduction_param = 136;
  optional ReLUParameter relu_param = 123;
  optional ReshapeParameter reshape_param = 133;
  optional ScaleParameter scale_param = 142;
  optional SigmoidParameter sigmoid_param = 124;
  optional SoftmaxParameter softmax_param = 125;
  optional SPPParameter spp_param = 132;
  optional SliceParameter slice_param = 126;
  optional TanHParameter tanh_param = 127;
  optional ThresholdParameter threshold_param = 128;
  optional TileParameter tile_param = 138;
  optional WindowDataParameter window_data_param = 129;
}

// Message that stores parameters used to apply transformation
// to the data layer's data
message TransformationParameter {
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 1 [default = 1];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 2 [default = false];
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 3 [default = 0];
  // mean_file and mean_value cannot be specified at the same time
  optional string mean_file = 4;
  // if specified, mean_value can be repeated once (it is then subtracted from
  // all channels) or repeated once per channel (each value is subtracted from
  // the corresponding channel)
  repeated float mean_value = 5;
  // Force the decoded image to have 3 color channels.
  optional bool force_color = 6 [default = false];
  // Force the decoded image to have 1 color channel.
  optional bool force_gray = 7 [default = false];
}
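
// For illustration only (not part of the schema): a typical training-time
// transform (mean values made up) might be, e.g.,
//
//   transform_param {
//     crop_size: 227
//     mirror: true
//     mean_value: 104
//     mean_value: 117
//     mean_value: 123
//   }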

// Message that stores parameters shared by loss layers
message LossParameter {
  // If specified, ignore instances with the given label.
  optional int32 ignore_label = 1;
  // How to normalize the loss for loss layers that aggregate across batches,
  // spatial dimensions, or other dimensions. Currently only implemented in
  // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers.
  enum NormalizationMode {
    // Divide by the number of examples in the batch times spatial dimensions.
    // Outputs that receive the ignore label will NOT be ignored in computing
    // the normalization factor.
    FULL = 0;
    // Divide by the total number of output locations that do not take the
    // ignore_label. If ignore_label is not set, this behaves like FULL.
    VALID = 1;
    // Divide by the batch size.
    BATCH_SIZE = 2;
    // Do not normalize the loss.
    NONE = 3;
  }
  // For historical reasons, the default normalization for
  // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID.
  optional NormalizationMode normalization = 3 [default = VALID];
  // Deprecated. Ignored if normalization is specified. If normalization
  // is not specified, then setting this to false will be equivalent to
  // normalization = BATCH_SIZE to be consistent with previous behavior.
  optional bool normalize = 2;
}
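
// For illustration only (not part of the schema): ignoring a "void" label
// while normalizing over the remaining outputs might be, e.g.,
//
//   loss_param { ignore_label: 255 normalization: VALID }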

// Messages that store parameters used by individual layer types follow, in
// alphabetical order.

message AccuracyParameter {
  // When computing accuracy, count as correct by comparing the true label to
  // the top k scoring classes. By default, only compare to the top scoring
  // class (i.e. argmax).
  optional uint32 top_k = 1 [default = 1];

  // The "label" axis of the prediction blob, whose argmax corresponds to the
  // predicted label -- may be negative to index from the end (e.g., -1 for the
  // last axis). For example, if axis == 1 and the predictions are
  // (N x C x H x W), the label blob is expected to contain N*H*W ground truth
  // labels with integer values in {0, 1, ..., C-1}.
  optional int32 axis = 2 [default = 1];

  // If specified, ignore instances with the given label.
  optional int32 ignore_label = 3;
}

message ArgMaxParameter {
  // If true produce pairs (argmax, maxval)
  optional bool out_max_val = 1 [default = false];
  optional uint32 top_k = 2 [default = 1];
  // The axis along which to maximise -- may be negative to index from the
  // end (e.g., -1 for the last axis).
  // By default ArgMaxLayer maximizes over the flattened trailing dimensions
  // for each index of the first / num dimension.
  optional int32 axis = 3;
}

message ConcatParameter {
  // The axis along which to concatenate -- may be negative to index from the
  // end (e.g., -1 for the last axis). Other axes must have the
  // same dimension for all the bottom blobs.
  // By default, ConcatLayer concatenates blobs along the "channels" axis (1).
  optional int32 axis = 2 [default = 1];

  // DEPRECATED: alias for "axis" -- does not support negative indexing.
  optional uint32 concat_dim = 1 [default = 1];
}

message BatchNormParameter {
  // If false, normalization is performed over the current mini-batch
  // and global statistics are accumulated (but not yet used) by a moving
  // average.
  // If true, those accumulated mean and variance values are used for the
  // normalization.
  // By default, it is set to false when the network is in the training
  // phase and true when the network is in the testing phase.
  optional bool use_global_stats = 1;
  // What fraction of the moving average remains each iteration?
  // Smaller values make the moving average decay faster, giving more
  // weight to the recent values.
  // Each iteration updates the moving average @f$S_{t-1}@f$ with the
  // current mean @f$ Y_t @f$ by
  // @f$ S_t = (1-\beta)Y_t + \beta \cdot S_{t-1} @f$, where @f$ \beta @f$
  // is the moving_average_fraction parameter.
  optional float moving_average_fraction = 2 [default = .999];
  // Small value to add to the variance estimate so that we don't divide by
  // zero.
  optional float eps = 3 [default = 1e-5];
}
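
// For illustration only (not part of the schema): in practice a BatchNorm
// layer is commonly followed by a Scale layer that learns the affine
// parameters, e.g.,
//
//   layer { name: "bn1" type: "BatchNorm" bottom: "conv1" top: "conv1" }
//   layer {
//     name: "scale1" type: "Scale" bottom: "conv1" top: "conv1"
//     scale_param { bias_term: true }
//   }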

message BiasParameter {
  // The first axis of bottom[0] (the first input Blob) along which to apply
  // bottom[1] (the second input Blob). May be negative to index from the end
  // (e.g., -1 for the last axis).
  //
  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
  // top[0] will have the same shape, and bottom[1] may have any of the
  // following shapes (for the given value of axis):
  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
  //    (axis == 1 == -3)          3;     3x40;     3x40x60
  //    (axis == 2 == -2)                   40;       40x60
  //    (axis == 3 == -1)                                60
  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
  // "axis") -- a scalar bias.
  optional int32 axis = 1 [default = 1];

  // (num_axes is ignored unless just one bottom is given and the bias is
  // a learned parameter of the layer. Otherwise, num_axes is determined by the
  // number of axes of the second bottom.)
  // The number of axes of the input (bottom[0]) covered by the bias
  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
  // Set num_axes := 0, to add a zero-axis Blob: a scalar.
  optional int32 num_axes = 2 [default = 1];

  // (filler is ignored unless just one bottom is given and the bias is
  // a learned parameter of the layer.)
  // The initialization for the learned bias parameter.
  // Default is the zero (0) initialization, resulting in the BiasLayer
  // initially performing the identity operation.
  optional FillerParameter filler = 3;
}

message ContrastiveLossParameter {
  // margin for dissimilar pair
  optional float margin = 1 [default = 1.0];
  // The first implementation of this cost did not exactly match the cost of
  // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2.
  // legacy_version = false (the default) uses (margin - d)^2 as proposed in the
  // Hadsell paper. New models should probably use this version.
  // legacy_version = true uses (margin - d^2). This is kept to support /
  // reproduce existing models and results
  optional bool legacy_version = 2 [default = false];
}

message ConvolutionParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms

  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in all spatial dimensions, or once per spatial dimension.
  repeated uint32 pad = 3; // The padding size; defaults to 0
  repeated uint32 kernel_size = 4; // The kernel size
  repeated uint32 stride = 6; // The stride; defaults to 1
  // Factor used to dilate the kernel, (implicitly) zero-filling the resulting
  // holes. (Kernel dilation is sometimes referred to by its use in the
  // algorithme à trous from Holschneider et al. 1987.)
  repeated uint32 dilation = 18; // The dilation; defaults to 1

  // For 2D convolution only, the *_h and *_w versions may also be used to
  // specify both spatial dimensions.
  optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only)
  optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only)
  optional uint32 kernel_h = 11; // The kernel height (2D only)
  optional uint32 kernel_w = 12; // The kernel width (2D only)
  optional uint32 stride_h = 13; // The stride height (2D only)
  optional uint32 stride_w = 14; // The stride width (2D only)

  optional uint32 group = 5 [default = 1]; // The group size for group conv

  optional FillerParameter weight_filler = 7; // The filler for the weight
  optional FillerParameter bias_filler = 8; // The filler for the bias
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 15 [default = DEFAULT];

  // The axis to interpret as "channels" when performing convolution.
  // Preceding dimensions are treated as independent inputs;
  // succeeding dimensions are treated as "spatial".
  // With (N, C, H, W) inputs, and axis == 1 (the default), we perform
  // N independent 2D convolutions, sliding C-channel (or (C/g)-channel, for
  // groups g>1) filters across the spatial axes (H, W) of the input.
  // With (N, C, D, H, W) inputs, and axis == 1, we perform
  // N independent 3D convolutions, sliding (C/g)-channel
  // filters across the spatial axes (D, H, W) of the input.
  optional int32 axis = 16 [default = 1];

  // Whether to force use of the general ND convolution, even if a specific
  // implementation for blobs of the appropriate number of spatial dimensions
  // is available. (Currently, there is only a 2D-specific convolution
  // implementation; for input blobs with num_axes != 2, this option is
  // ignored and the ND implementation will be used.)
  optional bool force_nd_im2col = 17 [default = false];
}
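
// For illustration only (not part of the schema): a 3x3 convolution with 64
// outputs, pad 1 and stride 1 might be, e.g.,
//
//   convolution_param {
//     num_output: 64 kernel_size: 3 pad: 1 stride: 1
//     weight_filler { type: "xavier" }
//   }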

message CropParameter {
  // To crop, elements of the first bottom are selected to fit the dimensions
  // of the second, reference bottom. The crop is configured by
  // - the crop `axis` to pick the dimensions for cropping
  // - the crop `offset` to set the shift for all/each dimension
  // to align the cropped bottom with the reference bottom.
  // All dimensions up to but excluding `axis` are preserved, while
  // the dimensions including and trailing `axis` are cropped.
  // If only one `offset` is set, then all dimensions are offset by this amount.
  // Otherwise, the number of offsets must equal the number of cropped axes to
  // shift the crop in each dimension accordingly.
  // Note: standard dimensions are N,C,H,W so the default is a spatial crop,
  // and `axis` may be negative to index from the end (e.g., -1 for the last
  // axis).
  optional int32 axis = 1 [default = 2];
  repeated uint32 offset = 2;
}
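
// For illustration only (not part of the schema): cropping bottom[0] to match
// bottom[1] over H and W with a fixed shift of 2 might be, e.g.,
//
//   crop_param { axis: 2 offset: 2 }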

message DataParameter {
  enum DB {
    LEVELDB = 0;
    LMDB = 1;
  }
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // The rand_skip variable is for the data layer to skip a few data points
  // so that asynchronous sgd clients do not all start at the same point. The
  // skip point would be set as rand_skip * rand(0,1). Note that rand_skip
  // should not be larger than the number of keys in the database.
  // DEPRECATED. Each solver accesses a different subset of the database.
  optional uint32 rand_skip = 7 [default = 0];
  optional DB backend = 8 [default = LEVELDB];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  // Force the encoded image to have 3 color channels
  optional bool force_encoded_color = 9 [default = false];
  // Prefetch queue (Increase if data feeding bandwidth varies, within the
  // limit of device memory for GPU training)
  optional uint32 prefetch = 10 [default = 4];
}

message DropoutParameter {
  optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio
}

// DummyDataLayer fills any number of arbitrarily shaped blobs with random
// (or constant) data generated by "Fillers" (see "message FillerParameter").
message DummyDataParameter {
  // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N
  // shape fields, and 0, 1 or N data_fillers.
  //
  // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used.
  // If 1 data_filler is specified, it is applied to all top blobs. If N are
  // specified, the ith is applied to the ith top blob.
  repeated FillerParameter data_filler = 1;
  repeated BlobShape shape = 6;

  // 4D dimensions -- deprecated. Use "shape" instead.
  repeated uint32 num = 2;
  repeated uint32 channels = 3;
  repeated uint32 height = 4;
  repeated uint32 width = 5;
}
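
// For illustration only (not part of the schema): a single top blob of
// Gaussian noise might be produced by, e.g.,
//
//   dummy_data_param {
//     shape { dim: 10 dim: 3 dim: 32 dim: 32 }
//     data_filler { type: "gaussian" std: 1.0 }
//   }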

message EltwiseParameter {
  enum EltwiseOp {
    PROD = 0;
    SUM = 1;
    MAX = 2;
  }
  optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation
  repeated float coeff = 2; // blob-wise coefficient for SUM operation

  // Whether to use an asymptotically slower (for >2 inputs) but more stable
  // method of computing the gradient for the PROD operation. (No effect for
  // SUM op.)
  optional bool stable_prod_grad = 3 [default = true];
}

// Message that stores parameters used by ELULayer
message ELUParameter {
  // Described in:
  // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate
  // Deep Network Learning by Exponential Linear Units (ELUs). arXiv
  optional float alpha = 1 [default = 1];
}

// Message that stores parameters used by EmbedLayer
message EmbedParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  // The input is given as integers to be interpreted as one-hot
  // vector indices with dimension input_dim. Hence input_dim should be
  // 1 greater than the maximum possible input value.
  optional uint32 input_dim = 2;

  optional bool bias_term = 3 [default = true]; // Whether to use a bias term
  optional FillerParameter weight_filler = 4; // The filler for the weight
  optional FillerParameter bias_filler = 5; // The filler for the bias
}

// Message that stores parameters used by ExpLayer
message ExpParameter {
  // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = exp(shift + scale * x).
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

/// Message that stores parameters used by FlattenLayer
message FlattenParameter {
  // The first axis to flatten: all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 1 [default = 1];

  // The last axis to flatten: all following axes are retained in the output.
  // May be negative to index from the end (e.g., the default -1 for the last
  // axis).
  optional int32 end_axis = 2 [default = -1];
}

// Message that stores parameters used by HDF5DataLayer
message HDF5DataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 2;

  // Specify whether to shuffle the data.
  // If shuffle == true, the ordering of the HDF5 files is shuffled,
  // and the ordering of data within any given HDF5 file is shuffled,
  // but data between different files are not interleaved; all of a file's
  // data are output (in a random order) before moving onto another file.
  optional bool shuffle = 3 [default = false];
}

message HDF5OutputParameter {
  optional string file_name = 1;
}

message HingeLossParameter {
  enum Norm {
    L1 = 1;
    L2 = 2;
  }

  // Specify the Norm to use: L1 or L2
  optional Norm norm = 1 [default = L1];
}

message ImageDataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4 [default = 1];
  // The rand_skip variable is for the data layer to skip a few data points
  // so that asynchronous sgd clients do not all start at the same point. The
  // skip point would be set as rand_skip * rand(0,1). Note that rand_skip
  // should not be larger than the number of keys in the database.
  optional uint32 rand_skip = 7 [default = 0];
  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  optional bool shuffle = 8 [default = false];
  // It will also resize images if new_height or new_width are not zero.
  optional uint32 new_height = 9 [default = 0];
  optional uint32 new_width = 10 [default = 0];
  // Specify if the images are color or gray
  optional bool is_color = 11 [default = true];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  optional string root_folder = 12 [default = ""];
}

message InfogainLossParameter {
  // Specify the infogain matrix source.
  optional string source = 1;
  optional int32 axis = 2 [default = 1]; // axis of prob
}

message InnerProductParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 3; // The filler for the weight
  optional FillerParameter bias_filler = 4; // The filler for the bias

  // The first axis to be lumped into a single inner product computation;
  // all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 5 [default = 1];
  // Specify whether to transpose the weight matrix or not.
  // If transpose == true, any operations will be performed on the transpose
  // of the weight matrix. The weight matrix itself is not going to be
  // transposed but rather the transpose flag of operations will be toggled
  // accordingly.
  optional bool transpose = 6 [default = false];
}

message InputParameter {
  // This layer produces N >= 1 top blob(s) to be assigned manually.
  // Define N shapes to set a shape for each top.
  // Define 1 shape to set the same shape for every top.
  // Define no shape to defer to reshaping manually.
  repeated BlobShape shape = 1;
}
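
// For illustration only (not part of the schema): a deploy-time input for one
// 3-channel 224x224 image might be declared as, e.g.,
//
//   layer {
//     name: "data" type: "Input" top: "data"
//     input_param { shape { dim: 1 dim: 3 dim: 224 dim: 224 } }
//   }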

// Message that stores parameters used by LogLayer
message LogParameter {
  // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = ln(shift + scale * x) = log_e(shift + scale * x)
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

// Message that stores parameters used by LRNLayer
message LRNParameter {
  optional uint32 local_size = 1 [default = 5];
  optional float alpha = 2 [default = 1.];
  optional float beta = 3 [default = 0.75];
  enum NormRegion {
    ACROSS_CHANNELS = 0;
    WITHIN_CHANNEL = 1;
  }
  optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS];
  optional float k = 5 [default = 1.];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}

message MemoryDataParameter {
  optional uint32 batch_size = 1;
  optional uint32 channels = 2;
  optional uint32 height = 3;
  optional uint32 width = 4;
}

message MVNParameter {
  // This parameter can be set to false to normalize mean only
  optional bool normalize_variance = 1 [default = true];

  // This parameter can be set to true to perform DNN-like MVN
  optional bool across_channels = 2 [default = false];

  // Epsilon for not dividing by zero while normalizing variance
  optional float eps = 3 [default = 1e-9];
}

message ParameterParameter {
  optional BlobShape shape = 1;
}

message PoolingParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 1 [default = MAX]; // The pooling method
  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in height and width or as Y, X pairs.
  optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X)
  optional uint32 pad_h = 9 [default = 0]; // The padding height
  optional uint32 pad_w = 10 [default = 0]; // The padding width
  optional uint32 kernel_size = 2; // The kernel size (square)
  optional uint32 kernel_h = 5; // The kernel height
  optional uint32 kernel_w = 6; // The kernel width
  optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X)
  optional uint32 stride_h = 7; // The stride height
  optional uint32 stride_w = 8; // The stride width
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 11 [default = DEFAULT];
  // If global_pooling then it will pool over the size of the bottom by doing
  // kernel_h = bottom->height and kernel_w = bottom->width
  optional bool global_pooling = 12 [default = false];
  // How to calculate the output size - using ceil (default) or floor rounding.
  enum RoundMode {
    CEIL = 0;
    FLOOR = 1;
  }
  optional RoundMode round_mode = 13 [default = CEIL];
}
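
// For illustration only (not part of the schema): 2x2 max pooling with
// stride 2 might be, e.g.,
//
//   pooling_param { pool: MAX kernel_size: 2 stride: 2 }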

message PowerParameter {
  // PowerLayer computes outputs y = (shift + scale * x) ^ power.
  optional float power = 1 [default = 1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

message PythonParameter {
  optional string module = 1;
  optional string layer = 2;
  // This value is set to the attribute `param_str` of the `PythonLayer` object
  // in Python before calling the `setup()` method. This could be a number,
  // string, dictionary in Python dict format, JSON, etc. You may parse this
  // string in `setup` method and use it in `forward` and `backward`.
  optional string param_str = 3 [default = ''];
  // DEPRECATED
  optional bool share_in_parallel = 4 [default = false];
}
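
// For illustration only (not part of the schema): a Python layer defined by a
// class MyLayer in my_layers.py (both names made up) might be, e.g.,
//
//   layer {
//     name: "py" type: "Python" bottom: "data" top: "py"
//     python_param { module: "my_layers" layer: "MyLayer" param_str: "{'k': 3}" }
//   }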

// Message that stores parameters used by RecurrentLayer
message RecurrentParameter {
  // The dimension of the output (and usually hidden state) representation --
  // must be explicitly set to non-zero.
  optional uint32 num_output = 1 [default = 0];

  optional FillerParameter weight_filler = 2; // The filler for the weight
  optional FillerParameter bias_filler = 3; // The filler for the bias

  // Whether to enable displaying debug_info in the unrolled recurrent net.
  optional bool debug_info = 4 [default = false];

  // Whether to add as additional inputs (bottoms) the initial hidden state
  // blobs, and add as additional outputs (tops) the final timestep hidden state
  // blobs. The number of additional bottom/top blobs required depends on the
  // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs.
  optional bool expose_hidden = 5 [default = false];
}

// Message that stores parameters used by ReductionLayer
message ReductionParameter {
  enum ReductionOp {
    SUM = 1;
    ASUM = 2;
    SUMSQ = 3;
    MEAN = 4;
  }

  optional ReductionOp operation = 1 [default = SUM]; // reduction operation

  // The first axis to reduce to a scalar -- may be negative to index from the
  // end (e.g., -1 for the last axis).
  // (Currently, only reduction along ALL "tail" axes is supported; reduction
  // of axis M through N, where N < num_axes - 1, is unsupported.)
  // Suppose we have an n-axis bottom Blob with shape:
  //     (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)).
  // If axis == m, the output Blob will have shape
  //     (d0, d1, d2, ..., d(m-1)),
  // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1))
  // times, each including (dm * d(m+1) * ... * d(n-1)) individual data.
  // If axis == 0 (the default), the output Blob always has the empty shape
  // (count 1), performing reduction across the entire input --
  // often useful for creating new loss functions.
  optional int32 axis = 2 [default = 0];

  optional float coeff = 3 [default = 1.0]; // coefficient for output
}

// Message that stores parameters used by ReLULayer
message ReLUParameter {
  // Allow non-zero slope for negative inputs to speed up optimization
  // Described in:
  // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities
  // improve neural network acoustic models. In ICML Workshop on Deep Learning
  // for Audio, Speech, and Language Processing.
  optional float negative_slope = 1 [default = 0];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 2 [default = DEFAULT];
}

message ReshapeParameter {
  // Specify the output dimensions. If some of the dimensions are set to 0,
  // the corresponding dimension from the bottom layer is used (unchanged).
  // Exactly one dimension may be set to -1, in which case its value is
  // inferred from the count of the bottom blob and the remaining dimensions.
  // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8:
  //
  //   layer {
  //     type: "Reshape" bottom: "input" top: "output"
  //     reshape_param { ... }
  //   }
  //
  // If "input" is 2D with shape 2 x 8, then the following reshape_param
  // specifications are all equivalent, producing a 3D blob "output" with shape
  // 2 x 2 x 4:
  //
  //   reshape_param { shape { dim:  2  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim: -1 } }
  //   reshape_param { shape { dim:  0  dim:-1  dim:  4 } }
  //
  optional BlobShape shape = 1;

  // axis and num_axes control the portion of the bottom blob's shape that are
  // replaced by (included in) the reshape. By default (axis == 0 and
  // num_axes == -1), the entire bottom blob shape is included in the reshape,
  // and hence the shape field must specify the entire output shape.
  //
  // axis may be non-zero to retain some portion of the beginning of the input
  // shape (and may be negative to index from the end; e.g., -1 to begin the
  // reshape after the last axis, including nothing in the reshape,
  // -2 to include only the last axis, etc.).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are all equivalent,
  // producing a blob "output" with shape 2 x 2 x 4:
  //
  //   reshape_param { shape { dim: 2  dim: 2  dim: 4 } }
  //   reshape_param { shape { dim: 2  dim: 4 } axis:  1 }
  //   reshape_param { shape { dim: 2  dim: 4 } axis: -3 }
  //
  // num_axes specifies the extent of the reshape.
  // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on
  // input axes in the range [axis, axis+num_axes].
  // num_axes may also be -1, the default, to include all remaining axes
  // (starting from axis).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are equivalent,
  // producing a blob "output" with shape 1 x 2 x 8:
  //
  //   reshape_param { shape { dim:  1  dim: 2  dim:  8 } }
  //   reshape_param { shape { dim:  1  dim: 2 } num_axes: 1 }
  //   reshape_param { shape { dim:  1 } num_axes: 0 }
  //
  // On the other hand, these would produce output blob shape 2 x 1 x 8:
  //
  //   reshape_param { shape { dim: 2  dim: 1  dim: 8 } }
  //   reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 }
  //
  optional int32 axis = 2 [default = 0];
  optional int32 num_axes = 3 [default = -1];
}

message ScaleParameter {
  // The first axis of bottom[0] (the first input Blob) along which to apply
  // bottom[1] (the second input Blob). May be negative to index from the end
  // (e.g., -1 for the last axis).
  //
  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
  // top[0] will have the same shape, and bottom[1] may have any of the
  // following shapes (for the given value of axis):
  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
  //    (axis == 1 == -3)          3;     3x40;     3x40x60
  //    (axis == 2 == -2)                   40;       40x60
  //    (axis == 3 == -1)                                60
  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
  // "axis") -- a scalar multiplier.
  optional int32 axis = 1 [default = 1];

  // (num_axes is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer. Otherwise, num_axes is determined by the
  // number of axes of the second bottom.)
  // The number of axes of the input (bottom[0]) covered by the scale
  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
  // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar.
  optional int32 num_axes = 2 [default = 1];

  // (filler is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer.)
  // The initialization for the learned scale parameter.
  // Default is the unit (1) initialization, resulting in the ScaleLayer
  // initially performing the identity operation.
  optional FillerParameter filler = 3;

  // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but
  // may be more efficient). Initialized with bias_filler (defaults to 0).
  optional bool bias_term = 4 [default = false];
  optional FillerParameter bias_filler = 5;
}
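
// For illustration only (not part of the schema): a learned per-channel scale
// with a learned bias might be, e.g.,
//
//   scale_param { axis: 1 bias_term: true }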

message SigmoidParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}

message SliceParameter {
  // The axis along which to slice -- may be negative to index from the end
  // (e.g., -1 for the last axis).
  // By default, SliceLayer slices blobs along the "channels" axis (1).
  optional int32 axis = 3 [default = 1];
  repeated uint32 slice_point = 2;

  // DEPRECATED: alias for "axis" -- does not support negative indexing.
  optional uint32 slice_dim = 1 [default = 1];
}
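
// For illustration only (not part of the schema): splitting a 10-channel blob
// into channels 0-3 and 4-9 might be, e.g.,
//
//   layer {
//     name: "slice" type: "Slice" bottom: "data"
//     top: "first4" top: "rest"
//     slice_param { axis: 1 slice_point: 4 }
//   }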

// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer
message SoftmaxParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];

  // The axis along which to perform the softmax -- may be negative to index
  // from the end (e.g., -1 for the last axis).
  // Any other axes will be evaluated as independent softmaxes.
  optional int32 axis = 2 [default = 1];
}

message TanHParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}

// Message that stores parameters used by TileLayer
message TileParameter {
  // The index of the axis to tile.
  optional int32 axis = 1 [default = 1];

  // The number of copies (tiles) of the blob to output.
  optional int32 tiles = 2;
}

// Message that stores parameters used by ThresholdLayer
message ThresholdParameter {
  optional float threshold = 1 [default = 0]; // Strictly positive values
}

message WindowDataParameter {
  // Specify the data source.
  optional string source = 1;
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 6 [default = false];
  // Foreground (object) overlap threshold
  optional float fg_threshold = 7 [default = 0.5];
  // Background (non-object) overlap threshold
  optional float bg_threshold = 8 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float fg_fraction = 9 [default = 0.25];
  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 context_pad = 10 [default = 0];
  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string crop_mode = 11 [default = "warp"];
  // cache_images: will load all images in memory for faster access
  optional bool cache_images = 12 [default = false];
  // append root_folder to locate images
  optional string root_folder = 13 [default = ""];
}

message SPPParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional uint32 pyramid_height = 1;
  optional PoolMethod pool = 2 [default = MAX]; // The pooling method
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}

// DEPRECATED: use LayerParameter.
message V1LayerParameter {
  repeated string bottom = 2;
  repeated string top = 3;
  optional string name = 4;
  repeated NetStateRule include = 32;
  repeated NetStateRule exclude = 33;
  enum LayerType {
    NONE = 0;
    ABSVAL = 35;
    ACCURACY = 1;
    ARGMAX = 30;
    BNLL = 2;
    CONCAT = 3;
    CONTRASTIVE_LOSS = 37;
    CONVOLUTION = 4;
    DATA = 5;
    DECONVOLUTION = 39;
    DROPOUT = 6;
    DUMMY_DATA = 32;
    EUCLIDEAN_LOSS = 7;
    ELTWISE = 25;
    EXP = 38;
    FLATTEN = 8;
    HDF5_DATA = 9;
    HDF5_OUTPUT = 10;
    HINGE_LOSS = 28;
    IM2COL = 11;
    IMAGE_DATA = 12;
    INFOGAIN_LOSS = 13;
    INNER_PRODUCT = 14;
    LRN = 15;
    MEMORY_DATA = 29;
    MULTINOMIAL_LOGISTIC_LOSS = 16;
    MVN = 34;
    POOLING = 17;
    POWER = 26;
    RELU = 18;
    SIGMOID = 19;
    SIGMOID_CROSS_ENTROPY_LOSS = 27;
    SILENCE = 36;
    SOFTMAX = 20;
    SOFTMAX_LOSS = 21;
    SPLIT = 22;
    SLICE = 33;
    TANH = 23;
    WINDOW_DATA = 24;
    THRESHOLD = 31;
  }
  optional LayerType type = 5;
  repeated BlobProto blobs = 6;
  repeated string param = 1001;
  repeated DimCheckMode blob_share_mode = 1002;
  enum DimCheckMode {
    STRICT = 0;
    PERMISSIVE = 1;
  }
  repeated float blobs_lr = 7;
  repeated float weight_decay = 8;
  repeated float loss_weight = 35;
  optional AccuracyParameter accuracy_param = 27;
  optional ArgMaxParameter argmax_param = 23;
  optional ConcatParameter concat_param = 9;
  optional ContrastiveLossParameter contrastive_loss_param = 40;
  optional ConvolutionParameter convolution_param = 10;
  optional DataParameter data_param = 11;
  optional DropoutParameter dropout_param = 12;
  optional DummyDataParameter dummy_data_param = 26;
  optional EltwiseParameter eltwise_param = 24;
  optional ExpParameter exp_param = 41;
  optional HDF5DataParameter hdf5_data_param = 13;
  optional HDF5OutputParameter hdf5_output_param = 14;
  optional HingeLossParameter hinge_loss_param = 29;
  optional ImageDataParameter image_data_param = 15;
  optional InfogainLossParameter infogain_loss_param = 16;
  optional InnerProductParameter inner_product_param = 17;
  optional LRNParameter lrn_param = 18;
  optional MemoryDataParameter memory_data_param = 22;
  optional MVNParameter mvn_param = 34;
  optional PoolingParameter pooling_param = 19;
  optional PowerParameter power_param = 21;
  optional ReLUParameter relu_param = 30;
  optional SigmoidParameter sigmoid_param = 38;
  optional SoftmaxParameter softmax_param = 39;
  optional SliceParameter slice_param = 31;
  optional TanHParameter tanh_param = 37;
  optional ThresholdParameter threshold_param = 25;
  optional WindowDataParameter window_data_param = 20;
  optional TransformationParameter transform_param = 36;
  optional LossParameter loss_param = 42;
  optional V0LayerParameter layer = 1;
}

// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters
// in Caffe. We keep this message type around for legacy support.
message V0LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the string to specify the layer type

  // Parameters to specify layers with inner products.
  optional uint32 num_output = 3; // The number of outputs for the layer
  optional bool biasterm = 4 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 5; // The filler for the weight
  optional FillerParameter bias_filler = 6; // The filler for the bias

  optional uint32 pad = 7 [default = 0]; // The padding size
  optional uint32 kernelsize = 8; // The kernel size
  optional uint32 group = 9 [default = 1]; // The group size for group conv
  optional uint32 stride = 10 [default = 1]; // The stride
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 11 [default = MAX]; // The pooling method
  optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio

  optional uint32 local_size = 13 [default = 5]; // for local response norm
  optional float alpha = 14 [default = 1.]; // for local response norm
  optional float beta = 15 [default = 0.75]; // for local response norm
  optional float k = 22 [default = 1.];

  // For data layers, specify the data source
  optional string source = 16;
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 17 [default = 1];
  optional string meanfile = 18;
  // For data layers, specify the batch size.
  optional uint32 batchsize = 19;
  // For data layers, specify if we would like to randomly crop an image.
  optional uint32 cropsize = 20 [default = 0];
  // For data layers, specify if we want to randomly mirror data.
  optional bool mirror = 21 [default = false];

  // The blobs containing the numeric parameters of the layer
  repeated BlobProto blobs = 50;
  // The ratio that is multiplied on the global learning rate. If you want to
  // set the learning ratio for one blob, you need to set it for all blobs.
  repeated float blobs_lr = 51;
  // The weight decay that is multiplied on the global weight decay.
  repeated float weight_decay = 52;

  // The rand_skip variable is for the data layer to skip a few data points
  // so that asynchronous sgd clients do not all start at the same point. The
  // skip point would be set as rand_skip * rand(0,1). Note that rand_skip
  // should not be larger than the number of keys in the database.
  optional uint32 rand_skip = 53 [default = 0];

  // Fields related to detection (det_*)
  // foreground (object) overlap threshold
  optional float det_fg_threshold = 54 [default = 0.5];
  // background (non-object) overlap threshold
  optional float det_bg_threshold = 55 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float det_fg_fraction = 56 [default = 0.25];

  // optional bool OBSOLETE_can_clobber = 57 [default = true];

  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 det_context_pad = 58 [default = 0];

  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string det_crop_mode = 59 [default = "warp"];

  // For ReshapeLayer, one needs to specify the new dimensions.
  optional int32 new_num = 60 [default = 0];
  optional int32 new_channels = 61 [default = 0];
  optional int32 new_height = 62 [default = 0];
  optional int32 new_width = 63 [default = 0];

  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  // It will also resize images if new_height or new_width are not zero.
  optional bool shuffle_images = 64 [default = false];

  // For ConcatLayer, one needs to specify the dimension for concatenation, and
  // the other dimensions must be the same for all the bottom blobs.
  // By default it will concatenate blobs along the channels dimension.
  optional uint32 concat_dim = 65 [default = 1];

  optional HDF5OutputParameter hdf5_output_param = 1001;
}

message PReLUParameter {
  // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers:
  // Surpassing Human-Level Performance on ImageNet Classification, 2015.

  // Initial value of a_i. Default is a_i=0.25 for all i.
  optional FillerParameter filler = 1;
  // Whether or not slope parameters are shared across channels.
  optional bool channel_shared = 2 [default = false];
}