# list of launchers for your topology.
launchers:
  # launcher framework (e.g. caffe, dlsdk)
  - framework: dlsdk
    # device used for inference (e.g. for dlsdk: CPU, GPU, HETERO:CPU,GPU, ...)
    device: CPU
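    # CPU extensions library with custom layer implementations; only needed if the
    # topology uses layers the CPU plugin does not support out of the box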
    cpu_extensions: libcpu_extension.so
    # topology description (*.prototxt for caffe, *.xml IR for Inference Engine, etc.)
    # the path to the topology is prefixed with the directory specified in the "-m/--models" option
    model: graph_frozen.xml
    # topology weights binary (*.caffemodel for caffe, *.bin for Inference Engine)
    weights: graph_frozen.bin
    # the launcher returns a raw result, so it should be converted
    # to an appropriate representation with an adapter
    adapter: hit_ratio_adapter
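    # model inputs: each entry below names one input placeholder of the frozen NCF graph
    # (the user and item id embedding lookups)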
    inputs:
      - name: embedding/embedding_lookup/placeholder_port_1
      - name: embedding_1/embedding_lookup/placeholder_port_1
      - name: embedding_2/embedding_lookup/placeholder_port_1
      - name: embedding_3/embedding_lookup/placeholder_port_1
# metrics, preprocessing and postprocessing are typically dataset specific, so the datasets field
# specifies the data and all other steps required to validate the topology
# there is usually a definitions file with options for common datasets, which is merged in
# during evaluation, but since this dataset is not used anywhere else, this config contains its full definition
datasets:
  # unique name of the dataset
  # note that all other steps below are specific to this dataset only
  # if you need to test the topology on multiple datasets, you need to specify
  # every step explicitly for each dataset
  - name: ncf_validation_dataset.npy
    # directory where input data is searched,
    # prefixed with the directory specified in the "-s/--source" option
    # name of the converted annotation file (specified in the -a option during annotation conversion)
    # prefixed with the directory specified in the "-a/--annotations" option
    annotation: ncf_converter.pickle
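    # metadata file produced together with the converted annotation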
    dataset_meta: ncf_converter.json
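    # reader used to produce the launcher input data (here, from the converted annotation
    # rather than from files in a source directory)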
    reader: ncf_data_reader
    # list of metrics calculated on the dataset