1. Add tensors info to TensorsData class.
2. Handle the error case when failing to create pipe info in native functions.
Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
private Pipeline.NewDataCallback mSinkCb = new Pipeline.NewDataCallback() {
@Override
- public void onNewDataReceived(TensorsData data, TensorsInfo info) {
- if (data == null || data.getTensorsCount() != 1 ||
- info == null || info.getTensorsCount() != 1) {
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
mInvalidState = true;
- } else {
- ByteBuffer output = data.getTensorData(0);
+ return;
+ }
- for (int i = 0; i < 10; i++) {
- float expected = i + 1.5f;
+ TensorsInfo info = data.getTensorsInfo();
- if (expected != output.getFloat(i * 4)) {
- mInvalidState = true;
- }
+ if (info == null || info.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ ByteBuffer output = data.getTensorData(0);
+
+ for (int i = 0; i < 10; i++) {
+ float expected = i + 1.5f;
+
+ if (expected != output.getFloat(i * 4)) {
+ mInvalidState = true;
}
}
mCustomPassthrough = CustomFilter.registerCustomFilter("custom-passthrough",
new CustomFilter.CustomFilterCallback() {
@Override
- public TensorsInfo getOutputInfo(TensorsInfo inInfo) {
- return inInfo;
+ public TensorsInfo getOutputInfo(TensorsInfo in) {
+ return in;
}
@Override
- public TensorsData invoke(TensorsData inData, TensorsInfo inInfo, TensorsInfo outInfo) {
- return inData;
+ public TensorsData invoke(TensorsData in) {
+ return in;
}
});
mCustomConvert = CustomFilter.registerCustomFilter("custom-convert",
new CustomFilter.CustomFilterCallback() {
@Override
- public TensorsInfo getOutputInfo(TensorsInfo inInfo) {
- inInfo.setTensorType(0, NNStreamer.TENSOR_TYPE_FLOAT32);
- return inInfo;
+ public TensorsInfo getOutputInfo(TensorsInfo in) {
+ in.setTensorType(0, NNStreamer.TENSOR_TYPE_FLOAT32);
+ return in;
}
@Override
- public TensorsData invoke(TensorsData inData, TensorsInfo inInfo, TensorsInfo outInfo) {
- ByteBuffer input = inData.getTensorData(0);
- ByteBuffer output = TensorsData.allocateByteBuffer(4 * 10);
+ public TensorsData invoke(TensorsData in) {
+ TensorsInfo info = in.getTensorsInfo();
+ ByteBuffer input = in.getTensorData(0);
+
+ info.setTensorType(0, NNStreamer.TENSOR_TYPE_FLOAT32);
+
+ TensorsData out = info.allocate();
+ ByteBuffer output = out.getTensorData(0);
for (int i = 0; i < 10; i++) {
float value = (float) input.getInt(i * 4);
output.putFloat(i * 4, value);
}
- TensorsData out = new TensorsData();
- out.addTensorData(output);
-
+ out.setTensorData(0, output);
return out;
}
});
mCustomAdd = CustomFilter.registerCustomFilter("custom-add",
new CustomFilter.CustomFilterCallback() {
@Override
- public TensorsInfo getOutputInfo(TensorsInfo inInfo) {
- return inInfo;
+ public TensorsInfo getOutputInfo(TensorsInfo in) {
+ return in;
}
@Override
- public TensorsData invoke(TensorsData inData, TensorsInfo inInfo, TensorsInfo outInfo) {
- ByteBuffer input = inData.getTensorData(0);
- ByteBuffer output = TensorsData.allocateByteBuffer(4 * 10);
+ public TensorsData invoke(TensorsData in) {
+ TensorsInfo info = in.getTensorsInfo();
+ ByteBuffer input = in.getTensorData(0);
+
+ TensorsData out = info.allocate();
+ ByteBuffer output = out.getTensorData(0);
for (int i = 0; i < 10; i++) {
float value = input.getFloat(i * 4);
output.putFloat(i * 4, value);
}
- TensorsData out = new TensorsData();
- out.addTensorData(output);
-
+ out.setTensorData(0, output);
return out;
}
});
"tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_INT32, new int[]{10});
+
/* register sink callback */
- pipe.setSinkCallback("sinkx", mSinkCb);
+ pipe.registerSinkCallback("sinkx", mSinkCb);
/* start pipeline */
pipe.start();
/* push input buffer */
for (int i = 0; i < 15; i++) {
- ByteBuffer input = TensorsData.allocateByteBuffer(4 * 10);
+ TensorsData in = TensorsData.allocate(info);
+ ByteBuffer input = in.getTensorData(0);
for (int j = 0; j < 10; j++) {
input.putInt(j * 4, j);
}
- TensorsData in = new TensorsData();
- in.addTensorData(input);
+ in.setTensorData(0, input);
pipe.inputData("srcx", in);
Thread.sleep(50);
"tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_INT32, new int[]{10,1,1,1});
+
/* register sink callback */
- pipe.setSinkCallback("sinkx", mSinkCb);
+ pipe.registerSinkCallback("sinkx", mSinkCb);
/* start pipeline */
pipe.start();
/* push input buffer repeatedly */
for (int i = 0; i < 2048; i++) {
- ByteBuffer input = TensorsData.allocateByteBuffer(4 * 10);
+ TensorsData in = TensorsData.allocate(info);
+ ByteBuffer input = in.getTensorData(0);
for (int j = 0; j < 10; j++) {
input.putInt(j * 4, j);
}
- TensorsData in = new TensorsData();
- in.addTensorData(input);
+ in.setTensorData(0, input);
pipe.inputData("srcx", in);
Thread.sleep(20);
CustomFilter.registerCustomFilter(null,
new CustomFilter.CustomFilterCallback() {
@Override
- public TensorsInfo getOutputInfo(TensorsInfo inInfo) {
- return inInfo;
+ public TensorsInfo getOutputInfo(TensorsInfo in) {
+ return in;
}
@Override
- public TensorsData invoke(TensorsData inData, TensorsInfo inInfo, TensorsInfo outInfo) {
- return inData;
+ public TensorsData invoke(TensorsData in) {
+ return in;
}
});
CustomFilter.registerCustomFilter(mCustomPassthrough.getName(),
new CustomFilter.CustomFilterCallback() {
@Override
- public TensorsInfo getOutputInfo(TensorsInfo inInfo) {
- return inInfo;
+ public TensorsInfo getOutputInfo(TensorsInfo in) {
+ return in;
}
@Override
- public TensorsData invoke(TensorsData inData, TensorsInfo inInfo, TensorsInfo outInfo) {
- return inData;
+ public TensorsData invoke(TensorsData in) {
+ return in;
}
});
private Pipeline.NewDataCallback mSinkCb = new Pipeline.NewDataCallback() {
@Override
- public void onNewDataReceived(TensorsData data, TensorsInfo info) {
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null ||
+ data.getTensorsCount() != 1 ||
+ data.getTensorData(0).capacity() != 200) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
/* validate received data (uint8 2:10:10:1) */
- if (data == null || data.getTensorsCount() != 1 ||
- data.getTensorData(0).capacity() != 200 ||
- info == null || info.getTensorsCount() != 1 ||
+ if (info == null ||
+ info.getTensorsCount() != 1 ||
info.getTensorName(0) != null ||
info.getTensorType(0) != NNStreamer.TENSOR_TYPE_UINT8 ||
!Arrays.equals(info.getTensorDimension(0), new int[]{2,10,10,1})) {
}
@Test
- public void testSetNullDataCb() {
+ public void testRegisterNullDataCb() {
+ String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
+ "tensor_converter ! tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.registerSinkCallback("sinkx", null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testRegisterDataCbInvalidName() {
+ String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
+ "tensor_converter ! tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.registerSinkCallback("invalid_sink", mSinkCb);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testRegisterDataCbNullName() {
+ String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
+ "tensor_converter ! tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.registerSinkCallback(null, mSinkCb);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testUnregisterNullDataCb() {
String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
"tensor_converter ! tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
- pipe.setSinkCallback("sinkx", null);
+ pipe.unregisterSinkCallback("sinkx", null);
fail();
} catch (Exception e) {
/* expected */
}
@Test
- public void testSetDataCbInvalidName() {
+ public void testUnregisterDataCbNullName() {
String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
"tensor_converter ! tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
- pipe.setSinkCallback("invalid_sink", mSinkCb);
+ pipe.unregisterSinkCallback(null, mSinkCb);
fail();
} catch (Exception e) {
/* expected */
}
@Test
- public void testSetDataCbNullName() {
+ public void testUnregisteredDataCb() {
String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
"tensor_converter ! tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
- pipe.setSinkCallback(null, mSinkCb);
+ pipe.unregisterSinkCallback("sinkx", mSinkCb);
fail();
} catch (Exception e) {
/* expected */
"tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{2,10,10,1});
+
/* register sink callback */
- pipe.setSinkCallback("sinkx", mSinkCb);
+ pipe.registerSinkCallback("sinkx", mSinkCb);
/* start pipeline */
pipe.start();
/* push input buffer */
for (int i = 0; i < 10; i++) {
/* dummy input */
- TensorsData in = new TensorsData();
- in.addTensorData(TensorsData.allocateByteBuffer(200));
-
- pipe.inputData("srcx", in);
+ pipe.inputData("srcx", info.allocate());
Thread.sleep(50);
}
Thread.sleep(100);
pipe.stop();
- pipe.setSinkCallback("sinkx", null);
+ pipe.unregisterSinkCallback("sinkx", mSinkCb);
Thread.sleep(100);
/* start pipeline again */
/* push input buffer again */
for (int i = 0; i < 10; i++) {
/* dummy input */
- TensorsData in = new TensorsData();
- in.addTensorData(TensorsData.allocateByteBuffer(200));
-
- pipe.inputData("srcx", in);
+ pipe.inputData("srcx", info.allocate());
Thread.sleep(50);
}
}
@Test
+ public void testDuplicatedDataCb() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{2,10,10,1});
+
+ /* register three callbacks */
+ Pipeline.NewDataCallback cb1 = new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ mReceived++;
+ }
+ };
+
+ Pipeline.NewDataCallback cb2 = new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ mReceived++;
+ }
+ };
+
+ pipe.registerSinkCallback("sinkx", mSinkCb);
+ pipe.registerSinkCallback("sinkx", cb1);
+ pipe.registerSinkCallback("sinkx", cb2);
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", info.allocate());
+ Thread.sleep(50);
+ }
+
+ /* pause pipeline and unregister sink callback */
+ Thread.sleep(100);
+ pipe.stop();
+
+ pipe.unregisterSinkCallback("sinkx", mSinkCb);
+ pipe.unregisterSinkCallback("sinkx", cb1);
+ Thread.sleep(100);
+
+ /* start pipeline again */
+ pipe.start();
+
+ /* push input buffer again */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", info.allocate());
+ Thread.sleep(50);
+ }
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertEquals(40, mReceived);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
public void testRunModel() {
File model = APITestCommon.getTestModel();
String desc = "appsrc name=srcx ! " +
"tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{3,224,224,1});
+
/* register sink callback */
- pipe.setSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
@Override
- public void onNewDataReceived(TensorsData data, TensorsInfo info) {
- if (data == null || data.getTensorsCount() != 1 ||
- info == null || info.getTensorsCount() != 1) {
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
+ if (info == null || info.getTensorsCount() != 1) {
mInvalidState = true;
} else {
ByteBuffer output = data.getTensorData(0);
/* push input buffer */
for (int i = 0; i < 15; i++) {
/* dummy input */
- TensorsData in = new TensorsData();
- in.addTensorData(TensorsData.allocateByteBuffer(3 * 224 * 224));
-
- pipe.inputData("srcx", in);
+ pipe.inputData("srcx", TensorsData.allocate(info));
Thread.sleep(100);
}
"tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{2,10,10,1});
+
/* register sink callback */
- pipe.setSinkCallback("sinkx", mSinkCb);
+ pipe.registerSinkCallback("sinkx", mSinkCb);
/* start pipeline */
pipe.start();
/* push input buffer repeatedly */
for (int i = 0; i < 2048; i++) {
/* dummy input */
- TensorsData in = new TensorsData();
- in.addTensorData(TensorsData.allocateByteBuffer(200));
-
- pipe.inputData("srcx", in);
+ pipe.inputData("srcx", TensorsData.allocate(info));
Thread.sleep(20);
}
"tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{2,10,10,1});
+
/* start pipeline */
pipe.start();
- TensorsData in = new TensorsData();
- in.addTensorData(TensorsData.allocateByteBuffer(200));
-
- pipe.inputData("invalid_src", in);
+ pipe.inputData("invalid_src", TensorsData.allocate(info));
fail();
} catch (Exception e) {
/* expected */
"tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{2,10,10,1});
+
/* start pipeline */
pipe.start();
- TensorsData in = new TensorsData();
- in.addTensorData(TensorsData.allocateByteBuffer(200));
-
- pipe.inputData(null, in);
+ pipe.inputData(null, TensorsData.allocate(info));
fail();
} catch (Exception e) {
/* expected */
"outs.src_1 ! tensor_sink async=false";
try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{2,10,10,1});
+
/* register sink callback */
- pipe.setSinkCallback("sinkx", mSinkCb);
+ pipe.registerSinkCallback("sinkx", mSinkCb);
/* start pipeline */
pipe.start();
/* push input buffer */
for (int i = 0; i < 15; i++) {
/* dummy input */
- TensorsData in = new TensorsData();
- in.addTensorData(TensorsData.allocateByteBuffer(200));
-
- pipe.inputData("srcx", in);
+ pipe.inputData("srcx", TensorsData.allocate(info));
Thread.sleep(50);
if (i == 9) {
"outs.src_0 ! tensor_sink name=sinkx async=false " +
"outs.src_1 ! tensor_sink async=false";
- Pipeline pipe = new Pipeline(desc);
-
- /* start pipeline */
- pipe.start();
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
- try {
/* get pad list with invalid switch name */
pipe.getSwitchPads("invalid_outs");
fail();
} catch (Exception e) {
/* expected */
}
-
- pipe.close();
}
@Test
"outs.src_0 ! tensor_sink name=sinkx async=false " +
"outs.src_1 ! tensor_sink async=false";
- Pipeline pipe = new Pipeline(desc);
-
- /* start pipeline */
- pipe.start();
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
- try {
/* get pad list with null param */
pipe.getSwitchPads(null);
fail();
} catch (Exception e) {
/* expected */
}
-
- pipe.close();
}
@Test
"outs.src_0 ! tensor_sink name=sinkx async=false " +
"outs.src_1 ! tensor_sink async=false";
- Pipeline pipe = new Pipeline(desc);
-
- /* start pipeline */
- pipe.start();
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
- try {
/* select invalid pad name */
pipe.selectSwitchPad("outs", "invalid_src");
fail();
} catch (Exception e) {
/* expected */
}
-
- pipe.close();
}
@Test
"t. ! queue ! valve name=valvex ! tensor_sink name=sinkx";
try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{2,10,10,1});
+
/* register sink callback */
- pipe.setSinkCallback("sinkx", mSinkCb);
+ pipe.registerSinkCallback("sinkx", mSinkCb);
/* start pipeline */
pipe.start();
/* push input buffer */
for (int i = 0; i < 15; i++) {
/* dummy input */
- TensorsData in = new TensorsData();
- in.addTensorData(TensorsData.allocateByteBuffer(200));
-
- pipe.inputData("srcx", in);
+ pipe.inputData("srcx", info.allocate());
Thread.sleep(50);
if (i == 9) {
"t. ! queue ! tensor_sink " +
"t. ! queue ! valve name=valvex ! tensor_sink name=sinkx";
- Pipeline pipe = new Pipeline(desc);
-
- /* start pipeline */
- pipe.start();
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
- try {
/* control valve with invalid name */
pipe.controlValve("invalid_valve", false);
fail();
} catch (Exception e) {
/* expected */
}
-
- pipe.close();
}
}
try {
TensorsInfo info = mSingle.getOutputInfo();
- /* output: uint8 1001:1:1:1 */
+ /* output: uint8 1001:1 */
assertEquals(1, info.getTensorsCount());
assertEquals(NNStreamer.TENSOR_TYPE_UINT8, info.getTensorType(0));
assertArrayEquals(new int[]{1001,1,1,1}, info.getTensorDimension(0));
SingleShot addSingle = new SingleShot(model);
TensorsInfo info = addSingle.getInputInfo();
- /* input: float32 1:1:1:1 */
+ /* input: float32 with dimension 1 */
assertEquals(1, info.getTensorsCount());
assertEquals(NNStreamer.TENSOR_TYPE_FLOAT32, info.getTensorType(0));
assertArrayEquals(new int[]{1,1,1,1}, info.getTensorDimension(0));
TensorsInfo newInfo = new TensorsInfo();
- newInfo.addTensorInfo(NNStreamer.TENSOR_TYPE_FLOAT32, new int[]{1,1,1,10});
+ newInfo.addTensorInfo(NNStreamer.TENSOR_TYPE_FLOAT32, new int[]{10});
addSingle.setInputInfo(newInfo);
info = addSingle.getInputInfo();
- /* input: float32 1:1:1:10 */
+ /* input: float32 with dimension 10 */
assertEquals(1, info.getTensorsCount());
assertEquals(NNStreamer.TENSOR_TYPE_FLOAT32, info.getTensorType(0));
- assertArrayEquals(new int[]{1,1,1,10}, info.getTensorDimension(0));
+ assertArrayEquals(new int[]{10,1,1,1}, info.getTensorDimension(0));
info = addSingle.getOutputInfo();
- /* output: float32 1:1:1:10 */
+ /* output: float32 with dimension 10 */
assertEquals(1, info.getTensorsCount());
assertEquals(NNStreamer.TENSOR_TYPE_FLOAT32, info.getTensorType(0));
- assertArrayEquals(new int[]{1,1,1,10}, info.getTensorDimension(0));
+ assertArrayEquals(new int[]{10,1,1,1}, info.getTensorDimension(0));
} catch (Exception e) {
fail();
}
/* single-shot invoke */
for (int i = 0; i < 600; i++) {
/* dummy input */
- TensorsData in = TensorsData.allocate(info);
- TensorsData out = mSingle.invoke(in);
+ TensorsData out = mSingle.invoke(info.allocate());
- /* output: uint8 1001:1:1:1 */
+ /* output: uint8 1001:1 */
assertEquals(1, out.getTensorsCount());
assertEquals(1001, out.getTensorData(0).capacity());
}
@Test
- public void testInvokeDynamicBase() {
- try {
- TensorsInfo info = mSingle.getInputInfo();
-
- /* let's ignore timeout (set 10 sec) */
- mSingle.setTimeout(10000);
-
- /* single-shot invoke with null */
- TensorsData in = TensorsData.allocate(info);
- TensorsData out = mSingle.invoke(in, null);
- assertEquals(1, out.getTensorsCount());
- assertEquals(1001, out.getTensorData(0).capacity());
-
- /* single-shot invoke with resetting existing input info */
- in = TensorsData.allocate(info);
- out = mSingle.invoke(in, info);
- assertEquals(1, out.getTensorsCount());
- assertEquals(1001, out.getTensorData(0).capacity());
- } catch (Exception e) {
- fail();
- }
- }
-
- @Test
public void testInvokeDynamicVary() {
String root = Environment.getExternalStorageDirectory().getAbsolutePath();
File model = new File(root + "/nnstreamer/test/add.tflite");
for (int i = 1; i < 2; i++) {
TensorsInfo info = new TensorsInfo();
info.addTensorInfo(NNStreamer.TENSOR_TYPE_FLOAT32, new int[]{1,1,1,i});
+
/* dummy input */
- TensorsData in = TensorsData.allocate(info);
- TensorsData out = addSingle.invoke(in, info);
+ TensorsData out = addSingle.invoke(TensorsData.allocate(info));
/* output: float32 1:1:1:i */
assertEquals(1, out.getTensorsCount());
/* timeout 5ms */
mSingle.setTimeout(5);
- for (int i = 0; i < 5; i++) {
+ try {
/* dummy input */
- TensorsData in = TensorsData.allocate(info);
-
- try {
- mSingle.invoke(in);
- fail();
- } catch (Exception e) {
- /* expected */
- }
+ mSingle.invoke(TensorsData.allocate(info));
+ fail();
+ } catch (Exception e) {
+ /* expected */
}
}
@Test
public void testInvalidOutputType() {
- /* output: uint8 1001:1:1:1 */
+ /* output: uint8 1001:1 */
TensorsInfo info = new TensorsInfo();
- info.addTensorInfo(NNStreamer.TENSOR_TYPE_INT16, new int[]{1001,1,1,1});
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_INT16, new int[]{1001,1});
try {
new SingleShot(APITestCommon.getTestModel(), null, info);
@Test
public void testInvalidOutputDimension() {
- /* output: uint8 1001:1:1:1 */
+ /* output: uint8 1001:1 */
TensorsInfo info = new TensorsInfo();
info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{1001,2,1,1});
@Test
public void testInvokeInvalidData() {
/* input data size: 3 * 224 * 224 */
- TensorsData data = new TensorsData();
- data.addTensorData(TensorsData.allocateByteBuffer(100));
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{100});
try {
- mSingle.invoke(data);
+ mSingle.invoke(TensorsData.allocate(info));
fail();
} catch (Exception e) {
/* expected */
@Before
public void setUp() {
APITestCommon.initNNStreamer();
- mData = new TensorsData();
+
+ TensorsInfo info = new TensorsInfo();
+
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{100});
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{200});
+ info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT8, new int[]{300});
+
+ mData = TensorsData.allocate(info);
}
@After
info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT16, new int[]{2,2});
info.addTensorInfo(NNStreamer.TENSOR_TYPE_UINT32, new int[]{2,2,2});
- mData = TensorsData.allocate(info);
+ TensorsData data = TensorsData.allocate(info);
- /* index 0: 2:1:1:1 int16 */
- assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(0), 4));
+ /* index 0: 2 int16 */
+ assertTrue(APITestCommon.isValidBuffer(data.getTensorData(0), 4));
- /* index 1: 2:2:1:1 uint16 */
- assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(1), 8));
+ /* index 1: 2:2 uint16 */
+ assertTrue(APITestCommon.isValidBuffer(data.getTensorData(1), 8));
- /* index 0: 2:2:2:1 uint32 */
- assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(2), 32));
+ /* index 2: 2:2:2 uint32 */
+ assertTrue(APITestCommon.isValidBuffer(data.getTensorData(2), 32));
} catch (Exception e) {
fail();
}
}
@Test
- public void testAddData() {
+ public void testAllocateNullInfo() {
try {
- Object buffer = ByteBuffer.allocateDirect(100).order(ByteOrder.nativeOrder());
-
- mData.addTensorData(buffer);
- assertEquals(1, mData.getTensorsCount());
-
- mData.addTensorData(new byte[200]);
- assertEquals(2, mData.getTensorsCount());
-
- mData.addTensorData(TensorsData.allocateByteBuffer(300));
- assertEquals(3, mData.getTensorsCount());
- } catch (Exception e) {
+ TensorsData.allocate(null);
fail();
+ } catch (Exception e) {
+ /* expected */
}
}
@Test
public void testGetData() {
try {
- testAddData();
-
assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(0), 100));
assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(1), 200));
assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(2), 300));
@Test
public void testSetData() {
try {
- testAddData();
-
- ByteBuffer buffer = TensorsData.allocateByteBuffer(500);
+ ByteBuffer buffer = TensorsData.allocateByteBuffer(200);
mData.setTensorData(1, buffer);
assertEquals(3, mData.getTensorsCount());
assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(0), 100));
- assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(1), 500));
+ assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(1), 200));
assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(2), 300));
} catch (Exception e) {
fail();
}
@Test
- public void testAllocateNullInfo() {
- try {
- TensorsData.allocate(null);
- fail();
- } catch (Exception e) {
- /* expected */
- }
-
- assertEquals(0, mData.getTensorsCount());
- }
-
- @Test
- public void testAddNullByteBuffer() {
+ public void testSetNullByteBuffer() {
try {
ByteBuffer buffer = null;
- mData.addTensorData(buffer);
- fail();
- } catch (Exception e) {
- /* expected */
- }
-
- assertEquals(0, mData.getTensorsCount());
- }
-
- @Test
- public void testAddInvalidType() {
- try {
- Object buffer = new int[8];
-
- mData.addTensorData(buffer);
+ mData.setTensorData(0, buffer);
fail();
} catch (Exception e) {
/* expected */
}
-
- assertEquals(0, mData.getTensorsCount());
}
@Test
- public void testAddInvalidByteBuffer() {
+ public void testSetInvalidOrderByteBuffer() {
try {
/* big-endian byte order */
- Object buffer = ByteBuffer.allocateDirect(100);
+ ByteBuffer buffer = ByteBuffer.allocateDirect(100);
- mData.addTensorData(buffer);
+ mData.setTensorData(0, buffer);
fail();
} catch (Exception e) {
/* expected */
}
-
- assertEquals(0, mData.getTensorsCount());
}
@Test
- public void testAddNonDirectBuffer() {
+ public void testSetNonDirectByteBuffer() {
try {
/* non-direct byte buffer */
- Object buffer = ByteBuffer.allocate(100);
-
- mData.addTensorData(buffer);
- fail();
- } catch (Exception e) {
- /* expected */
- }
-
- assertEquals(0, mData.getTensorsCount());
- }
-
- @Test
- public void testAddNullObject() {
- try {
- Object buffer = null;
-
- mData.addTensorData(buffer);
- fail();
- } catch (Exception e) {
- /* expected */
- }
+ ByteBuffer buffer = ByteBuffer.allocate(100).order(ByteOrder.nativeOrder());
- assertEquals(0, mData.getTensorsCount());
- }
-
- @Test
- public void testAddNullByteArray() {
- try {
- byte[] buffer = null;
-
- mData.addTensorData(buffer);
+ mData.setTensorData(0, buffer);
fail();
} catch (Exception e) {
/* expected */
}
-
- assertEquals(0, mData.getTensorsCount());
}
@Test
public void testGetInvalidIndex() {
try {
- mData.getTensorData(0);
+ mData.getTensorData(5);
fail();
} catch (Exception e) {
/* expected */
try {
ByteBuffer buffer = TensorsData.allocateByteBuffer(500);
- mData.setTensorData(1, buffer);
+ mData.setTensorData(5, buffer);
fail();
} catch (Exception e) {
/* expected */
}
@Test
- public void testSetInvalidByteBuffer() {
- testAddData();
-
+ public void testSetInvalidSizeByteBuffer() {
try {
- /* non-direct byte buffer */
- ByteBuffer buffer = ByteBuffer.allocate(100);
+ ByteBuffer buffer = TensorsData.allocateByteBuffer(500);
mData.setTensorData(1, buffer);
fail();
/* expected */
}
}
-
- @Test
- public void testAddMaxData() {
- try {
- for (int i = 0; i <= NNStreamer.TENSOR_SIZE_LIMIT; i++) {
- mData.addTensorData(TensorsData.allocateByteBuffer(10));
- }
- fail();
- } catch (Exception e) {
- /* expected */
- }
- }
}
try {
testAddInfo();
- /* index 0: 1:1:1:1 int8 */
+ /* index 0: 1 int8 */
assertEquals(1, mInfo.getTensorSize(0));
- /* index 1: 2:2:1:1 uint8 */
+ /* index 1: 2:2 uint8 */
assertEquals(4, mInfo.getTensorSize(1));
- /* index 2: 3:3:3:1 float32 */
+ /* index 2: 3:3:3 float32 */
assertEquals(108, mInfo.getTensorSize(2));
} catch (Exception e) {
fail();
}
@Test
+ public void testAllocate() {
+ try {
+ testAddInfo();
+
+ TensorsData data = mInfo.allocate();
+
+ assertEquals(3, data.getTensorsCount());
+ assertEquals(1, data.getTensorData(0).capacity());
+ assertEquals(4, data.getTensorData(1).capacity());
+ assertEquals(108, data.getTensorData(2).capacity());
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
public void testUpdateInfo() {
try {
testAddInfo();
@Test
public void testAddInvalidDimension() {
try {
- mInfo.addTensorInfo(NNStreamer.TENSOR_TYPE_INT32, new int[]{1,1,0});
+ mInfo.addTensorInfo(NNStreamer.TENSOR_TYPE_INT32, new int[]{1,1,-1});
fail();
} catch (Exception e) {
/* expected */
* To register a new custom-filter, an application should call {@link #registerCustomFilter(String, CustomFilterCallback)}
* before constructing the pipeline.
*/
-public class CustomFilter implements AutoCloseable {
+public final class CustomFilter implements AutoCloseable {
private long mHandle = 0;
private String mName = null;
private CustomFilterCallback mCallback = null;
*
* NNStreamer filter configures input and output tensors information during the caps negotiation.
*
- * Note that this is not a fixed value and the pipeline may try different values during the cap negotiations.
+ * Note that this is not a fixed value and the pipeline may try different values during the caps negotiation.
* An application should validate the information of input tensors and return proper output information.
*
- * @param inInfo The input tensors information
+ * @param in The input tensors information
*
* @return The output tensors information
*/
- TensorsInfo getOutputInfo(TensorsInfo inInfo);
+ TensorsInfo getOutputInfo(TensorsInfo in);
/**
* Called synchronously while processing the pipeline.
*
* NNStreamer filter invokes the given custom-filter callback while processing the pipeline.
*
- * @param inData The input data (a single frame, tensor/tensors)
- * @param inInfo The input tensors information
- * @param outInfo The output tensors information
+ * @param in The input data (a single frame, tensor/tensors)
*
* @return The output data (a single frame, tensor/tensors)
*/
- TensorsData invoke(TensorsData inData, TensorsInfo inInfo, TensorsInfo outInfo);
+ TensorsData invoke(TensorsData in);
}
/**
* @param name The name of custom-filter
* @param callback The function to be called while processing the pipeline
*
- * @return <code>CustomFilter</code> instance
+ * @return {@link CustomFilter} instance
*
* @throws IllegalArgumentException if given param is null
* @throws IllegalStateException if failed to initialize custom-filter
*/
private CustomFilter(@NonNull String name, @NonNull CustomFilterCallback callback) {
if (name == null) {
- throw new IllegalArgumentException("The param name is null");
+ throw new IllegalArgumentException("Given name is null");
}
if (callback == null) {
- throw new IllegalArgumentException("The param callback is null");
+ throw new IllegalArgumentException("Given callback is null");
}
mHandle = nativeInitialize(name);
/**
* Internal method called from native during the caps negotiation.
*/
- private TensorsInfo getOutputInfo(TensorsInfo info) {
+ private TensorsInfo getOutputInfo(TensorsInfo in) {
TensorsInfo out = null;
if (mCallback != null) {
- out = mCallback.getOutputInfo(info);
+ out = mCallback.getOutputInfo(in);
}
return out;
/**
* Internal method called from native while processing the pipeline.
*/
- private TensorsData invoke(TensorsData inData, TensorsInfo inInfo, TensorsInfo outInfo) {
+ private TensorsData invoke(TensorsData in) {
TensorsData out = null;
if (mCallback != null) {
- out = mCallback.invoke(inData, inInfo, outInfo);
+ out = mCallback.invoke(in);
}
return out;
mHandle = 0;
}
}
+
+ /**
+ * Private constructor to prevent the instantiation.
+ */
+ private CustomFilter() {}
}
public static final int TENSOR_RANK_LIMIT = 4;
/**
- * The maximum number of tensor instances that tensors may have.
+ * The maximum number of tensors that a {@link TensorsData} instance may have.
*/
public static final int TENSOR_SIZE_LIMIT = 16;
public static String getVersion() {
return nativeGetVersion();
}
+
+ /**
+ * Private constructor to prevent the instantiation.
+ */
+ private NNStreamer() {}
}
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
-import java.util.Hashtable;
+import java.util.ArrayList;
+import java.util.HashMap;
/**
* Provides interfaces to create and execute stream pipelines with neural networks.<br>
* <br>
- * <code>Pipeline</code> allows the following operations with NNStreamer:<br>
+ * {@link Pipeline} allows the following operations with NNStreamer:<br>
* - Create a stream pipeline with NNStreamer plugins, GStreamer plugins.<br>
* - Interfaces to push data to the pipeline from the application.<br>
* - Interfaces to pull data from the pipeline to the application.<br>
*/
public final class Pipeline implements AutoCloseable {
private long mHandle = 0;
- private Hashtable<String, NewDataCallback> mSinkCallbacks = new Hashtable<>();
+ private HashMap<String, ArrayList<NewDataCallback>> mSinkCallbacks = new HashMap<>();
private StateChangeCallback mStateCallback = null;
private native long nativeConstruct(String description, boolean addStateCb);
* Called when a sink node receives new data.
*
* If an application wants to accept data outputs of an NNStreamer stream, use this callback to get data from the stream.
- * Note that the buffer may be deallocated after the return and this is synchronously called.
+ * Note that this is synchronously called and the buffer may be deallocated after the callback is finished.
* Thus, if you need the data afterwards, copy the data to another buffer and return fast.
* Do not spend too much time in the callback. It is recommended to use very small tensors at sinks.
*
* @param data The output data (a single frame, tensor/tensors)
- * @param info The tensors information (dimension, type of output tensor/tensors)
*/
- void onNewDataReceived(TensorsData data, TensorsInfo info);
+ void onNewDataReceived(TensorsData data);
}
/**
}
/**
- * Creates a new <code>Pipeline</code> instance with the given pipeline description.
+ * Creates a new {@link Pipeline} instance with the given pipeline description.
*
- * @param description The pipeline description.
- * Refer to GStreamer manual or NNStreamer (github.com/nnsuite/nnstreamer) documentation for examples and the grammar.
+ * @param description The pipeline description. Refer to GStreamer manual or
+ * <a href="https://github.com/nnsuite/nnstreamer">NNStreamer</a> documentation for examples and the grammar.
*
* @throws IllegalArgumentException if given param is null
* @throws IllegalStateException if failed to construct the pipeline
}
/**
- * Creates a new <code>Pipeline</code> instance with the given pipeline description.
+ * Creates a new {@link Pipeline} instance with the given pipeline description.
*
- * @param description The pipeline description.
- * Refer to GStreamer manual or NNStreamer (github.com/nnsuite/nnstreamer) documentation for examples and the grammar.
+ * @param description The pipeline description. Refer to GStreamer manual or
+ * <a href="https://github.com/nnsuite/nnstreamer">NNStreamer</a> documentation for examples and the grammar.
* @param callback The function to be called when the pipeline state is changed.
* You may set null if it's not required.
*
*/
public Pipeline(@NonNull String description, @Nullable StateChangeCallback callback) {
if (description == null) {
- throw new IllegalArgumentException("The param description is null");
+ throw new IllegalArgumentException("Given description is null");
}
mHandle = nativeConstruct(description, (callback != null));
checkPipelineHandle();
if (name == null) {
- throw new IllegalArgumentException("The param name is null");
+ throw new IllegalArgumentException("Given name is null");
}
if (data == null) {
- throw new IllegalArgumentException("The param data is null");
+ throw new IllegalArgumentException("Given data is null");
}
if (!nativeInputData(mHandle, name, data)) {
checkPipelineHandle();
if (name == null) {
- throw new IllegalArgumentException("The param name is null");
+ throw new IllegalArgumentException("Given name is null");
}
String[] pads = nativeGetSwitchPads(mHandle, name);
checkPipelineHandle();
if (name == null) {
- throw new IllegalArgumentException("The param name is null");
+ throw new IllegalArgumentException("Given name is null");
}
if (pad == null) {
- throw new IllegalArgumentException("The param pad is null");
+ throw new IllegalArgumentException("Given pad is null");
}
if (!nativeSelectSwitchPad(mHandle, name, pad)) {
checkPipelineHandle();
if (name == null) {
- throw new IllegalArgumentException("The param name is null");
+ throw new IllegalArgumentException("Given name is null");
}
if (!nativeControlValve(mHandle, name, open)) {
/**
* Registers new data callback to sink node.
- * If an application registers a callback with same name, the callback is replaced with new one.
+ * Multiple callbacks can be registered to the same sink node; registering the same callback object again is ignored.
*
* @param name The name of sink node
- * @param callback Callback for new data
+ * @param callback The callback for new data
*
* @throws IllegalArgumentException if given param is null
- * @throws IllegalStateException if failed to add the callback to sink node in the pipeline
+ * @throws IllegalStateException if failed to register the callback to sink node in the pipeline
*/
- public void setSinkCallback(@NonNull String name, NewDataCallback callback) {
+ public void registerSinkCallback(@NonNull String name, @NonNull NewDataCallback callback) {
if (name == null) {
- throw new IllegalArgumentException("The param name is null");
+ throw new IllegalArgumentException("Given name is null");
+ }
+
+ if (callback == null) {
+ throw new IllegalArgumentException("Given callback is null");
}
synchronized(this) {
- if (mSinkCallbacks.containsKey(name)) {
- if (callback == null) {
- /* remove callback */
- mSinkCallbacks.remove(name);
- nativeRemoveSinkCallback(mHandle, name);
- } else {
- mSinkCallbacks.replace(name, callback);
+ ArrayList<NewDataCallback> cbList = mSinkCallbacks.get(name);
+
+ if (cbList != null) {
+ /* add the callback only if the list does not already contain it */
+ if (!cbList.contains(callback)) {
+ cbList.add(callback);
}
} else {
- if (callback == null) {
- throw new IllegalArgumentException("The param callback is null");
+ if (nativeAddSinkCallback(mHandle, name)) {
+ cbList = new ArrayList<>();
+ cbList.add(callback);
+ mSinkCallbacks.put(name, cbList);
} else {
- if (nativeAddSinkCallback(mHandle, name)) {
- mSinkCallbacks.put(name, callback);
- } else {
- throw new IllegalStateException("Failed to set sink callback to " + name);
- }
+ throw new IllegalStateException("Failed to register sink callback to " + name);
}
}
}
}
/**
- * Internal method called from native when a new data is available.
+ * Unregisters data callback from sink node.
+ *
+ * @param name The name of sink node
+ * @param callback The callback object to be unregistered
+ *
+ * @throws IllegalArgumentException if given param is null
+ * @throws IllegalStateException if failed to unregister the callback from sink node
*/
- private void newDataReceived(String name, TensorsData data, TensorsInfo info) {
- NewDataCallback cb;
+ public void unregisterSinkCallback(@NonNull String name, @NonNull NewDataCallback callback) {
+ if (name == null) {
+ throw new IllegalArgumentException("Given name is null");
+ }
+
+ if (callback == null) {
+ throw new IllegalArgumentException("Given callback is null");
+ }
synchronized(this) {
- cb = mSinkCallbacks.get(name);
+ ArrayList<NewDataCallback> cbList = mSinkCallbacks.get(name);
+
+ if (cbList == null || !cbList.contains(callback)) {
+ throw new IllegalStateException("Failed to unregister sink callback from " + name);
+ }
+
+ cbList.remove(callback);
+ if (cbList.isEmpty()) {
+ /* remove callback */
+ mSinkCallbacks.remove(name);
+ nativeRemoveSinkCallback(mHandle, name);
+ }
}
+ }
+
+ /**
+ * Internal method called from native when new data is available.
+ */
+ private void newDataReceived(String name, TensorsData data) {
+ synchronized(this) {
+ ArrayList<NewDataCallback> cbList = mSinkCallbacks.get(name);
- if (cb != null) {
- cb.onNewDataReceived(data, info);
+ if (cbList != null) {
+ for (int i = 0; i < cbList.size(); i++) {
+ cbList.get(i).onNewDataReceived(data);
+ }
+ }
}
}
* Internal method called from native when the state of pipeline is changed.
*/
private void stateChanged(int state) {
- StateChangeCallback cb;
-
synchronized(this) {
- cb = mStateCallback;
- }
-
- if (cb != null) {
- cb.onStateChanged(state);
+ if (mStateCallback != null) {
+ mStateCallback.onStateChanged(state);
+ }
}
}
* thus, users are supposed to use NNStreamer Pipeline API directly if they want more advanced features.<br>
* The user is expected to preprocess the input data for the given neural network model.<br>
* <br>
- * <code>SingleShot</code> allows the following operations with NNStreamer:<br>
+ * {@link SingleShot} allows the following operations with NNStreamer:<br>
* - Open a machine learning model.<br>
* - Interfaces to enter a single instance of input data to the opened model.<br>
* - Utility functions to get the information of opened model.<br>
private native long nativeOpen(String model, TensorsInfo in, TensorsInfo out);
private native void nativeClose(long handle);
- private native TensorsData nativeInvoke(long handle, TensorsData in, TensorsInfo inInfo);
+ private native TensorsData nativeInvoke(long handle, TensorsData in);
private native TensorsInfo nativeGetInputInfo(long handle);
private native TensorsInfo nativeGetOutputInfo(long handle);
private native boolean nativeSetInputInfo(long handle, TensorsInfo in);
private native boolean nativeSetTimeout(long handle, int timeout);
/**
- * Creates a new <code>SingleShot</code> instance with the given model.
+ * Creates a new {@link SingleShot} instance with the given model.
* If the model has flexible data dimensions, the pipeline will not be constructed and this will make an exception.
*
* @param model The path to the neural network model file
*
- * @throws IllegalArgumentException if given param is null
+ * @throws IllegalArgumentException if given param is invalid
* @throws IllegalStateException if failed to construct the pipeline
*/
public SingleShot(@NonNull File model) {
}
/**
- * Creates a new <code>SingleShot</code> instance with the given model.
+ * Creates a new {@link SingleShot} instance with the given model.
* The input and output tensors information are required if the given model has flexible data dimensions,
* where the information MUST be given before executing the model.
* However, once it's given, the dimension cannot be changed for the given model handle.
* You may set null if it's not required.
*
- * @param model The path to the neural network model file
+ * @param model The {@link File} object pointing to the neural network model file
* @param in The input tensors information
* @param out The output tensors information
*
- * @throws IllegalArgumentException if given param is null
+ * @throws IllegalArgumentException if given param is invalid
* @throws IllegalStateException if failed to construct the pipeline
*/
public SingleShot(@NonNull File model, @Nullable TensorsInfo in, @Nullable TensorsInfo out) {
if (model == null || !model.exists()) {
- throw new IllegalArgumentException("The param model is invalid");
+ throw new IllegalArgumentException("Given model is invalid");
}
String path = model.getAbsolutePath();
/**
* Invokes the model with the given input data.
- *
- * Note that this has a default timeout of 3 seconds.
- * If an application wants to change the time to wait for an output,
- * set the timeout using {@link #setTimeout(int)}.
- *
- * @param in The input data to be inferred (a single frame, tensor/tensors)
- *
- * @return The output data (a single frame, tensor/tensors)
- *
- * @throws IllegalStateException if failed to invoke the model
- * @throws IllegalArgumentException if given param is null
- */
- public TensorsData invoke(@NonNull TensorsData in) {
- return invoke(in, null);
- }
-
- /**
- * Invokes the model with the given input data.
* If the model has flexible input data dimensions, input information for this
* run of the model can be passed. This changes the currently set input information
* for this instance of the model. The corresponding output information can be
* If an application wants to change the time to wait for an output,
* set the timeout using {@link #setTimeout(int)}.
*
- * @param inData The input data to be inferred (a single frame, tensor/tensors)
- * @param inInfo The input tensors information
+ * @param in The input data to be inferred (a single frame, tensor/tensors)
*
* @return The output data (a single frame, tensor/tensors)
*
* @throws IllegalStateException if failed to invoke the model
* @throws IllegalArgumentException if given param is null
*/
- public TensorsData invoke(@NonNull TensorsData inData, @Nullable TensorsInfo inInfo) {
+ public TensorsData invoke(@NonNull TensorsData in) {
checkPipelineHandle();
- if (inData == null) {
- throw new IllegalArgumentException("Input tensor data is null");
+ if (in == null) {
+ throw new IllegalArgumentException("Given input data is null");
}
- TensorsData out = nativeInvoke(mHandle, inData, inInfo);
+ TensorsData out = nativeInvoke(mHandle, in);
if (out == null) {
throw new IllegalStateException("Failed to invoke the model");
}
checkPipelineHandle();
if (timeout <= 0) {
- throw new IllegalArgumentException("The param timeout is invalid");
+ throw new IllegalArgumentException("Given timeout is invalid");
}
if (!nativeSetTimeout(mHandle, timeout)) {
checkPipelineHandle();
if (in == null) {
- throw new IllegalArgumentException("Input tensor info is null");
+ throw new IllegalArgumentException("Given input info is null");
}
if (!nativeSetInputInfo(mHandle, in)) {
* Provides interfaces to handle tensor data frame.
*/
public final class TensorsData implements AutoCloseable {
+ private TensorsInfo mInfo = null;
private ArrayList<ByteBuffer> mDataList = new ArrayList<>();
/**
* @param size The byte size of the buffer
*
* @return The new byte buffer
+ *
+ * @throws IllegalArgumentException if given size is invalid
*/
public static ByteBuffer allocateByteBuffer(int size) {
if (size <= 0) {
- throw new IllegalArgumentException("The param size is invalid");
+ throw new IllegalArgumentException("Given size is invalid");
}
return ByteBuffer.allocateDirect(size).order(ByteOrder.nativeOrder());
}
/**
- * Allocates a new <code>TensorsData</code> instance with the given tensors information.
+ * Allocates a new {@link TensorsData} instance with the given tensors information.
*
* @param info The tensors information
*
- * @return The allocated tensors data instance
+ * @return {@link TensorsData} instance
*
- * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalArgumentException if given info is invalid
*/
public static TensorsData allocate(@NonNull TensorsInfo info) {
- if (info == null) {
- throw new IllegalArgumentException("The param info is null");
+ if (info == null || info.getTensorsCount() == 0) {
+ throw new IllegalArgumentException("Given info is invalid");
}
TensorsData data = new TensorsData();
int count = info.getTensorsCount();
+ data.setTensorsInfo(info);
+
for (int i = 0; i < count; i++) {
- int size = info.getTensorSize(i);
- data.addTensorData(allocateByteBuffer(size));
+ data.addTensorData(allocateByteBuffer(info.getTensorSize(i)));
}
return data;
}
/**
- * Gets the number of tensors in tensors data.
+ * Gets the tensors information.
*
- * @return The number of tensors
+ * @return The {@link TensorsInfo} instance
*/
- public int getTensorsCount() {
- return mDataList.size();
+ public TensorsInfo getTensorsInfo() {
+ return mInfo;
}
/**
- * Adds a new tensor data.
+ * Sets the tensors information.
*
- * @param data The data object to be added
+ * @param info The tensors information
*
- * @throws IllegalArgumentException if the data is not a byte buffer or the buffer is invalid
- * @throws IndexOutOfBoundsException when the maximum number of tensors in the list
+ * @throws IllegalArgumentException if given info is null
*/
- public void addTensorData(@NonNull Object data) {
- if (data == null || !(data instanceof ByteBuffer)) {
- throw new IllegalArgumentException("Given data is not a byte buffer");
+ private void setTensorsInfo(@NonNull TensorsInfo info) {
+ if (info == null || info.getTensorsCount() == 0) {
+ throw new IllegalArgumentException("Given info is invalid");
}
- addTensorData((ByteBuffer) data);
+ mInfo = info;
+ }
+
+ /**
+ * Gets the number of tensors in tensors data.
+ *
+ * @return The number of tensors
+ */
+ public int getTensorsCount() {
+ return mDataList.size();
}
/**
* @throws IllegalArgumentException if given data is invalid
* @throws IndexOutOfBoundsException when the maximum number of tensors in the list
*/
- public void addTensorData(@NonNull byte[] data) {
+ private void addTensorData(@NonNull byte[] data) {
if (data == null) {
throw new IllegalArgumentException("Given data is null");
}
* @throws IllegalArgumentException if given data is invalid
* @throws IndexOutOfBoundsException when the maximum number of tensors in the list
*/
- public void addTensorData(@NonNull ByteBuffer data) {
- checkByteBuffer(data);
-
+ private void addTensorData(@NonNull ByteBuffer data) {
int index = getTensorsCount();
- if (index >= NNStreamer.TENSOR_SIZE_LIMIT) {
- throw new IndexOutOfBoundsException("Max size of the tensors is " + NNStreamer.TENSOR_SIZE_LIMIT);
- }
+ checkByteBuffer(index, data);
mDataList.add(data);
}
*/
public void setTensorData(int index, @NonNull ByteBuffer data) {
checkIndexBounds(index);
- checkByteBuffer(data);
+ checkByteBuffer(index, data);
mDataList.set(index, data);
}
* Internal method to check byte buffer.
*
* @throws IllegalArgumentException if given data is invalid
+ * @throws IndexOutOfBoundsException if the given index is invalid
*/
- private void checkByteBuffer(ByteBuffer data) {
+ private void checkByteBuffer(int index, ByteBuffer data) {
if (data == null) {
throw new IllegalArgumentException("Given data is null");
}
/* Default byte order of ByteBuffer in java is big-endian, it should be a little-endian. */
throw new IllegalArgumentException("Given data has invalid byte order");
}
+
+ if (index >= NNStreamer.TENSOR_SIZE_LIMIT) {
+ throw new IndexOutOfBoundsException("Max size of the tensors is " + NNStreamer.TENSOR_SIZE_LIMIT);
+ }
+
+ /* compare to tensors info */
+ if (mInfo != null) {
+ int count = mInfo.getTensorsCount();
+
+ if (index >= count) {
+ throw new IndexOutOfBoundsException("Current information has " + count + " tensors");
+ }
+
+ int size = mInfo.getTensorSize(index);
+
+ if (data.capacity() != size) {
+ throw new IllegalArgumentException("Invalid buffer size, required size is " + size);
+ }
+ }
}
@Override
public void close() {
mDataList.clear();
+ mInfo = null;
}
+
+ /**
+ * Private constructor to prevent instantiation.
+ */
+ private TensorsData() {}
}
private ArrayList<TensorInfo> mInfoList = new ArrayList<>();
/**
- * Gets the number of tensors in tensors information.
+ * Allocates a new {@link TensorsData} instance with the tensors information.
+ *
+ * @return {@link TensorsData} instance
+ *
+ * @throws IllegalStateException if tensors info is empty
+ */
+ public TensorsData allocate() {
+ if (getTensorsCount() == 0) {
+ throw new IllegalStateException("Empty tensor info");
+ }
+
+ return TensorsData.allocate(this);
+ }
+
+ /**
+ * Gets the number of tensors.
+ * The maximum number of tensors is {@link NNStreamer#TENSOR_SIZE_LIMIT}.
*
* @return The number of tensors
*/
int index = getTensorsCount();
if (index >= NNStreamer.TENSOR_SIZE_LIMIT) {
- throw new IndexOutOfBoundsException("Max size of the tensors is " + NNStreamer.TENSOR_SIZE_LIMIT);
+ throw new IndexOutOfBoundsException("Max number of the tensors is " + NNStreamer.TENSOR_SIZE_LIMIT);
}
mInfoList.add(new TensorInfo(name, type, dimension));
}
/**
- * Gets a tensor name of given index.
+ * Gets the tensor name of the given index.
*
* @param index The index of the tensor information in the list
*
setDimension(dimension);
}
- public void setName(String name) {
+ public void setName(@Nullable String name) {
this.name = name;
}
throw new IllegalArgumentException("Given tensor dimension is null");
}
- int length = dimension.length;
+ int rank = dimension.length;
- if (length > NNStreamer.TENSOR_RANK_LIMIT) {
+ if (rank > NNStreamer.TENSOR_RANK_LIMIT) {
throw new IllegalArgumentException("Max size of the tensor rank is " + NNStreamer.TENSOR_RANK_LIMIT);
}
- for (int i = 0; i < length; i++) {
- if (dimension[i] <= 0) {
+ for (int dim : dimension) {
+ if (dim <= 0) {
throw new IllegalArgumentException("The dimension should be a positive value");
}
}
- System.arraycopy(dimension, 0, this.dimension, 0, length);
+ System.arraycopy(dimension, 0, this.dimension, 0, rank);
- /* set 1 as default */
- for (int i = length; i < NNStreamer.TENSOR_RANK_LIMIT; i++) {
+ /* fill default value */
+ for (int i = rank; i < NNStreamer.TENSOR_RANK_LIMIT; i++) {
this.dimension[i] = 1;
}
}
void
nns_destroy_pipe_info (pipeline_info_s * pipe_info, JNIEnv * env)
{
- g_assert (pipe_info);
+ g_return_if_fail (pipe_info != NULL);
g_mutex_lock (&pipe_info->lock);
g_hash_table_destroy (pipe_info->element_handles);
*/
gboolean
nns_convert_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env,
- ml_tensors_data_s * data, jobject * result)
+ ml_tensors_data_h data_h, ml_tensors_info_h info_h, jobject * result)
{
guint i;
- jmethodID mid_init, mid_add;
+ jmethodID mid_init, mid_add_data, mid_set_info;
jobject obj_data = NULL;
+ ml_tensors_data_s *data;
g_return_val_if_fail (pipe_info, FALSE);
g_return_val_if_fail (env, FALSE);
- g_return_val_if_fail (data && result, FALSE);
+ g_return_val_if_fail (data_h, FALSE);
+ g_return_val_if_fail (result, FALSE);
+
+ data = (ml_tensors_data_s *) data_h;
/* method to generate tensors data */
mid_init = (*env)->GetMethodID (env, pipe_info->cls_tensors_data, "<init>", "()V");
- mid_add = (*env)->GetMethodID (env, pipe_info->cls_tensors_data, "addTensorData", "([B)V");
+ mid_add_data = (*env)->GetMethodID (env, pipe_info->cls_tensors_data, "addTensorData", "([B)V");
+ mid_set_info = (*env)->GetMethodID (env, pipe_info->cls_tensors_data, "setTensorsInfo", "(Lorg/nnsuite/nnstreamer/TensorsInfo;)V");
obj_data = (*env)->NewObject (env, pipe_info->cls_tensors_data, mid_init);
if (!obj_data) {
goto done;
}
+ if (info_h) {
+ jobject obj_info = NULL;
+
+ if (!nns_convert_tensors_info (pipe_info, env, info_h, &obj_info)) {
+ nns_loge ("Failed to convert tensors info.");
+ (*env)->DeleteLocalRef (env, obj_data);
+ obj_data = NULL;
+ goto done;
+ }
+
+ (*env)->CallVoidMethod (env, obj_data, mid_set_info, obj_info);
+ (*env)->DeleteLocalRef (env, obj_info);
+ }
+
for (i = 0; i < data->num_tensors; i++) {
jsize buffer_size = (jsize) data->tensors[i].size;
jbyteArray buffer = (*env)->NewByteArray (env, buffer_size);
(*env)->SetByteArrayRegion (env, buffer, 0, buffer_size, (jbyte *) data->tensors[i].tensor);
- (*env)->CallVoidMethod (env, obj_data, mid_add, buffer);
+ (*env)->CallVoidMethod (env, obj_data, mid_add_data, buffer);
(*env)->DeleteLocalRef (env, buffer);
}
*/
gboolean
nns_parse_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env,
- jobject obj_data, ml_tensors_data_s * data)
+ jobject obj_data, ml_tensors_data_h * data_h, ml_tensors_info_h * info_h)
{
guint i;
+ ml_tensors_data_s *data;
+ ml_tensors_info_s *info;
+ gboolean failed = FALSE;
g_return_val_if_fail (pipe_info, FALSE);
g_return_val_if_fail (env, FALSE);
- g_return_val_if_fail (obj_data && data, FALSE);
+ g_return_val_if_fail (obj_data, FALSE);
+ g_return_val_if_fail (data_h, FALSE);
+
+ if (ml_tensors_data_create_no_alloc (NULL, data_h) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create handle for tensors data.");
+ return FALSE;
+ }
+
+ data = (ml_tensors_data_s *) (*data_h);
/* get field 'mDataList' */
- jfieldID fid_arraylist = (*env)->GetFieldID (env, pipe_info->cls_tensors_data, "mDataList", "java/util/ArrayList");
+ jfieldID fid_arraylist = (*env)->GetFieldID (env, pipe_info->cls_tensors_data, "mDataList", "Ljava/util/ArrayList;");
jobject obj_arraylist = (*env)->GetObjectField (env, obj_data, fid_arraylist);
/* method to get tensors data */
if (data->tensors[i].tensor == NULL) {
nns_loge ("Failed to allocate memory %zd, data index %d.", data_size, i);
(*env)->DeleteLocalRef (env, tensor_data);
- goto failed;
+ failed = TRUE;
+ goto done;
}
memcpy (data->tensors[i].tensor, data_ptr, data_size);
}
}
- (*env)->DeleteLocalRef (env, cls_arraylist);
- (*env)->DeleteLocalRef (env, obj_arraylist);
- return TRUE;
+ /* parse tensors info in data class */
+ if (info_h) {
+ jmethodID mid_get_info = (*env)->GetMethodID (env, pipe_info->cls_tensors_data,
+ "getTensorsInfo", "()Lorg/nnsuite/nnstreamer/TensorsInfo;");
+ jobject obj_info = (*env)->CallObjectMethod (env, obj_data, mid_get_info);
-failed:
- for (i = 0; i < data->num_tensors; i++) {
- if (data->tensors[i].tensor) {
- g_free (data->tensors[i].tensor);
- data->tensors[i].tensor = NULL;
+ if (obj_info) {
+ nns_parse_tensors_info (pipe_info, env, obj_info, info_h);
+ (*env)->DeleteLocalRef (env, obj_info);
}
+ }
- data->tensors[i].size = 0;
+done:
+ (*env)->DeleteLocalRef (env, cls_arraylist);
+ (*env)->DeleteLocalRef (env, obj_arraylist);
+
+ if (failed) {
+ ml_tensors_data_destroy (*data_h);
+ *data_h = NULL;
}
- data->num_tensors = 0;
- return FALSE;
+ return !failed;
}
/**
*/
gboolean
nns_convert_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env,
- ml_tensors_info_s * info, jobject * result)
+ ml_tensors_info_h info_h, jobject * result)
{
guint i, j;
+ ml_tensors_info_s *info;
jmethodID mid_init, mid_add;
jobject obj_info = NULL;
g_return_val_if_fail (pipe_info, FALSE);
g_return_val_if_fail (env, FALSE);
- g_return_val_if_fail (info && result, FALSE);
+ g_return_val_if_fail (info_h, FALSE);
+ g_return_val_if_fail (result, FALSE);
+
+ info = (ml_tensors_info_s *) info_h;
/* method to generate tensors info */
mid_init = (*env)->GetMethodID (env, pipe_info->cls_tensors_info, "<init>", "()V");
*/
gboolean
nns_parse_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env,
- jobject obj_info, ml_tensors_info_s * info)
+ jobject obj_info, ml_tensors_info_h * info_h)
{
guint i, j;
+ ml_tensors_info_s *info;
g_return_val_if_fail (pipe_info, FALSE);
g_return_val_if_fail (env, FALSE);
- g_return_val_if_fail (obj_info && info, FALSE);
+ g_return_val_if_fail (obj_info, FALSE);
+ g_return_val_if_fail (info_h, FALSE);
+
+ if (ml_tensors_info_create (info_h) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create handle for tensors info.");
+ return FALSE;
+ }
- ml_tensors_info_initialize (info);
+ info = (ml_tensors_info_s *) (*info_h);
/* get field 'mInfoList' */
- jfieldID fid_arraylist = (*env)->GetFieldID (env, pipe_info->cls_tensors_info, "mInfoList", "java/util/ArrayList");
+ jfieldID fid_arraylist = (*env)->GetFieldID (env, pipe_info->cls_tensors_info, "mInfoList", "Ljava/util/ArrayList;");
jobject obj_arraylist = (*env)->GetObjectField (env, obj_info, fid_arraylist);
/* method to get tensors info */
const GstTensorMemory * input, GstTensorMemory * output)
{
pipeline_info_s *pipe_info = NULL;
- ml_tensors_data_s *in_data, *out_data;
- ml_tensors_info_h in_info, out_info;
+ ml_tensors_data_h in_data, out_data;
+ ml_tensors_info_h in_info;
+ ml_tensors_data_s *_data;
JNIEnv *env;
jclass cls_custom;
jmethodID mid_invoke;
jobject obj_in_data, obj_out_data;
- jobject obj_in_info, obj_out_info;
guint i;
int ret = -1;
g_return_val_if_fail (env, -1);
in_data = out_data = NULL;
- in_info = out_info = NULL;
+ in_info = NULL;
obj_in_data = obj_out_data = NULL;
- obj_in_info = obj_out_info = NULL;
-
- if ((in_data = g_new0 (ml_tensors_data_s, 1)) == NULL) {
- nns_loge ("Failed to allocate memory for input tensors data.");
- goto done;
- }
- if ((out_data = g_new0 (ml_tensors_data_s, 1)) == NULL) {
- nns_loge ("Failed to allocate memory for output tensors data.");
+ if (ml_tensors_data_create_no_alloc (NULL, &in_data) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create handle for input tensors data.");
goto done;
}
if (ml_tensors_info_create (&in_info) != ML_ERROR_NONE) {
- nns_loge ("Failed to create input tensors info.");
- goto done;
- }
-
- if (ml_tensors_info_create (&out_info) != ML_ERROR_NONE) {
- nns_loge ("Failed to create output tensors info.");
+ nns_loge ("Failed to create handle for input tensors info.");
goto done;
}
cls_custom = (*env)->GetObjectClass (env, pipe_info->instance);
mid_invoke = (*env)->GetMethodID (env, cls_custom, "invoke",
- "(Lorg/nnsuite/nnstreamer/TensorsData;"
- "Lorg/nnsuite/nnstreamer/TensorsInfo;"
- "Lorg/nnsuite/nnstreamer/TensorsInfo;)"
+ "(Lorg/nnsuite/nnstreamer/TensorsData;)"
"Lorg/nnsuite/nnstreamer/TensorsData;");
/* convert to c-api data type */
- in_data->num_tensors = prop->input_meta.num_tensors;
- for (i = 0; i < in_data->num_tensors; i++) {
- in_data->tensors[i].tensor = input[i].data;
- in_data->tensors[i].size = input[i].size;
+ _data = (ml_tensors_data_s *) in_data;
+ _data->num_tensors = prop->input_meta.num_tensors;
+ for (i = 0; i < _data->num_tensors; i++) {
+ _data->tensors[i].tensor = input[i].data;
+ _data->tensors[i].size = input[i].size;
}
ml_tensors_info_copy_from_gst (in_info, &prop->input_meta);
- ml_tensors_info_copy_from_gst (out_info, &prop->output_meta);
/* call invoke callback */
- if (!nns_convert_tensors_info (pipe_info, env, in_info, &obj_in_info)) {
- nns_loge ("Failed to convert input info to info-object.");
- goto done;
- }
-
- if (!nns_convert_tensors_info (pipe_info, env, out_info, &obj_out_info)) {
- nns_loge ("Failed to convert output info to info-object.");
- goto done;
- }
-
- if (!nns_convert_tensors_data (pipe_info, env, in_data, &obj_in_data)) {
+ if (!nns_convert_tensors_data (pipe_info, env, in_data, in_info, &obj_in_data)) {
nns_loge ("Failed to convert input data to data-object.");
goto done;
}
- obj_out_data = (*env)->CallObjectMethod (env, pipe_info->instance, mid_invoke,
- obj_in_data, obj_in_info, obj_out_info);
+ obj_out_data = (*env)->CallObjectMethod (env, pipe_info->instance, mid_invoke, obj_in_data);
- if (!nns_parse_tensors_data (pipe_info, env, obj_out_data, out_data)) {
+ if (!nns_parse_tensors_data (pipe_info, env, obj_out_data, &out_data, NULL)) {
nns_loge ("Failed to parse output data.");
goto done;
}
/* set output data */
- for (i = 0; i < out_data->num_tensors; i++) {
- output[i].data = out_data->tensors[i].tensor;
+ _data = (ml_tensors_data_s *) out_data;
+ for (i = 0; i < _data->num_tensors; i++) {
+ output[i].data = _data->tensors[i].tensor;
- if (out_data->tensors[i].size != output[i].size) {
+ if (_data->tensors[i].size != output[i].size) {
nns_logw ("The result has different buffer size at index %d [%zd:%zd]",
- i, output[i].size, out_data->tensors[i].size);
- output[i].size = out_data->tensors[i].size;
+ i, output[i].size, _data->tensors[i].size);
+ output[i].size = _data->tensors[i].size;
}
}
(*env)->DeleteLocalRef (env, obj_in_data);
if (obj_out_data)
(*env)->DeleteLocalRef (env, obj_out_data);
- if (obj_in_info)
- (*env)->DeleteLocalRef (env, obj_in_info);
- if (obj_out_info)
- (*env)->DeleteLocalRef (env, obj_out_info);
(*env)->DeleteLocalRef (env, cls_custom);
g_free (in_data);
g_free (out_data);
ml_tensors_info_destroy (in_info);
- ml_tensors_info_destroy (out_info);
return ret;
}
obj_in_info = obj_out_info = NULL;
if (ml_tensors_info_create (&in) != ML_ERROR_NONE) {
- nns_loge ("Failed to create input tensors info.");
- goto done;
- }
-
- if (ml_tensors_info_create (&out) != ML_ERROR_NONE) {
- nns_loge ("Failed to create output tensors info.");
+ nns_loge ("Failed to create handle for input tensors info.");
goto done;
}
obj_out_info = (*env)->CallObjectMethod (env, pipe_info->instance, mid_info, obj_in_info);
- if (!nns_parse_tensors_info (pipe_info, env, obj_out_info, out)) {
+ if (!nns_parse_tensors_info (pipe_info, env, obj_out_info, &out)) {
nns_loge ("Failed to parse output info.");
goto done;
}
}
pipe_info = nns_construct_pipe_info (env, thiz, fw, NNS_PIPE_TYPE_CUSTOM);
+ if (pipe_info == NULL) {
+ nns_loge ("Failed to create pipe info.");
+ goto done;
+ }
/* add custom-filter handle to the table */
g_mutex_lock (&pipe_info->lock);
{
element_data_s *cb_data;
pipeline_info_s *pipe_info;
- ml_tensors_data_s *out_data;
- ml_tensors_info_s *out_info;
- jobject obj_data, obj_info;
+ jobject obj_data = NULL;
JNIEnv *env;
cb_data = (element_data_s *) user_data;
pipe_info = cb_data->pipe_info;
- out_data = (ml_tensors_data_s *) data;
- out_info = (ml_tensors_info_s *) info;
if ((env = nns_get_jni_env (pipe_info)) == NULL) {
nns_logw ("Cannot get jni env in the sink callback.");
return;
}
- obj_data = obj_info = NULL;
- if (nns_convert_tensors_data (pipe_info, env, out_data, &obj_data) &&
- nns_convert_tensors_info (pipe_info, env, out_info, &obj_info)) {
+ if (nns_convert_tensors_data (pipe_info, env, data, info, &obj_data)) {
/* method for sink callback */
jclass cls_pipeline = (*env)->GetObjectClass (env, pipe_info->instance);
jmethodID mid_callback = (*env)->GetMethodID (env, cls_pipeline, "newDataReceived",
- "(Ljava/lang/String;"
- "Lorg/nnsuite/nnstreamer/TensorsData;"
- "Lorg/nnsuite/nnstreamer/TensorsInfo;)V");
+ "(Ljava/lang/String;Lorg/nnsuite/nnstreamer/TensorsData;)V");
jstring sink_name = (*env)->NewStringUTF (env, cb_data->name);
- (*env)->CallVoidMethod (env, pipe_info->instance, mid_callback, sink_name, obj_data, obj_info);
+ (*env)->CallVoidMethod (env, pipe_info->instance, mid_callback, sink_name, obj_data);
if ((*env)->ExceptionCheck (env)) {
nns_loge ("Failed to call the callback method.");
(*env)->DeleteLocalRef (env, sink_name);
(*env)->DeleteLocalRef (env, cls_pipeline);
+ (*env)->DeleteLocalRef (env, obj_data);
} else {
nns_loge ("Failed to convert the result to data object.");
}
-
- if (obj_data)
- (*env)->DeleteLocalRef (env, obj_data);
- if (obj_info)
- (*env)->DeleteLocalRef (env, obj_info);
}
/**
const char *pipeline = (*env)->GetStringUTFChars (env, description, NULL);
pipe_info = nns_construct_pipe_info (env, thiz, NULL, NNS_PIPE_TYPE_PIPELINE);
+ if (pipe_info == NULL) {
+ nns_loge ("Failed to create pipe info.");
+ goto done;
+ }
if (add_state_cb)
status = ml_pipeline_construct (pipeline, nns_pipeline_state_cb, pipe_info, &pipe);
pipe_info->pipeline_handle = pipe;
}
+done:
(*env)->ReleaseStringUTFChars (env, description, pipeline);
return CAST_TO_LONG (pipe_info);
}
{
pipeline_info_s *pipe_info = NULL;
ml_pipeline_src_h src;
- ml_tensors_data_s *in_data = NULL;
+ ml_tensors_data_h in_data = NULL;
int status;
jboolean res = JNI_FALSE;
const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
goto done;
}
- if ((in_data = g_new0 (ml_tensors_data_s, 1)) == NULL) {
- nns_loge ("Failed to allocate memory for input data.");
- goto done;
- }
-
- if (!nns_parse_tensors_data (pipe_info, env, in, in_data)) {
+ if (!nns_parse_tensors_data (pipe_info, env, in, &in_data, NULL)) {
nns_loge ("Failed to parse input data.");
- ml_tensors_data_destroy ((ml_tensors_data_h) in_data);
goto done;
}
- status = ml_pipeline_src_input_data (src, (ml_tensors_data_h) in_data,
+ status = ml_pipeline_src_input_data (src, in_data,
ML_PIPELINE_BUF_POLICY_AUTO_FREE);
if (status != ML_ERROR_NONE) {
nns_loge ("Failed to input tensors data to source node %s.", element_name);
pipeline_info_s *pipe_info = NULL;
ml_single_h single;
ml_tensors_info_h in_info, out_info;
+ gboolean opened = FALSE;
const char *model_info = (*env)->GetStringUTFChars (env, model, NULL);
single = NULL;
in_info = out_info = NULL;
- if (in) {
- if (ml_tensors_info_create (&in_info) != ML_ERROR_NONE) {
- nns_loge ("Failed to create input tensors info.");
- goto done;
- }
+ pipe_info = nns_construct_pipe_info (env, thiz, NULL, NNS_PIPE_TYPE_SINGLE);
+ if (pipe_info == NULL) {
+ nns_loge ("Failed to create pipe info.");
+ goto done;
+ }
- if (!nns_parse_tensors_info (pipe_info, env, in, in_info)) {
+ if (in) {
+ if (!nns_parse_tensors_info (pipe_info, env, in, &in_info)) {
nns_loge ("Failed to parse input tensor.");
goto done;
}
}
if (out) {
- if (ml_tensors_info_create (&out_info) != ML_ERROR_NONE) {
- nns_loge ("Failed to create output tensors info.");
- goto done;
- }
-
- if (!nns_parse_tensors_info (pipe_info, env, out, out_info)) {
+ if (!nns_parse_tensors_info (pipe_info, env, out, &out_info)) {
nns_loge ("Failed to parse output tensor.");
goto done;
}
/* supposed tensorflow-lite only for android */
if (ml_single_open (&single, model_info, in_info, out_info,
- ML_NNFW_TYPE_ANY, ML_NNFW_HW_AUTO) != ML_ERROR_NONE) {
+ ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_AUTO) != ML_ERROR_NONE) {
nns_loge ("Failed to create the pipeline.");
goto done;
}
- pipe_info = nns_construct_pipe_info (env, thiz, single, NNS_PIPE_TYPE_SINGLE);
+ opened = TRUE;
+ pipe_info->pipeline_handle = single;
done:
ml_tensors_info_destroy (in_info);
ml_tensors_info_destroy (out_info);
+  if (!opened && pipe_info) {
+ nns_destroy_pipe_info (pipe_info, env);
+ pipe_info = NULL;
+ }
+
(*env)->ReleaseStringUTFChars (env, model, model_info);
return CAST_TO_LONG (pipe_info);
}
*/
jobject
Java_org_nnsuite_nnstreamer_SingleShot_nativeInvoke (JNIEnv * env,
- jobject thiz, jlong handle, jobject obj_data, jobject obj_info)
+ jobject thiz, jlong handle, jobject in)
{
pipeline_info_s *pipe_info;
ml_single_h single;
- ml_tensors_info_h in_info, out_info;
- ml_tensors_data_s *in_data, *out_data;
+ ml_tensors_info_h cur_info, in_info, out_info;
+ ml_tensors_data_h in_data, out_data;
int status;
jobject result = NULL;
pipe_info = CAST_TO_TYPE (handle, pipeline_info_s*);
single = pipe_info->pipeline_handle;
- in_info = out_info = NULL;
+ cur_info = in_info = out_info = NULL;
in_data = out_data = NULL;
- if ((in_data = g_new0 (ml_tensors_data_s, 1)) == NULL) {
- nns_loge ("Failed to allocate memory for input data.");
+ if (ml_single_get_input_info (single, &cur_info) != ML_ERROR_NONE) {
+ nns_loge ("Failed to get input tensors info.");
goto done;
}
- if (!nns_parse_tensors_data (pipe_info, env, obj_data, in_data)) {
- nns_loge ("Failed to parse input data.");
+ if (!nns_parse_tensors_data (pipe_info, env, in, &in_data, &in_info)) {
+ nns_loge ("Failed to parse input tensors data.");
goto done;
}
- if (obj_info) {
- if (ml_tensors_info_create (&in_info) != ML_ERROR_NONE) {
- nns_loge ("Failed to create input tensors info.");
- goto done;
- }
-
- if (!nns_parse_tensors_info (pipe_info, env, obj_info, in_info)) {
- nns_loge ("Failed to parse input tensors info.");
+ if (in_info == NULL || ml_tensors_info_is_equal (cur_info, in_info)) {
+ /* input tensors info is not changed */
+ if (ml_single_get_output_info (single, &out_info) != ML_ERROR_NONE) {
+ nns_loge ("Failed to get output tensors info.");
goto done;
}
- status = ml_single_invoke_dynamic (single, in_data, in_info,
- (ml_tensors_data_h *) &out_data, &out_info);
+ status = ml_single_invoke (single, in_data, &out_data);
} else {
- status = ml_single_invoke (single, in_data, (ml_tensors_data_h *) &out_data);
+    /* input tensors info has changed, invoke with dynamic info */
+ status = ml_single_invoke_dynamic (single, in_data, in_info, &out_data, &out_info);
}
if (status != ML_ERROR_NONE) {
goto done;
}
- if (!nns_convert_tensors_data (pipe_info, env, out_data, &result)) {
+ if (!nns_convert_tensors_data (pipe_info, env, out_data, out_info, &result)) {
nns_loge ("Failed to convert the result to data.");
result = NULL;
}
done:
- ml_tensors_data_destroy ((ml_tensors_data_h) in_data);
- ml_tensors_data_destroy ((ml_tensors_data_h) out_data);
+ ml_tensors_data_destroy (in_data);
+ ml_tensors_data_destroy (out_data);
ml_tensors_info_destroy (in_info);
ml_tensors_info_destroy (out_info);
+ ml_tensors_info_destroy (cur_info);
return result;
}
pipe_info = CAST_TO_TYPE (handle, pipeline_info_s*);
single = pipe_info->pipeline_handle;
- if (ml_tensors_info_create (&in_info) != ML_ERROR_NONE) {
- nns_loge ("Failed to create input info handle.");
- return JNI_FALSE;
- }
-
- if (!nns_parse_tensors_info (pipe_info, env, in, in_info)) {
+ if (!nns_parse_tensors_info (pipe_info, env, in, &in_info)) {
nns_loge ("Failed to parse input tensor.");
goto done;
}
* @brief Convert tensors data to TensorsData object.
*/
extern gboolean
-nns_convert_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env, ml_tensors_data_s * data, jobject * result);
+nns_convert_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env, ml_tensors_data_h data_h, ml_tensors_info_h info_h, jobject * result);
/**
* @brief Parse tensors data from TensorsData object.
*/
extern gboolean
-nns_parse_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env, jobject obj_data, ml_tensors_data_s * data);
+nns_parse_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env, jobject obj_data, ml_tensors_data_h * data_h, ml_tensors_info_h * info_h);
/**
* @brief Convert tensors info to TensorsInfo object.
*/
extern gboolean
-nns_convert_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env, ml_tensors_info_s * info, jobject * result);
+nns_convert_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env, ml_tensors_info_h info_h, jobject * result);
/**
* @brief Parse tensors info from TensorsInfo object.
*/
extern gboolean
-nns_parse_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env, jobject obj_info, ml_tensors_info_s * info);
+nns_parse_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env, jobject obj_info, ml_tensors_info_h * info_h);
#endif /* __NNSTREAMER_ANDROID_NATIVE_H__ */