}
/**
 * Initializes a {@code Interpreter} and specifies the number of threads used for inference.
 *
 * @param modelFile a file of a pre-trained TF Lite model
 * @param numThreads number of threads to use for inference; forwarded to the native layer
 *     (-1 presumably selects the native default — confirm against createInterpreter)
 * @throws NullPointerException if {@code modelFile} is null
 */
public Interpreter(@NonNull File modelFile, int numThreads) {
  // Fail fast on a null model file. Silently returning here would leave `wrapper`
  // null and defer the failure to a confusing NPE on the first inference call.
  if (modelFile == null) {
    throw new NullPointerException("modelFile must not be null.");
  }
  wrapper = new NativeInterpreterWrapper(modelFile.getAbsolutePath(), numThreads);
}
+
+ /**
* Initializes a {@code Interpreter} with a {@code MappedByteBuffer} to the model file.
*
* <p>The {@code MappedByteBuffer} should remain unchanged after the construction of a {@code
final class NativeInterpreterWrapper implements AutoCloseable {
/**
 * Initializes the wrapper from a model file path with an unspecified thread count.
 *
 * @param modelPath path to a TF Lite model file on disk
 */
NativeInterpreterWrapper(String modelPath) {
  // -1 presumably means "let the native runtime pick a default thread count"
  // — TODO confirm against the native createInterpreter implementation.
  this(modelPath, /* numThreads= */ -1);
}
+
/**
 * Initializes the wrapper from a model file path with an explicit thread count.
 *
 * @param modelPath path to a TF Lite model file on disk
 * @param numThreads number of threads for inference, forwarded unchanged to the
 *     native createInterpreter call (-1 presumably means "use default" — confirm)
 */
NativeInterpreterWrapper(String modelPath, int numThreads) {
  // The error reporter is created first so its handle can be passed to the
  // subsequent native model/interpreter creation calls.
  errorHandle = createErrorReporter(ERROR_BUFFER_SIZE);
  modelHandle = createModel(modelPath, errorHandle);
  interpreterHandle = createInterpreter(modelHandle, errorHandle, numThreads);
  isMemoryAllocated = true;
}
* NativeInterpreterWrapper}.
*/
/**
 * Initializes the wrapper from a memory-mapped model with an unspecified thread count.
 *
 * @param mappedByteBuffer a {@code MappedByteBuffer} holding the model; must remain
 *     unchanged for the lifetime of this wrapper
 */
NativeInterpreterWrapper(MappedByteBuffer mappedByteBuffer) {
  // Delegates to the (buffer, numThreads) overload; -1 presumably selects the
  // native default thread count — TODO confirm in the native layer.
  this(mappedByteBuffer, /* numThreads= */ -1);
}
/**