/*
* Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using InteropInference = Interop.MediaVision.Inference;
namespace Tizen.Multimedia.Vision
{
    /// <summary>
    /// Provides the ability to detect objects and get their locations in an image source using the inference engine.
    /// </summary>
    /// <since_tizen> 6 </since_tizen>
public static class ObjectDetector
{
        /// <summary>
        /// Detects objects and gets their locations in the source image using the inference engine set in <paramref name="config"/>.<br/>
        /// Each time DetectAsync is called, a set of the objects detected in the media source is received asynchronously.
        /// </summary>
        /// <feature>http://tizen.org/feature/vision.inference</feature>
        /// <feature>http://tizen.org/feature/vision.inference.image</feature>
        /// <param name="source">The source of the media where objects will be detected.</param>
        /// <param name="config">The engine's configuration that will be used for detecting.</param>
        /// <returns>
        /// A task that represents the asynchronous detect operation.<br/>
        /// If there is no detected object, an empty collection will be returned.
        /// </returns>
        /// <exception cref="ArgumentNullException"><paramref name="source"/> or <paramref name="config"/> is null.</exception>
        /// <exception cref="InvalidOperationException">Internal error.</exception>
        /// <exception cref="NotSupportedException">The feature is not supported.</exception>
        /// <exception cref="UnauthorizedAccessException">The caller has no required privilege.</exception>
        /// <since_tizen> 6 </since_tizen>
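        /// <example>
        /// A minimal usage sketch. The model/source setup is hypothetical and omitted for brevity,
        /// and the result property names (Name, Confidence, Location) are assumptions.
        /// <code>
        /// var config = new InferenceModelConfiguration();
        /// // Configure the model paths, input tensor info, etc. on 'config' here (omitted).
        ///
        /// var source = new MediaVisionSource(imageBuffer, imageWidth, imageHeight, ColorSpace.Rgb888);
        ///
        /// var results = await ObjectDetector.DetectAsync(source, config);
        /// foreach (var result in results)
        /// {
        ///     Tizen.Log.Info("MyApp", $"{result.Name} : {result.Confidence} at {result.Location}");
        /// }
        /// </code>
        /// </example>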
        public static async Task<IEnumerable<ObjectDetectionResult>> DetectAsync(MediaVisionSource source,
InferenceModelConfiguration config)
{
// `vision.inference` feature is already checked, when config is created.
ValidationUtil.ValidateFeatureSupported(VisionFeatures.InferenceImage);
if (source == null)
{
throw new ArgumentNullException(nameof(source));
}
if (config == null)
{
throw new ArgumentNullException(nameof(config));
}
            var tcs = new TaskCompletionSource<IEnumerable<ObjectDetectionResult>>();
using (var cb = ObjectKeeper.Get(GetCallback(tcs)))
{
InteropInference.DetectObject(source.Handle, config.GetHandle(), cb.Target).
Validate("Failed to detect object.");
return await tcs.Task;
}
}
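
        // Builds the native detection callback that completes the given TaskCompletionSource
        // with the converted results, or with the exception thrown while converting them.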
        private static InteropInference.ObjectDetectedCallback GetCallback(TaskCompletionSource<IEnumerable<ObjectDetectionResult>> tcs)
{
return (IntPtr sourceHandle, int numberOfObjects, int[] indices, string[] names, float[] confidences,
global::Interop.MediaVision.Rectangle[] locations, IntPtr _) =>
{
try
{
if (!tcs.TrySetResult(GetResults(numberOfObjects, indices, names, confidences, locations)))
{
Log.Error(MediaVisionLog.Tag, "Failed to set object detection result.");
}
}
catch (Exception e)
{
tcs.TrySetException(e);
}
};
}
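
        // Converts the raw arrays delivered by the native callback into managed ObjectDetectionResult items.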
        private static IEnumerable<ObjectDetectionResult> GetResults(int number, int[] indices,
string[] names, float[] confidences, global::Interop.MediaVision.Rectangle[] locations)
{
if (number == 0)
{
                return Enumerable.Empty<ObjectDetectionResult>();
}
            var results = new List<ObjectDetectionResult>();
for (int i = 0; i < number; i++)
{
results.Add(new ObjectDetectionResult(indices[i], names[i], confidences[i], locations[i]));
}
return results;
}
}
}