case SH_CSS_ACC_ARG_SCALAR_IN:
kfree(host->scalar.kernel_ptr);
break;
- case ATOMISP_ACC_ARG_FRAME:
+ case SH_CSS_ACC_ARG_FRAME:
case SH_CSS_ACC_ARG_PTR_IN:
case SH_CSS_ACC_ARG_PTR_OUT:
case SH_CSS_ACC_ARG_PTR_IO:
+ case SH_CSS_ACC_ARG_PTR_NOFLUSH:
+ case SH_CSS_ACC_ARG_PTR_STABLE:
hrt_isp_css_mm_free(host->ptr.hmm_ptr);
break;
default:
ret = sh_css_set_acceleration_argument(fw, index,
host->scalar.kernel_ptr, size);
break;
- case ATOMISP_ACC_ARG_FRAME:
+ case SH_CSS_ACC_ARG_FRAME:
case SH_CSS_ACC_ARG_PTR_IN:
case SH_CSS_ACC_ARG_PTR_OUT:
case SH_CSS_ACC_ARG_PTR_IO:
+ case SH_CSS_ACC_ARG_PTR_NOFLUSH:
+ case SH_CSS_ACC_ARG_PTR_STABLE:
/* Free old argument data if one already exists */
hrt_isp_css_mm_free(host->ptr.hmm_ptr);
pgnr = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
mutex_lock(&isp->isp_lock);
frame_ptr = hrt_isp_css_mm_alloc_user_ptr(size,
- (unsigned int)fw_arg->value, pgnr);
+ (unsigned int)fw_arg->value, pgnr,
+ type != SH_CSS_ACC_ARG_PTR_NOFLUSH);
mutex_unlock(&isp->isp_lock);
if (IS_ERR_OR_NULL(frame_ptr)) {
return ret;
}
+/*
+ * Flush all flushable (shared buffer pointer) arguments of <fw> from the
+ * CPU data cache so the ISP sees up-to-date data before acceleration
+ * starts.  Private to this layer, but also registered with CSS through
+ * sh_css_init() and therefore referenced from atomisp_fops.c.
+ */
+void flush_acc_api_arguments(struct sh_css_acc_fw *fw)
+{
+	unsigned i;
+
+	for (i = 0; i < sh_css_num_accelerator_args(fw); i++) {
+		/* NOTE(review): declared with the user-space enum type but
+		 * compared against SH_CSS_ACC_ARG_* values returned by
+		 * sh_css_argument_type() -- confirm the two enums have
+		 * identical values or switch to enum sh_css_acc_arg_type.
+		 */
+		enum atomisp_acc_arg_type type = sh_css_argument_type(fw, i);
+		union host *host;
+		size_t size;
+		switch (type) {
+		case SH_CSS_ACC_ARG_PTR_STABLE:
+			/* A stable argument is flushed only once; skip it
+			 * until user space destabilizes it again.
+			 */
+			if (sh_css_acc_is_stable(fw, i)) break;
+			/* Fall through */
+		case SH_CSS_ACC_ARG_FRAME:
+		case SH_CSS_ACC_ARG_PTR_IN:
+		case SH_CSS_ACC_ARG_PTR_OUT:
+		case SH_CSS_ACC_ARG_PTR_IO:
+			host = (union host *)sh_css_argument_get_host(fw, i);
+			size = sh_css_argument_get_size(fw, i);
+			hmm_flush(host->ptr.hmm_ptr, size);
+			/* Mark flushed so a STABLE argument is not flushed
+			 * again on the next start.
+			 */
+			sh_css_acc_stabilize(fw, i, true);
+			break;
+		case SH_CSS_ACC_ARG_PTR_NOFLUSH:
+			/* Do not flush */
+			break;
+		default:
+			/* scalar/in-kernel arguments need no cache flush */
+			break;
+		}
+
+	}
+}
+
+/*
+ * Mark argument <fw_arg->index> of the acceleration firmware identified
+ * by <fw_arg->fw_handle> as unstable, so it will be flushed again from
+ * the CPU cache before the next accelerator start.
+ *
+ * Returns 0 on success or -EINVAL when the handle does not resolve to a
+ * loaded firmware.  Called from the ATOMISP_IOC_ACC_DESTAB ioctl path.
+ */
+int atomisp_acc_destabilize(struct atomisp_device *isp,
+			    struct atomisp_acc_fw_arg *fw_arg)
+{
+	struct sh_css_acc_fw *fw;
+	/* negative errno convention: must be a signed int, not unsigned */
+	int ret = 0;
+	unsigned int handle = fw_arg->fw_handle;
+	unsigned int index = fw_arg->index;
+
+	mutex_lock(&isp->input_lock);
+	mutex_lock(&isp->isp_lock);
+	fw = atomisp_acc_get_fw(isp, handle);
+	if (fw == NULL) {
+		v4l2_err(&atomisp_dev, "%s: Invalid firmware handle\n",
+			 __func__);
+		ret = -EINVAL;
+	}
+
+	if (!ret)
+		sh_css_acc_stabilize(fw, index, false);
+
+	mutex_unlock(&isp->isp_lock);
+	mutex_unlock(&isp->input_lock);
+	return ret;
+}
+
int atomisp_acc_start(struct atomisp_device *isp, unsigned int *handle)
{
struct sh_css_acc_fw *fw;
goto out;
}
+ flush_acc_api_arguments(fw);
+
ret = sh_css_start_acceleration(fw);
mutex_unlock(&isp->isp_lock);
if (ret) {
int atomisp_acc_abort(struct atomisp_device *isp,
struct atomisp_acc_fw_abort *abort);
+int atomisp_acc_destabilize(struct atomisp_device *isp,
+ struct atomisp_acc_fw_arg *fw_arg);
+
void atomisp_wdt_wakeup_dog(unsigned long handle);
void atomisp_wdt_lock_dog(struct atomisp_device *isp);
}
/*
+ * The implementation lives in atomisp_cmd.c; it only needs to be known
+ * here because it is registered with CSS through "sh_css_init()", which
+ * is called from this file.  "flush_acc_api_arguments()" is therefore
+ * not a true public function.
+ *
+ * CSS needs access to "flush_acc_api_arguments()" for accelerator cache
+ * control of extension-type (shared buffer pointer) arguments.
+ */
+extern void flush_acc_api_arguments(struct sh_css_acc_fw *fw);
+
+
+/*
* Videobuf ops
*/
int atomisp_buf_setup(struct videobuf_queue *vq,
/* Init ISP */
if (sh_css_init(atomisp_kernel_malloc,
atomisp_kernel_free,
+ flush_acc_api_arguments, /*NULL,*/
SH_CSS_INTERRUPT_SETTING_PULSE,
isp->firmware->data,
isp->firmware->size))
case ATOMISP_IOC_ACC_ABORT:
return atomisp_acc_abort(isp, arg);
+ case ATOMISP_IOC_ACC_DESTAB:
+ return atomisp_acc_destabilize(isp, arg);
+
case ATOMISP_IOC_CAMERA_BRIDGE:
/* here we convert the atomisp struct to a BC_Video struct.
* We do this to avoid exporting the BC_Video struct in
/* Allocate memory, returns a virtual address */
void *hrt_isp_css_mm_alloc(size_t bytes);
void *hrt_isp_css_mm_alloc_user_ptr(size_t bytes, unsigned int userptr,
- unsigned int num_pages);
+ unsigned int num_pages,
+ bool cached);
void *hrt_isp_css_mm_alloc_cached(size_t bytes);
/* allocate memory and initialize with zeros,
struct sh_css_mipi_config mipi_config;
bool reconfigure_css_rx;
bool invalidate;
+ /* Register functions that are only provided by the OS */
void *(*malloc) (size_t size);
void (*free) (void *ptr);
+ void (*flush) (struct sh_css_acc_fw *fw);
enum sh_css_state state;
bool two_ppc;
enum sh_css_bayer_order bayer_order;
.invalidate = false, \
.malloc = NULL, \
.free = NULL, \
+ .flush = NULL, \
.state = sh_css_state_idle, \
.two_ppc = false, \
.bayer_order = sh_css_bayer_order_grbg, \
enum sh_css_err
sh_css_init(void *(*malloc_func) (size_t size),
void (*free_func) (void *ptr),
+ void (*flush_func) (struct sh_css_acc_fw *fw),
enum sh_css_interrupt_setting irq_setting,
const char *fw_data,
unsigned int fw_size)
static struct sh_css default_css = DEFAULT_CSS;
enum sh_css_err err;
- if (malloc_func == NULL || free_func == NULL)
+	/* "flush()" for cache control of accelerator API
+	 * (shared buffer pointer) arguments
+	 */
+ if (malloc_func == NULL || free_func == NULL || flush_func == NULL)
return sh_css_err_invalid_arguments;
memcpy(&my_css, &default_css, sizeof(my_css));
my_css.malloc = malloc_func;
my_css.free = free_func;
+ my_css.flush = flush_func;
my_css.irq_edge = (irq_setting == SH_CSS_INTERRUPT_SETTING_EDGE);
/* In case this has been programmed already, update internal
my_css.free(ptr);
}
+/*
+ * Acceleration API: flush a firmware's (shared buffer pointer) arguments
+ * through the OS-registered flush callback.  A no-op when either the
+ * firmware pointer or the callback is missing.
+ */
+void
+sh_css_flush(struct sh_css_acc_fw *fw)
+{
+	if (fw == NULL || my_css.flush == NULL)
+		return;
+	my_css.flush(fw);
+}
+
void
sh_css_set_print_function(int (*func) (const char *fmt, ...))
{
enum sh_css_err
sh_css_init(void *(*malloc_func) (size_t size),
void (*free_func) (void *ptr),
+ void (*flush_func) (struct sh_css_acc_fw *fw),
enum sh_css_interrupt_setting irq_setting,
const char *fw_data,
unsigned int fw_size);
if (has_extension_args != is_extension)
return sh_css_err_invalid_arguments;
+ /* NOTE: standalone accelerators have their (shared buffer pointer)
+ * arguments flushed in "atomisp_acc_start()"
+ */
+ if (is_extension)
+ sh_css_flush(firmware);
+
sp_program = sh_css_sp_load_program(sp_fw,
SH_CSS_ACC_PROG_NAME(firmware),
(void *)firmware->header.sp_code,
void
sh_css_free(void *ptr);
+void
+sh_css_flush(struct sh_css_acc_fw *fw);
+
/* Check two frames for equality (format, resolution, bits per element) */
bool
sh_css_frame_equal_types(const struct sh_css_frame *frame_a,
v4l2_err(&atomisp_dev, "hmm_bo_bind failed.\n");
goto bind_err;
}
-
return (void *)bo->vm_node->start;
bind_err:
}
/*Read function in ISP memory management*/
-int hmm_load(void *virt, void *data, unsigned int bytes)
+static int load_and_flush(void *virt, void *data, unsigned int bytes)
{
unsigned int ptr;
struct hmm_buffer_object *bo;
ptr += len; /* update ptr for next loop */
+ if (des) {
+
#ifdef USE_SSSE3
- _ssse3_memcpy(des, src, len);
+ _ssse3_memcpy(des, src, len);
#else
- memcpy(des, src, len);
+ memcpy(des, src, len);
#endif
+ des += len;
+ }
- des += len;
clflush_cache_range(src, len);
kunmap(bo->pages[idx]);
return 0;
}
+/* Read from ISP memory at <virt> into the kernel buffer <data>;
+ * rejects a NULL destination (use hmm_flush() for flush-only).
+ */
+int hmm_load(void *virt, void *data, unsigned int bytes)
+{
+	if (data == NULL) {
+		v4l2_err(&atomisp_dev, "hmm_load NULL argument\n");
+		return -EINVAL;
+	}
+	return load_and_flush(virt, data, bytes);
+}
+
+/* Flush an hmm buffer from the CPU data cache without copying it out:
+ * a NULL destination makes load_and_flush() perform the flush only.
+ */
+int hmm_flush(void *virt, unsigned int bytes)
+{
+	return load_and_flush(virt, NULL, bytes);
+}
+
/*Write function in ISP memory management*/
int hmm_store(void *virt, const void *data, unsigned int bytes)
{
}
static void *__hrt_isp_css_mm_alloc(size_t bytes, unsigned int userptr,
-				    unsigned int num_pages)
+				    unsigned int num_pages,
+				    bool cached)
{
+	/* NOTE(review): "cached" is passed where these hmm_alloc() calls
+	 * previously hard-coded the HMM_UNCACHED enum value; confirm that
+	 * hmm_alloc() now takes a bool (true = cached) rather than the
+	 * HMM_CACHED/HMM_UNCACHED constants.
+	 */
	if (!init_done)
		hrt_isp_css_mm_init();
	if (userptr == 0)
		return (void *)hmm_alloc(bytes, HMM_BO_PRIVATE, 0, 0,
-					 HMM_UNCACHED);
+					 cached);
	else {
		if (num_pages < ((__page_align(bytes)) >> PAGE_SHIFT))
			v4l2_err(&atomisp_dev,
				 " large than the expected size..\n");
		return (void *)hmm_alloc(bytes, HMM_BO_USER, 0,
-					 userptr, HMM_UNCACHED);
+					 userptr, cached);
	}
}
void *hrt_isp_css_mm_alloc(size_t bytes)
{
-	return __hrt_isp_css_mm_alloc(bytes, my_userptr, my_num_pages);
+	/* cached = false keeps the old default (formerly HMM_UNCACHED) --
+	 * TODO confirm hmm_alloc() treats false as uncached
+	 */
+	return __hrt_isp_css_mm_alloc(bytes, my_userptr, my_num_pages, false);
}
void *hrt_isp_css_mm_alloc_user_ptr(size_t bytes, unsigned int userptr,
-				    unsigned int num_pages)
+				    unsigned int num_pages,
+				    bool cached)
{
+	/* Caller chooses cacheability: accelerator arguments pass
+	 * (type != SH_CSS_ACC_ARG_PTR_NOFLUSH), so NOFLUSH arguments get
+	 * uncached mappings and all others are cached and flushed manually.
+	 */
-	return __hrt_isp_css_mm_alloc(bytes, userptr, num_pages);
+	return __hrt_isp_css_mm_alloc(bytes, userptr, num_pages, cached);
}
void *hrt_isp_css_mm_alloc_cached(size_t bytes)
int hmm_load(void *virt, void *data, unsigned int bytes);
int hmm_store(void *virt, const void *data, unsigned int bytes);
int hmm_set(void *virt, int c, unsigned int bytes);
+int hmm_flush(void *virt, unsigned int bytes);
/*
* get kernel memory physical address from ISP virtual address.
ATOMISP_ACC_ARG_PTR_IN, /* Pointer input argument */
ATOMISP_ACC_ARG_PTR_OUT, /* Pointer output argument */
ATOMISP_ACC_ARG_PTR_IO, /* Pointer in/output argument */
+ ATOMISP_ARG_PTR_NOFLUSH, /* Pointer argument will not be flushed */
+ ATOMISP_ARG_PTR_STABLE, /* Pointer input argument that is stable */
ATOMISP_ACC_ARG_FRAME /* Frame argument */
};
#define ATOMISP_IOC_S_ISP_GAMMA_CORRECTION \
_IOW('v', BASE_VIDIOC_PRIVATE + 53, struct atomisp_gc_config)
+#define ATOMISP_IOC_ACC_DESTAB \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 54, struct atomisp_acc_fw_arg)
+
/* ISP Private control IDs */
#define V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION \
(V4L2_CID_PRIVATE_BASE + 0)