void hmm_cleanup(void);
ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
- int from_highmem, const void __user *userptr, bool cached);
+ int from_highmem, const void __user *userptr,
+ const uint16_t attrs);
void hmm_free(ia_css_ptr ptr);
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes);
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes);
#define ATOMISP_MAP_FLAG_NOFLUSH 0x0001 /* Do not flush cache */
#define ATOMISP_MAP_FLAG_CACHED 0x0002 /* Enable cache */
+#define ATOMISP_MAP_FLAG_CONTIGUOUS 0x0004
+#define ATOMISP_MAP_FLAG_CLEARED 0x0008
struct atomisp_acc_state {
__u32 flags; /* Flags, see list below */
* within the allocation referencable from the
* returned pointer/address.
*/
-#define MMGR_ATTRIBUTE_CACHED 0x0001
-#define MMGR_ATTRIBUTE_CONTIGUOUS 0x0002
-#define MMGR_ATTRIBUTE_CLEARED 0x0008
#define mmgr_NULL ((hrt_vaddress)0)
#define mmgr_EXCEPTION ((hrt_vaddress)-1)
* at the beginning, to avoid hmm_alloc return 0 in the
* further allocation.
*/
- dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, false);
+ dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, 0);
if (!ret) {
ret = sysfs_create_group(&atomisp_dev->kobj,
}
ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
- int from_highmem, const void __user *userptr, bool cached)
+ int from_highmem, const void __user *userptr,
+ const uint16_t attrs)
{
unsigned int pgnr;
struct hmm_buffer_object *bo;
+ bool cached = attrs & ATOMISP_MAP_FLAG_CACHED;
int ret;
+ WARN_ON(attrs & ATOMISP_MAP_FLAG_CONTIGUOUS);
+
/*
* Check if we are initialized. In the ideal world we wouldn't need
* this but we can tackle it once the driver is a lot cleaner
hmm_mem_stat.tol_cnt += pgnr;
+ if (attrs & ATOMISP_MAP_FLAG_CLEARED)
+ hmm_set(bo->start, 0, bytes);
+
return bo->start;
bind_err:
* more details.
*/
-#include <type_support.h>
-#include <system_types.h>
-#include <assert_support.h>
#include <memory_access.h>
-#include <ia_css_env.h>
-
-#include "atomisp_internal.h"
hrt_vaddress mmgr_alloc_attr(const size_t size, const uint16_t attrs)
{
- ia_css_ptr data;
-
- WARN_ON(attrs & MMGR_ATTRIBUTE_CONTIGUOUS);
-
- data = hmm_alloc(size, HMM_BO_PRIVATE, 0, NULL,
- attrs & MMGR_ATTRIBUTE_CACHED);
-
- if (!data)
- return 0;
-
- if (attrs & MMGR_ATTRIBUTE_CLEARED)
- hmm_set(data, 0, size);
-
- return (ia_css_ptr)data;
+ /*
+  * hmm_alloc() now performs the CONTIGUOUS WARN_ON, masks the
+  * CACHED bit and zeroes CLEARED allocations itself, so this
+  * wrapper reduces to a direct pass-through of attrs.  The old
+  * "if (!data) return 0" check is preserved implicitly because
+  * mmgr_NULL is ((hrt_vaddress)0) and hmm_alloc() returns 0 on
+  * failure.
+  * NOTE(review): callers must pass ATOMISP_MAP_FLAG_* values
+  * here, not the removed MMGR_ATTRIBUTE_* bits (the CACHED and
+  * CONTIGUOUS bit positions changed) — verify all call sites.
+  */
+ return hmm_alloc(size, HMM_BO_PRIVATE, 0, NULL, attrs);
}
void mmgr_load(const hrt_vaddress vaddr, void *data, const size_t size)
}
return hmm_alloc(me->data_bytes, HMM_BO_USER, 0, data,
- attribute & MMGR_ATTRIBUTE_CACHED);
+ attribute & ATOMISP_MAP_FLAG_CACHED);
if (me->data == mmgr_NULL)
err = IA_CSS_ERR_INVALID_ARGUMENTS;
#endif
frame->data = mmgr_alloc_attr(frame->data_bytes,
frame->contiguous ?
- MMGR_ATTRIBUTE_CONTIGUOUS : 0);
+ ATOMISP_MAP_FLAG_CONTIGUOUS : 0);
if (frame->data == mmgr_NULL)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
xmem_sp_stage_ptrs[p][i] =
ia_css_refcount_increment(-1,
mmgr_alloc_attr(sizeof(struct sh_css_sp_stage),
- MMGR_ATTRIBUTE_CLEARED));
+ ATOMISP_MAP_FLAG_CLEARED));
xmem_isp_stage_ptrs[p][i] =
ia_css_refcount_increment(-1,
mmgr_alloc_attr(sizeof(struct sh_css_sp_stage),
- MMGR_ATTRIBUTE_CLEARED));
+ ATOMISP_MAP_FLAG_CLEARED));
if ((xmem_sp_stage_ptrs[p][i] == mmgr_NULL) ||
(xmem_isp_stage_ptrs[p][i] == mmgr_NULL)) {
sp_ddr_ptrs = ia_css_refcount_increment(-1,
mmgr_alloc_attr(CEIL_MUL(sizeof(struct sh_css_ddr_address_map),
HIVE_ISP_DDR_WORD_BYTES),
- MMGR_ATTRIBUTE_CLEARED));
+ ATOMISP_MAP_FLAG_CLEARED));
xmem_sp_group_ptrs = ia_css_refcount_increment(-1,
mmgr_alloc_attr(sizeof(struct sh_css_sp_group),
- MMGR_ATTRIBUTE_CLEARED));
+ ATOMISP_MAP_FLAG_CLEARED));
if ((sp_ddr_ptrs == mmgr_NULL) ||
(xmem_sp_group_ptrs == mmgr_NULL))