if (sm_state == NULL)
return 0;
- seq_printf(s, "\nVC-ServiceHandle 0x%x\n",
- (unsigned int)sm_state->sm_handle);
+ seq_printf(s, "\nVC-ServiceHandle %p\n",
+ sm_state->sm_handle);
/* Log all applicable mapping(s).
*/
list_for_each_entry(map, &sm_state->map_list, map_list) {
map_count++;
- seq_printf(s, "\nMapping 0x%x\n",
- (unsigned int)map);
+ seq_printf(s, "\nMapping %p\n",
+ map);
seq_printf(s, " TGID %u\n",
map->res_pid);
seq_printf(s, " VC-HDL 0x%x\n",
}
pfn = vcaddr_to_pfn((unsigned long)resource->res_base_mem);
+#ifdef CONFIG_ARM
outer_inv_range(__pfn_to_phys(pfn),
__pfn_to_phys(pfn) + resource->res_size);
+#endif
resource->res_stats[LOCK]++;
resource->lock_count++;
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start);
- pfn = (uint32_t)resource->res_base_mem & 0x3FFFFFFF;
+ pfn = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
pfn += mm_vc_mem_phys_addr;
pfn += page_offset;
pfn >>= PAGE_SHIFT;
|| !pte_present(*pte))
continue;
+#ifdef CONFIG_ARM
/* Clean + invalidate */
dmac_flush_range((const void *) addr,
(const void *)
(addr + PAGE_SIZE));
+#endif
} while (pte++, addr +=
PAGE_SIZE, addr != pmd_next);
/* Lock assumed taken already, address to be mapped is known.
*/
else
- resource->res_base_mem = (void *)vc_addr;
+ resource->res_base_mem = (void *)(unsigned long)vc_addr;
resource->res_stats[LOCK]++;
resource->lock_count++;
ret = -ENOMEM;
goto error;
} else {
- phys_addr = (uint32_t)resource->res_base_mem &
+ phys_addr = (unsigned long)resource->res_base_mem &
0x3FFFFFFF;
phys_addr += mm_vc_mem_phys_addr;
if (resource->res_cached
resource->res_stats[FLUSH]++;
phys_addr =
- (dma_addr_t)((uint32_t)resource->res_base_mem &
+ (dma_addr_t)((unsigned long)resource->res_base_mem &
0x3FFFFFFF);
phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
}
up_read(&current->mm->mmap_sem);
+#ifdef CONFIG_ARM
/* L2 cache flush */
outer_clean_range(phys_addr,
phys_addr +
(size_t) resource->res_size);
+#endif
}
/* We need to zap all the vmas associated with this resource */
VMCS_SM_CACHE_HOST)) {
long unsigned int
phys_addr;
- phys_addr = (uint32_t)
+ phys_addr = (unsigned long)
resource->res_base_mem & 0x3FFFFFFF;
phys_addr +=
mm_vc_mem_phys_addr;
+#ifdef CONFIG_ARM
/* L1 cache flush */
dmac_flush_range((const
void
phys_addr +
(size_t)
resource->res_size);
+#endif
}
iounmap((void *)map->res_addr);
vmcs_sm_acquire_resource(file_data, ioparam.handle);
if (resource != NULL) {
ioparam.addr =
- (unsigned int)resource->res_base_mem;
+ (unsigned long)resource->res_base_mem;
vmcs_sm_release_resource(resource, 0);
} else {
ioparam.addr = 0;
resource->res_stats[FLUSH]++;
phys_addr =
- (dma_addr_t)((uint32_t)
+ (dma_addr_t)((unsigned long)
resource->res_base_mem &
0x3FFFFFFF);
phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
ioparam.size);
up_read(&current->mm->mmap_sem);
+#ifdef CONFIG_ARM
/* L2 cache flush */
outer_clean_range(phys_addr,
phys_addr +
(size_t) ioparam.size);
+#endif
} else if (resource == NULL) {
ret = -EINVAL;
goto out;
resource->res_stats[INVALID]++;
phys_addr =
- (dma_addr_t)((uint32_t)
+ (dma_addr_t)((unsigned long)
resource->res_base_mem &
0x3FFFFFFF);
phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
+#ifdef CONFIG_ARM
/* L2 cache invalidate */
outer_inv_range(phys_addr,
phys_addr +
(size_t) ioparam.size);
+#endif
/* L1 cache invalidate */
down_read(&current->mm->mmap_sem);