Fix build warnings about pointer casts.

dma_addr_t and CPU pointers are not guaranteed to be the same width, so
casting one directly to the other trips GCC's int-to-pointer /
pointer-to-int cast warnings on configurations where the widths differ.
Cast through uintptr_t so the width change is explicit and warning-free.
Change-Id: I29b84d07152f9c1745685e156a53275b1e15b6eb
Signed-off-by: Hoegeun Kwon <hoegeun.kwon@samsung.com>
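(Not part of the patch: a hedged, standalone sketch of the cast pattern the hunks below apply. The helper name and the 64-bit dma_addr_t typedef are illustrative assumptions; the point is that routing the conversion through uintptr_t keeps the pointer/integer size-mismatch warnings quiet.)

    #include <stdint.h>

    /*
     * Assume a configuration where dma_addr_t is wider than a CPU
     * pointer (e.g. a 32-bit kernel built with 64-bit DMA addressing);
     * there, a direct (void *)dma_base cast warns:
     *   "cast to pointer from integer of different size"
     */
    typedef uint64_t dma_addr_t;

    struct fiq_dma_blob;	/* layout irrelevant for the cast itself */

    static inline struct fiq_dma_blob *fiq_blob_from_base(dma_addr_t dma_base)
    {
            /*
             * Step 1: narrow to uintptr_t (an integer type sized to hold
             * a pointer), making the truncation explicit.
             * Step 2: integer-to-pointer cast between matching widths,
             * which no longer triggers the warning.
             */
            return (struct fiq_dma_blob *)(uintptr_t)dma_base;
    }

Every hunk below applies the same two-step cast wherever a dma_addr_t is turned into a pointer, or a pointer is folded back into a DMA register value.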
hcdma_data_t hcdma;
int i = st->channel[n].dma_info.index;
int len;
- struct fiq_dma_blob *blob = (struct fiq_dma_blob *) st->dma_base;
+ struct fiq_dma_blob *blob = (struct fiq_dma_blob *)(uintptr_t)st->dma_base;
len = fiq_get_xfer_len(st, n);
fiq_print(FIQDBG_INT, st, "LEN: %03d", len);
if (i > 6)
BUG();
- hcdma.d32 = (dma_addr_t) &blob->channel[n].index[i].buf[0];
+ hcdma.d32 = (dma_addr_t)(uintptr_t)&blob->channel[n].index[i].buf[0];
FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32);
st->channel[n].dma_info.index = i;
return 0;
hcsplt_data_t hcsplt;
hctsiz_data_t hctsiz;
hcdma_data_t hcdma;
- struct fiq_dma_blob *blob = (struct fiq_dma_blob *) st->dma_base;
+ struct fiq_dma_blob *blob = (struct fiq_dma_blob *)(uintptr_t)st->dma_base;
int last = 0;
int i = st->channel[n].dma_info.index;
last = 1;
/* New DMA address - address of bounce buffer referred to in index */
- hcdma.d32 = (dma_addr_t) blob->channel[n].index[i].buf;
+ hcdma.d32 = (dma_addr_t)(uintptr_t)blob->channel[n].index[i].buf;
//hcdma.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA);
//hcdma.d32 += st->channel[n].dma_info.slot_len[i];
fiq_print(FIQDBG_INT, st, "LAST: %01d ", last);
hc->multi_count = 1;
if (hcd->core_if->dma_enable) {
- hc->xfer_buff = (uint8_t *) urb->dma + urb->actual_length;
+ hc->xfer_buff = (uint8_t *)((uintptr_t)urb->dma + urb->actual_length);
/* For non-dword aligned case */
if (((unsigned long)hc->xfer_buff & 0x3)
hc->ep_is_in = 0;
hc->data_pid_start = DWC_OTG_HC_PID_SETUP;
if (hcd->core_if->dma_enable) {
- hc->xfer_buff = (uint8_t *) urb->setup_dma;
+ hc->xfer_buff = (uint8_t *)(uintptr_t)urb->setup_dma;
} else {
hc->xfer_buff = (uint8_t *) urb->setup_packet;
}
hc->xfer_len = 0;
if (hcd->core_if->dma_enable) {
- hc->xfer_buff = (uint8_t *) hcd->status_buf_dma;
+ hc->xfer_buff = (uint8_t *)(uintptr_t)hcd->status_buf_dma;
} else {
hc->xfer_buff = (uint8_t *) hcd->status_buf;
}
frame_desc->status = 0;
if (hcd->core_if->dma_enable) {
- hc->xfer_buff = (uint8_t *) urb->dma;
+ hc->xfer_buff = (uint8_t *)(uintptr_t)urb->dma;
} else {
hc->xfer_buff = (uint8_t *) urb->buf;
}
* Pointer arithmetic on hcd->fiq_state->dma_base (a dma_addr_t)
* to point it to the correct offset in the allocated buffers.
*/
- blob = (struct fiq_dma_blob *) hcd->fiq_state->dma_base;
- st->hcdma_copy.d32 = (dma_addr_t) blob->channel[hc->hc_num].index[0].buf;
+ blob = (struct fiq_dma_blob *)(uintptr_t)hcd->fiq_state->dma_base;
+ st->hcdma_copy.d32 = (dma_addr_t)(uintptr_t)blob->channel[hc->hc_num].index[0].buf;
/* Calculate the max number of CSPLITS such that the FIQ can time out
* a transaction if it fails.
* dma_addr_t) to point it to the correct offset in the
* allocated buffers.
*/
- blob = (struct fiq_dma_blob *) hcd->fiq_state->dma_base;
- st->hcdma_copy.d32 = (dma_addr_t) blob->channel[hc->hc_num].index[0].buf;
+ blob = (struct fiq_dma_blob *)(uintptr_t)hcd->fiq_state->dma_base;
+ st->hcdma_copy.d32 = (dma_addr_t)(uintptr_t)blob->channel[hc->hc_num].index[0].buf;
/* fixup xfersize to the actual packet size */
st->hctsiz_copy.b.pid = 0;
if (n_desc) {
/* SG request - more than 1 QTDs */
- hc->xfer_buff = (uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
+ hc->xfer_buff = (uint8_t *)((uintptr_t)qtd->urb->dma + qtd->urb->actual_length);
hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
}
dwc_otg_hcd_get_mps(&urb->pipe_info));
DWC_ERROR(" Data buffer length: %d\n", urb->length);
DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n",
- urb->buf, (void *)urb->dma);
+ urb->buf, (void *)(uintptr_t)urb->dma);
DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n",
- urb->setup_packet, (void *)urb->setup_dma);
+ urb->setup_packet, (void *)(uintptr_t)urb->setup_dma);
DWC_ERROR(" Interval: %d\n", urb->interval);
/* Core halts the channel for Descriptor DMA mode */