common/bouncebuf.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>
#include <asm/cache.h>

static int addr_aligned(struct bounce_buffer *state)
{
	const ulong align_mask = ARCH_DMA_MINALIGN - 1;

	/* Check if start is aligned */
	if ((ulong)state->user_buffer & align_mask) {
		debug("Unaligned buffer address %p\n", state->user_buffer);
		return 0;
	}

	/* Check if length is aligned */
	if (state->len != state->len_aligned) {
		debug("Unaligned buffer length %zu\n", state->len);
		return 0;
	}

	/* Aligned */
	return 1;
}
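/*
 * Prepare @data/@len for DMA. If the caller-supplied alignment check
 * fails, allocate a bounce buffer of the rounded-up length and, for
 * GEN_BB_READ transfers, copy the user data into it; otherwise use the
 * user buffer directly. The buffer is then flushed from the data cache
 * so the DMA engine sees up-to-date data and no dirty cache lines can
 * be written back over it later.
 */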
int bounce_buffer_start_extalign(struct bounce_buffer *state, void *data,
				 size_t len, unsigned int flags,
				 size_t alignment,
				 int (*addr_is_aligned)(struct bounce_buffer *state))
{
	state->user_buffer = data;
	state->bounce_buffer = data;
	state->len = len;
	state->len_aligned = roundup(len, alignment);
	state->flags = flags;

	if (!addr_is_aligned(state)) {
		state->bounce_buffer = memalign(alignment,
						state->len_aligned);
		if (!state->bounce_buffer)
			return -ENOMEM;

		if (state->flags & GEN_BB_READ)
			memcpy(state->bounce_buffer, state->user_buffer,
			       state->len);
	}

	/*
	 * Flush data to RAM so DMA reads can pick it up,
	 * and any CPU writebacks don't race with DMA writes
	 */
	flush_dcache_range((unsigned long)state->bounce_buffer,
			   (unsigned long)(state->bounce_buffer) +
				state->len_aligned);

	return 0;
}
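/*
 * Convenience wrapper: same as bounce_buffer_start_extalign(), using
 * ARCH_DMA_MINALIGN and the default start/length alignment check above.
 */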
int bounce_buffer_start(struct bounce_buffer *state, void *data,
			size_t len, unsigned int flags)
{
	return bounce_buffer_start_extalign(state, data, len, flags,
					    ARCH_DMA_MINALIGN,
					    addr_aligned);
}
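/*
 * Finish a transfer started by bounce_buffer_start*(): for GEN_BB_WRITE
 * transfers invalidate the buffer so the CPU sees the newly DMA'd data,
 * copy it back to the user buffer if a bounce buffer was allocated, and
 * free that bounce buffer.
 */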
int bounce_buffer_stop(struct bounce_buffer *state)
{
	if (state->flags & GEN_BB_WRITE) {
		/* Invalidate cache so that CPU can see any newly DMA'd data */
		invalidate_dcache_range((unsigned long)state->bounce_buffer,
					(unsigned long)(state->bounce_buffer) +
						state->len_aligned);
	}

	if (state->bounce_buffer == state->user_buffer)
		return 0;

	if (state->flags & GEN_BB_WRITE)
		memcpy(state->user_buffer, state->bounce_buffer, state->len);

	free(state->bounce_buffer);

	return 0;
}
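/*
 * Illustrative usage sketch (not part of this file): a driver that DMAs
 * into a possibly unaligned caller buffer would typically bracket the
 * transfer as below. dma_read_blocks() is a hypothetical device-specific
 * helper used only for illustration, not a U-Boot API.
 *
 *	struct bounce_buffer bbstate;
 *	int ret;
 *
 *	ret = bounce_buffer_start(&bbstate, buf, len, GEN_BB_WRITE);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_read_blocks(dev, bbstate.bounce_buffer, bbstate.len_aligned);
 *
 *	bounce_buffer_stop(&bbstate);
 *	return ret;
 */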