static void ffs_closed(struct ffs_data *ffs);
#ifdef CONFIG_AMLOGIC_USB
-static int ffs_malloc_buffer(struct ffs_data *ffs)
+static int ffs_malloc_buffer_init(struct ffs_data *ffs, int cout)
{
int i;
+	/* data_state == -1 marks "slot has no buffer"; reset the whole pool
+	 * first, then pre-allocate the first 'cout' slots below.
+	 */
+	pr_info("assign_ffs_buffer FFS_BUFFER_MAX=%d!!!\n", FFS_BUFFER_MAX);
for (i = 0; i < FFS_BUFFER_MAX; i++) {
+		ffs->buffer[i].data_ep = NULL;
+		ffs->buffer[i].data_state = -1;
+	}
+
+	/* Pre-allocate up to 'cout' slots, clamped to the pool size. */
+	for (i = 0; i < cout; i++) {
+		if (i >= FFS_BUFFER_MAX) {
+			pr_err("<%s>wait alloc (%d) > define (%d)!!!\n",
+				__func__, cout, FFS_BUFFER_MAX);
+			break;
+		}
ffs->buffer[i].data_ep = kzalloc(MAX_PAYLOAD_EPS, GFP_KERNEL);
+		/* NOTE(review): on -ENOMEM the slots allocated in earlier
+		 * iterations are not freed here — presumably the caller runs
+		 * ffs_free_buffer(); confirm.  Also note pre-allocated slots
+		 * keep data_state == -1, which ffs_free_buffer() skips —
+		 * verify assign_ffs_buffer() updates the state, or these
+		 * allocations leak.
+		 */
if (!ffs->buffer[i].data_ep)
return -ENOMEM;
return 0;
}
+/*
+ * Claim a free pool slot (data_state == -1) and lazily allocate its
+ * backing storage.  Entered with ffs->eps_lock held; the lock is
+ * dropped around kzalloc() because GFP_KERNEL may sleep.  Returns the
+ * claimed slot, or NULL when allocation fails or the pool is full.
+ */
+struct ffs_data_buffer *ffs_retry_malloc_buffer(struct ffs_data *ffs)
+{
+	int i;
+
+	pr_info("ffs_retry_malloc_buffer\n");
+	for (i = 0; i < FFS_BUFFER_MAX; i++) {
+		if (ffs->buffer[i].data_state == -1) {
+			/* NOTE(review): eps_lock is released before the slot
+			 * is marked in-use; a concurrent caller could select
+			 * the same index and kzalloc over data_ep, leaking
+			 * one allocation.  Confirm callers serialize, or mark
+			 * the slot before dropping the lock.
+			 */
+			spin_unlock_irq(&ffs->eps_lock);
+			ffs->buffer[i].data_ep
+				= kzalloc(MAX_PAYLOAD_EPS, GFP_KERNEL);
+			spin_lock_irq(&ffs->eps_lock);
+			if (!ffs->buffer[i].data_ep)
+				return NULL;
+			ffs->buffer[i].data_state = 1;
+			return &(ffs->buffer[i]);
+		}
+	}
+	pr_info("assign_ffs_buffer failed, FFS_BUFFER_MAX(%d) is too small!!!\n",
+		FFS_BUFFER_MAX);
+	return NULL;
+}
+
static void ffs_free_buffer(struct ffs_data *ffs)
{
int i;
for (i = 0; i < FFS_BUFFER_MAX; i++) {
-		kfree(ffs->buffer[i].data_ep);
-		ffs->buffer[i].data_ep = NULL;
-		ffs->buffer[i].data_state = 0;
+		/* Only touch slots that ever held a buffer; -1 means "never
+		 * allocated", so kfree there would be a no-op anyway but the
+		 * state must stay -1 for the retry path.
+		 */
+		if (ffs->buffer[i].data_state != -1) {
+			kfree(ffs->buffer[i].data_ep);
+			ffs->buffer[i].data_ep = NULL;
+			/* NOTE(review): state becomes 0, not -1 — since
+			 * ffs_retry_malloc_buffer() only reuses -1 slots,
+			 * verify this is teardown-only, or freed slots can
+			 * never be reclaimed.
+			 */
+			ffs->buffer[i].data_state = 0;
+		}
}
}
}
}
- pr_info("assign_ffs_buffer failed!!!\n");
- return NULL;
+ return ffs_retry_malloc_buffer(ffs);
}
static void release_ffs_buffer(struct ffs_data *ffs,
io_data->req->actual;
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+#ifdef CONFIG_AMLOGIC_USB
+	int i = 0;
+	/* Map io_data->buf back to its owning pool slot.  'buffer' must stay
+	 * NULL when no slot matches (e.g. buf did not come from the pool):
+	 * the release path below does "if (buffer) release_ffs_buffer(...)",
+	 * and a stale non-NULL pointer would release an unrelated slot.
+	 */
+	struct ffs_data_buffer *buffer = NULL;
+
+	for (i = 0; i < FFS_BUFFER_MAX; i++) {
+		if (io_data->buf == io_data->ffs->buffer[i].data_ep) {
+			buffer = &io_data->ffs->buffer[i];
+			break;
+		}
+	}
+#endif
if (io_data->read && ret > 0) {
mm_segment_t oldfs = get_fs();
if (io_data->read)
kfree(io_data->to_free);
+
+#ifdef CONFIG_AMLOGIC_USB
+ if (io_data->aio) {
+ if (buffer)
+ release_ffs_buffer(io_data->ffs, buffer);
+ }
+#else
kfree(io_data->buf);
+#endif
kfree(io_data);
}
#ifdef CONFIG_AMLOGIC_USB
struct ffs_ep *ep = epfile->ep;
struct ffs_data_buffer *buffer = NULL;
- int data_flag = -1;
+ int data_aio_flag = -1;
#else
struct ffs_ep *ep;
#endif
goto error_mutex;
}
#else
- if (io_data->aio) {
- spin_unlock_irq(&epfile->ffs->eps_lock);
- data = kmalloc(data_len, GFP_KERNEL);
- data_flag = 1;
- if (unlikely(!data)) {
- ret = -ENOMEM;
- goto error_mutex;
- }
- } else {
/* Fire the request */
/*
* Avoid kernel panic caused by race condition. For example,
* To avoid this, during FunctionFS mount, we allocated the
* data buffer for requests. And the memory resources has
* been released in kill_sb.
+ *reboot adb disconnect,so buffer aways used assign_ffs_buffer.
*/
buffer = assign_ffs_buffer(epfile->ffs);
- data_flag = -1;
+ data_aio_flag = 1;
if (unlikely(!buffer)) {
ret = -ENOMEM;
spin_unlock_irq(&epfile->ffs->eps_lock);
data = buffer->data_ep;
spin_unlock_irq(&epfile->ffs->eps_lock);
- }
#endif
if (!io_data->read &&
DECLARE_COMPLETION_ONSTACK(done);
#endif
bool interrupted = false;
+#ifdef CONFIG_AMLOGIC_USB
+ data_aio_flag = 1;
+#endif
req = ep->req;
req->buf = data;
req->length = data_len;
} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
ret = -ENOMEM;
} else {
+#ifdef CONFIG_AMLOGIC_USB
+ data_aio_flag = -1;
+#endif
req->buf = data;
req->length = data_len;
mutex_unlock(&epfile->mutex);
error:
#ifdef CONFIG_AMLOGIC_USB
- if (data_flag > 0) {
- kfree(data);
- data = NULL;
- } else {
+ if (data_aio_flag > 0) {
if (buffer)
release_ffs_buffer(epfile->ffs, buffer);
}
if (unlikely(!ffs->data_ep0))
return ERR_PTR(-ENOMEM);
- ret = ffs_malloc_buffer(ffs);
+ ret = ffs_malloc_buffer_init(ffs, 10);
if (ret < 0)
return ERR_PTR(ret);