Merged mga branch with trunk
linux/mga_bufs.c
/* mga_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
 * Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
 *          Jeff Hartmann <jhartmann@precisioninsight.com>
 *
 * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_bufs.c,v 1.1 2000/02/11 17:26:06 dawes Exp $
 *
 */

#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
#include "linux/un.h"

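/* Allocate DMA buffers out of an already-bound AGP region for the
 * DRM_IOCTL_ADD_BUFS ioctl.  All buffers share a single power-of-two
 * order and are placed starting at request.agp_start within the AGP
 * aperture; each order may be populated only once per device.
 */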
int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
                    unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request),
                           -EFAULT);

        count      = request.count;
        order      = drm_order(request.size);
        size       = 1 << order;
        agp_offset = request.agp_start;
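        /* order is log2 of the buffer size; page_order expresses the same
         * size in whole pages.  Buffers are laid out at "alignment" byte
         * strides from agp_start. */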
        alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total      = PAGE_SIZE << page_order;
        byte_count = 0;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %ld\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);
        DRM_DEBUG("byte_count: %d\n", byte_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        if (dev->queue_count) return -EBUSY; /* Not while in use */
        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* Each order may only be populated once */
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size   = size;
        entry->page_order = page_order;
        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                DRM_DEBUG("offset : %ld\n", offset);

                buf->offset = offset; /* Offset from agp_start within the AGP aperture */
                buf->bus_address = dev->agp->base + agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->agp->base);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->pid = 0;

                buf->dev_private = drm_alloc(sizeof(drm_mga_buf_priv_t),
                                             DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Unwind this order's partially built entry */
                        for (i = 0; i < entry->buf_count; i++) {
                                drm_free(entry->buflist[i].dev_private,
                                         sizeof(drm_mga_buf_priv_t),
                                         DRM_MEM_BUFS);
                        }
                        drm_free(entry->buflist,
                                 count * sizeof(*entry->buflist),
                                 DRM_MEM_BUFS);
                        entry->buflist   = NULL;
                        entry->buf_count = 0;
                        up(&dev->struct_sem);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                buf->dev_priv_size = sizeof(drm_mga_buf_priv_t);

#if DRM_DMA_HISTOGRAM
                buf->time_queued = 0;
                buf->time_dispatched = 0;
                buf->time_completed = 0;
                buf->time_freed = 0;
#endif
                offset = offset + alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;

                DRM_DEBUG("buffer %d @ %p\n",
                          entry->buf_count, buf->address);
        }

        dma->buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist),
                                   DRM_MEM_BUFS);
        for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
                dma->buflist[i] = &entry->buflist[i - dma->buf_count];

        dma->buf_count  += entry->buf_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);

        dma->byte_count += byte_count;

        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        drm_freelist_create(&entry->freelist, entry->buf_count);
        for (i = 0; i < entry->buf_count; i++) {
                drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
        }

        up(&dev->struct_sem);

        request.count = entry->buf_count;
        request.size  = size;

        copy_to_user_ret((drm_buf_desc_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        atomic_dec(&dev->buf_alloc);

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %ld\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);
        DRM_DEBUG("byte_count: %d\n", byte_count);

        dma->flags = _DRM_DMA_USE_AGP;

        DRM_DEBUG("dma->flags : %x\n", dma->flags);

        return 0;
}

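/* Allocate system-memory DMA buffers (the PCI path) for the
 * DRM_IOCTL_ADD_BUFS ioctl.  Pages come from drm_alloc_pages(); each
 * page group is carved into aligned buffers and recorded in the
 * device-wide page list so the whole set can later be mmapped as one
 * contiguous range.
 */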
int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd,
                    unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        drm_buf_desc_t   request;
        int              count;
        int              order;
        int              size;
        int              total;
        int              page_order;
        drm_buf_entry_t  *entry;
        unsigned long    page;
        drm_buf_t        *buf;
        int              alignment;
        unsigned long    offset;
        int              i;
        int              byte_count;
        int              page_count;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request),
                           -EFAULT);

        count      = request.count;
        order      = drm_order(request.size);
        size       = 1 << order;

        DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
                  request.count, request.size, size, order, dev->queue_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        if (dev->queue_count) return -EBUSY; /* Not while in use */

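        /* Each drm_alloc_pages() call returns PAGE_SIZE << page_order
         * bytes, which the loop below slices into buffers of "size"
         * bytes at "alignment" strides. */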
        alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total      = PAGE_SIZE << page_order;

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* Each order may only be populated once */
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
                                   DRM_MEM_SEGS);
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist),
                         DRM_MEM_BUFS);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->seglist, 0, count * sizeof(*entry->seglist));

        dma->pagelist = drm_realloc(dma->pagelist,
                                    dma->page_count * sizeof(*dma->pagelist),
                                    (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES);
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size   = size;
        entry->page_order = page_order;
        byte_count        = 0;
        page_count        = 0;
        while (entry->buf_count < count) {
                if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
                entry->seglist[entry->seg_count++] = page;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  page + PAGE_SIZE * i);
                        dma->pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head(&buf->dma_wait);
                        buf->pid     = 0;
#if DRM_DMA_HISTOGRAM
                        buf->time_queued     = 0;
                        buf->time_dispatched = 0;
                        buf->time_completed  = 0;
                        buf->time_freed      = 0;
#endif
                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        dma->buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist),
                                   DRM_MEM_BUFS);
        for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
                dma->buflist[i] = &entry->buflist[i - dma->buf_count];

        dma->buf_count  += entry->buf_count;
        dma->seg_count  += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        drm_freelist_create(&entry->freelist, entry->buf_count);
        for (i = 0; i < entry->buf_count; i++) {
                drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
        }

        up(&dev->struct_sem);

        request.count = entry->buf_count;
        request.size  = size;

        copy_to_user_ret((drm_buf_desc_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        atomic_dec(&dev->buf_alloc);
        return 0;
}

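/* DRM_IOCTL_ADD_BUFS entry point: peek at request.flags and hand the
 * ioctl to the AGP or PCI allocator, which re-copies the full request.
 */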
int mga_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_buf_desc_t   request;

        copy_from_user_ret(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request),
                           -EFAULT);

        if (request.flags & _DRM_AGP_BUFFER)
                return mga_addbufs_agp(inode, filp, cmd, arg);
        else
                return mga_addbufs_pci(inode, filp, cmd, arg);
}

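/* DRM_IOCTL_INFO_BUFS: report the buffer count, size, and freelist
 * watermarks for every order that has buffers.  Bumping dev->buf_use
 * here prevents any further buffer allocation.
 */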
int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        drm_buf_info_t   request;
        int              i;
        int              count;

        if (!dma) return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        copy_from_user_ret(&request,
                           (drm_buf_info_t *)arg,
                           sizeof(request),
                           -EFAULT);

        for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
                if (dma->bufs[i].buf_count) ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request.count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
                        if (dma->bufs[i].buf_count) {
                                copy_to_user_ret(&request.list[count].count,
                                                 &dma->bufs[i].buf_count,
                                                 sizeof(dma->bufs[0].buf_count),
                                                 -EFAULT);
                                copy_to_user_ret(&request.list[count].size,
                                                 &dma->bufs[i].buf_size,
                                                 sizeof(dma->bufs[0].buf_size),
                                                 -EFAULT);
                                copy_to_user_ret(&request.list[count].low_mark,
                                                 &dma->bufs[i].freelist.low_mark,
                                                 sizeof(dma->bufs[0].freelist.low_mark),
                                                 -EFAULT);
                                copy_to_user_ret(&request.list[count].high_mark,
                                                 &dma->bufs[i].freelist.high_mark,
                                                 sizeof(dma->bufs[0].freelist.high_mark),
                                                 -EFAULT);
                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].freelist.low_mark,
                                          dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request.count = count;

        copy_to_user_ret((drm_buf_info_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        return 0;
}

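/* DRM_IOCTL_MARK_BUFS: set the freelist low/high watermarks for the
 * order matching request.size.  The marks must lie within the number
 * of buffers actually allocated for that order.
 */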
int mga_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        drm_buf_desc_t   request;
        int              order;
        drm_buf_entry_t  *entry;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_desc_t *)arg,
                           sizeof(request),
                           -EFAULT);

        DRM_DEBUG("%d, %d, %d\n",
                  request.size, request.low_mark, request.high_mark);
        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
        entry = &dma->bufs[order];

        if (request.low_mark < 0 || request.low_mark > entry->buf_count)
                return -EINVAL;
        if (request.high_mark < 0 || request.high_mark > entry->buf_count)
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

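/* DRM_IOCTL_FREE_BUFS: return a list of buffers to the free pool.
 * Each index is validated, and a buffer may only be freed by the
 * process that currently owns it.
 */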
int mga_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        drm_buf_free_t   request;
        int              i;
        int              idx;
        drm_buf_t        *buf;

        if (!dma) return -EINVAL;

        copy_from_user_ret(&request,
                           (drm_buf_free_t *)arg,
                           sizeof(request),
                           -EFAULT);

        DRM_DEBUG("%d\n", request.count);
        for (i = 0; i < request.count; i++) {
                copy_from_user_ret(&idx,
                                   &request.list[i],
                                   sizeof(idx),
                                   -EFAULT);
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  idx, dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if (buf->pid != current->pid) {
                        DRM_ERROR("Process %d freeing buffer owned by %d\n",
                                  current->pid, buf->pid);
                        return -EINVAL;
                }
                drm_free_buffer(dev, buf);
        }

        return 0;
}

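/* DRM_IOCTL_MAP_BUFS: mmap the buffer pool into the caller and fill
 * request.list with the index, size, and user address of every buffer.
 * Fails with -EBUSY while another caller is still allocating buffers.
 */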
int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        int              retcode = 0;
        const int        zero    = 0;
        unsigned long    virtual;
        unsigned long    address;
        drm_buf_map_t    request;
        int              i;

        if (!dma) return -EINVAL;

        DRM_DEBUG("\n");

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                DRM_DEBUG("Busy\n");
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        copy_from_user_ret(&request,
                           (drm_buf_map_t *)arg,
                           sizeof(request),
                           -EFAULT);

        DRM_DEBUG("mga_mapbufs\n");
        DRM_DEBUG("dma->flags : %x\n", dma->flags);

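        /* AGP buffers live in a dedicated map, so mmap through that map's
         * offset; PCI buffers are mapped through offset 0 of the DRM fd. */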
        if (request.count >= dma->buf_count) {
                if (dma->flags & _DRM_DMA_USE_AGP) {
                        drm_mga_private_t *dev_priv = dev->dev_private;
                        drm_map_t *map = NULL;

                        map = dev->maplist[dev_priv->buffer_map_idx];
                        if (!map) {
                                DRM_DEBUG("map is null\n");
                                retcode = -EINVAL;
                                goto done;
                        }

                        DRM_DEBUG("map->offset : %lx\n", map->offset);
                        DRM_DEBUG("map->size : %lx\n", map->size);
                        DRM_DEBUG("map->type : %d\n", map->type);
                        DRM_DEBUG("map->flags : %x\n", map->flags);
                        DRM_DEBUG("map->handle : %p\n", map->handle);
                        DRM_DEBUG("map->mtrr : %d\n", map->mtrr);
                        down(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, map->size,
                                          PROT_READ|PROT_WRITE,
                                          MAP_SHARED,
                                          (unsigned long)map->offset);
                        up(&current->mm->mmap_sem);
                } else {
                        down(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, dma->byte_count,
                                          PROT_READ|PROT_WRITE, MAP_SHARED, 0);
                        up(&current->mm->mmap_sem);
                }
                if (virtual > -1024UL) {
                        /* do_mmap() failed: the return value is -errno
                         * encoded in the last page of the address space */
                        DRM_DEBUG("mmap error\n");
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void *)virtual;

                for (i = 0; i < dma->buf_count; i++) {
                        if (copy_to_user(&request.list[i].idx,
                                         &dma->buflist[i]->idx,
                                         sizeof(request.list[0].idx))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].total,
                                         &dma->buflist[i]->total,
                                         sizeof(request.list[0].total))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].used,
                                         &zero,
                                         sizeof(zero))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset;
                        if (copy_to_user(&request.list[i].address,
                                         &address,
                                         sizeof(address))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

        copy_to_user_ret((drm_buf_map_t *)arg,
                         &request,
                         sizeof(request),
                         -EFAULT);

        DRM_DEBUG("retcode : %d\n", retcode);

        return retcode;
}