 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
 * We are initializing two zones for Mbufs and Clusters.
/* #include <sys/param.h> This defines MSIZE 256 */
#if !defined(SCTP_SIMPLE_ALLOCATOR)
#include "user_mbuf.h"
#include "user_environment.h"
#include "user_atomic.h"
#include "netinet/sctp_pcb.h"
#define KIPC_MAX_LINKHDR	4	/* int: max length of link header (see sys/sysctl.h) */
#define KIPC_MAX_PROTOHDR	5	/* int: max length of network header (see sys/sysctl.h) */
int max_linkhdr = KIPC_MAX_LINKHDR;
int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */
 * Zones from which we allocate.
sctp_zone_t zone_mbuf;
sctp_zone_t zone_clust;
sctp_zone_t zone_ext_refcnt;
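/* zone_mbuf holds the fixed-size mbuf headers, zone_clust holds the
 * MCLBYTES-sized external cluster buffers, and zone_ext_refcnt holds the
 * reference counters used to track cluster sharing (see mb_free_ext() and
 * mb_dupcl() below).
 */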
/* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust.
 * Note: struct clust_args is used as a wrapper for an mbuf pointer, because
 * a bare "struct mbuf *clust_mb_args;" does not work here.
 */
struct clust_args clust_mb_args;
static int	mb_ctor_mbuf(void *, void *, int);
static int	mb_ctor_clust(void *, void *, int);
static void	mb_dtor_mbuf(void *, void *);
static void	mb_dtor_clust(void *, void *);

/***************** Functions taken from user_mbuf.h *************/
static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
	if (type == MT_NOINIT)
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
	m->m_data = m->m_dat;
m_get(int how, short type)
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct mb_args mbuf_mb_args;

	/* The following initialization is not yet enclosed within
	 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
	 * thoroughly tested.  See the comment there.
	 */
	mbuf_mb_args.flags = 0;
	mbuf_mb_args.type = type;

	/* The mbuf master zone, zone_mbuf, has already been
	 * created in mbuf_initialize(). */
	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
	/*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/

	/* There are cases when an object is available in the current CPU's
	 * loaded magazine, and in those cases the object's constructor is not
	 * applied.  When that happens, we duplicate the constructor
	 * initialization here, so that the mbuf is properly constructed
	 * before it is returned.
	 */
#if USING_MBUF_CONSTRUCTOR
	if (mret->m_type != type) {
		mbuf_constructor_dup(mret, 0, type);
	mbuf_constructor_dup(mret, 0, type);
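/* Usage sketch (illustrative): a typical caller allocates an mbuf and later
 * frees it; M_NOWAIT and MT_DATA are used here as elsewhere in this file.
 *
 *	struct mbuf *m;
 *
 *	m = m_get(M_NOWAIT, MT_DATA);
 *	if (m != NULL) {
 *		m->m_len = 0;
 *		m_free(m);
 *	}
 */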
m_gethdr(int how, short type)
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct mb_args mbuf_mb_args;

	/* The following initialization is not yet enclosed within
	 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
	 * thoroughly tested.  See the comment there.
	 */
	mbuf_mb_args.flags = M_PKTHDR;
	mbuf_mb_args.type = type;

	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
	/*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
	/* There are cases when an object is available in the current CPU's
	 * loaded magazine, and in those cases the object's constructor is not
	 * applied.  When that happens, we duplicate the constructor
	 * initialization here, so that the mbuf is properly constructed
	 * before it is returned.
	 */
#if USING_MBUF_CONSTRUCTOR
	if (!((mret->m_flags & M_PKTHDR) && (mret->m_type == type))) {
		mbuf_constructor_dup(mret, M_PKTHDR, type);
	mbuf_constructor_dup(mret, M_PKTHDR, type);
m_free(struct mbuf *m)
	struct mbuf *n = m->m_next;

	if (m->m_flags & M_EXT)
	else if ((m->m_flags & M_NOFREE) == 0) {
#if defined(SCTP_SIMPLE_ALLOCATOR)
		mb_dtor_mbuf(m, NULL);
		SCTP_ZONE_FREE(zone_mbuf, m);
		/*umem_cache_free(zone_mbuf, m);*/
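/* Note: m_free() releases a single mbuf and returns the next mbuf in the
 * chain, so a whole chain can be drained with a simple loop, which is exactly
 * what m_freem() below does:
 *
 *	while (mb != NULL)
 *		mb = m_free(mb);
 */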
clust_constructor_dup(caddr_t m_clust, struct mbuf *m)
	/* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
#if !defined(SCTP_SIMPLE_ALLOCATOR)
	if (refcnt == NULL) {
		refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
		/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
	m->m_ext.ext_buf = (caddr_t)m_clust;
	m->m_data = m->m_ext.ext_buf;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_args = NULL;
	m->m_ext.ext_size = size;
	m->m_ext.ext_type = type;
	m->m_ext.ref_cnt = refcnt;
m_clget(struct mbuf *m, int how)
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct clust_args clust_mb_args_l;

	if (m->m_flags & M_EXT) {
		SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
	m->m_ext.ext_buf = (char *)NULL;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	clust_mb_args_l.parent_mbuf = m;
	mclust_ret = SCTP_ZONE_GET(zone_clust, char);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_clust(mclust_ret, &clust_mb_args_l, 0);
	/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
	   On a cluster allocation failure, call umem_reap() and retry.
	if (mclust_ret == NULL) {
#if !defined(SCTP_SIMPLE_ALLOCATOR)
		/* mclust_ret = SCTP_ZONE_GET(zone_clust, char);
		mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
		mclust_ret = SCTP_ZONE_GET(zone_clust, char);
		/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
		/* if (NULL == mclust_ret) { */
		SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
#if USING_MBUF_CONSTRUCTOR
	if (m->m_ext.ext_buf == NULL) {
		clust_constructor_dup(mclust_ret, m);
	clust_constructor_dup(mclust_ret, m);
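/* Example (illustrative): a packet-header mbuf with an attached cluster is
 * the usual way to get MCLBYTES of contiguous buffer space.  The caller must
 * check M_EXT afterwards, since m_clget() can fail to attach a cluster.
 *
 *	struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m != NULL) {
 *		m_clget(m, M_NOWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_freem(m);
 *			m = NULL;
 *		}
 *	}
 */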
m_getm2(struct mbuf *m, int len, int how, short type, int flags, int allonebuf)
	struct mbuf *mb, *nm = NULL, *mtail = NULL;
	int size, mbuf_threshold, space_needed = len;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL) {

	mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);

	/* Loop and append maximum sized mbufs to the chain tail. */
	if ((!allonebuf && len >= MCLBYTES) || (len > (int)(((mbuf_threshold - 1) * MLEN) + MHLEN))) {
		mb = m_gethdr(how, type);
		/* SCTP_BUF_LEN(mb) = MCLBYTES; */
	} else if (flags & M_PKTHDR) {
		mb = m_gethdr(how, type);
		mb = m_get(how, type);

	/* Fail the whole operation if one mbuf can't be allocated. */
	if (allonebuf != 0 && size < space_needed) {

	flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */

	mtail->m_flags |= M_EOR;	/* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
	mtail->m_flags &= ~M_EOR;
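/* Usage note: m_getm2() either extends the supplied chain m or builds a new
 * chain holding at least len bytes; with allonebuf != 0 the caller requires
 * the whole length to fit in a single mbuf or cluster, and the call fails if
 * that cannot be satisfied.  m_uiotombuf() below is a typical consumer.
 */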
 * Copy the contents of uio into a properly sized mbuf chain.
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
	 * len can be zero or an arbitrarily large value bounded by
	 * the total data supplied by the uio.
	total = min(uio->uio_resid, len);
	total = uio->uio_resid;
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr.  We can't align past it.
	 * Give us the full allocation or nothing.
	 * If len is zero return the smallest empty mbuf.
	m = m_getm2(NULL, (int)max(total + align, 1), how, MT_DATA, flags, 0);

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = (int)min(M_TRAILINGSPACE(mb), total - progress);
		error = uiomove(mtod(mb, void *), length, uio);
	if (flags & M_PKTHDR)
		m->m_pkthdr.len += length;
	KASSERT(progress == total, ("%s: progress != total", __func__));
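/* m_length() below returns the total number of data bytes in the chain m0
 * and, when last is non-NULL, stores a pointer to the final mbuf in *last;
 * m_last() simply returns that final mbuf.
 */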
m_length(struct mbuf *m0, struct mbuf **last)
	for (m = m0; m != NULL; m = m->m_next) {
		if (m->m_next == NULL)

m_last(struct mbuf *m)
 * Unlink a tag from the list of tags associated with an mbuf.
m_tag_unlink(struct mbuf *m, struct m_tag *t)
	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);

 * Reclaim resources associated with a tag.
m_tag_free(struct m_tag *t)

 * Set up the contents of a tag.  Note that this does not fill in the free
 * method; the caller is expected to do that.
 * XXX probably should be called m_tag_init, but that was already taken.
m_tag_setup(struct m_tag *t, uint32_t cookie, int type, int len)
	t->m_tag_cookie = cookie;

/************ End functions from user_mbuf.h ******************/

/************ End functions to substitute umem_cache_alloc and umem_cache_free **************/
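/* Note: mbuf_initialize() below must run once during stack start-up, before
 * any m_get()/m_gethdr()/m_clget() call, since those allocate from the zones
 * (zone_mbuf, zone_clust, zone_ext_refcnt) that it creates.
 */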
mbuf_initialize(void *dummy)
	 * __Userspace__ Configure UMA zones for Mbufs and Clusters.
	 * (TODO: m_getcl() - using packet secondary zone).
	 * There is no provision for trash_init and trash_fini in umem.
	/* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
	                                 mb_ctor_mbuf, mb_dtor_mbuf, NULL,
	zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
	SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
	zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
	                              mb_ctor_mbuf, mb_dtor_mbuf, NULL,

	/*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
	SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);

	/*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
	               mb_ctor_clust, mb_dtor_clust, NULL,
	zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
	SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
	zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
	                               mb_ctor_clust, mb_dtor_clust, NULL,

	/* uma_prealloc() goes here... */
/* __Userspace__ Add umem_reap here for low memory situations?

 * Constructor for the Mbuf master zone.  We have a different constructor
 * for allocating the cluster.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See user_mbuf.h.
 *
 * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what
 * was passed when umem_cache_alloc was called.
 * TODO: Use UMEM_NOFAIL in umem_cache_alloc, define a failure handler and
 * call umem_nofail_callback(my_failure_handler) in the stack initialization routines.
 * The advantage of using UMEM_NOFAIL is that we don't have to check whether
 * umem_cache_alloc was successful or not; the failure handler takes care of it, if we use the UMEM_NOFAIL
 * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc)
 * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback.
 * It also mentions that umem_nofail_callback is Evolving.
mb_ctor_mbuf(void *mem, void *arg, int flgs)
#if USING_MBUF_CONSTRUCTOR
	struct mb_args *args;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;

	 * The mbuf is initialized later.
	if (type == MT_NOINIT)

	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
	m->m_data = m->m_dat;
 * The Mbuf master zone destructor.
 * This would be called in response to umem_cache_destroy().
 * TODO: Recheck if this is what we want to do in this destructor.
 * (Note: the number of times mb_dtor_mbuf is called is equal to the
 * number of individual mbufs allocated from zone_mbuf.
mb_dtor_mbuf(void *mem, void *arg)
	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0) {
		m_tag_delete_chain(m, NULL);
 * The Cluster zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf for which we
 * are configuring cluster storage.  If 'arg' is
 * empty we allocate just the cluster without attaching
 * it to an mbuf.  See mbuf.h.
mb_ctor_clust(void *mem, void *arg, int flgs)
#if USING_MBUF_CONSTRUCTOR
	struct clust_args *cla;

	/* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
	cla = (struct clust_args *)arg;
	m = cla->parent_mbuf;
	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/

	m->m_ext.ext_buf = (caddr_t)mem;
	m->m_data = m->m_ext.ext_buf;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_args = NULL;
	m->m_ext.ext_size = size;
	m->m_ext.ext_type = type;
	m->m_ext.ref_cnt = refcnt;
mb_dtor_clust(void *mem, void *arg)
	/* mem is of type caddr_t.  In sys/types.h we have typedef char * caddr_t; */
	/* mb_dtor_clust is called at the time of umem_cache_destroy() (the number of times
	 * mb_dtor_clust is called is equal to the number of individual mbufs allocated
	 * from zone_clust; similarly for mb_dtor_mbuf).
	 * At this point the following:
	 *   m = (struct mbuf *)arg;
	 *   assert(*(m->m_ext.ref_cnt) == 0);
	 * is not meaningful, since m->m_ext.ref_cnt = NULL;
	 * has been done in mb_free_ext().
	 */
/* Unlink and free a packet tag. */
m_tag_delete(struct mbuf *m, struct m_tag *t)
	KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));

/* Unlink and free a packet tag chain, starting from given tag. */
m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
	KASSERT(m, ("m_tag_delete_chain: null mbuf"));
	p = SLIST_FIRST(&m->m_pkthdr.tags);
	while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
sctp_print_mbuf_chain(struct mbuf *m)
	SCTPDBG(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
	for (; m; m = m->m_next) {
		SCTPDBG(SCTP_DEBUG_USR, "%p: m_len = %d, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
		if (m->m_flags & M_EXT)
			SCTPDBG(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
 * Free an entire chain of mbufs and associated external buffers, if
m_freem(struct mbuf *mb)
 * clean mbufs with M_EXT storage attached to them
 * if the reference count hits 1.
mb_free_ext(struct mbuf *m)
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));

	 * check if the header is embedded in the cluster
	skipmbuf = (m->m_flags & M_NOFREE);

	/* Free the externally attached storage if this
	 * mbuf is the only reference to it.
	 * __Userspace__ TODO: jumbo frames
	/* NOTE: We used to have the code that SCTP_DECREMENT_AND_CHECK_REFCOUNT
	   reduces to here, but the IPHONE malloc commit changed it to compare
	   against 0 instead of 1 (see the next line).  Why?
	   ... this caused a huge memory leak on Linux.
	if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
		if (m->m_ext.ext_type == EXT_CLUSTER) {
#if defined(SCTP_SIMPLE_ALLOCATOR)
			mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
			SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
			SCTP_ZONE_FREE(zone_ext_refcnt, (u_int *)m->m_ext.ref_cnt);
			m->m_ext.ref_cnt = NULL;

	/* __Userspace__ Also freeing the storage for ref_cnt
	 * Free this mbuf back to the mbuf zone with all m_ext
	 * information purged.
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_args = NULL;
	m->m_ext.ref_cnt = NULL;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_type = 0;
	m->m_flags &= ~M_EXT;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_dtor_mbuf(m, NULL);
	SCTP_ZONE_FREE(zone_mbuf, m);
	/*umem_cache_free(zone_mbuf, m);*/
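/* Reference-count lifecycle: clust_constructor_dup()/mb_ctor_clust() allocate
 * a counter from zone_ext_refcnt and attach it via m_ext.ref_cnt, mb_dupcl()
 * bumps it when a copy shares the cluster (see m_copym()), and mb_free_ext()
 * above releases the cluster and the counter once the last reference is gone.
 */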
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
m_pullup(struct mbuf *n, int len)
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
	MGET(m, M_NOWAIT, n->m_type);
	if (n->m_flags & M_PKTHDR)
	space = (int)(&m->m_dat[MLEN] - (m->m_data + m->m_len));
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)count);
	} while (len > 0 && n);
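/* Typical m_pullup() use (illustrative): before casting the front of a chain
 * to a header structure, make sure that many bytes are contiguous; on failure
 * the chain has already been freed.
 *
 *	if (m->m_len < (int)sizeof(struct sctp_chunkhdr) &&
 *	    (m = m_pullup(m, sizeof(struct sctp_chunkhdr))) == NULL)
 *		return;
 *	ch = mtod(m, struct sctp_chunkhdr *);
 */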
m_dup1(struct mbuf *m, int off, int len, int wait)
	struct mbuf *n = NULL;

	if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
	if (len >= MINCLSIZE) {
		m_clget(n, wait);	/* TODO: include code for copying the header */
		m_dup_pkthdr(n, m, wait);
		n = m_gethdr(wait, m->m_type);
		n = m_get(wait, m->m_type);
		return NULL;	/* ENOBUFS */
	if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
	m_copydata(m, off, len, mtod(n, caddr_t));
/* Taken from sys/kern/uipc_mbuf2.c */
m_pulldown(struct mbuf *m, int off, int len, int *offp)
	int hlen, tlen, olen;

	/* check invalid arguments. */
	KASSERT(m, ("m == NULL in m_pulldown()"));
	if (len > MCLBYTES) {
		return NULL;	/* impossible */

#ifdef PULLDOWN_DEBUG
	SCTPDBG(SCTP_DEBUG_USR, "before:");
	for (t = m; t; t = t->m_next)
		SCTPDBG(SCTP_DEBUG_USR, " %d", t->m_len);
	SCTPDBG(SCTP_DEBUG_USR, "\n");
	while (n != NULL && off > 0) {

	/* be sure to point at a non-empty mbuf */
	while (n != NULL && n->m_len == 0)
		return NULL;	/* mbuf chain too short */

	if ((n->m_flags & M_EXT) == 0 ||
	    (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))

	 * the target data is on <n, off>.
	 * if we got enough data on the mbuf "n", we're done.
	if ((off == 0 || offp) && len <= n->m_len - off && writable)

	 * when len <= n->m_len - off and off != 0, it is a special case:
	 * the len bytes from <n, off> sit in a single mbuf, but the caller does
	 * not like the starting position (off).
	 * chop the current mbuf into two pieces, set off to 0.
	if (len <= n->m_len - off) {
		o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
			return NULL;	/* ENOBUFS */
		o->m_next = n->m_next;

	 * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
	 * and construct a contiguous mbuf with m_len == len.
	 * note that hlen + tlen == len, and tlen > 0.
	hlen = n->m_len - off;

	 * ensure that we have enough trailing data on mbuf chain.
	 * if not, we can do nothing about the chain.
	for (o = n->m_next; o != NULL; o = o->m_next)
	if (hlen + olen < len) {
		return NULL;	/* mbuf chain too short */

	 * we need to use m_copydata() to get data from <n->m_next, 0>.
	if ((off == 0 || offp) && (M_TRAILINGSPACE(n) >= tlen) && writable) {
		m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
		m_adj(n->m_next, tlen);
	if ((off == 0 || offp) && (M_LEADINGSPACE(n->m_next) >= hlen) && writable) {
		n->m_next->m_data -= hlen;
		n->m_next->m_len += hlen;
		memcpy(mtod(n->m_next, caddr_t), mtod(n, caddr_t) + off, hlen);
	 * now we need to do it the hard way.  don't m_copy as there's no room
	m_clget(o, M_NOWAIT);
	/* o = m_getcl(M_NOWAIT, m->m_type, 0);*/
	o = m_get(M_NOWAIT, m->m_type);
		return NULL;	/* ENOBUFS */

	/* get hlen from <n, off> into <o, 0> */
	memcpy(mtod(o, caddr_t), mtod(n, caddr_t) + off, hlen);
	/* get tlen from <n->m_next, 0> into <o, hlen> */
	m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
	m_adj(n->m_next, tlen);
	o->m_next = n->m_next;

#ifdef PULLDOWN_DEBUG
	SCTPDBG(SCTP_DEBUG_USR, "after:");
	for (t = m; t; t = t->m_next)
		SCTPDBG(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
	SCTPDBG(SCTP_DEBUG_USR, " (off=%d)\n", off);
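/* Note on m_pulldown(): on success it returns the mbuf that now holds the
 * requested len bytes contiguously and, if offp is non-NULL, stores the
 * offset of those bytes within that mbuf in *offp (with offp == NULL the data
 * is expected to start at offset 0).  On failure the chain is freed and NULL
 * is returned.
 */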
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
mb_dupcl(struct mbuf *n, struct mbuf *m)
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));

	if (*(m->m_ext.ref_cnt) == 1)
		*(m->m_ext.ref_cnt) += 1;
		atomic_add_int(m->m_ext.ref_cnt, 1);
	n->m_ext.ext_buf = m->m_ext.ext_buf;
	n->m_ext.ext_free = m->m_ext.ext_free;
	n->m_ext.ext_args = m->m_ext.ext_args;
	n->m_ext.ext_size = m->m_ext.ext_size;
	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	n->m_ext.ext_type = m->m_ext.ext_type;
	n->m_flags |= M_EXT;
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
m_copym(struct mbuf *m, int off0, int len, int wait)
	struct mbuf *n, **np;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	KASSERT(m != NULL, ("m_copym, m is NULL"));
#if !defined(INVARIANTS)
	if (off == 0 && m->m_flags & M_PKTHDR)
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
		MGETHDR(n, wait, m->m_type);
		MGET(n, wait, m->m_type);
			if (!m_dup_pkthdr(n, m, wait))
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
				n->m_pkthdr.len = len;
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, (u_int)n->m_len);
	if (len != M_COPYALL)
m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
	struct m_tag *p, *t, *tprev = NULL;

	KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
	m_tag_delete_chain(to, NULL);
	SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
		t = m_tag_copy(p, how);
			m_tag_delete_chain(to, NULL);
			SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
			SLIST_INSERT_AFTER(tprev, t, m_tag_link);
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
	KASSERT(to, ("m_dup_pkthdr: to is NULL"));
	KASSERT(from, ("m_dup_pkthdr: from is NULL"));
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
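/* Contrast with m_move_pkthdr() above: m_dup_pkthdr() leaves "from" intact
 * and deep-copies its tag chain via m_tag_copy_chain(), while m_move_pkthdr()
 * transfers the header and tag list and strips M_PKTHDR from the source.
 */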
/* Copy a single tag. */
m_tag_copy(struct m_tag *t, int how)
	KASSERT(t, ("m_tag_copy: null tag"));
	p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
	memcpy(p + 1, t + 1, t->m_tag_len);	/* Copy the data */
/* Get a packet tag structure along with specified data following. */
m_tag_alloc(uint32_t cookie, int type, int len, int wait)
	t = malloc(len + sizeof(struct m_tag));
	m_tag_setup(t, cookie, type, len);
	t->m_tag_free = m_tag_free_default;

/* Free a packet tag. */
m_tag_free_default(struct m_tag *t)
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
	struct mbuf *m = m0, *n;

	while (off > (mlen = m->m_len)) {
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			memset(mtod(n, caddr_t), 0, MLEN);
			n->m_len = min(MLEN, len + off);
		mlen = min(m->m_len - off, len);
		memcpy(off + mtod(m, caddr_t), cp, (u_int)mlen);
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			n->m_len = min(MLEN, len);
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
m_prepend(struct mbuf *m, int len, int how)
	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
		MGET(mn, how, m->m_type);
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	if (m->m_flags & M_PKTHDR) {
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, caddr_t) + off, count);
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
m_cat(struct mbuf *m, struct mbuf *n)
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
		/* splat the data from one into the other */
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)n->m_len);
		m->m_len += n->m_len;
m_adj(struct mbuf *mp, int req_len)
	if ((m = mp) == NULL)
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
			if (m->m_next == (struct mbuf *)0)
		if (m->m_len >= len) {
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
			if (m->m_next != NULL) {
/* m_split is used within sctp_handle_cookie_echo. */
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
m_split(struct mbuf *m0, int len0, int wait)
	u_int len = len0, remain;

	/* MBUF_CHECKSLEEP(wait); */
	for (m = m0; m && (int)len > m->m_len; m = m->m_next)
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		MGET(n, wait, m->m_type);
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	n->m_next = m->m_next;
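/* pack_send_buffer() below linearizes an mbuf chain: it walks the chain,
 * copies each mbuf's data into the caller-supplied flat buffer and returns
 * the total number of bytes copied.  The caller must supply a buffer at least
 * as large as the chain's total length (see m_length() above).
 */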
pack_send_buffer(caddr_t buffer, struct mbuf *mb) {
	int total_count_copied = 0;

		count_to_copy = mb->m_len;
		memcpy(buffer + offset, mtod(mb, caddr_t), count_to_copy);
		offset += count_to_copy;
		total_count_copied += count_to_copy;

	return (total_count_copied);