Diffstat (limited to 'src/VBox/Devices/Network/slirp/bsd')
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/Makefile.kup               |    0
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/amd64/Makefile.kup         |    0
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/amd64/in_cksum.c           |  242
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/amd64/include/in_cksum.h   |   84
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/arm64/Makefile.kup         |    0
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/arm64/in_cksum.c           |  242
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/arm64/include/in_cksum.h   |   84
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/i386/Makefile.kup          |    0
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/i386/in_cksum.c            |  499
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/i386/include/in_cksum.h    |  145
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/kern/Makefile.kup          |    0
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/kern/kern_mbuf.c           |  824
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/kern/subr_sbuf.c           |  594
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/kern/uipc_mbuf.c           | 2238
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/kern/uipc_mbuf2.c          |  539
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/sys/mbuf.h                 | 1177
-rw-r--r--  src/VBox/Devices/Network/slirp/bsd/sys/sbuf.h                 |   95
17 files changed, 6763 insertions, 0 deletions
diff --git a/src/VBox/Devices/Network/slirp/bsd/Makefile.kup b/src/VBox/Devices/Network/slirp/bsd/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/Makefile.kup
diff --git a/src/VBox/Devices/Network/slirp/bsd/amd64/Makefile.kup b/src/VBox/Devices/Network/slirp/bsd/amd64/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/amd64/Makefile.kup
diff --git a/src/VBox/Devices/Network/slirp/bsd/amd64/in_cksum.c b/src/VBox/Devices/Network/slirp/bsd/amd64/in_cksum.c
new file mode 100644
index 00000000..351003e3
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/amd64/in_cksum.c
@@ -0,0 +1,242 @@
+/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1996
+ * Matt Thomas <matt@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef VBOX
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+__FBSDID("$FreeBSD: src/sys/amd64/amd64/in_cksum.c,v 1.5.20.1 2009/04/15 03:14:26 kensmith Exp $");
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+#else
+# include "in_cksum.h"
+# include "slirp.h"
+#endif
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ * (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
+
+static const u_int32_t in_masks[] = {
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+};
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const void *buf, int len)
+{
+ const u_int32_t *lw = (const u_int32_t *) buf;
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (intptr_t) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (intptr_t) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((RTHCUINTPTR) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+ /*
+ * Read ahead ("prefilling") to start the load of the next cache
+ * line, then add up the current cache line and save the prefetched
+ * word for the next loop iteration.
+ */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
+ if ((clen ^ (intptr_t) addr) & 1)
+ sum += in_cksumdata(addr, mlen) << 8;
+ else
+ sum += in_cksumdata(addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(const struct ip *ip)
+{
+ u_int64_t sum = in_cksumdata(ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+ REDUCE16;
+ return (~sum & 0xffff);
+}
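[Note: the routine above accumulates 32-bit words into a 64-bit sum and folds it down with REDUCE32/REDUCE16. A minimal standalone sketch of that folding scheme, not part of this changeset, checked against a well-known IPv4 header whose checksum is 0xb861; big-endian word assembly is used only so the expected value is byte-order independent:]

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 64-bit accumulator to 16 bits with end-around carries,
     * mirroring REDUCE32 followed by REDUCE16/ADDCARRY. */
    static uint16_t fold64(uint64_t sum)
    {
        sum = (sum & 0xffff) + ((sum >> 16) & 0xffff)
            + ((sum >> 32) & 0xffff) + (sum >> 48);
        sum = (sum & 0xffff) + (sum >> 16);   /* fold remaining carries */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    int main(void)
    {
        /* Sample IPv4 header with the checksum field zeroed;
         * the correct checksum for it is 0xb861. */
        static const uint8_t h[20] = {
            0x45,0x00,0x00,0x73, 0x00,0x00,0x40,0x00,
            0x40,0x11,0x00,0x00, 0xc0,0xa8,0x00,0x01,
            0xc0,0xa8,0x00,0xc7
        };
        uint64_t sum = 0;
        int i;

        /* Sum 32-bit words, as in_cksumdata() does for aligned input. */
        for (i = 0; i < 20; i += 4)
            sum += (uint32_t)h[i] << 24 | (uint32_t)h[i+1] << 16
                 | (uint32_t)h[i+2] << 8 | h[i+3];
        printf("cksum = 0x%04x\n", (unsigned)(uint16_t)~fold64(sum));
        return 0;
    }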
diff --git a/src/VBox/Devices/Network/slirp/bsd/amd64/include/in_cksum.h b/src/VBox/Devices/Network/slirp/bsd/amd64/include/in_cksum.h
new file mode 100644
index 00000000..47a4565e
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/amd64/include/in_cksum.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD: src/sys/amd64/include/in_cksum.h,v 1.5.20.1 2009/04/15 03:14:26 kensmith Exp $
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#ifndef VBOX
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+#include <sys/cdefs.h>
+#else
+# include "slirp.h"
+#endif
+
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+
+/*
+ * It is useful to have an Internet checksum routine which is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#ifdef __CC_SUPPORTS___INLINE
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+ int __tmpsum;
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256;
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+#else
+
+#define in_cksum_update(ip) \
+ do { \
+ int __tmpsum; \
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256; \
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16)); \
+ } while(0)
+
+#endif
+
+#if defined(_KERNEL) || defined(VBOX)
+u_int in_cksum_hdr(const struct ip *ip);
+u_short in_addword(u_short sum, u_short b);
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif
+
+#endif /* _MACHINE_IN_CKSUM_H_ */
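[Note: in_cksum_update() above is the incremental update for a TTL decrement: lowering ip_ttl, the high byte of the TTL/protocol word, decreases the header sum by 0x0100, so the stored complemented checksum must rise by 0x0100 with the end-around carry folded back in (cf. RFC 1141). A tiny runnable illustration of the identity, with hypothetical host-order values matching the ntohs()/htons() wrapping in the routine:]

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t csum_old = 0xff50;                /* stored checksum before ip_ttl-- */
        uint32_t t = (uint32_t)csum_old + 0x0100;  /* sum fell by 0x0100, ~sum rises */
        uint16_t csum_new = (uint16_t)(t + (t >> 16)); /* fold the end-around carry */
        printf("0x%04x -> 0x%04x\n", csum_old, csum_new); /* 0xff50 -> 0x0051 */
        return 0;
    }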
diff --git a/src/VBox/Devices/Network/slirp/bsd/arm64/Makefile.kup b/src/VBox/Devices/Network/slirp/bsd/arm64/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/arm64/Makefile.kup
diff --git a/src/VBox/Devices/Network/slirp/bsd/arm64/in_cksum.c b/src/VBox/Devices/Network/slirp/bsd/arm64/in_cksum.c
new file mode 100644
index 00000000..351003e3
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/arm64/in_cksum.c
@@ -0,0 +1,242 @@
+/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1996
+ * Matt Thomas <matt@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef VBOX
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+__FBSDID("$FreeBSD: src/sys/amd64/amd64/in_cksum.c,v 1.5.20.1 2009/04/15 03:14:26 kensmith Exp $");
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+#else
+# include "in_cksum.h"
+# include "slirp.h"
+#endif
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ * (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
+
+static const u_int32_t in_masks[] = {
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+};
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const void *buf, int len)
+{
+ const u_int32_t *lw = (const u_int32_t *) buf;
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (intptr_t) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (intptr_t) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((RTHCUINTPTR) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+ /*
+ * Read ahead ("prefilling") to start the load of the next cache
+ * line, then add up the current cache line and save the prefetched
+ * word for the next loop iteration.
+ */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
+ if ((clen ^ (intptr_t) addr) & 1)
+ sum += in_cksumdata(addr, mlen) << 8;
+ else
+ sum += in_cksumdata(addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(const struct ip *ip)
+{
+ u_int64_t sum = in_cksumdata(ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+ REDUCE16;
+ return (~sum & 0xffff);
+}
diff --git a/src/VBox/Devices/Network/slirp/bsd/arm64/include/in_cksum.h b/src/VBox/Devices/Network/slirp/bsd/arm64/include/in_cksum.h
new file mode 100644
index 00000000..47a4565e
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/arm64/include/in_cksum.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD: src/sys/amd64/include/in_cksum.h,v 1.5.20.1 2009/04/15 03:14:26 kensmith Exp $
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#ifndef VBOX
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+#include <sys/cdefs.h>
+#else
+# include "slirp.h"
+#endif
+
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+
+/*
+ * It is useful to have an Internet checksum routine which is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#ifdef __CC_SUPPORTS___INLINE
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+ int __tmpsum;
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256;
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+#else
+
+#define in_cksum_update(ip) \
+ do { \
+ int __tmpsum; \
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256; \
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16)); \
+ } while(0)
+
+#endif
+
+#if defined(_KERNEL) || defined(VBOX)
+u_int in_cksum_hdr(const struct ip *ip);
+u_short in_addword(u_short sum, u_short b);
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif
+
+#endif /* _MACHINE_IN_CKSUM_H_ */
diff --git a/src/VBox/Devices/Network/slirp/bsd/i386/Makefile.kup b/src/VBox/Devices/Network/slirp/bsd/i386/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/i386/Makefile.kup
diff --git a/src/VBox/Devices/Network/slirp/bsd/i386/in_cksum.c b/src/VBox/Devices/Network/slirp/bsd/i386/in_cksum.c
new file mode 100644
index 00000000..2be70636
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/i386/in_cksum.c
@@ -0,0 +1,499 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ */
+
+#ifndef VBOX
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/i386/i386/in_cksum.c,v 1.28.10.1.6.1 2009/04/15 03:14:26 kensmith Exp $");
+
+/*
+ * MPsafe: alfred
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+
+#include <machine/in_cksum.h>
+#else
+# include "in_cksum.h"
+# include "slirp.h"
+#endif
+
+/*
+ * Checksum routine for Internet Protocol family headers.
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ *
+ * This implementation is the 386 version.
+ */
+
+#undef ADDCARRY
+#define ADDCARRY(x) if ((x) > 0xffff) (x) -= 0xffff
+/*
+ * icc needs to be special cased here, as the asm code below results
+ * in broken code if compiled with icc.
+ */
+#if !defined(__GNUCLIKE_ASM) || defined(__INTEL_COMPILER)
+/* non gcc parts stolen from sys/alpha/alpha/in_cksum.c */
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
+#endif
+#define REDUCE {sum = (sum & 0xffff) + (sum >> 16); ADDCARRY(sum);}
+
+#if !defined(__GNUCLIKE_ASM) || defined(__INTEL_COMPILER)
+static const u_int32_t in_masks[] = {
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+};
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const u_int32_t *lw, int len)
+{
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (long) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (long) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((RTHCUINTPTR) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+ /*
+ * Read ahead ("prefilling") to start the load of the next cache
+ * line, then add up the current cache line and save the prefetched
+ * word for the next loop iteration.
+ */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
+ if ((clen ^ (long) addr) & 1)
+ sum += in_cksumdata((const u_int32_t *)addr, mlen) << 8;
+ else
+ sum += in_cksumdata((const u_int32_t *)addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(const struct ip *ip)
+{
+ u_int64_t sum = in_cksumdata((const u_int32_t *)ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+#else
+
+/*
+ * These asm statements require __volatile because they pass information
+ * via the condition codes. GCC does not currently provide a way to specify
+ * the condition codes as an input or output operand.
+ *
+ * The LOAD macro below is effectively a prefetch into cache. GCC will
+ * load the value into a register but will not use it. Since modern CPUs
+ * reorder operations, this will generally take place in parallel with
+ * other calculations.
+ */
+u_short
+in_cksum_skip(m, len, skip)
+ struct mbuf *m;
+ int len;
+ int skip;
+{
+ register u_short *w;
+ register unsigned sum = 0;
+ register int mlen = 0;
+ int byte_swapped = 0;
+ union { char c[2]; u_short s; } su;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ w = (u_short *)(mtod(m, u_char *) + skip);
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (;m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ w = mtod(m, u_short *);
+ if (mlen == -1) {
+ /*
+ * The first byte of this mbuf is the continuation
+ * of a word spanning between this mbuf and the
+ * last mbuf.
+ */
+
+ /* su.c[0] is already saved when scanning previous
+ * mbuf. sum was REDUCEd when we found mlen == -1
+ */
+ su.c[1] = *(u_char *)w;
+ sum += su.s;
+ w = (u_short *)((char *)w + 1);
+ mlen = m->m_len - 1;
+ len--;
+ } else
+ mlen = m->m_len;
+skip_start:
+ if (len < mlen)
+ mlen = len;
+ len -= mlen;
+ /*
+ * Force to long boundary so we do longword aligned
+ * memory operations
+ */
+ if (3 & (int) w) {
+ REDUCE;
+ if ((1 & (int) w) && (mlen > 0)) {
+ sum <<= 8;
+ su.c[0] = *(char *)w;
+ w = (u_short *)((char *)w + 1);
+ mlen--;
+ byte_swapped = 1;
+ }
+ if ((2 & (int) w) && (mlen >= 2)) {
+ sum += *w++;
+ mlen -= 2;
+ }
+ }
+ /*
+ * Advance to a 486 cache line boundary.
+ */
+ if (4 & (int) w && mlen >= 4) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[0])
+ );
+ w += 2;
+ mlen -= 4;
+ }
+ if (8 & (int) w && mlen >= 8) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1])
+ );
+ w += 4;
+ mlen -= 8;
+ }
+ /*
+ * Do as much of the checksum as possible 32 bits at a time.
+ * In fact, this loop is unrolled to make overhead from
+ * branches &c small.
+ */
+ mlen -= 1;
+ while ((mlen -= 32) >= 0) {
+ /*
+ * Add with carry 16 words and fold in the last
+ * carry by adding a 0 with carry.
+ *
+ * The early ADD(16) and the LOAD(32) are to load
+ * the next 2 cache lines in advance on 486's. The
+ * 486 has a penalty of 2 clock cycles for loading
+ * a cache line, plus whatever time the external
+ * memory takes to load the first word(s) addressed.
+ * These penalties are unavoidable. Subsequent
+ * accesses to a cache line being loaded (and to
+ * other external memory?) are delayed until the
+ * whole load finishes. These penalties are mostly
+ * avoided by not accessing external memory for
+ * 8 cycles after the ADD(16) and 12 cycles after
+ * the LOAD(32). The loop terminates when mlen
+ * is initially 33 (not 32) to guarantee that
+ * the LOAD(32) is within bounds.
+ */
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl %3, %0\n"
+ "adcl %4, %0\n"
+ "adcl %5, %0\n"
+ "mov %6, %%eax\n"
+ "adcl %7, %0\n"
+ "adcl %8, %0\n"
+ "adcl %9, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[4]),
+ "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1]),
+ "g" (((const u_int32_t *)w)[2]),
+ "g" (((const u_int32_t *)w)[3]),
+ "g" (((const u_int32_t *)w)[8]),
+ "g" (((const u_int32_t *)w)[5]),
+ "g" (((const u_int32_t *)w)[6]),
+ "g" (((const u_int32_t *)w)[7])
+ : "eax"
+ );
+ w += 16;
+ }
+ mlen += 32 + 1;
+ if (mlen >= 32) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl %3, %0\n"
+ "adcl %4, %0\n"
+ "adcl %5, %0\n"
+ "adcl %6, %0\n"
+ "adcl %7, %0\n"
+ "adcl %8, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[4]),
+ "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1]),
+ "g" (((const u_int32_t *)w)[2]),
+ "g" (((const u_int32_t *)w)[3]),
+ "g" (((const u_int32_t *)w)[5]),
+ "g" (((const u_int32_t *)w)[6]),
+ "g" (((const u_int32_t *)w)[7])
+ );
+ w += 16;
+ mlen -= 32;
+ }
+ if (mlen >= 16) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl %3, %0\n"
+ "adcl %4, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1]),
+ "g" (((const u_int32_t *)w)[2]),
+ "g" (((const u_int32_t *)w)[3])
+ );
+ w += 8;
+ mlen -= 16;
+ }
+ if (mlen >= 8) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1])
+ );
+ w += 4;
+ mlen -= 8;
+ }
+ if (mlen == 0 && byte_swapped == 0)
+ continue; /* worth 1% maybe ?? */
+ REDUCE;
+ while ((mlen -= 2) >= 0) {
+ sum += *w++;
+ }
+ if (byte_swapped) {
+ sum <<= 8;
+ byte_swapped = 0;
+ if (mlen == -1) {
+ su.c[1] = *(char *)w;
+ sum += su.s;
+ mlen = 0;
+ } else
+ mlen = -1;
+ } else if (mlen == -1)
+ /*
+ * This mbuf has an odd number of bytes.
+ * There could be a word split between
+ * this mbuf and the next mbuf.
+ * Save the last byte (to prepend to next mbuf).
+ */
+ su.c[0] = *(char *)w;
+ }
+
+ if (len)
+ printf("%s: out of data by %d\n", __func__, len);
+ if (mlen == -1) {
+ /* The last mbuf has an odd number of bytes. Follow the
+ standard (the odd byte is shifted left by 8 bits) */
+ su.c[1] = 0;
+ sum += su.s;
+ }
+ REDUCE;
+ return (~sum & 0xffff);
+}
+#endif
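[Note: the unrolled loops above rely on the x86 add-with-carry chain (addl/adcl) to keep the end-around carries of the one's-complement sum. A portable equivalent of a single "addl %1, %0; adcl $0, %0" pair, a sketch and not part of this file, looks like:]

    /* Add a 32-bit word into the running sum with end-around carry. */
    static uint32_t add32_carry(uint32_t sum, uint32_t w)
    {
        sum += w;
        if (sum < w)   /* unsigned overflow: fold the carry back in */
            sum++;
        return sum;
    }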
diff --git a/src/VBox/Devices/Network/slirp/bsd/i386/include/in_cksum.h b/src/VBox/Devices/Network/slirp/bsd/i386/include/in_cksum.h
new file mode 100644
index 00000000..0d20d193
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/i386/include/in_cksum.h
@@ -0,0 +1,145 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD: src/sys/i386/include/in_cksum.h,v 1.17.10.1.6.1 2009/04/15 03:14:26 kensmith Exp $
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#ifndef VBOX
+#ifndef _SYS_CDEFS_H_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+/*
+ * MP safe (alfred)
+ */
+
+#include <sys/cdefs.h>
+#else
+# include "slirp.h"
+#endif
+
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+
+/*
+ * It is useful to have an Internet checksum routine which is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#if defined(__GNUCLIKE_ASM) && !defined(__INTEL_COMPILER)
+static __inline u_int
+in_cksum_hdr(const struct ip *ip)
+{
+ register u_int sum = 0;
+
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl %3, %0\n"
+ "adcl %4, %0\n"
+ "adcl %5, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)ip)[0]),
+ "g" (((const u_int32_t *)ip)[1]),
+ "g" (((const u_int32_t *)ip)[2]),
+ "g" (((const u_int32_t *)ip)[3]),
+ "g" (((const u_int32_t *)ip)[4])
+ );
+ sum = (sum & 0xffff) + (sum >> 16);
+ if (sum > 0xffff)
+ sum -= 0xffff;
+
+ return ~sum & 0xffff;
+}
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+ int __tmpsum;
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256;
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+static __inline u_short
+in_addword(u_short sum, u_short b)
+{
+ /* __volatile is necessary because the condition codes are used. */
+ __asm __volatile (
+ "addw %1, %0\n"
+ "adcw $0, %0"
+ : "+r" (sum)
+ : "r" (b)
+ );
+ return (sum);
+}
+
+static __inline u_short
+in_pseudo(u_int sum, u_int b, u_int c)
+{
+ /* __volatile is necessary because the condition codes are used. */
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (b),
+ "g" (c)
+ );
+ sum = (sum & 0xffff) + (sum >> 16);
+ if (sum > 0xffff)
+ sum -= 0xffff;
+ return (sum);
+}
+
+#else
+#define in_cksum_update(ip) \
+ do { \
+ int __tmpsum; \
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256; \
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16)); \
+ } while(0)
+
+#endif
+
+#if defined(_KERNEL) || defined(VBOX)
+#if !defined(__GNUCLIKE_ASM) || defined(__INTEL_COMPILER)
+u_int in_cksum_hdr(const struct ip *ip);
+u_short in_addword(u_short sum, u_short b);
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+#endif
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif /* _KERNEL */
+
+#endif /* _MACHINE_IN_CKSUM_H_ */
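[Note: in_pseudo() above computes the folded sum of the IPv4 pseudo-header so callers can seed a transport checksum with it. A sketch of the conventional caller pattern; the names uh, ip, m, iphlen and ulen are assumed here for illustration and are not taken from this changeset, while struct ip, struct udphdr and struct mbuf come from the surrounding headers:]

    /* Seed the UDP checksum field with the pseudo-header sum; ulen is
     * the UDP header plus payload length in bytes. */
    uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
                           htons(ulen + IPPROTO_UDP));
    /* Checksum the datagram itself, skipping the IP header; because
     * uh_sum already holds the pseudo-header sum, the final complement
     * covers header, payload and pseudo-header together. */
    uh->uh_sum = in_cksum_skip(m, iphlen + ulen, iphlen);
    if (uh->uh_sum == 0)
        uh->uh_sum = 0xffff;   /* RFC 768: zero means "no checksum" */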
diff --git a/src/VBox/Devices/Network/slirp/bsd/kern/Makefile.kup b/src/VBox/Devices/Network/slirp/bsd/kern/Makefile.kup
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/kern/Makefile.kup
diff --git a/src/VBox/Devices/Network/slirp/bsd/kern/kern_mbuf.c b/src/VBox/Devices/Network/slirp/bsd/kern/kern_mbuf.c
new file mode 100644
index 00000000..c908e2fd
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/kern/kern_mbuf.c
@@ -0,0 +1,824 @@
+/*-
+ * Copyright (c) 2004, 2005,
+ * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef VBOX
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/kern/kern_mbuf.c,v 1.32.2.5.2.1 2009/04/15 03:14:26 kensmith Exp $");
+
+#include "opt_mac.h"
+#include "opt_param.h"
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+#include <sys/protosw.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+
+#include <security/mac/mac_framework.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/uma.h>
+#include <vm/uma_int.h>
+#include <vm/uma_dbg.h>
+#else
+# include <iprt/param.h>
+# include <slirp.h>
+# define IN_BSD
+# include "ext.h"
+#endif
+
+/*
+ * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
+ * Zones.
+ *
+ * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
+ * Zone. The Zone can be capped at kern.ipc.nmbclusters, if the
+ * administrator so desires.
+ *
+ * Mbufs are allocated from a UMA Master Zone called the Mbuf
+ * Zone.
+ *
+ * Additionally, FreeBSD provides a Packet Zone, which it
+ * configures as a Secondary Zone to the Mbuf Master Zone,
+ * thus sharing backend Slab kegs with the Mbuf Master Zone.
+ *
+ * Thus common-case allocations and locking are simplified:
+ *
+ * m_clget() m_getcl()
+ * | |
+ * | .------------>[(Packet Cache)] m_get(), m_gethdr()
+ * | | [ Packet ] |
+ * [(Cluster Cache)] [ Secondary ] [ (Mbuf Cache) ]
+ * [ Cluster Zone ] [ Zone ] [ Mbuf Master Zone ]
+ * | \________ |
+ * [ Cluster Keg ] \ /
+ * | [ Mbuf Keg ]
+ * [ Cluster Slabs ] |
+ * | [ Mbuf Slabs ]
+ * \____________(VM)_________________/
+ *
+ *
+ * Whenever an object is allocated with uma_zalloc() out of
+ * one of the Zones its _ctor_ function is executed. The same
+ * for any deallocation through uma_zfree() the _dtor_ function
+ * is executed.
+ *
+ * Caches are per-CPU and are filled from the Master Zone.
+ *
+ * Whenever an object is allocated from the underlying global
+ * memory pool it gets pre-initialized with the _zinit_ functions.
+ * When the Kegs are overfull, objects get decommissioned with
+ * _zfini_ functions and freed back to the global memory pool.
+ *
+ */
+
+#ifndef VBOX
+int nmbclusters; /* limits number of mbuf clusters */
+int nmbjumbop; /* limits number of page size jumbo clusters */
+int nmbjumbo9; /* limits number of 9k jumbo clusters */
+int nmbjumbo16; /* limits number of 16k jumbo clusters */
+struct mbstat mbstat;
+#endif
+
+/*
+ * tunable_mbinit() has to be run before init_maxsockets() thus
+ * the SYSINIT order below is SI_ORDER_MIDDLE while init_maxsockets()
+ * runs at SI_ORDER_ANY.
+ */
+static void
+tunable_mbinit(void *dummy)
+{
+#ifdef VBOX
+ PNATState pData = (PNATState)dummy;
+#endif
+ /* This has to be done before VM init. */
+ nmbclusters = 1024 + maxusers * 64;
+ nmbjumbop = nmbclusters / 2;
+ nmbjumbo9 = nmbjumbop / 2;
+ nmbjumbo16 = nmbjumbo9 / 2;
+ TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
+}
+SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
+
+#ifndef VBOX
+/* XXX: These should be tuneables. Can't change UMA limits on the fly. */
+static int
+sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
+{
+ int error, newnmbclusters;
+
+ newnmbclusters = nmbclusters;
+ error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
+ if (error == 0 && req->newptr) {
+ if (newnmbclusters > nmbclusters) {
+ nmbclusters = newnmbclusters;
+ uma_zone_set_max(zone_clust, nmbclusters);
+ EVENTHANDLER_INVOKE(nmbclusters_change);
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
+&nmbclusters, 0, sysctl_nmbclusters, "IU",
+ "Maximum number of mbuf clusters allowed");
+
+static int
+sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
+{
+ int error, newnmbjumbop;
+
+ newnmbjumbop = nmbjumbop;
+ error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
+ if (error == 0 && req->newptr) {
+ if (newnmbjumbop> nmbjumbop) {
+ nmbjumbop = newnmbjumbop;
+ uma_zone_set_max(zone_jumbop, nmbjumbop);
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
+&nmbjumbop, 0, sysctl_nmbjumbop, "IU",
+ "Maximum number of mbuf page size jumbo clusters allowed");
+
+
+static int
+sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
+{
+ int error, newnmbjumbo9;
+
+ newnmbjumbo9 = nmbjumbo9;
+ error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
+ if (error == 0 && req->newptr) {
+ if (newnmbjumbo9> nmbjumbo9) {
+ nmbjumbo9 = newnmbjumbo9;
+ uma_zone_set_max(zone_jumbo9, nmbjumbo9);
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
+&nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
+ "Maximum number of mbuf 9k jumbo clusters allowed");
+
+static int
+sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
+{
+ int error, newnmbjumbo16;
+
+ newnmbjumbo16 = nmbjumbo16;
+ error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
+ if (error == 0 && req->newptr) {
+ if (newnmbjumbo16> nmbjumbo16) {
+ nmbjumbo16 = newnmbjumbo16;
+ uma_zone_set_max(zone_jumbo16, nmbjumbo16);
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
+&nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
+ "Maximum number of mbuf 16k jumbo clusters allowed");
+
+
+
+SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
+ "Mbuf general information and statistics");
+
+/*
+ * Zones from which we allocate.
+ */
+uma_zone_t zone_mbuf;
+uma_zone_t zone_clust;
+uma_zone_t zone_pack;
+uma_zone_t zone_jumbop;
+uma_zone_t zone_jumbo9;
+uma_zone_t zone_jumbo16;
+uma_zone_t zone_ext_refcnt;
+
+/*
+ * Local prototypes.
+ */
+static int mb_ctor_mbuf(void *, int, void *, int);
+static int mb_ctor_clust(void *, int, void *, int);
+static int mb_ctor_pack(void *, int, void *, int);
+static void mb_dtor_mbuf(void *, int, void *);
+static void mb_dtor_clust(void *, int, void *);
+static void mb_dtor_pack(void *, int, void *);
+static int mb_zinit_pack(void *, int, int);
+static void mb_zfini_pack(void *, int);
+#else
+/*
+ * Local prototypes.
+ */
+static int mb_ctor_mbuf(PNATState, void *, int, void *, int);
+static int mb_ctor_clust(PNATState, void *, int, void *, int);
+static int mb_ctor_pack(PNATState, void *, int, void *, int);
+static void mb_dtor_mbuf(PNATState, void *, int, void *);
+static void mb_dtor_clust(PNATState, void *, int, void *);
+static void mb_dtor_pack(PNATState, void *, int, void *);
+static int mb_zinit_pack(PNATState, void *, int, int);
+static void mb_zfini_pack(PNATState, void *, int);
+#endif
+
+/*static void mb_reclaim(void *); - unused */
+#ifndef VBOX
+static void mbuf_init(void *);
+static void *mbuf_jumbo_alloc(uma_zone_t, int, u_int8_t *, int);
+static void mbuf_jumbo_free(void *, int, u_int8_t);
+#endif
+
+#ifndef VBOX
+static MALLOC_DEFINE(M_JUMBOFRAME, "jumboframes", "mbuf jumbo frame buffers");
+
+/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */
+CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
+#else
+#define uma_zcreate(a0, a1, a2, a3, a4, a5, a6, a7) \
+ uma_zcreate(pData, a0, a1, a2, a3, a4, a5, a6, a7)
+#endif
+
+/*
+ * Initialize FreeBSD Network buffer allocation.
+ */
+SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
+#ifndef VBOX
+static void
+#else
+void
+#endif
+mbuf_init(void *dummy)
+{
+
+ /*
+ * Configure UMA zones for Mbufs, Clusters, and Packets.
+ */
+#ifndef VBOX
+ zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
+ mb_ctor_mbuf, mb_dtor_mbuf,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ MSIZE - 1, UMA_ZONE_MAXBUCKET);
+
+ zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
+ mb_ctor_clust, mb_dtor_clust,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+#else /*!VBOX*/
+ PNATState pData = (PNATState)dummy;
+ tunable_mbinit(pData);
+ zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
+ mb_ctor_mbuf, mb_dtor_mbuf,
+ NULL, NULL,
+ MSIZE - 1, UMA_ZONE_MAXBUCKET);
+ if (nmbclusters > 0)
+ uma_zone_set_max(zone_mbuf, nmbclusters);
+
+ zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
+ mb_ctor_clust, mb_dtor_clust,
+ NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+#endif /*VBOX*/
+ if (nmbclusters > 0)
+ uma_zone_set_max(zone_clust, nmbclusters);
+
+ zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
+ mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);
+
+ /* Make jumbo frame zone too. Page size, 9k and 16k. */
+#ifndef VBOX
+ zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
+ mb_ctor_clust, mb_dtor_clust,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbjumbop > 0)
+ uma_zone_set_max(zone_jumbop, nmbjumbop);
+
+ zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
+ mb_ctor_clust, mb_dtor_clust,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbjumbo9 > 0)
+ uma_zone_set_max(zone_jumbo9, nmbjumbo9);
+ uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
+ uma_zone_set_freef(zone_jumbo9, mbuf_jumbo_free);
+
+ zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
+ mb_ctor_clust, mb_dtor_clust,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbjumbo16 > 0)
+ uma_zone_set_max(zone_jumbo16, nmbjumbo16);
+#else /*!VBOX*/
+ zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
+ mb_ctor_clust, mb_dtor_clust,
+ NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbjumbop > 0)
+ uma_zone_set_max(zone_jumbop, nmbjumbop);
+
+ zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
+ mb_ctor_clust, mb_dtor_clust,
+ NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbjumbo9 > 0)
+ uma_zone_set_max(zone_jumbo9, nmbjumbo9);
+
+ zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
+ mb_ctor_clust, mb_dtor_clust,
+ NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbjumbo16 > 0)
+ uma_zone_set_max(zone_jumbo16, nmbjumbo16);
+#endif /*VBOX*/
+
+ zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
+ NULL, NULL,
+ NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
+
+ /* uma_prealloc() goes here... */
+
+ /*
+ * Hook event handler for low-memory situation, used to
+ * drain protocols and push data back to the caches (UMA
+ * later pushes it back to VM).
+ */
+ EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
+ EVENTHANDLER_PRI_FIRST);
+
+ /*
+ * [Re]set counters and local statistics knobs.
+ * XXX Some of these should go and be replaced, but UMA stat
+ * gathering needs to be revised.
+ */
+ mbstat.m_mbufs = 0;
+ mbstat.m_mclusts = 0;
+ mbstat.m_drain = 0;
+ mbstat.m_msize = MSIZE;
+ mbstat.m_mclbytes = MCLBYTES;
+ mbstat.m_minclsize = MINCLSIZE;
+ mbstat.m_mlen = MLEN;
+ mbstat.m_mhlen = MHLEN;
+ mbstat.m_numtypes = MT_NTYPES;
+
+ mbstat.m_mcfail = mbstat.m_mpfail = 0;
+ mbstat.sf_iocnt = 0;
+ mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
+}
+
+#ifndef VBOX
+/*
+ * UMA backend page allocator for the jumbo frame zones.
+ *
+ * Allocates kernel virtual memory that is backed by contiguous physical
+ * pages.
+ */
+static void *
+mbuf_jumbo_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int fWait)
+{
+
+ /* Inform UMA that this allocator uses kernel_map/object. */
+ *flags = UMA_SLAB_KERNEL;
+ return (contigmalloc(bytes, M_JUMBOFRAME, fWait, (vm_paddr_t)0,
+ ~(vm_paddr_t)0, 1, 0));
+}
+
+/*
+ * UMA backend page deallocator for the jumbo frame zones.
+ */
+static void
+mbuf_jumbo_free(void *mem, int size, u_int8_t flags)
+{
+
+ contigfree(mem, size, M_JUMBOFRAME);
+}
+#endif
+
+/*
+ * Constructor for Mbuf master zone.
+ *
+ * The 'arg' pointer points to a mb_args structure which
+ * contains call-specific information required to support the
+ * mbuf allocation API. See mbuf.h.
+ */
+static int
+#ifndef VBOX
+mb_ctor_mbuf(void *mem, int size, void *arg, int how)
+#else
+mb_ctor_mbuf(PNATState pData, void *mem, int size, void *arg, int how)
+#endif
+{
+ struct mbuf *m;
+ struct mb_args *args;
+#ifdef MAC
+ int error;
+#endif
+ int flags;
+ short type;
+#ifdef VBOX
+ NOREF(pData);
+#endif
+
+#ifdef INVARIANTS
+ trash_ctor(mem, size, arg, how);
+#elif defined(VBOX)
+ NOREF(size);
+ NOREF(how);
+#endif
+ m = (struct mbuf *)mem;
+ args = (struct mb_args *)arg;
+ flags = args->flags;
+ type = args->type;
+
+ /*
+ * The mbuf is initialized later. The caller has the
+ * responsibility to set up any MAC labels too.
+ */
+ if (type == MT_NOINIT)
+ return (0);
+
+ m->m_next = NULL;
+ m->m_nextpkt = NULL;
+ m->m_len = 0;
+ m->m_flags = flags;
+ m->m_type = type;
+ if (flags & M_PKTHDR) {
+ m->m_data = m->m_pktdat;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.tso_segsz = 0;
+ m->m_pkthdr.ether_vtag = 0;
+ SLIST_INIT(&m->m_pkthdr.tags);
+#ifdef MAC
+ /* If the label init fails, fail the alloc */
+ error = mac_init_mbuf(m, how);
+ if (error)
+ return (error);
+#endif
+ } else
+ m->m_data = m->m_dat;
+ return (0);
+}
+
+/*
+ * The Mbuf master zone destructor.
+ */
+static void
+#ifndef VBOX
+mb_dtor_mbuf(void *mem, int size, void *arg)
+#else
+mb_dtor_mbuf(PNATState pData, void *mem, int size, void *arg)
+#endif
+{
+ struct mbuf *m;
+ uintptr_t flags;
+#ifdef VBOX
+ NOREF(pData);
+#endif
+
+ m = (struct mbuf *)mem;
+ flags = (uintptr_t)arg;
+
+ if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
+ m_tag_delete_chain(m, NULL);
+ KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
+ KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
+#ifdef INVARIANTS
+ trash_dtor(mem, size, arg);
+#elif defined(VBOX)
+ NOREF(size);
+ NOREF(arg);
+#endif
+}
+
+/*
+ * The Mbuf Packet zone destructor.
+ */
+static void
+#ifndef VBOX
+mb_dtor_pack(void *mem, int size, void *arg)
+#else
+mb_dtor_pack(PNATState pData, void *mem, int size, void *arg)
+#endif
+{
+ struct mbuf *m;
+
+ m = (struct mbuf *)mem;
+ if ((m->m_flags & M_PKTHDR) != 0)
+ m_tag_delete_chain(m, NULL);
+
+ /* Make sure we've got a clean cluster back. */
+ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
+ KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
+ KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
+ KASSERT(m->m_ext.ext_args == NULL, ("%s: ext_args != NULL", __func__));
+ KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
+ KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
+ KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
+#ifdef INVARIANTS
+ trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
+#elif defined(VBOX)
+ NOREF(size);
+ NOREF(arg);
+#endif
+ /*
+ * If there are processes blocked on zone_clust, waiting for pages to be freed up,
+ * cause them to be woken up by draining the packet zone. We are exposed to a race here
+ * (in the check for the UMA_ZFLAG_FULL) where we might miss the flag set, but that is
+ * deliberate. We don't want to acquire the zone lock for every mbuf free.
+ */
+ if (uma_zone_exhausted_nolock(zone_clust))
+ zone_drain(zone_pack);
+}
+
+/*
+ * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
+ *
+ * Here the 'arg' pointer points to the Mbuf which we
+ * are configuring cluster storage for. If 'arg' is
+ * empty we allocate just the cluster without setting
+ * the mbuf to it. See mbuf.h.
+ */
+static int
+#ifndef VBOX
+mb_ctor_clust(void *mem, int size, void *arg, int how)
+#else
+mb_ctor_clust(PNATState pData, void *mem, int size, void *arg, int how)
+#endif
+{
+ struct mbuf *m;
+ u_int *refcnt;
+ int type;
+ uma_zone_t zone;
+#ifdef VBOX
+ NOREF(how);
+#endif
+
+#ifdef INVARIANTS
+ trash_ctor(mem, size, arg, how);
+#elif defined(VBOX)
+ NOREF(how);
+#endif
+ switch (size) {
+ case MCLBYTES:
+ type = EXT_CLUSTER;
+ zone = zone_clust;
+ break;
+#if MJUMPAGESIZE != MCLBYTES
+ case MJUMPAGESIZE:
+ type = EXT_JUMBOP;
+ zone = zone_jumbop;
+ break;
+#endif
+ case MJUM9BYTES:
+ type = EXT_JUMBO9;
+ zone = zone_jumbo9;
+ break;
+ case MJUM16BYTES:
+ type = EXT_JUMBO16;
+ zone = zone_jumbo16;
+ break;
+ default:
+ panic("unknown cluster size");
+ break;
+ }
+
+ m = (struct mbuf *)arg;
+ refcnt = uma_find_refcnt(zone, mem);
+ *refcnt = 1;
+ if (m != NULL) {
+ m->m_ext.ext_buf = (caddr_t)mem;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_flags |= M_EXT;
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_args = NULL;
+ m->m_ext.ext_size = size;
+ m->m_ext.ext_type = type;
+ m->m_ext.ref_cnt = refcnt;
+ }
+
+ return (0);
+}
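+
+/*
+ * Usage sketch (illustrative, not part of this change): callers reach
+ * mb_ctor_clust() indirectly through m_clget() and friends, with the
+ * target mbuf, if any, passed through as 'arg':
+ *
+ *	struct mbuf *m = m_get(pData, M_DONTWAIT, MT_DATA);
+ *	if (m != NULL) {
+ *		m_clget(pData, m, M_DONTWAIT);
+ *		if ((m->m_flags & M_EXT) == 0)
+ *			m = m_free(pData, m);
+ *	}
+ *
+ * If M_EXT is still clear after m_clget() no cluster was available.
+ */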
+
+/*
+ * The Mbuf Cluster zone destructor.
+ */
+static void
+#ifndef VBOX
+mb_dtor_clust(void *mem, int size, void *arg)
+#else
+mb_dtor_clust(PNATState pData, void *mem, int size, void *arg)
+#endif
+{
+#ifdef INVARIANTS
+ uma_zone_t zone;
+
+ zone = m_getzone(size);
+ KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
+ ("%s: refcnt incorrect %u", __func__,
+ *(uma_find_refcnt(zone, mem))) );
+
+ trash_dtor(mem, size, arg);
+#elif defined(VBOX)
+ NOREF(pData);
+ NOREF(mem);
+ NOREF(size);
+ NOREF(arg);
+#endif
+}
+
+/*
+ * The Packet secondary zone's init routine, executed on the
+ * object's transition from mbuf keg slab to zone cache.
+ */
+static int
+#ifndef VBOX
+mb_zinit_pack(void *mem, int size, int how)
+#else
+mb_zinit_pack(PNATState pData, void *mem, int size, int how)
+#endif
+{
+ struct mbuf *m;
+
+ m = (struct mbuf *)mem; /* m is virgin. */
+ if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
+ m->m_ext.ext_buf == NULL)
+ return (ENOMEM);
+ m->m_ext.ext_type = EXT_PACKET; /* Override. */
+#ifdef INVARIANTS
+ trash_init(m->m_ext.ext_buf, MCLBYTES, how);
+#elif defined(VBOX)
+ NOREF(size);
+#endif
+ return (0);
+}
+
+/*
+ * The Packet secondary zone's fini routine, executed on the
+ * object's transition from zone cache to keg slab.
+ */
+static void
+#ifndef VBOX
+mb_zfini_pack(void *mem, int size)
+#else
+mb_zfini_pack(PNATState pData, void *mem, int size)
+#endif
+{
+ struct mbuf *m;
+
+ m = (struct mbuf *)mem;
+#ifdef INVARIANTS
+ trash_fini(m->m_ext.ext_buf, MCLBYTES);
+#endif
+ uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
+#ifdef INVARIANTS
+ trash_dtor(mem, size, NULL);
+#elif defined(VBOX)
+ NOREF(size);
+#endif
+}
+
+/*
+ * The "packet" keg constructor.
+ */
+static int
+#ifndef VBOX
+mb_ctor_pack(void *mem, int size, void *arg, int how)
+#else
+mb_ctor_pack(PNATState pData, void *mem, int size, void *arg, int how)
+#endif
+{
+ struct mbuf *m;
+ struct mb_args *args;
+#ifdef MAC
+ int error;
+#endif
+ int flags;
+ short type;
+#ifdef VBOX
+ NOREF(pData);
+ NOREF(size);
+#endif
+
+ m = (struct mbuf *)mem;
+ args = (struct mb_args *)arg;
+ flags = args->flags;
+ type = args->type;
+
+#ifdef INVARIANTS
+ trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
+#elif defined(VBOX)
+ NOREF(how);
+#endif
+ m->m_next = NULL;
+ m->m_nextpkt = NULL;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_len = 0;
+ m->m_flags = (flags | M_EXT);
+ m->m_type = type;
+
+ if (flags & M_PKTHDR) {
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.tso_segsz = 0;
+ m->m_pkthdr.ether_vtag = 0;
+ SLIST_INIT(&m->m_pkthdr.tags);
+#ifdef MAC
+ /* If the label init fails, fail the alloc */
+ error = mac_init_mbuf(m, how);
+ if (error)
+ return (error);
+#endif
+ }
+ /* m_ext is already initialized. */
+
+ return (0);
+}
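+
+/*
+ * Usage sketch (illustrative only): the packet zone backs m_getcl(),
+ * which hands out an mbuf with a cluster already attached, so this
+ * constructor only has to refresh the mbuf fields:
+ *
+ *	struct mbuf *m = m_getcl(pData, M_DONTWAIT, MT_DATA, M_PKTHDR);
+ */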
+
+#if 0 /* unused */
+/*
+ * This is the protocol drain routine.
+ *
+ * No locks should be held when this is called.  The drain routines
+ * presently have to acquire some locks, which raises the possibility
+ * of a lock order reversal.
+ */
+static void
+mb_reclaim(void *junk)
+{
+#ifndef VBOX
+ struct domain *dp;
+ struct protosw *pr;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
+ "mb_reclaim()");
+
+ for (dp = domains; dp != NULL; dp = dp->dom_next)
+ for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
+ if (pr->pr_drain != NULL)
+ (*pr->pr_drain)();
+#else
+ NOREF(junk);
+#endif
+}
+#endif /* unused */
diff --git a/src/VBox/Devices/Network/slirp/bsd/kern/subr_sbuf.c b/src/VBox/Devices/Network/slirp/bsd/kern/subr_sbuf.c
new file mode 100644
index 00000000..c2021f5b
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/kern/subr_sbuf.c
@@ -0,0 +1,594 @@
+/*-
+ * Copyright (c) 2000 Poul-Henning Kamp and Dag-Erling Coïdan Smørgrav
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VBOX
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/kern/subr_sbuf.c,v 1.30.8.1 2009/04/15 03:14:26 kensmith Exp $");
+
+#include <sys/param.h>
+
+#ifdef _KERNEL
+#include <sys/ctype.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <machine/stdarg.h>
+#else /* _KERNEL */
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#endif /* _KERNEL */
+
+#include <sys/sbuf.h>
+
+#ifdef _KERNEL
+static MALLOC_DEFINE(M_SBUF, "sbuf", "string buffers");
+#define SBMALLOC(size) malloc(size, M_SBUF, M_WAITOK)
+#define SBFREE(buf) free(buf, M_SBUF)
+#else /* _KERNEL */
+#define KASSERT(e, m)
+#define SBMALLOC(size) malloc(size)
+#define SBFREE(buf) free(buf)
+#define min(x,y) MIN(x,y)
+#endif /* _KERNEL */
+#else /* VBOX */
+# include <iprt/param.h>
+# include <iprt/ctype.h>
+# include <slirp.h>
+# define SBMALLOC(size) RTMemAlloc((size))
+# define SBFREE(buf) RTMemFree((buf))
+#endif
+
+/*
+ * Predicates
+ */
+#define SBUF_ISDYNAMIC(s) ((s)->s_flags & SBUF_DYNAMIC)
+#define SBUF_ISDYNSTRUCT(s) ((s)->s_flags & SBUF_DYNSTRUCT)
+#define SBUF_ISFINISHED(s) ((s)->s_flags & SBUF_FINISHED)
+#define SBUF_HASOVERFLOWED(s) ((s)->s_flags & SBUF_OVERFLOWED)
+#define SBUF_HASROOM(s) ((s)->s_len < (s)->s_size - 1)
+#define SBUF_FREESPACE(s) ((s)->s_size - (s)->s_len - 1)
+#define SBUF_CANEXTEND(s) ((s)->s_flags & SBUF_AUTOEXTEND)
+
+/*
+ * Set / clear flags
+ */
+#define SBUF_SETFLAG(s, f) do { (s)->s_flags |= (f); } while (0)
+#define SBUF_CLEARFLAG(s, f) do { (s)->s_flags &= ~(f); } while (0)
+
+#define SBUF_MINEXTENDSIZE 16 /* Should be power of 2. */
+#define SBUF_MAXEXTENDSIZE PAGE_SIZE
+#define SBUF_MAXEXTENDINCR PAGE_SIZE
+
+/*
+ * Debugging support
+ */
+#if defined(_KERNEL) && defined(INVARIANTS)
+static void
+_assert_sbuf_integrity(const char *fun, struct sbuf *s)
+{
+ KASSERT(s != NULL,
+ ("%s called with a NULL sbuf pointer", fun));
+ KASSERT(s->s_buf != NULL,
+ ("%s called with uninitialized or corrupt sbuf", fun));
+ KASSERT(s->s_len < s->s_size,
+ ("wrote past end of sbuf (%d >= %d)", s->s_len, s->s_size));
+}
+
+static void
+_assert_sbuf_state(const char *fun, struct sbuf *s, int state)
+{
+ KASSERT((s->s_flags & SBUF_FINISHED) == state,
+ ("%s called with %sfinished or corrupt sbuf", fun,
+ (state ? "un" : "")));
+}
+#define assert_sbuf_integrity(s) _assert_sbuf_integrity(__func__, (s))
+#define assert_sbuf_state(s, i) _assert_sbuf_state(__func__, (s), (i))
+#else /* _KERNEL && INVARIANTS */
+#define assert_sbuf_integrity(s) do { } while (0)
+#define assert_sbuf_state(s, i) do { } while (0)
+#endif /* _KERNEL && INVARIANTS */
+
+static int
+sbuf_extendsize(int size)
+{
+ int newsize;
+
+ newsize = SBUF_MINEXTENDSIZE;
+ while (newsize < size) {
+ if (newsize < (int)SBUF_MAXEXTENDSIZE)
+ newsize *= 2;
+ else
+ newsize += SBUF_MAXEXTENDINCR;
+ }
+
+ return (newsize);
+}
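+
+/*
+ * Worked example: sbuf_extendsize(70) doubles 16 -> 32 -> 64 -> 128
+ * and returns 128; once the size reaches SBUF_MAXEXTENDSIZE, growth
+ * continues linearly in SBUF_MAXEXTENDINCR steps.
+ */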
+
+/*
+ * Extend an sbuf.
+ */
+static int
+sbuf_extend(struct sbuf *s, int addlen)
+{
+ char *newbuf;
+ int newsize;
+
+ if (!SBUF_CANEXTEND(s))
+ return (-1);
+
+ newsize = sbuf_extendsize(s->s_size + addlen);
+ newbuf = (char *)SBMALLOC(newsize);
+ if (newbuf == NULL)
+ return (-1);
+ bcopy(s->s_buf, newbuf, s->s_size);
+ if (SBUF_ISDYNAMIC(s))
+ SBFREE(s->s_buf);
+ else
+ SBUF_SETFLAG(s, SBUF_DYNAMIC);
+ s->s_buf = newbuf;
+ s->s_size = newsize;
+ return (0);
+}
+
+/*
+ * Initialize an sbuf.
+ * If buf is non-NULL, it points to a static or already-allocated string
+ * big enough to hold at least length characters.
+ */
+struct sbuf *
+sbuf_new(struct sbuf *s, char *buf, int length, int flags)
+{
+ KASSERT(length >= 0,
+ ("attempt to create an sbuf of negative length (%d)", length));
+ KASSERT((flags & ~SBUF_USRFLAGMSK) == 0,
+ ("%s called with invalid flags", __func__));
+
+ flags &= SBUF_USRFLAGMSK;
+ if (s == NULL) {
+ s = (struct sbuf *)SBMALLOC(sizeof *s);
+ if (s == NULL)
+ return (NULL);
+ bzero(s, sizeof *s);
+ s->s_flags = flags;
+ SBUF_SETFLAG(s, SBUF_DYNSTRUCT);
+ } else {
+ bzero(s, sizeof *s);
+ s->s_flags = flags;
+ }
+ s->s_size = length;
+ if (buf) {
+ s->s_buf = buf;
+ return (s);
+ }
+ if (flags & SBUF_AUTOEXTEND)
+ s->s_size = sbuf_extendsize(s->s_size);
+ s->s_buf = (char *)SBMALLOC(s->s_size);
+ if (s->s_buf == NULL) {
+ if (SBUF_ISDYNSTRUCT(s))
+ SBFREE(s);
+ return (NULL);
+ }
+ SBUF_SETFLAG(s, SBUF_DYNAMIC);
+ return (s);
+}
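+
+/*
+ * Typical round trip (sketch, assuming an auto-extending sbuf;
+ * 'consume' and 'n' stand in for caller code):
+ *
+ *	struct sbuf *sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
+ *	if (sb != NULL) {
+ *		sbuf_printf(sb, "chain of %d mbufs", n);
+ *		sbuf_finish(sb);
+ *		consume(sbuf_data(sb), sbuf_len(sb));
+ *		sbuf_delete(sb);
+ *	}
+ *
+ * sbuf_data() is only valid once sbuf_finish() has terminated the
+ * buffer.
+ */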
+
+#ifdef _KERNEL
+/*
+ * Create an sbuf with uio data
+ */
+struct sbuf *
+sbuf_uionew(struct sbuf *s, struct uio *uio, int *error)
+{
+ KASSERT(uio != NULL,
+ ("%s called with NULL uio pointer", __func__));
+ KASSERT(error != NULL,
+ ("%s called with NULL error pointer", __func__));
+
+ s = sbuf_new(s, NULL, uio->uio_resid + 1, 0);
+ if (s == NULL) {
+ *error = ENOMEM;
+ return (NULL);
+ }
+ *error = uiomove(s->s_buf, uio->uio_resid, uio);
+ if (*error != 0) {
+ sbuf_delete(s);
+ return (NULL);
+ }
+ s->s_len = s->s_size - 1;
+ *error = 0;
+ return (s);
+}
+#endif
+
+/*
+ * Clear an sbuf and reset its position.
+ */
+void
+sbuf_clear(struct sbuf *s)
+{
+ assert_sbuf_integrity(s);
+ /* don't care if it's finished or not */
+
+ SBUF_CLEARFLAG(s, SBUF_FINISHED);
+ SBUF_CLEARFLAG(s, SBUF_OVERFLOWED);
+ s->s_len = 0;
+}
+
+/*
+ * Set the sbuf's end position to an arbitrary value.
+ * Effectively truncates the sbuf at the new position.
+ */
+int
+sbuf_setpos(struct sbuf *s, int pos)
+{
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ KASSERT(pos >= 0,
+ ("attempt to seek to a negative position (%d)", pos));
+ KASSERT(pos < s->s_size,
+ ("attempt to seek past end of sbuf (%d >= %d)", pos, s->s_size));
+
+ if (pos < 0 || pos > s->s_len)
+ return (-1);
+ s->s_len = pos;
+ return (0);
+}
+
+/*
+ * Append a byte string to an sbuf.
+ */
+int
+sbuf_bcat(struct sbuf *s, const void *buf, size_t len)
+{
+ const char *str = buf;
+
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ if (SBUF_HASOVERFLOWED(s))
+ return (-1);
+
+ for (; len; len--) {
+ if (!SBUF_HASROOM(s) && sbuf_extend(s, len) < 0)
+ break;
+ s->s_buf[s->s_len++] = *str++;
+ }
+ if (len) {
+ SBUF_SETFLAG(s, SBUF_OVERFLOWED);
+ return (-1);
+ }
+ return (0);
+}
+
+#ifdef _KERNEL
+/*
+ * Copy a byte string from userland into an sbuf.
+ */
+int
+sbuf_bcopyin(struct sbuf *s, const void *uaddr, size_t len)
+{
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ if (SBUF_HASOVERFLOWED(s))
+ return (-1);
+
+ if (len == 0)
+ return (0);
+ if (len > SBUF_FREESPACE(s)) {
+ sbuf_extend(s, len - SBUF_FREESPACE(s));
+ len = min(len, SBUF_FREESPACE(s));
+ }
+ if (copyin(uaddr, s->s_buf + s->s_len, len) != 0)
+ return (-1);
+ s->s_len += len;
+
+ return (0);
+}
+#endif
+
+/*
+ * Copy a byte string into an sbuf.
+ */
+int
+sbuf_bcpy(struct sbuf *s, const void *buf, size_t len)
+{
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ sbuf_clear(s);
+ return (sbuf_bcat(s, buf, len));
+}
+
+/*
+ * Append a string to an sbuf.
+ */
+int
+sbuf_cat(struct sbuf *s, const char *str)
+{
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ if (SBUF_HASOVERFLOWED(s))
+ return (-1);
+
+ while (*str) {
+ if (!SBUF_HASROOM(s) && sbuf_extend(s, strlen(str)) < 0)
+ break;
+ s->s_buf[s->s_len++] = *str++;
+ }
+ if (*str) {
+ SBUF_SETFLAG(s, SBUF_OVERFLOWED);
+ return (-1);
+ }
+ return (0);
+}
+
+#ifdef _KERNEL
+/*
+ * Append a string from userland to an sbuf.
+ */
+int
+sbuf_copyin(struct sbuf *s, const void *uaddr, size_t len)
+{
+ size_t done;
+
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ if (SBUF_HASOVERFLOWED(s))
+ return (-1);
+
+ if (len == 0)
+ len = SBUF_FREESPACE(s); /* XXX return 0? */
+ if (len > SBUF_FREESPACE(s)) {
+ sbuf_extend(s, len);
+ len = min(len, SBUF_FREESPACE(s));
+ }
+ switch (copyinstr(uaddr, s->s_buf + s->s_len, len + 1, &done)) {
+ case ENAMETOOLONG:
+ SBUF_SETFLAG(s, SBUF_OVERFLOWED);
+ RT_FALL_THRU();
+ case 0:
+ s->s_len += done - 1;
+ break;
+ default:
+ return (-1); /* XXX */
+ }
+
+ return (done);
+}
+#endif
+
+/*
+ * Copy a string into an sbuf.
+ */
+int
+sbuf_cpy(struct sbuf *s, const char *str)
+{
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ sbuf_clear(s);
+ return (sbuf_cat(s, str));
+}
+
+/*
+ * Format the given argument list and append the resulting string to an sbuf.
+ */
+int
+sbuf_vprintf(struct sbuf *s, const char *fmt, va_list ap)
+{
+ va_list ap_copy;
+ int len;
+
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ KASSERT(fmt != NULL,
+ ("%s called with a NULL format string", __func__));
+
+ if (SBUF_HASOVERFLOWED(s))
+ return (-1);
+
+ do {
+ va_copy(ap_copy, ap);
+#ifndef VBOX
+ len = vsnprintf(&s->s_buf[s->s_len], SBUF_FREESPACE(s) + 1,
+ fmt, ap_copy);
+#else
+ len = RTStrPrintfV(&s->s_buf[s->s_len], SBUF_FREESPACE(s) + 1,
+ fmt, ap_copy);
+#endif
+ va_end(ap_copy);
+ } while (len > SBUF_FREESPACE(s) &&
+ sbuf_extend(s, len - SBUF_FREESPACE(s)) == 0);
+
+ /*
+ * s->s_len is the length of the string, without the terminating nul.
+ * When updating s->s_len, we must subtract 1 from the length that
+ * we passed into vsnprintf() because that length includes the
+ * terminating nul.
+ *
+ * vsnprintf() returns the amount that would have been copied,
+ * given sufficient space, hence the min() calculation below.
+ */
+ s->s_len += min(len, SBUF_FREESPACE(s));
+ if (!SBUF_HASROOM(s) && !SBUF_CANEXTEND(s))
+ SBUF_SETFLAG(s, SBUF_OVERFLOWED);
+
+ KASSERT(s->s_len < s->s_size,
+ ("wrote past end of sbuf (%d >= %d)", s->s_len, s->s_size));
+
+ if (SBUF_HASOVERFLOWED(s))
+ return (-1);
+ return (0);
+}
+
+/*
+ * Format the given arguments and append the resulting string to an sbuf.
+ */
+int
+sbuf_printf(struct sbuf *s, const char *fmt, ...)
+{
+ va_list ap;
+ int result;
+
+ va_start(ap, fmt);
+ result = sbuf_vprintf(s, fmt, ap);
+ va_end(ap);
+ return(result);
+}
+
+/*
+ * Append a character to an sbuf.
+ */
+int
+sbuf_putc(struct sbuf *s, int c)
+{
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ if (SBUF_HASOVERFLOWED(s))
+ return (-1);
+
+ if (!SBUF_HASROOM(s) && sbuf_extend(s, 1) < 0) {
+ SBUF_SETFLAG(s, SBUF_OVERFLOWED);
+ return (-1);
+ }
+ if (c != '\0')
+ s->s_buf[s->s_len++] = c;
+ return (0);
+}
+
+/*
+ * Trim whitespace characters from end of an sbuf.
+ */
+int
+sbuf_trim(struct sbuf *s)
+{
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ if (SBUF_HASOVERFLOWED(s))
+ return (-1);
+
+#ifndef VBOX
+ while (s->s_len && isspace(s->s_buf[s->s_len-1]))
+ --s->s_len;
+#else
+ while (s->s_len && RT_C_IS_SPACE(s->s_buf[s->s_len-1]))
+ --s->s_len;
+#endif
+
+ return (0);
+}
+
+/*
+ * Check if an sbuf overflowed
+ */
+int
+sbuf_overflowed(struct sbuf *s)
+{
+ return SBUF_HASOVERFLOWED(s);
+}
+
+/*
+ * Finish off an sbuf.
+ */
+void
+sbuf_finish(struct sbuf *s)
+{
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, 0);
+
+ s->s_buf[s->s_len] = '\0';
+ SBUF_CLEARFLAG(s, SBUF_OVERFLOWED);
+ SBUF_SETFLAG(s, SBUF_FINISHED);
+}
+
+/*
+ * Return a pointer to the sbuf data.
+ */
+char *
+sbuf_data(struct sbuf *s)
+{
+ assert_sbuf_integrity(s);
+ assert_sbuf_state(s, SBUF_FINISHED);
+
+ return s->s_buf;
+}
+
+/*
+ * Return the length of the sbuf data.
+ */
+int
+sbuf_len(struct sbuf *s)
+{
+ assert_sbuf_integrity(s);
+ /* don't care if it's finished or not */
+
+ if (SBUF_HASOVERFLOWED(s))
+ return (-1);
+ return s->s_len;
+}
+
+/*
+ * Clear an sbuf, free its buffer if necessary.
+ */
+void
+sbuf_delete(struct sbuf *s)
+{
+ int isdyn;
+
+ assert_sbuf_integrity(s);
+ /* don't care if it's finished or not */
+
+ if (SBUF_ISDYNAMIC(s))
+ SBFREE(s->s_buf);
+ isdyn = SBUF_ISDYNSTRUCT(s);
+ bzero(s, sizeof *s);
+ if (isdyn)
+ SBFREE(s);
+}
+
+/*
+ * Check if an sbuf has been finished.
+ */
+int
+sbuf_done(struct sbuf *s)
+{
+
+ return(SBUF_ISFINISHED(s));
+}
diff --git a/src/VBox/Devices/Network/slirp/bsd/kern/uipc_mbuf.c b/src/VBox/Devices/Network/slirp/bsd/kern/uipc_mbuf.c
new file mode 100644
index 00000000..0af49faa
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/kern/uipc_mbuf.c
@@ -0,0 +1,2238 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
+ */
+
+#ifndef VBOX
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.174.2.3.2.1 2009/04/15 03:14:26 kensmith Exp $");
+
+#include "opt_mac.h"
+#include "opt_param.h"
+#include "opt_mbuf_stress_test.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/sysctl.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/uio.h>
+
+#include <security/mac/mac_framework.h>
+
+int max_linkhdr;
+#ifndef VBOX
+int max_protohdr;
+#endif
+int max_hdr;
+int max_datalen;
+#ifdef MBUF_STRESS_TEST
+int m_defragpackets;
+int m_defragbytes;
+int m_defraguseless;
+int m_defragfailure;
+int m_defragrandomfailures;
+#endif
+
+/*
+ * sysctl(8) exported objects
+ */
+SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
+ &max_linkhdr, 0, "Size of largest link layer header");
+SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
+ &max_protohdr, 0, "Size of largest protocol layer header");
+SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
+ &max_hdr, 0, "Size of largest link plus protocol header");
+SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
+ &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
+#ifdef MBUF_STRESS_TEST
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
+ &m_defragpackets, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
+ &m_defragbytes, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
+ &m_defraguseless, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
+ &m_defragfailure, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
+ &m_defragrandomfailures, 0, "");
+#endif
+#else /* VBOX */
+# include <iprt/asm.h>
+# include "slirp.h"
+# define atomic_fetchadd_int(var, val) (ASMAtomicAddU32((var), (val)))
+# define atomic_add_int(var, val) (ASMAtomicAddU32((var), (val)))
+#endif /* VBOX */
+
+/*
+ * Allocate a given length worth of mbufs and/or clusters (whatever fits
+ * best) and return a pointer to the top of the allocated chain.  If an
+ * existing mbuf chain is provided, the newly allocated chain is
+ * appended to it and the head of the combined (existing) chain is
+ * returned.
+ */
+struct mbuf *
+#ifndef VBOX
+m_getm2(struct mbuf *m, int len, int how, short type, int flags)
+#else
+m_getm2(PNATState pData, struct mbuf *m, int len, int how, short type, int flags)
+#endif
+{
+ struct mbuf *mb, *nm = NULL, *mtail = NULL;
+
+ KASSERT(len >= 0, ("%s: len is < 0", __func__));
+
+ /* Validate flags. */
+ flags &= (M_PKTHDR | M_EOR);
+
+ /* Packet header mbuf must be first in chain. */
+ if ((flags & M_PKTHDR) && m != NULL)
+ flags &= ~M_PKTHDR;
+
+ /* Loop and append maximum sized mbufs to the chain tail. */
+ while (len > 0) {
+#ifndef VBOX
+ if (len > MCLBYTES)
+ mb = m_getjcl(how, type, (flags & M_PKTHDR),
+ MJUMPAGESIZE);
+ else if (len >= MINCLSIZE)
+ mb = m_getcl(how, type, (flags & M_PKTHDR));
+ else if (flags & M_PKTHDR)
+ mb = m_gethdr(how, type);
+ else
+ mb = m_get(how, type);
+
+ /* Fail the whole operation if one mbuf can't be allocated. */
+ if (mb == NULL) {
+ if (nm != NULL)
+ m_freem(nm);
+ return (NULL);
+ }
+#else
+ if (len > MCLBYTES)
+ mb = m_getjcl(pData, how, type, (flags & M_PKTHDR),
+ MJUMPAGESIZE);
+ else if (len >= MINCLSIZE)
+ mb = m_getcl(pData, how, type, (flags & M_PKTHDR));
+ else if (flags & M_PKTHDR)
+ mb = m_gethdr(pData, how, type);
+ else
+ mb = m_get(pData, how, type);
+ /* Fail the whole operation if one mbuf can't be allocated. */
+ if (mb == NULL) {
+ if (nm != NULL)
+ m_freem(pData, nm);
+ return (NULL);
+ }
+#endif
+
+		/* Bookkeeping. */
+ len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
+ ((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
+ if (mtail != NULL)
+ mtail->m_next = mb;
+ else
+ nm = mb;
+ mtail = mb;
+ flags &= ~M_PKTHDR; /* Only valid on the first mbuf. */
+ }
+ if (flags & M_EOR)
+ mtail->m_flags |= M_EOR; /* Only valid on the last mbuf. */
+
+ /* If mbuf was supplied, append new chain to the end of it. */
+ if (m != NULL) {
+ for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
+ ;
+ mtail->m_next = nm;
+ mtail->m_flags &= ~M_EOR;
+ } else
+ m = nm;
+
+ return (m);
+}
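+
+/*
+ * Sketch: asking for more than MCLBYTES makes m_getm2() prefer
+ * jumbo-page clusters, falling back to regular clusters and plain
+ * mbufs as the remaining length shrinks:
+ *
+ *	struct mbuf *m = m_getm2(pData, NULL, 9000, M_DONTWAIT, MT_DATA,
+ *	    M_PKTHDR);
+ */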
+
+/*
+ * Free an entire chain of mbufs and associated external buffers, if
+ * applicable.
+ */
+void
+#ifndef VBOX
+m_freem(struct mbuf *mb)
+#else
+m_freem(PNATState pData, struct mbuf *mb)
+#endif
+{
+
+ while (mb != NULL)
+ mb = m_free(pData, mb);
+}
+
+/*-
+ * Configure a provided mbuf to refer to the provided external storage
+ * buffer and set up a reference count for said buffer.  If setting
+ * up the reference count fails, the M_EXT bit will not be set.  If
+ * successful, the M_EXT bit is set in the mbuf's flags.
+ *
+ * Arguments:
+ * mb The existing mbuf to which to attach the provided buffer.
+ * buf The address of the provided external storage buffer.
+ * size The size of the provided buffer.
+ * freef A pointer to a routine that is responsible for freeing the
+ * provided external storage buffer.
+ * args A pointer to an argument structure (of any type) to be passed
+ * to the provided freef routine (may be NULL).
+ * flags Any other flags to be passed to the provided mbuf.
+ * type The type that the external storage buffer should be
+ * labeled with.
+ *
+ * Returns:
+ * Nothing.
+ */
+void
+#ifndef VBOX
+m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
+ void (*freef)(void *, void *), void *args, int flags, int type)
+#else
+m_extadd(PNATState pData, struct mbuf *mb, caddr_t buf, u_int size,
+ void (*freef)(void *, void *), void *args, int flags, int type)
+#endif
+{
+ KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
+
+ if (type != EXT_EXTREF)
+ mb->m_ext.ref_cnt = (u_int *)uma_zalloc(zone_ext_refcnt, M_NOWAIT);
+ if (mb->m_ext.ref_cnt != NULL) {
+ *(mb->m_ext.ref_cnt) = 1;
+ mb->m_flags |= (M_EXT | flags);
+ mb->m_ext.ext_buf = buf;
+ mb->m_data = mb->m_ext.ext_buf;
+ mb->m_ext.ext_size = size;
+ mb->m_ext.ext_free = freef;
+ mb->m_ext.ext_args = args;
+ mb->m_ext.ext_type = type;
+ }
+}
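+
+/*
+ * Usage sketch (hypothetical caller code): with EXT_EXTREF the caller
+ * supplies both the reference-count storage and the free routine:
+ *
+ *	static u_int my_ref;
+ *	static void my_buf_free(void *buf, void *args) { RTMemFree(buf); }
+ *	...
+ *	mb->m_ext.ref_cnt = &my_ref;
+ *	m_extadd(pData, mb, (caddr_t)buf, bufsize, my_buf_free, NULL,
+ *	    0, EXT_EXTREF);
+ *
+ * If M_EXT is clear afterwards, the attach failed and the buffer is
+ * still owned by the caller.
+ */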
+
+/*
+ * Non-directly-exported function to clean up after mbufs with M_EXT
+ * storage attached to them if the reference count hits 1.
+ */
+void
+#ifndef VBOX
+mb_free_ext(struct mbuf *m)
+#else
+mb_free_ext(PNATState pData, struct mbuf *m)
+#endif
+{
+ int skipmbuf;
+
+ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
+ KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
+
+ /*
+ * check if the header is embedded in the cluster
+ */
+ skipmbuf = (m->m_flags & M_NOFREE);
+
+ /* Free attached storage if this mbuf is the only reference to it. */
+ if (*(m->m_ext.ref_cnt) == 1 ||
+ atomic_fetchadd_int(m->m_ext.ref_cnt, (uint32_t)-1) == 1) {
+ switch (m->m_ext.ext_type) {
+ case EXT_PACKET: /* The packet zone is special. */
+ if (*(m->m_ext.ref_cnt) == 0)
+ *(m->m_ext.ref_cnt) = 1;
+ uma_zfree(zone_pack, m);
+ return; /* Job done. */
+ case EXT_CLUSTER:
+ uma_zfree(zone_clust, m->m_ext.ext_buf);
+ break;
+ case EXT_JUMBOP:
+ uma_zfree(zone_jumbop, m->m_ext.ext_buf);
+ break;
+ case EXT_JUMBO9:
+ uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
+ break;
+ case EXT_JUMBO16:
+ uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
+ break;
+ case EXT_SFBUF:
+ case EXT_NET_DRV:
+ case EXT_MOD_TYPE:
+ case EXT_DISPOSABLE:
+#ifndef VBOX
+			/* This code is dead in the VBox port of the BSD mbufs (it
+			 * will probably be used for EXT_SBUFS some day).
+			 * @todo revisit this code once BSD sbufs are on trunk.
+			 */
+ *(m->m_ext.ref_cnt) = 0;
+ uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
+ m->m_ext.ref_cnt));
+#else
+ AssertMsgFailed(("unimplemented"));
+#endif
+ RT_FALL_THRU();
+ case EXT_EXTREF:
+ KASSERT(m->m_ext.ext_free != NULL,
+ ("%s: ext_free not set", __func__));
+ (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
+ m->m_ext.ext_args);
+ break;
+ default:
+ KASSERT(m->m_ext.ext_type == 0,
+ ("%s: unknown ext_type", __func__));
+ }
+ }
+ if (skipmbuf)
+ return;
+
+ /*
+ * Free this mbuf back to the mbuf zone with all m_ext
+ * information purged.
+ */
+ m->m_ext.ext_buf = NULL;
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_args = NULL;
+ m->m_ext.ref_cnt = NULL;
+ m->m_ext.ext_size = 0;
+ m->m_ext.ext_type = 0;
+ m->m_flags &= ~M_EXT;
+ uma_zfree(zone_mbuf, m);
+}
+
+/*
+ * Attach the cluster from *m to *n, set up m_ext in *n
+ * and bump the refcount of the cluster.
+ */
+static void
+mb_dupcl(struct mbuf *n, struct mbuf *m)
+{
+ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
+ KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
+ KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
+
+ if (*(m->m_ext.ref_cnt) == 1)
+ *(m->m_ext.ref_cnt) += 1;
+ else
+ atomic_add_int(m->m_ext.ref_cnt, 1);
+ n->m_ext.ext_buf = m->m_ext.ext_buf;
+ n->m_ext.ext_free = m->m_ext.ext_free;
+ n->m_ext.ext_args = m->m_ext.ext_args;
+ n->m_ext.ext_size = m->m_ext.ext_size;
+ n->m_ext.ref_cnt = m->m_ext.ref_cnt;
+ n->m_ext.ext_type = m->m_ext.ext_type;
+ n->m_flags |= M_EXT;
+}
+
+/*
+ * Clean up mbuf (chain) from any tags and packet headers.
+ * If "all" is set then the first mbuf in the chain will be
+ * cleaned too.
+ */
+void
+m_demote(struct mbuf *m0, int all)
+{
+ struct mbuf *m;
+
+ for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
+ if (m->m_flags & M_PKTHDR) {
+ m_tag_delete_chain(m, NULL);
+ m->m_flags &= ~M_PKTHDR;
+ bzero(&m->m_pkthdr, sizeof(struct pkthdr));
+ }
+ if (m->m_type == MT_HEADER)
+ m->m_type = MT_DATA;
+ if (m != m0 && m->m_nextpkt != NULL)
+ m->m_nextpkt = NULL;
+ m->m_flags = m->m_flags & (M_EXT|M_EOR|M_RDONLY|M_FREELIST);
+ }
+}
+
+/*
+ * Sanity checks on mbuf (chain) for use in KASSERT() and general
+ * debugging.
+ * Returns 0 or panics when bad and 1 on all tests passed.
+ * Sanitize, 0 to run M_SANITY_ACTION, 1 to garble things so they
+ * blow up later.
+ */
+int
+#ifndef VBOX
+m_sanity(struct mbuf *m0, int sanitize)
+#else
+m_sanity(PNATState pData, struct mbuf *m0, int sanitize)
+#endif
+{
+ struct mbuf *m;
+ caddr_t a, b;
+ int pktlen = 0;
+
+#ifdef INVARIANTS
+#define M_SANITY_ACTION(s) panic("mbuf %p: " s, m)
+#else
+#define M_SANITY_ACTION(s) printf("mbuf %p: " s, m)
+#endif
+
+ for (m = m0; m != NULL; m = m->m_next) {
+ /*
+ * Basic pointer checks. If any of these fails then some
+ * unrelated kernel memory before or after us is trashed.
+ * No way to recover from that.
+ */
+ a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
+ ((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
+ (caddr_t)(&m->m_dat)) );
+ b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
+ ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
+ if ((caddr_t)m->m_data < a)
+ M_SANITY_ACTION("m_data outside mbuf data range left");
+ if ((caddr_t)m->m_data > b)
+ M_SANITY_ACTION("m_data outside mbuf data range right");
+ if ((caddr_t)m->m_data + m->m_len > b)
+			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
+ if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.header) {
+ if ((caddr_t)m->m_pkthdr.header < a ||
+ (caddr_t)m->m_pkthdr.header > b)
+ M_SANITY_ACTION("m_pkthdr.header outside mbuf data range");
+ }
+
+ /* m->m_nextpkt may only be set on first mbuf in chain. */
+ if (m != m0 && m->m_nextpkt != NULL) {
+ if (sanitize) {
+#ifndef VBOX
+ m_freem(m->m_nextpkt);
+#else
+ m_freem(pData, m->m_nextpkt);
+#endif
+ m->m_nextpkt = (struct mbuf *)(uintptr_t)UINT32_C(0xDEADC0DE);
+ } else
+ M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
+ }
+
+ /* packet length (not mbuf length!) calculation */
+ if (m0->m_flags & M_PKTHDR)
+ pktlen += m->m_len;
+
+ /* m_tags may only be attached to first mbuf in chain. */
+ if (m != m0 && m->m_flags & M_PKTHDR &&
+ !SLIST_EMPTY(&m->m_pkthdr.tags)) {
+ if (sanitize) {
+ m_tag_delete_chain(m, NULL);
+ /* put in 0xDEADC0DE perhaps? */
+ } else
+ M_SANITY_ACTION("m_tags on in-chain mbuf");
+ }
+
+ /* M_PKTHDR may only be set on first mbuf in chain */
+ if (m != m0 && m->m_flags & M_PKTHDR) {
+ if (sanitize) {
+ bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
+ m->m_flags &= ~M_PKTHDR;
+				/* put in 0xDEADC0DE and leave hdr flag in */
+ } else
+ M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
+ }
+ }
+ m = m0;
+ if (pktlen && pktlen != m->m_pkthdr.len) {
+ if (sanitize)
+ m->m_pkthdr.len = 0;
+ else
+ M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
+ }
+ return 1;
+
+#undef M_SANITY_ACTION
+}
+
+
+/*
+ * "Move" mbuf pkthdr from "from" to "to".
+ * "from" must have M_PKTHDR set, and "to" must be empty.
+ */
+void
+m_move_pkthdr(struct mbuf *to, struct mbuf *from)
+{
+
+#if 0
+ /* see below for why these are not enabled */
+ M_ASSERTPKTHDR(to);
+ /* Note: with MAC, this may not be a good assertion. */
+ KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
+ ("m_move_pkthdr: to has tags"));
+#endif
+#ifdef MAC
+ /*
+ * XXXMAC: It could be this should also occur for non-MAC?
+ */
+ if (to->m_flags & M_PKTHDR)
+ m_tag_delete_chain(to, NULL);
+#endif
+ to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
+ if ((to->m_flags & M_EXT) == 0)
+ to->m_data = to->m_pktdat;
+ to->m_pkthdr = from->m_pkthdr; /* especially tags */
+ SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
+ from->m_flags &= ~M_PKTHDR;
+}
+
+/*
+ * Duplicate "from"'s mbuf pkthdr in "to".
+ * "from" must have M_PKTHDR set, and "to" must be empty.
+ * In particular, this does a deep copy of the packet tags.
+ */
+int
+m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
+{
+
+#if 0
+ /*
+ * The mbuf allocator only initializes the pkthdr
+ * when the mbuf is allocated with MGETHDR. Many users
+ * (e.g. m_copy*, m_prepend) use MGET and then
+ * smash the pkthdr as needed causing these
+ * assertions to trip. For now just disable them.
+ */
+ M_ASSERTPKTHDR(to);
+ /* Note: with MAC, this may not be a good assertion. */
+ KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
+#endif
+ MBUF_CHECKSLEEP(how);
+#ifdef MAC
+ if (to->m_flags & M_PKTHDR)
+ m_tag_delete_chain(to, NULL);
+#endif
+ to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
+ if ((to->m_flags & M_EXT) == 0)
+ to->m_data = to->m_pktdat;
+ to->m_pkthdr = from->m_pkthdr;
+ SLIST_INIT(&to->m_pkthdr.tags);
+ return (m_tag_copy_chain(to, from, MBTOM(how)));
+}
+
+/*
+ * Lesser-used path for M_PREPEND:
+ * allocate new mbuf to prepend to chain,
+ * copy junk along.
+ */
+struct mbuf *
+#ifndef VBOX
+m_prepend(struct mbuf *m, int len, int how)
+#else
+m_prepend(PNATState pData, struct mbuf *m, int len, int how)
+#endif
+{
+ struct mbuf *mn;
+
+ if (m->m_flags & M_PKTHDR)
+ MGETHDR(mn, how, m->m_type);
+ else
+ MGET(mn, how, m->m_type);
+ if (mn == NULL) {
+#ifndef VBOX
+ m_freem(m);
+#else
+ m_freem(pData, m);
+#endif
+ return (NULL);
+ }
+ if (m->m_flags & M_PKTHDR)
+ M_MOVE_PKTHDR(mn, m);
+ mn->m_next = m;
+ m = mn;
+ if(m->m_flags & M_PKTHDR) {
+ if (len < MHLEN)
+ MH_ALIGN(m, len);
+ } else {
+ if (len < MLEN)
+ M_ALIGN(m, len);
+ }
+ m->m_len = len;
+ return (m);
+}
+
+/*
+ * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
+ * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
+ * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
+ * Note that the copy is read-only, because clusters are not copied,
+ * only their reference counts are incremented.
+ */
+struct mbuf *
+#ifndef VBOX
+m_copym(struct mbuf *m, int off0, int len, int fWait)
+#else
+m_copym(PNATState pData, struct mbuf *m, int off0, int len, int fWait)
+#endif
+{
+ struct mbuf *n, **np;
+ int off = off0;
+ struct mbuf *top;
+ int copyhdr = 0;
+
+ KASSERT(off >= 0, ("m_copym, negative off %d", off));
+ KASSERT(len >= 0, ("m_copym, negative len %d", len));
+ MBUF_CHECKSLEEP(fWait);
+ if (off == 0 && m->m_flags & M_PKTHDR)
+ copyhdr = 1;
+ while (off > 0) {
+ KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ np = &top;
+ top = 0;
+ while (len > 0) {
+ if (m == NULL) {
+ KASSERT(len == M_COPYALL,
+ ("m_copym, length > size of mbuf chain"));
+ break;
+ }
+ if (copyhdr)
+ MGETHDR(n, fWait, m->m_type);
+ else
+ MGET(n, fWait, m->m_type);
+ *np = n;
+ if (n == NULL)
+ goto nospace;
+ if (copyhdr) {
+ if (!m_dup_pkthdr(n, m, fWait))
+ goto nospace;
+ if (len == M_COPYALL)
+ n->m_pkthdr.len -= off0;
+ else
+ n->m_pkthdr.len = len;
+ copyhdr = 0;
+ }
+ n->m_len = min(len, m->m_len - off);
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data + off;
+ mb_dupcl(n, m);
+ } else
+ bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
+ (u_int)n->m_len);
+ if (len != M_COPYALL)
+ len -= n->m_len;
+ off = 0;
+ m = m->m_next;
+ np = &n->m_next;
+ }
+ if (top == NULL)
+ mbstat.m_mcfail++; /* XXX: No consistency. */
+
+ return (top);
+nospace:
+#ifndef VBOX
+ m_freem(top);
+#else
+ m_freem(pData, top);
+#endif
+ mbstat.m_mcfail++; /* XXX: No consistency. */
+ return (NULL);
+}
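+
+/*
+ * Sketch: a read-only copy of a whole packet shares the clusters of
+ * the original, so it is cheap even for large chains:
+ *
+ *	struct mbuf *copy = m_copym(pData, m, 0, M_COPYALL, M_DONTWAIT);
+ *
+ * Check M_WRITABLE() before modifying such a copy in place; m_dup()
+ * below makes a deep, writable copy instead.
+ */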
+
+/*
+ * Returns mbuf chain with new head for the prepending case.
+ * Copies from mbuf (chain) n from off for len to mbuf (chain) m
+ * either prepending or appending the data.
+ * The resulting mbuf (chain) m is fully writeable.
+ * m is destination (is made writeable)
+ * n is source, off is offset in source, len is len from offset
+ * dir, 0 append, 1 prepend
+ * how, wait or nowait
+ */
+
+static int
+m_bcopyxxx(void *s, void *t, u_int len)
+{
+ bcopy(s, t, (size_t)len);
+ return 0;
+}
+
+struct mbuf *
+#ifndef VBOX
+m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
+ int prep, int how)
+#else
+m_copymdata(PNATState pData, struct mbuf *m, struct mbuf *n, int off, int len,
+ int prep, int how)
+#endif
+{
+ struct mbuf *mm, *x, *z, *prev = NULL;
+ caddr_t p;
+ int i, nlen = 0;
+ caddr_t buf[MLEN];
+
+ KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
+ KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
+ KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
+ KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));
+
+ mm = m;
+ if (!prep) {
+ while(mm->m_next) {
+ prev = mm;
+ mm = mm->m_next;
+ }
+ }
+ for (z = n; z != NULL; z = z->m_next)
+ nlen += z->m_len;
+ if (len == M_COPYALL)
+ len = nlen - off;
+ if (off + len > nlen || len < 1)
+ return NULL;
+
+ if (!M_WRITABLE(mm)) {
+ /* XXX: Use proper m_xxx function instead. */
+#ifndef VBOX
+ x = m_getcl(how, MT_DATA, mm->m_flags);
+#else
+ x = m_getcl(pData, how, MT_DATA, mm->m_flags);
+#endif
+ if (x == NULL)
+ return NULL;
+ bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
+ p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
+ x->m_data = p;
+ mm->m_next = NULL;
+ if (mm != m)
+ prev->m_next = x;
+#ifndef VBOX
+ m_free(mm);
+#else
+ m_free(pData, mm);
+#endif
+ mm = x;
+ }
+
+ /*
+	 * Append/prepend the data, allocating mbufs as necessary.
+ */
+ /* Shortcut if enough free space in first/last mbuf. */
+ if (!prep && M_TRAILINGSPACE(mm) >= len) {
+ m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
+ mm->m_len);
+ mm->m_len += len;
+ mm->m_pkthdr.len += len;
+ return m;
+ }
+ if (prep && M_LEADINGSPACE(mm) >= len) {
+ mm->m_data = mtod(mm, caddr_t) - len;
+ m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
+ mm->m_len += len;
+ mm->m_pkthdr.len += len;
+ return mm;
+ }
+
+ /* Expand first/last mbuf to cluster if possible. */
+ if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
+ bcopy(mm->m_data, &buf, mm->m_len);
+#ifndef VBOX
+ m_clget(mm, how);
+#else
+ m_clget(pData, mm, how);
+#endif
+ if (!(mm->m_flags & M_EXT))
+ return NULL;
+ bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
+ mm->m_data = mm->m_ext.ext_buf;
+ mm->m_pkthdr.header = NULL;
+ }
+ if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
+ bcopy(mm->m_data, &buf, mm->m_len);
+#ifndef VBOX
+ m_clget(mm, how);
+#else
+ m_clget(pData, mm, how);
+#endif
+ if (!(mm->m_flags & M_EXT))
+ return NULL;
+ bcopy(&buf, (caddr_t *)mm->m_ext.ext_buf +
+ mm->m_ext.ext_size - mm->m_len, mm->m_len);
+ mm->m_data = (caddr_t)mm->m_ext.ext_buf +
+ mm->m_ext.ext_size - mm->m_len;
+ mm->m_pkthdr.header = NULL;
+ }
+
+ /* Append/prepend as many mbuf (clusters) as necessary to fit len. */
+ if (!prep && len > M_TRAILINGSPACE(mm)) {
+ if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
+ return NULL;
+ }
+ if (prep && len > M_LEADINGSPACE(mm)) {
+ if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
+ return NULL;
+ i = 0;
+ for (x = z; x != NULL; x = x->m_next) {
+ i += x->m_flags & M_EXT ? x->m_ext.ext_size :
+ (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
+ if (!x->m_next)
+ break;
+ }
+ z->m_data += i - len;
+ m_move_pkthdr(mm, z);
+ x->m_next = mm;
+ mm = z;
+ }
+
+ /* Seek to start position in source mbuf. Optimization for long chains. */
+ while (off > 0) {
+ if (off < n->m_len)
+ break;
+ off -= n->m_len;
+ n = n->m_next;
+ }
+
+ /* Copy data into target mbuf. */
+ z = mm;
+ while (len > 0) {
+ KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
+ i = M_TRAILINGSPACE(z);
+ m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
+ z->m_len += i;
+ /* fixup pkthdr.len if necessary */
+ if ((prep ? mm : m)->m_flags & M_PKTHDR)
+ (prep ? mm : m)->m_pkthdr.len += i;
+ off += i;
+ len -= i;
+ z = z->m_next;
+ }
+ return (prep ? mm : m);
+}
+
+/*
+ * Copy an entire packet, including header (which must be present).
+ * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
+ * Note that the copy is read-only, because clusters are not copied,
+ * only their reference counts are incremented.
+ * Preserve alignment of the first mbuf so if the creator has left
+ * some room at the beginning (e.g. for inserting protocol headers)
+ * the copies still have the room available.
+ */
+struct mbuf *
+#ifndef VBOX
+m_copypacket(struct mbuf *m, int how)
+#else
+m_copypacket(PNATState pData, struct mbuf *m, int how)
+#endif
+{
+ struct mbuf *top, *n, *o;
+
+ MBUF_CHECKSLEEP(how);
+ MGET(n, how, m->m_type);
+ top = n;
+ if (n == NULL)
+ goto nospace;
+
+ if (!m_dup_pkthdr(n, m, how))
+ goto nospace;
+ n->m_len = m->m_len;
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data;
+ mb_dupcl(n, m);
+ } else {
+ n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
+ bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
+ }
+
+ m = m->m_next;
+ while (m) {
+ MGET(o, how, m->m_type);
+ if (o == NULL)
+ goto nospace;
+
+ n->m_next = o;
+ n = n->m_next;
+
+ n->m_len = m->m_len;
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data;
+ mb_dupcl(n, m);
+ } else {
+ bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
+ }
+
+ m = m->m_next;
+ }
+ return top;
+nospace:
+#ifndef VBOX
+ m_freem(top);
+#else
+ m_freem(pData, top);
+#endif
+ mbstat.m_mcfail++; /* XXX: No consistency. */
+ return (NULL);
+}
+
+/*
+ * Copy data from an mbuf chain starting "off" bytes from the beginning,
+ * continuing for "len" bytes, into the indicated buffer.
+ */
+void
+m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
+{
+ u_int count;
+
+ KASSERT(off >= 0, ("m_copydata, negative off %d", off));
+ KASSERT(len >= 0, ("m_copydata, negative len %d", len));
+ while (off > 0) {
+ KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ while (len > 0) {
+ KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
+ count = min(m->m_len - off, len);
+ bcopy(mtod(m, caddr_t) + off, cp, count);
+ len -= count;
+ cp += count;
+ off = 0;
+ m = m->m_next;
+ }
+}
+
+/*
+ * Copy a packet header mbuf chain into a completely new chain, including
+ * copying any mbuf clusters. Use this instead of m_copypacket() when
+ * you need a writable copy of an mbuf chain.
+ */
+struct mbuf *
+#ifndef VBOX
+m_dup(struct mbuf *m, int how)
+#else
+m_dup(PNATState pData, struct mbuf *m, int how)
+#endif
+{
+ struct mbuf **p, *top = NULL;
+ int remain, moff, nsize;
+
+ MBUF_CHECKSLEEP(how);
+ /* Sanity check */
+ if (m == NULL)
+ return (NULL);
+ M_ASSERTPKTHDR(m);
+
+ /* While there's more data, get a new mbuf, tack it on, and fill it */
+ remain = m->m_pkthdr.len;
+ moff = 0;
+ p = &top;
+ while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
+ struct mbuf *n;
+
+ /* Get the next new mbuf */
+ if (remain >= MINCLSIZE) {
+#ifndef VBOX
+ n = m_getcl(how, m->m_type, 0);
+#else
+ n = m_getcl(pData, how, m->m_type, 0);
+#endif
+ nsize = MCLBYTES;
+ } else {
+#ifndef VBOX
+ n = m_get(how, m->m_type);
+#else
+ n = m_get(pData, how, m->m_type);
+#endif
+ nsize = MLEN;
+ }
+ if (n == NULL)
+ goto nospace;
+
+ if (top == NULL) { /* First one, must be PKTHDR */
+ if (!m_dup_pkthdr(n, m, how)) {
+#ifndef VBOX
+ m_free(n);
+#else
+ m_free(pData, n);
+#endif
+ goto nospace;
+ }
+ if ((n->m_flags & M_EXT) == 0)
+ nsize = MHLEN;
+ }
+ n->m_len = 0;
+
+ /* Link it into the new chain */
+ *p = n;
+ p = &n->m_next;
+
+ /* Copy data from original mbuf(s) into new mbuf */
+ while (n->m_len < nsize && m != NULL) {
+ int chunk = min(nsize - n->m_len, m->m_len - moff);
+
+ bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
+ moff += chunk;
+ n->m_len += chunk;
+ remain -= chunk;
+ if (moff == m->m_len) {
+ m = m->m_next;
+ moff = 0;
+ }
+ }
+
+ /* Check correct total mbuf length */
+ KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
+ ("%s: bogus m_pkthdr.len", __func__));
+ }
+ return (top);
+
+nospace:
+#ifndef VBOX
+ m_freem(top);
+#else
+ m_freem(pData, top);
+#endif
+ mbstat.m_mcfail++; /* XXX: No consistency. */
+ return (NULL);
+}
+
+/*
+ * Concatenate mbuf chain n to m.
+ * Both chains must be of the same type (e.g. MT_DATA).
+ * Any m_pkthdr is not updated.
+ */
+void
+#ifndef VBOX
+m_cat(struct mbuf *m, struct mbuf *n)
+#else
+m_cat(PNATState pData, struct mbuf *m, struct mbuf *n)
+#endif
+{
+ while (m->m_next)
+ m = m->m_next;
+ while (n) {
+ if (m->m_flags & M_EXT ||
+ m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
+ /* just join the two chains */
+ m->m_next = n;
+ return;
+ }
+ /* splat the data from one into the other */
+ bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
+ (u_int)n->m_len);
+ m->m_len += n->m_len;
+#ifndef VBOX
+ n = m_free(n);
+#else
+ n = m_free(pData, n);
+#endif
+ }
+}
+
+void
+#ifndef VBOX
+m_adj(struct mbuf *mp, int req_len)
+#else
+m_adj(PNATState pData, struct mbuf *mp, int req_len)
+#endif
+{
+ int len = req_len;
+ struct mbuf *m;
+ int count;
+
+ if ((m = mp) == NULL)
+ return;
+ if (len >= 0) {
+ /*
+ * Trim from head.
+ */
+ while (m != NULL && len > 0) {
+ if (m->m_len <= len) {
+ len -= m->m_len;
+ m->m_len = 0;
+ m = m->m_next;
+ } else {
+ m->m_len -= len;
+ m->m_data += len;
+ len = 0;
+ }
+ }
+ m = mp;
+ if (mp->m_flags & M_PKTHDR)
+ m->m_pkthdr.len -= (req_len - len);
+ } else {
+ /*
+ * Trim from tail. Scan the mbuf chain,
+ * calculating its length and finding the last mbuf.
+ * If the adjustment only affects this mbuf, then just
+ * adjust and return. Otherwise, rescan and truncate
+ * after the remaining size.
+ */
+ len = -len;
+ count = 0;
+ for (;;) {
+ count += m->m_len;
+ if (m->m_next == (struct mbuf *)0)
+ break;
+ m = m->m_next;
+ }
+ if (m->m_len > len || (m->m_len == len && m == mp)) {
+ m->m_len -= len;
+ if (mp->m_flags & M_PKTHDR)
+ mp->m_pkthdr.len -= len;
+ return;
+ }
+ count -= len;
+ if (count < 0)
+ count = 0;
+ /*
+ * Correct length for chain is "count".
+ * Find the mbuf with last data, adjust its length,
+ * and toss data from remaining mbufs on chain.
+ */
+ m = mp;
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.len = count;
+ for (; m; m = m->m_next) {
+ if (m->m_len >= count) {
+ m->m_len = count;
+ if (m->m_next != NULL) {
+#ifndef VBOX
+ m_freem(m->m_next);
+#else
+ m_freem(pData, m->m_next);
+#endif
+ m->m_next = NULL;
+ }
+ break;
+ }
+ count -= m->m_len;
+ }
+ }
+}
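+
+/*
+ * m_adj() trims req_len bytes from the head of the chain when req_len
+ * is positive and from the tail when it is negative, e.g. (sketch):
+ *
+ *	m_adj(pData, m, 14);	(drop a 14 byte link-layer header)
+ *	m_adj(pData, m, -4);	(drop a 4 byte trailer)
+ */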
+
+/*
+ * Rearrange an mbuf chain so that len bytes are contiguous
+ * and in the data area of an mbuf (so that mtod and dtom
+ * will work for a structure of size len).  Returns the resulting
+ * mbuf chain on success, frees it and returns NULL on failure.
+ * If there is room, it will add up to max_protohdr-len extra bytes to the
+ * contiguous region in an attempt to avoid being called next time.
+ */
+struct mbuf *
+#ifndef VBOX
+m_pullup(struct mbuf *n, int len)
+#else
+m_pullup(PNATState pData, struct mbuf *n, int len)
+#endif
+{
+ struct mbuf *m;
+ int count;
+ int space;
+
+ /*
+ * If first mbuf has no cluster, and has room for len bytes
+ * without shifting current data, pullup into it,
+ * otherwise allocate a new mbuf to prepend to the chain.
+ */
+ if ((n->m_flags & M_EXT) == 0 &&
+ n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
+ if (n->m_len >= len)
+ return (n);
+ m = n;
+ n = n->m_next;
+ len -= m->m_len;
+ } else {
+ if (len > MHLEN)
+ goto bad;
+ MGET(m, M_DONTWAIT, n->m_type);
+ if (m == NULL)
+ goto bad;
+ m->m_len = 0;
+ if (n->m_flags & M_PKTHDR)
+ M_MOVE_PKTHDR(m, n);
+ }
+ space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
+ do {
+ count = min(min(max(len, max_protohdr), space), n->m_len);
+ bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
+ (u_int)count);
+ len -= count;
+ m->m_len += count;
+ n->m_len -= count;
+ space -= count;
+ if (n->m_len)
+ n->m_data += count;
+ else
+#ifndef VBOX
+ n = m_free(n);
+#else
+ n = m_free(pData, n);
+#endif
+ } while (len > 0 && n);
+ if (len > 0) {
+#ifndef VBOX
+ (void) m_free(m);
+#else
+ (void) m_free(pData, m);
+#endif
+ goto bad;
+ }
+ m->m_next = n;
+ return (m);
+bad:
+#ifndef VBOX
+ m_freem(n);
+#else
+ m_freem(pData, n);
+#endif
+ mbstat.m_mpfail++; /* XXX: No consistency. */
+ return (NULL);
+}
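+
+/*
+ * Canonical use (sketch): make a protocol header contiguous before
+ * casting to it, remembering that m_pullup() frees the chain on
+ * failure:
+ *
+ *	struct ip *ip;
+ *
+ *	if (m->m_len < (int)sizeof(struct ip) &&
+ *	    (m = m_pullup(pData, m, sizeof(struct ip))) == NULL)
+ *		return;
+ *	ip = mtod(m, struct ip *);
+ */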
+
+/*
+ * Like m_pullup(), except a new mbuf is always allocated, and we allow
+ * the amount of empty space before the data in the new mbuf to be specified
+ * (in the event that the caller expects to prepend later).
+ */
+int MSFail;
+
+struct mbuf *
+#ifndef VBOX
+m_copyup(struct mbuf *n, int len, int dstoff)
+#else
+m_copyup(PNATState pData, struct mbuf *n, int len, int dstoff)
+#endif
+{
+ struct mbuf *m;
+ int count, space;
+
+ if (len > (int)(MHLEN - dstoff))
+ goto bad;
+ MGET(m, M_DONTWAIT, n->m_type);
+ if (m == NULL)
+ goto bad;
+ m->m_len = 0;
+ if (n->m_flags & M_PKTHDR)
+ M_MOVE_PKTHDR(m, n);
+ m->m_data += dstoff;
+ space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
+ do {
+ count = min(min(max(len, max_protohdr), space), n->m_len);
+ memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
+ (unsigned)count);
+ len -= count;
+ m->m_len += count;
+ n->m_len -= count;
+ space -= count;
+ if (n->m_len)
+ n->m_data += count;
+ else
+#ifndef VBOX
+ n = m_free(n);
+#else
+ n = m_free(pData, n);
+#endif
+ } while (len > 0 && n);
+ if (len > 0) {
+#ifndef VBOX
+ (void) m_free(m);
+#else
+ (void) m_free(pData, m);
+#endif
+ goto bad;
+ }
+ m->m_next = n;
+ return (m);
+ bad:
+#ifndef VBOX
+ m_freem(n);
+#else
+ m_freem(pData, n);
+#endif
+ MSFail++;
+ return (NULL);
+}
+
+/*
+ * Partition an mbuf chain in two pieces, returning the tail --
+ * all but the first len0 bytes. In case of failure, it returns NULL and
+ * attempts to restore the chain to its original state.
+ *
+ * Note that the resulting mbufs might be read-only, because the new
+ * mbuf can end up sharing an mbuf cluster with the original mbuf if
+ * the "breaking point" happens to lie within a cluster mbuf. Use the
+ * M_WRITABLE() macro to check for this case.
+ */
+struct mbuf *
+#ifndef VBOX
+m_split(struct mbuf *m0, int len0, int fWait)
+#else
+m_split(PNATState pData, struct mbuf *m0, int len0, int fWait)
+#endif
+{
+ struct mbuf *m, *n;
+ u_int len = len0, remain;
+
+ MBUF_CHECKSLEEP(fWait);
+ for (m = m0; m && len > m->m_len; m = m->m_next)
+ len -= m->m_len;
+ if (m == NULL)
+ return (NULL);
+ remain = m->m_len - len;
+ if (m0->m_flags & M_PKTHDR) {
+ MGETHDR(n, fWait, m0->m_type);
+ if (n == NULL)
+ return (NULL);
+ n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
+ n->m_pkthdr.len = m0->m_pkthdr.len - len0;
+ m0->m_pkthdr.len = len0;
+ if (m->m_flags & M_EXT)
+ goto extpacket;
+ if (remain > MHLEN) {
+ /* m can't be the lead packet */
+ MH_ALIGN(n, 0);
+#ifndef VBOX
+ n->m_next = m_split(m, len, fWait);
+#else
+ n->m_next = m_split(pData, m, len, fWait);
+#endif
+ if (n->m_next == NULL) {
+#ifndef VBOX
+ (void) m_free(n);
+#else
+ (void) m_free(pData, n);
+#endif
+ return (NULL);
+ } else {
+ n->m_len = 0;
+ return (n);
+ }
+ } else
+ MH_ALIGN(n, remain);
+ } else if (remain == 0) {
+ n = m->m_next;
+ m->m_next = NULL;
+ return (n);
+ } else {
+ MGET(n, fWait, m->m_type);
+ if (n == NULL)
+ return (NULL);
+ M_ALIGN(n, remain);
+ }
+extpacket:
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data + len;
+ mb_dupcl(n, m);
+ } else {
+ bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
+ }
+ n->m_len = remain;
+ m->m_len = len;
+ n->m_next = m->m_next;
+ m->m_next = NULL;
+ return (n);
+}
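+
+/*
+ * Sketch: split a packet after its first 512 bytes; 'm' keeps the
+ * head, 'tail' gets the rest (possibly sharing a cluster with 'm'):
+ *
+ *	struct mbuf *tail = m_split(pData, m, 512, M_DONTWAIT);
+ *
+ * On failure NULL is returned and the chain is restored as far as
+ * possible.
+ */
+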
+/*
+ * Routine to copy from device local memory into mbufs.
+ * Note that `off' argument is offset into first mbuf of target chain from
+ * which to begin copying the data to.
+ */
+#ifndef VBOX
+struct mbuf *
+m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
+ void (*copy)(char *from, caddr_t to, u_int len))
+{
+ struct mbuf *m;
+ struct mbuf *top = NULL, **mp = &top;
+ int len;
+
+ if (off < 0 || off > MHLEN)
+ return (NULL);
+
+ while (totlen > 0) {
+ if (top == NULL) { /* First one, must be PKTHDR */
+ if (totlen + off >= MINCLSIZE) {
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ len = MCLBYTES;
+ } else {
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ len = MHLEN;
+
+ /* Place initial small packet/header at end of mbuf */
+ if (m && totlen + off + max_linkhdr <= MLEN) {
+ m->m_data += max_linkhdr;
+ len -= max_linkhdr;
+ }
+ }
+ if (m == NULL)
+ return NULL;
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = totlen;
+ } else {
+ if (totlen + off >= MINCLSIZE) {
+ m = m_getcl(M_DONTWAIT, MT_DATA, 0);
+ len = MCLBYTES;
+ } else {
+ m = m_get(M_DONTWAIT, MT_DATA);
+ len = MLEN;
+ }
+ if (m == NULL) {
+ m_freem(top);
+ return NULL;
+ }
+ }
+ if (off) {
+ m->m_data += off;
+ len -= off;
+ off = 0;
+ }
+ m->m_len = len = min(totlen, len);
+ if (copy)
+ copy(buf, mtod(m, caddr_t), (u_int)len);
+ else
+ bcopy(buf, mtod(m, caddr_t), (u_int)len);
+ buf += len;
+ *mp = m;
+ mp = &m->m_next;
+ totlen -= len;
+ }
+ return (top);
+}
+#endif
+
+/*
+ * Copy data from a buffer back into the indicated mbuf chain,
+ * starting "off" bytes from the beginning, extending the mbuf
+ * chain if necessary.
+ */
+void
+#ifndef VBOX
+m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
+#else
+m_copyback(PNATState pData, struct mbuf *m0, int off, int len, c_caddr_t cp)
+#endif
+{
+ int mlen;
+ struct mbuf *m = m0, *n;
+ int totlen = 0;
+
+ if (m0 == NULL)
+ return;
+ while (off > (mlen = m->m_len)) {
+ off -= mlen;
+ totlen += mlen;
+ if (m->m_next == NULL) {
+#ifndef VBOX
+ n = m_get(M_DONTWAIT, m->m_type);
+#else
+ n = m_get(pData, M_DONTWAIT, m->m_type);
+#endif
+ if (n == NULL)
+ goto out;
+ bzero(mtod(n, caddr_t), MLEN);
+ n->m_len = min(MLEN, len + off);
+ m->m_next = n;
+ }
+ m = m->m_next;
+ }
+ while (len > 0) {
+ if (m->m_next == NULL && (len > m->m_len - off)) {
+ m->m_len += min(len - (m->m_len - off),
+ M_TRAILINGSPACE(m));
+ }
+ mlen = min (m->m_len - off, len);
+ bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
+ cp += mlen;
+ len -= mlen;
+ mlen += off;
+ off = 0;
+ totlen += mlen;
+ if (len == 0)
+ break;
+ if (m->m_next == NULL) {
+#ifndef VBOX
+ n = m_get(M_DONTWAIT, m->m_type);
+#else
+ n = m_get(pData, M_DONTWAIT, m->m_type);
+#endif
+ if (n == NULL)
+ break;
+ n->m_len = min(MLEN, len);
+ m->m_next = n;
+ }
+ m = m->m_next;
+ }
+out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
+ m->m_pkthdr.len = totlen;
+}
+
+/*
+ * Append the specified data to the indicated mbuf chain,
+ * extending the mbuf chain if the new data does not fit in
+ * existing space.
+ *
+ * Return 1 if able to complete the job; otherwise 0.
+ */
+int
+#ifndef VBOX
+m_append(struct mbuf *m0, int len, c_caddr_t cp)
+#else
+m_append(PNATState pData, struct mbuf *m0, int len, c_caddr_t cp)
+#endif
+{
+ struct mbuf *m, *n;
+ int remainder, space;
+
+ for (m = m0; m->m_next != NULL; m = m->m_next)
+ ;
+ remainder = len;
+ space = M_TRAILINGSPACE(m);
+ if (space > 0) {
+ /*
+ * Copy into available space.
+ */
+ if (space > remainder)
+ space = remainder;
+ bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
+ m->m_len += space;
+ cp += space, remainder -= space;
+ }
+ while (remainder > 0) {
+ /*
+ * Allocate a new mbuf; could check space
+ * and allocate a cluster instead.
+ */
+#ifndef VBOX
+ n = m_get(M_DONTWAIT, m->m_type);
+#else
+ n = m_get(pData, M_DONTWAIT, m->m_type);
+#endif
+ if (n == NULL)
+ break;
+ n->m_len = min(MLEN, remainder);
+ bcopy(cp, mtod(n, caddr_t), n->m_len);
+ cp += n->m_len, remainder -= n->m_len;
+ m->m_next = n;
+ m = n;
+ }
+ if (m0->m_flags & M_PKTHDR)
+ m0->m_pkthdr.len += len - remainder;
+ return (remainder == 0);
+}
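+
+/*
+ * Sketch ('struct my_trailer' is a hypothetical caller structure):
+ * append a fixed trailer and check the return value, since 0 means an
+ * mbuf allocation failed part way through (some leading bytes may
+ * already have been copied):
+ *
+ *	struct my_trailer t;
+ *	...
+ *	if (!m_append(pData, m, sizeof(t), (c_caddr_t)&t))
+ *		m_freem(pData, m);
+ */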
+
+/*
+ * Apply function f to the data in an mbuf chain starting "off" bytes from
+ * the beginning, continuing for "len" bytes.
+ */
+int
+m_apply(struct mbuf *m, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg)
+{
+ u_int count;
+ int rval;
+
+ KASSERT(off >= 0, ("m_apply, negative off %d", off));
+ KASSERT(len >= 0, ("m_apply, negative len %d", len));
+ while (off > 0) {
+ KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ while (len > 0) {
+ KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
+ count = min(m->m_len - off, len);
+ rval = (*f)(arg, mtod(m, caddr_t) + off, count);
+ if (rval)
+ return (rval);
+ len -= count;
+ off = 0;
+ m = m->m_next;
+ }
+ return (0);
+}
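+
+/*
+ * Illustrative usage sketch (not part of the original sources): m_apply()
+ * visits the data segment by segment without linearizing the chain, so a
+ * simple sum can be folded in place.  The callback below is hypothetical:
+ *
+ *     static int
+ *     sum_cb(void *arg, void *data, u_int len)
+ *     {
+ *         uint32_t *acc = arg;
+ *         u_char *p = data;
+ *         while (len--)
+ *             *acc += *p++;
+ *         return (0);     /* a non-zero return would abort the walk */
+ *     }
+ *
+ *     uint32_t acc = 0;
+ *     m_apply(m, off, len, sum_cb, &acc);
+ */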
+
+/*
+ * Return a pointer to mbuf/offset of location in mbuf chain.
+ */
+struct mbuf *
+m_getptr(struct mbuf *m, int loc, int *off)
+{
+
+ while (loc >= 0) {
+ /* Normal end of search. */
+ if (m->m_len > loc) {
+ *off = loc;
+ return (m);
+ } else {
+ loc -= m->m_len;
+ if (m->m_next == NULL) {
+ if (loc == 0) {
+ /* Point at the end of valid data. */
+ *off = m->m_len;
+ return (m);
+ }
+ return (NULL);
+ }
+ m = m->m_next;
+ }
+ }
+ return (NULL);
+}
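+
+/*
+ * Illustrative usage sketch (not part of the original sources): translating
+ * a chain-relative byte position into an <mbuf, offset> pair:
+ *
+ *     int off;
+ *     struct mbuf *n = m_getptr(m, 20, &off);
+ *     if (n != NULL)
+ *         p = mtod(n, u_char *) + off;    /* byte 20 of the chain */
+ *
+ * NULL is returned when the chain is shorter than the requested location.
+ */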
+
+void
+m_print(const struct mbuf *m, int maxlen)
+{
+ int len;
+ int pdata;
+ const struct mbuf *m2;
+
+ if (m->m_flags & M_PKTHDR)
+ len = m->m_pkthdr.len;
+ else
+ len = -1;
+ m2 = m;
+ while (m2 != NULL && (len == -1 || len)) {
+ pdata = m2->m_len;
+ if (maxlen != -1 && pdata > maxlen)
+ pdata = maxlen;
+ printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
+ m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
+ "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
+ "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
+ if (pdata)
+ printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
+ if (len != -1)
+ len -= m2->m_len;
+ m2 = m2->m_next;
+ }
+ if (len > 0)
+ printf("%d bytes unaccounted for.\n", len);
+ return;
+}
+
+u_int
+m_fixhdr(struct mbuf *m0)
+{
+ u_int len;
+
+ len = m_length(m0, NULL);
+ m0->m_pkthdr.len = len;
+ return (len);
+}
+
+u_int
+m_length(struct mbuf *m0, struct mbuf **last)
+{
+ struct mbuf *m;
+ u_int len;
+
+ len = 0;
+ for (m = m0; m != NULL; m = m->m_next) {
+ len += m->m_len;
+ if (m->m_next == NULL)
+ break;
+ }
+ if (last != NULL)
+ *last = m;
+ return (len);
+}
+
+/*
+ * Defragment a mbuf chain, returning the shortest possible
+ * chain of mbufs and clusters. If allocation fails and
+ * this cannot be completed, NULL will be returned, but
+ * the passed in chain will be unchanged. Upon success,
+ * the original chain will be freed, and the new chain
+ * will be returned.
+ *
+ * If an mbuf without a packet header is passed in, the
+ * original mbuf (chain) will be returned unharmed.
+ */
+struct mbuf *
+#ifndef VBOX
+m_defrag(struct mbuf *m0, int how)
+#else
+m_defrag(PNATState pData, struct mbuf *m0, int how)
+#endif
+{
+ struct mbuf *m_new = NULL, *m_final = NULL;
+ int progress = 0, length;
+
+ MBUF_CHECKSLEEP(how);
+ if (!(m0->m_flags & M_PKTHDR))
+ return (m0);
+
+ m_fixhdr(m0); /* Needed sanity check */
+
+#ifdef MBUF_STRESS_TEST
+ if (m_defragrandomfailures) {
+ int temp = arc4random() & 0xff;
+ if (temp == 0xba)
+ goto nospace;
+ }
+#endif
+
+ if (m0->m_pkthdr.len > MHLEN)
+#ifndef VBOX
+ m_final = m_getcl(how, MT_DATA, M_PKTHDR);
+#else
+ m_final = m_getcl(pData, how, MT_DATA, M_PKTHDR);
+#endif
+ else
+#ifndef VBOX
+ m_final = m_gethdr(how, MT_DATA);
+#else
+ m_final = m_gethdr(pData, how, MT_DATA);
+#endif
+
+ if (m_final == NULL)
+ goto nospace;
+
+ if (m_dup_pkthdr(m_final, m0, how) == 0)
+ goto nospace;
+
+ m_new = m_final;
+
+ while (progress < m0->m_pkthdr.len) {
+ length = m0->m_pkthdr.len - progress;
+ if (length > MCLBYTES)
+ length = MCLBYTES;
+
+ if (m_new == NULL) {
+ if (length > MLEN)
+#ifndef VBOX
+ m_new = m_getcl(how, MT_DATA, 0);
+#else
+ m_new = m_getcl(pData, how, MT_DATA, 0);
+#endif
+ else
+#ifndef VBOX
+ m_new = m_get(how, MT_DATA);
+#else
+ m_new = m_get(pData, how, MT_DATA);
+#endif
+ if (m_new == NULL)
+ goto nospace;
+ }
+
+ m_copydata(m0, progress, length, mtod(m_new, caddr_t));
+ progress += length;
+ m_new->m_len = length;
+ if (m_new != m_final)
+#ifndef VBOX
+ m_cat(m_final, m_new);
+#else
+ m_cat(pData, m_final, m_new);
+#endif
+ m_new = NULL;
+ }
+#ifdef MBUF_STRESS_TEST
+ if (m0->m_next == NULL)
+ m_defraguseless++;
+#endif
+#ifndef VBOX
+ m_freem(m0);
+#else
+ m_freem(pData, m0);
+#endif
+ m0 = m_final;
+#ifdef MBUF_STRESS_TEST
+ m_defragpackets++;
+ m_defragbytes += m0->m_pkthdr.len;
+#endif
+ return (m0);
+nospace:
+#ifdef MBUF_STRESS_TEST
+ m_defragfailure++;
+#endif
+ if (m_final)
+#ifndef VBOX
+ m_freem(m_final);
+#else
+ m_freem(pData, m_final);
+#endif
+ return (NULL);
+}
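+
+/*
+ * Illustrative usage sketch (not part of the original sources): callers
+ * typically swap in the defragmented chain and keep the original on
+ * failure, since the input is left untouched when m_defrag() returns NULL:
+ *
+ *     struct mbuf *n = m_defrag(pData, m0, M_DONTWAIT);
+ *     if (n != NULL)
+ *         m0 = n;         /* old chain was already freed by m_defrag() */
+ */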
+
+/*
+ * Defragment an mbuf chain, returning at most maxfrags separate
+ * mbufs+clusters. If this is not possible NULL is returned and
+ * the original mbuf chain is left in its present (potentially
+ * modified) state. We use two techniques: collapsing consecutive
+ * mbufs and replacing consecutive mbufs by a cluster.
+ *
+ * NB: this should really be named m_defrag but that name is taken
+ */
+struct mbuf *
+#ifndef VBOX
+m_collapse(struct mbuf *m0, int how, int maxfrags)
+#else
+m_collapse(PNATState pData, struct mbuf *m0, int how, int maxfrags)
+#endif
+{
+ struct mbuf *m, *n, *n2, **prev;
+ u_int curfrags;
+
+ /*
+ * Calculate the current number of frags.
+ */
+ curfrags = 0;
+ for (m = m0; m != NULL; m = m->m_next)
+ curfrags++;
+ /*
+ * First, try to collapse mbufs. Note that we always collapse
+ * towards the front so we don't need to deal with moving the
+ * pkthdr. This may be suboptimal if the first mbuf has much
+ * less data than the following.
+ */
+ m = m0;
+again:
+ for (;;) {
+ n = m->m_next;
+ if (n == NULL)
+ break;
+ if ((m->m_flags & M_RDONLY) == 0 &&
+ n->m_len < M_TRAILINGSPACE(m)) {
+ bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
+ n->m_len);
+ m->m_len += n->m_len;
+ m->m_next = n->m_next;
+#ifndef VBOX
+ m_free(n);
+#else
+ m_free(pData, n);
+#endif
+ if (--curfrags <= maxfrags)
+ return m0;
+ } else
+ m = n;
+ }
+ KASSERT(maxfrags > 1,
+ ("maxfrags %u, but normal collapse failed", maxfrags));
+ /*
+ * Collapse consecutive mbufs to a cluster.
+ */
+ prev = &m0->m_next; /* NB: not the first mbuf */
+ while ((n = *prev) != NULL) {
+ if ((n2 = n->m_next) != NULL &&
+ n->m_len + n2->m_len < MCLBYTES) {
+#ifndef VBOX
+ m = m_getcl(how, MT_DATA, 0);
+#else
+ m = m_getcl(pData, how, MT_DATA, 0);
+#endif
+ if (m == NULL)
+ goto bad;
+ bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
+ bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
+ n2->m_len);
+ m->m_len = n->m_len + n2->m_len;
+ m->m_next = n2->m_next;
+ *prev = m;
+#ifndef VBOX
+ m_free(n);
+ m_free(n2);
+#else
+ m_free(pData, n);
+ m_free(pData, n2);
+#endif
+ if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
+ return m0;
+ /*
+ * Still not there, try the normal collapse
+ * again before we allocate another cluster.
+ */
+ goto again;
+ }
+ prev = &n->m_next;
+ }
+ /*
+ * No place where we can collapse to a cluster; punt.
+ * This can occur if, for example, you request 2 frags
+ * but the packet requires that both be clusters (we
+ * never reallocate the first mbuf to avoid moving the
+ * packet header).
+ */
+bad:
+ return NULL;
+}
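+
+/*
+ * Illustrative usage sketch (not part of the original sources): a consumer
+ * limited to, say, four segments would bound the chain like this.  Unlike
+ * m_defrag(), a failed m_collapse() may leave the chain partially
+ * collapsed, so the caller still owns (and here frees) the original:
+ *
+ *     struct mbuf *n = m_collapse(pData, m0, M_DONTWAIT, 4);
+ *     if (n == NULL) {
+ *         m_freem(pData, m0);
+ *         goto drop;      /* hypothetical error label */
+ *     }
+ *     m0 = n;
+ */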
+
+#ifdef MBUF_STRESS_TEST
+
+/*
+ * Fragment an mbuf chain. There's no reason you'd ever want to do
+ * this in normal usage, but it's great for stress testing various
+ * mbuf consumers.
+ *
+ * If fragmentation is not possible, the original chain will be
+ * returned.
+ *
+ * Possible length values:
+ * 0 no fragmentation will occur
+ * > 0 each fragment will be of the specified length
+ * -1 each fragment will be the same random value in length
+ * -2 each fragment's length will be entirely random
+ * (Random values range from 1 to 256)
+ */
+struct mbuf *
+m_fragment(struct mbuf *m0, int how, int length)
+{
+ struct mbuf *m_new = NULL, *m_final = NULL;
+ int progress = 0;
+
+ if (!(m0->m_flags & M_PKTHDR))
+ return (m0);
+
+ if ((length == 0) || (length < -2))
+ return (m0);
+
+ m_fixhdr(m0); /* Needed sanity check */
+
+ m_final = m_getcl(how, MT_DATA, M_PKTHDR);
+
+ if (m_final == NULL)
+ goto nospace;
+
+ if (m_dup_pkthdr(m_final, m0, how) == 0)
+ goto nospace;
+
+ m_new = m_final;
+
+ if (length == -1)
+ length = 1 + (arc4random() & 255);
+
+ while (progress < m0->m_pkthdr.len) {
+ int fraglen;
+
+ if (length > 0)
+ fraglen = length;
+ else
+ fraglen = 1 + (arc4random() & 255);
+ if (fraglen > m0->m_pkthdr.len - progress)
+ fraglen = m0->m_pkthdr.len - progress;
+
+ if (fraglen > MCLBYTES)
+ fraglen = MCLBYTES;
+
+ if (m_new == NULL) {
+ m_new = m_getcl(how, MT_DATA, 0);
+ if (m_new == NULL)
+ goto nospace;
+ }
+
+ m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
+ progress += fraglen;
+ m_new->m_len = fraglen;
+ if (m_new != m_final)
+ m_cat(m_final, m_new);
+ m_new = NULL;
+ }
+ m_freem(m0);
+ m0 = m_final;
+ return (m0);
+nospace:
+ if (m_final)
+ m_freem(m_final);
+ /* Return the original chain on failure */
+ return (m0);
+}
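+
+/*
+ * Illustrative usage sketch (not part of the original sources, and only
+ * meaningful when MBUF_STRESS_TEST is compiled in): shattering a chain
+ * into fully random fragment lengths to exercise a consumer:
+ *
+ *     m = m_fragment(m, M_DONTWAIT, -2);
+ *
+ * Because the original chain is returned on failure, the result can be
+ * used unconditionally.
+ */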
+
+#endif
+
+/*
+ * Copy the contents of uio into a properly sized mbuf chain.
+ */
+#ifndef VBOX
+struct mbuf *
+m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
+{
+ struct mbuf *m, *mb;
+ int error, length, total;
+ int progress = 0;
+
+ /*
+ * len can be zero or an arbitrary large value bound by
+ * the total data supplied by the uio.
+ */
+ if (len > 0)
+ total = min(uio->uio_resid, len);
+ else
+ total = uio->uio_resid;
+
+ /*
+ * The smallest unit returned by m_getm2() is a single mbuf
+ * with pkthdr. We can't align past it.
+ */
+ if (align >= MHLEN)
+ return (NULL);
+
+ /*
+ * Give us the full allocation or nothing.
+ * If len is zero return the smallest empty mbuf.
+ */
+ m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
+ if (m == NULL)
+ return (NULL);
+ m->m_data += align;
+
+ /* Fill all mbufs with uio data and update header information. */
+ for (mb = m; mb != NULL; mb = mb->m_next) {
+ length = min(M_TRAILINGSPACE(mb), total - progress);
+
+ error = uiomove(mtod(mb, void *), length, uio);
+ if (error) {
+ m_freem(m);
+ return (NULL);
+ }
+
+ mb->m_len = length;
+ progress += length;
+ if (flags & M_PKTHDR)
+ m->m_pkthdr.len += length;
+ }
+ KASSERT(progress == total, ("%s: progress != total", __func__));
+
+ return (m);
+}
+#endif
+
+/*
+ * Set the m_data pointer of a newly-allocated mbuf
+ * to place an object of the specified size at the
+ * end of the mbuf, longword aligned.
+ */
+void
+m_align(struct mbuf *m, int len)
+{
+ int adjust;
+
+ if (m->m_flags & M_EXT)
+ adjust = m->m_ext.ext_size - len;
+ else if (m->m_flags & M_PKTHDR)
+ adjust = MHLEN - len;
+ else
+ adjust = MLEN - len;
+ m->m_data += adjust &~ (sizeof(long)-1);
+}
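+
+/*
+ * Illustrative usage sketch (not part of the original sources): reserving
+ * an aligned region at the tail of a freshly allocated mbuf; this must run
+ * before any data is placed in the buffer:
+ *
+ *     m = m_gethdr(pData, M_DONTWAIT, MT_DATA);
+ *     if (m != NULL) {
+ *         m_align(m, 40);     /* hypothetical object size */
+ *         m->m_len = 40;      /* m_data now points at the aligned tail */
+ *     }
+ */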
+
+/*
+ * Create a writable copy of the mbuf chain. While doing this
+ * we compact the chain with a goal of producing a chain with
+ * at most two mbufs. The second mbuf in this chain is likely
+ * to be a cluster. The primary purpose of this work is to create
+ * a writable packet for encryption, compression, etc. The
+ * secondary goal is to linearize the data so the data can be
+ * passed to crypto hardware in the most efficient manner possible.
+ */
+struct mbuf *
+#ifndef VBOX
+m_unshare(struct mbuf *m0, int how)
+#else
+m_unshare(PNATState pData, struct mbuf *m0, int how)
+#endif
+{
+ struct mbuf *m, *mprev;
+ struct mbuf *n, *mfirst, *mlast;
+ int len, off;
+
+ mprev = NULL;
+ for (m = m0; m != NULL; m = mprev->m_next) {
+ /*
+ * Regular mbufs are ignored unless there's a cluster
+ * in front of it that we can use to coalesce. We do
+ * the latter mainly so later clusters can be coalesced
+ * also w/o having to handle them specially (i.e. convert
+ * mbuf+cluster -> cluster). This optimization is heavily
+ * influenced by the assumption that we're running over
+ * Ethernet where MCLBYTES is large enough that the max
+ * packet size will permit lots of coalescing into a
+ * single cluster. This in turn permits efficient
+ * crypto operations, especially when using hardware.
+ */
+ if ((m->m_flags & M_EXT) == 0) {
+ if (mprev && (mprev->m_flags & M_EXT) &&
+ m->m_len <= M_TRAILINGSPACE(mprev)) {
+ /* XXX: this ignores mbuf types */
+ memcpy(mtod(mprev, caddr_t) + mprev->m_len,
+ mtod(m, caddr_t), m->m_len);
+ mprev->m_len += m->m_len;
+ mprev->m_next = m->m_next; /* unlink from chain */
+#ifndef VBOX
+ m_free(m); /* reclaim mbuf */
+#else
+ m_free(pData, m); /* reclaim mbuf */
+#endif
+#if 0
+ newipsecstat.ips_mbcoalesced++;
+#endif
+ } else {
+ mprev = m;
+ }
+ continue;
+ }
+ /*
+ * Writable mbufs are left alone (for now).
+ */
+ if (M_WRITABLE(m)) {
+ mprev = m;
+ continue;
+ }
+
+ /*
+ * Not writable, replace with a copy or coalesce with
+ * the previous mbuf if possible (since we have to copy
+ * it anyway, we try to reduce the number of mbufs and
+ * clusters so that future work is easier).
+ */
+ KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
+ /* NB: we only coalesce into a cluster or larger */
+ if (mprev != NULL && (mprev->m_flags & M_EXT) &&
+ m->m_len <= M_TRAILINGSPACE(mprev)) {
+ /* XXX: this ignores mbuf types */
+ memcpy(mtod(mprev, caddr_t) + mprev->m_len,
+ mtod(m, caddr_t), m->m_len);
+ mprev->m_len += m->m_len;
+ mprev->m_next = m->m_next; /* unlink from chain */
+#ifndef VBOX
+ m_free(m); /* reclaim mbuf */
+#else
+ m_free(pData, m); /* reclaim mbuf */
+#endif
+#if 0
+ newipsecstat.ips_clcoalesced++;
+#endif
+ continue;
+ }
+
+ /*
+ * Allocate new space to hold the copy...
+ */
+ /* XXX why can M_PKTHDR be set past the first mbuf? */
+ if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
+ /*
+ * NB: if a packet header is present we must
+ * allocate the mbuf separately from any cluster
+ * because M_MOVE_PKTHDR will smash the data
+ * pointer and drop the M_EXT marker.
+ */
+ MGETHDR(n, how, m->m_type);
+ if (n == NULL) {
+#ifndef VBOX
+ m_freem(m0);
+#else
+ m_freem(pData, m0);
+#endif
+ return (NULL);
+ }
+ M_MOVE_PKTHDR(n, m);
+ MCLGET(n, how);
+ if ((n->m_flags & M_EXT) == 0) {
+#ifndef VBOX
+ m_free(n);
+ m_freem(m0);
+#else
+ m_free(pData, n);
+ m_freem(pData, m0);
+#endif
+ return (NULL);
+ }
+ } else {
+#ifndef VBOX
+ n = m_getcl(how, m->m_type, m->m_flags);
+#else
+ n = m_getcl(pData, how, m->m_type, m->m_flags);
+#endif
+ if (n == NULL) {
+#ifndef VBOX
+ m_freem(m0);
+#else
+ m_freem(pData, m0);
+#endif
+ return (NULL);
+ }
+ }
+ /*
+ * ... and copy the data. We deal with jumbo mbufs
+ * (i.e. m_len > MCLBYTES) by splitting them into
+ * clusters. We could just malloc a buffer and make
+ * it external but too many device drivers don't know
+ * how to break up the non-contiguous memory when
+ * doing DMA.
+ */
+ len = m->m_len;
+ off = 0;
+ mfirst = n;
+ mlast = NULL;
+ for (;;) {
+ int cc = min(len, MCLBYTES);
+ memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
+ n->m_len = cc;
+ if (mlast != NULL)
+ mlast->m_next = n;
+ mlast = n;
+#if 0
+ newipsecstat.ips_clcopied++;
+#endif
+
+ len -= cc;
+ if (len <= 0)
+ break;
+ off += cc;
+
+#ifndef VBOX
+ n = m_getcl(how, m->m_type, m->m_flags);
+#else
+ n = m_getcl(pData, how, m->m_type, m->m_flags);
+#endif
+ if (n == NULL) {
+#ifndef VBOX
+ m_freem(mfirst);
+ m_freem(m0);
+#else
+ m_freem(pData, mfirst);
+ m_freem(pData, m0);
+#endif
+ return (NULL);
+ }
+ }
+ n->m_next = m->m_next;
+ if (mprev == NULL)
+ m0 = mfirst; /* new head of chain */
+ else
+ mprev->m_next = mfirst; /* replace old mbuf */
+#ifndef VBOX
+ m_free(m); /* release old mbuf */
+#else
+ m_free(pData, m); /* release old mbuf */
+#endif
+ mprev = mfirst;
+ }
+ return (m0);
+}
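+
+/*
+ * Illustrative usage sketch (not part of the original sources): m_unshare()
+ * is the usual preamble to in-place transforms such as encryption, where
+ * shared or read-only clusters must first become private copies:
+ *
+ *     m = m_unshare(pData, m, M_DONTWAIT);
+ *     if (m == NULL)
+ *         return;         /* the original chain was freed on failure */
+ *     encrypt_in_place(mtod(m, u_char *), m->m_len);  /* hypothetical */
+ */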
diff --git a/src/VBox/Devices/Network/slirp/bsd/kern/uipc_mbuf2.c b/src/VBox/Devices/Network/slirp/bsd/kern/uipc_mbuf2.c
new file mode 100644
index 00000000..5f3abde8
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/kern/uipc_mbuf2.c
@@ -0,0 +1,539 @@
+/* $KAME: uipc_mbuf2.c,v 1.31 2001/11/28 11:08:53 itojun Exp $ */
+/* $NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $ */
+
+/*-
+ * Copyright (C) 1999 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95
+ */
+#ifndef VBOX
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/kern/uipc_mbuf2.c,v 1.33.8.1 2009/04/15 03:14:26 kensmith Exp $");
+
+/*#define PULLDOWN_DEBUG*/
+
+#include "opt_mac.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+
+#include <security/mac/mac_framework.h>
+
+static MALLOC_DEFINE(M_PACKET_TAGS, MBUF_TAG_MEM_NAME,
+ "packet-attached information");
+#else
+# include "slirp.h"
+#endif
+
+/* can't call it m_dup(), as freebsd[34] uses m_dup() with different arg */
+#ifndef VBOX
+static struct mbuf *m_dup1(struct mbuf *, int, int, int);
+#else
+static struct mbuf *m_dup1(PNATState, struct mbuf *, int, int, int);
+#endif
+
+/*
+ * ensure that [off, off + len) is contiguous on the mbuf chain "m".
+ * packet chain before "off" is kept untouched.
+ * if offp == NULL, the target will start at <retval, 0> on resulting chain.
+ * if offp != NULL, the target will start at <retval, *offp> on resulting chain.
+ *
+ * on error return (NULL return value), original "m" will be freed.
+ *
+ * XXX: M_TRAILINGSPACE/M_LEADINGSPACE only permitted on writable ext_buf.
+ */
+struct mbuf *
+#ifndef VBOX
+m_pulldown(struct mbuf *m, int off, int len, int *offp)
+#else
+m_pulldown(PNATState pData, struct mbuf *m, int off, int len, int *offp)
+#endif
+{
+ struct mbuf *n, *o;
+ int hlen, tlen, olen;
+ int writable;
+
+ /* check invalid arguments. */
+ if (m == NULL)
+ panic("m == NULL in m_pulldown()");
+ if (len > MCLBYTES) {
+#ifndef VBOX
+ m_freem(m);
+#else
+ m_freem(pData, m);
+#endif
+ return NULL; /* impossible */
+ }
+
+#ifdef PULLDOWN_DEBUG
+ {
+ struct mbuf *t;
+ printf("before:");
+ for (t = m; t; t = t->m_next)
+ printf(" %d", t->m_len);
+ printf("\n");
+ }
+#endif
+ n = m;
+ while (n != NULL && off > 0) {
+ if (n->m_len > off)
+ break;
+ off -= n->m_len;
+ n = n->m_next;
+ }
+ * be sure to point at a non-empty mbuf
+ while (n != NULL && n->m_len == 0)
+ n = n->m_next;
+ if (!n) {
+#ifndef VBOX
+ m_freem(m);
+#else
+ m_freem(pData, m);
+#endif
+ return NULL; /* mbuf chain too short */
+ }
+
+ /*
+ * XXX: This code is flawed because it considers a "writable" mbuf
+ * data region to require all of the following:
+ * (i) mbuf _has_ to have M_EXT set; if it is just a regular
+ * mbuf, it is still not considered "writable."
+ * (ii) since mbuf has M_EXT, the ext_type _has_ to be
+ * EXT_CLUSTER. Anything else makes it non-writable.
+ * (iii) M_WRITABLE() must evaluate true.
+ * Ideally, the requirement should only be (iii).
+ *
+ * If we're writable, we're sure we're writable, because the ref. count
+ * cannot increase from 1, as that would require possession of mbuf
+ * n by someone else (which is impossible). However, if we're _not_
+ * writable, we may eventually become writable (if the ref. count drops
+ * to 1), but we'll fail to notice it unless we re-evaluate
+ * M_WRITABLE(). For now, we only evaluate once at the beginning and
+ * live with this.
+ */
+ /*
+ * XXX: This is dumb. If we're just a regular mbuf with no M_EXT,
+ * then we're not "writable," according to this code.
+ */
+ writable = 0;
+ if ((n->m_flags & M_EXT) == 0 ||
+ (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
+ writable = 1;
+
+ /*
+ * the target data is on <n, off>.
+ * if we got enough data on the mbuf "n", we're done.
+ */
+ if ((off == 0 || offp) && len <= n->m_len - off && writable)
+ goto ok;
+
+ /*
+ * when len <= n->m_len - off and off != 0, it is a special case.
+ * len bytes from <n, off> sit in a single mbuf, but the caller does
+ * not like the starting position (off).
+ * chop the current mbuf into two pieces, set off to 0.
+ */
+ if (len <= n->m_len - off) {
+#ifndef VBOX
+ o = m_dup1(n, off, n->m_len - off, M_DONTWAIT);
+#else
+ o = m_dup1(pData, n, off, n->m_len - off, M_DONTWAIT);
+#endif
+ if (o == NULL) {
+#ifndef VBOX
+ m_freem(m);
+#else
+ m_freem(pData, m);
+#endif
+ return NULL; /* ENOBUFS */
+ }
+ n->m_len = off;
+ o->m_next = n->m_next;
+ n->m_next = o;
+ n = n->m_next;
+ off = 0;
+ goto ok;
+ }
+
+ /*
+ * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
+ * and construct contiguous mbuf with m_len == len.
+ * note that hlen + tlen == len, and tlen > 0.
+ */
+ hlen = n->m_len - off;
+ tlen = len - hlen;
+
+ /*
+ * ensure that we have enough trailing data on mbuf chain.
+ * if not, we can do nothing about the chain.
+ */
+ olen = 0;
+ for (o = n->m_next; o != NULL; o = o->m_next)
+ olen += o->m_len;
+ if (hlen + olen < len) {
+#ifndef VBOX
+ m_freem(m);
+#else
+ m_freem(pData, m);
+#endif
+ return NULL; /* mbuf chain too short */
+ }
+
+ /*
+ * easy cases first.
+ * we need to use m_copydata() to get data from <n->m_next, 0>.
+ */
+ if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
+ && writable) {
+ m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
+ n->m_len += tlen;
+#ifndef VBOX
+ m_adj(n->m_next, tlen);
+#else
+ m_adj(pData, n->m_next, tlen);
+#endif
+ goto ok;
+ }
+ if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen
+ && writable) {
+ n->m_next->m_data -= hlen;
+ n->m_next->m_len += hlen;
+ bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
+ n->m_len -= hlen;
+ n = n->m_next;
+ off = 0;
+ goto ok;
+ }
+
+ /*
+ * now, we need to do it the hard way.  don't m_copy as there's no room
+ * on either end.
+ */
+ if (len > MLEN)
+#ifndef VBOX
+ o = m_getcl(M_DONTWAIT, m->m_type, 0);
+#else
+ o = m_getcl(pData, M_DONTWAIT, m->m_type, 0);
+#endif
+ else
+#ifndef VBOX
+ o = m_get(M_DONTWAIT, m->m_type);
+#else
+ o = m_get(pData, M_DONTWAIT, m->m_type);
+#endif
+ if (!o) {
+#ifndef VBOX
+ m_freem(m);
+#else
+ m_freem(pData, m);
+#endif
+ return NULL; /* ENOBUFS */
+ }
+ /* get hlen from <n, off> into <o, 0> */
+ o->m_len = hlen;
+ bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
+ n->m_len -= hlen;
+ /* get tlen from <n->m_next, 0> into <o, hlen> */
+ m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
+ o->m_len += tlen;
+#ifndef VBOX
+ m_adj(n->m_next, tlen);
+#else
+ m_adj(pData, n->m_next, tlen);
+#endif
+ o->m_next = n->m_next;
+ n->m_next = o;
+ n = o;
+ off = 0;
+
+ok:
+#ifdef PULLDOWN_DEBUG
+ {
+ struct mbuf *t;
+ printf("after:");
+ for (t = m; t; t = t->m_next)
+ printf("%c%d", t == n ? '*' : ' ', t->m_len);
+ printf(" (off=%d)\n", off);
+ }
+#endif
+ if (offp)
+ *offp = off;
+ return n;
+}
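+
+/*
+ * Illustrative usage sketch (not part of the original sources): making a
+ * header contiguous without disturbing the data in front of it, in the
+ * style of the KAME input paths this function was written for (the offset
+ * and length values are hypothetical here):
+ *
+ *     int off = 20;       /* e.g. start of a transport header */
+ *     struct mbuf *n = m_pulldown(pData, m, off, 20, &off);
+ *     if (n == NULL)
+ *         return;         /* "m" has already been freed */
+ *     hdr = mtod(n, caddr_t) + off;
+ */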
+
+static struct mbuf *
+#ifndef VBOX
+m_dup1(struct mbuf *m, int off, int len, int fWait)
+#else
+m_dup1(PNATState pData, struct mbuf *m, int off, int len, int fWait)
+#endif
+{
+ struct mbuf *n;
+ int copyhdr;
+
+ if (len > MCLBYTES)
+ return NULL;
+ if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
+ copyhdr = 1;
+ else
+ copyhdr = 0;
+ if (len >= MINCLSIZE) {
+ if (copyhdr == 1)
+#ifndef VBOX
+ n = m_getcl(fWait, m->m_type, M_PKTHDR);
+#else
+ n = m_getcl(pData, fWait, m->m_type, M_PKTHDR);
+#endif
+ else
+#ifndef VBOX
+ n = m_getcl(fWait, m->m_type, 0);
+#else
+ n = m_getcl(pData, fWait, m->m_type, 0);
+#endif
+ } else {
+ if (copyhdr == 1)
+#ifndef VBOX
+ n = m_gethdr(fWait, m->m_type);
+#else
+ n = m_gethdr(pData, fWait, m->m_type);
+#endif
+ else
+#ifndef VBOX
+ n = m_get(fWait, m->m_type);
+#else
+ n = m_get(pData, fWait, m->m_type);
+#endif
+ }
+ if (!n)
+ return NULL; /* ENOBUFS */
+
+ if (copyhdr && !m_dup_pkthdr(n, m, fWait)) {
+#ifndef VBOX
+ m_free(n);
+#else
+ m_free(pData, n);
+#endif
+ return NULL;
+ }
+ m_copydata(m, off, len, mtod(n, caddr_t));
+ n->m_len = len;
+ return n;
+}
+
+/* Free a packet tag. */
+void
+m_tag_free_default(struct m_tag *t)
+{
+#ifdef MAC
+ if (t->m_tag_id == PACKET_TAG_MACLABEL)
+ mac_destroy_mbuf_tag(t);
+#endif
+#ifndef VBOX
+ free(t, M_PACKET_TAGS);
+#else
+ RTMemFree(t);
+#endif
+}
+
+/* Get a packet tag structure along with specified data following. */
+struct m_tag *
+m_tag_alloc(u_int32_t cookie, int type, int len, int fWait)
+{
+ struct m_tag *t;
+
+ MBUF_CHECKSLEEP(fWait);
+ if (len < 0)
+ return NULL;
+#ifndef VBOX
+ t = malloc(len + sizeof(struct m_tag), M_PACKET_TAGS, fWait);
+#else
+ NOREF(fWait);
+ t = RTMemAllocZ(len + sizeof(struct m_tag));
+#endif
+ if (t == NULL)
+ return NULL;
+ m_tag_setup(t, cookie, type, len);
+ t->m_tag_free = m_tag_free_default;
+ return t;
+}
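+
+/*
+ * Illustrative usage sketch (not part of the original sources): allocating
+ * a tag with a trailing payload; the data area starts right after the tag
+ * header.  Attaching via m_tag_prepend() and the MTAG_ABI_COMPAT cookie
+ * are assumptions here (both come from the stock FreeBSD mbuf.h):
+ *
+ *     struct m_tag *t = m_tag_alloc(MTAG_ABI_COMPAT, 42,
+ *                                   sizeof(uint32_t), M_DONTWAIT);
+ *     if (t != NULL) {
+ *         *(uint32_t *)(t + 1) = 0xdeadbeef;
+ *         m_tag_prepend(m, t);
+ *     }
+ */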
+
+/* Unlink and free a packet tag. */
+void
+m_tag_delete(struct mbuf *m, struct m_tag *t)
+{
+
+ KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", m, t));
+ m_tag_unlink(m, t);
+ m_tag_free(t);
+}
+
+/* Unlink and free a packet tag chain, starting from given tag. */
+void
+m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
+{
+ struct m_tag *p, *q;
+
+ KASSERT(m, ("m_tag_delete_chain: null mbuf"));
+ if (t != NULL)
+ p = t;
+ else
+ p = SLIST_FIRST(&m->m_pkthdr.tags);
+ if (p == NULL)
+ return;
+ while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
+ m_tag_delete(m, q);
+ m_tag_delete(m, p);
+}
+
+/*
+ * Strip off all tags that would normally vanish when
+ * passing through a network interface. Only persistent
+ * tags will exist after this; these are expected to remain
+ * so long as the mbuf chain exists, regardless of the
+ * path the mbufs take.
+ */
+void
+m_tag_delete_nonpersistent(struct mbuf *m)
+{
+ struct m_tag *p, *q;
+
+ SLIST_FOREACH_SAFE(p, &m->m_pkthdr.tags, m_tag_link, q)
+ if ((p->m_tag_id & MTAG_PERSISTENT) == 0)
+ m_tag_delete(m, p);
+}
+
+/* Find a tag, starting from a given position. */
+struct m_tag *
+m_tag_locate(struct mbuf *m, u_int32_t cookie, int type, struct m_tag *t)
+{
+ struct m_tag *p;
+
+ KASSERT(m, ("m_tag_locate: null mbuf"));
+ if (t == NULL)
+ p = SLIST_FIRST(&m->m_pkthdr.tags);
+ else
+ p = SLIST_NEXT(t, m_tag_link);
+ while (p != NULL) {
+ if (p->m_tag_cookie == cookie && p->m_tag_id == type)
+ return p;
+ p = SLIST_NEXT(p, m_tag_link);
+ }
+ return NULL;
+}
+
+/* Copy a single tag. */
+struct m_tag *
+m_tag_copy(struct m_tag *t, int how)
+{
+ struct m_tag *p;
+
+ MBUF_CHECKSLEEP(how);
+ KASSERT(t, ("m_tag_copy: null tag"));
+ p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
+ if (p == NULL)
+ return (NULL);
+#ifdef MAC
+ /*
+ * XXXMAC: we should probably pass off the initialization, and
+ * copying here? can we hide that PACKET_TAG_MACLABEL is
+ * special from the mbuf code?
+ */
+ if (t->m_tag_id == PACKET_TAG_MACLABEL) {
+ if (mac_init_mbuf_tag(p, how) != 0) {
+ m_tag_free(p);
+ return (NULL);
+ }
+ mac_copy_mbuf_tag(t, p);
+ } else
+#endif
+ bcopy(t + 1, p + 1, t->m_tag_len); /* Copy the data */
+ return p;
+}
+
+/*
+ * Copy two tag chains. The destination mbuf (to) loses any attached
+ * tags even if the operation fails. This should not be a problem, as
+ * m_tag_copy_chain() is typically called with a newly-allocated
+ * destination mbuf.
+ */
+int
+m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
+{
+ struct m_tag *p, *t, *tprev = NULL;
+
+ MBUF_CHECKSLEEP(how);
+ KASSERT(to && from,
+ ("m_tag_copy_chain: null argument, to %p from %p", to, from));
+ m_tag_delete_chain(to, NULL);
+ SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
+ t = m_tag_copy(p, how);
+ if (t == NULL) {
+ m_tag_delete_chain(to, NULL);
+ return 0;
+ }
+ if (tprev == NULL)
+ SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
+ else
+ SLIST_INSERT_AFTER(tprev, t, m_tag_link);
+ tprev = t;
+ }
+ return 1;
+}
diff --git a/src/VBox/Devices/Network/slirp/bsd/sys/mbuf.h b/src/VBox/Devices/Network/slirp/bsd/sys/mbuf.h
new file mode 100644
index 00000000..909f300b
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/sys/mbuf.h
@@ -0,0 +1,1177 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
+ * $FreeBSD: src/sys/sys/mbuf.h,v 1.217.2.3.4.1 2009/04/15 03:14:26 kensmith Exp $
+ */
+
+#ifndef _SYS_MBUF_H_
+#define _SYS_MBUF_H_
+
+#ifndef VBOX
+/* XXX: These includes suck. Sorry! */
+#include <sys/queue.h>
+#ifdef _KERNEL
+#include <sys/systm.h>
+#include <vm/uma.h>
+#ifdef WITNESS
+#include <sys/lock.h>
+#endif
+#endif
+#else /* VBOX */
+# include <VBox/param.h>
+# include "misc.h"
+# include "ext.h"
+
+typedef const char *c_caddr_t;
+
+DECL_NO_RETURN(static void) panic (char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ vbox_slirp_printV(fmt, args);
+ va_end(args);
+ AssertFatalFailed();
+}
+/* for non-gnu compilers */
+# define __func__ RT_GCC_EXTENSION __FUNCTION__
+# ifndef __inline
+# ifdef __GNUC__
+# define __inline __inline__
+# else
+# define __inline
+# endif
+# endif
+
+# undef bzero
+# define bzero(a1, len) memset((a1), 0, (len))
+
+/* (vvl) some definitions from sys/param.h */
+/*
+ * Constants related to network buffer management.
+ * MCLBYTES must be no larger than HOST_PAGE_SIZE.
+ */
+# ifndef MSIZE
+# define MSIZE 256 /* size of an mbuf */
+# endif /* MSIZE */
+
+# ifndef MCLSHIFT
+# define MCLSHIFT 11 /* convert bytes to mbuf clusters */
+# endif /* MCLSHIFT */
+
+# ifndef MCLBYTES
+# define MCLBYTES (1 << MCLSHIFT) /* size of an mbuf cluster */
+# endif /*MCLBYTES*/
+
+# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+# define MJUMPAGESIZE HOST_PAGE_SIZE /* jumbo cluster 4k */
+# else
+# define MJUMPAGESIZE (4 * 1024) /* jumbo cluster 4k */
+# endif
+# define MJUM9BYTES (9 * 1024) /* jumbo cluster 9k */
+# define MJUM16BYTES (16 * 1024) /* jumbo cluster 16k */
+#endif /* VBOX */
+
+/*
+ * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead.
+ * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in
+ * sys/param.h), which has no additional overhead and is used instead of the
+ * internal data area; this is done when at least MINCLSIZE of data must be
+ * stored. Additionally, it is possible to allocate a separate buffer
+ * externally and attach it to the mbuf in a way similar to that of mbuf
+ * clusters.
+ */
+#define MLEN (MSIZE - sizeof(struct m_hdr)) /* normal data len */
+#define MHLEN (MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */
+#define MINCLSIZE (MHLEN + 1) /* smallest amount to put in cluster */
+#define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */
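+
+/*
+ * Worked example (illustrative, with hypothetical structure sizes): given
+ * the MSIZE of 256 defined above, a 32-byte struct m_hdr and a 48-byte
+ * struct pkthdr would yield
+ *
+ *     MLEN      = 256 - 32 = 224
+ *     MHLEN     = 224 - 48 = 176
+ *     MINCLSIZE = 176 + 1  = 177
+ *
+ * i.e. any payload too large for a single header mbuf goes to a cluster.
+ * The real values depend on the structure layouts declared below.
+ */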
+
+#if defined(_KERNEL) || defined(VBOX)
+/*-
+ * Macros for type conversion:
+ * mtod(m, t) -- Convert mbuf pointer to data pointer of correct type.
+ * dtom(x) -- Convert data pointer within mbuf to mbuf pointer (XXX).
+ */
+#define mtod(m, t) ((t)((m)->m_data))
+#define dtom(x) ((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))
+
+/*
+ * Argument structure passed to UMA routines during mbuf and packet
+ * allocations.
+ */
+struct mb_args {
+ int flags; /* Flags for mbuf being allocated */
+ short type; /* Type of mbuf being allocated */
+};
+#endif /* _KERNEL */
+
+#if defined(__LP64__)
+#define M_HDR_PAD 6
+#else
+#define M_HDR_PAD 2
+#endif
+
+/*
+ * Header present at the beginning of every mbuf.
+ */
+struct m_hdr {
+ struct mbuf *mh_next; /* next buffer in chain */
+ struct mbuf *mh_nextpkt; /* next chain in queue/record */
+ caddr_t mh_data; /* location of data */
+ int mh_len; /* amount of data in this mbuf */
+ int mh_flags; /* flags; see below */
+ short mh_type; /* type of data in this mbuf */
+#ifdef VBOX
+ struct socket *mh_so; /* socket associated with mbuf */
+ TAILQ_ENTRY(mbuf) mh_ifq;
+#endif
+ uint8_t pad[M_HDR_PAD];/* word align */
+};
+
+/*
+ * Packet tag structure (see below for details).
+ */
+struct m_tag {
+ SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */
+ u_int16_t m_tag_id; /* Tag ID */
+ u_int16_t m_tag_len; /* Length of data */
+ u_int32_t m_tag_cookie; /* ABI/Module ID */
+ void (*m_tag_free)(struct m_tag *);
+};
+
+/*
+ * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
+ */
+struct pkthdr {
+ struct ifnet *rcvif; /* rcv interface */
+ /* variables for ip and tcp reassembly */
+ void *header; /* pointer to packet header */
+ int len; /* total packet length */
+ /* variables for hardware checksum */
+ int csum_flags; /* flags regarding checksum */
+ int csum_data; /* data field used by csum routines */
+ u_int16_t tso_segsz; /* TSO segment size */
+ u_int16_t ether_vtag; /* Ethernet 802.1p+q vlan tag */
+ SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
+};
+
+/*
+ * Description of external storage mapped into mbuf; valid only if M_EXT is
+ * set.
+ */
+struct m_ext {
+ caddr_t ext_buf; /* start of buffer */
+ void (*ext_free) /* free routine if not the usual */
+ (void *, void *);
+ void *ext_args; /* optional argument pointer */
+ u_int ext_size; /* size of buffer, for ext_free */
+#ifdef VBOX
+ volatile uint32_t *ref_cnt; /* pointer to ref count info */
+#else
+ volatile u_int *ref_cnt; /* pointer to ref count info */
+#endif
+ int ext_type; /* type of external storage */
+};
+
+/*
+ * The core of the mbuf object along with some shortcut defines for practical
+ * purposes.
+ */
+struct mbuf {
+ struct m_hdr m_hdr;
+ union {
+ struct {
+ struct pkthdr MH_pkthdr; /* M_PKTHDR set */
+ union {
+ struct m_ext MH_ext; /* M_EXT set */
+ char MH_databuf[MHLEN];
+ } MH_dat;
+ } MH;
+ char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */
+ } M_dat;
+};
+#define m_next m_hdr.mh_next
+#define m_len m_hdr.mh_len
+#define m_data m_hdr.mh_data
+#define m_type m_hdr.mh_type
+#define m_flags m_hdr.mh_flags
+#define m_nextpkt m_hdr.mh_nextpkt
+#define m_act m_nextpkt
+#define m_pkthdr M_dat.MH.MH_pkthdr
+#define m_ext M_dat.MH.MH_dat.MH_ext
+#define m_pktdat M_dat.MH.MH_dat.MH_databuf
+#define m_dat M_dat.M_databuf
+#ifdef VBOX
+# define m_so m_hdr.mh_so
+# define ifq_so m_hdr.mh_so
+# define m_ifq m_hdr.mh_ifq
+#endif
+
+/*
+ * mbuf flags.
+ */
+#define M_EXT 0x00000001 /* has associated external storage */
+#define M_PKTHDR 0x00000002 /* start of record */
+#define M_EOR 0x00000004 /* end of record */
+#define M_RDONLY 0x00000008 /* associated data is marked read-only */
+#define M_PROTO1 0x00000010 /* protocol-specific */
+#define M_PROTO2 0x00000020 /* protocol-specific */
+#define M_PROTO3 0x00000040 /* protocol-specific */
+#define M_PROTO4 0x00000080 /* protocol-specific */
+#define M_PROTO5 0x00000100 /* protocol-specific */
+#define M_BCAST 0x00000200 /* send/received as link-level broadcast */
+#define M_MCAST 0x00000400 /* send/received as link-level multicast */
+#define M_FRAG 0x00000800 /* packet is a fragment of a larger packet */
+#define M_FIRSTFRAG 0x00001000 /* packet is first fragment */
+#define M_LASTFRAG 0x00002000 /* packet is last fragment */
+#define M_SKIP_FIREWALL 0x00004000 /* skip firewall processing */
+#define M_FREELIST 0x00008000 /* mbuf is on the free list */
+#define M_VLANTAG 0x00010000 /* ether_vtag is valid */
+#define M_PROMISC 0x00020000 /* packet was not for us */
+#define M_NOFREE 0x00040000 /* do not free mbuf, embedded in cluster */
+#define M_PROTO6 0x00080000 /* protocol-specific */
+#define M_PROTO7 0x00100000 /* protocol-specific */
+#define M_PROTO8 0x00200000 /* protocol-specific */
+/*
+ * For RELENG_{6,7} steal these flags for limited multiple routing table
+ * support. In RELENG_8 and beyond, use just one flag and a tag.
+ */
+#define M_FIB 0xF0000000 /* steal some bits to store fib number. */
+
+#define M_NOTIFICATION M_PROTO5 /* SCTP notification */
+
+/*
+ * Flags to purge when crossing layers.
+ */
+#define M_PROTOFLAGS \
+ (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8)
+
+/*
+ * Flags preserved when copying m_pkthdr.
+ */
+#define M_COPYFLAGS \
+ (M_PKTHDR|M_EOR|M_RDONLY|M_PROTOFLAGS|M_SKIP_FIREWALL|M_BCAST|M_MCAST|\
+ M_FRAG|M_FIRSTFRAG|M_LASTFRAG|M_VLANTAG|M_PROMISC|M_FIB)
+
+/*
+ * External buffer types: identify ext_buf type.
+ */
+#define EXT_CLUSTER 1 /* mbuf cluster */
+#define EXT_SFBUF 2 /* sendfile(2)'s sf_bufs */
+#define EXT_JUMBOP 3 /* jumbo cluster 4096 bytes */
+#define EXT_JUMBO9 4 /* jumbo cluster 9216 bytes */
+#define EXT_JUMBO16 5 /* jumbo cluster 16184 bytes */
+#define EXT_PACKET 6 /* mbuf+cluster from packet zone */
+#define EXT_MBUF 7 /* external mbuf reference (M_IOVEC) */
+#define EXT_NET_DRV 100 /* custom ext_buf provided by net driver(s) */
+#define EXT_MOD_TYPE 200 /* custom module's ext_buf type */
+#define EXT_DISPOSABLE 300 /* can throw this buffer away w/page flipping */
+#define EXT_EXTREF 400 /* has externally maintained ref_cnt ptr */
+
+/*
+ * Flags indicating hw checksum support and sw checksum requirements. This
+ * field can be directly tested against if_data.ifi_hwassist.
+ */
+#define CSUM_IP 0x0001 /* will csum IP */
+#define CSUM_TCP 0x0002 /* will csum TCP */
+#define CSUM_UDP 0x0004 /* will csum UDP */
+#define CSUM_IP_FRAGS 0x0008 /* will csum IP fragments */
+#define CSUM_FRAGMENT 0x0010 /* will do IP fragmentation */
+#define CSUM_TSO 0x0020 /* will do TSO */
+
+#define CSUM_IP_CHECKED 0x0100 /* did csum IP */
+#define CSUM_IP_VALID 0x0200 /* ... the csum is valid */
+#define CSUM_DATA_VALID 0x0400 /* csum_data field is valid */
+#define CSUM_PSEUDO_HDR 0x0800 /* csum_data has pseudo hdr */
+
+#define CSUM_DELAY_DATA (CSUM_TCP | CSUM_UDP)
+#define CSUM_DELAY_IP (CSUM_IP) /* XXX add ipv6 here too? */
+
+/*
+ * mbuf types.
+ */
+#define MT_NOTMBUF 0 /* USED INTERNALLY ONLY! Object is not mbuf */
+#define MT_DATA 1 /* dynamic (data) allocation */
+#define MT_HEADER MT_DATA /* packet header, use M_PKTHDR instead */
+#define MT_SONAME 8 /* socket name */
+#define MT_CONTROL 14 /* extra-data protocol message */
+#define MT_OOBDATA 15 /* expedited data */
+#define MT_NTYPES 16 /* number of mbuf types for mbtypes[] */
+
+#define MT_NOINIT 255 /* Not a type but a flag to allocate
+ a non-initialized mbuf */
+
+#define MB_NOTAGS 0x1UL /* no tags attached to mbuf */
+
+/*
+ * General mbuf allocator statistics structure.
+ *
+ * Many of these statistics are no longer used; we instead track many
+ * allocator statistics through UMA's built-in statistics mechanism.
+ */
+struct mbstat {
+ u_long m_mbufs; /* XXX */
+ u_long m_mclusts; /* XXX */
+
+ u_long m_drain; /* times drained protocols for space */
+ u_long m_mcfail; /* XXX: times m_copym failed */
+ u_long m_mpfail; /* XXX: times m_pullup failed */
+ u_long m_msize; /* length of an mbuf */
+ u_long m_mclbytes; /* length of an mbuf cluster */
+ u_long m_minclsize; /* min length of data to allocate a cluster */
+ u_long m_mlen; /* length of data in an mbuf */
+ u_long m_mhlen; /* length of data in a header mbuf */
+
+ /* Number of mbtypes (gives # elems in mbtypes[] array) */
+ short m_numtypes;
+
+ /* XXX: Sendfile stats should eventually move to their own struct */
+ u_long sf_iocnt; /* times sendfile had to do disk I/O */
+ u_long sf_allocfail; /* times sfbuf allocation failed */
+ u_long sf_allocwait; /* times sfbuf allocation had to wait */
+};
+
+/*
+ * Flags specifying how an allocation should be made.
+ *
+ * The flag to use is as follows:
+ * - M_DONTWAIT or M_NOWAIT from an interrupt handler to not block allocation.
+ * - M_WAIT or M_WAITOK or M_TRYWAIT from wherever it is safe to block.
+ *
+ * M_DONTWAIT/M_NOWAIT means that we will not block the thread explicitly and
+ * if we cannot allocate immediately we may return NULL, whereas
+ * M_WAIT/M_WAITOK/M_TRYWAIT means that if we cannot allocate resources we
+ * will block until they are available, and thus never return NULL.
+ *
+ * XXX Eventually just phase this out to use M_WAITOK/M_NOWAIT.
+ */
+#define MBTOM(how) (how)
+#ifndef VBOX
+#define M_DONTWAIT M_NOWAIT
+#define M_TRYWAIT M_WAITOK
+#define M_WAIT M_WAITOK
+#else
+/* @todo (r=vvl) not sure we can do it in NAT */
+# define M_WAITOK 0
+# define M_NOWAIT 0
+# define M_DONTWAIT 0
+# define M_TRYWAIT 0
+# define M_WAIT 0
+#endif
+
+/*
+ * String names of mbuf-related UMA(9) and malloc(9) types. Exposed to
+ * !_KERNEL so that monitoring tools can look up the zones with
+ * libmemstat(3).
+ */
+#define MBUF_MEM_NAME "mbuf"
+#define MBUF_CLUSTER_MEM_NAME "mbuf_cluster"
+#define MBUF_PACKET_MEM_NAME "mbuf_packet"
+#define MBUF_JUMBOP_MEM_NAME "mbuf_jumbo_pagesize"
+#define MBUF_JUMBO9_MEM_NAME "mbuf_jumbo_9k"
+#define MBUF_JUMBO16_MEM_NAME "mbuf_jumbo_16k"
+#define MBUF_TAG_MEM_NAME "mbuf_tag"
+#define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt"
+
+#if defined(_KERNEL) || defined(VBOX)
+
+#ifdef WITNESS
+#define MBUF_CHECKSLEEP(how) do { \
+ if (how == M_WAITOK) \
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \
+ "Sleeping in \"%s\"", __func__); \
+} while (0)
+#else
+#define MBUF_CHECKSLEEP(how)
+#endif
+
+/*
+ * Network buffer allocation API
+ *
+ * The rest of it is defined in kern/kern_mbuf.c
+ */
+
+#ifndef VBOX
+extern uma_zone_t zone_mbuf;
+extern uma_zone_t zone_clust;
+extern uma_zone_t zone_pack;
+extern uma_zone_t zone_jumbop;
+extern uma_zone_t zone_jumbo9;
+extern uma_zone_t zone_jumbo16;
+extern uma_zone_t zone_ext_refcnt;
+#endif
+
+#ifndef VBOX
+static __inline struct mbuf *m_getcl(int how, short type, int flags);
+static __inline struct mbuf *m_get(int how, short type);
+static __inline struct mbuf *m_gethdr(int how, short type);
+static __inline struct mbuf *m_getjcl(int how, short type, int flags,
+ int size);
+static __inline struct mbuf *m_getclr(int how, short type); /* XXX */
+static __inline struct mbuf *m_free(struct mbuf *m);
+static __inline void m_clget(struct mbuf *m, int how);
+static __inline void *m_cljget(struct mbuf *m, int how, int size);
+void mb_free_ext(struct mbuf *);
+#else
+static __inline struct mbuf *m_getcl(PNATState pData, int how, short type, int flags);
+static __inline struct mbuf *m_get(PNATState pData, int how, short type);
+static __inline struct mbuf *m_gethdr(PNATState pData, int how, short type);
+static __inline struct mbuf *m_getjcl(PNATState pData, int how,
+ short type, int flags, int size);
+static __inline struct mbuf *m_getclr(PNATState pData, int how, short type); /* XXX */
+static __inline struct mbuf *m_free(PNATState pData, struct mbuf *m);
+static __inline void m_clget(PNATState pData, struct mbuf *m, int how);
+static __inline void *m_cljget(PNATState pData, struct mbuf *m, int how, int size);
+void mb_free_ext(PNATState, struct mbuf *);
+#endif
+static __inline void m_chtype(struct mbuf *m, short new_type);
+static __inline struct mbuf *m_last(struct mbuf *m);
+
+static __inline int
+m_gettype(int size)
+{
+ int type;
+
+ switch (size) {
+ case MSIZE:
+ type = EXT_MBUF;
+ break;
+ case MCLBYTES:
+ type = EXT_CLUSTER;
+ break;
+#if MJUMPAGESIZE != MCLBYTES
+ case MJUMPAGESIZE:
+ type = EXT_JUMBOP;
+ break;
+#endif
+ case MJUM9BYTES:
+ type = EXT_JUMBO9;
+ break;
+ case MJUM16BYTES:
+ type = EXT_JUMBO16;
+ break;
+ default:
+ panic("%s: invalid cluster size", __func__);
+ }
+
+ return (type);
+}
+
+static __inline uma_zone_t
+#ifndef VBOX
+m_getzone(int size)
+#else
+m_getzone(PNATState pData, int size)
+#endif
+{
+ uma_zone_t zone;
+
+ switch (size) {
+ case MSIZE:
+ zone = zone_mbuf;
+ break;
+ case MCLBYTES:
+ zone = zone_clust;
+ break;
+#if MJUMPAGESIZE != MCLBYTES
+ case MJUMPAGESIZE:
+ zone = zone_jumbop;
+ break;
+#endif
+ case MJUM9BYTES:
+ zone = zone_jumbo9;
+ break;
+ case MJUM16BYTES:
+ zone = zone_jumbo16;
+ break;
+ default:
+ panic("%s: invalid cluster size", __func__);
+ }
+
+ return (zone);
+}
+
+static __inline struct mbuf *
+#ifndef VBOX
+m_get(int how, short type)
+#else
+m_get(PNATState pData, int how, short type)
+#endif
+{
+ struct mb_args args;
+
+ args.flags = 0;
+ args.type = type;
+ return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
+}
+
+/*
+ * XXX This should be deprecated, very little use.
+ */
+static __inline struct mbuf *
+#ifndef VBOX
+m_getclr(int how, short type)
+#else
+m_getclr(PNATState pData, int how, short type)
+#endif
+{
+ struct mbuf *m;
+ struct mb_args args;
+
+ args.flags = 0;
+ args.type = type;
+ m = uma_zalloc_arg(zone_mbuf, &args, how);
+ if (m != NULL)
+ bzero(m->m_data, MLEN);
+ return (m);
+}
+
+static __inline struct mbuf *
+#ifndef VBOX
+m_gethdr(int how, short type)
+#else
+m_gethdr(PNATState pData, int how, short type)
+#endif
+{
+ struct mb_args args;
+
+ args.flags = M_PKTHDR;
+ args.type = type;
+ return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
+}
+
+static __inline struct mbuf *
+#ifndef VBOX
+m_getcl(int how, short type, int flags)
+#else
+m_getcl(PNATState pData, int how, short type, int flags)
+#endif
+{
+ struct mb_args args;
+
+ args.flags = flags;
+ args.type = type;
+ return ((struct mbuf *)(uma_zalloc_arg(zone_pack, &args, how)));
+}
+
+/*
+ * m_getjcl() returns an mbuf with a cluster of the specified size attached.
+ * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
+ *
+ * XXX: This is rather large; maybe it should be a real function.
+ */
+static __inline struct mbuf *
+#ifndef VBOX
+m_getjcl(int how, short type, int flags, int size)
+#else
+m_getjcl(PNATState pData, int how, short type, int flags, int size)
+#endif
+{
+ struct mb_args args;
+ struct mbuf *m, *n;
+ uma_zone_t zone;
+
+ args.flags = flags;
+ args.type = type;
+
+ m = uma_zalloc_arg(zone_mbuf, &args, how);
+ if (m == NULL)
+ return (NULL);
+
+#ifndef VBOX
+ zone = m_getzone(size);
+#else
+ zone = m_getzone(pData, size);
+#endif
+ n = uma_zalloc_arg(zone, m, how);
+ if (n == NULL) {
+ uma_zfree(zone_mbuf, m);
+ return (NULL);
+ }
+ return (m);
+}
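+
+/*
+ * Illustrative usage sketch (not part of the original sources): requesting
+ * a 9k jumbo cluster in one step; "size" must be one of the four constants
+ * named above or the zone lookup panics:
+ *
+ *     m = m_getjcl(pData, M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
+ *     if (m == NULL)
+ *         return;         /* allocation failed; nothing to clean up */
+ */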
+
+#ifndef VBOX
+static __inline void
+m_free_fast(struct mbuf *m)
+{
+ KASSERT(SLIST_EMPTY(&m->m_pkthdr.tags), ("doing fast free of mbuf with tags"));
+
+ uma_zfree_arg(zone_mbuf, m, (void *)MB_NOTAGS);
+}
+#else
+static __inline void
+m_free_fast(PNATState pData, struct mbuf *m)
+{
+ AssertMsg(SLIST_EMPTY(&m->m_pkthdr.tags), ("doing fast free of mbuf with tags"));
+
+ uma_zfree_arg(zone_mbuf, m, (void *)(uintptr_t)MB_NOTAGS);
+}
+#endif
+
+static __inline struct mbuf *
+#ifndef VBOX
+m_free(struct mbuf *m)
+#else
+m_free(PNATState pData, struct mbuf *m)
+#endif
+{
+ struct mbuf *n = m->m_next;
+
+ if (m->m_flags & M_EXT)
+#ifndef VBOX
+ mb_free_ext(m);
+#else
+ mb_free_ext(pData, m);
+#endif
+ else if ((m->m_flags & M_NOFREE) == 0)
+ uma_zfree(zone_mbuf, m);
+ return (n);
+}
+
+static __inline void
+#ifndef VBOX
+m_clget(struct mbuf *m, int how)
+#else
+m_clget(PNATState pData, struct mbuf *m, int how)
+#endif
+{
+
+ if (m->m_flags & M_EXT)
+ printf("%s: %p mbuf already has cluster\n", __func__, m);
+ m->m_ext.ext_buf = (char *)NULL;
+ uma_zalloc_arg(zone_clust, m, how);
+ /*
+ * On a cluster allocation failure, drain the packet zone and retry,
+ * we might be able to loosen a few clusters up on the drain.
+ */
+ if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
+ zone_drain(zone_pack);
+ uma_zalloc_arg(zone_clust, m, how);
+ }
+}
+
+/*
+ * m_cljget() is different from m_clget() as it can allocate clusters without
+ * attaching them to an mbuf. In that case the return value is the pointer
+ * to the cluster of the requested size. If an mbuf was specified, it gets
+ * the cluster attached to it and the return value can be safely ignored.
+ * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
+ */
+static __inline void *
+#ifndef VBOX
+m_cljget(struct mbuf *m, int how, int size)
+#else
+m_cljget(PNATState pData, struct mbuf *m, int how, int size)
+#endif
+{
+ uma_zone_t zone;
+
+ if (m && m->m_flags & M_EXT)
+ printf("%s: %p mbuf already has cluster\n", __func__, m);
+ if (m != NULL)
+ m->m_ext.ext_buf = NULL;
+
+#ifndef VBOX
+ zone = m_getzone(size);
+#else
+ zone = m_getzone(pData, size);
+#endif
+ return (uma_zalloc_arg(zone, m, how));
+}
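+
+/*
+ * Illustrative usage sketch (not part of the original sources): grabbing a
+ * bare page-sized cluster with no mbuf attached and wiring it to an mbuf
+ * later with m_cljset():
+ *
+ *     void *cl = m_cljget(pData, NULL, M_DONTWAIT, MJUMPAGESIZE);
+ *     if (cl != NULL)
+ *         m_cljset(pData, m, cl, EXT_JUMBOP);
+ */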
+
+static __inline void
+#ifndef VBOX
+m_cljset(struct mbuf *m, void *cl, int type)
+#else
+m_cljset(PNATState pData, struct mbuf *m, void *cl, int type)
+#endif
+{
+ uma_zone_t zone;
+ int size;
+
+ switch (type) {
+ case EXT_CLUSTER:
+ size = MCLBYTES;
+ zone = zone_clust;
+ break;
+#if MJUMPAGESIZE != MCLBYTES
+ case EXT_JUMBOP:
+ size = MJUMPAGESIZE;
+ zone = zone_jumbop;
+ break;
+#endif
+ case EXT_JUMBO9:
+ size = MJUM9BYTES;
+ zone = zone_jumbo9;
+ break;
+ case EXT_JUMBO16:
+ size = MJUM16BYTES;
+ zone = zone_jumbo16;
+ break;
+ default:
+ panic("unknown cluster type");
+ break;
+ }
+
+ m->m_data = m->m_ext.ext_buf = cl;
+#ifdef VBOX
+ m->m_ext.ext_free = (void (*)(void *, void *))0;
+ m->m_ext.ext_args = NULL;
+#else
+ m->m_ext.ext_free = m->m_ext.ext_args = NULL;
+#endif
+ m->m_ext.ext_size = size;
+ m->m_ext.ext_type = type;
+ m->m_ext.ref_cnt = uma_find_refcnt(zone, cl);
+ m->m_flags |= M_EXT;
+
+}
+
+static __inline void
+m_chtype(struct mbuf *m, short new_type)
+{
+
+ m->m_type = new_type;
+}
+
+static __inline struct mbuf *
+m_last(struct mbuf *m)
+{
+
+ while (m->m_next)
+ m = m->m_next;
+ return (m);
+}
+
+/*
+ * mbuf, cluster, and external object allocation macros (for compatibility
+ * purposes).
+ */
+#define M_MOVE_PKTHDR(to, from) m_move_pkthdr((to), (from))
+#ifndef VBOX
+#define MGET(m, how, type) ((m) = m_get((how), (type)))
+#define MGETHDR(m, how, type) ((m) = m_gethdr((how), (type)))
+#define MCLGET(m, how) m_clget((m), (how))
+#define MEXTADD(m, buf, size, free, args, flags, type) \
+ m_extadd((m), (caddr_t)(buf), (size), (free), (args), (flags), (type))
+#define m_getm(m, len, how, type) \
+ m_getm2((m), (len), (how), (type), M_PKTHDR)
+#else /*!VBOX*/
+#define MGET(m, how, type) ((m) = m_get(pData, (how), (type)))
+#define MGETHDR(m, how, type) ((m) = m_gethdr(pData, (how), (type)))
+#define MCLGET(m, how) m_clget(pData, (m), (how))
+#define MEXTADD(m, buf, size, free, args, flags, type) \
+ m_extadd(pData, (m), (caddr_t)(buf), (size), (free), (args), (flags), (type))
+#define m_getm(m, len, how, type) \
+ m_getm2(pData, (m), (len), (how), (type), M_PKTHDR)
+#endif
+
+/*
+ * Evaluate TRUE if it's safe to write to the mbuf m's data region (this can
+ * be both the local data payload, or an external buffer area, depending on
+ * whether M_EXT is set).
+ */
+#define M_WRITABLE(m) (!((m)->m_flags & M_RDONLY) && \
+ (!(((m)->m_flags & M_EXT)) || \
+ (*((m)->m_ext.ref_cnt) == 1)))
+
+/* Check if the supplied mbuf has a packet header, or else panic. */
+#define M_ASSERTPKTHDR(m) \
+ KASSERT(m != NULL && m->m_flags & M_PKTHDR, \
+ ("%s: no mbuf packet header!", __func__))
+
+/*
+ * Ensure that the supplied mbuf is a valid, non-free mbuf.
+ *
+ * XXX: Broken at the moment. Need some UMA magic to make it work again.
+ */
+#define M_ASSERTVALID(m) \
+ KASSERT((((struct mbuf *)m)->m_flags & 0) == 0, \
+ ("%s: attempted use of a free mbuf!", __func__))
+
+/*
+ * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place an
+ * object of the specified size at the end of the mbuf, longword aligned.
+ */
+#define M_ALIGN(m, len) do { \
+ KASSERT(!((m)->m_flags & (M_PKTHDR|M_EXT)), \
+ ("%s: M_ALIGN not normal mbuf", __func__)); \
+ KASSERT((m)->m_data == (m)->m_dat, \
+ ("%s: M_ALIGN not a virgin mbuf", __func__)); \
+ (m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1); \
+} while (0)
+
+/*
+ * As above, for mbufs allocated with m_gethdr/MGETHDR or initialized by
+ * M_DUP/MOVE_PKTHDR.
+ */
+#define MH_ALIGN(m, len) do { \
+ KASSERT((m)->m_flags & M_PKTHDR && !((m)->m_flags & M_EXT), \
+ ("%s: MH_ALIGN not PKTHDR mbuf", __func__)); \
+ KASSERT((m)->m_data == (m)->m_pktdat, \
+ ("%s: MH_ALIGN not a virgin mbuf", __func__)); \
+ (m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1); \
+} while (0)
+
+/*
+ * Compute the amount of space available before the current start of data in
+ * an mbuf.
+ *
+ * The M_WRITABLE() is a temporary, conservative safety measure: the burden
+ * of checking writability of the mbuf data area rests solely with the caller.
+ */
+#define M_LEADINGSPACE(m) \
+ ((m)->m_flags & M_EXT ? \
+ (M_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0): \
+ (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat : \
+ (m)->m_data - (m)->m_dat)
+
+/*
+ * Compute the amount of space available after the end of data in an mbuf.
+ *
+ * The M_WRITABLE() is a temporary, conservative safety measure: the burden
+ * of checking writability of the mbuf data area rests solely with the caller.
+ */
+#define M_TRAILINGSPACE(m) \
+ ((m)->m_flags & M_EXT ? \
+ (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size \
+ - ((m)->m_data + (m)->m_len) : 0) : \
+ &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
+
+/*
+ * Arrange to prepend space of size plen to mbuf m. If a new mbuf must be
+ * allocated, how specifies whether to wait. If the allocation fails, the
+ * original mbuf chain is freed and m is set to NULL.
+ */
+#define M_PREPEND(m, plen, how) do { \
+ struct mbuf **_mmp = &(m); \
+ struct mbuf *_mm = *_mmp; \
+ int _mplen = (plen); \
+ int __mhow = (how); \
+ \
+ MBUF_CHECKSLEEP(how); \
+ if (M_LEADINGSPACE(_mm) >= _mplen) { \
+ _mm->m_data -= _mplen; \
+ _mm->m_len += _mplen; \
+ } else \
+ _mm = m_prepend(_mm, _mplen, __mhow); \
+ if (_mm != NULL && _mm->m_flags & M_PKTHDR) \
+ _mm->m_pkthdr.len += _mplen; \
+ *_mmp = _mm; \
+} while (0)
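+
+/*
+ * Usage sketch (editorial illustration): prepending a link-layer header.
+ * M_PREPEND() frees the chain and sets m to NULL on failure, so only a
+ * NULL check is needed afterwards; struct ether_header is assumed here.
+ *
+ *	M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
+ *	if (m == NULL)
+ *		return (ENOBUFS);
+ *	eh = mtod(m, struct ether_header *);
+ */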
+
+/*
+ * Change mbuf to new type. This is a relatively expensive operation and
+ * should be avoided.
+ */
+#define MCHTYPE(m, t) m_chtype((m), (t))
+
+/* Length to m_copy to copy all. */
+#define M_COPYALL 1000000000
+
+/* Compatibility with 4.3. */
+#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT)
+
+extern int max_datalen; /* MHLEN - max_hdr */
+extern int max_hdr; /* Largest link + protocol header */
+extern int max_linkhdr; /* Largest link-level header */
+extern int max_protohdr; /* Largest protocol header */
+extern struct mbstat mbstat; /* General mbuf stats/info */
+extern int nmbclusters; /* Maximum number of clusters */
+
+struct uio;
+
+void m_align(struct mbuf *, int);
+int m_apply(struct mbuf *, int, int,
+ int (*)(void *, void *, u_int), void *);
+#ifndef VBOX
+void m_adj(struct mbuf *, int);
+int m_append(struct mbuf *, int, c_caddr_t);
+struct mbuf *m_defrag(struct mbuf *, int);
+struct mbuf *m_dup(struct mbuf *, int);
+void m_cat(struct mbuf *, struct mbuf *);
+struct mbuf *m_collapse(struct mbuf *, int, int);
+void m_copyback(struct mbuf *, int, int, c_caddr_t);
+struct mbuf *m_copym(struct mbuf *, int, int, int);
+struct mbuf *m_copymdata(struct mbuf *, struct mbuf *,
+ int, int, int, int);
+struct mbuf *m_copypacket(struct mbuf *, int);
+struct mbuf *m_copyup(struct mbuf *n, int len, int dstoff);
+void m_extadd(struct mbuf *, caddr_t, u_int,
+ void (*)(void *, void *), void *, int, int);
+#else
+void m_adj(PNATState, struct mbuf *, int);
+int m_append(PNATState pData, struct mbuf *, int, c_caddr_t);
+struct mbuf *m_defrag(PNATState, struct mbuf *, int);
+struct mbuf *m_dup(PNATState, struct mbuf *, int);
+void m_cat(PNATState, struct mbuf *, struct mbuf *);
+struct mbuf *m_collapse(PNATState, struct mbuf *, int, int);
+void m_copyback(PNATState, struct mbuf *, int, int, c_caddr_t);
+struct mbuf *m_copym(PNATState, struct mbuf *, int, int, int);
+struct mbuf *m_copymdata(PNATState, struct mbuf *, struct mbuf *,
+ int, int, int, int);
+struct mbuf *m_copypacket(PNATState, struct mbuf *, int);
+struct mbuf *m_copyup(PNATState, struct mbuf *n, int len, int dstoff);
+void m_extadd(PNATState pData, struct mbuf *, caddr_t, u_int,
+ void (*)(void *, void *), void *, int, int);
+#endif
+void m_copydata(const struct mbuf *, int, int, caddr_t);
+void m_copy_pkthdr(struct mbuf *, struct mbuf *);
+void m_demote(struct mbuf *, int);
+struct mbuf *m_devget(char *, int, int, struct ifnet *,
+ void (*)(char *, caddr_t, u_int));
+int m_dup_pkthdr(struct mbuf *, struct mbuf *, int);
+u_int m_fixhdr(struct mbuf *);
+struct mbuf *m_fragment(struct mbuf *, int, int);
+#ifndef VBOX
+void m_freem(struct mbuf *);
+struct mbuf *m_getm2(struct mbuf *, int, int, short, int);
+struct mbuf *m_prepend(struct mbuf *, int, int);
+struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
+struct mbuf *m_pullup(struct mbuf *, int);
+int m_sanity(struct mbuf *, int);
+struct mbuf *m_split(struct mbuf *, int, int);
+struct mbuf *m_unshare(struct mbuf *, int how);
+#else
+void m_freem(PNATState pData, struct mbuf *);
+struct mbuf *m_getm2(PNATState pData, struct mbuf *, int, int, short, int);
+struct mbuf *m_prepend(PNATState, struct mbuf *, int, int);
+struct mbuf *m_pulldown(PNATState, struct mbuf *, int, int, int *);
+struct mbuf *m_pullup(PNATState, struct mbuf *, int);
+int m_sanity(PNATState, struct mbuf *, int);
+struct mbuf *m_split(PNATState, struct mbuf *, int, int);
+struct mbuf *m_unshare(PNATState, struct mbuf *, int how);
+#endif
+struct mbuf *m_getptr(struct mbuf *, int, int *);
+u_int m_length(struct mbuf *, struct mbuf **);
+void m_move_pkthdr(struct mbuf *, struct mbuf *);
+void m_print(const struct mbuf *, int);
+struct mbuf *m_uiotombuf(struct uio *, int, int, int, int);
+
+/*-
+ * Network packets may have annotations attached by affixing a list of
+ * "packet tags" to the pkthdr structure. Packet tags are dynamically
+ * allocated semi-opaque data structures that have a fixed header
+ * (struct m_tag) that specifies the size of the memory block and a
+ * <cookie,type> pair that identifies it. The cookie is a 32-bit unique
+ * unsigned value used to identify a module or ABI. By convention this value
+ * is chosen as the date+time that the module is created, expressed as the
+ * number of seconds since the epoch (e.g., using date -u +'%s'). The type
+ * value is an ABI/module-specific value that identifies a particular
+ * annotation and is private to the module. For compatibility with systems
+ * like OpenBSD that define packet tags w/o an ABI/module cookie, the value
+ * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find
+ * compatibility shim functions and several tag types are defined below.
+ * Users that do not require compatibility should use a private cookie value
+ * so that packet tag-related definitions can be maintained privately.
+ *
+ * Note that the packet tag returned by m_tag_alloc has the default memory
+ * alignment implemented by malloc. To reference private data one can use a
+ * construct like:
+ *
+ * struct m_tag *mtag = m_tag_alloc(...);
+ * struct foo *p = (struct foo *)(mtag+1);
+ *
+ * if the alignment of struct m_tag is sufficient for referencing members of
+ * struct foo. Otherwise it is necessary to embed struct m_tag within the
+ * private data structure to ensure proper alignment; e.g.,
+ *
+ * struct foo {
+ * struct m_tag tag;
+ * ...
+ * };
+ * struct foo *p = (struct foo *) m_tag_alloc(...);
+ * struct m_tag *mtag = &p->tag;
+ */
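+
+/*
+ * Worked sketch (editorial illustration) of the first construct above,
+ * with a hypothetical module cookie and payload type:
+ *
+ *	#define MY_COOKIE	1234567890	(creation date, date -u +'%s')
+ *	#define MY_TAG_INFO	1
+ *
+ *	struct m_tag *mtag = m_tag_alloc(MY_COOKIE, MY_TAG_INFO,
+ *	    sizeof(struct my_info), M_DONTWAIT);
+ *	if (mtag != NULL) {
+ *		struct my_info *p = (struct my_info *)(mtag + 1);
+ *		p->value = 42;
+ *		m_tag_prepend(m, mtag);
+ *	}
+ */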
+
+/*
+ * Persistent tags stay with an mbuf until the mbuf is reclaimed. Otherwise
+ * tags are expected to ``vanish'' when they pass through a network
+ * interface. For most interfaces this happens normally as the tags are
+ * reclaimed when the mbuf is freed. However, in some special cases
+ * reclaiming must be done manually. An example is packets that pass through
+ * the loopback interface. Also, one must be careful to do this when
+ * ``turning around'' packets (e.g., icmp_reflect).
+ *
+ * To mark a tag persistent, bitwise-OR this flag into the tag id when
+ * defining it. The tag will then be treated as described above.
+ */
+#define MTAG_PERSISTENT 0x800
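+
+/*
+ * Example (editorial illustration): a hypothetical tag id that survives
+ * loopback and other tag-stripping points:
+ *
+ *	#define MY_TAG_TRACE	(42 | MTAG_PERSISTENT)
+ */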
+
+#define PACKET_TAG_NONE 0 /* Nada */
+
+/* Packet tags for use with PACKET_ABI_COMPAT. */
+#define PACKET_TAG_IPSEC_IN_DONE 1 /* IPsec applied, in */
+#define PACKET_TAG_IPSEC_OUT_DONE 2 /* IPsec applied, out */
+#define PACKET_TAG_IPSEC_IN_CRYPTO_DONE 3 /* NIC IPsec crypto done */
+#define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED 4 /* NIC IPsec crypto req'ed */
+#define PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO 5 /* NIC notifies IPsec */
+#define PACKET_TAG_IPSEC_PENDING_TDB 6 /* Reminder to do IPsec */
+#define PACKET_TAG_BRIDGE 7 /* Bridge processing done */
+#define PACKET_TAG_GIF 8 /* GIF processing done */
+#define PACKET_TAG_GRE 9 /* GRE processing done */
+#define PACKET_TAG_IN_PACKET_CHECKSUM 10 /* NIC checksumming done */
+#define PACKET_TAG_ENCAP 11 /* Encap. processing */
+#define PACKET_TAG_IPSEC_SOCKET 12 /* IPSEC socket ref */
+#define PACKET_TAG_IPSEC_HISTORY 13 /* IPSEC history */
+#define PACKET_TAG_IPV6_INPUT 14 /* IPV6 input processing */
+#define PACKET_TAG_DUMMYNET 15 /* dummynet info */
+#define PACKET_TAG_DIVERT 17 /* divert info */
+#define PACKET_TAG_IPFORWARD 18 /* ipforward info */
+#define PACKET_TAG_MACLABEL (19 | MTAG_PERSISTENT) /* MAC label */
+#define PACKET_TAG_PF 21 /* PF + ALTQ information */
+#define PACKET_TAG_RTSOCKFAM 25 /* rtsock sa family */
+#define PACKET_TAG_IPOPTIONS 27 /* Saved IP options */
+#define PACKET_TAG_CARP 28 /* CARP info */
+#ifdef VBOX
+# define PACKET_TAG_ALIAS 0xab01
+# define PACKET_TAG_ETHER 0xab02
+# define PACKET_SERVICE 0xab03
+#endif
+
+/* Specific cookies and tags. */
+
+/* Packet tag routines. */
+struct m_tag *m_tag_alloc(u_int32_t, int, int, int);
+void m_tag_delete(struct mbuf *, struct m_tag *);
+void m_tag_delete_chain(struct mbuf *, struct m_tag *);
+void m_tag_free_default(struct m_tag *);
+struct m_tag *m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
+struct m_tag *m_tag_copy(struct m_tag *, int);
+int m_tag_copy_chain(struct mbuf *, struct mbuf *, int);
+void m_tag_delete_nonpersistent(struct mbuf *);
+
+/*
+ * Initialize the list of tags associated with an mbuf.
+ */
+static __inline void
+m_tag_init(struct mbuf *m)
+{
+
+ SLIST_INIT(&m->m_pkthdr.tags);
+}
+
+/*
+ * Set up the contents of a tag. Note that this does not fill in the free
+ * method; the caller is expected to do that.
+ *
+ * XXX probably should be called m_tag_init, but that was already taken.
+ */
+static __inline void
+m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
+{
+
+ t->m_tag_id = type;
+ t->m_tag_len = len;
+ t->m_tag_cookie = cookie;
+}
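+
+/*
+ * Usage sketch (editorial illustration): after m_tag_setup() the caller
+ * still owns the free method, typically m_tag_free_default() (declared
+ * above) for plainly allocated tags.  MY_COOKIE/MY_TAG_INFO are the
+ * hypothetical values from the earlier sketch.
+ *
+ *	m_tag_setup(t, MY_COOKIE, MY_TAG_INFO, sizeof(struct my_info));
+ *	t->m_tag_free = m_tag_free_default;
+ */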
+
+/*
+ * Reclaim resources associated with a tag.
+ */
+static __inline void
+m_tag_free(struct m_tag *t)
+{
+
+ (*t->m_tag_free)(t);
+}
+
+/*
+ * Return the first tag associated with an mbuf.
+ */
+static __inline struct m_tag *
+m_tag_first(struct mbuf *m)
+{
+
+ return (SLIST_FIRST(&m->m_pkthdr.tags));
+}
+
+/*
+ * Return the next tag in the list of tags associated with an mbuf.
+ */
+static __inline struct m_tag *
+m_tag_next(struct mbuf *m, struct m_tag *t)
+{
+ NOREF(m);
+ return (SLIST_NEXT(t, m_tag_link));
+}
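+
+/*
+ * Usage sketch (editorial illustration): walking every tag on an mbuf
+ * with the two accessors above (m_tag_locate() performs this kind of
+ * matching for real lookups):
+ *
+ *	struct m_tag *t;
+ *	for (t = m_tag_first(m); t != NULL; t = m_tag_next(m, t))
+ *		if (t->m_tag_cookie == MY_COOKIE)
+ *			break;
+ */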
+
+/*
+ * Prepend a tag to the list of tags associated with an mbuf.
+ */
+static __inline void
+m_tag_prepend(struct mbuf *m, struct m_tag *t)
+{
+
+ SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
+}
+
+/*
+ * Unlink a tag from the list of tags associated with an mbuf.
+ */
+static __inline void
+m_tag_unlink(struct mbuf *m, struct m_tag *t)
+{
+
+ SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
+}
+
+/* These are for OpenBSD compatibility. */
+#define MTAG_ABI_COMPAT 0 /* compatibility ABI */
+
+static __inline struct m_tag *
+m_tag_get(int type, int length, int fWait)
+{
+ return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, fWait));
+}
+
+static __inline struct m_tag *
+m_tag_find(struct mbuf *m, int type, struct m_tag *start)
+{
+ return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL :
+ m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
+}
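+
+/*
+ * Usage sketch (editorial illustration) of the compatibility shims:
+ * allocate with m_tag_get() and look the tag up again with m_tag_find(),
+ * both implicitly using MTAG_ABI_COMPAT:
+ *
+ *	struct m_tag *t = m_tag_get(PACKET_TAG_GRE, 0, M_DONTWAIT);
+ *	if (t != NULL)
+ *		m_tag_prepend(m, t);
+ *	...
+ *	t = m_tag_find(m, PACKET_TAG_GRE, NULL);
+ */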
+
+/* XXX: Temporary FIB methods; these will probably eventually use tags. */
+#define M_FIBSHIFT 28
+#define M_FIBMASK 0x0F
+
+/* Get the FIB from an mbuf; an mbuf with no FIB set yields 0, the default. */
+#define M_GETFIB(_m) \
+ ((((_m)->m_flags & M_FIB) >> M_FIBSHIFT) & M_FIBMASK)
+
+#define M_SETFIB(_m, _fib) do { \
+ _m->m_flags &= ~M_FIB; \
+ _m->m_flags |= (((_fib) << M_FIBSHIFT) & M_FIB); \
+} while (0)
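+
+/*
+ * Usage sketch (editorial illustration): stamping a packet with a
+ * routing table index and reading it back; "4" is an arbitrary FIB
+ * number for the example.
+ *
+ *	M_SETFIB(m, 4);
+ *	fib = M_GETFIB(m);
+ */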
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_MBUF_H_ */
diff --git a/src/VBox/Devices/Network/slirp/bsd/sys/sbuf.h b/src/VBox/Devices/Network/slirp/bsd/sys/sbuf.h
new file mode 100644
index 00000000..3f59c46f
--- /dev/null
+++ b/src/VBox/Devices/Network/slirp/bsd/sys/sbuf.h
@@ -0,0 +1,95 @@
+/*-
+ * Copyright (c) 2000 Poul-Henning Kamp and Dag-Erling Coïdan Smørgrav
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/sys/sbuf.h,v 1.14.18.1.2.1 2009/04/15 03:14:26 kensmith Exp $
+ */
+
+#ifndef _SYS_SBUF_H_
+#define _SYS_SBUF_H_
+
+#ifndef VBOX
+#include <sys/_types.h>
+#else
+# include <iprt/types.h>
+#endif
+
+/*
+ * Structure definition
+ */
+struct sbuf {
+ char *s_buf; /* storage buffer */
+ void *s_unused; /* binary compatibility */
+ int s_size; /* size of storage buffer */
+ int s_len; /* current length of string */
+#define SBUF_FIXEDLEN 0x00000000 /* fixed length buffer (default) */
+#define SBUF_AUTOEXTEND 0x00000001 /* automatically extend buffer */
+#define SBUF_USRFLAGMSK 0x0000ffff /* mask of flags the user may specify */
+#define SBUF_DYNAMIC 0x00010000 /* s_buf must be freed */
+#define SBUF_FINISHED 0x00020000 /* set by sbuf_finish() */
+#define SBUF_OVERFLOWED 0x00040000 /* sbuf overflowed */
+#define SBUF_DYNSTRUCT 0x00080000 /* sbuf must be freed */
+ int s_flags; /* flags */
+};
+
+__BEGIN_DECLS
+/*
+ * API functions
+ */
+struct sbuf *sbuf_new(struct sbuf *, char *, int, int);
+#define sbuf_new_auto() \
+ sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND)
+void sbuf_clear(struct sbuf *);
+int sbuf_setpos(struct sbuf *, int);
+int sbuf_bcat(struct sbuf *, const void *, size_t);
+int sbuf_bcpy(struct sbuf *, const void *, size_t);
+int sbuf_cat(struct sbuf *, const char *);
+int sbuf_cpy(struct sbuf *, const char *);
+#ifndef VBOX
+int sbuf_printf(struct sbuf *, const char *, ...) __printflike(2, 3);
+int sbuf_vprintf(struct sbuf *, const char *, __va_list) __printflike(2, 0);
+#else
+int sbuf_printf(struct sbuf *, const char *, ...);
+int sbuf_vprintf(struct sbuf *, const char *, va_list);
+#endif
+int sbuf_putc(struct sbuf *, int);
+int sbuf_trim(struct sbuf *);
+int sbuf_overflowed(struct sbuf *);
+void sbuf_finish(struct sbuf *);
+char *sbuf_data(struct sbuf *);
+int sbuf_len(struct sbuf *);
+int sbuf_done(struct sbuf *);
+void sbuf_delete(struct sbuf *);
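+
+/*
+ * Usage sketch (editorial illustration): the canonical sbuf life cycle,
+ * an auto-extending buffer filled with formatted text.  use_string() and
+ * ntags are stand-ins for the example.
+ *
+ *	struct sbuf *sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
+ *	sbuf_printf(sb, "%d tags", ntags);
+ *	sbuf_finish(sb);
+ *	use_string(sbuf_data(sb), sbuf_len(sb));
+ *	sbuf_delete(sb);
+ */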
+
+#ifdef _KERNEL
+struct uio;
+struct sbuf *sbuf_uionew(struct sbuf *, struct uio *, int *);
+int sbuf_bcopyin(struct sbuf *, const void *, size_t);
+int sbuf_copyin(struct sbuf *, const void *, size_t);
+#endif
+__END_DECLS
+
+#endif