author    Vijay Kumar Banerjee <vijay@rtems.org>  2021-03-16 21:35:33 -0600
committer Vijay Kumar Banerjee <vijay@rtems.org>  2021-03-21 11:32:28 -0600
commit    5ad5279ca879c3cf0faf3141c84067bdf232c3a2 (patch)
tree      415a3fa457bfb011b3f7c0a5bc1d3252d78722d6 /lwip/src/core
parent    d5eb08aab5c6e051e70cad2c9ecef4dc70ce85e1 (diff)
lwip: Add src files
+ Add rtems port from uLan
+ Add waf script
Diffstat (limited to 'lwip/src/core')
-rw-r--r--  lwip/src/core/altcp.c            681
-rw-r--r--  lwip/src/core/altcp_alloc.c       87
-rw-r--r--  lwip/src/core/altcp_tcp.c        543
-rw-r--r--  lwip/src/core/def.c              240
-rw-r--r--  lwip/src/core/dns.c             1631
-rw-r--r--  lwip/src/core/inet_chksum.c      608
-rw-r--r--  lwip/src/core/init.c             380
-rw-r--r--  lwip/src/core/ip.c               167
-rw-r--r--  lwip/src/core/ipv4/autoip.c      527
-rw-r--r--  lwip/src/core/ipv4/dhcp.c       1990
-rw-r--r--  lwip/src/core/ipv4/etharp.c     1204
-rw-r--r--  lwip/src/core/ipv4/icmp.c        404
-rw-r--r--  lwip/src/core/ipv4/igmp.c        801
-rw-r--r--  lwip/src/core/ipv4/ip4.c        1132
-rw-r--r--  lwip/src/core/ipv4/ip4_addr.c    321
-rw-r--r--  lwip/src/core/ipv4/ip4_frag.c    894
-rw-r--r--  lwip/src/core/ipv6/dhcp6.c       812
-rw-r--r--  lwip/src/core/ipv6/ethip6.c      123
-rw-r--r--  lwip/src/core/ipv6/icmp6.c       425
-rw-r--r--  lwip/src/core/ipv6/inet6.c        53
-rw-r--r--  lwip/src/core/ipv6/ip6.c        1492
-rw-r--r--  lwip/src/core/ipv6/ip6_addr.c    343
-rw-r--r--  lwip/src/core/ipv6/ip6_frag.c    862
-rw-r--r--  lwip/src/core/ipv6/mld6.c        626
-rw-r--r--  lwip/src/core/ipv6/nd6.c        2434
-rw-r--r--  lwip/src/core/mem.c             1017
-rw-r--r--  lwip/src/core/memp.c             447
-rw-r--r--  lwip/src/core/netif.c           1795
-rw-r--r--  lwip/src/core/pbuf.c            1514
-rw-r--r--  lwip/src/core/raw.c              671
-rw-r--r--  lwip/src/core/stats.c            169
-rw-r--r--  lwip/src/core/sys.c              148
-rw-r--r--  lwip/src/core/tcp.c             2686
-rw-r--r--  lwip/src/core/tcp_in.c          2178
-rw-r--r--  lwip/src/core/tcp_out.c         2190
-rw-r--r--  lwip/src/core/timeouts.c         451
-rw-r--r--  lwip/src/core/udp.c             1314
37 files changed, 33360 insertions, 0 deletions
diff --git a/lwip/src/core/altcp.c b/lwip/src/core/altcp.c
new file mode 100644
index 0000000..d46d6cd
--- /dev/null
+++ b/lwip/src/core/altcp.c
@@ -0,0 +1,681 @@
+/**
+ * @file
+ * @defgroup altcp Application layered TCP Functions
+ * @ingroup altcp_api
+ *
+ * This file contains the common functions for altcp to work.
+ * For more details see @ref altcp_api.
+ */
+
+/**
+ * @defgroup altcp_api Application layered TCP Introduction
+ * @ingroup callbackstyle_api
+ *
+ * Overview
+ * --------
+ * altcp (application layered TCP connection API; to be used from TCPIP thread)
+ * is an abstraction layer that prevents applications from linking directly
+ * against the @ref tcp.h functions while providing the same functionality.
+ * It can be used, e.g., to add SSL/TLS (see LWIP_ALTCP_TLS) or proxy-connect
+ * support to an application written for the tcp callback API without that
+ * application knowing the protocol details.
+ *
+ * * This interface mimics the tcp callback API to the application while preventing
+ * direct linking (much like virtual functions).
+ * * This way, an application can make use of other application layer protocols
+ * on top of TCP without knowing the details (e.g. TLS, proxy connection).
+ * * This is achieved by simply including "lwip/altcp.h" instead of "lwip/tcp.h",
+ * replacing "struct tcp_pcb" with "struct altcp_pcb" and prefixing all functions
+ * with "altcp_" instead of "tcp_".
+ *
+ * With altcp support disabled (LWIP_ALTCP==0), applications written against the
+ * altcp API can still be compiled but are directly linked against the tcp.h
+ * callback API and then cannot use layered protocols. To minimize code changes
+ * in this case, the use of altcp_allocators is strongly suggested.
+ *
+ * Usage
+ * -----
+ * To make use of this API from an existing tcp raw API application:
+ * * Include "lwip/altcp.h" instead of "lwip/tcp.h"
+ * * Replace "struct tcp_pcb" with "struct altcp_pcb"
+ * * Prefix all called tcp API functions with "altcp_" instead of "tcp_" to link
+ * against the altcp functions
+ * * @ref altcp_new (and @ref altcp_new_ip_type/@ref altcp_new_ip6) take
+ * an @ref altcp_allocator_t as an argument, whereas the original tcp API
+ * functions take no arguments.
+ * * An @ref altcp_allocator_t allocator is an object that holds a pointer to an
+ * allocator function and a corresponding state (e.g. for TLS, the state
+ * may hold certificates or keys). This way, the application does not even
+ * need to know whether it uses TLS or pure TCP; this is handled at runtime
+ * by passing a specific allocator.
+ * * An application can alternatively bind hard to the altcp_tls API by calling
+ * @ref altcp_tls_new or @ref altcp_tls_wrap.
+ * * The TLS layer is not directly implemented by lwIP, but a port to mbedTLS is
+ * provided.
+ * * Another altcp layer is proxy-connect to use TLS behind an HTTP proxy (see
+ * @ref altcp_proxyconnect.h)
+ *
+ * altcp_allocator_t
+ * -----------------
+ * An altcp allocator is created by the application by combining an allocator
+ * callback function and a corresponding state, e.g.:\code{.c}
+ * static const unsigned char cert[] = {0x2D, ... (see mbedTLS doc for how to create this)};
+ * struct altcp_tls_config * conf = altcp_tls_create_config_client(cert, sizeof(cert));
+ * altcp_allocator_t tls_allocator = {
+ * altcp_tls_alloc, conf
+ * };
+ * \endcode
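+ *
+ * The same pattern works for plain TCP without TLS (editorial sketch: the
+ * altcp_tcp_alloc allocator from altcp_tcp.c ignores its argument):\code{.c}
+ * altcp_allocator_t tcp_allocator = { altcp_tcp_alloc, NULL };
+ * struct altcp_pcb *conn = altcp_new(&tcp_allocator);
+ * \endcode
+ * The application code stays identical whether TLS is layered in or not;
+ * only the allocator passed at creation time changes.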
+ *
+ *
+ * struct altcp_tls_config
+ * -----------------------
+ * The struct altcp_tls_config holds state that is needed to create new TLS client
+ * or server connections (e.g. certificates and private keys).
+ *
+ * It is not defined by lwIP itself but by the TLS port (e.g. altcp_tls to mbedTLS
+ * adaptation). However, the parameters used to create it are defined in @ref
+ * altcp_tls.h (see @ref altcp_tls_create_config_server_privkey_cert for servers
+ * and @ref altcp_tls_create_config_client/@ref altcp_tls_create_config_client_2wayauth
+ * for clients).
+ *
+ * For mbedTLS, ensure that certificates can be parsed by 'mbedtls_x509_crt_parse()' and
+ * private keys can be parsed by 'mbedtls_pk_parse_key()'.
+ */
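+
+/* Editorial sketch (not part of upstream lwIP): the pieces above combined
+ * into a TLS client connect. ca_cert, server_ip and my_connected_cb are
+ * application-side placeholders.
+ *
+ *   struct altcp_tls_config *conf =
+ *     altcp_tls_create_config_client(ca_cert, sizeof(ca_cert));
+ *   altcp_allocator_t tls_allocator = { altcp_tls_alloc, conf };
+ *   struct altcp_pcb *conn = altcp_new(&tls_allocator);
+ *   if (conn != NULL) {
+ *     altcp_connect(conn, &server_ip, 443, my_connected_cb);
+ *   }
+ */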
+
+/*
+ * Copyright (c) 2017 Simon Goldschmidt
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt <goldsimon@gmx.de>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/altcp.h"
+#include "lwip/priv/altcp_priv.h"
+#include "lwip/altcp_tcp.h"
+#include "lwip/tcp.h"
+#include "lwip/mem.h"
+
+#include <string.h>
+
+extern const struct altcp_functions altcp_tcp_functions;
+
+/**
+ * For altcp layer implementations only: allocate a new struct altcp_pcb from the pool
+ * and zero the memory
+ */
+struct altcp_pcb *
+altcp_alloc(void)
+{
+ struct altcp_pcb *ret = (struct altcp_pcb *)memp_malloc(MEMP_ALTCP_PCB);
+ if (ret != NULL) {
+ memset(ret, 0, sizeof(struct altcp_pcb));
+ }
+ return ret;
+}
+
+/**
+ * For altcp layer implementations only: return a struct altcp_pcb to the pool
+ */
+void
+altcp_free(struct altcp_pcb *conn)
+{
+ if (conn) {
+ if (conn->fns && conn->fns->dealloc) {
+ conn->fns->dealloc(conn);
+ }
+ memp_free(MEMP_ALTCP_PCB, conn);
+ }
+}
+
+/**
+ * @ingroup altcp
+ * altcp_new_ip6: @ref altcp_new for IPv6
+ */
+struct altcp_pcb *
+altcp_new_ip6(altcp_allocator_t *allocator)
+{
+ return altcp_new_ip_type(allocator, IPADDR_TYPE_V6);
+}
+
+/**
+ * @ingroup altcp
+ * altcp_new: @ref altcp_new for IPv4
+ */
+struct altcp_pcb *
+altcp_new(altcp_allocator_t *allocator)
+{
+ return altcp_new_ip_type(allocator, IPADDR_TYPE_V4);
+}
+
+/**
+ * @ingroup altcp
+ * altcp_new_ip_type: called by applications to allocate a new pcb with the help of an
+ * allocator function.
+ *
+ * @param allocator allocator function and argument
+ * @param ip_type IP version of the pcb (@ref lwip_ip_addr_type)
+ * @return a new altcp_pcb or NULL on error
+ */
+struct altcp_pcb *
+altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type)
+{
+ struct altcp_pcb *conn;
+ if (allocator == NULL) {
+ /* no allocator given, create a simple TCP connection */
+ return altcp_tcp_new_ip_type(ip_type);
+ }
+ if (allocator->alloc == NULL) {
+ /* illegal allocator */
+ return NULL;
+ }
+ conn = allocator->alloc(allocator->arg, ip_type);
+ if (conn == NULL) {
+ /* allocation failed */
+ return NULL;
+ }
+ return conn;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_arg()
+ */
+void
+altcp_arg(struct altcp_pcb *conn, void *arg)
+{
+ if (conn) {
+ conn->arg = arg;
+ }
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_accept()
+ */
+void
+altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept)
+{
+ if (conn != NULL) {
+ conn->accept = accept;
+ }
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_recv()
+ */
+void
+altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv)
+{
+ if (conn) {
+ conn->recv = recv;
+ }
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_sent()
+ */
+void
+altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent)
+{
+ if (conn) {
+ conn->sent = sent;
+ }
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_poll()
+ */
+void
+altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval)
+{
+ if (conn) {
+ conn->poll = poll;
+ conn->pollinterval = interval;
+ if (conn->fns && conn->fns->set_poll) {
+ conn->fns->set_poll(conn, interval);
+ }
+ }
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_err()
+ */
+void
+altcp_err(struct altcp_pcb *conn, altcp_err_fn err)
+{
+ if (conn) {
+ conn->err = err;
+ }
+}
+
+/* Generic functions calling the "virtual" ones */
+
+/**
+ * @ingroup altcp
+ * @see tcp_recved()
+ */
+void
+altcp_recved(struct altcp_pcb *conn, u16_t len)
+{
+ if (conn && conn->fns && conn->fns->recved) {
+ conn->fns->recved(conn, len);
+ }
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_bind()
+ */
+err_t
+altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port)
+{
+ if (conn && conn->fns && conn->fns->bind) {
+ return conn->fns->bind(conn, ipaddr, port);
+ }
+ return ERR_VAL;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_connect()
+ */
+err_t
+altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected)
+{
+ if (conn && conn->fns && conn->fns->connect) {
+ return conn->fns->connect(conn, ipaddr, port, connected);
+ }
+ return ERR_VAL;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_listen_with_backlog_and_err()
+ */
+struct altcp_pcb *
+altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err)
+{
+ if (conn && conn->fns && conn->fns->listen) {
+ return conn->fns->listen(conn, backlog, err);
+ }
+ return NULL;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_abort()
+ */
+void
+altcp_abort(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->abort) {
+ conn->fns->abort(conn);
+ }
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_close()
+ */
+err_t
+altcp_close(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->close) {
+ return conn->fns->close(conn);
+ }
+ return ERR_VAL;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_shutdown()
+ */
+err_t
+altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx)
+{
+ if (conn && conn->fns && conn->fns->shutdown) {
+ return conn->fns->shutdown(conn, shut_rx, shut_tx);
+ }
+ return ERR_VAL;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_write()
+ */
+err_t
+altcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags)
+{
+ if (conn && conn->fns && conn->fns->write) {
+ return conn->fns->write(conn, dataptr, len, apiflags);
+ }
+ return ERR_VAL;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_output()
+ */
+err_t
+altcp_output(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->output) {
+ return conn->fns->output(conn);
+ }
+ return ERR_VAL;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_mss()
+ */
+u16_t
+altcp_mss(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->mss) {
+ return conn->fns->mss(conn);
+ }
+ return 0;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_sndbuf()
+ */
+u16_t
+altcp_sndbuf(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->sndbuf) {
+ return conn->fns->sndbuf(conn);
+ }
+ return 0;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_sndqueuelen()
+ */
+u16_t
+altcp_sndqueuelen(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->sndqueuelen) {
+ return conn->fns->sndqueuelen(conn);
+ }
+ return 0;
+}
+
+void
+altcp_nagle_disable(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->nagle_disable) {
+ conn->fns->nagle_disable(conn);
+ }
+}
+
+void
+altcp_nagle_enable(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->nagle_enable) {
+ conn->fns->nagle_enable(conn);
+ }
+}
+
+int
+altcp_nagle_disabled(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->nagle_disabled) {
+ return conn->fns->nagle_disabled(conn);
+ }
+ return 0;
+}
+
+/**
+ * @ingroup altcp
+ * @see tcp_setprio()
+ */
+void
+altcp_setprio(struct altcp_pcb *conn, u8_t prio)
+{
+ if (conn && conn->fns && conn->fns->setprio) {
+ conn->fns->setprio(conn, prio);
+ }
+}
+
+err_t
+altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port)
+{
+ if (conn && conn->fns && conn->fns->addrinfo) {
+ return conn->fns->addrinfo(conn, local, addr, port);
+ }
+ return ERR_VAL;
+}
+
+ip_addr_t *
+altcp_get_ip(struct altcp_pcb *conn, int local)
+{
+ if (conn && conn->fns && conn->fns->getip) {
+ return conn->fns->getip(conn, local);
+ }
+ return NULL;
+}
+
+u16_t
+altcp_get_port(struct altcp_pcb *conn, int local)
+{
+ if (conn && conn->fns && conn->fns->getport) {
+ return conn->fns->getport(conn, local);
+ }
+ return 0;
+}
+
+#ifdef LWIP_DEBUG
+enum tcp_state
+altcp_dbg_get_tcp_state(struct altcp_pcb *conn)
+{
+ if (conn && conn->fns && conn->fns->dbg_get_tcp_state) {
+ return conn->fns->dbg_get_tcp_state(conn);
+ }
+ return CLOSED;
+}
+#endif
+
+/* Default implementations for the "virtual" functions */
+
+void
+altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval)
+{
+ if (conn && conn->inner_conn) {
+ altcp_poll(conn->inner_conn, conn->poll, interval);
+ }
+}
+
+void
+altcp_default_recved(struct altcp_pcb *conn, u16_t len)
+{
+ if (conn && conn->inner_conn) {
+ altcp_recved(conn->inner_conn, len);
+ }
+}
+
+err_t
+altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_bind(conn->inner_conn, ipaddr, port);
+ }
+ return ERR_VAL;
+}
+
+err_t
+altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx)
+{
+ if (conn) {
+ if (shut_rx && shut_tx && conn->fns && conn->fns->close) {
+ /* default shutdown for both sides is close */
+ return conn->fns->close(conn);
+ }
+ if (conn->inner_conn) {
+ return altcp_shutdown(conn->inner_conn, shut_rx, shut_tx);
+ }
+ }
+ return ERR_VAL;
+}
+
+err_t
+altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_write(conn->inner_conn, dataptr, len, apiflags);
+ }
+ return ERR_VAL;
+}
+
+err_t
+altcp_default_output(struct altcp_pcb *conn)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_output(conn->inner_conn);
+ }
+ return ERR_VAL;
+}
+
+u16_t
+altcp_default_mss(struct altcp_pcb *conn)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_mss(conn->inner_conn);
+ }
+ return 0;
+}
+
+u16_t
+altcp_default_sndbuf(struct altcp_pcb *conn)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_sndbuf(conn->inner_conn);
+ }
+ return 0;
+}
+
+u16_t
+altcp_default_sndqueuelen(struct altcp_pcb *conn)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_sndqueuelen(conn->inner_conn);
+ }
+ return 0;
+}
+
+void
+altcp_default_nagle_disable(struct altcp_pcb *conn)
+{
+ if (conn && conn->inner_conn) {
+ altcp_nagle_disable(conn->inner_conn);
+ }
+}
+
+void
+altcp_default_nagle_enable(struct altcp_pcb *conn)
+{
+ if (conn && conn->inner_conn) {
+ altcp_nagle_enable(conn->inner_conn);
+ }
+}
+
+int
+altcp_default_nagle_disabled(struct altcp_pcb *conn)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_nagle_disabled(conn->inner_conn);
+ }
+ return 0;
+}
+
+void
+altcp_default_setprio(struct altcp_pcb *conn, u8_t prio)
+{
+ if (conn && conn->inner_conn) {
+ altcp_setprio(conn->inner_conn, prio);
+ }
+}
+
+void
+altcp_default_dealloc(struct altcp_pcb *conn)
+{
+ LWIP_UNUSED_ARG(conn);
+ /* nothing to do */
+}
+
+err_t
+altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_get_tcp_addrinfo(conn->inner_conn, local, addr, port);
+ }
+ return ERR_VAL;
+}
+
+ip_addr_t *
+altcp_default_get_ip(struct altcp_pcb *conn, int local)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_get_ip(conn->inner_conn, local);
+ }
+ return NULL;
+}
+
+u16_t
+altcp_default_get_port(struct altcp_pcb *conn, int local)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_get_port(conn->inner_conn, local);
+ }
+ return 0;
+}
+
+#ifdef LWIP_DEBUG
+enum tcp_state
+altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn)
+{
+ if (conn && conn->inner_conn) {
+ return altcp_dbg_get_tcp_state(conn->inner_conn);
+ }
+ return CLOSED;
+}
+#endif
+
+
+#endif /* LWIP_ALTCP */
diff --git a/lwip/src/core/altcp_alloc.c b/lwip/src/core/altcp_alloc.c
new file mode 100644
index 0000000..cd619bc
--- /dev/null
+++ b/lwip/src/core/altcp_alloc.c
@@ -0,0 +1,87 @@
+/**
+ * @file
+ * Application layered TCP connection API (to be used from TCPIP thread)\n
+ * This interface mimics the tcp callback API to the application while preventing
+ * direct linking (much like virtual functions).
+ * This way, an application can make use of other application layer protocols
+ * on top of TCP without knowing the details (e.g. TLS, proxy connection).
+ *
+ * This file contains allocation implementations that combine several layers.
+ */
+
+/*
+ * Copyright (c) 2017 Simon Goldschmidt
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt <goldsimon@gmx.de>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/altcp.h"
+#include "lwip/altcp_tcp.h"
+#include "lwip/altcp_tls.h"
+#include "lwip/priv/altcp_priv.h"
+#include "lwip/mem.h"
+
+#include <string.h>
+
+#if LWIP_ALTCP_TLS
+
+/** This standard allocator function creates an altcp pcb for
+ * TLS over TCP */
+struct altcp_pcb *
+altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type)
+{
+ struct altcp_pcb *inner_conn, *ret;
+ LWIP_UNUSED_ARG(ip_type);
+
+ inner_conn = altcp_tcp_new_ip_type(ip_type);
+ if (inner_conn == NULL) {
+ return NULL;
+ }
+ ret = altcp_tls_wrap(config, inner_conn);
+ if (ret == NULL) {
+ altcp_close(inner_conn);
+ }
+ return ret;
+}
+
+/** This standard allocator function creates an altcp pcb for
+ * TLS over TCP */
+struct altcp_pcb *
+altcp_tls_alloc(void *arg, u8_t ip_type)
+{
+ return altcp_tls_new((struct altcp_tls_config *)arg, ip_type);
+}
+
+#endif /* LWIP_ALTCP_TLS */
+
+#endif /* LWIP_ALTCP */
diff --git a/lwip/src/core/altcp_tcp.c b/lwip/src/core/altcp_tcp.c
new file mode 100644
index 0000000..b715f04
--- /dev/null
+++ b/lwip/src/core/altcp_tcp.c
@@ -0,0 +1,543 @@
+/**
+ * @file
+ * Application layered TCP connection API (to be used from TCPIP thread)\n
+ * This interface mimics the tcp callback API to the application while preventing
+ * direct linking (much like virtual functions).
+ * This way, an application can make use of other application layer protocols
+ * on top of TCP without knowing the details (e.g. TLS, proxy connection).
+ *
+ * This file contains the base implementation calling into tcp.
+ */
+
+/*
+ * Copyright (c) 2017 Simon Goldschmidt
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt <goldsimon@gmx.de>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/altcp.h"
+#include "lwip/altcp_tcp.h"
+#include "lwip/priv/altcp_priv.h"
+#include "lwip/tcp.h"
+#include "lwip/mem.h"
+
+#include <string.h>
+
+#define ALTCP_TCP_ASSERT_CONN(conn) do { \
+ LWIP_ASSERT("conn->inner_conn == NULL", (conn)->inner_conn == NULL); \
+ LWIP_UNUSED_ARG(conn); /* for LWIP_NOASSERT */ } while(0)
+#define ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb) do { \
+ LWIP_ASSERT("pcb mismatch", (conn)->state == tpcb); \
+ LWIP_UNUSED_ARG(tpcb); /* for LWIP_NOASSERT */ \
+ ALTCP_TCP_ASSERT_CONN(conn); } while(0)
+
+
+/* Forward declaration; the actual definition is at the end of this file
+ since it contains pointers to static functions declared below */
+extern const struct altcp_functions altcp_tcp_functions;
+
+static void altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb);
+
+/* callback functions for TCP */
+static err_t
+altcp_tcp_accept(void *arg, struct tcp_pcb *new_tpcb, err_t err)
+{
+ struct altcp_pcb *listen_conn = (struct altcp_pcb *)arg;
+ if (listen_conn && listen_conn->accept) {
+ /* create a new altcp_conn to pass to the next 'accept' callback */
+ struct altcp_pcb *new_conn = altcp_alloc();
+ if (new_conn == NULL) {
+ return ERR_MEM;
+ }
+ altcp_tcp_setup(new_conn, new_tpcb);
+ return listen_conn->accept(listen_conn->arg, new_conn, err);
+ }
+ return ERR_ARG;
+}
+
+static err_t
+altcp_tcp_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
+{
+ struct altcp_pcb *conn = (struct altcp_pcb *)arg;
+ if (conn) {
+ ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
+ if (conn->connected) {
+ return conn->connected(conn->arg, conn, err);
+ }
+ }
+ return ERR_OK;
+}
+
+static err_t
+altcp_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+{
+ struct altcp_pcb *conn = (struct altcp_pcb *)arg;
+ if (conn) {
+ ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
+ if (conn->recv) {
+ return conn->recv(conn->arg, conn, p, err);
+ }
+ }
+ if (p != NULL) {
+ /* prevent memory leaks */
+ pbuf_free(p);
+ }
+ return ERR_OK;
+}
+
+static err_t
+altcp_tcp_sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
+{
+ struct altcp_pcb *conn = (struct altcp_pcb *)arg;
+ if (conn) {
+ ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
+ if (conn->sent) {
+ return conn->sent(conn->arg, conn, len);
+ }
+ }
+ return ERR_OK;
+}
+
+static err_t
+altcp_tcp_poll(void *arg, struct tcp_pcb *tpcb)
+{
+ struct altcp_pcb *conn = (struct altcp_pcb *)arg;
+ if (conn) {
+ ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
+ if (conn->poll) {
+ return conn->poll(conn->arg, conn);
+ }
+ }
+ return ERR_OK;
+}
+
+static void
+altcp_tcp_err(void *arg, err_t err)
+{
+ struct altcp_pcb *conn = (struct altcp_pcb *)arg;
+ if (conn) {
+ conn->state = NULL; /* already freed */
+ if (conn->err) {
+ conn->err(conn->arg, err);
+ }
+ altcp_free(conn);
+ }
+}
+
+/* setup functions */
+
+static void
+altcp_tcp_remove_callbacks(struct tcp_pcb *tpcb)
+{
+ tcp_arg(tpcb, NULL);
+ tcp_recv(tpcb, NULL);
+ tcp_sent(tpcb, NULL);
+ tcp_err(tpcb, NULL);
+ tcp_poll(tpcb, NULL, tpcb->pollinterval);
+}
+
+static void
+altcp_tcp_setup_callbacks(struct altcp_pcb *conn, struct tcp_pcb *tpcb)
+{
+ tcp_arg(tpcb, conn);
+ tcp_recv(tpcb, altcp_tcp_recv);
+ tcp_sent(tpcb, altcp_tcp_sent);
+ tcp_err(tpcb, altcp_tcp_err);
+ /* tcp_poll is set when interval is set by application */
+ /* listen is set totally different :-) */
+}
+
+static void
+altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb)
+{
+ altcp_tcp_setup_callbacks(conn, tpcb);
+ conn->state = tpcb;
+ conn->fns = &altcp_tcp_functions;
+}
+
+struct altcp_pcb *
+altcp_tcp_new_ip_type(u8_t ip_type)
+{
+ /* Allocate the tcp pcb first to invoke the priority handling code
+ if we're out of pcbs */
+ struct tcp_pcb *tpcb = tcp_new_ip_type(ip_type);
+ if (tpcb != NULL) {
+ struct altcp_pcb *ret = altcp_alloc();
+ if (ret != NULL) {
+ altcp_tcp_setup(ret, tpcb);
+ return ret;
+ } else {
+ /* altcp_pcb allocation failed -> free the tcp_pcb too */
+ tcp_close(tpcb);
+ }
+ }
+ return NULL;
+}
+
+/** altcp_tcp allocator function fitting to @ref altcp_allocator_t / @ref altcp_new.
+*
+* arg pointer is not used for TCP.
+*/
+struct altcp_pcb *
+altcp_tcp_alloc(void *arg, u8_t ip_type)
+{
+ LWIP_UNUSED_ARG(arg);
+ return altcp_tcp_new_ip_type(ip_type);
+}
+
+struct altcp_pcb *
+altcp_tcp_wrap(struct tcp_pcb *tpcb)
+{
+ if (tpcb != NULL) {
+ struct altcp_pcb *ret = altcp_alloc();
+ if (ret != NULL) {
+ altcp_tcp_setup(ret, tpcb);
+ return ret;
+ }
+ }
+ return NULL;
+}
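+
+/* Usage sketch (editorial): wrap an existing tcp_pcb so it can be driven
+ * through the altcp_* API. On failure the tcp_pcb is left untouched and
+ * remains owned by the caller.
+ *
+ *   struct tcp_pcb *tpcb = tcp_new();
+ *   struct altcp_pcb *conn = altcp_tcp_wrap(tpcb);
+ *   if ((conn == NULL) && (tpcb != NULL)) {
+ *     tcp_close(tpcb);   // wrapping failed; the tcp_pcb is still ours
+ *   }
+ */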
+
+
+/* "virtual" functions calling into tcp */
+static void
+altcp_tcp_set_poll(struct altcp_pcb *conn, u8_t interval)
+{
+ if (conn != NULL) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ tcp_poll(pcb, altcp_tcp_poll, interval);
+ }
+}
+
+static void
+altcp_tcp_recved(struct altcp_pcb *conn, u16_t len)
+{
+ if (conn != NULL) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ tcp_recved(pcb, len);
+ }
+}
+
+static err_t
+altcp_tcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port)
+{
+ struct tcp_pcb *pcb;
+ if (conn == NULL) {
+ return ERR_VAL;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ pcb = (struct tcp_pcb *)conn->state;
+ return tcp_bind(pcb, ipaddr, port);
+}
+
+static err_t
+altcp_tcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected)
+{
+ struct tcp_pcb *pcb;
+ if (conn == NULL) {
+ return ERR_VAL;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ conn->connected = connected;
+ pcb = (struct tcp_pcb *)conn->state;
+ return tcp_connect(pcb, ipaddr, port, altcp_tcp_connected);
+}
+
+static struct altcp_pcb *
+altcp_tcp_listen(struct altcp_pcb *conn, u8_t backlog, err_t *err)
+{
+ struct tcp_pcb *pcb;
+ struct tcp_pcb *lpcb;
+ if (conn == NULL) {
+ return NULL;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ pcb = (struct tcp_pcb *)conn->state;
+ lpcb = tcp_listen_with_backlog_and_err(pcb, backlog, err);
+ if (lpcb != NULL) {
+ conn->state = lpcb;
+ tcp_accept(lpcb, altcp_tcp_accept);
+ return conn;
+ }
+ return NULL;
+}
+
+static void
+altcp_tcp_abort(struct altcp_pcb *conn)
+{
+ if (conn != NULL) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ if (pcb) {
+ tcp_abort(pcb);
+ }
+ }
+}
+
+static err_t
+altcp_tcp_close(struct altcp_pcb *conn)
+{
+ struct tcp_pcb *pcb;
+ if (conn == NULL) {
+ return ERR_VAL;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ pcb = (struct tcp_pcb *)conn->state;
+ if (pcb) {
+ err_t err;
+ tcp_poll_fn oldpoll = pcb->poll;
+ altcp_tcp_remove_callbacks(pcb);
+ err = tcp_close(pcb);
+ if (err != ERR_OK) {
+ /* not closed, set up all callbacks again */
+ altcp_tcp_setup_callbacks(conn, pcb);
+ /* poll callback is not included in the above */
+ tcp_poll(pcb, oldpoll, pcb->pollinterval);
+ return err;
+ }
+ conn->state = NULL; /* unsafe to reference pcb after tcp_close(). */
+ }
+ altcp_free(conn);
+ return ERR_OK;
+}
+
+static err_t
+altcp_tcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx)
+{
+ struct tcp_pcb *pcb;
+ if (conn == NULL) {
+ return ERR_VAL;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ pcb = (struct tcp_pcb *)conn->state;
+ return tcp_shutdown(pcb, shut_rx, shut_tx);
+}
+
+static err_t
+altcp_tcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags)
+{
+ struct tcp_pcb *pcb;
+ if (conn == NULL) {
+ return ERR_VAL;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ pcb = (struct tcp_pcb *)conn->state;
+ return tcp_write(pcb, dataptr, len, apiflags);
+}
+
+static err_t
+altcp_tcp_output(struct altcp_pcb *conn)
+{
+ struct tcp_pcb *pcb;
+ if (conn == NULL) {
+ return ERR_VAL;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ pcb = (struct tcp_pcb *)conn->state;
+ return tcp_output(pcb);
+}
+
+static u16_t
+altcp_tcp_mss(struct altcp_pcb *conn)
+{
+ struct tcp_pcb *pcb;
+ if (conn == NULL) {
+ return 0;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ pcb = (struct tcp_pcb *)conn->state;
+ return tcp_mss(pcb);
+}
+
+static u16_t
+altcp_tcp_sndbuf(struct altcp_pcb *conn)
+{
+ struct tcp_pcb *pcb;
+ if (conn == NULL) {
+ return 0;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ pcb = (struct tcp_pcb *)conn->state;
+ return tcp_sndbuf(pcb);
+}
+
+static u16_t
+altcp_tcp_sndqueuelen(struct altcp_pcb *conn)
+{
+ struct tcp_pcb *pcb;
+ if (conn == NULL) {
+ return 0;
+ }
+ ALTCP_TCP_ASSERT_CONN(conn);
+ pcb = (struct tcp_pcb *)conn->state;
+ return tcp_sndqueuelen(pcb);
+}
+
+static void
+altcp_tcp_nagle_disable(struct altcp_pcb *conn)
+{
+ if (conn && conn->state) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ tcp_nagle_disable(pcb);
+ }
+}
+
+static void
+altcp_tcp_nagle_enable(struct altcp_pcb *conn)
+{
+ if (conn && conn->state) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ tcp_nagle_enable(pcb);
+ }
+}
+
+static int
+altcp_tcp_nagle_disabled(struct altcp_pcb *conn)
+{
+ if (conn && conn->state) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ return tcp_nagle_disabled(pcb);
+ }
+ return 0;
+}
+
+static void
+altcp_tcp_setprio(struct altcp_pcb *conn, u8_t prio)
+{
+ if (conn != NULL) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ tcp_setprio(pcb, prio);
+ }
+}
+
+static void
+altcp_tcp_dealloc(struct altcp_pcb *conn)
+{
+ LWIP_UNUSED_ARG(conn);
+ ALTCP_TCP_ASSERT_CONN(conn);
+ /* no private state to clean up */
+}
+
+static err_t
+altcp_tcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port)
+{
+ if (conn) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ return tcp_tcp_get_tcp_addrinfo(pcb, local, addr, port);
+ }
+ return ERR_VAL;
+}
+
+static ip_addr_t *
+altcp_tcp_get_ip(struct altcp_pcb *conn, int local)
+{
+ if (conn) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ if (pcb) {
+ if (local) {
+ return &pcb->local_ip;
+ } else {
+ return &pcb->remote_ip;
+ }
+ }
+ }
+ return NULL;
+}
+
+static u16_t
+altcp_tcp_get_port(struct altcp_pcb *conn, int local)
+{
+ if (conn) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ if (pcb) {
+ if (local) {
+ return pcb->local_port;
+ } else {
+ return pcb->remote_port;
+ }
+ }
+ }
+ return 0;
+}
+
+#ifdef LWIP_DEBUG
+static enum tcp_state
+altcp_tcp_dbg_get_tcp_state(struct altcp_pcb *conn)
+{
+ if (conn) {
+ struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
+ ALTCP_TCP_ASSERT_CONN(conn);
+ if (pcb) {
+ return pcb->state;
+ }
+ }
+ return CLOSED;
+}
+#endif
+const struct altcp_functions altcp_tcp_functions = {
+ altcp_tcp_set_poll,
+ altcp_tcp_recved,
+ altcp_tcp_bind,
+ altcp_tcp_connect,
+ altcp_tcp_listen,
+ altcp_tcp_abort,
+ altcp_tcp_close,
+ altcp_tcp_shutdown,
+ altcp_tcp_write,
+ altcp_tcp_output,
+ altcp_tcp_mss,
+ altcp_tcp_sndbuf,
+ altcp_tcp_sndqueuelen,
+ altcp_tcp_nagle_disable,
+ altcp_tcp_nagle_enable,
+ altcp_tcp_nagle_disabled,
+ altcp_tcp_setprio,
+ altcp_tcp_dealloc,
+ altcp_tcp_get_tcp_addrinfo,
+ altcp_tcp_get_ip,
+ altcp_tcp_get_port
+#ifdef LWIP_DEBUG
+ , altcp_tcp_dbg_get_tcp_state
+#endif
+};
+
+#endif /* LWIP_ALTCP */
diff --git a/lwip/src/core/def.c b/lwip/src/core/def.c
new file mode 100644
index 0000000..9da36fe
--- /dev/null
+++ b/lwip/src/core/def.c
@@ -0,0 +1,240 @@
+/**
+ * @file
+ * Common functions used throughout the stack.
+ *
+ * These are reference implementations of the byte swapping functions,
+ * written with the aim of being simple, correct and fully portable.
+ * Byte swapping is the second thing you would want to optimize. To do so,
+ * port the functions to your architecture and define, in your cc.h:
+ *
+ * \#define lwip_htons(x) your_htons
+ * \#define lwip_htonl(x) your_htonl
+ *
+ * Note lwip_ntohs() and lwip_ntohl() are merely references to the htonx counterparts.
+ *
+ * If you \#define them to htons() and htonl(), you should
+ * \#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS to prevent lwIP from
+ * defining htonx/ntohx compatibility macros.
+
+ * @defgroup sys_nonstandard Non-standard functions
+ * @ingroup sys_layer
+ * lwIP provides default implementations for non-standard functions.
+ * These can be mapped to OS functions to reduce code footprint if desired.
+ * All defines related to this section must not be placed in lwipopts.h,
+ * but in arch/cc.h!
+ * These options cannot be \#defined in lwipopts.h since they are not options
+ * of lwIP itself, but options of the lwIP port to your system.
+ */
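+
+/* Editorial sketch of such an arch/cc.h override, assuming a GCC/Clang
+ * toolchain that provides the byte-swap builtins:
+ *
+ *   #define lwip_htons(x) __builtin_bswap16(x)
+ *   #define lwip_htonl(x) __builtin_bswap32(x)
+ */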
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#include "lwip/opt.h"
+#include "lwip/def.h"
+
+#include <string.h>
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+
+#if !defined(lwip_htons)
+/**
+ * Convert an u16_t from host- to network byte order.
+ *
+ * @param n u16_t in host byte order
+ * @return n in network byte order
+ */
+u16_t
+lwip_htons(u16_t n)
+{
+ return PP_HTONS(n);
+}
+#endif /* lwip_htons */
+
+#if !defined(lwip_htonl)
+/**
+ * Convert an u32_t from host- to network byte order.
+ *
+ * @param n u32_t in host byte order
+ * @return n in network byte order
+ */
+u32_t
+lwip_htonl(u32_t n)
+{
+ return PP_HTONL(n);
+}
+#endif /* lwip_htonl */
+
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#ifndef lwip_strnstr
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for strnstr() non-standard function.
+ * This can be \#defined to strnstr() depending on your platform port.
+ */
+char *
+lwip_strnstr(const char *buffer, const char *token, size_t n)
+{
+ const char *p;
+ size_t tokenlen = strlen(token);
+ if (tokenlen == 0) {
+ return LWIP_CONST_CAST(char *, buffer);
+ }
+ for (p = buffer; *p && (p + tokenlen <= buffer + n); p++) {
+ if ((*p == *token) && (strncmp(p, token, tokenlen) == 0)) {
+ return LWIP_CONST_CAST(char *, p);
+ }
+ }
+ return NULL;
+}
+#endif
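+
+/* Editorial example: lwip_strnstr("Content-Length: 42\r\n", "Length", 20)
+ * returns a pointer to the "Length: 42\r\n" suffix; with n too small to
+ * reach the token (e.g. n = 8) it returns NULL. */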
+
+#ifndef lwip_stricmp
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for stricmp() non-standard function.
+ * This can be \#defined to stricmp() depending on your platform port.
+ */
+int
+lwip_stricmp(const char *str1, const char *str2)
+{
+ char c1, c2;
+
+ do {
+ c1 = *str1++;
+ c2 = *str2++;
+ if (c1 != c2) {
+ char c1_upc = c1 | 0x20;
+ if ((c1_upc >= 'a') && (c1_upc <= 'z')) {
+ /* characters are not equal and one is in the alphabet range:
+ downcase both chars and check again */
+ char c2_upc = c2 | 0x20;
+ if (c1_upc != c2_upc) {
+ /* still not equal */
+ /* don't care for < or > */
+ return 1;
+ }
+ } else {
+ /* characters are not equal but none is in the alphabet range */
+ return 1;
+ }
+ }
+ } while (c1 != 0);
+ return 0;
+}
+#endif
+
+#ifndef lwip_strnicmp
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for strnicmp() non-standard function.
+ * This can be \#defined to strnicmp() depending on your platform port.
+ */
+int
+lwip_strnicmp(const char *str1, const char *str2, size_t len)
+{
+ char c1, c2;
+
+ do {
+ c1 = *str1++;
+ c2 = *str2++;
+ if (c1 != c2) {
+ char c1_upc = c1 | 0x20;
+ if ((c1_upc >= 'a') && (c1_upc <= 'z')) {
+ /* characters are not equal and one is in the alphabet range:
+ downcase both chars and check again */
+ char c2_upc = c2 | 0x20;
+ if (c1_upc != c2_upc) {
+ /* still not equal */
+ /* don't care for < or > */
+ return 1;
+ }
+ } else {
+ /* characters are not equal but none is in the alphabet range */
+ return 1;
+ }
+ }
+ len--;
+ } while ((len != 0) && (c1 != 0));
+ return 0;
+}
+#endif
+
+#ifndef lwip_itoa
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for itoa() non-standard function.
+ * This can be \#defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform port.
+ */
+void
+lwip_itoa(char *result, size_t bufsize, int number)
+{
+ char *res = result;
+ char *tmp = result + bufsize - 1;
+ int n = (number >= 0) ? number : -number;
+
+ /* handle invalid bufsize */
+ if (bufsize < 2) {
+ if (bufsize == 1) {
+ *result = 0;
+ }
+ return;
+ }
+
+ /* First, add sign */
+ if (number < 0) {
+ *res++ = '-';
+ }
+ /* Then create the string from the end and stop if buffer full,
+ and ensure output string is zero terminated */
+ *tmp = 0;
+ while ((n != 0) && (tmp > res)) {
+ char val = (char)('0' + (n % 10));
+ tmp--;
+ *tmp = val;
+ n = n / 10;
+ }
+ if (n) {
+ /* buffer is too small */
+ *result = 0;
+ return;
+ }
+ if (*tmp == 0) {
+ /* Nothing added? */
+ *res++ = '0';
+ *res++ = 0;
+ return;
+ }
+ /* move from temporary buffer to output buffer (sign is not moved) */
+ memmove(res, tmp, (size_t)((result + bufsize) - tmp));
+}
+#endif
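+
+/* Usage sketch (editorial) for the default lwip_itoa() above:
+ *
+ *   char buf[12];
+ *   lwip_itoa(buf, sizeof(buf), -42);   // buf now contains "-42"
+ *   lwip_itoa(buf, 3, 1234);            // buffer too small: buf is set to ""
+ */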
diff --git a/lwip/src/core/dns.c b/lwip/src/core/dns.c
new file mode 100644
index 0000000..9d2f61e
--- /dev/null
+++ b/lwip/src/core/dns.c
@@ -0,0 +1,1631 @@
+/**
+ * @file
+ * DNS - host name to IP address resolver.
+ *
+ * @defgroup dns DNS
+ * @ingroup callbackstyle_api
+ *
+ * Implements a DNS host name to IP address resolver.
+ *
+ * The lwIP DNS resolver functions are used to look up a host name and
+ * map it to a numerical IP address. The resolver maintains a list of resolved
+ * hostnames that can be queried with the dns_lookup() function.
+ * New hostnames can be resolved using the dns_query() function.
+ *
+ * The lwIP version of the resolver also adds a non-blocking version of
+ * gethostbyname() that will work with a raw API application. This function
+ * checks for an IP address string first and converts it if it is valid.
+ * gethostbyname() then does a dns_lookup() to see if the name is
+ * already in the table. If so, the IP is returned. If not, a query is
+ * issued and the function returns with an ERR_INPROGRESS status. The
+ * application using the DNS client must then go into a waiting state.
+ *
+ * Once a hostname has been resolved (or found to be non-existent),
+ * the resolver code calls a specified callback function (which
+ * must be implemented by the module that uses the resolver).
+ *
+ * Multicast DNS queries are supported for names ending in ".local".
+ * However, only "One-Shot Multicast DNS Queries" are supported (RFC 6762,
+ * section 5.1); this is not a fully compliant implementation of continuous
+ * mDNS querying!
+ *
+ * All functions must be called from TCPIP thread.
+ *
+ * @see DNS_MAX_SERVERS
+ * @see LWIP_DHCP_MAX_DNS_SERVERS
+ * @see @ref netconn_common for thread-safe access.
+ */
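+
+/* Editorial sketch of the callback flow described above; dns_found_cb and
+ * its argument are application-side placeholders:
+ *
+ *   static void dns_found_cb(const char *name, const ip_addr_t *ipaddr, void *arg)
+ *   {
+ *     if (ipaddr != NULL) {
+ *       // resolved: use *ipaddr
+ *     } else {
+ *       // resolution failed
+ *     }
+ *   }
+ *
+ *   ip_addr_t addr;
+ *   err_t err = dns_gethostbyname("example.org", &addr, dns_found_cb, NULL);
+ *   if (err == ERR_OK) {
+ *     // answered from the DNS table: addr is valid immediately
+ *   } else if (err == ERR_INPROGRESS) {
+ *     // query sent: dns_found_cb will be called when done
+ *   }
+ */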
+
+/*
+ * Port to lwIP from uIP
+ * by Jim Pettinato April 2007
+ *
+ * security fixes and more by Simon Goldschmidt
+ *
+ * uIP version Copyright (c) 2002-2003, Adam Dunkels.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-----------------------------------------------------------------------------
+ * RFC 1035 - Domain names - implementation and specification
+ * RFC 2181 - Clarifications to the DNS Specification
+ *----------------------------------------------------------------------------*/
+
+/** @todo: define good default values (rfc compliance) */
+/** @todo: improve answer parsing, more checkings... */
+/** @todo: check RFC1035 - 7.3. Processing responses */
+/** @todo: one-shot mDNS: dual-stack fallback to another IP version */
+
+/*-----------------------------------------------------------------------------
+ * Includes
+ *----------------------------------------------------------------------------*/
+
+#include "lwip/opt.h"
+
+#if LWIP_DNS /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/udp.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/dns.h"
+#include "lwip/prot/dns.h"
+
+#include <string.h>
+
+/** Random generator function to create random TXIDs and source ports for queries */
+#ifndef DNS_RAND_TXID
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0)
+#define DNS_RAND_TXID LWIP_RAND
+#else
+static u16_t dns_txid;
+#define DNS_RAND_TXID() (++dns_txid)
+#endif
+#endif
+
+/** Limits the source port to be >= 1024 by default */
+#ifndef DNS_PORT_ALLOWED
+#define DNS_PORT_ALLOWED(port) ((port) >= 1024)
+#endif
+
+/** DNS resource record max. TTL (one week as default) */
+#ifndef DNS_MAX_TTL
+#define DNS_MAX_TTL 604800
+#elif DNS_MAX_TTL > 0x7FFFFFFF
+#error DNS_MAX_TTL must be a positive 32-bit value
+#endif
+
+#if DNS_TABLE_SIZE > 255
+#error DNS_TABLE_SIZE must fit into an u8_t
+#endif
+#if DNS_MAX_SERVERS > 255
+#error DNS_MAX_SERVERS must fit into an u8_t
+#endif
+
+/* The number of parallel requests (i.e. calls to dns_gethostbyname)
+ * that cannot be answered from the DNS table.
+ * This is set to the table size by default.
+ */
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+#ifndef DNS_MAX_REQUESTS
+#define DNS_MAX_REQUESTS DNS_TABLE_SIZE
+#else
+#if DNS_MAX_REQUESTS > 255
+#error DNS_MAX_REQUESTS must fit into an u8_t
+#endif
+#endif
+#else
+/* In this configuration, both arrays have to have the same size and are used
+ * like one entry (used/free) */
+#define DNS_MAX_REQUESTS DNS_TABLE_SIZE
+#endif
+
+/* The number of UDP source ports used in parallel */
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+#ifndef DNS_MAX_SOURCE_PORTS
+#define DNS_MAX_SOURCE_PORTS DNS_MAX_REQUESTS
+#else
+#if DNS_MAX_SOURCE_PORTS > 255
+#error DNS_MAX_SOURCE_PORTS must fit into an u8_t
+#endif
+#endif
+#else
+#ifdef DNS_MAX_SOURCE_PORTS
+#undef DNS_MAX_SOURCE_PORTS
+#endif
+#define DNS_MAX_SOURCE_PORTS 1
+#endif
+
+#if LWIP_IPV4 && LWIP_IPV6
+#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) (((t) == LWIP_DNS_ADDRTYPE_IPV6_IPV4) || ((t) == LWIP_DNS_ADDRTYPE_IPV6))
+#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) (IP_IS_V6_VAL(ip) ? LWIP_DNS_ADDRTYPE_IS_IPV6(t) : (!LWIP_DNS_ADDRTYPE_IS_IPV6(t)))
+#define LWIP_DNS_ADDRTYPE_ARG(x) , x
+#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) x
+#define LWIP_DNS_SET_ADDRTYPE(x, y) do { x = y; } while(0)
+#else
+#if LWIP_IPV6
+#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 1
+#else
+#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 0
+#endif
+#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) 1
+#define LWIP_DNS_ADDRTYPE_ARG(x)
+#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) 0
+#define LWIP_DNS_SET_ADDRTYPE(x, y)
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+#define LWIP_DNS_ISMDNS_ARG(x) , x
+#else
+#define LWIP_DNS_ISMDNS_ARG(x)
+#endif
+
+/** DNS query message structure.
+ No packing needed: only used locally on the stack. */
+struct dns_query {
+ /* DNS query record starts with either a domain name or a pointer
+ to a name already present somewhere in the packet. */
+ u16_t type;
+ u16_t cls;
+};
+#define SIZEOF_DNS_QUERY 4
+
+/** DNS answer message structure.
+ No packing needed: only used locally on the stack. */
+struct dns_answer {
+ /* DNS answer record starts with either a domain name or a pointer
+ to a name already present somewhere in the packet. */
+ u16_t type;
+ u16_t cls;
+ u32_t ttl;
+ u16_t len;
+};
+#define SIZEOF_DNS_ANSWER 10
+/* maximum allowed size of the struct, since it is not packed */
+#define SIZEOF_DNS_ANSWER_ASSERT 12
+
+/* DNS table entry states */
+typedef enum {
+ DNS_STATE_UNUSED = 0,
+ DNS_STATE_NEW = 1,
+ DNS_STATE_ASKING = 2,
+ DNS_STATE_DONE = 3
+} dns_state_enum_t;
+
+/** DNS table entry */
+struct dns_table_entry {
+ u32_t ttl;
+ ip_addr_t ipaddr;
+ u16_t txid;
+ u8_t state;
+ u8_t server_idx;
+ u8_t tmr;
+ u8_t retries;
+ u8_t seqno;
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+ u8_t pcb_idx;
+#endif
+ char name[DNS_MAX_NAME_LENGTH];
+#if LWIP_IPV4 && LWIP_IPV6
+ u8_t reqaddrtype;
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+ u8_t is_mdns;
+#endif
+};
+
+/** DNS request table entry: used when dns_gethostbyname cannot answer the
+ * request from the DNS table */
+struct dns_req_entry {
+ /* pointer to callback on DNS query done */
+ dns_found_callback found;
+ /* argument passed to the callback function */
+ void *arg;
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+ u8_t dns_table_idx;
+#endif
+#if LWIP_IPV4 && LWIP_IPV6
+ u8_t reqaddrtype;
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+};
+
+#if DNS_LOCAL_HOSTLIST
+
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC
+/** Local host-list. For hostnames in this list, no
+ * external name resolution is performed */
+static struct local_hostlist_entry *local_hostlist_dynamic;
+#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+
+/** Defining this allows the local_hostlist_static to be placed in a different
+ * linker section (e.g. FLASH) */
+#ifndef DNS_LOCAL_HOSTLIST_STORAGE_PRE
+#define DNS_LOCAL_HOSTLIST_STORAGE_PRE static
+#endif /* DNS_LOCAL_HOSTLIST_STORAGE_PRE */
+/** Defining this allows the local_hostlist_static to be placed in a different
+ * linker section (e.g. FLASH) */
+#ifndef DNS_LOCAL_HOSTLIST_STORAGE_POST
+#define DNS_LOCAL_HOSTLIST_STORAGE_POST
+#endif /* DNS_LOCAL_HOSTLIST_STORAGE_POST */
+DNS_LOCAL_HOSTLIST_STORAGE_PRE struct local_hostlist_entry local_hostlist_static[]
+ DNS_LOCAL_HOSTLIST_STORAGE_POST = DNS_LOCAL_HOSTLIST_INIT;
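+
+/* Editorial sketch of a static host list defined in lwipopts.h (name and
+ * address are placeholders; DNS_LOCAL_HOSTLIST_ELEM and IPADDR4_INIT_BYTES
+ * come from lwip/dns.h and lwip/ip_addr.h):
+ *
+ *   #define DNS_LOCAL_HOSTLIST_INIT { \
+ *     DNS_LOCAL_HOSTLIST_ELEM("device.local", IPADDR4_INIT_BYTES(192, 0, 2, 10)) }
+ */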
+
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+
+static void dns_init_local(void);
+static err_t dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype));
+#endif /* DNS_LOCAL_HOSTLIST */
+
+
+/* forward declarations */
+static void dns_recv(void *s, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port);
+static void dns_check_entries(void);
+static void dns_call_found(u8_t idx, ip_addr_t *addr);
+
+/*-----------------------------------------------------------------------------
+ * Globals
+ *----------------------------------------------------------------------------*/
+
+/* DNS variables */
+static struct udp_pcb *dns_pcbs[DNS_MAX_SOURCE_PORTS];
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+static u8_t dns_last_pcb_idx;
+#endif
+static u8_t dns_seqno;
+static struct dns_table_entry dns_table[DNS_TABLE_SIZE];
+static struct dns_req_entry dns_requests[DNS_MAX_REQUESTS];
+static ip_addr_t dns_servers[DNS_MAX_SERVERS];
+
+#if LWIP_IPV4
+const ip_addr_t dns_mquery_v4group = DNS_MQUERY_IPV4_GROUP_INIT;
+#endif /* LWIP_IPV4 */
+#if LWIP_IPV6
+const ip_addr_t dns_mquery_v6group = DNS_MQUERY_IPV6_GROUP_INIT;
+#endif /* LWIP_IPV6 */
+
+/**
+ * Initialize the resolver: set up the UDP pcb and configure the default server
+ * (if DNS_SERVER_ADDRESS is set).
+ */
+void
+dns_init(void)
+{
+#ifdef DNS_SERVER_ADDRESS
+ /* initialize default DNS server address */
+ ip_addr_t dnsserver;
+ DNS_SERVER_ADDRESS(&dnsserver);
+ dns_setserver(0, &dnsserver);
+#endif /* DNS_SERVER_ADDRESS */
+
+ LWIP_ASSERT("sanity check SIZEOF_DNS_QUERY",
+ sizeof(struct dns_query) == SIZEOF_DNS_QUERY);
+ LWIP_ASSERT("sanity check SIZEOF_DNS_ANSWER",
+ sizeof(struct dns_answer) <= SIZEOF_DNS_ANSWER_ASSERT);
+
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_init: initializing\n"));
+
+ /* if dns client not yet initialized... */
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0)
+ if (dns_pcbs[0] == NULL) {
+ dns_pcbs[0] = udp_new_ip_type(IPADDR_TYPE_ANY);
+ LWIP_ASSERT("dns_pcbs[0] != NULL", dns_pcbs[0] != NULL);
+
+ /* initialize DNS table not needed (initialized to zero since it is a
+ * global variable) */
+ LWIP_ASSERT("For implicit initialization to work, DNS_STATE_UNUSED needs to be 0",
+ DNS_STATE_UNUSED == 0);
+
+ /* initialize DNS client */
+ udp_bind(dns_pcbs[0], IP_ANY_TYPE, 0);
+ udp_recv(dns_pcbs[0], dns_recv, NULL);
+ }
+#endif
+
+#if DNS_LOCAL_HOSTLIST
+ dns_init_local();
+#endif
+}
+
+/**
+ * @ingroup dns
+ * Initialize one of the DNS servers.
+ *
+ * @param numdns the index of the DNS server to set; must be < DNS_MAX_SERVERS
+ * @param dnsserver IP address of the DNS server to set
+ */
+void
+dns_setserver(u8_t numdns, const ip_addr_t *dnsserver)
+{
+ if (numdns < DNS_MAX_SERVERS) {
+ if (dnsserver != NULL) {
+ dns_servers[numdns] = (*dnsserver);
+ } else {
+ dns_servers[numdns] = *IP_ADDR_ANY;
+ }
+ }
+}
+
+/**
+ * @ingroup dns
+ * Obtain one of the currently configured DNS servers.
+ *
+ * @param numdns the index of the DNS server
+ * @return IP address of the indexed DNS server or "ip_addr_any" if the DNS
+ * server has not been configured.
+ */
+const ip_addr_t *
+dns_getserver(u8_t numdns)
+{
+ if (numdns < DNS_MAX_SERVERS) {
+ return &dns_servers[numdns];
+ } else {
+ return IP_ADDR_ANY;
+ }
+}
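+
+/* Editorial sketch: configuring a server slot at runtime (the IPv4 address
+ * is a documentation-range placeholder; IP_ADDR4 requires LWIP_IPV4):
+ *
+ *   ip_addr_t dns;
+ *   IP_ADDR4(&dns, 192, 0, 2, 53);
+ *   dns_setserver(0, &dns);
+ *   const ip_addr_t *active = dns_getserver(0);
+ */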
+
+/**
+ * The DNS resolver client timer: handles retries and timeouts; it should
+ * be called every DNS_TMR_INTERVAL milliseconds (every second by default).
+ */
+void
+dns_tmr(void)
+{
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_tmr: dns_check_entries\n"));
+ dns_check_entries();
+}
+
+#if DNS_LOCAL_HOSTLIST
+static void
+dns_init_local(void)
+{
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT)
+ size_t i;
+ struct local_hostlist_entry *entry;
+ /* Dynamic: copy entries from DNS_LOCAL_HOSTLIST_INIT to list */
+ struct local_hostlist_entry local_hostlist_init[] = DNS_LOCAL_HOSTLIST_INIT;
+ size_t namelen;
+ for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_init); i++) {
+ struct local_hostlist_entry *init_entry = &local_hostlist_init[i];
+ LWIP_ASSERT("invalid host name (NULL)", init_entry->name != NULL);
+ namelen = strlen(init_entry->name);
+ LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN);
+ entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST);
+ LWIP_ASSERT("mem-error in dns_init_local", entry != NULL);
+ if (entry != NULL) {
+ char *entry_name = (char *)entry + sizeof(struct local_hostlist_entry);
+ MEMCPY(entry_name, init_entry->name, namelen);
+ entry_name[namelen] = 0;
+ entry->name = entry_name;
+ entry->addr = init_entry->addr;
+ entry->next = local_hostlist_dynamic;
+ local_hostlist_dynamic = entry;
+ }
+ }
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) */
+}
+
+/**
+ * @ingroup dns
+ * Iterate the local host-list for a hostname.
+ *
+ * @param iterator_fn a function that is called for every entry in the local host-list
+ * @param iterator_arg 3rd argument passed to iterator_fn
+ * @return the number of entries in the local host-list
+ */
+size_t
+dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg)
+{
+ size_t i;
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC
+ struct local_hostlist_entry *entry = local_hostlist_dynamic;
+ i = 0;
+ while (entry != NULL) {
+ if (iterator_fn != NULL) {
+ iterator_fn(entry->name, &entry->addr, iterator_arg);
+ }
+ i++;
+ entry = entry->next;
+ }
+#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+ for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) {
+ if (iterator_fn != NULL) {
+ iterator_fn(local_hostlist_static[i].name, &local_hostlist_static[i].addr, iterator_arg);
+ }
+ }
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+ return i;
+}
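+
+/* Usage sketch: the iterator reuses the dns_found_callback signature;
+ * "print_entry" is an illustrative name:
+ *
+ *   static void print_entry(const char *name, const ip_addr_t *addr, void *arg)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     printf("%s -> %s\n", name, ipaddr_ntoa(addr));
+ *   }
+ *
+ *   size_t count = dns_local_iterate(print_entry, NULL);
+ */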
+
+/**
+ * @ingroup dns
+ * Scans the local host-list for a hostname.
+ *
+ * @param hostname Hostname to look for in the local host-list
+ * @param addr on success, filled with the first matching IP address for the
+ *             hostname from the local host-list
+ * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 (ATTENTION: no fallback here!)
+ * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 (ATTENTION: no fallback here!)
+ * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only
+ * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only
+ * @return ERR_OK if found, ERR_ARG if not found
+ */
+err_t
+dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype)
+{
+ LWIP_UNUSED_ARG(dns_addrtype);
+ return dns_lookup_local(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype));
+}
+
+/* Internal implementation for dns_local_lookup and dns_lookup */
+static err_t
+dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype))
+{
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC
+ struct local_hostlist_entry *entry = local_hostlist_dynamic;
+ while (entry != NULL) {
+ if ((lwip_stricmp(entry->name, hostname) == 0) &&
+ LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, entry->addr)) {
+ if (addr) {
+ ip_addr_copy(*addr, entry->addr);
+ }
+ return ERR_OK;
+ }
+ entry = entry->next;
+ }
+#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+ size_t i;
+ for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) {
+ if ((lwip_stricmp(local_hostlist_static[i].name, hostname) == 0) &&
+ LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, local_hostlist_static[i].addr)) {
+ if (addr) {
+ ip_addr_copy(*addr, local_hostlist_static[i].addr);
+ }
+ return ERR_OK;
+ }
+ }
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+ return ERR_ARG;
+}
+
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC
+/**
+ * @ingroup dns
+ * Remove all entries from the local host-list for a specific hostname
+ * and/or IP address
+ *
+ * @param hostname hostname for which entries shall be removed from the local
+ * host-list
+ * @param addr address for which entries shall be removed from the local host-list
+ * @return the number of removed entries
+ */
+int
+dns_local_removehost(const char *hostname, const ip_addr_t *addr)
+{
+ int removed = 0;
+ struct local_hostlist_entry *entry = local_hostlist_dynamic;
+ struct local_hostlist_entry *last_entry = NULL;
+ while (entry != NULL) {
+ if (((hostname == NULL) || !lwip_stricmp(entry->name, hostname)) &&
+ ((addr == NULL) || ip_addr_cmp(&entry->addr, addr))) {
+ struct local_hostlist_entry *free_entry;
+ if (last_entry != NULL) {
+ last_entry->next = entry->next;
+ } else {
+ local_hostlist_dynamic = entry->next;
+ }
+ free_entry = entry;
+ entry = entry->next;
+ memp_free(MEMP_LOCALHOSTLIST, free_entry);
+ removed++;
+ } else {
+ last_entry = entry;
+ entry = entry->next;
+ }
+ }
+ return removed;
+}
+
+/**
+ * @ingroup dns
+ * Add a hostname/IP address pair to the local host-list.
+ * Duplicates are not checked.
+ *
+ * @param hostname hostname of the new entry
+ * @param addr IP address of the new entry
+ * @return ERR_OK if succeeded or ERR_MEM on memory error
+ */
+err_t
+dns_local_addhost(const char *hostname, const ip_addr_t *addr)
+{
+ struct local_hostlist_entry *entry;
+ size_t namelen;
+ char *entry_name;
+ LWIP_ASSERT("invalid host name (NULL)", hostname != NULL);
+ namelen = strlen(hostname);
+ LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN);
+ entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST);
+ if (entry == NULL) {
+ return ERR_MEM;
+ }
+ entry_name = (char *)entry + sizeof(struct local_hostlist_entry);
+ MEMCPY(entry_name, hostname, namelen);
+ entry_name[namelen] = 0;
+ entry->name = entry_name;
+ ip_addr_copy(entry->addr, *addr);
+ entry->next = local_hostlist_dynamic;
+ local_hostlist_dynamic = entry;
+ return ERR_OK;
+}
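+
+/* Usage sketch (name and address are hypothetical): pin a hostname to a
+ * fixed address so it resolves without any network traffic; subsequent
+ * lookups of the name hit this entry first:
+ *
+ *   ip_addr_t a;
+ *   IP_ADDR4(&a, 10, 0, 0, 5);
+ *   err_t err = dns_local_addhost("printer.lan", &a);
+ */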
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC*/
+#endif /* DNS_LOCAL_HOSTLIST */
+
+/**
+ * @ingroup dns
+ * Look up a hostname in the array of known hostnames.
+ *
+ * @note This function only looks in the internal array of known
+ * hostnames, it does not send out a query for the hostname if none
+ * was found. The function dns_enqueue() can be used to send a query
+ * for a hostname.
+ *
+ * @param name the hostname to look up
+ * @param addr on success, filled with the hostname's IP address from the
+ *             cached dns_table
+ * @return ERR_OK if found, ERR_ARG if not found
+ */
+static err_t
+dns_lookup(const char *name, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype))
+{
+ u8_t i;
+#if DNS_LOCAL_HOSTLIST
+ if (dns_lookup_local(name, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) {
+ return ERR_OK;
+ }
+#endif /* DNS_LOCAL_HOSTLIST */
+#ifdef DNS_LOOKUP_LOCAL_EXTERN
+ if (DNS_LOOKUP_LOCAL_EXTERN(name, addr, LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(dns_addrtype)) == ERR_OK) {
+ return ERR_OK;
+ }
+#endif /* DNS_LOOKUP_LOCAL_EXTERN */
+
+  /* Walk through the cached entries; copy out the address if found,
+     return ERR_ARG if not. */
+ for (i = 0; i < DNS_TABLE_SIZE; ++i) {
+ if ((dns_table[i].state == DNS_STATE_DONE) &&
+ (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0) &&
+ LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, dns_table[i].ipaddr)) {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_lookup: \"%s\": found = ", name));
+ ip_addr_debug_print_val(DNS_DEBUG, dns_table[i].ipaddr);
+ LWIP_DEBUGF(DNS_DEBUG, ("\n"));
+ if (addr) {
+ ip_addr_copy(*addr, dns_table[i].ipaddr);
+ }
+ return ERR_OK;
+ }
+ }
+
+ return ERR_ARG;
+}
+
+/**
+ * Compare the "dotted" name "query" with the encoded name "response"
+ * to make sure an answer from the DNS server matches the current dns_table
+ * entry (otherwise, answers might arrive late for a hostname that is no
+ * longer on the list).
+ *
+ * For now, this function compares case-insensitive to cope with all kinds of
+ * servers. This also means that "dns 0x20 bit encoding" must be checked
+ * externally, if we want to implement it.
+ * Currently, the request is sent exactly as passed in by the user.
+ *
+ * @param query hostname (not encoded) from the dns_table
+ * @param p pbuf containing the encoded hostname in the DNS response
+ * @param start_offset offset into p where the name starts
+ * @return 0xFFFF: names differ, other: names equal -> offset behind name
+ */
+static u16_t
+dns_compare_name(const char *query, struct pbuf *p, u16_t start_offset)
+{
+ int n;
+ u16_t response_offset = start_offset;
+
+ do {
+ n = pbuf_try_get_at(p, response_offset);
+ if ((n < 0) || (response_offset == 0xFFFF)) {
+ /* error or overflow */
+ return 0xFFFF;
+ }
+ response_offset++;
+ /** @see RFC 1035 - 4.1.4. Message compression */
+ if ((n & 0xc0) == 0xc0) {
+ /* Compressed name: cannot be equal since we don't send them */
+ return 0xFFFF;
+ } else {
+ /* Not compressed name */
+ while (n > 0) {
+ int c = pbuf_try_get_at(p, response_offset);
+ if (c < 0) {
+ return 0xFFFF;
+ }
+ if (lwip_tolower((*query)) != lwip_tolower((u8_t)c)) {
+ return 0xFFFF;
+ }
+ if (response_offset == 0xFFFF) {
+ /* would overflow */
+ return 0xFFFF;
+ }
+ response_offset++;
+ ++query;
+ --n;
+ }
+ ++query;
+ }
+ n = pbuf_try_get_at(p, response_offset);
+ if (n < 0) {
+ return 0xFFFF;
+ }
+ } while (n != 0);
+
+ if (response_offset == 0xFFFF) {
+ /* would overflow */
+ return 0xFFFF;
+ }
+ return (u16_t)(response_offset + 1);
+}
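+
+/* Worked example of the wire format compared above (RFC 1035, section 3.1):
+ * the dotted name "www.lwip.org" is encoded as length-prefixed labels
+ *
+ *   0x03 'w' 'w' 'w'  0x04 'l' 'w' 'i' 'p'  0x03 'o' 'r' 'g'  0x00
+ *
+ * so the loop reads one length octet n, compares the next n characters with
+ * the dotted query, and repeats until the terminating zero-length label. */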
+
+/**
+ * Walk through a compact encoded DNS name and return the end of the name.
+ *
+ * @param p pbuf containing the name
+ * @param query_idx start index into p pointing to encoded DNS name in the DNS server response
+ * @return index to end of the name
+ */
+static u16_t
+dns_skip_name(struct pbuf *p, u16_t query_idx)
+{
+ int n;
+ u16_t offset = query_idx;
+
+ do {
+ n = pbuf_try_get_at(p, offset++);
+ if ((n < 0) || (offset == 0)) {
+ return 0xFFFF;
+ }
+ /** @see RFC 1035 - 4.1.4. Message compression */
+ if ((n & 0xc0) == 0xc0) {
+ /* Compressed name: since we only want to skip it (not check it), stop here */
+ break;
+ } else {
+ /* Not compressed name */
+ if (offset + n >= p->tot_len) {
+ return 0xFFFF;
+ }
+ offset = (u16_t)(offset + n);
+ }
+ n = pbuf_try_get_at(p, offset);
+ if (n < 0) {
+ return 0xFFFF;
+ }
+ } while (n != 0);
+
+ if (offset == 0xFFFF) {
+ return 0xFFFF;
+ }
+ return (u16_t)(offset + 1);
+}
+
+/**
+ * Send a DNS query packet.
+ *
+ * @param idx the DNS table entry index for which to send a request
+ * @return ERR_OK if packet is sent; an err_t indicating the problem otherwise
+ */
+static err_t
+dns_send(u8_t idx)
+{
+ err_t err;
+ struct dns_hdr hdr;
+ struct dns_query qry;
+ struct pbuf *p;
+ u16_t query_idx, copy_len;
+ const char *hostname, *hostname_part;
+ u8_t n;
+ u8_t pcb_idx;
+ struct dns_table_entry *entry = &dns_table[idx];
+
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n",
+ (u16_t)(entry->server_idx), entry->name));
+ LWIP_ASSERT("dns server out of array", entry->server_idx < DNS_MAX_SERVERS);
+ if (ip_addr_isany_val(dns_servers[entry->server_idx])
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+ && !entry->is_mdns
+#endif
+ ) {
+ /* DNS server not valid anymore, e.g. PPP netif has been shut down */
+ /* call specified callback function if provided */
+ dns_call_found(idx, NULL);
+ /* flush this entry */
+ entry->state = DNS_STATE_UNUSED;
+ return ERR_OK;
+ }
+
+ /* if here, we have either a new query or a retry on a previous query to process */
+ p = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(SIZEOF_DNS_HDR + strlen(entry->name) + 2 +
+ SIZEOF_DNS_QUERY), PBUF_RAM);
+ if (p != NULL) {
+ const ip_addr_t *dst;
+ u16_t dst_port;
+ /* fill dns header */
+ memset(&hdr, 0, SIZEOF_DNS_HDR);
+ hdr.id = lwip_htons(entry->txid);
+ hdr.flags1 = DNS_FLAG1_RD;
+ hdr.numquestions = PP_HTONS(1);
+ pbuf_take(p, &hdr, SIZEOF_DNS_HDR);
+ hostname = entry->name;
+ --hostname;
+
+ /* convert hostname into suitable query format. */
+ query_idx = SIZEOF_DNS_HDR;
+ do {
+ ++hostname;
+ hostname_part = hostname;
+ for (n = 0; *hostname != '.' && *hostname != 0; ++hostname) {
+ ++n;
+ }
+ copy_len = (u16_t)(hostname - hostname_part);
+ if (query_idx + n + 1 > 0xFFFF) {
+ /* u16_t overflow */
+ goto overflow_return;
+ }
+ pbuf_put_at(p, query_idx, n);
+ pbuf_take_at(p, hostname_part, copy_len, (u16_t)(query_idx + 1));
+ query_idx = (u16_t)(query_idx + n + 1);
+ } while (*hostname != 0);
+ pbuf_put_at(p, query_idx, 0);
+ query_idx++;
+
+ /* fill dns query */
+ if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) {
+ qry.type = PP_HTONS(DNS_RRTYPE_AAAA);
+ } else {
+ qry.type = PP_HTONS(DNS_RRTYPE_A);
+ }
+ qry.cls = PP_HTONS(DNS_RRCLASS_IN);
+ pbuf_take_at(p, &qry, SIZEOF_DNS_QUERY, query_idx);
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+ pcb_idx = entry->pcb_idx;
+#else
+ pcb_idx = 0;
+#endif
+ /* send dns packet */
+ LWIP_DEBUGF(DNS_DEBUG, ("sending DNS request ID %d for name \"%s\" to server %d\r\n",
+ entry->txid, entry->name, entry->server_idx));
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+ if (entry->is_mdns) {
+ dst_port = DNS_MQUERY_PORT;
+#if LWIP_IPV6
+ if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) {
+ dst = &dns_mquery_v6group;
+ }
+#endif
+#if LWIP_IPV4 && LWIP_IPV6
+ else
+#endif
+#if LWIP_IPV4
+ {
+ dst = &dns_mquery_v4group;
+ }
+#endif
+ } else
+#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
+ {
+ dst_port = DNS_SERVER_PORT;
+ dst = &dns_servers[entry->server_idx];
+ }
+ err = udp_sendto(dns_pcbs[pcb_idx], p, dst, dst_port);
+
+ /* free pbuf */
+ pbuf_free(p);
+ } else {
+ err = ERR_MEM;
+ }
+
+ return err;
+overflow_return:
+ pbuf_free(p);
+ return ERR_VAL;
+}
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+static struct udp_pcb *
+dns_alloc_random_port(void)
+{
+ err_t err;
+ struct udp_pcb *pcb;
+
+ pcb = udp_new_ip_type(IPADDR_TYPE_ANY);
+ if (pcb == NULL) {
+    /* out of memory; the caller falls back to reusing an existing pcb */
+ return NULL;
+ }
+ do {
+ u16_t port = (u16_t)DNS_RAND_TXID();
+ if (DNS_PORT_ALLOWED(port)) {
+ err = udp_bind(pcb, IP_ANY_TYPE, port);
+ } else {
+ /* this port is not allowed, try again */
+ err = ERR_USE;
+ }
+ } while (err == ERR_USE);
+ if (err != ERR_OK) {
+ udp_remove(pcb);
+ return NULL;
+ }
+ udp_recv(pcb, dns_recv, NULL);
+ return pcb;
+}
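+
+/* Both hooks used above have overridable defaults in opt.h (DNS_RAND_TXID
+ * falls back to LWIP_RAND, DNS_PORT_ALLOWED to "port >= 1024"). A
+ * lwipopts.h customization sketch, with illustrative values and a
+ * hypothetical my_hw_rng():
+ *
+ *   #define DNS_RAND_TXID()        ((u16_t)my_hw_rng())
+ *   #define DNS_PORT_ALLOWED(port) ((port) >= 4096 && (port) <= 60000)
+ */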
+
+/**
+ * dns_alloc_pcb() - allocates a new pcb (or reuses an existing one) to be used
+ * for sending a request
+ *
+ * @return an index into dns_pcbs
+ */
+static u8_t
+dns_alloc_pcb(void)
+{
+ u8_t i;
+ u8_t idx;
+
+ for (i = 0; i < DNS_MAX_SOURCE_PORTS; i++) {
+ if (dns_pcbs[i] == NULL) {
+ break;
+ }
+ }
+ if (i < DNS_MAX_SOURCE_PORTS) {
+ dns_pcbs[i] = dns_alloc_random_port();
+ if (dns_pcbs[i] != NULL) {
+ /* succeeded */
+ dns_last_pcb_idx = i;
+ return i;
+ }
+ }
+ /* if we come here, creating a new UDP pcb failed, so we have to use
+ an already existing one (so overflow is no issue) */
+ for (i = 0, idx = (u8_t)(dns_last_pcb_idx + 1); i < DNS_MAX_SOURCE_PORTS; i++, idx++) {
+ if (idx >= DNS_MAX_SOURCE_PORTS) {
+ idx = 0;
+ }
+ if (dns_pcbs[idx] != NULL) {
+ dns_last_pcb_idx = idx;
+ return idx;
+ }
+ }
+ return DNS_MAX_SOURCE_PORTS;
+}
+#endif /* ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) */
+
+/**
+ * dns_call_found() - call the found callback and check if there are duplicate
+ * entries for the given hostname. If there are any, their found callback will
+ * be called and they will be removed.
+ *
+ * @param idx dns table index of the entry that is resolved or removed
+ * @param addr IP address for the hostname (or NULL on error or memory shortage)
+ */
+static void
+dns_call_found(u8_t idx, ip_addr_t *addr)
+{
+#if ((LWIP_DNS_SECURE & (LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT)) != 0)
+ u8_t i;
+#endif
+
+#if LWIP_IPV4 && LWIP_IPV6
+ if (addr != NULL) {
+ /* check that address type matches the request and adapt the table entry */
+ if (IP_IS_V6_VAL(*addr)) {
+ LWIP_ASSERT("invalid response", LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype));
+ dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6;
+ } else {
+ LWIP_ASSERT("invalid response", !LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype));
+ dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4;
+ }
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+ for (i = 0; i < DNS_MAX_REQUESTS; i++) {
+ if (dns_requests[i].found && (dns_requests[i].dns_table_idx == idx)) {
+ (*dns_requests[i].found)(dns_table[idx].name, addr, dns_requests[i].arg);
+ /* flush this entry */
+ dns_requests[i].found = NULL;
+ }
+ }
+#else
+ if (dns_requests[idx].found) {
+ (*dns_requests[idx].found)(dns_table[idx].name, addr, dns_requests[idx].arg);
+ }
+ dns_requests[idx].found = NULL;
+#endif
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+  /* close the pcb used, unless other requests are still using it */
+ for (i = 0; i < DNS_MAX_REQUESTS; i++) {
+ if (i == idx) {
+ continue; /* only check other requests */
+ }
+ if (dns_table[i].state == DNS_STATE_ASKING) {
+ if (dns_table[i].pcb_idx == dns_table[idx].pcb_idx) {
+ /* another request is still using the same pcb */
+ dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS;
+ break;
+ }
+ }
+ }
+ if (dns_table[idx].pcb_idx < DNS_MAX_SOURCE_PORTS) {
+ /* if we come here, the pcb is not used any more and can be removed */
+ udp_remove(dns_pcbs[dns_table[idx].pcb_idx]);
+ dns_pcbs[dns_table[idx].pcb_idx] = NULL;
+ dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS;
+ }
+#endif
+}
+
+/* Create a query transmission ID that is unique for all outstanding queries */
+static u16_t
+dns_create_txid(void)
+{
+ u16_t txid;
+ u8_t i;
+
+again:
+ txid = (u16_t)DNS_RAND_TXID();
+
+ /* check whether the ID is unique */
+ for (i = 0; i < DNS_TABLE_SIZE; i++) {
+ if ((dns_table[i].state == DNS_STATE_ASKING) &&
+ (dns_table[i].txid == txid)) {
+ /* ID already used by another pending query */
+ goto again;
+ }
+ }
+
+ return txid;
+}
+
+/**
+ * Check whether there are other backup DNS servers available to try
+ */
+static u8_t
+dns_backupserver_available(struct dns_table_entry *pentry)
+{
+ u8_t ret = 0;
+
+ if (pentry) {
+ if ((pentry->server_idx + 1 < DNS_MAX_SERVERS) && !ip_addr_isany_val(dns_servers[pentry->server_idx + 1])) {
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * dns_check_entry() - check one entry in the dns_table:
+ * - send out query for new entries
+ * - retry old pending entries on timeout (also with different servers)
+ * - remove completed entries from the table if their TTL has expired
+ *
+ * @param i index of the dns_table entry to check
+ */
+static void
+dns_check_entry(u8_t i)
+{
+ err_t err;
+ struct dns_table_entry *entry = &dns_table[i];
+
+ LWIP_ASSERT("array index out of bounds", i < DNS_TABLE_SIZE);
+
+ switch (entry->state) {
+ case DNS_STATE_NEW:
+ /* initialize new entry */
+ entry->txid = dns_create_txid();
+ entry->state = DNS_STATE_ASKING;
+ entry->server_idx = 0;
+ entry->tmr = 1;
+ entry->retries = 0;
+
+ /* send DNS packet for this entry */
+ err = dns_send(i);
+ if (err != ERR_OK) {
+ LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING,
+ ("dns_send returned error: %s\n", lwip_strerr(err)));
+ }
+ break;
+ case DNS_STATE_ASKING:
+ if (--entry->tmr == 0) {
+ if (++entry->retries == DNS_MAX_RETRIES) {
+ if (dns_backupserver_available(entry)
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+ && !entry->is_mdns
+#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
+ ) {
+ /* change of server */
+ entry->server_idx++;
+ entry->tmr = 1;
+ entry->retries = 0;
+ } else {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": timeout\n", entry->name));
+ /* call specified callback function if provided */
+ dns_call_found(i, NULL);
+ /* flush this entry */
+ entry->state = DNS_STATE_UNUSED;
+ break;
+ }
+ } else {
+ /* wait longer for the next retry */
+ entry->tmr = entry->retries;
+ }
+
+ /* send DNS packet for this entry */
+ err = dns_send(i);
+ if (err != ERR_OK) {
+ LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING,
+ ("dns_send returned error: %s\n", lwip_strerr(err)));
+ }
+ }
+ break;
+ case DNS_STATE_DONE:
+      /* if the time to live is zero or has just run out */
+ if ((entry->ttl == 0) || (--entry->ttl == 0)) {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": flush\n", entry->name));
+ /* flush this entry, there cannot be any related pending entries in this state */
+ entry->state = DNS_STATE_UNUSED;
+ }
+ break;
+ case DNS_STATE_UNUSED:
+ /* nothing to do */
+ break;
+ default:
+ LWIP_ASSERT("unknown dns_table entry state:", 0);
+ break;
+ }
+}
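+
+/* Resulting retry timing with the default DNS_MAX_RETRIES of 4: after the
+ * initial packet, retransmissions follow at intervals of 1, 1 and 2 ticks
+ * of DNS_TMR_INTERVAL, and the backup server is tried (or the lookup fails)
+ * 3 ticks after the last retransmission. */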
+
+/**
+ * Call dns_check_entry for each entry in dns_table - check all entries.
+ */
+static void
+dns_check_entries(void)
+{
+ u8_t i;
+
+ for (i = 0; i < DNS_TABLE_SIZE; ++i) {
+ dns_check_entry(i);
+ }
+}
+
+/**
+ * Save TTL and call dns_call_found for correct response.
+ */
+static void
+dns_correct_response(u8_t idx, u32_t ttl)
+{
+ struct dns_table_entry *entry = &dns_table[idx];
+
+ entry->state = DNS_STATE_DONE;
+
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response = ", entry->name));
+ ip_addr_debug_print_val(DNS_DEBUG, entry->ipaddr);
+ LWIP_DEBUGF(DNS_DEBUG, ("\n"));
+
+ /* read the answer resource record's TTL, and maximize it if needed */
+ entry->ttl = ttl;
+ if (entry->ttl > DNS_MAX_TTL) {
+ entry->ttl = DNS_MAX_TTL;
+ }
+ dns_call_found(idx, &entry->ipaddr);
+
+ if (entry->ttl == 0) {
+ /* RFC 883, page 29: "Zero values are
+ interpreted to mean that the RR can only be used for the
+ transaction in progress, and should not be cached."
+ -> flush this entry now */
+ /* entry reused during callback? */
+ if (entry->state == DNS_STATE_DONE) {
+ entry->state = DNS_STATE_UNUSED;
+ }
+ }
+}
+
+/**
+ * Receive input function for DNS response packets arriving for the dns UDP pcb.
+ */
+static void
+dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port)
+{
+ u8_t i;
+ u16_t txid;
+ u16_t res_idx;
+ struct dns_hdr hdr;
+ struct dns_answer ans;
+ struct dns_query qry;
+ u16_t nquestions, nanswers;
+
+ LWIP_UNUSED_ARG(arg);
+ LWIP_UNUSED_ARG(pcb);
+ LWIP_UNUSED_ARG(port);
+
+  /* is the dns message big enough? */
+ if (p->tot_len < (SIZEOF_DNS_HDR + SIZEOF_DNS_QUERY)) {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too small\n"));
+ /* free pbuf and return */
+ goto ignore_packet;
+ }
+
+  /* copy the dns header into a local buffer for processing */
+ if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, 0) == SIZEOF_DNS_HDR) {
+ /* Match the ID in the DNS header with the name table. */
+ txid = lwip_htons(hdr.id);
+ for (i = 0; i < DNS_TABLE_SIZE; i++) {
+ struct dns_table_entry *entry = &dns_table[i];
+ if ((entry->state == DNS_STATE_ASKING) &&
+ (entry->txid == txid)) {
+
+ /* We only care about the question(s) and the answers. The authrr
+ and the extrarr are simply discarded. */
+ nquestions = lwip_htons(hdr.numquestions);
+ nanswers = lwip_htons(hdr.numanswers);
+
+ /* Check for correct response. */
+ if ((hdr.flags1 & DNS_FLAG1_RESPONSE) == 0) {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": not a response\n", entry->name));
+ goto ignore_packet; /* ignore this packet */
+ }
+ if (nquestions != 1) {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name));
+ goto ignore_packet; /* ignore this packet */
+ }
+
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+ if (!entry->is_mdns)
+#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
+ {
+ /* Check whether response comes from the same network address to which the
+ question was sent. (RFC 5452) */
+ if (!ip_addr_cmp(addr, &dns_servers[entry->server_idx])) {
+ goto ignore_packet; /* ignore this packet */
+ }
+ }
+
+ /* Check if the name in the "question" part match with the name in the entry and
+ skip it if equal. */
+ res_idx = dns_compare_name(entry->name, p, SIZEOF_DNS_HDR);
+ if (res_idx == 0xFFFF) {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name));
+ goto ignore_packet; /* ignore this packet */
+ }
+
+ /* check if "question" part matches the request */
+ if (pbuf_copy_partial(p, &qry, SIZEOF_DNS_QUERY, res_idx) != SIZEOF_DNS_QUERY) {
+ goto ignore_packet; /* ignore this packet */
+ }
+ if ((qry.cls != PP_HTONS(DNS_RRCLASS_IN)) ||
+ (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_AAAA))) ||
+ (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_A)))) {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name));
+ goto ignore_packet; /* ignore this packet */
+ }
+ /* skip the rest of the "question" part */
+ if (res_idx + SIZEOF_DNS_QUERY > 0xFFFF) {
+ goto ignore_packet;
+ }
+ res_idx = (u16_t)(res_idx + SIZEOF_DNS_QUERY);
+
+ /* Check for error. If so, call callback to inform. */
+ if (hdr.flags2 & DNS_FLAG2_ERR_MASK) {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", entry->name));
+
+ /* if there is another backup DNS server to try
+ * then don't stop the DNS request
+ */
+ if (dns_backupserver_available(entry)) {
+ /* avoid retrying the same server */
+ entry->retries = DNS_MAX_RETRIES-1;
+ entry->tmr = 1;
+
+ /* contact next available server for this entry */
+ dns_check_entry(i);
+
+ goto ignore_packet;
+ }
+ } else {
+ while ((nanswers > 0) && (res_idx < p->tot_len)) {
+ /* skip answer resource record's host name */
+ res_idx = dns_skip_name(p, res_idx);
+ if (res_idx == 0xFFFF) {
+ goto ignore_packet; /* ignore this packet */
+ }
+
+ /* Check for IP address type and Internet class. Others are discarded. */
+ if (pbuf_copy_partial(p, &ans, SIZEOF_DNS_ANSWER, res_idx) != SIZEOF_DNS_ANSWER) {
+ goto ignore_packet; /* ignore this packet */
+ }
+ if (res_idx + SIZEOF_DNS_ANSWER > 0xFFFF) {
+ goto ignore_packet;
+ }
+ res_idx = (u16_t)(res_idx + SIZEOF_DNS_ANSWER);
+
+ if (ans.cls == PP_HTONS(DNS_RRCLASS_IN)) {
+#if LWIP_IPV4
+ if ((ans.type == PP_HTONS(DNS_RRTYPE_A)) && (ans.len == PP_HTONS(sizeof(ip4_addr_t)))) {
+#if LWIP_IPV4 && LWIP_IPV6
+ if (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype))
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ {
+ ip4_addr_t ip4addr;
+ /* read the IP address after answer resource record's header */
+ if (pbuf_copy_partial(p, &ip4addr, sizeof(ip4_addr_t), res_idx) != sizeof(ip4_addr_t)) {
+ goto ignore_packet; /* ignore this packet */
+ }
+ ip_addr_copy_from_ip4(dns_table[i].ipaddr, ip4addr);
+ pbuf_free(p);
+ /* handle correct response */
+ dns_correct_response(i, lwip_ntohl(ans.ttl));
+ return;
+ }
+ }
+#endif /* LWIP_IPV4 */
+#if LWIP_IPV6
+ if ((ans.type == PP_HTONS(DNS_RRTYPE_AAAA)) && (ans.len == PP_HTONS(sizeof(ip6_addr_p_t)))) {
+#if LWIP_IPV4 && LWIP_IPV6
+ if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype))
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ {
+ ip6_addr_p_t ip6addr;
+ /* read the IP address after answer resource record's header */
+ if (pbuf_copy_partial(p, &ip6addr, sizeof(ip6_addr_p_t), res_idx) != sizeof(ip6_addr_p_t)) {
+ goto ignore_packet; /* ignore this packet */
+ }
+ /* @todo: scope ip6addr? Might be required for link-local addresses at least? */
+ ip_addr_copy_from_ip6_packed(dns_table[i].ipaddr, ip6addr);
+ pbuf_free(p);
+ /* handle correct response */
+ dns_correct_response(i, lwip_ntohl(ans.ttl));
+ return;
+ }
+ }
+#endif /* LWIP_IPV6 */
+ }
+ /* skip this answer */
+ if ((int)(res_idx + lwip_htons(ans.len)) > 0xFFFF) {
+ goto ignore_packet; /* ignore this packet */
+ }
+ res_idx = (u16_t)(res_idx + lwip_htons(ans.len));
+ --nanswers;
+ }
+#if LWIP_IPV4 && LWIP_IPV6
+ if ((entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) ||
+ (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) {
+ if (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) {
+ /* IPv4 failed, try IPv6 */
+ dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6;
+ } else {
+ /* IPv6 failed, try IPv4 */
+ dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4;
+ }
+ pbuf_free(p);
+ dns_table[i].state = DNS_STATE_NEW;
+ dns_check_entry(i);
+ return;
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in response\n", entry->name));
+ }
+ /* call callback to indicate error, clean up memory and return */
+ pbuf_free(p);
+ dns_call_found(i, NULL);
+ dns_table[i].state = DNS_STATE_UNUSED;
+ return;
+ }
+ }
+ }
+
+ignore_packet:
+ /* deallocate memory and return */
+ pbuf_free(p);
+ return;
+}
+
+/**
+ * Queues a new hostname to resolve and sends out a DNS query for that hostname
+ *
+ * @param name the hostname that is to be queried
+ * @param hostnamelen length of the hostname
+ * @param found a callback function to be called on success, failure or timeout
+ * @param callback_arg argument to pass to the callback function
+ * @return an err_t return code.
+ */
+static err_t
+dns_enqueue(const char *name, size_t hostnamelen, dns_found_callback found,
+ void *callback_arg LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype) LWIP_DNS_ISMDNS_ARG(u8_t is_mdns))
+{
+ u8_t i;
+ u8_t lseq, lseqi;
+ struct dns_table_entry *entry = NULL;
+ size_t namelen;
+ struct dns_req_entry *req;
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+ u8_t r;
+ /* check for duplicate entries */
+ for (i = 0; i < DNS_TABLE_SIZE; i++) {
+ if ((dns_table[i].state == DNS_STATE_ASKING) &&
+ (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0)) {
+#if LWIP_IPV4 && LWIP_IPV6
+ if (dns_table[i].reqaddrtype != dns_addrtype) {
+ /* requested address types don't match
+ this can lead to 2 concurrent requests, but mixing the address types
+ for the same host should not be that common */
+ continue;
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ /* this is a duplicate entry, find a free request entry */
+ for (r = 0; r < DNS_MAX_REQUESTS; r++) {
+ if (dns_requests[r].found == 0) {
+ dns_requests[r].found = found;
+ dns_requests[r].arg = callback_arg;
+ dns_requests[r].dns_table_idx = i;
+ LWIP_DNS_SET_ADDRTYPE(dns_requests[r].reqaddrtype, dns_addrtype);
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": duplicate request\n", name));
+ return ERR_INPROGRESS;
+ }
+ }
+ }
+ }
+ /* no duplicate entries found */
+#endif
+
+ /* search an unused entry, or the oldest one */
+ lseq = 0;
+ lseqi = DNS_TABLE_SIZE;
+ for (i = 0; i < DNS_TABLE_SIZE; ++i) {
+ entry = &dns_table[i];
+ /* is it an unused entry ? */
+ if (entry->state == DNS_STATE_UNUSED) {
+ break;
+ }
+ /* check if this is the oldest completed entry */
+ if (entry->state == DNS_STATE_DONE) {
+ u8_t age = (u8_t)(dns_seqno - entry->seqno);
+ if (age > lseq) {
+ lseq = age;
+ lseqi = i;
+ }
+ }
+ }
+
+  /* if we haven't found an unused entry, use the oldest completed one */
+ if (i == DNS_TABLE_SIZE) {
+ if ((lseqi >= DNS_TABLE_SIZE) || (dns_table[lseqi].state != DNS_STATE_DONE)) {
+ /* no entry can be used now, table is full */
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS entries table is full\n", name));
+ return ERR_MEM;
+ } else {
+ /* use the oldest completed one */
+ i = lseqi;
+ entry = &dns_table[i];
+ }
+ }
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+ /* find a free request entry */
+ req = NULL;
+ for (r = 0; r < DNS_MAX_REQUESTS; r++) {
+ if (dns_requests[r].found == NULL) {
+ req = &dns_requests[r];
+ break;
+ }
+ }
+ if (req == NULL) {
+ /* no request entry can be used now, table is full */
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS request entries table is full\n", name));
+ return ERR_MEM;
+ }
+ req->dns_table_idx = i;
+#else
+ /* in this configuration, the entry index is the same as the request index */
+ req = &dns_requests[i];
+#endif
+
+ /* use this entry */
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS entry %"U16_F"\n", name, (u16_t)(i)));
+
+ /* fill the entry */
+ entry->state = DNS_STATE_NEW;
+ entry->seqno = dns_seqno;
+ LWIP_DNS_SET_ADDRTYPE(entry->reqaddrtype, dns_addrtype);
+ LWIP_DNS_SET_ADDRTYPE(req->reqaddrtype, dns_addrtype);
+ req->found = found;
+ req->arg = callback_arg;
+ namelen = LWIP_MIN(hostnamelen, DNS_MAX_NAME_LENGTH - 1);
+ MEMCPY(entry->name, name, namelen);
+ entry->name[namelen] = 0;
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+ entry->pcb_idx = dns_alloc_pcb();
+ if (entry->pcb_idx >= DNS_MAX_SOURCE_PORTS) {
+ /* failed to get a UDP pcb */
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": failed to allocate a pcb\n", name));
+ entry->state = DNS_STATE_UNUSED;
+ req->found = NULL;
+ return ERR_MEM;
+ }
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS pcb %"U16_F"\n", name, (u16_t)(entry->pcb_idx)));
+#endif
+
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+ entry->is_mdns = is_mdns;
+#endif
+
+ dns_seqno++;
+
+  /* force sending the query without waiting for the timer */
+ dns_check_entry(i);
+
+ /* dns query is enqueued */
+ return ERR_INPROGRESS;
+}
+
+/**
+ * @ingroup dns
+ * Resolve a hostname (string) into an IP address.
+ * NON-BLOCKING callback version for use with raw API!!!
+ *
+ * Returns immediately with one of the following err_t return codes:
+ * - ERR_OK if the hostname is a valid IP address string or the host
+ *   name is already in the local names table.
+ * - ERR_INPROGRESS if a request has been enqueued to be sent to the DNS
+ *   server for resolution.
+ * - ERR_ARG: dns client not initialized or invalid hostname
+ *
+ * @param hostname the hostname that is to be queried
+ * @param addr pointer to a ip_addr_t where to store the address if it is already
+ * cached in the dns_table (only valid if ERR_OK is returned!)
+ * @param found a callback function to be called on success, failure or timeout (only if
+ * ERR_INPROGRESS is returned!)
+ * @param callback_arg argument to pass to the callback function
+ * @return an err_t return code.
+ */
+err_t
+dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found,
+ void *callback_arg)
+{
+ return dns_gethostbyname_addrtype(hostname, addr, found, callback_arg, LWIP_DNS_ADDRTYPE_DEFAULT);
+}
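+
+/* A minimal raw-API usage sketch; "my_found" and "start_lookup" are
+ * illustrative names. The result can arrive synchronously (ERR_OK) or
+ * later via the callback (ERR_INPROGRESS):
+ *
+ *   static void my_found(const char *name, const ip_addr_t *ipaddr, void *arg)
+ *   {
+ *     if (ipaddr != NULL) {
+ *       // resolved: connect, send, ...
+ *     } else {
+ *       // resolution failed or timed out
+ *     }
+ *   }
+ *
+ *   static void start_lookup(void)
+ *   {
+ *     ip_addr_t cached;
+ *     err_t err = dns_gethostbyname("lwip.org", &cached, my_found, NULL);
+ *     if (err == ERR_OK) {
+ *       // address was already cached in 'cached'
+ *     } else if (err != ERR_INPROGRESS) {
+ *       // immediate error, e.g. no DNS server configured
+ *     }
+ *   }
+ */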
+
+/**
+ * @ingroup dns
+ * Like dns_gethostbyname, but returned address type can be controlled:
+ * @param hostname the hostname that is to be queried
+ * @param addr pointer to a ip_addr_t where to store the address if it is already
+ * cached in the dns_table (only valid if ERR_OK is returned!)
+ * @param found a callback function to be called on success, failure or timeout (only if
+ * ERR_INPROGRESS is returned!)
+ * @param callback_arg argument to pass to the callback function
+ * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 first, try IPv6 if IPv4 fails only
+ * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 first, try IPv4 if IPv6 fails only
+ * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only
+ * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only
+ */
+err_t
+dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_callback found,
+ void *callback_arg, u8_t dns_addrtype)
+{
+ size_t hostnamelen;
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+ u8_t is_mdns;
+#endif
+ /* not initialized or no valid server yet, or invalid addr pointer
+ * or invalid hostname or invalid hostname length */
+ if ((addr == NULL) ||
+ (!hostname) || (!hostname[0])) {
+ return ERR_ARG;
+ }
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0)
+ if (dns_pcbs[0] == NULL) {
+ return ERR_ARG;
+ }
+#endif
+ hostnamelen = strlen(hostname);
+ if (hostnamelen >= DNS_MAX_NAME_LENGTH) {
+ LWIP_DEBUGF(DNS_DEBUG, ("dns_gethostbyname: name too long to resolve"));
+ return ERR_ARG;
+ }
+
+#if LWIP_HAVE_LOOPIF
+ if (strcmp(hostname, "localhost") == 0) {
+ ip_addr_set_loopback(LWIP_DNS_ADDRTYPE_IS_IPV6(dns_addrtype), addr);
+ return ERR_OK;
+ }
+#endif /* LWIP_HAVE_LOOPIF */
+
+ /* host name already in octet notation? set ip addr and return ERR_OK */
+ if (ipaddr_aton(hostname, addr)) {
+#if LWIP_IPV4 && LWIP_IPV6
+ if ((IP_IS_V6(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV4)) ||
+ (IP_IS_V4(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV6)))
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ {
+ return ERR_OK;
+ }
+ }
+ /* already have this address cached? */
+ if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) {
+ return ERR_OK;
+ }
+#if LWIP_IPV4 && LWIP_IPV6
+ if ((dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) {
+ /* fallback to 2nd IP type and try again to lookup */
+ u8_t fallback;
+ if (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) {
+ fallback = LWIP_DNS_ADDRTYPE_IPV6;
+ } else {
+ fallback = LWIP_DNS_ADDRTYPE_IPV4;
+ }
+ if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(fallback)) == ERR_OK) {
+ return ERR_OK;
+ }
+ }
+#else /* LWIP_IPV4 && LWIP_IPV6 */
+ LWIP_UNUSED_ARG(dns_addrtype);
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+ if (strstr(hostname, ".local") == &hostname[hostnamelen] - 6) {
+ is_mdns = 1;
+ } else {
+ is_mdns = 0;
+ }
+
+ if (!is_mdns)
+#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
+ {
+ /* prevent calling found callback if no server is set, return error instead */
+ if (ip_addr_isany_val(dns_servers[0])) {
+ return ERR_VAL;
+ }
+ }
+
+ /* queue query with specified callback */
+ return dns_enqueue(hostname, hostnamelen, found, callback_arg LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)
+ LWIP_DNS_ISMDNS_ARG(is_mdns));
+}
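+
+/* Usage sketch: prefer IPv6 but fall back to IPv4 (callback and argument
+ * as in the dns_gethostbyname example above):
+ *
+ *   err = dns_gethostbyname_addrtype("lwip.org", &addr, my_found, NULL,
+ *                                    LWIP_DNS_ADDRTYPE_IPV6_IPV4);
+ */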
+
+#endif /* LWIP_DNS */
diff --git a/lwip/src/core/inet_chksum.c b/lwip/src/core/inet_chksum.c
new file mode 100644
index 0000000..818c68f
--- /dev/null
+++ b/lwip/src/core/inet_chksum.c
@@ -0,0 +1,608 @@
+/**
+ * @file
+ * Internet checksum functions.\n
+ *
+ * These are some reference implementations of the checksum algorithm, with the
+ * aim of being simple, correct and fully portable. Checksumming is the
+ * first thing you would want to optimize for your platform. If you create
+ * your own version, link it in and in your cc.h put:
+ *
+ * \#define LWIP_CHKSUM your_checksum_routine
+ *
+ * Or you can select from the implementations below by defining
+ * LWIP_CHKSUM_ALGORITHM to 1, 2 or 3.
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/inet_chksum.h"
+#include "lwip/def.h"
+#include "lwip/ip_addr.h"
+
+#include <string.h>
+
+#ifndef LWIP_CHKSUM
+# define LWIP_CHKSUM lwip_standard_chksum
+# ifndef LWIP_CHKSUM_ALGORITHM
+# define LWIP_CHKSUM_ALGORITHM 2
+# endif
+u16_t lwip_standard_chksum(const void *dataptr, int len);
+#endif
+/* If none set: */
+#ifndef LWIP_CHKSUM_ALGORITHM
+# define LWIP_CHKSUM_ALGORITHM 0
+#endif
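+
+/* Selection sketch for lwipopts.h: either pick one of the three reference
+ * implementations below, e.g.
+ *
+ *   #define LWIP_CHKSUM_ALGORITHM 3
+ *
+ * or plug in a platform-optimized routine via cc.h as described in the
+ * file header. */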
+
+#if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */
+/**
+ * lwip checksum
+ *
+ * @param dataptr points to start of data to be summed at any boundary
+ * @param len length of data to be summed
+ * @return host order (!) lwip checksum (non-inverted Internet sum)
+ *
+ * @note accumulator size limits summable length to 64k
+ * @note host endianness is irrelevant (RFC 1071, p. 3)
+ */
+u16_t
+lwip_standard_chksum(const void *dataptr, int len)
+{
+ u32_t acc;
+ u16_t src;
+ const u8_t *octetptr;
+
+ acc = 0;
+ /* dataptr may be at odd or even addresses */
+ octetptr = (const u8_t *)dataptr;
+ while (len > 1) {
+ /* declare first octet as most significant
+ thus assume network order, ignoring host order */
+ src = (*octetptr) << 8;
+ octetptr++;
+ /* declare second octet as least significant */
+ src |= (*octetptr);
+ octetptr++;
+ acc += src;
+ len -= 2;
+ }
+ if (len > 0) {
+ /* accumulate remaining octet */
+ src = (*octetptr) << 8;
+ acc += src;
+ }
+ /* add deferred carry bits */
+ acc = (acc >> 16) + (acc & 0x0000ffffUL);
+ if ((acc & 0xffff0000UL) != 0) {
+ acc = (acc >> 16) + (acc & 0x0000ffffUL);
+ }
+  /* This may be a little confusing: reorder the sum using lwip_htons()
+     instead of lwip_ntohs() since it has slightly less call overhead.
+     The caller must invert bits for the Internet sum! */
+ return lwip_htons((u16_t)acc);
+}
+#endif
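+
+/* Worked example of the end-around carry folding above: for acc = 0x1FFFE
+ * (e.g. from 0xFFFF + 0xFFFF), (acc >> 16) + (acc & 0xffff) gives
+ * 0x0001 + 0xFFFE = 0xFFFF, the one's-complement sum required by RFC 1071. */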
+
+#if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */
+/*
+ * Curt McDowell
+ * Broadcom Corp.
+ * csm@broadcom.com
+ *
+ * IP checksum two bytes at a time with support for
+ * unaligned buffer.
+ * Works for len up to and including 0x20000.
+ * by Curt McDowell, Broadcom Corp. 12/08/2005
+ *
+ * @param dataptr points to start of data to be summed at any boundary
+ * @param len length of data to be summed
+ * @return host order (!) lwip checksum (non-inverted Internet sum)
+ */
+u16_t
+lwip_standard_chksum(const void *dataptr, int len)
+{
+ const u8_t *pb = (const u8_t *)dataptr;
+ const u16_t *ps;
+ u16_t t = 0;
+ u32_t sum = 0;
+ int odd = ((mem_ptr_t)pb & 1);
+
+ /* Get aligned to u16_t */
+ if (odd && len > 0) {
+ ((u8_t *)&t)[1] = *pb++;
+ len--;
+ }
+
+ /* Add the bulk of the data */
+ ps = (const u16_t *)(const void *)pb;
+ while (len > 1) {
+ sum += *ps++;
+ len -= 2;
+ }
+
+ /* Consume left-over byte, if any */
+ if (len > 0) {
+ ((u8_t *)&t)[0] = *(const u8_t *)ps;
+ }
+
+ /* Add end bytes */
+ sum += t;
+
+ /* Fold 32-bit sum to 16 bits
+ calling this twice is probably faster than if statements... */
+ sum = FOLD_U32T(sum);
+ sum = FOLD_U32T(sum);
+
+ /* Swap if alignment was odd */
+ if (odd) {
+ sum = SWAP_BYTES_IN_WORD(sum);
+ }
+
+ return (u16_t)sum;
+}
+#endif
+
+#if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */
+/**
+ * An optimized checksum routine. Basically, it uses loop-unrolling on
+ * the checksum loop, treating the head and tail bytes specially, whereas
+ * the inner loop acts on 8 bytes at a time.
+ *
+ * @param dataptr start of the buffer to be checksummed; may be an odd byte address
+ * @param len number of bytes in the buffer to be checksummed
+ * @return host order (!) lwip checksum (non-inverted Internet sum)
+ *
+ * by Curt McDowell, Broadcom Corp. December 8th, 2005
+ */
+u16_t
+lwip_standard_chksum(const void *dataptr, int len)
+{
+ const u8_t *pb = (const u8_t *)dataptr;
+ const u16_t *ps;
+ u16_t t = 0;
+ const u32_t *pl;
+ u32_t sum = 0, tmp;
+ /* starts at odd byte address? */
+ int odd = ((mem_ptr_t)pb & 1);
+
+ if (odd && len > 0) {
+ ((u8_t *)&t)[1] = *pb++;
+ len--;
+ }
+
+ ps = (const u16_t *)(const void *)pb;
+
+ if (((mem_ptr_t)ps & 3) && len > 1) {
+ sum += *ps++;
+ len -= 2;
+ }
+
+ pl = (const u32_t *)(const void *)ps;
+
+ while (len > 7) {
+ tmp = sum + *pl++; /* ping */
+ if (tmp < sum) {
+ tmp++; /* add back carry */
+ }
+
+ sum = tmp + *pl++; /* pong */
+ if (sum < tmp) {
+ sum++; /* add back carry */
+ }
+
+ len -= 8;
+ }
+
+ /* make room in upper bits */
+ sum = FOLD_U32T(sum);
+
+ ps = (const u16_t *)pl;
+
+ /* 16-bit aligned word remaining? */
+ while (len > 1) {
+ sum += *ps++;
+ len -= 2;
+ }
+
+ /* dangling tail byte remaining? */
+ if (len > 0) { /* include odd byte */
+ ((u8_t *)&t)[0] = *(const u8_t *)ps;
+ }
+
+ sum += t; /* add end bytes */
+
+ /* Fold 32-bit sum to 16 bits
+ calling this twice is probably faster than if statements... */
+ sum = FOLD_U32T(sum);
+ sum = FOLD_U32T(sum);
+
+ if (odd) {
+ sum = SWAP_BYTES_IN_WORD(sum);
+ }
+
+ return (u16_t)sum;
+}
+#endif
+
+/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */
+static u16_t
+inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc)
+{
+ struct pbuf *q;
+ int swapped = 0;
+
+ /* iterate through all pbuf in chain */
+ for (q = p; q != NULL; q = q->next) {
+ LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n",
+ (void *)q, (void *)q->next));
+ acc += LWIP_CHKSUM(q->payload, q->len);
+ /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/
+    /* just executing this next line is probably faster than the if statement
+       needed to check whether we really need to execute it, and does no harm */
+ acc = FOLD_U32T(acc);
+ if (q->len % 2 != 0) {
+ swapped = !swapped;
+ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/
+ }
+
+ if (swapped) {
+ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+
+ acc += (u32_t)lwip_htons((u16_t)proto);
+ acc += (u32_t)lwip_htons(proto_len);
+
+ /* Fold 32-bit sum to 16 bits
+ calling this twice is probably faster than if statements... */
+ acc = FOLD_U32T(acc);
+ acc = FOLD_U32T(acc);
+ LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc));
+ return (u16_t)~(acc & 0xffffUL);
+}
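+
+/* For reference: the TCP/UDP pseudo header summed here and by the callers
+ * below consists of the source address, destination address, a zero byte,
+ * the protocol number and the TCP/UDP length (RFC 793 for IPv4, RFC 2460
+ * for IPv6). The address words are accumulated by the callers; proto and
+ * proto_len are added above. */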
+
+#if LWIP_IPV4
+/* inet_chksum_pseudo:
+ *
+ * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
+ * IP addresses are expected to be in network byte order.
+ *
+ * @param p chain of pbufs over which a checksum should be calculated (ip data part)
+ * @param src source ip address (used for checksum of pseudo header)
+ * @param dest destination ip address (used for checksum of pseudo header)
+ * @param proto ip protocol (used for checksum of pseudo header)
+ * @param proto_len length of the ip data part (used for checksum of pseudo header)
+ * @return checksum (as u16_t) to be saved directly in the protocol header
+ */
+u16_t
+inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len,
+ const ip4_addr_t *src, const ip4_addr_t *dest)
+{
+ u32_t acc;
+ u32_t addr;
+
+ addr = ip4_addr_get_u32(src);
+ acc = (addr & 0xffffUL);
+ acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
+ addr = ip4_addr_get_u32(dest);
+ acc = (u32_t)(acc + (addr & 0xffffUL));
+ acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
+ /* fold down to 16 bits */
+ acc = FOLD_U32T(acc);
+ acc = FOLD_U32T(acc);
+
+ return inet_cksum_pseudo_base(p, proto, proto_len, acc);
+}
+#endif /* LWIP_IPV4 */
+
+#if LWIP_IPV6
+/**
+ * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain.
+ * IPv6 addresses are expected to be in network byte order.
+ *
+ * @param p chain of pbufs over which a checksum should be calculated (ip data part)
+ * @param proto ipv6 protocol/next header (used for checksum of pseudo header)
+ * @param proto_len length of the ipv6 payload (used for checksum of pseudo header)
+ * @param src source ipv6 address (used for checksum of pseudo header)
+ * @param dest destination ipv6 address (used for checksum of pseudo header)
+ * @return checksum (as u16_t) to be saved directly in the protocol header
+ */
+u16_t
+ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len,
+ const ip6_addr_t *src, const ip6_addr_t *dest)
+{
+ u32_t acc = 0;
+ u32_t addr;
+ u8_t addr_part;
+
+ for (addr_part = 0; addr_part < 4; addr_part++) {
+ addr = src->addr[addr_part];
+ acc = (u32_t)(acc + (addr & 0xffffUL));
+ acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
+ addr = dest->addr[addr_part];
+ acc = (u32_t)(acc + (addr & 0xffffUL));
+ acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
+ }
+ /* fold down to 16 bits */
+ acc = FOLD_U32T(acc);
+ acc = FOLD_U32T(acc);
+
+ return inet_cksum_pseudo_base(p, proto, proto_len, acc);
+}
+#endif /* LWIP_IPV6 */
+
+/* ip_chksum_pseudo:
+ *
+ * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
+ * IP addresses are expected to be in network byte order.
+ *
+ * @param p chain of pbufs over which a checksum should be calculated (ip data part)
+ * @param src source ip address (used for checksum of pseudo header)
+ * @param dest destination ip address (used for checksum of pseudo header)
+ * @param proto ip protocol (used for checksum of pseudo header)
+ * @param proto_len length of the ip data part (used for checksum of pseudo header)
+ * @return checksum (as u16_t) to be saved directly in the protocol header
+ */
+u16_t
+ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len,
+ const ip_addr_t *src, const ip_addr_t *dest)
+{
+#if LWIP_IPV6
+ if (IP_IS_V6(dest)) {
+ return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest));
+ }
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4 && LWIP_IPV6
+ else
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+#if LWIP_IPV4
+ {
+ return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest));
+ }
+#endif /* LWIP_IPV4 */
+}
+
+/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */
+static u16_t
+inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len,
+ u16_t chksum_len, u32_t acc)
+{
+ struct pbuf *q;
+ int swapped = 0;
+ u16_t chklen;
+
+ /* iterate through all pbuf in chain */
+ for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) {
+ LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n",
+ (void *)q, (void *)q->next));
+ chklen = q->len;
+ if (chklen > chksum_len) {
+ chklen = chksum_len;
+ }
+ acc += LWIP_CHKSUM(q->payload, chklen);
+ chksum_len = (u16_t)(chksum_len - chklen);
+ LWIP_ASSERT("delete me", chksum_len < 0x7fff);
+ /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/
+ /* fold the upper bit down */
+ acc = FOLD_U32T(acc);
+ if (q->len % 2 != 0) {
+ swapped = !swapped;
+ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/
+ }
+
+ if (swapped) {
+ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+
+ acc += (u32_t)lwip_htons((u16_t)proto);
+ acc += (u32_t)lwip_htons(proto_len);
+
+ /* Fold 32-bit sum to 16 bits
+ calling this twice is probably faster than if statements... */
+ acc = FOLD_U32T(acc);
+ acc = FOLD_U32T(acc);
+ LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc));
+ return (u16_t)~(acc & 0xffffUL);
+}
+
+#if LWIP_IPV4
+/* inet_chksum_pseudo_partial:
+ *
+ * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
+ * IP addresses are expected to be in network byte order.
+ *
+ * @param p chain of pbufs over which a checksum should be calculated (ip data part)
+ * @param src source ip address (used for checksum of pseudo header)
+ * @param dest destination ip address (used for checksum of pseudo header)
+ * @param proto ip protocol (used for checksum of pseudo header)
+ * @param proto_len length of the ip data part (used for checksum of pseudo header)
+ * @param chksum_len number of payload bytes used to compute the checksum
+ * @return checksum (as u16_t) to be saved directly in the protocol header
+ */
+u16_t
+inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len,
+ u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest)
+{
+ u32_t acc;
+ u32_t addr;
+
+ addr = ip4_addr_get_u32(src);
+ acc = (addr & 0xffffUL);
+ acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
+ addr = ip4_addr_get_u32(dest);
+ acc = (u32_t)(acc + (addr & 0xffffUL));
+ acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
+ /* fold down to 16 bits */
+ acc = FOLD_U32T(acc);
+ acc = FOLD_U32T(acc);
+
+ return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc);
+}
+#endif /* LWIP_IPV4 */
+
+#if LWIP_IPV6
+/**
+ * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain.
+ * IPv6 addresses are expected to be in network byte order. Only the first
+ * chksum_len bytes of the payload are included in the checksum.
+ *
+ * @param p chain of pbufs over which a checksum should be calculated (ip data part)
+ * @param proto ipv6 protocol/next header (used for checksum of pseudo header)
+ * @param proto_len length of the ipv6 payload (used for checksum of pseudo header)
+ * @param chksum_len number of payload bytes used to compute chksum
+ * @param src source ipv6 address (used for checksum of pseudo header)
+ * @param dest destination ipv6 address (used for checksum of pseudo header)
+ * @return checksum (as u16_t) to be saved directly in the protocol header
+ */
+u16_t
+ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len,
+ u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest)
+{
+ u32_t acc = 0;
+ u32_t addr;
+ u8_t addr_part;
+
+ for (addr_part = 0; addr_part < 4; addr_part++) {
+ addr = src->addr[addr_part];
+ acc = (u32_t)(acc + (addr & 0xffffUL));
+ acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
+ addr = dest->addr[addr_part];
+ acc = (u32_t)(acc + (addr & 0xffffUL));
+ acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
+ }
+ /* fold down to 16 bits */
+ acc = FOLD_U32T(acc);
+ acc = FOLD_U32T(acc);
+
+ return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc);
+}
+#endif /* LWIP_IPV6 */
+
+/* ip_chksum_pseudo_partial:
+ *
+ * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
+ *
+ * @param p chain of pbufs over which a checksum should be calculated (ip data part)
+ * @param src source ip address (used for checksum of pseudo header)
+ * @param dest destination ip address (used for checksum of pseudo header)
+ * @param proto ip protocol (used for checksum of pseudo header)
+ * @param proto_len length of the ip data part (used for checksum of pseudo header)
+ * @param chksum_len number of payload bytes used to compute the checksum
+ * @return checksum (as u16_t) to be saved directly in the protocol header
+ */
+u16_t
+ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len,
+ u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest)
+{
+#if LWIP_IPV6
+ if (IP_IS_V6(dest)) {
+ return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest));
+ }
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4 && LWIP_IPV6
+ else
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+#if LWIP_IPV4
+ {
+ return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest));
+ }
+#endif /* LWIP_IPV4 */
+}
+
+/* inet_chksum:
+ *
+ * Calculates the Internet checksum over a portion of memory. Used primarily for IP
+ * and ICMP.
+ *
+ * @param dataptr start of the buffer to calculate the checksum (no alignment needed)
+ * @param len length of the buffer to calculate the checksum
+ * @return checksum (as u16_t) to be saved directly in the protocol header
+ */
+
+u16_t
+inet_chksum(const void *dataptr, u16_t len)
+{
+ return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len);
+}
+
+/**
+ * Calculate a checksum over a chain of pbufs (without pseudo-header; much like
+ * inet_chksum, only pbufs are used).
+ *
+ * @param p pbuf chain over which the checksum should be calculated
+ * @return checksum (as u16_t) to be saved directly in the protocol header
+ */
+u16_t
+inet_chksum_pbuf(struct pbuf *p)
+{
+ u32_t acc;
+ struct pbuf *q;
+ int swapped = 0;
+
+ acc = 0;
+ for (q = p; q != NULL; q = q->next) {
+ acc += LWIP_CHKSUM(q->payload, q->len);
+ acc = FOLD_U32T(acc);
+ if (q->len % 2 != 0) {
+ swapped = !swapped;
+ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ }
+
+ if (swapped) {
+ acc = SWAP_BYTES_IN_WORD(acc);
+ }
+ return (u16_t)~(acc & 0xffffUL);
+}
+
+/* These are some implementations for LWIP_CHKSUM_COPY, which copies data
+ * like MEMCPY but generates a checksum at the same time. Since this is a
+ * performance-sensitive function, you might want to create your own version
+ * in assembly targeted at your hardware by defining it in lwipopts.h:
+ * #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len)
+ */
+
+#if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */
+/** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM.
+ * For architectures with big caches, data might still be in cache when
+ * generating the checksum after copying.
+ */
+u16_t
+lwip_chksum_copy(void *dst, const void *src, u16_t len)
+{
+ MEMCPY(dst, src, len);
+ return LWIP_CHKSUM(dst, len);
+}
+#endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */
diff --git a/lwip/src/core/init.c b/lwip/src/core/init.c
new file mode 100644
index 0000000..b3737a3
--- /dev/null
+++ b/lwip/src/core/init.c
@@ -0,0 +1,380 @@
+/**
+ * @file
+ * Modules initialization
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/init.h"
+#include "lwip/stats.h"
+#include "lwip/sys.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/pbuf.h"
+#include "lwip/netif.h"
+#include "lwip/sockets.h"
+#include "lwip/ip.h"
+#include "lwip/raw.h"
+#include "lwip/udp.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/igmp.h"
+#include "lwip/dns.h"
+#include "lwip/timeouts.h"
+#include "lwip/etharp.h"
+#include "lwip/ip6.h"
+#include "lwip/nd6.h"
+#include "lwip/mld6.h"
+#include "lwip/api.h"
+
+#include "netif/ppp/ppp_opts.h"
+#include "netif/ppp/ppp_impl.h"
+
+#ifndef LWIP_SKIP_PACKING_CHECK
+
+#ifdef PACK_STRUCT_USE_INCLUDES
+# include "arch/bpstruct.h"
+#endif
+PACK_STRUCT_BEGIN
+struct packed_struct_test {
+ PACK_STRUCT_FLD_8(u8_t dummy1);
+ PACK_STRUCT_FIELD(u32_t dummy2);
+} PACK_STRUCT_STRUCT;
+PACK_STRUCT_END
+#ifdef PACK_STRUCT_USE_INCLUDES
+# include "arch/epstruct.h"
+#endif
+#define PACKED_STRUCT_TEST_EXPECTED_SIZE 5
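+/* 1 byte (u8_t) + 4 bytes (u32_t) = 5; without packing, most ABIs would pad
+   the struct to 8 bytes, which the assert in lwip_init() detects. */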
+
+#endif
+
+/* Compile-time sanity checks for configuration errors.
+ * These can be done independently of LWIP_DEBUG, without penalty.
+ */
+#ifndef BYTE_ORDER
+#error "BYTE_ORDER is not defined, you have to define it in your cc.h"
+#endif
+#if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV)
+#error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h"
+#endif
+#if (!LWIP_UDP && LWIP_UDPLITE)
+#error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h"
+#endif
+#if (!LWIP_UDP && LWIP_DHCP)
+#error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h"
+#endif
+#if (!LWIP_UDP && !LWIP_RAW && LWIP_MULTICAST_TX_OPTIONS)
+#error "If you want to use LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 and/or LWIP_RAW=1 in your lwipopts.h"
+#endif
+#if (!LWIP_UDP && LWIP_DNS)
+#error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h"
+#endif
+#if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */
+#if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0))
+#error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h"
+#endif
+#if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0))
+#error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h"
+#endif
+#if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0))
+#error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h"
+#endif
+#if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0))
+#error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h"
+#endif
+#if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1))
+#error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h"
+#endif
+#if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS)
+#error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h"
+#endif
+#if (LWIP_IGMP && !LWIP_IPV4)
+#error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h"
+#endif
+#if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0))
+#error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h"
+#endif
+/* There must be sufficient timeouts, taking into account requirements of the subsystems. */
+#if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < LWIP_NUM_SYS_TIMEOUT_INTERNAL)
+#error "MEMP_NUM_SYS_TIMEOUT is too low to accomodate all required timeouts"
+#endif
+#if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS))
+#error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!"
+#endif
+#endif /* !MEMP_MEM_MALLOC */
+#if LWIP_WND_SCALE
+#if (LWIP_TCP && (TCP_WND > 0xffffffff))
+#error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h"
+#endif
+#if (LWIP_TCP && (TCP_RCV_SCALE > 14))
+#error "The maximum valid window scale value is 14!"
+#endif
+#if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE)))
+#error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!"
+#endif
+#if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0))
+#error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!"
+#endif
+#else /* LWIP_WND_SCALE */
+#if (LWIP_TCP && (TCP_WND > 0xffff))
+#error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)"
+#endif
+#endif /* LWIP_WND_SCALE */
+#if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff))
+#error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h"
+#endif
+#if (LWIP_TCP && (TCP_SND_QUEUELEN < 2))
+#error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work"
+#endif
+#if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12)))
+#error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h"
+#endif
+#if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff)))
+#error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t"
+#endif
+#if (LWIP_TCP && LWIP_TCP_SACK_OUT && !TCP_QUEUE_OOSEQ)
+#error "To use LWIP_TCP_SACK_OUT, TCP_QUEUE_OOSEQ needs to be enabled"
+#endif
+#if (LWIP_TCP && LWIP_TCP_SACK_OUT && (LWIP_TCP_MAX_SACK_NUM < 1))
+#error "LWIP_TCP_MAX_SACK_NUM must be greater than 0"
+#endif
+#if (LWIP_NETIF_API && (NO_SYS==1))
+#error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h"
+#endif
+#if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1))
+#error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h"
+#endif
+#if (LWIP_PPP_API && (NO_SYS==1))
+#error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h"
+#endif
+#if (LWIP_PPP_API && (PPP_SUPPORT==0))
+#error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h"
+#endif
+#if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP)
+#error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h"
+#endif
+#if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK)
+#error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h"
+#endif
+#if (!LWIP_ARP && LWIP_AUTOIP)
+#error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h"
+#endif
+#if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API)))
+#error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h"
+#endif
+#if (LWIP_ALTCP && LWIP_EVENT_API)
+#error "The application layered tcp API does not work with LWIP_EVENT_API"
+#endif
+#if (MEM_LIBC_MALLOC && MEM_USE_POOLS)
+#error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h"
+#endif
+#if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS)
+#error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h"
+#endif
+#if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT)
+#error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf"
+#endif
+#if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT)))
+#error "you have to define define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST"
+#endif
+#if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT
+#error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on"
+#endif
+#if PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT
+#error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on"
+#endif
+#if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4
+#error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on"
+#endif
+#if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6
+#error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on"
+#endif
+#if !LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT)
+#error "LWIP_ETHERNET needs to be turned on for LWIP_ARP or PPPOE_SUPPORT"
+#endif
+#if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING
+#error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too"
+#endif
+#if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE
+#error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets"
+#endif
+#if LWIP_NETCONN && LWIP_TCP
+#if NETCONN_COPY != TCP_WRITE_FLAG_COPY
+#error "NETCONN_COPY != TCP_WRITE_FLAG_COPY"
+#endif
+#if NETCONN_MORE != TCP_WRITE_FLAG_MORE
+#error "NETCONN_MORE != TCP_WRITE_FLAG_MORE"
+#endif
+#endif /* LWIP_NETCONN && LWIP_TCP */
+#if LWIP_SOCKET
+#endif /* LWIP_SOCKET */
+
+
+/* Compile-time checks for deprecated options.
+ */
+#ifdef MEMP_NUM_TCPIP_MSG
+#error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h."
+#endif
+#ifdef TCP_REXMIT_DEBUG
+#error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h."
+#endif
+#ifdef RAW_STATS
+#error "RAW_STATS option is deprecated. Remove it from your lwipopts.h."
+#endif
+#ifdef ETHARP_QUEUE_FIRST
+#error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h."
+#endif
+#ifdef ETHARP_ALWAYS_INSERT
+#error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h."
+#endif
+#if !NO_SYS && LWIP_TCPIP_CORE_LOCKING && LWIP_COMPAT_MUTEX && !defined(LWIP_COMPAT_MUTEX_ALLOWED)
+#error "LWIP_COMPAT_MUTEX cannot prevent priority inversion. It is recommended to implement priority-aware mutexes. (Define LWIP_COMPAT_MUTEX_ALLOWED to disable this error.)"
+#endif
+
+#ifndef LWIP_DISABLE_TCP_SANITY_CHECKS
+#define LWIP_DISABLE_TCP_SANITY_CHECKS 0
+#endif
+#ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS
+#define LWIP_DISABLE_MEMP_SANITY_CHECKS 0
+#endif
+
+/* MEMP sanity checks */
+#if MEMP_MEM_MALLOC
+#if !LWIP_DISABLE_MEMP_SANITY_CHECKS
+#if LWIP_NETCONN || LWIP_SOCKET
+#if !MEMP_NUM_NETCONN && LWIP_SOCKET
+#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!"
+#endif
+#else /* LWIP_NETCONN || LWIP_SOCKET */
+#if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB)
+#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error."
+#endif
+#endif /* LWIP_NETCONN || LWIP_SOCKET */
+#endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */
+#if MEM_USE_POOLS
+#error "MEMP_MEM_MALLOC and MEM_USE_POOLS cannot be enabled at the same time"
+#endif
+#ifdef LWIP_HOOK_MEMP_AVAILABLE
+#error "LWIP_HOOK_MEMP_AVAILABLE doesn't make sense with MEMP_MEM_MALLOC"
+#endif
+#endif /* MEMP_MEM_MALLOC */
+
+/* TCP sanity checks */
+#if !LWIP_DISABLE_TCP_SANITY_CHECKS
+#if LWIP_TCP
+#if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN)
+#error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
+#endif
+#if TCP_SND_BUF < (2 * TCP_MSS)
+#error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
+#endif
+#if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS))
+#error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
+#endif
+#if TCP_SNDLOWAT >= TCP_SND_BUF
+#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
+#endif
+#if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS))
+#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!"
+#endif
+#if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN
+#error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
+#endif
+#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN))
+#error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
+#endif
+#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN))))
+#error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
+#endif
+#if TCP_WND < TCP_MSS
+#error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
+#endif
+#endif /* LWIP_TCP */
+#endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */
+
+/**
+ * @ingroup lwip_nosys
+ * Initialize all modules.
+ * Use this in NO_SYS mode. Use tcpip_init() otherwise.
+ */
+void
+lwip_init(void)
+{
+#ifndef LWIP_SKIP_CONST_CHECK
+ int a = 0;
+ LWIP_UNUSED_ARG(a);
+ LWIP_ASSERT("LWIP_CONST_CAST not implemented correctly. Check your lwIP port.", LWIP_CONST_CAST(void *, &a) == &a);
+#endif
+#ifndef LWIP_SKIP_PACKING_CHECK
+ LWIP_ASSERT("Struct packing not implemented correctly. Check your lwIP port.", sizeof(struct packed_struct_test) == PACKED_STRUCT_TEST_EXPECTED_SIZE);
+#endif
+
+ /* Modules initialization */
+ stats_init();
+#if !NO_SYS
+ sys_init();
+#endif /* !NO_SYS */
+ mem_init();
+ memp_init();
+ pbuf_init();
+ netif_init();
+#if LWIP_IPV4
+ ip_init();
+#if LWIP_ARP
+ etharp_init();
+#endif /* LWIP_ARP */
+#endif /* LWIP_IPV4 */
+#if LWIP_RAW
+ raw_init();
+#endif /* LWIP_RAW */
+#if LWIP_UDP
+ udp_init();
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+ tcp_init();
+#endif /* LWIP_TCP */
+#if LWIP_IGMP
+ igmp_init();
+#endif /* LWIP_IGMP */
+#if LWIP_DNS
+ dns_init();
+#endif /* LWIP_DNS */
+#if PPP_SUPPORT
+ ppp_init();
+#endif
+
+#if LWIP_TIMERS
+ sys_timeouts_init();
+#endif /* LWIP_TIMERS */
+}
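+
+/* Typical NO_SYS main loop (a sketch; my_netif_init() and my_netif_poll()
+ * are hypothetical port/application functions, not part of lwIP):
+ *
+ *   lwip_init();
+ *   netif_add(&netif, &ipaddr, &netmask, &gw, NULL, my_netif_init, netif_input);
+ *   netif_set_default(&netif);
+ *   netif_set_up(&netif);
+ *   for (;;) {
+ *     my_netif_poll(&netif);    (feed received frames to netif.input)
+ *     sys_check_timeouts();     (drive the lwIP cyclic timers)
+ *   }
+ */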
diff --git a/lwip/src/core/ip.c b/lwip/src/core/ip.c
new file mode 100644
index 0000000..18514cf
--- /dev/null
+++ b/lwip/src/core/ip.c
@@ -0,0 +1,167 @@
+/**
+ * @file
+ * Common IPv4 and IPv6 code
+ *
+ * @defgroup ip IP
+ * @ingroup callbackstyle_api
+ *
+ * @defgroup ip4 IPv4
+ * @ingroup ip
+ *
+ * @defgroup ip6 IPv6
+ * @ingroup ip
+ *
+ * @defgroup ipaddr IP address handling
+ * @ingroup infrastructure
+ *
+ * @defgroup ip4addr IPv4 only
+ * @ingroup ipaddr
+ *
+ * @defgroup ip6addr IPv6 only
+ * @ingroup ipaddr
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 || LWIP_IPV6
+
+#include "lwip/ip_addr.h"
+#include "lwip/ip.h"
+
+/** Global data for both IPv4 and IPv6 */
+struct ip_globals ip_data;
+
+#if LWIP_IPV4 && LWIP_IPV6
+
+const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT;
+
+/**
+ * @ingroup ipaddr
+ * Convert numeric IP address (both versions) into ASCII representation.
+ * Returns a pointer to a static buffer; not reentrant!
+ *
+ * @param addr ip address in network order to convert
+ * @return pointer to a global static (!) buffer that holds the ASCII
+ * representation of addr
+ */
+char *ipaddr_ntoa(const ip_addr_t *addr)
+{
+ if (addr == NULL) {
+ return NULL;
+ }
+ if (IP_IS_V6(addr)) {
+ return ip6addr_ntoa(ip_2_ip6(addr));
+ } else {
+ return ip4addr_ntoa(ip_2_ip4(addr));
+ }
+}
+
+/**
+ * @ingroup ipaddr
+ * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used.
+ *
+ * @param addr ip address in network order to convert
+ * @param buf target buffer where the string is stored
+ * @param buflen length of buf
+ * @return either pointer to buf which now holds the ASCII
+ * representation of addr or NULL if buf was too small
+ */
+char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen)
+{
+ if (addr == NULL) {
+ return NULL;
+ }
+ if (IP_IS_V6(addr)) {
+ return ip6addr_ntoa_r(ip_2_ip6(addr), buf, buflen);
+ } else {
+ return ip4addr_ntoa_r(ip_2_ip4(addr), buf, buflen);
+ }
+}
+
+/**
+ * @ingroup ipaddr
+ * Convert IP address string (both versions) to numeric.
+ * The version is auto-detected from the string.
+ *
+ * @param cp IP address string to convert
+ * @param addr conversion result is stored here
+ * @return 1 on success, 0 on error
+ */
+int
+ipaddr_aton(const char *cp, ip_addr_t *addr)
+{
+ if (cp != NULL) {
+ const char *c;
+ for (c = cp; *c != 0; c++) {
+ if (*c == ':') {
+ /* contains a colon: IPv6 address */
+ if (addr) {
+ IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6);
+ }
+ return ip6addr_aton(cp, ip_2_ip6(addr));
+ } else if (*c == '.') {
+ /* contains a dot: IPv4 address */
+ break;
+ }
+ }
+ /* call ip4addr_aton as fallback or if IPv4 was found */
+ if (addr) {
+ IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4);
+ }
+ return ip4addr_aton(cp, ip_2_ip4(addr));
+ }
+ return 0;
+}
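+
+/* Example (a sketch):
+ *   ip_addr_t a;
+ *   ipaddr_aton("192.168.1.10", &a);  - parsed as IPADDR_TYPE_V4 (contains a dot)
+ *   ipaddr_aton("2001:db8::1", &a);   - parsed as IPADDR_TYPE_V6 (contains a colon)
+ */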
+
+/**
+ * @ingroup lwip_nosys
+ * If both IP versions are enabled, this function can dispatch packets to the correct one.
+ * Don't call directly, pass to netif_add() and call netif->input().
+ */
+err_t
+ip_input(struct pbuf *p, struct netif *inp)
+{
+ if (p != NULL) {
+ if (IP_HDR_GET_VERSION(p->payload) == 6) {
+ return ip6_input(p, inp);
+ }
+ return ip4_input(p, inp);
+ }
+ return ERR_VAL;
+}
+
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+#endif /* LWIP_IPV4 || LWIP_IPV6 */
diff --git a/lwip/src/core/ipv4/autoip.c b/lwip/src/core/ipv4/autoip.c
new file mode 100644
index 0000000..9f7139b
--- /dev/null
+++ b/lwip/src/core/ipv4/autoip.c
@@ -0,0 +1,527 @@
+/**
+ * @file
+ * AutoIP Automatic LinkLocal IP Configuration
+ *
+ * This is an AutoIP implementation for the lwIP TCP/IP stack. It aims to conform
+ * with RFC 3927.
+ *
+ * @defgroup autoip AUTOIP
+ * @ingroup ip4
+ * AUTOIP related functions
+ * USAGE:
+ *
+ * define @ref LWIP_AUTOIP 1 in your lwipopts.h
+ * Options:
+ * AUTOIP_TMR_INTERVAL msecs,
+ * I recommend a value of 100. The value should divide 1000 with little or no remainder.
+ * Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100 ....
+ *
+ * Without DHCP:
+ * - Call autoip_start() after netif_add().
+ *
+ * With DHCP:
+ * - define @ref LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h.
+ * - Configure your DHCP Client.
+ *
+ * @see netifapi_autoip
+ */
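+
+/* Example lwipopts.h fragment for standalone AutoIP (values illustrative):
+ *
+ *   #define LWIP_AUTOIP           1
+ *   #define LWIP_DHCP_AUTOIP_COOP 0
+ *
+ * With LWIP_TIMERS enabled, autoip_tmr() is driven by the stack's cyclic
+ * timers; otherwise call it every AUTOIP_TMR_INTERVAL milliseconds.
+ */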
+
+/*
+ *
+ * Copyright (c) 2007 Dominik Spies <kontakt@dspies.de>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * Author: Dominik Spies <kontakt@dspies.de>
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/mem.h"
+/* #include "lwip/udp.h" */
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/autoip.h"
+#include "lwip/etharp.h"
+#include "lwip/prot/autoip.h"
+
+#include <string.h>
+
+/** Pseudo-random macro based on netif information.
+ * You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */
+#ifndef LWIP_AUTOIP_RAND
+#define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \
+ ((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \
+ ((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \
+ ((u32_t)((netif->hwaddr[4]) & 0xff))) + \
+ (netif_autoip_data(netif)? netif_autoip_data(netif)->tried_llipaddr : 0))
+#endif /* LWIP_AUTOIP_RAND */
+
+/**
+ * Macro that generates the initial IP address to be tried by AUTOIP.
+ * If you want to override this, define it to something else in lwipopts.h.
+ */
+#ifndef LWIP_AUTOIP_CREATE_SEED_ADDR
+#define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \
+ lwip_htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \
+ ((u32_t)((u8_t)(netif->hwaddr[5]))) << 8)))
+#endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */
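+/* Worked example: for a hwaddr ending in ...:44:55, the seed address is
+   169.254.1.0 + (0x44 | (0x55 << 8)) = 169.254.86.68, before the conflict
+   offset and range folding applied in autoip_create_addr(). */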
+
+/* static functions */
+static err_t autoip_arp_announce(struct netif *netif);
+static void autoip_start_probing(struct netif *netif);
+
+/**
+ * @ingroup autoip
+ * Set a statically allocated struct autoip to work with.
+ * Using this prevents autoip_start() from allocating it via mem_malloc().
+ *
+ * @param netif the netif for which to set the struct autoip
+ * @param autoip (uninitialised) autoip struct allocated by the application
+ */
+void
+autoip_set_struct(struct netif *netif, struct autoip *autoip)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+ LWIP_ASSERT("autoip != NULL", autoip != NULL);
+ LWIP_ASSERT("netif already has a struct autoip set",
+ netif_autoip_data(netif) == NULL);
+
+ /* clear data structure */
+ memset(autoip, 0, sizeof(struct autoip));
+ /* autoip->state = AUTOIP_STATE_OFF; */
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip);
+}
+
+/** Restart AutoIP client and check the next address (conflict detected)
+ *
+ * @param netif The netif under AutoIP control
+ */
+static void
+autoip_restart(struct netif *netif)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+ autoip->tried_llipaddr++;
+ autoip_start(netif);
+}
+
+/**
+ * Handle an IP address conflict after ARP conflict detection
+ */
+static void
+autoip_handle_arp_conflict(struct netif *netif)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+
+ /* RFC3927, 2.5 "Conflict Detection and Defense" allows two options where
+ a) means retreat on the first conflict and
+ b) allows to keep an already configured address when having only one
+ conflict in 10 seconds
+ We use option b) since it helps to improve the chance that one of the two
+ conflicting hosts may be able to retain its address. */
+
+ if (autoip->lastconflict > 0) {
+ /* retreat, there was a conflicting ARP in the last DEFEND_INTERVAL seconds */
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ ("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n"));
+
+    /* Active TCP sessions are aborted when removing the IP address */
+ autoip_restart(netif);
+ } else {
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ ("autoip_handle_arp_conflict(): we are defend, send ARP Announce\n"));
+ autoip_arp_announce(netif);
+ autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND;
+ }
+}
+
+/**
+ * Create an IP address in the range 169.254.1.0 to 169.254.254.255
+ *
+ * @param netif network interface on which to create the IP address
+ * @param ipaddr ip address to initialize
+ */
+static void
+autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+
+  /* Here we create an IP address in the range 169.254.1.0 to 169.254.254.255,
+   * compliant with RFC 3927 Section 2.1.
+   * We have 254 * 256 possibilities. */
+
+ u32_t addr = lwip_ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif));
+ addr += autoip->tried_llipaddr;
+ addr = AUTOIP_NET | (addr & 0xffff);
+ /* Now, 169.254.0.0 <= addr <= 169.254.255.255 */
+
+ if (addr < AUTOIP_RANGE_START) {
+ addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1;
+ }
+ if (addr > AUTOIP_RANGE_END) {
+ addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1;
+ }
+ LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) &&
+ (addr <= AUTOIP_RANGE_END));
+ ip4_addr_set_u32(ipaddr, lwip_htonl(addr));
+
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ ("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ (u16_t)(autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr),
+ ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr)));
+}
+
+/**
+ * Sends an ARP probe from a network interface
+ *
+ * @param netif network interface used to send the probe
+ */
+static err_t
+autoip_arp_probe(struct netif *netif)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+ /* this works because netif->ip_addr is ANY */
+ return etharp_request(netif, &autoip->llipaddr);
+}
+
+/**
+ * Sends an ARP announce from a network interface
+ *
+ * @param netif network interface used to send the announce
+ */
+static err_t
+autoip_arp_announce(struct netif *netif)
+{
+ return etharp_gratuitous(netif);
+}
+
+/**
+ * Configure interface for use with current LL IP-Address
+ *
+ * @param netif network interface to configure with current LL IP-Address
+ */
+static err_t
+autoip_bind(struct netif *netif)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+ ip4_addr_t sn_mask, gw_addr;
+
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
+ ("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num,
+ ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
+ ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
+
+ IP4_ADDR(&sn_mask, 255, 255, 0, 0);
+ IP4_ADDR(&gw_addr, 0, 0, 0, 0);
+
+ netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr);
+ /* interface is used by routing now that an address is set */
+
+ return ERR_OK;
+}
+
+/**
+ * @ingroup autoip
+ * Start AutoIP client
+ *
+ * @param netif network interface on which to start the AutoIP client
+ */
+err_t
+autoip_start(struct netif *netif)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+ err_t result = ERR_OK;
+
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;);
+
+ /* Set IP-Address, Netmask and Gateway to 0 to make sure that
+ * ARP Packets are formed correctly
+ */
+ netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4);
+
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ ("autoip_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0],
+ netif->name[1], (u16_t)netif->num));
+ if (autoip == NULL) {
+ /* no AutoIP client attached yet? */
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
+ ("autoip_start(): starting new AUTOIP client\n"));
+ autoip = (struct autoip *)mem_calloc(1, sizeof(struct autoip));
+ if (autoip == NULL) {
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
+ ("autoip_start(): could not allocate autoip\n"));
+ return ERR_MEM;
+ }
+ /* store this AutoIP client in the netif */
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip);
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip"));
+ } else {
+ autoip->state = AUTOIP_STATE_OFF;
+ autoip->ttw = 0;
+ autoip->sent_num = 0;
+ ip4_addr_set_zero(&autoip->llipaddr);
+ autoip->lastconflict = 0;
+ }
+
+ autoip_create_addr(netif, &(autoip->llipaddr));
+ autoip_start_probing(netif);
+
+ return result;
+}
+
+static void
+autoip_start_probing(struct netif *netif)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+
+ autoip->state = AUTOIP_STATE_PROBING;
+ autoip->sent_num = 0;
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ ("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
+ ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
+
+  /* time to wait before the first probe; this is randomly
+   * chosen between 0 and PROBE_WAIT seconds,
+   * compliant with RFC 3927 Section 2.2.1
+ */
+ autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND));
+
+ /*
+   * if we tried more than MAX_CONFLICTS we must limit our rate for
+   * acquiring and probing addresses,
+   * compliant with RFC 3927 Section 2.2.1
+ */
+ if (autoip->tried_llipaddr > MAX_CONFLICTS) {
+ autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND;
+ }
+}
+
+/**
+ * Handle a possible change in the network configuration.
+ *
+ * If there is an AutoIP address configured, take the interface down
+ * and begin probing with the same address.
+ */
+void
+autoip_network_changed(struct netif *netif)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+
+ if (autoip && (autoip->state != AUTOIP_STATE_OFF)) {
+ autoip_start_probing(netif);
+ }
+}
+
+/**
+ * @ingroup autoip
+ * Stop AutoIP client
+ *
+ * @param netif network interface on which to stop the AutoIP client
+ */
+err_t
+autoip_stop(struct netif *netif)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+
+ LWIP_ASSERT_CORE_LOCKED();
+ if (autoip != NULL) {
+ autoip->state = AUTOIP_STATE_OFF;
+ if (ip4_addr_islinklocal(netif_ip4_addr(netif))) {
+ netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4);
+ }
+ }
+ return ERR_OK;
+}
+
+/**
+ * Has to be called in a loop every AUTOIP_TMR_INTERVAL milliseconds
+ */
+void
+autoip_tmr(void)
+{
+ struct netif *netif;
+ /* loop through netif's */
+ NETIF_FOREACH(netif) {
+ struct autoip *autoip = netif_autoip_data(netif);
+ /* only act on AutoIP configured interfaces */
+ if (autoip != NULL) {
+ if (autoip->lastconflict > 0) {
+ autoip->lastconflict--;
+ }
+
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
+ ("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n",
+ (u16_t)(autoip->state), autoip->ttw));
+
+ if (autoip->ttw > 0) {
+ autoip->ttw--;
+ }
+
+ switch (autoip->state) {
+ case AUTOIP_STATE_PROBING:
+ if (autoip->ttw == 0) {
+ if (autoip->sent_num >= PROBE_NUM) {
+ /* Switch to ANNOUNCING: now we can bind to an IP address and use it */
+ autoip->state = AUTOIP_STATE_ANNOUNCING;
+ autoip_bind(netif);
+ /* autoip_bind() calls netif_set_addr(): this triggers a gratuitous ARP
+ which counts as an announcement */
+ autoip->sent_num = 1;
+ autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND;
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ ("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
+ ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
+ } else {
+ autoip_arp_probe(netif);
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() PROBING Sent Probe\n"));
+ autoip->sent_num++;
+ if (autoip->sent_num == PROBE_NUM) {
+            /* calculate time to wait for the announce */
+ autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND;
+ } else {
+            /* calculate time to wait until the next probe */
+ autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) %
+ ((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) +
+ PROBE_MIN * AUTOIP_TICKS_PER_SECOND);
+ }
+ }
+ }
+ break;
+
+ case AUTOIP_STATE_ANNOUNCING:
+ if (autoip->ttw == 0) {
+ autoip_arp_announce(netif);
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() ANNOUNCING Sent Announce\n"));
+ autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND;
+ autoip->sent_num++;
+
+ if (autoip->sent_num >= ANNOUNCE_NUM) {
+ autoip->state = AUTOIP_STATE_BOUND;
+ autoip->sent_num = 0;
+ autoip->ttw = 0;
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ ("autoip_tmr(): changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
+ ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
+ }
+ }
+ break;
+
+ default:
+ /* nothing to do in other states */
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * Handles every incoming ARP Packet, called by etharp_input().
+ *
+ * @param netif network interface to use for autoip processing
+ * @param hdr Incoming ARP packet
+ */
+void
+autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n"));
+ if ((autoip != NULL) && (autoip->state != AUTOIP_STATE_OFF)) {
+ /* when ip.src == llipaddr && hw.src != netif->hwaddr
+ *
+ * when probing ip.dst == llipaddr && hw.src != netif->hwaddr
+ * we have a conflict and must solve it
+ */
+ ip4_addr_t sipaddr, dipaddr;
+ struct eth_addr netifaddr;
+ SMEMCPY(netifaddr.addr, netif->hwaddr, ETH_HWADDR_LEN);
+
+ /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without
+ * structure packing (not using structure copy which breaks strict-aliasing rules).
+ */
+ IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr);
+ IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr);
+
+ if (autoip->state == AUTOIP_STATE_PROBING) {
+ /* RFC 3927 Section 2.2.1:
+ * from beginning to after ANNOUNCE_WAIT
+ * seconds we have a conflict if
+ * ip.src == llipaddr OR
+ * ip.dst == llipaddr && hw.src != own hwaddr
+ */
+ if ((ip4_addr_cmp(&sipaddr, &autoip->llipaddr)) ||
+ (ip4_addr_isany_val(sipaddr) &&
+ ip4_addr_cmp(&dipaddr, &autoip->llipaddr) &&
+ !eth_addr_cmp(&netifaddr, &hdr->shwaddr))) {
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING,
+ ("autoip_arp_reply(): Probe Conflict detected\n"));
+ autoip_restart(netif);
+ }
+ } else {
+ /* RFC 3927 Section 2.5:
+ * in any state we have a conflict if
+ * ip.src == llipaddr && hw.src != own hwaddr
+ */
+ if (ip4_addr_cmp(&sipaddr, &autoip->llipaddr) &&
+ !eth_addr_cmp(&netifaddr, &hdr->shwaddr)) {
+ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING,
+ ("autoip_arp_reply(): Conflicting ARP-Packet detected\n"));
+ autoip_handle_arp_conflict(netif);
+ }
+ }
+ }
+}
+
+/** check if AutoIP supplied netif->ip_addr
+ *
+ * @param netif the netif to check
+ * @return 1 if AutoIP supplied netif->ip_addr (state BOUND or ANNOUNCING),
+ * 0 otherwise
+ */
+u8_t
+autoip_supplied_address(const struct netif *netif)
+{
+ if ((netif != NULL) && (netif_autoip_data(netif) != NULL)) {
+ struct autoip *autoip = netif_autoip_data(netif);
+ return (autoip->state == AUTOIP_STATE_BOUND) || (autoip->state == AUTOIP_STATE_ANNOUNCING);
+ }
+ return 0;
+}
+
+u8_t
+autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr)
+{
+ struct autoip *autoip = netif_autoip_data(netif);
+ return (autoip != NULL) && ip4_addr_cmp(addr, &(autoip->llipaddr));
+}
+
+#endif /* LWIP_IPV4 && LWIP_AUTOIP */
diff --git a/lwip/src/core/ipv4/dhcp.c b/lwip/src/core/ipv4/dhcp.c
new file mode 100644
index 0000000..534574f
--- /dev/null
+++ b/lwip/src/core/ipv4/dhcp.c
@@ -0,0 +1,1990 @@
+/**
+ * @file
+ * Dynamic Host Configuration Protocol client
+ *
+ * @defgroup dhcp4 DHCPv4
+ * @ingroup ip4
+ * DHCP (IPv4) related functions
+ * This is a DHCP client for the lwIP TCP/IP stack. It aims to conform
+ * with RFC 2131 and RFC 2132.
+ *
+ * @todo:
+ * - Support for interfaces other than Ethernet (SLIP, PPP, ...)
+ *
+ * Options:
+ * @ref DHCP_COARSE_TIMER_SECS (recommended 60 which is a minute)
+ * @ref DHCP_FINE_TIMER_MSECS (recommended 500 which equals TCP coarse timer)
+ *
+ * dhcp_start() starts a DHCP client instance which
+ * configures the interface by obtaining an IP address lease and maintaining it.
+ *
+ * Use dhcp_release() to end the lease and use dhcp_stop()
+ * to remove the DHCP client.
+ *
+ * @see LWIP_HOOK_DHCP_APPEND_OPTIONS
+ * @see LWIP_HOOK_DHCP_PARSE_OPTION
+ *
+ * @see netifapi_dhcp4
+ */
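+
+/* Typical use (a sketch):
+ *
+ *   netif_add(...);
+ *   netif_set_up(&netif);
+ *   dhcp_start(&netif);             - start/continue lease negotiation
+ *   ...
+ *   dhcp_release_and_stop(&netif);  - release the lease on shutdown
+ */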
+
+/*
+ * Copyright (c) 2001-2004 Leon Woestenberg <leon.woestenberg@gmx.net>
+ * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ * The Swedish Institute of Computer Science and Adam Dunkels
+ * are specifically granted permission to redistribute this
+ * source code.
+ *
+ * Author: Leon Woestenberg <leon.woestenberg@gmx.net>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_DHCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/stats.h"
+#include "lwip/mem.h"
+#include "lwip/udp.h"
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/def.h"
+#include "lwip/dhcp.h"
+#include "lwip/autoip.h"
+#include "lwip/dns.h"
+#include "lwip/etharp.h"
+#include "lwip/prot/dhcp.h"
+#include "lwip/prot/iana.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+#ifndef LWIP_HOOK_DHCP_APPEND_OPTIONS
+#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr)
+#endif
+#ifndef LWIP_HOOK_DHCP_PARSE_OPTION
+#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0)
+#endif
+
+/** DHCP_CREATE_RAND_XID: if this is set to 1, the xid is created using
+ * LWIP_RAND() (this overrides DHCP_GLOBAL_XID)
+ */
+#ifndef DHCP_CREATE_RAND_XID
+#define DHCP_CREATE_RAND_XID 1
+#endif
+
+/** Default for DHCP_GLOBAL_XID is 0xABCD0000
+ * This can be changed by defining DHCP_GLOBAL_XID and DHCP_GLOBAL_XID_HEADER, e.g.
+ * \#define DHCP_GLOBAL_XID_HEADER "stdlib.h"
+ * \#define DHCP_GLOBAL_XID rand()
+ */
+#ifdef DHCP_GLOBAL_XID_HEADER
+#include DHCP_GLOBAL_XID_HEADER /* include optional starting XID generation prototypes */
+#endif
+
+/** DHCP_OPTION_MAX_MSG_SIZE is set to the MTU.
+ * The MTU is checked to be big enough in dhcp_start() */
+#define DHCP_MAX_MSG_LEN(netif) (netif->mtu)
+#define DHCP_MAX_MSG_LEN_MIN_REQUIRED 576
+/** Minimum length for reply before packet is parsed */
+#define DHCP_MIN_REPLY_LEN 44
+
+#define REBOOT_TRIES 2
+
+#if LWIP_DNS && LWIP_DHCP_MAX_DNS_SERVERS
+#if DNS_MAX_SERVERS > LWIP_DHCP_MAX_DNS_SERVERS
+#define LWIP_DHCP_PROVIDE_DNS_SERVERS LWIP_DHCP_MAX_DNS_SERVERS
+#else
+#define LWIP_DHCP_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS
+#endif
+#else
+#define LWIP_DHCP_PROVIDE_DNS_SERVERS 0
+#endif
+
+/** Option handling: options are parsed in dhcp_parse_reply
+ * and saved in an array from which other functions can load them.
+ * This might be moved into struct dhcp (not strictly necessary, since
+ * lwIP is single-threaded and the array is only used while in the recv
+ * callback). */
+enum dhcp_option_idx {
+ DHCP_OPTION_IDX_OVERLOAD = 0,
+ DHCP_OPTION_IDX_MSG_TYPE,
+ DHCP_OPTION_IDX_SERVER_ID,
+ DHCP_OPTION_IDX_LEASE_TIME,
+ DHCP_OPTION_IDX_T1,
+ DHCP_OPTION_IDX_T2,
+ DHCP_OPTION_IDX_SUBNET_MASK,
+ DHCP_OPTION_IDX_ROUTER,
+#if LWIP_DHCP_PROVIDE_DNS_SERVERS
+ DHCP_OPTION_IDX_DNS_SERVER,
+ DHCP_OPTION_IDX_DNS_SERVER_LAST = DHCP_OPTION_IDX_DNS_SERVER + LWIP_DHCP_PROVIDE_DNS_SERVERS - 1,
+#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */
+#if LWIP_DHCP_GET_NTP_SRV
+ DHCP_OPTION_IDX_NTP_SERVER,
+ DHCP_OPTION_IDX_NTP_SERVER_LAST = DHCP_OPTION_IDX_NTP_SERVER + LWIP_DHCP_MAX_NTP_SERVERS - 1,
+#endif /* LWIP_DHCP_GET_NTP_SRV */
+ DHCP_OPTION_IDX_MAX
+};
+
+/** Holds the decoded option values, only valid while in dhcp_recv.
+ @todo: move this into struct dhcp? */
+u32_t dhcp_rx_options_val[DHCP_OPTION_IDX_MAX];
+/** Holds a flag which option was received and is contained in dhcp_rx_options_val,
+ only valid while in dhcp_recv.
+ @todo: move this into struct dhcp? */
+u8_t dhcp_rx_options_given[DHCP_OPTION_IDX_MAX];
+
+static u8_t dhcp_discover_request_options[] = {
+ DHCP_OPTION_SUBNET_MASK,
+ DHCP_OPTION_ROUTER,
+ DHCP_OPTION_BROADCAST
+#if LWIP_DHCP_PROVIDE_DNS_SERVERS
+ , DHCP_OPTION_DNS_SERVER
+#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */
+#if LWIP_DHCP_GET_NTP_SRV
+ , DHCP_OPTION_NTP
+#endif /* LWIP_DHCP_GET_NTP_SRV */
+};
+
+#ifdef DHCP_GLOBAL_XID
+static u32_t xid;
+static u8_t xid_initialised;
+#endif /* DHCP_GLOBAL_XID */
+
+#define dhcp_option_given(dhcp, idx) (dhcp_rx_options_given[idx] != 0)
+#define dhcp_got_option(dhcp, idx) (dhcp_rx_options_given[idx] = 1)
+#define dhcp_clear_option(dhcp, idx) (dhcp_rx_options_given[idx] = 0)
+#define dhcp_clear_all_options(dhcp) (memset(dhcp_rx_options_given, 0, sizeof(dhcp_rx_options_given)))
+#define dhcp_get_option_value(dhcp, idx) (dhcp_rx_options_val[idx])
+#define dhcp_set_option_value(dhcp, idx, val) (dhcp_rx_options_val[idx] = (val))
+
+static struct udp_pcb *dhcp_pcb;
+static u8_t dhcp_pcb_refcount;
+
+/* DHCP client state machine functions */
+static err_t dhcp_discover(struct netif *netif);
+static err_t dhcp_select(struct netif *netif);
+static void dhcp_bind(struct netif *netif);
+#if DHCP_DOES_ARP_CHECK
+static err_t dhcp_decline(struct netif *netif);
+#endif /* DHCP_DOES_ARP_CHECK */
+static err_t dhcp_rebind(struct netif *netif);
+static err_t dhcp_reboot(struct netif *netif);
+static void dhcp_set_state(struct dhcp *dhcp, u8_t new_state);
+
+/* receive, unfold, parse and free incoming messages */
+static void dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port);
+
+/* set the DHCP timers */
+static void dhcp_timeout(struct netif *netif);
+static void dhcp_t1_timeout(struct netif *netif);
+static void dhcp_t2_timeout(struct netif *netif);
+
+/* build outgoing messages */
+/* create a DHCP message, fill in common headers */
+static struct pbuf *dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len);
+/* add a DHCP option (type, then length in bytes) */
+static u16_t dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len);
+/* add option values */
+static u16_t dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value);
+static u16_t dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value);
+static u16_t dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value);
+#if LWIP_NETIF_HOSTNAME
+static u16_t dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif);
+#endif /* LWIP_NETIF_HOSTNAME */
+/* always add the DHCP options trailer to end and pad */
+static void dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out);
+
+/** Ensure DHCP PCB is allocated and bound */
+static err_t
+dhcp_inc_pcb_refcount(void)
+{
+ if (dhcp_pcb_refcount == 0) {
+ LWIP_ASSERT("dhcp_inc_pcb_refcount(): memory leak", dhcp_pcb == NULL);
+
+ /* allocate UDP PCB */
+ dhcp_pcb = udp_new();
+
+ if (dhcp_pcb == NULL) {
+ return ERR_MEM;
+ }
+
+ ip_set_option(dhcp_pcb, SOF_BROADCAST);
+
+ /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */
+ udp_bind(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_CLIENT);
+ udp_connect(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_SERVER);
+ udp_recv(dhcp_pcb, dhcp_recv, NULL);
+ }
+
+ dhcp_pcb_refcount++;
+
+ return ERR_OK;
+}
+
+/** Free DHCP PCB if the last netif stops using it */
+static void
+dhcp_dec_pcb_refcount(void)
+{
+ LWIP_ASSERT("dhcp_pcb_refcount(): refcount error", (dhcp_pcb_refcount > 0));
+ dhcp_pcb_refcount--;
+
+ if (dhcp_pcb_refcount == 0) {
+ udp_remove(dhcp_pcb);
+ dhcp_pcb = NULL;
+ }
+}
+
+/**
+ * Back-off the DHCP client (because of a received NAK response).
+ *
+ * Back-off the DHCP client because of a received NAK. Receiving a
+ * NAK means the client asked for something nonsensical, for
+ * example when it tries to renew a lease obtained on another network.
+ *
+ * We clear any existing set IP address and restart DHCP negotiation
+ * afresh (as per RFC2131 3.2.3).
+ *
+ * @param netif the netif under DHCP control
+ */
+static void
+dhcp_handle_nak(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_nak(netif=%p) %c%c%"U16_F"\n",
+ (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
+ /* Change to a defined state - set this before assigning the address
+ to ensure the callback can use dhcp_supplied_address() */
+ dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF);
+ /* remove IP address from interface (must no longer be used, as per RFC2131) */
+ netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4);
+ /* We can immediately restart discovery */
+ dhcp_discover(netif);
+}
+
+#if DHCP_DOES_ARP_CHECK
+/**
+ * Checks if the offered IP address is already in use.
+ *
+ * It does so by sending an ARP request for the offered address and
+ * entering CHECKING state. If no ARP reply is received within a small
+ * interval, the address is assumed to be free for use by us.
+ *
+ * @param netif the netif under DHCP control
+ */
+static void
+dhcp_check(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ err_t result;
+ u16_t msecs;
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_check(netif=%p) %c%c\n", (void *)netif, (s16_t)netif->name[0],
+ (s16_t)netif->name[1]));
+ dhcp_set_state(dhcp, DHCP_STATE_CHECKING);
+ /* create an ARP query for the offered IP address, expecting that no host
+ responds, as the IP address should not be in use. */
+ result = etharp_query(netif, &dhcp->offered_ip_addr, NULL);
+ if (result != ERR_OK) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_check: could not perform ARP query\n"));
+ }
+ if (dhcp->tries < 255) {
+ dhcp->tries++;
+ }
+ msecs = 500;
+ dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_check(): set request timeout %"U16_F" msecs\n", msecs));
+}
+#endif /* DHCP_DOES_ARP_CHECK */
+
+/**
+ * Remember the configuration offered by a DHCP server.
+ *
+ * @param netif the netif under DHCP control
+ */
+static void
+dhcp_handle_offer(struct netif *netif, struct dhcp_msg *msg_in)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_offer(netif=%p) %c%c%"U16_F"\n",
+ (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
+ /* obtain the server address */
+ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SERVER_ID)) {
+ dhcp->request_timeout = 0; /* stop timer */
+
+ ip_addr_set_ip4_u32(&dhcp->server_ip_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SERVER_ID)));
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): server 0x%08"X32_F"\n",
+ ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr))));
+ /* remember offered address */
+ ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): offer for 0x%08"X32_F"\n",
+ ip4_addr_get_u32(&dhcp->offered_ip_addr)));
+
+ dhcp_select(netif);
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
+ ("dhcp_handle_offer(netif=%p) did not get server ID!\n", (void *)netif));
+ }
+}
+
+/**
+ * Select a DHCP server offer out of all offers.
+ *
+ * Simply select the first offer received.
+ *
+ * @param netif the netif under DHCP control
+ * @return lwIP specific error (see error.h)
+ */
+static err_t
+dhcp_select(struct netif *netif)
+{
+ struct dhcp *dhcp;
+ err_t result;
+ u16_t msecs;
+ u8_t i;
+ struct pbuf *p_out;
+ u16_t options_out_len;
+
+ LWIP_ERROR("dhcp_select: netif != NULL", (netif != NULL), return ERR_ARG;);
+ dhcp = netif_dhcp_data(netif);
+ LWIP_ERROR("dhcp_select: dhcp != NULL", (dhcp != NULL), return ERR_VAL;);
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_select(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
+ dhcp_set_state(dhcp, DHCP_STATE_REQUESTING);
+
+ /* create and initialize the DHCP message header */
+ p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len);
+ if (p_out != NULL) {
+ struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload;
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN);
+ options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif));
+
+ /* MUST request the offered IP address */
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4);
+ options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr)));
+
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4);
+ options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr))));
+
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options));
+ for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) {
+ options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]);
+ }
+
+#if LWIP_NETIF_HOSTNAME
+ options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif);
+#endif /* LWIP_NETIF_HOSTNAME */
+
+ LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REQUESTING, msg_out, DHCP_REQUEST, &options_out_len);
+ dhcp_option_trailer(options_out_len, msg_out->options, p_out);
+
+ /* send broadcast to any DHCP server */
+ result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY);
+ pbuf_free(p_out);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_select: REQUESTING\n"));
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_select: could not allocate DHCP request\n"));
+ result = ERR_MEM;
+ }
+ if (dhcp->tries < 255) {
+ dhcp->tries++;
+ }
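+  /* retransmission backoff: (1 << tries) seconds, capped at 60 seconds */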
+ msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000);
+ dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_select(): set request timeout %"U16_F" msecs\n", msecs));
+ return result;
+}
+
+/**
+ * The DHCP timer that checks for lease renewal/rebind timeouts.
+ * Must be called once a minute (see @ref DHCP_COARSE_TIMER_SECS).
+ */
+void
+dhcp_coarse_tmr(void)
+{
+ struct netif *netif;
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_coarse_tmr()\n"));
+ /* iterate through all network interfaces */
+ NETIF_FOREACH(netif) {
+ /* only act on DHCP configured interfaces */
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ if ((dhcp != NULL) && (dhcp->state != DHCP_STATE_OFF)) {
+ /* compare lease time to expire timeout */
+ if (dhcp->t0_timeout && (++dhcp->lease_used == dhcp->t0_timeout)) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t0 timeout\n"));
+        /* this client's lease time has expired */
+ dhcp_release_and_stop(netif);
+ dhcp_start(netif);
+ /* timer is active (non zero), and triggers (zeroes) now? */
+ } else if (dhcp->t2_rebind_time && (dhcp->t2_rebind_time-- == 1)) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t2 timeout\n"));
+        /* this client's rebind timeout triggered */
+ dhcp_t2_timeout(netif);
+ /* timer is active (non zero), and triggers (zeroes) now */
+ } else if (dhcp->t1_renew_time && (dhcp->t1_renew_time-- == 1)) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t1 timeout\n"));
+        /* this client's renewal timeout triggered */
+ dhcp_t1_timeout(netif);
+ }
+ }
+ }
+}
+
+/**
+ * DHCP transaction timeout handling (this function must be called every 500ms,
+ * see @ref DHCP_FINE_TIMER_MSECS).
+ *
+ * A DHCP server is expected to respond within a short period of time.
+ * This timer checks whether an outstanding DHCP request is timed out.
+ */
+void
+dhcp_fine_tmr(void)
+{
+ struct netif *netif;
+ /* loop through netif's */
+ NETIF_FOREACH(netif) {
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ /* only act on DHCP configured interfaces */
+ if (dhcp != NULL) {
+ /* timer is active (non zero), and is about to trigger now */
+ if (dhcp->request_timeout > 1) {
+ dhcp->request_timeout--;
+ } else if (dhcp->request_timeout == 1) {
+ dhcp->request_timeout--;
+ /* { dhcp->request_timeout == 0 } */
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_fine_tmr(): request timeout\n"));
+ /* this client's request timeout triggered */
+ dhcp_timeout(netif);
+ }
+ }
+ }
+}
+
+/**
+ * A DHCP negotiation transaction, or ARP request, has timed out.
+ *
+ * The timer that was started with the DHCP or ARP request has
+ * timed out, indicating no response was received in time.
+ *
+ * @param netif the netif under DHCP control
+ */
+static void
+dhcp_timeout(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout()\n"));
+ /* back-off period has passed, or server selection timed out */
+ if ((dhcp->state == DHCP_STATE_BACKING_OFF) || (dhcp->state == DHCP_STATE_SELECTING)) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout(): restarting discovery\n"));
+ dhcp_discover(netif);
+ /* receiving the requested lease timed out */
+ } else if (dhcp->state == DHCP_STATE_REQUESTING) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, DHCP request timed out\n"));
+ if (dhcp->tries <= 5) {
+ dhcp_select(netif);
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, releasing, restarting\n"));
+ dhcp_release_and_stop(netif);
+ dhcp_start(netif);
+ }
+#if DHCP_DOES_ARP_CHECK
+ /* received no ARP reply for the offered address (which is good) */
+ } else if (dhcp->state == DHCP_STATE_CHECKING) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): CHECKING, ARP request timed out\n"));
+ if (dhcp->tries <= 1) {
+ dhcp_check(netif);
+    } else {
+      /* no ARP replies on the offered address:
+         the IP address is apparently free, so
+         bind the interface to the offered address */
+      dhcp_bind(netif);
+ }
+#endif /* DHCP_DOES_ARP_CHECK */
+ } else if (dhcp->state == DHCP_STATE_REBOOTING) {
+ if (dhcp->tries < REBOOT_TRIES) {
+ dhcp_reboot(netif);
+ } else {
+ dhcp_discover(netif);
+ }
+ }
+}
+
+/**
+ * The renewal period has timed out.
+ *
+ * @param netif the netif under DHCP control
+ */
+static void
+dhcp_t1_timeout(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_t1_timeout()\n"));
+ if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) ||
+ (dhcp->state == DHCP_STATE_RENEWING)) {
+ /* just retry to renew - note that the rebind timer (t2) will
+ * eventually time-out if renew tries fail. */
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ ("dhcp_t1_timeout(): must renew\n"));
+    /* This is slightly different from RFC 2131: the DHCPREQUEST will be sent
+       from state DHCP_STATE_RENEWING, not DHCP_STATE_BOUND */
+ dhcp_renew(netif);
+ /* Calculate next timeout */
+ if (((dhcp->t2_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) {
+ dhcp->t1_renew_time = (u16_t)((dhcp->t2_timeout - dhcp->lease_used) / 2);
+ }
+ }
+}
+
+/**
+ * The rebind period has timed out.
+ *
+ * @param netif the netif under DHCP control
+ */
+static void
+dhcp_t2_timeout(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t2_timeout()\n"));
+ if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) ||
+ (dhcp->state == DHCP_STATE_RENEWING) || (dhcp->state == DHCP_STATE_REBINDING)) {
+ /* just retry to rebind */
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ ("dhcp_t2_timeout(): must rebind\n"));
+    /* This is slightly different from RFC 2131: the DHCPREQUEST will be sent
+       from state DHCP_STATE_REBINDING, not DHCP_STATE_BOUND */
+ dhcp_rebind(netif);
+ /* Calculate next timeout */
+ if (((dhcp->t0_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) {
+ dhcp->t2_rebind_time = (u16_t)((dhcp->t0_timeout - dhcp->lease_used) / 2);
+ }
+ }
+}
+
+/**
+ * Handle a DHCP ACK packet
+ *
+ * @param netif the netif under DHCP control
+ */
+static void
+dhcp_handle_ack(struct netif *netif, struct dhcp_msg *msg_in)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+
+#if LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV
+ u8_t n;
+#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV */
+#if LWIP_DHCP_GET_NTP_SRV
+ ip4_addr_t ntp_server_addrs[LWIP_DHCP_MAX_NTP_SERVERS];
+#endif
+
+ /* clear options we might not get from the ACK */
+ ip4_addr_set_zero(&dhcp->offered_sn_mask);
+ ip4_addr_set_zero(&dhcp->offered_gw_addr);
+#if LWIP_DHCP_BOOTP_FILE
+ ip4_addr_set_zero(&dhcp->offered_si_addr);
+#endif /* LWIP_DHCP_BOOTP_FILE */
+
+ /* lease time given? */
+ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_LEASE_TIME)) {
+ /* remember offered lease time */
+ dhcp->offered_t0_lease = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_LEASE_TIME);
+ }
+ /* renewal period given? */
+ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T1)) {
+ /* remember given renewal period */
+ dhcp->offered_t1_renew = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T1);
+ } else {
+ /* calculate safe periods for renewal */
+ dhcp->offered_t1_renew = dhcp->offered_t0_lease / 2;
+ }
+
+  /* rebind period given? */
+ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T2)) {
+ /* remember given rebind period */
+ dhcp->offered_t2_rebind = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T2);
+ } else {
+ /* calculate safe periods for rebinding (offered_t0_lease * 0.875 -> 87.5%)*/
+ dhcp->offered_t2_rebind = (dhcp->offered_t0_lease * 7U) / 8U;
+ }
+
+ /* (y)our internet address */
+ ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr);
+
+#if LWIP_DHCP_BOOTP_FILE
+ /* copy boot server address,
+ boot file name copied in dhcp_parse_reply if not overloaded */
+ ip4_addr_copy(dhcp->offered_si_addr, msg_in->siaddr);
+#endif /* LWIP_DHCP_BOOTP_FILE */
+
+ /* subnet mask given? */
+ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SUBNET_MASK)) {
+ /* remember given subnet mask */
+ ip4_addr_set_u32(&dhcp->offered_sn_mask, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SUBNET_MASK)));
+ dhcp->subnet_mask_given = 1;
+ } else {
+ dhcp->subnet_mask_given = 0;
+ }
+
+ /* gateway router */
+ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_ROUTER)) {
+ ip4_addr_set_u32(&dhcp->offered_gw_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_ROUTER)));
+ }
+
+#if LWIP_DHCP_GET_NTP_SRV
+ /* NTP servers */
+ for (n = 0; (n < LWIP_DHCP_MAX_NTP_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n); n++) {
+ ip4_addr_set_u32(&ntp_server_addrs[n], lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n)));
+ }
+ dhcp_set_ntp_servers(n, ntp_server_addrs);
+#endif /* LWIP_DHCP_GET_NTP_SRV */
+
+#if LWIP_DHCP_PROVIDE_DNS_SERVERS
+ /* DNS servers */
+ for (n = 0; (n < LWIP_DHCP_PROVIDE_DNS_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n); n++) {
+ ip_addr_t dns_addr;
+ ip_addr_set_ip4_u32_val(dns_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n)));
+ dns_setserver(n, &dns_addr);
+ }
+#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */
+}
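+
+/* Worked example of the fallback periods above: for an offered lease time
+ * of 86400 s (24 h) with no T1/T2 options in the ACK, the renewal period
+ * defaults to 86400 / 2 = 43200 s (50%) and the rebind period to
+ * 86400 * 7 / 8 = 75600 s (87.5%), matching the RFC 2131 recommendations. */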
+
+/**
+ * @ingroup dhcp4
+ * Set a statically allocated struct dhcp to work with.
+ * Using this prevents dhcp_start() from allocating it with mem_malloc().
+ *
+ * @param netif the netif for which to set the struct dhcp
+ * @param dhcp (uninitialised) dhcp struct allocated by the application
+ */
+void
+dhcp_set_struct(struct netif *netif, struct dhcp *dhcp)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+ LWIP_ASSERT("dhcp != NULL", dhcp != NULL);
+ LWIP_ASSERT("netif already has a struct dhcp set", netif_dhcp_data(netif) == NULL);
+
+ /* clear data structure */
+ memset(dhcp, 0, sizeof(struct dhcp));
+ /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp);
+}
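+
+/* Usage sketch for static allocation (illustrative; 'my_netif' and
+ * 'my_dhcp_client' are assumed application names, not lwIP API):
+ *
+ *   static struct dhcp my_dhcp_client;
+ *   dhcp_set_struct(&my_netif, &my_dhcp_client);
+ *   dhcp_start(&my_netif);
+ *
+ * Do not call dhcp_cleanup() on a netif configured this way - the struct
+ * did not come from the heap. */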
+
+/**
+ * @ingroup dhcp4
+ * Removes a struct dhcp from a netif.
+ *
+ * ATTENTION: Only use this when not using dhcp_set_struct() to allocate the
+ * struct dhcp since the memory is passed back to the heap.
+ *
+ * @param netif the netif from which to remove the struct dhcp
+ */
+void dhcp_cleanup(struct netif *netif)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+
+ if (netif_dhcp_data(netif) != NULL) {
+ mem_free(netif_dhcp_data(netif));
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL);
+ }
+}
+
+/**
+ * @ingroup dhcp4
+ * Start DHCP negotiation for a network interface.
+ *
+ * If no DHCP client instance was attached to this interface,
+ * a new client is created first. If a DHCP client instance
+ * was already present, it restarts negotiation.
+ *
+ * @param netif The lwIP network interface
+ * @return lwIP error code
+ * - ERR_OK - No error
+ * - ERR_MEM - Out of memory
+ */
+err_t
+dhcp_start(struct netif *netif)
+{
+ struct dhcp *dhcp;
+ err_t result;
+
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ERROR("netif != NULL", (netif != NULL), return ERR_ARG;);
+ LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;);
+ dhcp = netif_dhcp_data(netif);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
+
+ /* check MTU of the netif */
+ if (netif->mtu < DHCP_MAX_MSG_LEN_MIN_REQUIRED) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): Cannot use this netif with DHCP: MTU is too small\n"));
+ return ERR_MEM;
+ }
+
+ /* no DHCP client attached yet? */
+ if (dhcp == NULL) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): mallocing new DHCP client\n"));
+ dhcp = (struct dhcp *)mem_malloc(sizeof(struct dhcp));
+ if (dhcp == NULL) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): could not allocate dhcp\n"));
+ return ERR_MEM;
+ }
+
+ /* store this dhcp client in the netif */
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): allocated dhcp"));
+ /* already has DHCP client attached */
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(): restarting DHCP configuration\n"));
+
+ if (dhcp->pcb_allocated != 0) {
+ dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */
+ }
+    /* dhcp is cleared below, no need to reset flag */
+ }
+
+ /* clear data structure */
+ memset(dhcp, 0, sizeof(struct dhcp));
+ /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): starting DHCP configuration\n"));
+
+ if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */
+ return ERR_MEM;
+ }
+ dhcp->pcb_allocated = 1;
+
+ if (!netif_is_link_up(netif)) {
+ /* set state INIT and wait for dhcp_network_changed() to call dhcp_discover() */
+ dhcp_set_state(dhcp, DHCP_STATE_INIT);
+ return ERR_OK;
+ }
+
+ /* (re)start the DHCP negotiation */
+ result = dhcp_discover(netif);
+ if (result != ERR_OK) {
+ /* free resources allocated above */
+ dhcp_release_and_stop(netif);
+ return ERR_MEM;
+ }
+ return result;
+}
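+
+/* Minimal startup sketch (illustrative; error handling elided, and
+ * 'my_netif'/'my_netif_init' are assumed application names, not lwIP API):
+ *
+ *   netif_add(&my_netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4,
+ *             NULL, my_netif_init, tcpip_input);
+ *   netif_set_up(&my_netif);
+ *   dhcp_start(&my_netif);
+ */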
+
+/**
+ * @ingroup dhcp4
+ * Inform a DHCP server of our manual configuration.
+ *
+ * This informs DHCP servers of our fixed IP address configuration
+ * by sending an INFORM message. It does not involve DHCP address
+ * configuration; it is just here to be nice to the network.
+ *
+ * @param netif The lwIP network interface
+ */
+void
+dhcp_inform(struct netif *netif)
+{
+ struct dhcp dhcp;
+ struct pbuf *p_out;
+ u16_t options_out_len;
+
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ERROR("netif != NULL", (netif != NULL), return;);
+
+ if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */
+ return;
+ }
+
+ memset(&dhcp, 0, sizeof(struct dhcp));
+ dhcp_set_state(&dhcp, DHCP_STATE_INFORMING);
+
+ /* create and initialize the DHCP message header */
+ p_out = dhcp_create_msg(netif, &dhcp, DHCP_INFORM, &options_out_len);
+ if (p_out != NULL) {
+ struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload;
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN);
+ options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif));
+
+ LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, &dhcp, DHCP_STATE_INFORMING, msg_out, DHCP_INFORM, &options_out_len);
+ dhcp_option_trailer(options_out_len, msg_out->options, p_out);
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_inform: INFORMING\n"));
+
+ udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif);
+
+ pbuf_free(p_out);
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_inform: could not allocate DHCP request\n"));
+ }
+
+ dhcp_dec_pcb_refcount(); /* delete DHCP PCB if not needed any more */
+}
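+
+/* Usage sketch (illustrative): announce a statically configured address
+ * once the netif is up; no DHCP state is kept afterwards. 'my_netif',
+ * 'my_ip', 'my_mask' and 'my_gw' are assumed application variables:
+ *
+ *   netif_set_addr(&my_netif, &my_ip, &my_mask, &my_gw);
+ *   netif_set_up(&my_netif);
+ *   dhcp_inform(&my_netif);
+ */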
+
+/** Handle a possible change in the network configuration.
+ *
+ * This enters the REBOOTING state to verify that the currently bound
+ * address is still valid.
+ */
+void
+dhcp_network_changed(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+
+ if (!dhcp) {
+ return;
+ }
+ switch (dhcp->state) {
+ case DHCP_STATE_REBINDING:
+ case DHCP_STATE_RENEWING:
+ case DHCP_STATE_BOUND:
+ case DHCP_STATE_REBOOTING:
+ dhcp->tries = 0;
+ dhcp_reboot(netif);
+ break;
+ case DHCP_STATE_OFF:
+ /* stay off */
+ break;
+ default:
+ LWIP_ASSERT("invalid dhcp->state", dhcp->state <= DHCP_STATE_BACKING_OFF);
+      /* INIT/REQUESTING/CHECKING/BACKING_OFF restart with a new 'xid' because
+         the state changes; SELECTING continues with the current 'xid' as we
+         stay in the same state */
+#if LWIP_DHCP_AUTOIP_COOP
+ if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) {
+ autoip_stop(netif);
+ dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF;
+ }
+#endif /* LWIP_DHCP_AUTOIP_COOP */
+ /* ensure we start with short timeouts, even if already discovering */
+ dhcp->tries = 0;
+ dhcp_discover(netif);
+ break;
+ }
+}
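+
+/* Note: applications normally do not call this directly; with LWIP_DHCP
+ * enabled, netif_set_link_up() invokes it, so a driver reporting link
+ * changes is enough (sketch; 'link_is_up' is an assumed driver flag):
+ *
+ *   if (link_is_up) {
+ *     netif_set_link_up(&my_netif);
+ *   } else {
+ *     netif_set_link_down(&my_netif);
+ *   }
+ */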
+
+#if DHCP_DOES_ARP_CHECK
+/**
+ * Match an ARP reply with the offered IP address:
+ * check via ARP whether the offered IP address is already in use
+ *
+ * @param netif the network interface on which the reply was received
+ * @param addr The IP address we received a reply from
+ */
+void
+dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr)
+{
+ struct dhcp *dhcp;
+
+ LWIP_ERROR("netif != NULL", (netif != NULL), return;);
+ dhcp = netif_dhcp_data(netif);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_arp_reply()\n"));
+ /* is a DHCP client doing an ARP check? */
+ if ((dhcp != NULL) && (dhcp->state == DHCP_STATE_CHECKING)) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_arp_reply(): CHECKING, arp reply for 0x%08"X32_F"\n",
+ ip4_addr_get_u32(addr)));
+ /* did a host respond with the address we
+ were offered by the DHCP server? */
+ if (ip4_addr_cmp(addr, &dhcp->offered_ip_addr)) {
+ /* we will not accept the offered address */
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING,
+ ("dhcp_arp_reply(): arp reply matched with offered address, declining\n"));
+ dhcp_decline(netif);
+ }
+ }
+}
+
+/**
+ * Decline an offered lease.
+ *
+ * Tell the DHCP server we do not accept the offered address.
+ * One reason to decline the lease is when we find out the address
+ * is already in use by another host (through ARP).
+ *
+ * @param netif the netif under DHCP control
+ */
+static err_t
+dhcp_decline(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ err_t result;
+ u16_t msecs;
+ struct pbuf *p_out;
+ u16_t options_out_len;
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline()\n"));
+ dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF);
+ /* create and initialize the DHCP message header */
+ p_out = dhcp_create_msg(netif, dhcp, DHCP_DECLINE, &options_out_len);
+ if (p_out != NULL) {
+ struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload;
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4);
+ options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr)));
+
+ LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_BACKING_OFF, msg_out, DHCP_DECLINE, &options_out_len);
+ dhcp_option_trailer(options_out_len, msg_out->options, p_out);
+
+    /* per RFC 2131 section 4.4.4, DECLINE messages are broadcast */
+ result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY);
+ pbuf_free(p_out);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_decline: BACKING OFF\n"));
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
+ ("dhcp_decline: could not allocate DHCP request\n"));
+ result = ERR_MEM;
+ }
+ if (dhcp->tries < 255) {
+ dhcp->tries++;
+ }
+ msecs = 10 * 1000;
+ dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline(): set request timeout %"U16_F" msecs\n", msecs));
+ return result;
+}
+#endif /* DHCP_DOES_ARP_CHECK */
+
+
+/**
+ * Start the DHCP process, discover a DHCP server.
+ *
+ * @param netif the netif under DHCP control
+ */
+static err_t
+dhcp_discover(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ err_t result = ERR_OK;
+ u16_t msecs;
+ u8_t i;
+ struct pbuf *p_out;
+ u16_t options_out_len;
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover()\n"));
+
+ ip4_addr_set_any(&dhcp->offered_ip_addr);
+ dhcp_set_state(dhcp, DHCP_STATE_SELECTING);
+ /* create and initialize the DHCP message header */
+ p_out = dhcp_create_msg(netif, dhcp, DHCP_DISCOVER, &options_out_len);
+ if (p_out != NULL) {
+ struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload;
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: making request\n"));
+
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN);
+ options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif));
+
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options));
+ for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) {
+ options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]);
+ }
+ LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_SELECTING, msg_out, DHCP_DISCOVER, &options_out_len);
+ dhcp_option_trailer(options_out_len, msg_out->options, p_out);
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: sendto(DISCOVER, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER)\n"));
+ udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: deleting()ing\n"));
+ pbuf_free(p_out);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover: SELECTING\n"));
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_discover: could not allocate DHCP request\n"));
+ }
+ if (dhcp->tries < 255) {
+ dhcp->tries++;
+ }
+#if LWIP_DHCP_AUTOIP_COOP
+ if (dhcp->tries >= LWIP_DHCP_AUTOIP_COOP_TRIES && dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_OFF) {
+ dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_ON;
+ autoip_start(netif);
+ }
+#endif /* LWIP_DHCP_AUTOIP_COOP */
+ msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000);
+ dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover(): set request timeout %"U16_F" msecs\n", msecs));
+ return result;
+}
+
+
+/**
+ * Bind the interface to the offered IP address.
+ *
+ * @param netif network interface to bind to the offered address
+ */
+static void
+dhcp_bind(struct netif *netif)
+{
+ u32_t timeout;
+ struct dhcp *dhcp;
+ ip4_addr_t sn_mask, gw_addr;
+ LWIP_ERROR("dhcp_bind: netif != NULL", (netif != NULL), return;);
+ dhcp = netif_dhcp_data(netif);
+ LWIP_ERROR("dhcp_bind: dhcp != NULL", (dhcp != NULL), return;);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
+
+ /* reset time used of lease */
+ dhcp->lease_used = 0;
+
+ if (dhcp->offered_t0_lease != 0xffffffffUL) {
+    /* set lease expiry timer */
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t0 renewal timer %"U32_F" secs\n", dhcp->offered_t0_lease));
+ timeout = (dhcp->offered_t0_lease + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS;
+ if (timeout > 0xffff) {
+ timeout = 0xffff;
+ }
+ dhcp->t0_timeout = (u16_t)timeout;
+ if (dhcp->t0_timeout == 0) {
+ dhcp->t0_timeout = 1;
+ }
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t0_lease * 1000));
+ }
+
+ /* temporary DHCP lease? */
+ if (dhcp->offered_t1_renew != 0xffffffffUL) {
+ /* set renewal period timer */
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t1 renewal timer %"U32_F" secs\n", dhcp->offered_t1_renew));
+ timeout = (dhcp->offered_t1_renew + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS;
+ if (timeout > 0xffff) {
+ timeout = 0xffff;
+ }
+ dhcp->t1_timeout = (u16_t)timeout;
+ if (dhcp->t1_timeout == 0) {
+ dhcp->t1_timeout = 1;
+ }
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t1_renew * 1000));
+ dhcp->t1_renew_time = dhcp->t1_timeout;
+ }
+  /* set rebind period timer */
+ if (dhcp->offered_t2_rebind != 0xffffffffUL) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t2 rebind timer %"U32_F" secs\n", dhcp->offered_t2_rebind));
+ timeout = (dhcp->offered_t2_rebind + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS;
+ if (timeout > 0xffff) {
+ timeout = 0xffff;
+ }
+ dhcp->t2_timeout = (u16_t)timeout;
+ if (dhcp->t2_timeout == 0) {
+ dhcp->t2_timeout = 1;
+ }
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t2_rebind * 1000));
+ dhcp->t2_rebind_time = dhcp->t2_timeout;
+ }
+
+  /* If we have a sub-1-minute lease, t1 and t2 will kick in at the same time. */
+ if ((dhcp->t1_timeout >= dhcp->t2_timeout) && (dhcp->t2_timeout > 0)) {
+ dhcp->t1_timeout = 0;
+ }
+
+ if (dhcp->subnet_mask_given) {
+ /* copy offered network mask */
+ ip4_addr_copy(sn_mask, dhcp->offered_sn_mask);
+ } else {
+ /* subnet mask not given, choose a safe subnet mask given the network class */
+ u8_t first_octet = ip4_addr1(&dhcp->offered_ip_addr);
+ if (first_octet <= 127) {
+ ip4_addr_set_u32(&sn_mask, PP_HTONL(0xff000000UL));
+ } else if (first_octet >= 192) {
+ ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffffff00UL));
+ } else {
+ ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffff0000UL));
+ }
+ }
+
+ ip4_addr_copy(gw_addr, dhcp->offered_gw_addr);
+ /* gateway address not given? */
+ if (ip4_addr_isany_val(gw_addr)) {
+ /* copy network address */
+ ip4_addr_get_network(&gw_addr, &dhcp->offered_ip_addr, &sn_mask);
+ /* use first host address on network as gateway */
+ ip4_addr_set_u32(&gw_addr, ip4_addr_get_u32(&gw_addr) | PP_HTONL(0x00000001UL));
+ }
+
+#if LWIP_DHCP_AUTOIP_COOP
+ if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) {
+ autoip_stop(netif);
+ dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF;
+ }
+#endif /* LWIP_DHCP_AUTOIP_COOP */
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_bind(): IP: 0x%08"X32_F" SN: 0x%08"X32_F" GW: 0x%08"X32_F"\n",
+ ip4_addr_get_u32(&dhcp->offered_ip_addr), ip4_addr_get_u32(&sn_mask), ip4_addr_get_u32(&gw_addr)));
+ /* netif is now bound to DHCP leased address - set this before assigning the address
+ to ensure the callback can use dhcp_supplied_address() */
+ dhcp_set_state(dhcp, DHCP_STATE_BOUND);
+
+ netif_set_addr(netif, &dhcp->offered_ip_addr, &sn_mask, &gw_addr);
+ /* interface is used by routing now that an address is set */
+}
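+
+/* Worked example of the classful fallback above: an offered address of
+ * 10.1.2.3 (first octet <= 127) yields the mask 255.0.0.0, 172.16.0.5
+ * yields 255.255.0.0 and 192.168.1.10 (first octet >= 192) yields
+ * 255.255.255.0; without a router option, the derived gateway for the
+ * last case is the first host address: 192.168.1.0 | 0.0.0.1 = 192.168.1.1. */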
+
+/**
+ * @ingroup dhcp4
+ * Renew an existing DHCP lease at the involved DHCP server.
+ *
+ * @param netif network interface which must renew its lease
+ */
+err_t
+dhcp_renew(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ err_t result;
+ u16_t msecs;
+ u8_t i;
+ struct pbuf *p_out;
+ u16_t options_out_len;
+
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_renew()\n"));
+ dhcp_set_state(dhcp, DHCP_STATE_RENEWING);
+
+ /* create and initialize the DHCP message header */
+ p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len);
+ if (p_out != NULL) {
+ struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload;
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN);
+ options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif));
+
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options));
+ for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) {
+ options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]);
+ }
+
+#if LWIP_NETIF_HOSTNAME
+ options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif);
+#endif /* LWIP_NETIF_HOSTNAME */
+
+ LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_RENEWING, msg_out, DHCP_REQUEST, &options_out_len);
+ dhcp_option_trailer(options_out_len, msg_out->options, p_out);
+
+ result = udp_sendto_if(dhcp_pcb, p_out, &dhcp->server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif);
+ pbuf_free(p_out);
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew: RENEWING\n"));
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_renew: could not allocate DHCP request\n"));
+ result = ERR_MEM;
+ }
+ if (dhcp->tries < 255) {
+ dhcp->tries++;
+ }
+ /* back-off on retries, but to a maximum of 20 seconds */
+ msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 2000 : 20 * 1000);
+ dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew(): set request timeout %"U16_F" msecs\n", msecs));
+ return result;
+}
+
+/**
+ * Rebind with a DHCP server for an existing DHCP lease.
+ *
+ * @param netif network interface which must rebind with a DHCP server
+ */
+static err_t
+dhcp_rebind(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ err_t result;
+ u16_t msecs;
+ u8_t i;
+ struct pbuf *p_out;
+ u16_t options_out_len;
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind()\n"));
+ dhcp_set_state(dhcp, DHCP_STATE_REBINDING);
+
+ /* create and initialize the DHCP message header */
+ p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len);
+ if (p_out != NULL) {
+ struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload;
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN);
+ options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif));
+
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options));
+ for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) {
+ options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]);
+ }
+
+#if LWIP_NETIF_HOSTNAME
+ options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif);
+#endif /* LWIP_NETIF_HOSTNAME */
+
+ LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBINDING, msg_out, DHCP_DISCOVER, &options_out_len);
+ dhcp_option_trailer(options_out_len, msg_out->options, p_out);
+
+ /* broadcast to server */
+ result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif);
+ pbuf_free(p_out);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind: REBINDING\n"));
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_rebind: could not allocate DHCP request\n"));
+ result = ERR_MEM;
+ }
+ if (dhcp->tries < 255) {
+ dhcp->tries++;
+ }
+ msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 1000 : 10 * 1000);
+ dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind(): set request timeout %"U16_F" msecs\n", msecs));
+ return result;
+}
+
+/**
+ * Enter REBOOTING state to verify an existing lease
+ *
+ * @param netif network interface which must reboot
+ */
+static err_t
+dhcp_reboot(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ err_t result;
+ u16_t msecs;
+ u8_t i;
+ struct pbuf *p_out;
+ u16_t options_out_len;
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot()\n"));
+ dhcp_set_state(dhcp, DHCP_STATE_REBOOTING);
+
+ /* create and initialize the DHCP message header */
+ p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len);
+ if (p_out != NULL) {
+ struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload;
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN);
+ options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN_MIN_REQUIRED);
+
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4);
+ options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr)));
+
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options));
+ for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) {
+ options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]);
+ }
+
+#if LWIP_NETIF_HOSTNAME
+ options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif);
+#endif /* LWIP_NETIF_HOSTNAME */
+
+ LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBOOTING, msg_out, DHCP_REQUEST, &options_out_len);
+ dhcp_option_trailer(options_out_len, msg_out->options, p_out);
+
+ /* broadcast to server */
+ result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif);
+ pbuf_free(p_out);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot: REBOOTING\n"));
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_reboot: could not allocate DHCP request\n"));
+ result = ERR_MEM;
+ }
+ if (dhcp->tries < 255) {
+ dhcp->tries++;
+ }
+ msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 1000 : 10 * 1000);
+ dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot(): set request timeout %"U16_F" msecs\n", msecs));
+ return result;
+}
+
+/**
+ * @ingroup dhcp4
+ * Release a DHCP lease and stop the DHCP state machine (and AUTOIP if LWIP_DHCP_AUTOIP_COOP).
+ *
+ * @param netif network interface
+ */
+void
+dhcp_release_and_stop(struct netif *netif)
+{
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ ip_addr_t server_ip_addr;
+
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_release_and_stop()\n"));
+ if (dhcp == NULL) {
+ return;
+ }
+
+ /* already off? -> nothing to do */
+ if (dhcp->state == DHCP_STATE_OFF) {
+ return;
+ }
+
+ ip_addr_copy(server_ip_addr, dhcp->server_ip_addr);
+
+ /* clean old DHCP offer */
+ ip_addr_set_zero_ip4(&dhcp->server_ip_addr);
+ ip4_addr_set_zero(&dhcp->offered_ip_addr);
+ ip4_addr_set_zero(&dhcp->offered_sn_mask);
+ ip4_addr_set_zero(&dhcp->offered_gw_addr);
+#if LWIP_DHCP_BOOTP_FILE
+ ip4_addr_set_zero(&dhcp->offered_si_addr);
+#endif /* LWIP_DHCP_BOOTP_FILE */
+ dhcp->offered_t0_lease = dhcp->offered_t1_renew = dhcp->offered_t2_rebind = 0;
+ dhcp->t1_renew_time = dhcp->t2_rebind_time = dhcp->lease_used = dhcp->t0_timeout = 0;
+
+ /* send release message when current IP was assigned via DHCP */
+ if (dhcp_supplied_address(netif)) {
+ /* create and initialize the DHCP message header */
+ struct pbuf *p_out;
+ u16_t options_out_len;
+ p_out = dhcp_create_msg(netif, dhcp, DHCP_RELEASE, &options_out_len);
+ if (p_out != NULL) {
+ struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload;
+ options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4);
+ options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&server_ip_addr))));
+
+ LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, dhcp->state, msg_out, DHCP_RELEASE, &options_out_len);
+ dhcp_option_trailer(options_out_len, msg_out->options, p_out);
+
+ udp_sendto_if(dhcp_pcb, p_out, &server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif);
+ pbuf_free(p_out);
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_release: RELEASED, DHCP_STATE_OFF\n"));
+ } else {
+ /* sending release failed, but that's not a problem since the correct behaviour of dhcp does not rely on release */
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_release: could not allocate DHCP request\n"));
+ }
+ }
+
+ /* remove IP address from interface (prevents routing from selecting this interface) */
+ netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4);
+
+#if LWIP_DHCP_AUTOIP_COOP
+ if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) {
+ autoip_stop(netif);
+ dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF;
+ }
+#endif /* LWIP_DHCP_AUTOIP_COOP */
+
+ dhcp_set_state(dhcp, DHCP_STATE_OFF);
+
+ if (dhcp->pcb_allocated != 0) {
+ dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */
+ dhcp->pcb_allocated = 0;
+ }
+}
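+
+/* Usage sketch (illustrative): give the lease back before taking the
+ * interface down ('my_netif' is an assumed application netif):
+ *
+ *   dhcp_release_and_stop(&my_netif);
+ *   netif_set_down(&my_netif);
+ */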
+
+/**
+ * @ingroup dhcp4
+ * This function calls dhcp_release_and_stop() internally.
+ * @deprecated Use dhcp_release_and_stop() instead.
+ */
+err_t
+dhcp_release(struct netif *netif)
+{
+ dhcp_release_and_stop(netif);
+ return ERR_OK;
+}
+
+/**
+ * @ingroup dhcp4
+ * This function calls dhcp_release_and_stop() internally.
+ * @deprecated Use dhcp_release_and_stop() instead.
+ */
+void
+dhcp_stop(struct netif *netif)
+{
+ dhcp_release_and_stop(netif);
+}
+
+/*
+ * Set the DHCP state of a DHCP client.
+ *
+ * If the state changed, reset the number of tries.
+ */
+static void
+dhcp_set_state(struct dhcp *dhcp, u8_t new_state)
+{
+ if (new_state != dhcp->state) {
+ dhcp->state = new_state;
+ dhcp->tries = 0;
+ dhcp->request_timeout = 0;
+ }
+}
+
+/*
+ * Concatenate an option type and length field to the outgoing
+ * DHCP message.
+ *
+ */
+static u16_t
+dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len)
+{
+ LWIP_ASSERT("dhcp_option: options_out_len + 2 + option_len <= DHCP_OPTIONS_LEN", options_out_len + 2U + option_len <= DHCP_OPTIONS_LEN);
+ options[options_out_len++] = option_type;
+ options[options_out_len++] = option_len;
+ return options_out_len;
+}
+/*
+ * Concatenate a single byte to the outgoing DHCP message.
+ *
+ */
+static u16_t
+dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value)
+{
+ LWIP_ASSERT("dhcp_option_byte: options_out_len < DHCP_OPTIONS_LEN", options_out_len < DHCP_OPTIONS_LEN);
+ options[options_out_len++] = value;
+ return options_out_len;
+}
+
+static u16_t
+dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value)
+{
+ LWIP_ASSERT("dhcp_option_short: options_out_len + 2 <= DHCP_OPTIONS_LEN", options_out_len + 2U <= DHCP_OPTIONS_LEN);
+ options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8);
+ options[options_out_len++] = (u8_t) (value & 0x00ffU);
+ return options_out_len;
+}
+
+static u16_t
+dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value)
+{
+ LWIP_ASSERT("dhcp_option_long: options_out_len + 4 <= DHCP_OPTIONS_LEN", options_out_len + 4U <= DHCP_OPTIONS_LEN);
+ options[options_out_len++] = (u8_t)((value & 0xff000000UL) >> 24);
+ options[options_out_len++] = (u8_t)((value & 0x00ff0000UL) >> 16);
+ options[options_out_len++] = (u8_t)((value & 0x0000ff00UL) >> 8);
+ options[options_out_len++] = (u8_t)((value & 0x000000ffUL));
+ return options_out_len;
+}
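+
+/* Illustrative byte layout produced by the helpers above: appending the
+ * maximum-message-size option (code 57, length 2, value 1500) via
+ *
+ *   len = dhcp_option(len, options, DHCP_OPTION_MAX_MSG_SIZE,
+ *                     DHCP_OPTION_MAX_MSG_SIZE_LEN);
+ *   len = dhcp_option_short(len, options, 1500);
+ *
+ * emits the bytes 0x39 0x02 0x05 0xDC into the options buffer
+ * (type, length, big-endian value - the RFC 2132 TLV encoding). */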
+
+#if LWIP_NETIF_HOSTNAME
+static u16_t
+dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif)
+{
+ if (netif->hostname != NULL) {
+ size_t namelen = strlen(netif->hostname);
+ if (namelen > 0) {
+ size_t len;
+ const char *p = netif->hostname;
+ /* Shrink len to available bytes (need 2 bytes for OPTION_HOSTNAME
+ and 1 byte for trailer) */
+ size_t available = DHCP_OPTIONS_LEN - options_out_len - 3;
+ LWIP_ASSERT("DHCP: hostname is too long!", namelen <= available);
+ len = LWIP_MIN(namelen, available);
+ LWIP_ASSERT("DHCP: hostname is too long!", len <= 0xFF);
+ options_out_len = dhcp_option(options_out_len, options, DHCP_OPTION_HOSTNAME, (u8_t)len);
+ while (len--) {
+ options_out_len = dhcp_option_byte(options_out_len, options, *p++);
+ }
+ }
+ }
+ return options_out_len;
+}
+#endif /* LWIP_NETIF_HOSTNAME */
+
+/**
+ * Extract the DHCP message and the DHCP options.
+ *
+ * Extract the DHCP message and the DHCP options, each into a contiguous
+ * piece of memory. As a DHCP message is variable-sized by its options and
+ * may overload the sname/file fields to carry further options, the easy
+ * approach is to first unfold the options into a contiguous piece of
+ * memory and use that further on.
+ *
+ */
+static err_t
+dhcp_parse_reply(struct pbuf *p, struct dhcp *dhcp)
+{
+ u8_t *options;
+ u16_t offset;
+ u16_t offset_max;
+ u16_t options_idx;
+ u16_t options_idx_max;
+ struct pbuf *q;
+ int parse_file_as_options = 0;
+ int parse_sname_as_options = 0;
+ struct dhcp_msg *msg_in;
+#if LWIP_DHCP_BOOTP_FILE
+ int file_overloaded = 0;
+#endif
+
+ LWIP_UNUSED_ARG(dhcp);
+
+ /* clear received options */
+ dhcp_clear_all_options(dhcp);
+ /* check that beginning of dhcp_msg (up to and including chaddr) is in first pbuf */
+ if (p->len < DHCP_SNAME_OFS) {
+ return ERR_BUF;
+ }
+ msg_in = (struct dhcp_msg *)p->payload;
+#if LWIP_DHCP_BOOTP_FILE
+ /* clear boot file name */
+ dhcp->boot_file_name[0] = 0;
+#endif /* LWIP_DHCP_BOOTP_FILE */
+
+ /* parse options */
+
+ /* start with options field */
+ options_idx = DHCP_OPTIONS_OFS;
+ /* parse options to the end of the received packet */
+ options_idx_max = p->tot_len;
+again:
+ q = p;
+ while ((q != NULL) && (options_idx >= q->len)) {
+ options_idx = (u16_t)(options_idx - q->len);
+ options_idx_max = (u16_t)(options_idx_max - q->len);
+ q = q->next;
+ }
+ if (q == NULL) {
+ return ERR_BUF;
+ }
+ offset = options_idx;
+ offset_max = options_idx_max;
+ options = (u8_t *)q->payload;
+  /* at least 1 byte to read and no end marker yet? */
+ while ((q != NULL) && (offset < offset_max) && (options[offset] != DHCP_OPTION_END)) {
+ u8_t op = options[offset];
+ u8_t len;
+ u8_t decode_len = 0;
+ int decode_idx = -1;
+ u16_t val_offset = (u16_t)(offset + 2);
+ if (val_offset < offset) {
+ /* overflow */
+ return ERR_BUF;
+ }
+ /* len byte might be in the next pbuf */
+ if ((offset + 1) < q->len) {
+ len = options[offset + 1];
+ } else {
+ len = (q->next != NULL ? ((u8_t *)q->next->payload)[0] : 0);
+ }
+ /* LWIP_DEBUGF(DHCP_DEBUG, ("msg_offset=%"U16_F", q->len=%"U16_F, msg_offset, q->len)); */
+ decode_len = len;
+ switch (op) {
+ /* case(DHCP_OPTION_END): handled above */
+ case (DHCP_OPTION_PAD):
+ /* special option: no len encoded */
+ decode_len = len = 0;
+ /* will be increased below */
+ break;
+ case (DHCP_OPTION_SUBNET_MASK):
+ LWIP_ERROR("len == 4", len == 4, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_SUBNET_MASK;
+ break;
+ case (DHCP_OPTION_ROUTER):
+ decode_len = 4; /* only copy the first given router */
+ LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_ROUTER;
+ break;
+#if LWIP_DHCP_PROVIDE_DNS_SERVERS
+ case (DHCP_OPTION_DNS_SERVER):
+ /* special case: there might be more than one server */
+ LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;);
+ /* limit number of DNS servers */
+ decode_len = LWIP_MIN(len, 4 * DNS_MAX_SERVERS);
+ LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_DNS_SERVER;
+ break;
+#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */
+ case (DHCP_OPTION_LEASE_TIME):
+ LWIP_ERROR("len == 4", len == 4, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_LEASE_TIME;
+ break;
+#if LWIP_DHCP_GET_NTP_SRV
+ case (DHCP_OPTION_NTP):
+ /* special case: there might be more than one server */
+ LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;);
+ /* limit number of NTP servers */
+ decode_len = LWIP_MIN(len, 4 * LWIP_DHCP_MAX_NTP_SERVERS);
+ LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_NTP_SERVER;
+ break;
+#endif /* LWIP_DHCP_GET_NTP_SRV*/
+ case (DHCP_OPTION_OVERLOAD):
+ LWIP_ERROR("len == 1", len == 1, return ERR_VAL;);
+ /* decode overload only in options, not in file/sname: invalid packet */
+ LWIP_ERROR("overload in file/sname", options_idx == DHCP_OPTIONS_OFS, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_OVERLOAD;
+ break;
+ case (DHCP_OPTION_MESSAGE_TYPE):
+ LWIP_ERROR("len == 1", len == 1, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_MSG_TYPE;
+ break;
+ case (DHCP_OPTION_SERVER_ID):
+ LWIP_ERROR("len == 4", len == 4, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_SERVER_ID;
+ break;
+ case (DHCP_OPTION_T1):
+ LWIP_ERROR("len == 4", len == 4, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_T1;
+ break;
+ case (DHCP_OPTION_T2):
+ LWIP_ERROR("len == 4", len == 4, return ERR_VAL;);
+ decode_idx = DHCP_OPTION_IDX_T2;
+ break;
+ default:
+ decode_len = 0;
+ LWIP_DEBUGF(DHCP_DEBUG, ("skipping option %"U16_F" in options\n", (u16_t)op));
+ LWIP_HOOK_DHCP_PARSE_OPTION(ip_current_netif(), dhcp, dhcp->state, msg_in,
+ dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) ? (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) : 0,
+ op, len, q, val_offset);
+ break;
+ }
+ if (op == DHCP_OPTION_PAD) {
+ offset++;
+ } else {
+ if (offset + len + 2 > 0xFFFF) {
+ /* overflow */
+ return ERR_BUF;
+ }
+ offset = (u16_t)(offset + len + 2);
+ if (decode_len > 0) {
+ u32_t value = 0;
+ u16_t copy_len;
+decode_next:
+ LWIP_ASSERT("check decode_idx", decode_idx >= 0 && decode_idx < DHCP_OPTION_IDX_MAX);
+ if (!dhcp_option_given(dhcp, decode_idx)) {
+ copy_len = LWIP_MIN(decode_len, 4);
+ if (pbuf_copy_partial(q, &value, copy_len, val_offset) != copy_len) {
+ return ERR_BUF;
+ }
+ if (decode_len > 4) {
+ /* decode more than one u32_t */
+ u16_t next_val_offset;
+ LWIP_ERROR("decode_len %% 4 == 0", decode_len % 4 == 0, return ERR_VAL;);
+ dhcp_got_option(dhcp, decode_idx);
+ dhcp_set_option_value(dhcp, decode_idx, lwip_htonl(value));
+ decode_len = (u8_t)(decode_len - 4);
+ next_val_offset = (u16_t)(val_offset + 4);
+ if (next_val_offset < val_offset) {
+ /* overflow */
+ return ERR_BUF;
+ }
+ val_offset = next_val_offset;
+ decode_idx++;
+ goto decode_next;
+ } else if (decode_len == 4) {
+ value = lwip_ntohl(value);
+ } else {
+ LWIP_ERROR("invalid decode_len", decode_len == 1, return ERR_VAL;);
+ value = ((u8_t *)&value)[0];
+ }
+ dhcp_got_option(dhcp, decode_idx);
+ dhcp_set_option_value(dhcp, decode_idx, value);
+ }
+ }
+ }
+ if (offset >= q->len) {
+ offset = (u16_t)(offset - q->len);
+ offset_max = (u16_t)(offset_max - q->len);
+ if (offset < offset_max) {
+ q = q->next;
+ LWIP_ERROR("next pbuf was null", q != NULL, return ERR_VAL;);
+ options = (u8_t *)q->payload;
+ } else {
+ /* We've run out of bytes, probably no end marker. Don't proceed. */
+ return ERR_BUF;
+ }
+ }
+ }
+ /* is this an overloaded message? */
+ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_OVERLOAD)) {
+ u32_t overload = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_OVERLOAD);
+ dhcp_clear_option(dhcp, DHCP_OPTION_IDX_OVERLOAD);
+ if (overload == DHCP_OVERLOAD_FILE) {
+ parse_file_as_options = 1;
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded file field\n"));
+ } else if (overload == DHCP_OVERLOAD_SNAME) {
+ parse_sname_as_options = 1;
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname field\n"));
+ } else if (overload == DHCP_OVERLOAD_SNAME_FILE) {
+ parse_sname_as_options = 1;
+ parse_file_as_options = 1;
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname and file field\n"));
+ } else {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("invalid overload option: %d\n", (int)overload));
+ }
+ }
+ if (parse_file_as_options) {
+ /* if both are overloaded, parse file first and then sname (RFC 2131 ch. 4.1) */
+ parse_file_as_options = 0;
+ options_idx = DHCP_FILE_OFS;
+ options_idx_max = DHCP_FILE_OFS + DHCP_FILE_LEN;
+#if LWIP_DHCP_BOOTP_FILE
+ file_overloaded = 1;
+#endif
+ goto again;
+ } else if (parse_sname_as_options) {
+ parse_sname_as_options = 0;
+ options_idx = DHCP_SNAME_OFS;
+ options_idx_max = DHCP_SNAME_OFS + DHCP_SNAME_LEN;
+ goto again;
+ }
+#if LWIP_DHCP_BOOTP_FILE
+ if (!file_overloaded) {
+ /* only do this for ACK messages */
+    if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) &&
+        (dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) == DHCP_ACK)) {
+      /* copy bootp file name, don't care for sname (server hostname) */
+      if (pbuf_copy_partial(p, dhcp->boot_file_name, DHCP_FILE_LEN-1, DHCP_FILE_OFS) != (DHCP_FILE_LEN-1)) {
+        return ERR_BUF;
+      }
+    }
+ /* make sure the string is really NULL-terminated */
+ dhcp->boot_file_name[DHCP_FILE_LEN-1] = 0;
+ }
+#endif /* LWIP_DHCP_BOOTP_FILE */
+ return ERR_OK;
+}
+
+/**
+ * If an incoming DHCP message is in response to us, then trigger the state machine
+ */
+static void
+dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port)
+{
+ struct netif *netif = ip_current_input_netif();
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ struct dhcp_msg *reply_msg = (struct dhcp_msg *)p->payload;
+ u8_t msg_type;
+ u8_t i;
+ struct dhcp_msg *msg_in;
+
+ LWIP_UNUSED_ARG(arg);
+
+ /* Caught DHCP message from netif that does not have DHCP enabled? -> not interested */
+ if ((dhcp == NULL) || (dhcp->pcb_allocated == 0)) {
+ goto free_pbuf_and_return;
+ }
+
+ LWIP_ASSERT("invalid server address type", IP_IS_V4(addr));
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_recv(pbuf = %p) from DHCP server %"U16_F".%"U16_F".%"U16_F".%"U16_F" port %"U16_F"\n", (void *)p,
+ ip4_addr1_16(ip_2_ip4(addr)), ip4_addr2_16(ip_2_ip4(addr)), ip4_addr3_16(ip_2_ip4(addr)), ip4_addr4_16(ip_2_ip4(addr)), port));
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len));
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len));
+ /* prevent warnings about unused arguments */
+ LWIP_UNUSED_ARG(pcb);
+ LWIP_UNUSED_ARG(addr);
+ LWIP_UNUSED_ARG(port);
+
+ if (p->len < DHCP_MIN_REPLY_LEN) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP reply message or pbuf too short\n"));
+ goto free_pbuf_and_return;
+ }
+
+ if (reply_msg->op != DHCP_BOOTREPLY) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("not a DHCP reply message, but type %"U16_F"\n", (u16_t)reply_msg->op));
+ goto free_pbuf_and_return;
+ }
+ /* iterate through hardware address and match against DHCP message */
+ for (i = 0; i < netif->hwaddr_len && i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) {
+ if (netif->hwaddr[i] != reply_msg->chaddr[i]) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING,
+ ("netif->hwaddr[%"U16_F"]==%02"X16_F" != reply_msg->chaddr[%"U16_F"]==%02"X16_F"\n",
+ (u16_t)i, (u16_t)netif->hwaddr[i], (u16_t)i, (u16_t)reply_msg->chaddr[i]));
+ goto free_pbuf_and_return;
+ }
+ }
+ /* match transaction ID against what we expected */
+ if (lwip_ntohl(reply_msg->xid) != dhcp->xid) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING,
+ ("transaction id mismatch reply_msg->xid(%"X32_F")!=dhcp->xid(%"X32_F")\n", lwip_ntohl(reply_msg->xid), dhcp->xid));
+ goto free_pbuf_and_return;
+ }
+  /* could the option fields be unfolded? */
+ if (dhcp_parse_reply(p, dhcp) != ERR_OK) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
+ ("problem unfolding DHCP message - too short on memory?\n"));
+ goto free_pbuf_and_return;
+ }
+
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("searching DHCP_OPTION_MESSAGE_TYPE\n"));
+ /* obtain pointer to DHCP message type */
+ if (!dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE)) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP_OPTION_MESSAGE_TYPE option not found\n"));
+ goto free_pbuf_and_return;
+ }
+
+ msg_in = (struct dhcp_msg *)p->payload;
+ /* read DHCP message type */
+ msg_type = (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE);
+ /* message type is DHCP ACK? */
+ if (msg_type == DHCP_ACK) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_ACK received\n"));
+ /* in requesting state? */
+ if (dhcp->state == DHCP_STATE_REQUESTING) {
+ dhcp_handle_ack(netif, msg_in);
+#if DHCP_DOES_ARP_CHECK
+ if ((netif->flags & NETIF_FLAG_ETHARP) != 0) {
+ /* check if the acknowledged lease address is already in use */
+ dhcp_check(netif);
+ } else {
+ /* bind interface to the acknowledged lease address */
+ dhcp_bind(netif);
+ }
+#else
+ /* bind interface to the acknowledged lease address */
+ dhcp_bind(netif);
+#endif
+ }
+ /* already bound to the given lease address? */
+ else if ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REBINDING) ||
+ (dhcp->state == DHCP_STATE_RENEWING)) {
+ dhcp_handle_ack(netif, msg_in);
+ dhcp_bind(netif);
+ }
+ }
+ /* received a DHCP_NAK in appropriate state? */
+ else if ((msg_type == DHCP_NAK) &&
+ ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REQUESTING) ||
+ (dhcp->state == DHCP_STATE_REBINDING) || (dhcp->state == DHCP_STATE_RENEWING ))) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_NAK received\n"));
+ dhcp_handle_nak(netif);
+ }
+ /* received a DHCP_OFFER in DHCP_STATE_SELECTING state? */
+ else if ((msg_type == DHCP_OFFER) && (dhcp->state == DHCP_STATE_SELECTING)) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_OFFER received in DHCP_STATE_SELECTING state\n"));
+ /* remember offered lease */
+ dhcp_handle_offer(netif, msg_in);
+ }
+
+free_pbuf_and_return:
+ pbuf_free(p);
+}
+
+/**
+ * Create a DHCP request, fill in common headers
+ *
+ * @param netif the netif under DHCP control
+ * @param dhcp dhcp control struct
+ * @param message_type message type of the request
+ */
+static struct pbuf *
+dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len)
+{
+ u16_t i;
+ struct pbuf *p_out;
+ struct dhcp_msg *msg_out;
+ u16_t options_out_len_loc;
+
+#ifndef DHCP_GLOBAL_XID
+ /** default global transaction identifier starting value (easy to match
+ * with a packet analyser). We simply increment for each new request.
+   * Predefine DHCP_GLOBAL_XID to a better value or to a function call that
+   * generates one at runtime; any supporting function prototypes can be
+   * defined in DHCP_GLOBAL_XID_HEADER */
+#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND)
+ static u32_t xid;
+#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */
+ static u32_t xid = 0xABCD0000;
+#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */
+#else
+ if (!xid_initialised) {
+ xid = DHCP_GLOBAL_XID;
+ xid_initialised = !xid_initialised;
+ }
+#endif
+ LWIP_ERROR("dhcp_create_msg: netif != NULL", (netif != NULL), return NULL;);
+ LWIP_ERROR("dhcp_create_msg: dhcp != NULL", (dhcp != NULL), return NULL;);
+ p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp_msg), PBUF_RAM);
+ if (p_out == NULL) {
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
+ ("dhcp_create_msg(): could not allocate pbuf\n"));
+ return NULL;
+ }
+ LWIP_ASSERT("dhcp_create_msg: check that first pbuf can hold struct dhcp_msg",
+ (p_out->len >= sizeof(struct dhcp_msg)));
+
+ /* DHCP_REQUEST should reuse 'xid' from DHCPOFFER */
+ if ((message_type != DHCP_REQUEST) || (dhcp->state == DHCP_STATE_REBOOTING)) {
+ /* reuse transaction identifier in retransmissions */
+ if (dhcp->tries == 0) {
+#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND)
+ xid = LWIP_RAND();
+#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */
+ xid++;
+#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */
+ }
+ dhcp->xid = xid;
+ }
+ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE,
+ ("transaction id xid(%"X32_F")\n", xid));
+
+ msg_out = (struct dhcp_msg *)p_out->payload;
+ memset(msg_out, 0, sizeof(struct dhcp_msg));
+
+ msg_out->op = DHCP_BOOTREQUEST;
+ /* @todo: make link layer independent */
+ msg_out->htype = LWIP_IANA_HWTYPE_ETHERNET;
+ msg_out->hlen = netif->hwaddr_len;
+ msg_out->xid = lwip_htonl(dhcp->xid);
+ /* we don't need the broadcast flag since we can receive unicast traffic
+ before being fully configured! */
+ /* set ciaddr to netif->ip_addr based on message_type and state */
+ if ((message_type == DHCP_INFORM) || (message_type == DHCP_DECLINE) || (message_type == DHCP_RELEASE) ||
+ ((message_type == DHCP_REQUEST) && /* DHCP_STATE_BOUND not used for sending! */
+ ((dhcp->state == DHCP_STATE_RENEWING) || dhcp->state == DHCP_STATE_REBINDING))) {
+ ip4_addr_copy(msg_out->ciaddr, *netif_ip4_addr(netif));
+ }
+ for (i = 0; i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) {
+ /* copy netif hardware address (padded with zeroes through memset already) */
+ msg_out->chaddr[i] = netif->hwaddr[i];
+ }
+ msg_out->cookie = PP_HTONL(DHCP_MAGIC_COOKIE);
+ /* Add option MESSAGE_TYPE */
+ options_out_len_loc = dhcp_option(0, msg_out->options, DHCP_OPTION_MESSAGE_TYPE, DHCP_OPTION_MESSAGE_TYPE_LEN);
+ options_out_len_loc = dhcp_option_byte(options_out_len_loc, msg_out->options, message_type);
+ if (options_out_len) {
+ *options_out_len = options_out_len_loc;
+ }
+ return p_out;
+}
+
+/**
+ * Add a DHCP message trailer
+ *
+ * Adds the END option to the DHCP message and, if necessary, pads the
+ * options up to the minimum options length and to a 4-byte boundary.
+ */
+static void
+dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out)
+{
+ options[options_out_len++] = DHCP_OPTION_END;
+ /* packet is too small, or not 4 byte aligned? */
+ while (((options_out_len < DHCP_MIN_OPTIONS_LEN) || (options_out_len & 3)) &&
+ (options_out_len < DHCP_OPTIONS_LEN)) {
+ /* add a fill/padding byte */
+ options[options_out_len++] = 0;
+ }
+ /* shrink the pbuf to the actual content length */
+ pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + options_out_len));
+}
+
+/** check if DHCP supplied netif->ip_addr
+ *
+ * @param netif the netif to check
+ * @return 1 if DHCP supplied netif->ip_addr (states BOUND, RENEWING or REBINDING),
+ * 0 otherwise
+ */
+u8_t
+dhcp_supplied_address(const struct netif *netif)
+{
+ if ((netif != NULL) && (netif_dhcp_data(netif) != NULL)) {
+ struct dhcp *dhcp = netif_dhcp_data(netif);
+ return (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING) ||
+ (dhcp->state == DHCP_STATE_REBINDING);
+ }
+ return 0;
+}
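+
+/* Usage sketch (illustrative): busy-wait for an address after
+ * dhcp_start(); a netif status callback (netif_set_status_callback(),
+ * when LWIP_NETIF_STATUS_CALLBACK is enabled) avoids the polling:
+ *
+ *   dhcp_start(&my_netif);
+ *   while (!dhcp_supplied_address(&my_netif)) {
+ *     sys_msleep(100);
+ *   }
+ */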
+
+#endif /* LWIP_IPV4 && LWIP_DHCP */
diff --git a/lwip/src/core/ipv4/etharp.c b/lwip/src/core/ipv4/etharp.c
new file mode 100644
index 0000000..442aac0
--- /dev/null
+++ b/lwip/src/core/ipv4/etharp.c
@@ -0,0 +1,1204 @@
+/**
+ * @file
+ * Address Resolution Protocol module for IP over Ethernet
+ *
+ * Functionally, ARP is divided into two parts. The first maps an IP address
+ * to a physical address when sending a packet, and the second part answers
+ * requests from other machines for our physical address.
+ *
+ * This implementation complies with RFC 826 (Ethernet ARP). It supports
+ * Gratuitous ARP from RFC 3220 (IP Mobility Support for IPv4) section 4.6
+ * if an interface calls etharp_gratuitous(our_netif) upon address change.
+ */
+
+/*
+ * Copyright (c) 2001-2003 Swedish Institute of Computer Science.
+ * Copyright (c) 2003-2004 Leon Woestenberg <leon.woestenberg@axon.tv>
+ * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/etharp.h"
+#include "lwip/stats.h"
+#include "lwip/snmp.h"
+#include "lwip/dhcp.h"
+#include "lwip/autoip.h"
+#include "lwip/prot/iana.h"
+#include "netif/ethernet.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/** Re-request a used ARP entry shortly before it would expire, to prevent
+ * breaking a steadily used connection because the ARP entry timed out:
+ * 30 ticks early by unicast, then 15 ticks early by broadcast (i.e. seconds,
+ * at the default ARP_TMR_INTERVAL of 1000 ms). */
+#define ARP_AGE_REREQUEST_USED_UNICAST (ARP_MAXAGE - 30)
+#define ARP_AGE_REREQUEST_USED_BROADCAST (ARP_MAXAGE - 15)
+
+/** the time an ARP entry stays pending after first request,
+ * for ARP_TMR_INTERVAL = 1000, this is
+ * 5 seconds.
+ *
+ * @internal Keep this number at least 2, otherwise it might
+ * run out instantly if the timeout occurs directly after a request.
+ */
+#define ARP_MAXPENDING 5
+
+/** ARP states */
+enum etharp_state {
+ ETHARP_STATE_EMPTY = 0,
+ ETHARP_STATE_PENDING,
+ ETHARP_STATE_STABLE,
+ ETHARP_STATE_STABLE_REREQUESTING_1,
+ ETHARP_STATE_STABLE_REREQUESTING_2
+#if ETHARP_SUPPORT_STATIC_ENTRIES
+ , ETHARP_STATE_STATIC
+#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */
+};
+
+struct etharp_entry {
+#if ARP_QUEUEING
+ /** Pointer to queue of pending outgoing packets on this ARP entry. */
+ struct etharp_q_entry *q;
+#else /* ARP_QUEUEING */
+ /** Pointer to a single pending outgoing packet on this ARP entry. */
+ struct pbuf *q;
+#endif /* ARP_QUEUEING */
+ ip4_addr_t ipaddr;
+ struct netif *netif;
+ struct eth_addr ethaddr;
+ u16_t ctime;
+ u8_t state;
+};
+
+static struct etharp_entry arp_table[ARP_TABLE_SIZE];
+
+#if !LWIP_NETIF_HWADDRHINT
+static netif_addr_idx_t etharp_cached_entry;
+#endif /* !LWIP_NETIF_HWADDRHINT */
+
+/** Try hard to create a new entry - we want the IP address to appear in
+ the cache (even if this means removing an active entry or so). */
+#define ETHARP_FLAG_TRY_HARD 1
+#define ETHARP_FLAG_FIND_ONLY 2
+#if ETHARP_SUPPORT_STATIC_ENTRIES
+#define ETHARP_FLAG_STATIC_ENTRY 4
+#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */
+
+#if LWIP_NETIF_HWADDRHINT
+#define ETHARP_SET_ADDRHINT(netif, addrhint) do { if (((netif) != NULL) && ((netif)->hints != NULL)) { \
+ (netif)->hints->addr_hint = (addrhint); }} while(0)
+#else /* LWIP_NETIF_HWADDRHINT */
+#define ETHARP_SET_ADDRHINT(netif, addrhint) (etharp_cached_entry = (addrhint))
+#endif /* LWIP_NETIF_HWADDRHINT */
+
+
+/* Check for maximum ARP_TABLE_SIZE */
+#if (ARP_TABLE_SIZE > NETIF_ADDR_IDX_MAX)
+#error "ARP_TABLE_SIZE must fit in an s16_t, you have to reduce it in your lwipopts.h"
+#endif
+
+
+static err_t etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr);
+static err_t etharp_raw(struct netif *netif,
+ const struct eth_addr *ethsrc_addr, const struct eth_addr *ethdst_addr,
+ const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr,
+ const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr,
+ const u16_t opcode);
+
+#if ARP_QUEUEING
+/**
+ * Free a complete queue of etharp entries
+ *
+ * @param q a queue of etharp_q_entry's to free
+ */
+static void
+free_etharp_q(struct etharp_q_entry *q)
+{
+ struct etharp_q_entry *r;
+ LWIP_ASSERT("q != NULL", q != NULL);
+ while (q) {
+ r = q;
+ q = q->next;
+ LWIP_ASSERT("r->p != NULL", (r->p != NULL));
+ pbuf_free(r->p);
+ memp_free(MEMP_ARP_QUEUE, r);
+ }
+}
+#else /* ARP_QUEUEING */
+
+/** Compatibility define: free the queued pbuf */
+#define free_etharp_q(q) pbuf_free(q)
+
+#endif /* ARP_QUEUEING */
+
+/** Clean up (free) a single ARP table entry */
+static void
+etharp_free_entry(int i)
+{
+ /* remove from SNMP ARP index tree */
+ mib2_remove_arp_entry(arp_table[i].netif, &arp_table[i].ipaddr);
+ /* and empty packet queue */
+ if (arp_table[i].q != NULL) {
+ /* remove all queued packets */
+ LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_free_entry: freeing entry %"U16_F", packet queue %p.\n", (u16_t)i, (void *)(arp_table[i].q)));
+ free_etharp_q(arp_table[i].q);
+ arp_table[i].q = NULL;
+ }
+ /* recycle entry for re-use */
+ arp_table[i].state = ETHARP_STATE_EMPTY;
+#ifdef LWIP_DEBUG
+ /* for debugging, clean out the complete entry */
+ arp_table[i].ctime = 0;
+ arp_table[i].netif = NULL;
+ ip4_addr_set_zero(&arp_table[i].ipaddr);
+ arp_table[i].ethaddr = ethzero;
+#endif /* LWIP_DEBUG */
+}
+
+/**
+ * Clears expired entries in the ARP table.
+ *
+ * This function should be called every ARP_TMR_INTERVAL milliseconds (1 second),
+ * in order to expire entries in the ARP table.
+ */
+void
+etharp_tmr(void)
+{
+ int i;
+
+ LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer\n"));
+ /* remove expired entries from the ARP table */
+ for (i = 0; i < ARP_TABLE_SIZE; ++i) {
+ u8_t state = arp_table[i].state;
+ if (state != ETHARP_STATE_EMPTY
+#if ETHARP_SUPPORT_STATIC_ENTRIES
+ && (state != ETHARP_STATE_STATIC)
+#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */
+ ) {
+ arp_table[i].ctime++;
+ if ((arp_table[i].ctime >= ARP_MAXAGE) ||
+ ((arp_table[i].state == ETHARP_STATE_PENDING) &&
+ (arp_table[i].ctime >= ARP_MAXPENDING))) {
+ /* pending or stable entry has become old! */
+ LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer: expired %s entry %d.\n",
+ arp_table[i].state >= ETHARP_STATE_STABLE ? "stable" : "pending", i));
+ /* clean up entries that have just been expired */
+ etharp_free_entry(i);
+ } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_1) {
+ /* Don't send more than one request every 2 seconds. */
+ arp_table[i].state = ETHARP_STATE_STABLE_REREQUESTING_2;
+ } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_2) {
+ /* Reset state to stable, so that the next transmitted packet will
+ re-send an ARP request. */
+ arp_table[i].state = ETHARP_STATE_STABLE;
+ } else if (arp_table[i].state == ETHARP_STATE_PENDING) {
+ /* still pending, resend an ARP query */
+ etharp_request(arp_table[i].netif, &arp_table[i].ipaddr);
+ }
+ }
+ }
+}
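+/* Scheduling sketch: applications normally do not call etharp_tmr() directly;
+ * lwIP's timeouts.c drives it. A hypothetical manual scheduling, assuming
+ * NO_SYS==0, would look like:
+ *
+ *   static void arp_timer(void *arg)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     etharp_tmr();
+ *     sys_timeout(ARP_TMR_INTERVAL, arp_timer, NULL);
+ *   }
+ */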
+
+/**
+ * Search the ARP table for a matching or new entry.
+ *
+ * If an IP address is given, return a pending or stable ARP entry that matches
+ * the address. If no match is found, create a new entry with this address set,
+ * but in state ETHARP_STATE_EMPTY. The caller must check and possibly change the
+ * state of the returned entry.
+ *
+ * If ipaddr is NULL, return an initialized new entry in state ETHARP_STATE_EMPTY.
+ *
+ * In all cases, attempt to create new entries from an empty entry. If no
+ * empty entries are available and the ETHARP_FLAG_TRY_HARD flag is set,
+ * recycle old entries, heuristically choosing the least important one.
+ *
+ * @param ipaddr IP address to find in ARP cache, or to add if not found.
+ * @param flags ETHARP_FLAG_* flags (e.g. ETHARP_FLAG_TRY_HARD, ETHARP_FLAG_FIND_ONLY)
+ * @param netif netif related to this address (used for NETIF_HWADDRHINT)
+ *
+ * @return The ARP entry index that matched or is created, ERR_MEM if no
+ * entry is found or could be recycled.
+ */
+static s16_t
+etharp_find_entry(const ip4_addr_t *ipaddr, u8_t flags, struct netif *netif)
+{
+ s16_t old_pending = ARP_TABLE_SIZE, old_stable = ARP_TABLE_SIZE;
+ s16_t empty = ARP_TABLE_SIZE;
+ s16_t i = 0;
+ /* oldest entry with packets on queue */
+ s16_t old_queue = ARP_TABLE_SIZE;
+ /* its age */
+ u16_t age_queue = 0, age_pending = 0, age_stable = 0;
+
+ LWIP_UNUSED_ARG(netif);
+
+ /**
+ * a) do a search through the cache, remember candidates
+ * b) select candidate entry
+ * c) create new entry
+ */
+
+ /* a) in a single search sweep, do all of this
+ * 1) remember the first empty entry (if any)
+ * 2) remember the oldest stable entry (if any)
+ * 3) remember the oldest pending entry without queued packets (if any)
+ * 4) remember the oldest pending entry with queued packets (if any)
+ * 5) search for a matching IP entry, either pending or stable
+ * until 5 matches, or all entries are searched for.
+ */
+
+ for (i = 0; i < ARP_TABLE_SIZE; ++i) {
+ u8_t state = arp_table[i].state;
+ /* no empty entry found yet and now we do find one? */
+ if ((empty == ARP_TABLE_SIZE) && (state == ETHARP_STATE_EMPTY)) {
+ LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_find_entry: found empty entry %d\n", (int)i));
+ /* remember first empty entry */
+ empty = i;
+ } else if (state != ETHARP_STATE_EMPTY) {
+ LWIP_ASSERT("state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE",
+ state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE);
+ /* if given, does IP address match IP address in ARP entry? */
+ if (ipaddr && ip4_addr_cmp(ipaddr, &arp_table[i].ipaddr)
+#if ETHARP_TABLE_MATCH_NETIF
+ && ((netif == NULL) || (netif == arp_table[i].netif))
+#endif /* ETHARP_TABLE_MATCH_NETIF */
+ ) {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: found matching entry %d\n", (int)i));
+ /* found exact IP address match, simply bail out */
+ return i;
+ }
+ /* pending entry? */
+ if (state == ETHARP_STATE_PENDING) {
+ /* pending with queued packets? */
+ if (arp_table[i].q != NULL) {
+ if (arp_table[i].ctime >= age_queue) {
+ old_queue = i;
+ age_queue = arp_table[i].ctime;
+ }
+ } else
+ /* pending without queued packets? */
+ {
+ if (arp_table[i].ctime >= age_pending) {
+ old_pending = i;
+ age_pending = arp_table[i].ctime;
+ }
+ }
+ /* stable entry? */
+ } else if (state >= ETHARP_STATE_STABLE) {
+#if ETHARP_SUPPORT_STATIC_ENTRIES
+ /* don't record old_stable for static entries since they never expire */
+ if (state < ETHARP_STATE_STATIC)
+#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */
+ {
+ /* remember entry with oldest stable entry in oldest, its age in maxtime */
+ if (arp_table[i].ctime >= age_stable) {
+ old_stable = i;
+ age_stable = arp_table[i].ctime;
+ }
+ }
+ }
+ }
+ }
+ /* { we have no match } => try to create a new entry */
+
+ /* don't create new entry, only search? */
+ if (((flags & ETHARP_FLAG_FIND_ONLY) != 0) ||
+ /* or no empty entry found and not allowed to recycle? */
+ ((empty == ARP_TABLE_SIZE) && ((flags & ETHARP_FLAG_TRY_HARD) == 0))) {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty entry found and not allowed to recycle\n"));
+ return (s16_t)ERR_MEM;
+ }
+
+ /* b) choose the least destructive entry to recycle:
+ * 1) empty entry
+ * 2) oldest stable entry
+ * 3) oldest pending entry without queued packets
+ * 4) oldest pending entry with queued packets
+ *
+ * { ETHARP_FLAG_TRY_HARD is set at this point }
+ */
+
+ /* 1) empty entry available? */
+ if (empty < ARP_TABLE_SIZE) {
+ i = empty;
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting empty entry %d\n", (int)i));
+ } else {
+ /* 2) found recyclable stable entry? */
+ if (old_stable < ARP_TABLE_SIZE) {
+ /* recycle oldest stable */
+ i = old_stable;
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest stable entry %d\n", (int)i));
+ /* no queued packets should exist on stable entries */
+ LWIP_ASSERT("arp_table[i].q == NULL", arp_table[i].q == NULL);
+ /* 3) found recyclable pending entry without queued packets? */
+ } else if (old_pending < ARP_TABLE_SIZE) {
+ /* recycle oldest pending */
+ i = old_pending;
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d (without queue)\n", (int)i));
+ /* 4) found recyclable pending entry with queued packets? */
+ } else if (old_queue < ARP_TABLE_SIZE) {
+ /* recycle oldest pending (queued packets are free in etharp_free_entry) */
+ i = old_queue;
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d, freeing packet queue %p\n", (int)i, (void *)(arp_table[i].q)));
+ /* no empty or recyclable entries found */
+ } else {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty or recyclable entries found\n"));
+ return (s16_t)ERR_MEM;
+ }
+
+ /* { empty or recyclable entry found } */
+ LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE);
+ etharp_free_entry(i);
+ }
+
+ LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE);
+ LWIP_ASSERT("arp_table[i].state == ETHARP_STATE_EMPTY",
+ arp_table[i].state == ETHARP_STATE_EMPTY);
+
+ /* IP address given? */
+ if (ipaddr != NULL) {
+ /* set IP address */
+ ip4_addr_copy(arp_table[i].ipaddr, *ipaddr);
+ }
+ arp_table[i].ctime = 0;
+#if ETHARP_TABLE_MATCH_NETIF
+ arp_table[i].netif = netif;
+#endif /* ETHARP_TABLE_MATCH_NETIF */
+ return (s16_t)i;
+}
+
+/**
+ * Update (or insert) an IP/MAC address pair in the ARP cache.
+ *
+ * If a pending entry is resolved, any queued packets will be sent
+ * at this point.
+ *
+ * @param netif netif related to this entry (used for NETIF_ADDRHINT)
+ * @param ipaddr IP address of the inserted ARP entry.
+ * @param ethaddr Ethernet address of the inserted ARP entry.
+ * @param flags ETHARP_FLAG_* flags (e.g. ETHARP_FLAG_TRY_HARD)
+ *
+ * @return
+ * - ERR_OK Successfully updated ARP cache.
+ * - ERR_MEM If we could not add a new ARP entry when ETHARP_FLAG_TRY_HARD was set.
+ * - ERR_ARG Non-unicast address given, those will not appear in ARP cache.
+ *
+ * @see pbuf_free()
+ */
+static err_t
+etharp_update_arp_entry(struct netif *netif, const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, u8_t flags)
+{
+ s16_t i;
+ LWIP_ASSERT("netif->hwaddr_len == ETH_HWADDR_LEN", netif->hwaddr_len == ETH_HWADDR_LEN);
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n",
+ ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr),
+ (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2],
+ (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5]));
+ /* non-unicast address? */
+ if (ip4_addr_isany(ipaddr) ||
+ ip4_addr_isbroadcast(ipaddr, netif) ||
+ ip4_addr_ismulticast(ipaddr)) {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: will not add non-unicast IP address to ARP cache\n"));
+ return ERR_ARG;
+ }
+ /* find or create ARP entry */
+ i = etharp_find_entry(ipaddr, flags, netif);
+ /* bail out if no entry could be found */
+ if (i < 0) {
+ return (err_t)i;
+ }
+
+#if ETHARP_SUPPORT_STATIC_ENTRIES
+ if (flags & ETHARP_FLAG_STATIC_ENTRY) {
+ /* record static type */
+ arp_table[i].state = ETHARP_STATE_STATIC;
+ } else if (arp_table[i].state == ETHARP_STATE_STATIC) {
+ /* found entry is a static type, don't overwrite it */
+ return ERR_VAL;
+ } else
+#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */
+ {
+ /* mark it stable */
+ arp_table[i].state = ETHARP_STATE_STABLE;
+ }
+
+ /* record network interface */
+ arp_table[i].netif = netif;
+ /* insert in SNMP ARP index tree */
+ mib2_add_arp_entry(netif, &arp_table[i].ipaddr);
+
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: updating stable entry %"S16_F"\n", i));
+ /* update address */
+ SMEMCPY(&arp_table[i].ethaddr, ethaddr, ETH_HWADDR_LEN);
+ /* reset time stamp */
+ arp_table[i].ctime = 0;
+ /* this is where we will send out queued packets! */
+#if ARP_QUEUEING
+ while (arp_table[i].q != NULL) {
+ struct pbuf *p;
+ /* remember remainder of queue */
+ struct etharp_q_entry *q = arp_table[i].q;
+ /* pop first item off the queue */
+ arp_table[i].q = q->next;
+ /* get the packet pointer */
+ p = q->p;
+ /* now queue entry can be freed */
+ memp_free(MEMP_ARP_QUEUE, q);
+#else /* ARP_QUEUEING */
+ if (arp_table[i].q != NULL) {
+ struct pbuf *p = arp_table[i].q;
+ arp_table[i].q = NULL;
+#endif /* ARP_QUEUEING */
+ /* send the queued IP packet */
+ ethernet_output(netif, p, (struct eth_addr *)(netif->hwaddr), ethaddr, ETHTYPE_IP);
+ /* free the queued IP packet */
+ pbuf_free(p);
+ }
+ return ERR_OK;
+}
+
+#if ETHARP_SUPPORT_STATIC_ENTRIES
+/** Add a new static entry to the ARP table. If an entry exists for the
+ * specified IP address, this entry is overwritten.
+ * If packets are queued for the specified IP address, they are sent out.
+ *
+ * @param ipaddr IP address for the new static entry
+ * @param ethaddr ethernet address for the new static entry
+ * @return See return values of etharp_update_arp_entry
+ */
+err_t
+etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr)
+{
+ struct netif *netif;
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_add_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n",
+ ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr),
+ (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2],
+ (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5]));
+
+ netif = ip4_route(ipaddr);
+ if (netif == NULL) {
+ return ERR_RTE;
+ }
+
+ return etharp_update_arp_entry(netif, ipaddr, ethaddr, ETHARP_FLAG_TRY_HARD | ETHARP_FLAG_STATIC_ENTRY);
+}
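+/* Usage sketch (hypothetical addresses; must run in the TCPIP thread or with
+ * the core locked, as asserted above):
+ *
+ *   ip4_addr_t ip;
+ *   struct eth_addr mac = {{0x00, 0x23, 0xC1, 0xDE, 0xD0, 0x0D}};
+ *   IP4_ADDR(&ip, 192, 168, 0, 10);
+ *   etharp_add_static_entry(&ip, &mac);    // ERR_RTE if no netif routes there
+ */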
+
+/** Remove a static entry from the ARP table previously added with a call to
+ * etharp_add_static_entry.
+ *
+ * @param ipaddr IP address of the static entry to remove
+ * @return ERR_OK: entry removed
+ * ERR_MEM: entry wasn't found
+ * ERR_ARG: entry wasn't a static entry but a dynamic one
+ */
+err_t
+etharp_remove_static_entry(const ip4_addr_t *ipaddr)
+{
+ s16_t i;
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_remove_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr)));
+
+ /* find or create ARP entry */
+ i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, NULL);
+ /* bail out if no entry could be found */
+ if (i < 0) {
+ return (err_t)i;
+ }
+
+ if (arp_table[i].state != ETHARP_STATE_STATIC) {
+ /* entry wasn't a static entry, cannot remove it */
+ return ERR_ARG;
+ }
+ /* entry found, free it */
+ etharp_free_entry(i);
+ return ERR_OK;
+}
+#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */
+
+/**
+ * Remove all ARP table entries of the specified netif.
+ *
+ * @param netif points to a network interface
+ */
+void
+etharp_cleanup_netif(struct netif *netif)
+{
+ int i;
+
+ for (i = 0; i < ARP_TABLE_SIZE; ++i) {
+ u8_t state = arp_table[i].state;
+ if ((state != ETHARP_STATE_EMPTY) && (arp_table[i].netif == netif)) {
+ etharp_free_entry(i);
+ }
+ }
+}
+
+/**
+ * Finds a (stable) ethernet/IP address pair in the ARP table
+ * matching the given netif and IP address.
+ * @note the addresses in the ARP table are in network order!
+ *
+ * @param netif the netif the entry must match (only used with ETHARP_TABLE_MATCH_NETIF)
+ * @param ipaddr the (network order) IP address to look up
+ * @param eth_ret points to return pointer for the ethernet address
+ * @param ip_ret points to return pointer for the IP address
+ * @return table index if found, -1 otherwise
+ */
+ssize_t
+etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr,
+ struct eth_addr **eth_ret, const ip4_addr_t **ip_ret)
+{
+ s16_t i;
+
+ LWIP_ASSERT("eth_ret != NULL && ip_ret != NULL",
+ eth_ret != NULL && ip_ret != NULL);
+
+ LWIP_UNUSED_ARG(netif);
+
+ i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, netif);
+ if ((i >= 0) && (arp_table[i].state >= ETHARP_STATE_STABLE)) {
+ *eth_ret = &arp_table[i].ethaddr;
+ *ip_ret = &arp_table[i].ipaddr;
+ return i;
+ }
+ return -1;
+}
+
+/**
+ * Possibility to iterate over stable ARP table entries
+ *
+ * @param i entry number, 0 to ARP_TABLE_SIZE - 1
+ * @param ipaddr return value: IP address
+ * @param netif return value: points to interface
+ * @param eth_ret return value: ETH address
+ * @return 1 on valid index, 0 otherwise
+ */
+int
+etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret)
+{
+ LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL);
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+ LWIP_ASSERT("eth_ret != NULL", eth_ret != NULL);
+
+ if ((i < ARP_TABLE_SIZE) && (arp_table[i].state >= ETHARP_STATE_STABLE)) {
+ *ipaddr = &arp_table[i].ipaddr;
+ *netif = arp_table[i].netif;
+ *eth_ret = &arp_table[i].ethaddr;
+ return 1;
+ } else {
+ return 0;
+ }
+}
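+/* Iteration sketch: a hypothetical debug helper dumping all stable entries:
+ *
+ *   size_t idx;
+ *   ip4_addr_t *ip;
+ *   struct netif *nif;
+ *   struct eth_addr *eth;
+ *   for (idx = 0; idx < ARP_TABLE_SIZE; idx++) {
+ *     if (etharp_get_entry(idx, &ip, &nif, &eth)) {
+ *       printf("%s on netif %c%c\n", ip4addr_ntoa(ip), nif->name[0], nif->name[1]);
+ *     }
+ *   }
+ */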
+
+/**
+ * Responds to ARP requests directed to us. Upon an ARP reply to us, adds the
+ * entry to the cache and sends out any queued IP packets. Updates the cache
+ * with snooped address pairs.
+ *
+ * Should be called for incoming ARP packets. The pbuf in the argument
+ * is freed by this function.
+ *
+ * @param p The ARP packet that arrived on netif. Is freed by this function.
+ * @param netif The lwIP network interface on which the ARP packet pbuf arrived.
+ *
+ * @see pbuf_free()
+ */
+void
+etharp_input(struct pbuf *p, struct netif *netif)
+{
+ struct etharp_hdr *hdr;
+ /* these are aligned properly, whereas the ARP header fields might not be */
+ ip4_addr_t sipaddr, dipaddr;
+ u8_t for_us;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("netif != NULL", (netif != NULL), return;);
+
+ hdr = (struct etharp_hdr *)p->payload;
+
+ /* RFC 826 "Packet Reception": */
+ if ((hdr->hwtype != PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET)) ||
+ (hdr->hwlen != ETH_HWADDR_LEN) ||
+ (hdr->protolen != sizeof(ip4_addr_t)) ||
+ (hdr->proto != PP_HTONS(ETHTYPE_IP))) {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING,
+ ("etharp_input: packet dropped, wrong hw type, hwlen, proto, protolen or ethernet type (%"U16_F"/%"U16_F"/%"U16_F"/%"U16_F")\n",
+ hdr->hwtype, (u16_t)hdr->hwlen, hdr->proto, (u16_t)hdr->protolen));
+ ETHARP_STATS_INC(etharp.proterr);
+ ETHARP_STATS_INC(etharp.drop);
+ pbuf_free(p);
+ return;
+ }
+ ETHARP_STATS_INC(etharp.recv);
+
+#if LWIP_AUTOIP
+ /* We have to check if a host already has configured our random
+ * created link local address and continuously check if there is
+ * a host with this IP-address so we can detect collisions */
+ autoip_arp_reply(netif, hdr);
+#endif /* LWIP_AUTOIP */
+
+ /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without
+ * structure packing (not using structure copy which breaks strict-aliasing rules). */
+ IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr);
+ IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr);
+
+ /* this interface is not configured? */
+ if (ip4_addr_isany_val(*netif_ip4_addr(netif))) {
+ for_us = 0;
+ } else {
+ /* ARP packet directed to us? */
+ for_us = (u8_t)ip4_addr_cmp(&dipaddr, netif_ip4_addr(netif));
+ }
+
+ /* ARP message directed to us?
+ -> add IP address in ARP cache; assume requester wants to talk to us,
+ can result in directly sending the queued packets for this host.
+ ARP message not directed to us?
+ -> update the source IP address in the cache, if present */
+ etharp_update_arp_entry(netif, &sipaddr, &(hdr->shwaddr),
+ for_us ? ETHARP_FLAG_TRY_HARD : ETHARP_FLAG_FIND_ONLY);
+
+ /* now act on the message itself */
+ switch (hdr->opcode) {
+ /* ARP request? */
+ case PP_HTONS(ARP_REQUEST):
+ /* ARP request. If it asked for our address, we send out a
+ * reply. In any case, we time-stamp any existing ARP entry,
+ * and possibly send out an IP packet that was queued on it. */
+
+ LWIP_DEBUGF (ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP request\n"));
+ /* ARP request for our address? */
+ if (for_us) {
+ /* send ARP response */
+ etharp_raw(netif,
+ (struct eth_addr *)netif->hwaddr, &hdr->shwaddr,
+ (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif),
+ &hdr->shwaddr, &sipaddr,
+ ARP_REPLY);
+ /* we are not configured? */
+ } else if (ip4_addr_isany_val(*netif_ip4_addr(netif))) {
+ /* { for_us == 0 and netif->ip_addr.addr == 0 } */
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: we are unconfigured, ARP request ignored.\n"));
+ /* request was not directed to us */
+ } else {
+ /* { for_us == 0 and netif->ip_addr.addr != 0 } */
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP request was not for us.\n"));
+ }
+ break;
+ case PP_HTONS(ARP_REPLY):
+ /* ARP reply. We already updated the ARP cache earlier. */
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP reply\n"));
+#if (LWIP_DHCP && DHCP_DOES_ARP_CHECK)
+ /* DHCP wants to know about ARP replies from any host with an
+ * IP address also offered to us by the DHCP server. We do not
+ * want to take a duplicate IP address on a single network.
+ * @todo How should we handle redundant (fail-over) interfaces? */
+ dhcp_arp_reply(netif, &sipaddr);
+#endif /* (LWIP_DHCP && DHCP_DOES_ARP_CHECK) */
+ break;
+ default:
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP unknown opcode type %"S16_F"\n", lwip_htons(hdr->opcode)));
+ ETHARP_STATS_INC(etharp.err);
+ break;
+ }
+ /* free ARP packet */
+ pbuf_free(p);
+}
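+/* Dispatch sketch: drivers do not call etharp_input() directly; ARP frames
+ * reach it via ethernet_input() in netif/ethernet.c, roughly (simplified):
+ *
+ *   case PP_HTONS(ETHTYPE_ARP):
+ *     etharp_input(p, netif);   // consumes (frees) p
+ *     break;
+ */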
+
+/** Just a small helper function that sends a pbuf to an ethernet address
+ * in the arp_table specified by the index 'arp_idx'.
+ */
+static err_t
+etharp_output_to_arp_index(struct netif *netif, struct pbuf *q, netif_addr_idx_t arp_idx)
+{
+ LWIP_ASSERT("arp_table[arp_idx].state >= ETHARP_STATE_STABLE",
+ arp_table[arp_idx].state >= ETHARP_STATE_STABLE);
+ /* if arp table entry is about to expire: re-request it,
+ but only if its state is ETHARP_STATE_STABLE to prevent flooding the
+ network with ARP requests if this address is used frequently. */
+ if (arp_table[arp_idx].state == ETHARP_STATE_STABLE) {
+ if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_BROADCAST) {
+ /* issue a standard request using broadcast */
+ if (etharp_request(netif, &arp_table[arp_idx].ipaddr) == ERR_OK) {
+ arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1;
+ }
+ } else if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_UNICAST) {
+ /* issue a unicast request (for 15 seconds) to prevent unnecessary broadcast */
+ if (etharp_request_dst(netif, &arp_table[arp_idx].ipaddr, &arp_table[arp_idx].ethaddr) == ERR_OK) {
+ arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1;
+ }
+ }
+ }
+
+ return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), &arp_table[arp_idx].ethaddr, ETHTYPE_IP);
+}
+
+/**
+ * Resolve and fill-in Ethernet address header for outgoing IP packet.
+ *
+ * For IP multicast and broadcast, corresponding Ethernet addresses
+ * are selected and the packet is transmitted on the link.
+ *
+ * For unicast addresses, the packet is submitted to etharp_query(). In
+ * case the IP address is outside the local network, the IP address of
+ * the gateway is used.
+ *
+ * @param netif The lwIP network interface which the IP packet will be sent on.
+ * @param q The pbuf(s) containing the IP packet to be sent.
+ * @param ipaddr The IP address of the packet destination.
+ *
+ * @return
+ * - ERR_RTE No route to destination (no gateway to external networks),
+ * or the return type of either etharp_query() or ethernet_output().
+ */
+err_t
+etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr)
+{
+ const struct eth_addr *dest;
+ struct eth_addr mcastaddr;
+ const ip4_addr_t *dst_addr = ipaddr;
+
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+ LWIP_ASSERT("q != NULL", q != NULL);
+ LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL);
+
+ /* Determine the destination hardware address. Broadcasts and multicasts
+ * are special; other IP addresses are looked up in the ARP table. */
+
+ /* broadcast destination IP address? */
+ if (ip4_addr_isbroadcast(ipaddr, netif)) {
+ /* broadcast on Ethernet also */
+ dest = (const struct eth_addr *)&ethbroadcast;
+ /* multicast destination IP address? */
+ } else if (ip4_addr_ismulticast(ipaddr)) {
+ /* Hash IP multicast address to MAC address.*/
+ mcastaddr.addr[0] = LL_IP4_MULTICAST_ADDR_0;
+ mcastaddr.addr[1] = LL_IP4_MULTICAST_ADDR_1;
+ mcastaddr.addr[2] = LL_IP4_MULTICAST_ADDR_2;
+ mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f;
+ mcastaddr.addr[4] = ip4_addr3(ipaddr);
+ mcastaddr.addr[5] = ip4_addr4(ipaddr);
+ /* destination Ethernet address is multicast */
+ dest = &mcastaddr;
+ /* unicast destination IP address? */
+ } else {
+ netif_addr_idx_t i;
+ /* outside local network? if so, this can neither be a global broadcast nor
+ a subnet broadcast. */
+ if (!ip4_addr_netcmp(ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) &&
+ !ip4_addr_islinklocal(ipaddr)) {
+#if LWIP_AUTOIP
+ struct ip_hdr *iphdr = LWIP_ALIGNMENT_CAST(struct ip_hdr *, q->payload);
+ /* According to RFC 3927, section 2.6.2 (Forwarding Rules), a packet with
+ a link-local source address must always be "directly to its destination
+ on the same physical link. The host MUST NOT send the packet to any
+ router for forwarding". */
+ if (!ip4_addr_islinklocal(&iphdr->src))
+#endif /* LWIP_AUTOIP */
+ {
+#ifdef LWIP_HOOK_ETHARP_GET_GW
+ /* For advanced routing, a single default gateway might not be enough, so get
+ the IP address of the gateway to handle the current destination address. */
+ dst_addr = LWIP_HOOK_ETHARP_GET_GW(netif, ipaddr);
+ if (dst_addr == NULL)
+#endif /* LWIP_HOOK_ETHARP_GET_GW */
+ {
+ /* interface has default gateway? */
+ if (!ip4_addr_isany_val(*netif_ip4_gw(netif))) {
+ /* send to hardware address of default gateway IP address */
+ dst_addr = netif_ip4_gw(netif);
+ /* no default gateway available */
+ } else {
+ /* no route to destination error (default gateway missing) */
+ return ERR_RTE;
+ }
+ }
+ }
+ }
+#if LWIP_NETIF_HWADDRHINT
+ if (netif->hints != NULL) {
+ /* per-pcb cached entry was given */
+ netif_addr_idx_t etharp_cached_entry = netif->hints->addr_hint;
+ if (etharp_cached_entry < ARP_TABLE_SIZE) {
+#endif /* LWIP_NETIF_HWADDRHINT */
+ if ((arp_table[etharp_cached_entry].state >= ETHARP_STATE_STABLE) &&
+#if ETHARP_TABLE_MATCH_NETIF
+ (arp_table[etharp_cached_entry].netif == netif) &&
+#endif
+ (ip4_addr_cmp(dst_addr, &arp_table[etharp_cached_entry].ipaddr))) {
+ /* the per-pcb-cached entry is stable and the right one! */
+ ETHARP_STATS_INC(etharp.cachehit);
+ return etharp_output_to_arp_index(netif, q, etharp_cached_entry);
+ }
+#if LWIP_NETIF_HWADDRHINT
+ }
+ }
+#endif /* LWIP_NETIF_HWADDRHINT */
+
+ /* find stable entry: do this here since this is a critical path for
+ throughput and etharp_find_entry() is kind of slow */
+ for (i = 0; i < ARP_TABLE_SIZE; i++) {
+ if ((arp_table[i].state >= ETHARP_STATE_STABLE) &&
+#if ETHARP_TABLE_MATCH_NETIF
+ (arp_table[i].netif == netif) &&
+#endif
+ (ip4_addr_cmp(dst_addr, &arp_table[i].ipaddr))) {
+ /* found an existing, stable entry */
+ ETHARP_SET_ADDRHINT(netif, i);
+ return etharp_output_to_arp_index(netif, q, i);
+ }
+ }
+ /* no stable entry found, use the (slower) query function:
+ queue on destination Ethernet address belonging to ipaddr */
+ return etharp_query(netif, dst_addr, q);
+ }
+
+ /* continuation for multicast/broadcast destinations */
+ /* obtain source Ethernet address of the given interface */
+ /* send packet directly on the link */
+ return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), dest, ETHTYPE_IP);
+}
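+/* Worked example for the multicast hash above: IP 239.1.2.3 maps to MAC
+ * 01:00:5e:01:02:03. The 0x7f mask drops the top bit of the second octet
+ * (only the low-order 23 bits of the IP address are used, per RFC 1112
+ * section 6.4), so e.g. 224.129.2.3 maps to the same MAC address. */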
+
+/**
+ * Send an ARP request for the given IP address and/or queue a packet.
+ *
+ * If the IP address was not yet in the cache, a pending ARP cache entry
+ * is added and an ARP request is sent for the given address. The packet
+ * is queued on this entry.
+ *
+ * If the IP address was already pending in the cache, a new ARP request
+ * is sent for the given address. The packet is queued on this entry.
+ *
+ * If the IP address was already stable in the cache, and a packet is
+ * given, it is directly sent and no ARP request is sent out.
+ *
+ * If the IP address was already stable in the cache, and no packet is
+ * given, an ARP request is sent out.
+ *
+ * @param netif The lwIP network interface on which ipaddr
+ * must be queried for.
+ * @param ipaddr The IP address to be resolved.
+ * @param q If non-NULL, a pbuf that must be delivered to the IP address.
+ * q is not freed by this function.
+ *
+ * @note q must only be ONE packet, not a packet queue!
+ *
+ * @return
+ * - ERR_BUF Could not make room for Ethernet header.
+ * - ERR_MEM Hardware address unknown, and no more ARP entries available
+ * to query for address or queue the packet.
+ * - ERR_MEM Could not queue packet due to memory shortage.
+ * - ERR_RTE No route to destination (no gateway to external networks).
+ * - ERR_ARG Non-unicast address given, those will not appear in ARP cache.
+ *
+ */
+err_t
+etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q)
+{
+ struct eth_addr *srcaddr = (struct eth_addr *)netif->hwaddr;
+ err_t result = ERR_MEM;
+ int is_new_entry = 0;
+ s16_t i_err;
+ netif_addr_idx_t i;
+
+ /* non-unicast address? */
+ if (ip4_addr_isbroadcast(ipaddr, netif) ||
+ ip4_addr_ismulticast(ipaddr) ||
+ ip4_addr_isany(ipaddr)) {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: will not add non-unicast IP address to ARP cache\n"));
+ return ERR_ARG;
+ }
+
+ /* find entry in ARP cache, ask to create entry if queueing packet */
+ i_err = etharp_find_entry(ipaddr, ETHARP_FLAG_TRY_HARD, netif);
+
+ /* could not find or create entry? */
+ if (i_err < 0) {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not create ARP entry\n"));
+ if (q) {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: packet dropped\n"));
+ ETHARP_STATS_INC(etharp.memerr);
+ }
+ return (err_t)i_err;
+ }
+ LWIP_ASSERT("type overflow", (size_t)i_err < NETIF_ADDR_IDX_MAX);
+ i = (netif_addr_idx_t)i_err;
+
+ /* mark a fresh entry as pending (we just sent a request) */
+ if (arp_table[i].state == ETHARP_STATE_EMPTY) {
+ is_new_entry = 1;
+ arp_table[i].state = ETHARP_STATE_PENDING;
+ /* record network interface for re-sending arp request in etharp_tmr */
+ arp_table[i].netif = netif;
+ }
+
+ /* { i is either a STABLE or (new or existing) PENDING entry } */
+ LWIP_ASSERT("arp_table[i].state == PENDING or STABLE",
+ ((arp_table[i].state == ETHARP_STATE_PENDING) ||
+ (arp_table[i].state >= ETHARP_STATE_STABLE)));
+
+ /* do we have a new entry? or an implicit query request? */
+ if (is_new_entry || (q == NULL)) {
+ /* try to resolve it; send out ARP request */
+ result = etharp_request(netif, ipaddr);
+ if (result != ERR_OK) {
+ /* ARP request couldn't be sent */
+ /* We don't re-send arp request in etharp_tmr, but we still queue packets,
+ since this failure could be temporary, and the next packet calling
+ etharp_query again could lead to sending the queued packets. */
+ }
+ if (q == NULL) {
+ return result;
+ }
+ }
+
+ /* packet given? */
+ LWIP_ASSERT("q != NULL", q != NULL);
+ /* stable entry? */
+ if (arp_table[i].state >= ETHARP_STATE_STABLE) {
+ /* we have a valid IP->Ethernet address mapping */
+ ETHARP_SET_ADDRHINT(netif, i);
+ /* send the packet */
+ result = ethernet_output(netif, q, srcaddr, &(arp_table[i].ethaddr), ETHTYPE_IP);
+ /* pending entry? (either just created or already pending) */
+ } else if (arp_table[i].state == ETHARP_STATE_PENDING) {
+ /* entry is still pending, queue the given packet 'q' */
+ struct pbuf *p;
+ int copy_needed = 0;
+ /* IF q includes a pbuf that must be copied, copy the whole chain into a
+ * new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */
+ p = q;
+ while (p) {
+ LWIP_ASSERT("no packet queues allowed!", (p->len != p->tot_len) || (p->next == 0));
+ if (PBUF_NEEDS_COPY(p)) {
+ copy_needed = 1;
+ break;
+ }
+ p = p->next;
+ }
+ if (copy_needed) {
+ /* copy the whole packet into new pbufs */
+ p = pbuf_clone(PBUF_LINK, PBUF_RAM, q);
+ } else {
+ /* referencing the old pbuf is enough */
+ p = q;
+ pbuf_ref(p);
+ }
+ /* packet could be taken over? */
+ if (p != NULL) {
+ /* queue packet ... */
+#if ARP_QUEUEING
+ struct etharp_q_entry *new_entry;
+ /* allocate a new arp queue entry */
+ new_entry = (struct etharp_q_entry *)memp_malloc(MEMP_ARP_QUEUE);
+ if (new_entry != NULL) {
+ unsigned int qlen = 0;
+ new_entry->next = 0;
+ new_entry->p = p;
+ if (arp_table[i].q != NULL) {
+ /* queue was already existent, append the new entry to the end */
+ struct etharp_q_entry *r;
+ r = arp_table[i].q;
+ qlen++;
+ while (r->next != NULL) {
+ r = r->next;
+ qlen++;
+ }
+ r->next = new_entry;
+ } else {
+ /* queue did not exist, first item in queue */
+ arp_table[i].q = new_entry;
+ }
+#if ARP_QUEUE_LEN
+ if (qlen >= ARP_QUEUE_LEN) {
+ struct etharp_q_entry *old;
+ old = arp_table[i].q;
+ arp_table[i].q = arp_table[i].q->next;
+ pbuf_free(old->p);
+ memp_free(MEMP_ARP_QUEUE, old);
+ }
+#endif
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, i));
+ result = ERR_OK;
+ } else {
+ /* the pool MEMP_ARP_QUEUE is empty */
+ pbuf_free(p);
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q));
+ result = ERR_MEM;
+ }
+#else /* ARP_QUEUEING */
+ /* always queue one packet per ARP request only, freeing a previously queued packet */
+ if (arp_table[i].q != NULL) {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: dropped previously queued packet %p for ARP entry %"U16_F"\n", (void *)q, (u16_t)i));
+ pbuf_free(arp_table[i].q);
+ }
+ arp_table[i].q = p;
+ result = ERR_OK;
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, (u16_t)i));
+#endif /* ARP_QUEUEING */
+ } else {
+ ETHARP_STATS_INC(etharp.memerr);
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q));
+ result = ERR_MEM;
+ }
+ }
+ return result;
+}
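+/* Usage sketch (hypothetical peer address): prime the ARP cache before
+ * time-critical traffic by sending a query without queueing a packet:
+ *
+ *   ip4_addr_t peer;
+ *   IP4_ADDR(&peer, 192, 168, 0, 1);
+ *   etharp_query(netif, &peer, NULL);   // q == NULL: ARP request only
+ */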
+
+/**
+ * Send a raw ARP packet (opcode and all addresses can be modified)
+ *
+ * @param netif the lwip network interface on which to send the ARP packet
+ * @param ethsrc_addr the source MAC address for the ethernet header
+ * @param ethdst_addr the destination MAC address for the ethernet header
+ * @param hwsrc_addr the source MAC address for the ARP protocol header
+ * @param ipsrc_addr the source IP address for the ARP protocol header
+ * @param hwdst_addr the destination MAC address for the ARP protocol header
+ * @param ipdst_addr the destination IP address for the ARP protocol header
+ * @param opcode the type of the ARP packet
+ * @return ERR_OK if the ARP packet has been sent
+ * ERR_MEM if the ARP packet couldn't be allocated
+ * any other err_t on failure
+ */
+static err_t
+etharp_raw(struct netif *netif, const struct eth_addr *ethsrc_addr,
+ const struct eth_addr *ethdst_addr,
+ const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr,
+ const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr,
+ const u16_t opcode)
+{
+ struct pbuf *p;
+ err_t result = ERR_OK;
+ struct etharp_hdr *hdr;
+
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+
+ /* allocate a pbuf for the outgoing ARP request packet */
+ p = pbuf_alloc(PBUF_LINK, SIZEOF_ETHARP_HDR, PBUF_RAM);
+ /* could allocate a pbuf for an ARP request? */
+ if (p == NULL) {
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
+ ("etharp_raw: could not allocate pbuf for ARP request.\n"));
+ ETHARP_STATS_INC(etharp.memerr);
+ return ERR_MEM;
+ }
+ LWIP_ASSERT("check that first pbuf can hold struct etharp_hdr",
+ (p->len >= SIZEOF_ETHARP_HDR));
+
+ hdr = (struct etharp_hdr *)p->payload;
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_raw: sending raw ARP packet.\n"));
+ hdr->opcode = lwip_htons(opcode);
+
+ LWIP_ASSERT("netif->hwaddr_len must be the same as ETH_HWADDR_LEN for etharp!",
+ (netif->hwaddr_len == ETH_HWADDR_LEN));
+
+ /* Write the ARP MAC-Addresses */
+ SMEMCPY(&hdr->shwaddr, hwsrc_addr, ETH_HWADDR_LEN);
+ SMEMCPY(&hdr->dhwaddr, hwdst_addr, ETH_HWADDR_LEN);
+ /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without
+ * structure packing. */
+ IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->sipaddr, ipsrc_addr);
+ IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->dipaddr, ipdst_addr);
+
+ hdr->hwtype = PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET);
+ hdr->proto = PP_HTONS(ETHTYPE_IP);
+ /* set hwlen and protolen */
+ hdr->hwlen = ETH_HWADDR_LEN;
+ hdr->protolen = sizeof(ip4_addr_t);
+
+ /* send ARP query */
+#if LWIP_AUTOIP
+ /* If we are using Link-Local, all ARP packets that contain a Link-Local
+ * 'sender IP address' MUST be sent using link-layer broadcast instead of
+ * link-layer unicast. (See RFC3927 Section 2.5, last paragraph) */
+ if (ip4_addr_islinklocal(ipsrc_addr)) {
+ ethernet_output(netif, p, ethsrc_addr, &ethbroadcast, ETHTYPE_ARP);
+ } else
+#endif /* LWIP_AUTOIP */
+ {
+ ethernet_output(netif, p, ethsrc_addr, ethdst_addr, ETHTYPE_ARP);
+ }
+
+ ETHARP_STATS_INC(etharp.xmit);
+ /* free ARP query packet */
+ pbuf_free(p);
+ p = NULL;
+
+ return result;
+}
+
+/**
+ * Send an ARP request packet asking for ipaddr to a specific eth address.
+ * Used to send unicast request to refresh the ARP table just before an entry
+ * times out
+ *
+ * @param netif the lwip network interface on which to send the request
+ * @param ipaddr the IP address for which to ask
+ * @param hw_dst_addr the ethernet address to send this packet to
+ * @return ERR_OK if the request has been sent
+ * ERR_MEM if the ARP packet couldn't be allocated
+ * any other err_t on failure
+ */
+static err_t
+etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr)
+{
+ return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, hw_dst_addr,
+ (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), &ethzero,
+ ipaddr, ARP_REQUEST);
+}
+
+/**
+ * Send an ARP request packet asking for ipaddr.
+ *
+ * @param netif the lwip network interface on which to send the request
+ * @param ipaddr the IP address for which to ask
+ * @return ERR_OK if the request has been sent
+ * ERR_MEM if the ARP packet couldn't be allocated
+ * any other err_t on failure
+ */
+err_t
+etharp_request(struct netif *netif, const ip4_addr_t *ipaddr)
+{
+ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_request: sending ARP request.\n"));
+ return etharp_request_dst(netif, ipaddr, &ethbroadcast);
+}
+
+#endif /* LWIP_IPV4 && LWIP_ARP */
diff --git a/lwip/src/core/ipv4/icmp.c b/lwip/src/core/ipv4/icmp.c
new file mode 100644
index 0000000..a462ccd
--- /dev/null
+++ b/lwip/src/core/ipv4/icmp.c
@@ -0,0 +1,404 @@
+/**
+ * @file
+ * ICMP - Internet Control Message Protocol
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+/* Some ICMP messages should be passed to the transport protocols. This
+ is not implemented. */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/icmp.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/ip.h"
+#include "lwip/def.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/** Small optimization: set to 0 if an incoming PBUF_POOL pbuf can always be
+ * used to modify and send a response packet (and to 1 if this is not the case,
+ * e.g. when the link header is stripped off on reception) */
+#ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN
+#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1
+#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */
+
+/* The amount of data from the original packet to return in a dest-unreachable */
+#define ICMP_DEST_UNREACH_DATASIZE 8
+
+static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code);
+
+/**
+ * Processes ICMP input packets, called from ip_input().
+ *
+ * Currently only processes icmp echo requests and sends
+ * out the echo response.
+ *
+ * @param p the icmp echo request packet, p->payload pointing to the icmp header
+ * @param inp the netif on which this packet was received
+ */
+void
+icmp_input(struct pbuf *p, struct netif *inp)
+{
+ u8_t type;
+#ifdef LWIP_DEBUG
+ u8_t code;
+#endif /* LWIP_DEBUG */
+ struct icmp_echo_hdr *iecho;
+ const struct ip_hdr *iphdr_in;
+ u16_t hlen;
+ const ip4_addr_t *src;
+
+ ICMP_STATS_INC(icmp.recv);
+ MIB2_STATS_INC(mib2.icmpinmsgs);
+
+ iphdr_in = ip4_current_header();
+ hlen = IPH_HL_BYTES(iphdr_in);
+ if (hlen < IP_HLEN) {
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen));
+ goto lenerr;
+ }
+ if (p->len < sizeof(u16_t) * 2) {
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len));
+ goto lenerr;
+ }
+
+ type = *((u8_t *)p->payload);
+#ifdef LWIP_DEBUG
+ code = *(((u8_t *)p->payload) + 1);
+ /* if debug is enabled but debug statement below is somehow disabled: */
+ LWIP_UNUSED_ARG(code);
+#endif /* LWIP_DEBUG */
+ switch (type) {
+ case ICMP_ER:
+ /* This is OK, echo reply might have been parsed by a raw PCB
+ (as obviously, an echo request has been sent, too). */
+ MIB2_STATS_INC(mib2.icmpinechoreps);
+ break;
+ case ICMP_ECHO:
+ MIB2_STATS_INC(mib2.icmpinechos);
+ src = ip4_current_dest_addr();
+ /* multicast destination address? */
+ if (ip4_addr_ismulticast(ip4_current_dest_addr())) {
+#if LWIP_MULTICAST_PING
+ /* For multicast, use address of receiving interface as source address */
+ src = netif_ip4_addr(inp);
+#else /* LWIP_MULTICAST_PING */
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n"));
+ goto icmperr;
+#endif /* LWIP_MULTICAST_PING */
+ }
+ /* broadcast destination address? */
+ if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) {
+#if LWIP_BROADCAST_PING
+ /* For broadcast, use address of receiving interface as source address */
+ src = netif_ip4_addr(inp);
+#else /* LWIP_BROADCAST_PING */
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n"));
+ goto icmperr;
+#endif /* LWIP_BROADCAST_PING */
+ }
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n"));
+ if (p->tot_len < sizeof(struct icmp_echo_hdr)) {
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n"));
+ goto lenerr;
+ }
+#if CHECKSUM_CHECK_ICMP
+ IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) {
+ if (inet_chksum_pbuf(p) != 0) {
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n"));
+ pbuf_free(p);
+ ICMP_STATS_INC(icmp.chkerr);
+ MIB2_STATS_INC(mib2.icmpinerrors);
+ return;
+ }
+ }
+#endif
+#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN
+ if (pbuf_add_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) {
+ /* p is not big enough to contain link headers
+ * allocate a new one and copy p into it
+ */
+ struct pbuf *r;
+ u16_t alloc_len = (u16_t)(p->tot_len + hlen);
+ if (alloc_len < p->tot_len) {
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed (tot_len overflow)\n"));
+ goto icmperr;
+ }
+ /* allocate new packet buffer with space for link headers */
+ r = pbuf_alloc(PBUF_LINK, alloc_len, PBUF_RAM);
+ if (r == NULL) {
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n"));
+ goto icmperr;
+ }
+ if (r->len < hlen + sizeof(struct icmp_echo_hdr)) {
+ LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header"));
+ pbuf_free(r);
+ goto icmperr;
+ }
+ /* copy the ip header */
+ MEMCPY(r->payload, iphdr_in, hlen);
+ /* switch r->payload back to icmp header (cannot fail) */
+ if (pbuf_remove_header(r, hlen)) {
+ LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0);
+ pbuf_free(r);
+ goto icmperr;
+ }
+ /* copy the rest of the packet without ip header */
+ if (pbuf_copy(r, p) != ERR_OK) {
+ LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed"));
+ pbuf_free(r);
+ goto icmperr;
+ }
+ /* free the original p */
+ pbuf_free(p);
+ /* we now have an identical copy of p that has room for link headers */
+ p = r;
+ } else {
+ /* restore p->payload to point to icmp header (cannot fail) */
+ if (pbuf_remove_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) {
+ LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0);
+ goto icmperr;
+ }
+ }
+#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */
+ /* At this point, all checks are OK. */
+ /* We generate an answer by switching the dest and src ip addresses,
+ * setting the icmp type to ECHO_RESPONSE and updating the checksum. */
+ iecho = (struct icmp_echo_hdr *)p->payload;
+ if (pbuf_add_header(p, hlen)) {
+ LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet"));
+ } else {
+ err_t ret;
+ struct ip_hdr *iphdr = (struct ip_hdr *)p->payload;
+ ip4_addr_copy(iphdr->src, *src);
+ ip4_addr_copy(iphdr->dest, *ip4_current_src_addr());
+ ICMPH_TYPE_SET(iecho, ICMP_ER);
+#if CHECKSUM_GEN_ICMP
+ IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) {
+ /* adjust the checksum */
+ if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) {
+ iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS((u16_t)(ICMP_ECHO << 8)) + 1);
+ } else {
+ iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS(ICMP_ECHO << 8));
+ }
+ }
+#if LWIP_CHECKSUM_CTRL_PER_NETIF
+ else {
+ iecho->chksum = 0;
+ }
+#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */
+#else /* CHECKSUM_GEN_ICMP */
+ iecho->chksum = 0;
+#endif /* CHECKSUM_GEN_ICMP */
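+ /* Note: the code above is an incremental checksum update (RFC 1624):
+ clearing the type field removes the 16-bit word (ICMP_ECHO << 8) from
+ the one's-complement sum, so the same value is added back to the stored
+ checksum, with an end-around carry when it would overflow 0xffff. */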
+
+ /* Set the correct TTL and recalculate the header checksum. */
+ IPH_TTL_SET(iphdr, ICMP_TTL);
+ IPH_CHKSUM_SET(iphdr, 0);
+#if CHECKSUM_GEN_IP
+ IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) {
+ IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen));
+ }
+#endif /* CHECKSUM_GEN_IP */
+
+ ICMP_STATS_INC(icmp.xmit);
+ /* increase number of messages attempted to send */
+ MIB2_STATS_INC(mib2.icmpoutmsgs);
+ /* increase number of echo replies attempted to send */
+ MIB2_STATS_INC(mib2.icmpoutechoreps);
+
+ /* send an ICMP packet */
+ ret = ip4_output_if(p, src, LWIP_IP_HDRINCL,
+ ICMP_TTL, 0, IP_PROTO_ICMP, inp);
+ if (ret != ERR_OK) {
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret)));
+ }
+ }
+ break;
+ default:
+ if (type == ICMP_DUR) {
+ MIB2_STATS_INC(mib2.icmpindestunreachs);
+ } else if (type == ICMP_TE) {
+ MIB2_STATS_INC(mib2.icmpintimeexcds);
+ } else if (type == ICMP_PP) {
+ MIB2_STATS_INC(mib2.icmpinparmprobs);
+ } else if (type == ICMP_SQ) {
+ MIB2_STATS_INC(mib2.icmpinsrcquenchs);
+ } else if (type == ICMP_RD) {
+ MIB2_STATS_INC(mib2.icmpinredirects);
+ } else if (type == ICMP_TS) {
+ MIB2_STATS_INC(mib2.icmpintimestamps);
+ } else if (type == ICMP_TSR) {
+ MIB2_STATS_INC(mib2.icmpintimestampreps);
+ } else if (type == ICMP_AM) {
+ MIB2_STATS_INC(mib2.icmpinaddrmasks);
+ } else if (type == ICMP_AMR) {
+ MIB2_STATS_INC(mib2.icmpinaddrmaskreps);
+ }
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n",
+ (s16_t)type, (s16_t)code));
+ ICMP_STATS_INC(icmp.proterr);
+ ICMP_STATS_INC(icmp.drop);
+ }
+ pbuf_free(p);
+ return;
+lenerr:
+ pbuf_free(p);
+ ICMP_STATS_INC(icmp.lenerr);
+ MIB2_STATS_INC(mib2.icmpinerrors);
+ return;
+#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING
+icmperr:
+ pbuf_free(p);
+ ICMP_STATS_INC(icmp.err);
+ MIB2_STATS_INC(mib2.icmpinerrors);
+ return;
+#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */
+}
+
+/**
+ * Send an icmp 'destination unreachable' packet, called from ip_input() if
+ * the transport layer protocol is unknown and from udp_input() if the local
+ * port is not bound.
+ *
+ * @param p the input packet for which the 'unreachable' should be sent,
+ * p->payload pointing to the IP header
+ * @param t type of the 'unreachable' packet
+ */
+void
+icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t)
+{
+ MIB2_STATS_INC(mib2.icmpoutdestunreachs);
+ icmp_send_response(p, ICMP_DUR, t);
+}
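+/* Call-site sketch: a transport layer that finds no matching local port can
+ * answer with "port unreachable"; p->payload must point to the offending IP
+ * header when calling (as udp_input() does):
+ *
+ *   icmp_dest_unreach(p, ICMP_DUR_PORT);
+ */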
+
+#if IP_FORWARD || IP_REASSEMBLY
+/**
+ * Send a 'time exceeded' packet, called from ip_forward() if TTL is 0.
+ *
+ * @param p the input packet for which the 'time exceeded' should be sent,
+ * p->payload pointing to the IP header
+ * @param t type of the 'time exceeded' packet
+ */
+void
+icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t)
+{
+ MIB2_STATS_INC(mib2.icmpouttimeexcds);
+ icmp_send_response(p, ICMP_TE, t);
+}
+
+#endif /* IP_FORWARD || IP_REASSEMBLY */
+
+/**
+ * Send an icmp packet in response to an incoming packet.
+ *
+ * @param p the input packet for which the 'unreachable' should be sent,
+ * p->payload pointing to the IP header
+ * @param type Type of the ICMP header
+ * @param code Code of the ICMP header
+ */
+static void
+icmp_send_response(struct pbuf *p, u8_t type, u8_t code)
+{
+ struct pbuf *q;
+ struct ip_hdr *iphdr;
+ /* we can use the echo header here */
+ struct icmp_echo_hdr *icmphdr;
+ ip4_addr_t iphdr_src;
+ struct netif *netif;
+
+ /* increase number of messages attempted to send */
+ MIB2_STATS_INC(mib2.icmpoutmsgs);
+
+ /* ICMP header + IP header + 8 bytes of data */
+ q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE,
+ PBUF_RAM);
+ if (q == NULL) {
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMP packet.\n"));
+ MIB2_STATS_INC(mib2.icmpouterrors);
+ return;
+ }
+ LWIP_ASSERT("check that first pbuf can hold icmp message",
+ (q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE)));
+
+ iphdr = (struct ip_hdr *)p->payload;
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded from "));
+ ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src);
+ LWIP_DEBUGF(ICMP_DEBUG, (" to "));
+ ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest);
+ LWIP_DEBUGF(ICMP_DEBUG, ("\n"));
+
+ icmphdr = (struct icmp_echo_hdr *)q->payload;
+ icmphdr->type = type;
+ icmphdr->code = code;
+ icmphdr->id = 0;
+ icmphdr->seqno = 0;
+
+ /* copy fields from original packet */
+ SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload,
+ IP_HLEN + ICMP_DEST_UNREACH_DATASIZE);
+
+ ip4_addr_copy(iphdr_src, iphdr->src);
+#ifdef LWIP_HOOK_IP4_ROUTE_SRC
+ {
+ ip4_addr_t iphdr_dst;
+ ip4_addr_copy(iphdr_dst, iphdr->dest);
+ netif = ip4_route_src(&iphdr_dst, &iphdr_src);
+ }
+#else
+ netif = ip4_route(&iphdr_src);
+#endif
+ if (netif != NULL) {
+ /* calculate checksum */
+ icmphdr->chksum = 0;
+#if CHECKSUM_GEN_ICMP
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) {
+ icmphdr->chksum = inet_chksum(icmphdr, q->len);
+ }
+#endif
+ ICMP_STATS_INC(icmp.xmit);
+ ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif);
+ }
+ pbuf_free(q);
+}
+
+#endif /* LWIP_IPV4 && LWIP_ICMP */
diff --git a/lwip/src/core/ipv4/igmp.c b/lwip/src/core/ipv4/igmp.c
new file mode 100644
index 0000000..b655aa3
--- /dev/null
+++ b/lwip/src/core/ipv4/igmp.c
@@ -0,0 +1,801 @@
+/**
+ * @file
+ * IGMP - Internet Group Management Protocol
+ *
+ * @defgroup igmp IGMP
+ * @ingroup ip4
+ * To be called from TCPIP thread
+ */
+
+/*
+ * Copyright (c) 2002 CITEL Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * This file is a contribution to the lwIP TCP/IP stack.
+ * The Swedish Institute of Computer Science and Adam Dunkels
+ * are specifically granted permission to redistribute this
+ * source code.
+*/
+
+/*-------------------------------------------------------------
+Note 1)
+Although the RFC requires both V1 and V2 capability,
+we only support V2, since V1 is by now very old (August 1989).
+V1 support can be added later if required.
+
+A debug print and a statistic have been implemented to
+make such traffic visible.
+-------------------------------------------------------------
+-------------------------------------------------------------
+Note 2)
+A query for a specific group address (as opposed to ALLHOSTS)
+has now been implemented, although it is unclear whether it
+is actually required.
+
+A debug print and a statistic have been implemented to
+make such traffic visible.
+-------------------------------------------------------------
+-------------------------------------------------------------
+Note 3)
+The router alert option (RFC 2113) is set on outgoing packets,
+but is not rigorously checked on incoming packets.
+-------------------------------------------------------------
+Steve Reynolds
+------------------------------------------------------------*/
+
+/*-----------------------------------------------------------------------------
+ * RFC 988 - Host extensions for IP multicasting - V0
+ * RFC 1054 - Host extensions for IP multicasting -
+ * RFC 1112 - Host extensions for IP multicasting - V1
+ * RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard)
+ * RFC 3376 - Internet Group Management Protocol, Version 3 - V3
+ * RFC 4604 - Using Internet Group Management Protocol Version 3... - V3+
+ * RFC 2113 - IP Router Alert Option -
+ *----------------------------------------------------------------------------*/
+
+/*-----------------------------------------------------------------------------
+ * Includes
+ *----------------------------------------------------------------------------*/
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/igmp.h"
+#include "lwip/debug.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/ip.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/stats.h"
+#include "lwip/prot/igmp.h"
+
+#include <string.h>
+
+static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr);
+static err_t igmp_remove_group(struct netif *netif, struct igmp_group *group);
+static void igmp_timeout(struct netif *netif, struct igmp_group *group);
+static void igmp_start_timer(struct igmp_group *group, u8_t max_time);
+static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp);
+static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif);
+static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type);
+
+static ip4_addr_t allsystems;
+static ip4_addr_t allrouters;
+
+/**
+ * Initialize the IGMP module
+ */
+void
+igmp_init(void)
+{
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n"));
+
+ IP4_ADDR(&allsystems, 224, 0, 0, 1);
+ IP4_ADDR(&allrouters, 224, 0, 0, 2);
+}
+
+/**
+ * Start IGMP processing on interface
+ *
+ * @param netif network interface on which to start IGMP processing
+ */
+err_t
+igmp_start(struct netif *netif)
+{
+ struct igmp_group *group;
+
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void *)netif));
+
+ group = igmp_lookup_group(netif, &allsystems);
+
+ if (group != NULL) {
+ group->group_state = IGMP_GROUP_IDLE_MEMBER;
+ group->use++;
+
+ /* Allow the igmp messages at the MAC level */
+ if (netif->igmp_mac_filter != NULL) {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD "));
+ ip4_addr_debug_print_val(IGMP_DEBUG, allsystems);
+ LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
+ netif->igmp_mac_filter(netif, &allsystems, NETIF_ADD_MAC_FILTER);
+ }
+
+ return ERR_OK;
+ }
+
+ return ERR_MEM;
+}
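+
+/* A minimal sketch (not upstream code) of how a port typically enables IGMP
+ * for an interface: the driver init callback sets NETIF_FLAG_IGMP and an
+ * optional MAC filter hook, and the stack then runs igmp_start() for the
+ * netif. "example_mac_filter" is a hypothetical driver function. */
+#if 0 /* illustrative only */
+static err_t
+example_netif_init(struct netif *netif)
+{
+  netif->flags |= NETIF_FLAG_IGMP;                      /* enable IGMP here */
+  netif_set_igmp_mac_filter(netif, example_mac_filter); /* optional */
+  return ERR_OK;
+}
+#endif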
+
+/**
+ * Stop IGMP processing on interface
+ *
+ * @param netif network interface on which to stop IGMP processing
+ */
+err_t
+igmp_stop(struct netif *netif)
+{
+ struct igmp_group *group = netif_igmp_data(netif);
+
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, NULL);
+
+ while (group != NULL) {
+ struct igmp_group *next = group->next; /* avoid use-after-free below */
+
+ /* disable the group at the MAC level */
+ if (netif->igmp_mac_filter != NULL) {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL "));
+ ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address);
+ LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
+ netif->igmp_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER);
+ }
+
+ /* free group */
+ memp_free(MEMP_IGMP_GROUP, group);
+
+ /* move to "next" */
+ group = next;
+ }
+ return ERR_OK;
+}
+
+/**
+ * Report IGMP memberships for this interface
+ *
+ * @param netif network interface on which to report IGMP memberships
+ */
+void
+igmp_report_groups(struct netif *netif)
+{
+ struct igmp_group *group = netif_igmp_data(netif);
+
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void *)netif));
+
+ /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */
+ if (group != NULL) {
+ group = group->next;
+ }
+
+ while (group != NULL) {
+ igmp_delaying_member(group, IGMP_JOIN_DELAYING_MEMBER_TMR);
+ group = group->next;
+ }
+}
+
+/**
+ * Search for a group in the netif's igmp group list
+ *
+ * @param ifp the network interface whose group list to search
+ * @param addr the group ip address to search for
+ * @return a struct igmp_group* if the group has been found,
+ * NULL if the group wasn't found.
+ */
+struct igmp_group *
+igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr)
+{
+ struct igmp_group *group = netif_igmp_data(ifp);
+
+ while (group != NULL) {
+ if (ip4_addr_cmp(&(group->group_address), addr)) {
+ return group;
+ }
+ group = group->next;
+ }
+
+ /* to be clearer, we return NULL here instead of
+ * 'group' (which is also NULL at this point).
+ */
+ return NULL;
+}
+
+/**
+ * Search for a specific igmp group and create a new one if not found.
+ *
+ * @param ifp the network interface whose group list to search
+ * @param addr the group ip address to search for
+ * @return a struct igmp_group*,
+ * NULL on memory error.
+ */
+static struct igmp_group *
+igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr)
+{
+ struct igmp_group *group;
+ struct igmp_group *list_head = netif_igmp_data(ifp);
+
+ /* Search if the group already exists */
+ group = igmp_lookfor_group(ifp, addr);
+ if (group != NULL) {
+ /* Group already exists. */
+ return group;
+ }
+
+ /* Group doesn't exist yet, create a new one */
+ group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP);
+ if (group != NULL) {
+ ip4_addr_set(&(group->group_address), addr);
+ group->timer = 0; /* Not running */
+ group->group_state = IGMP_GROUP_NON_MEMBER;
+ group->last_reporter_flag = 0;
+ group->use = 0;
+
+ /* Ensure allsystems group is always first in list */
+ if (list_head == NULL) {
+ /* this is the first entry in linked list */
+ LWIP_ASSERT("igmp_lookup_group: first group must be allsystems",
+ (ip4_addr_cmp(addr, &allsystems) != 0));
+ group->next = NULL;
+ netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, group);
+ } else {
+ /* append _after_ first entry */
+ LWIP_ASSERT("igmp_lookup_group: all except first group must not be allsystems",
+ (ip4_addr_cmp(addr, &allsystems) == 0));
+ group->next = list_head->next;
+ list_head->next = group;
+ }
+ }
+
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group ? "" : "impossible to ")));
+ ip4_addr_debug_print(IGMP_DEBUG, addr);
+ LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)ifp));
+
+ return group;
+}
+
+/**
+ * Remove a group from netif's igmp group list, but don't free it yet
+ *
+ * @param netif the network interface from whose group list to remove
+ * @param group the group to remove from the netif's igmp group list
+ * @return ERR_OK if group was removed from the list, an err_t otherwise
+ */
+static err_t
+igmp_remove_group(struct netif *netif, struct igmp_group *group)
+{
+ err_t err = ERR_OK;
+ struct igmp_group *tmp_group;
+
+ /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */
+ for (tmp_group = netif_igmp_data(netif); tmp_group != NULL; tmp_group = tmp_group->next) {
+ if (tmp_group->next == group) {
+ tmp_group->next = group->next;
+ break;
+ }
+ }
+ /* Group not found in netif's igmp group list */
+ if (tmp_group == NULL) {
+ err = ERR_ARG;
+ }
+
+ return err;
+}
+
+/**
+ * Called from ip_input() if a new IGMP packet is received.
+ *
+ * @param p received igmp packet, p->payload pointing to the igmp header
+ * @param inp network interface on which the packet was received
+ * @param dest destination ip address of the igmp packet
+ */
+void
+igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest)
+{
+ struct igmp_msg *igmp;
+ struct igmp_group *group;
+ struct igmp_group *groupref;
+
+ IGMP_STATS_INC(igmp.recv);
+
+ /* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */
+ if (p->len < IGMP_MINLEN) {
+ pbuf_free(p);
+ IGMP_STATS_INC(igmp.lenerr);
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n"));
+ return;
+ }
+
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from "));
+ ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->src);
+ LWIP_DEBUGF(IGMP_DEBUG, (" to address "));
+ ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->dest);
+ LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)inp));
+
+ /* Now calculate and check the checksum */
+ igmp = (struct igmp_msg *)p->payload;
+ if (inet_chksum(igmp, p->len)) {
+ pbuf_free(p);
+ IGMP_STATS_INC(igmp.chkerr);
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n"));
+ return;
+ }
+
+ /* Packet is ok so find an existing group */
+ group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */
+
+ /* If no matching group was found, the packet is not for us */
+ if (!group) {
+ pbuf_free(p);
+ IGMP_STATS_INC(igmp.drop);
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n"));
+ return;
+ }
+
+ /* NOW ACT ON THE INCOMING MESSAGE TYPE... */
+ switch (igmp->igmp_msgtype) {
+ case IGMP_MEMB_QUERY:
+ /* IGMP_MEMB_QUERY to the "all systems" address ? */
+ if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) {
+ /* THIS IS THE GENERAL QUERY */
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
+
+ if (igmp->igmp_maxresp == 0) {
+ IGMP_STATS_INC(igmp.rx_v1);
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time== 0 - this is V1 and not implemented - treat as v2\n"));
+ igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR;
+ } else {
+ IGMP_STATS_INC(igmp.rx_general);
+ }
+
+ groupref = netif_igmp_data(inp);
+
+ /* Do not send messages on the all systems group address! */
+ /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */
+ if (groupref != NULL) {
+ groupref = groupref->next;
+ }
+
+ while (groupref) {
+ igmp_delaying_member(groupref, igmp->igmp_maxresp);
+ groupref = groupref->next;
+ }
+ } else {
+ /* IGMP_MEMB_QUERY to a specific group ? */
+ if (!ip4_addr_isany(&igmp->igmp_group_address)) {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group "));
+ ip4_addr_debug_print_val(IGMP_DEBUG, igmp->igmp_group_address);
+ if (ip4_addr_cmp(dest, &allsystems)) {
+ ip4_addr_t groupaddr;
+ LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
+ /* we first need to re-look for the group since we used dest last time */
+ ip4_addr_copy(groupaddr, igmp->igmp_group_address);
+ group = igmp_lookfor_group(inp, &groupaddr);
+ } else {
+ LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
+ }
+
+ if (group != NULL) {
+ IGMP_STATS_INC(igmp.rx_group);
+ igmp_delaying_member(group, igmp->igmp_maxresp);
+ } else {
+ IGMP_STATS_INC(igmp.drop);
+ }
+ } else {
+ IGMP_STATS_INC(igmp.proterr);
+ }
+ }
+ break;
+ case IGMP_V2_MEMB_REPORT:
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n"));
+ IGMP_STATS_INC(igmp.rx_report);
+ if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) {
+ /* This is on a specific group we have already looked up */
+ group->timer = 0; /* stopped */
+ group->group_state = IGMP_GROUP_IDLE_MEMBER;
+ group->last_reporter_flag = 0;
+ }
+ break;
+ default:
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n",
+ igmp->igmp_msgtype, group->group_state, (void *)group, (void *)inp));
+ IGMP_STATS_INC(igmp.proterr);
+ break;
+ }
+
+ pbuf_free(p);
+ return;
+}
+
+/**
+ * @ingroup igmp
+ * Join a group on one or more network interfaces.
+ *
+ * @param ifaddr ip address of the network interface(s) which should join the group
+ * @param groupaddr the ip address of the group to join
+ * @return ERR_OK if the group was joined on the netif(s), an err_t otherwise
+ */
+err_t
+igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr)
+{
+ err_t err = ERR_VAL; /* no matching interface */
+ struct netif *netif;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* make sure it is multicast address */
+ LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
+ LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
+
+ /* loop through netif's */
+ NETIF_FOREACH(netif) {
+ /* Should we join this interface ? */
+ if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) {
+ err = igmp_joingroup_netif(netif, groupaddr);
+ if (err != ERR_OK) {
+ /* Return an error even if some network interfaces are joined */
+ /** @todo undo any other netif already joined */
+ return err;
+ }
+ }
+ }
+
+ return err;
+}
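+
+/* A minimal usage sketch (not upstream code), assuming the stack is
+ * initialized and an IGMP-enabled netif is up; passing IP4_ADDR_ANY4 as
+ * ifaddr joins the group on every matching interface. */
+#if 0 /* illustrative only */
+static void
+example_join(void)
+{
+  ip4_addr_t group;
+  IP4_ADDR(&group, 239, 1, 2, 3);
+  if (igmp_joingroup(IP4_ADDR_ANY4, &group) != ERR_OK) {
+    /* no matching IGMP-enabled netif, or out of memory */
+  }
+}
+#endif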
+
+/**
+ * @ingroup igmp
+ * Join a group on one network interface.
+ *
+ * @param netif the network interface which should join a new group
+ * @param groupaddr the ip address of the group to join
+ * @return ERR_OK if group was joined on the netif, an err_t otherwise
+ */
+err_t
+igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr)
+{
+ struct igmp_group *group;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* make sure it is multicast address */
+ LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
+ LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
+
+ /* make sure it is an igmp-enabled netif */
+ LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;);
+
+ /* find group or create a new one if not found */
+ group = igmp_lookup_group(netif, groupaddr);
+
+ if (group != NULL) {
+ /* igmp_lookup_group() may have found an existing group; check the state to make sure it is new */
+ if (group->group_state != IGMP_GROUP_NON_MEMBER) {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n"));
+ } else {
+ /* OK - it was new group */
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: "));
+ ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
+ LWIP_DEBUGF(IGMP_DEBUG, ("\n"));
+
+ /* If first use of the group, allow the group at the MAC level */
+ if ((group->use == 0) && (netif->igmp_mac_filter != NULL)) {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD "));
+ ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
+ LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
+ netif->igmp_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER);
+ }
+
+ IGMP_STATS_INC(igmp.tx_join);
+ igmp_send(netif, group, IGMP_V2_MEMB_REPORT);
+
+ igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR);
+
+ /* Need to work out where this timer comes from */
+ group->group_state = IGMP_GROUP_DELAYING_MEMBER;
+ }
+ /* Increment group use */
+ group->use++;
+ /* Join on this interface */
+ return ERR_OK;
+ } else {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n"));
+ return ERR_MEM;
+ }
+}
+
+/**
+ * @ingroup igmp
+ * Leave a group on one or more network interfaces.
+ *
+ * @param ifaddr ip address of the network interface(s) which should leave the group
+ * @param groupaddr the ip address of the group to leave
+ * @return ERR_OK if the group was left on the netif(s), an err_t otherwise
+ */
+err_t
+igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr)
+{
+ err_t err = ERR_VAL; /* no matching interface */
+ struct netif *netif;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* make sure it is multicast address */
+ LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
+ LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
+
+ /* loop through netif's */
+ NETIF_FOREACH(netif) {
+ /* Should we leave this interface ? */
+ if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) {
+ err_t res = igmp_leavegroup_netif(netif, groupaddr);
+ if (err != ERR_OK) {
+ /* Store this result if we have not yet gotten a success */
+ err = res;
+ }
+ }
+ }
+
+ return err;
+}
+
+/**
+ * @ingroup igmp
+ * Leave a group on one network interface.
+ *
+ * @param netif the network interface which should leave a group
+ * @param groupaddr the ip address of the group to leave
+ * @return ERR_OK if group was left on the netif, an err_t otherwise
+ */
+err_t
+igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr)
+{
+ struct igmp_group *group;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* make sure it is multicast address */
+ LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
+ LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
+
+ /* make sure it is an igmp-enabled netif */
+ LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;);
+
+ /* find group */
+ group = igmp_lookfor_group(netif, groupaddr);
+
+ if (group != NULL) {
+ /* Only send a leave if the flag is set according to the state diagram */
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: "));
+ ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
+ LWIP_DEBUGF(IGMP_DEBUG, ("\n"));
+
+ /* If there is no other use of the group */
+ if (group->use <= 1) {
+ /* Remove the group from the list */
+ igmp_remove_group(netif, group);
+
+ /* If we are the last reporter for this group */
+ if (group->last_reporter_flag) {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n"));
+ IGMP_STATS_INC(igmp.tx_leave);
+ igmp_send(netif, group, IGMP_LEAVE_GROUP);
+ }
+
+ /* Disable the group at the MAC level */
+ if (netif->igmp_mac_filter != NULL) {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL "));
+ ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
+ LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
+ netif->igmp_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER);
+ }
+
+ /* Free group struct */
+ memp_free(MEMP_IGMP_GROUP, group);
+ } else {
+ /* Decrement group use */
+ group->use--;
+ }
+ return ERR_OK;
+ } else {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n"));
+ return ERR_VAL;
+ }
+}
+
+/**
+ * The igmp timer function (both for NO_SYS=1 and =0)
+ * Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default).
+ */
+void
+igmp_tmr(void)
+{
+ struct netif *netif;
+
+ NETIF_FOREACH(netif) {
+ struct igmp_group *group = netif_igmp_data(netif);
+
+ while (group != NULL) {
+ if (group->timer > 0) {
+ group->timer--;
+ if (group->timer == 0) {
+ igmp_timeout(netif, group);
+ }
+ }
+ group = group->next;
+ }
+ }
+}
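+
+/* In a default build this function is driven by the stack's timeout
+ * machinery (timeouts.c), so applications do not call it directly. Below is
+ * a sketch (not upstream code) of the equivalent manual wiring for a
+ * bare-metal NO_SYS=1 port that bypasses sys_check_timeouts(); "now_ms" is a
+ * hypothetical millisecond tick source. */
+#if 0 /* illustrative only */
+void
+example_poll(void)
+{
+  static u32_t last_igmp;
+  if ((u32_t)(now_ms() - last_igmp) >= IGMP_TMR_INTERVAL) {
+    last_igmp = now_ms();
+    igmp_tmr();
+  }
+}
+#endif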
+
+/**
+ * Called if a timeout for one group is reached.
+ * Sends a report for this group.
+ *
+ * @param netif the netif the group belongs to
+ * @param group an igmp_group for which a timeout is reached
+ */
+static void
+igmp_timeout(struct netif *netif, struct igmp_group *group)
+{
+ /* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group
+ (unless it is the allsystems group) */
+ if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) &&
+ (!(ip4_addr_cmp(&(group->group_address), &allsystems)))) {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address "));
+ ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address);
+ LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)netif));
+
+ group->group_state = IGMP_GROUP_IDLE_MEMBER;
+
+ IGMP_STATS_INC(igmp.tx_report);
+ igmp_send(netif, group, IGMP_V2_MEMB_REPORT);
+ }
+}
+
+/**
+ * Start a timer for an igmp group
+ *
+ * @param group the igmp_group for which to start a timer
+ * @param max_time the time in multiples of IGMP_TMR_INTERVAL (decremented
+ *        on every call to igmp_tmr())
+ */
+static void
+igmp_start_timer(struct igmp_group *group, u8_t max_time)
+{
+#ifdef LWIP_RAND
+ group->timer = (u16_t)(max_time > 2 ? (LWIP_RAND() % max_time) : 1);
+#else /* LWIP_RAND */
+ /* ATTENTION: use this only if absolutely necessary! */
+ group->timer = max_time / 2;
+#endif /* LWIP_RAND */
+
+ if (group->timer == 0) {
+ group->timer = 1;
+ }
+}
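+
+/* To get the randomized report delay the RFC intends, LWIP_RAND should be
+ * defined in lwipopts.h, e.g. (assuming the C library PRNG is acceptable
+ * for this purpose):
+ *   #define LWIP_RAND() ((u32_t)rand())
+ * Without it, the max_time/2 fallback above makes every host on the link
+ * report at the same moment. */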
+
+/**
+ * Delay the membership report for a group, if necessary
+ *
+ * @param group the igmp_group whose membership report to delay
+ * @param maxresp query delay
+ */
+static void
+igmp_delaying_member(struct igmp_group *group, u8_t maxresp)
+{
+ if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) ||
+ ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) &&
+ ((group->timer == 0) || (maxresp < group->timer)))) {
+ igmp_start_timer(group, maxresp);
+ group->group_state = IGMP_GROUP_DELAYING_MEMBER;
+ }
+}
+
+
+/**
+ * Sends an IGMP packet on a network interface, with the "router alert"
+ * option (RFC 2113) included in the IP header. The IP header is constructed
+ * and its checksum calculated by ip4_output_if_opt(). If the source IP
+ * address is NULL, the IP address of the outgoing network interface is
+ * filled in as source address.
+ *
+ * @param p the packet to send (p->payload points to the data, e.g. next
+ protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
+ IP header and p->payload points to that IP header)
+ * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the
+ * IP address of the netif used to send is used as source address)
+ * @param dest the destination IP address to send the packet to
+ * @param netif the netif on which to send this packet
+ * @return ERR_OK if the packet was sent OK
+ * ERR_BUF if p doesn't have enough space for IP/LINK headers
+ * returns errors returned by netif->output
+ */
+static err_t
+igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif)
+{
+ /* This is the "router alert" option */
+ u16_t ra[2];
+ ra[0] = PP_HTONS(ROUTER_ALERT);
+ ra[1] = 0x0000; /* Router shall examine packet */
+ IGMP_STATS_INC(igmp.xmit);
+ return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN);
+}
+
+/**
+ * Send an igmp packet to a specific group.
+ *
+ * @param group the group to which to send the packet
+ * @param type the type of igmp packet to send
+ */
+static void
+igmp_send(struct netif *netif, struct igmp_group *group, u8_t type)
+{
+ struct pbuf *p = NULL;
+ struct igmp_msg *igmp = NULL;
+ ip4_addr_t src = *IP4_ADDR_ANY4;
+ ip4_addr_t *dest = NULL;
+
+ /* IP header + "router alert" option + IGMP header */
+ p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM);
+
+ if (p) {
+ igmp = (struct igmp_msg *)p->payload;
+ LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg",
+ (p->len >= sizeof(struct igmp_msg)));
+ ip4_addr_copy(src, *netif_ip4_addr(netif));
+
+ if (type == IGMP_V2_MEMB_REPORT) {
+ dest = &(group->group_address);
+ ip4_addr_copy(igmp->igmp_group_address, group->group_address);
+ group->last_reporter_flag = 1; /* Remember we were the last to report */
+ } else {
+ if (type == IGMP_LEAVE_GROUP) {
+ dest = &allrouters;
+ ip4_addr_copy(igmp->igmp_group_address, group->group_address);
+ }
+ }
+
+ if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) {
+ igmp->igmp_msgtype = type;
+ igmp->igmp_maxresp = 0;
+ igmp->igmp_checksum = 0;
+ igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN);
+
+ igmp_ip_output_if(p, &src, dest, netif);
+ }
+
+ pbuf_free(p);
+ } else {
+ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n"));
+ IGMP_STATS_INC(igmp.memerr);
+ }
+}
+
+#endif /* LWIP_IPV4 && LWIP_IGMP */
diff --git a/lwip/src/core/ipv4/ip4.c b/lwip/src/core/ipv4/ip4.c
new file mode 100644
index 0000000..26c26a9
--- /dev/null
+++ b/lwip/src/core/ipv4/ip4.c
@@ -0,0 +1,1132 @@
+/**
+ * @file
+ * This is the IPv4 layer implementation for incoming and outgoing IP traffic.
+ *
+ * @see ip_frag.c
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4
+
+#include "lwip/ip.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/ip4_frag.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/icmp.h"
+#include "lwip/igmp.h"
+#include "lwip/priv/raw_priv.h"
+#include "lwip/udp.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/autoip.h"
+#include "lwip/stats.h"
+#include "lwip/prot/iana.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/** Set this to 0 in the rare case of wanting to call an extra function to
+ * generate the IP checksum (in contrast to calculating it on-the-fly). */
+#ifndef LWIP_INLINE_IP_CHKSUM
+#if LWIP_CHECKSUM_CTRL_PER_NETIF
+#define LWIP_INLINE_IP_CHKSUM 0
+#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */
+#define LWIP_INLINE_IP_CHKSUM 1
+#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */
+#endif
+
+#if LWIP_INLINE_IP_CHKSUM && CHECKSUM_GEN_IP
+#define CHECKSUM_GEN_IP_INLINE 1
+#else
+#define CHECKSUM_GEN_IP_INLINE 0
+#endif
+
+#if LWIP_DHCP || defined(LWIP_IP_ACCEPT_UDP_PORT)
+#define IP_ACCEPT_LINK_LAYER_ADDRESSING 1
+
+/** Some defines for DHCP to let link-layer-addressed packets through while the
+ * netif is down.
+ * To use this in your own application/protocol, define LWIP_IP_ACCEPT_UDP_PORT(port)
+ * to return 1 if the port is accepted and 0 if the port is not accepted.
+ */
+#if LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT)
+/* accept DHCP client port and custom port */
+#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) \
+ || (LWIP_IP_ACCEPT_UDP_PORT(port)))
+#elif defined(LWIP_IP_ACCEPT_UDP_PORT) /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */
+/* accept custom port only */
+#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (LWIP_IP_ACCEPT_UDP_PORT(port))
+#else /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */
+/* accept DHCP client port only */
+#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) ((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT))
+#endif /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */
+
+#else /* LWIP_DHCP */
+#define IP_ACCEPT_LINK_LAYER_ADDRESSING 0
+#endif /* LWIP_DHCP */
+
+/** The IP header ID of the next outgoing IP packet */
+static u16_t ip_id;
+
+#if LWIP_MULTICAST_TX_OPTIONS
+/** The default netif used for multicast */
+static struct netif *ip4_default_multicast_netif;
+
+/**
+ * @ingroup ip4
+ * Set a default netif for IPv4 multicast. */
+void
+ip4_set_default_multicast_netif(struct netif *default_multicast_netif)
+{
+ ip4_default_multicast_netif = default_multicast_netif;
+}
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+
+#ifdef LWIP_HOOK_IP4_ROUTE_SRC
+/**
+ * Source based IPv4 routing must be fully implemented in
+ * LWIP_HOOK_IP4_ROUTE_SRC(). This function only provides the parameters.
+ */
+struct netif *
+ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest)
+{
+ if (src != NULL) {
+ /* when src==NULL, the hook is called from ip4_route(dest) */
+ struct netif *netif = LWIP_HOOK_IP4_ROUTE_SRC(src, dest);
+ if (netif != NULL) {
+ return netif;
+ }
+ }
+ return ip4_route(dest);
+}
+#endif /* LWIP_HOOK_IP4_ROUTE_SRC */
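+
+/* A sketch (not upstream code) of wiring this hook up via lwipopts.h;
+ * "my_route_src" is a hypothetical application function returning a netif
+ * for a given source/destination pair, or NULL to fall back to ip4_route():
+ *
+ *   struct netif *my_route_src(const ip4_addr_t *src, const ip4_addr_t *dest);
+ *   #define LWIP_HOOK_IP4_ROUTE_SRC(src, dest) my_route_src((src), (dest))
+ */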
+
+/**
+ * Finds the appropriate network interface for a given IP address. It
+ * searches the list of network interfaces linearly. A match is found
+ * if the masked IP address of the network interface equals the masked
+ * IP address given to the function.
+ *
+ * @param dest the destination IP address for which to find the route
+ * @return the netif on which to send to reach dest
+ */
+struct netif *
+ip4_route(const ip4_addr_t *dest)
+{
+#if !LWIP_SINGLE_NETIF
+ struct netif *netif;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_MULTICAST_TX_OPTIONS
+ /* Use administratively selected interface for multicast by default */
+ if (ip4_addr_ismulticast(dest) && ip4_default_multicast_netif) {
+ return ip4_default_multicast_netif;
+ }
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+
+ /* bug #54569: in case LWIP_SINGLE_NETIF=1 and LWIP_DEBUGF() disabled, the following loop is optimized away */
+ LWIP_UNUSED_ARG(dest);
+
+ /* iterate through netifs */
+ NETIF_FOREACH(netif) {
+ /* is the netif up, does it have a link and a valid address? */
+ if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) {
+ /* network mask matches? */
+ if (ip4_addr_netcmp(dest, netif_ip4_addr(netif), netif_ip4_netmask(netif))) {
+ /* return netif on which to forward IP packet */
+ return netif;
+ }
+ /* gateway matches on a non-broadcast interface? (i.e. peer in a point-to-point interface) */
+ if (((netif->flags & NETIF_FLAG_BROADCAST) == 0) && ip4_addr_cmp(dest, netif_ip4_gw(netif))) {
+ /* return netif on which to forward IP packet */
+ return netif;
+ }
+ }
+ }
+
+#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF
+ /* loopif is disabled, loopback traffic is passed through any netif */
+ if (ip4_addr_isloopback(dest)) {
+ /* don't check for link on loopback traffic */
+ if (netif_default != NULL && netif_is_up(netif_default)) {
+ return netif_default;
+ }
+ /* default netif is not up, just use any netif for loopback traffic */
+ NETIF_FOREACH(netif) {
+ if (netif_is_up(netif)) {
+ return netif;
+ }
+ }
+ return NULL;
+ }
+#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */
+
+#ifdef LWIP_HOOK_IP4_ROUTE_SRC
+ netif = LWIP_HOOK_IP4_ROUTE_SRC(NULL, dest);
+ if (netif != NULL) {
+ return netif;
+ }
+#elif defined(LWIP_HOOK_IP4_ROUTE)
+ netif = LWIP_HOOK_IP4_ROUTE(dest);
+ if (netif != NULL) {
+ return netif;
+ }
+#endif
+#endif /* !LWIP_SINGLE_NETIF */
+
+ if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default) ||
+ ip4_addr_isany_val(*netif_ip4_addr(netif_default)) || ip4_addr_isloopback(dest)) {
+ /* No matching netif found and default netif is not usable.
+ If this is not good enough for you, use LWIP_HOOK_IP4_ROUTE() */
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_route: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest)));
+ IP_STATS_INC(ip.rterr);
+ MIB2_STATS_INC(mib2.ipoutnoroutes);
+ return NULL;
+ }
+
+ return netif_default;
+}
+
+#if IP_FORWARD
+/**
+ * Determine whether an IP address is in a reserved set of addresses
+ * that may not be forwarded, or whether datagrams to that destination
+ * may be forwarded.
+ * @param p the packet to forward
+ * @return 1: can forward 0: discard
+ */
+static int
+ip4_canforward(struct pbuf *p)
+{
+ u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr()));
+
+#ifdef LWIP_HOOK_IP4_CANFORWARD
+ int ret = LWIP_HOOK_IP4_CANFORWARD(p, addr);
+ if (ret >= 0) {
+ return ret;
+ }
+#endif /* LWIP_HOOK_IP4_CANFORWARD */
+
+ if (p->flags & PBUF_FLAG_LLBCAST) {
+ /* don't route link-layer broadcasts */
+ return 0;
+ }
+ if ((p->flags & PBUF_FLAG_LLMCAST) || IP_MULTICAST(addr)) {
+ /* don't route link-layer multicasts (use LWIP_HOOK_IP4_CANFORWARD instead) */
+ return 0;
+ }
+ if (IP_EXPERIMENTAL(addr)) {
+ return 0;
+ }
+ if (IP_CLASSA(addr)) {
+ u32_t net = addr & IP_CLASSA_NET;
+ if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT))) {
+ /* don't route loopback packets */
+ return 0;
+ }
+ }
+ return 1;
+}
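+
+/* A sketch (not upstream code) of overriding the policy above through the
+ * hook; addr is the destination address in host byte order, and a negative
+ * return value falls through to the default checks. */
+#if 0 /* illustrative only; define the macro via lwipopts.h */
+static int
+example_canforward(struct pbuf *p, u32_t addr)
+{
+  LWIP_UNUSED_ARG(p);
+  if ((addr & 0xffffff00UL) == 0xc0a80100UL) { /* always allow 192.168.1.0/24 */
+    return 1;
+  }
+  return -1; /* apply the default rules below */
+}
+#define LWIP_HOOK_IP4_CANFORWARD(p, addr) example_canforward((p), (addr))
+#endif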
+
+/**
+ * Forwards an IP packet. It finds an appropriate route for the
+ * packet, decrements the TTL value of the packet, adjusts the
+ * checksum and outputs the packet on the appropriate interface.
+ *
+ * @param p the packet to forward (p->payload points to IP header)
+ * @param iphdr the IP header of the input packet
+ * @param inp the netif on which this packet was received
+ */
+static void
+ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp)
+{
+ struct netif *netif;
+
+ PERF_START;
+ LWIP_UNUSED_ARG(inp);
+
+ if (!ip4_canforward(p)) {
+ goto return_noroute;
+ }
+
+ /* RFC3927 2.7: do not forward link-local addresses */
+ if (ip4_addr_islinklocal(ip4_current_dest_addr())) {
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not forwarding LLA %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()),
+ ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr())));
+ goto return_noroute;
+ }
+
+ /* Find network interface where to forward this IP packet to. */
+ netif = ip4_route_src(ip4_current_src_addr(), ip4_current_dest_addr());
+ if (netif == NULL) {
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: no forwarding route for %"U16_F".%"U16_F".%"U16_F".%"U16_F" found\n",
+ ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()),
+ ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr())));
+ /* @todo: send ICMP_DUR_NET? */
+ goto return_noroute;
+ }
+#if !IP_FORWARD_ALLOW_TX_ON_RX_NETIF
+ /* Do not forward packets onto the same network interface on which
+ * they arrived. */
+ if (netif == inp) {
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not bouncing packets back on incoming interface.\n"));
+ goto return_noroute;
+ }
+#endif /* IP_FORWARD_ALLOW_TX_ON_RX_NETIF */
+
+ /* decrement TTL */
+ IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1);
+ /* send ICMP if TTL == 0 */
+ if (IPH_TTL(iphdr) == 0) {
+ MIB2_STATS_INC(mib2.ipinhdrerrors);
+#if LWIP_ICMP
+ /* Don't send ICMP messages in response to ICMP messages */
+ if (IPH_PROTO(iphdr) != IP_PROTO_ICMP) {
+ icmp_time_exceeded(p, ICMP_TE_TTL);
+ }
+#endif /* LWIP_ICMP */
+ return;
+ }
+
+ /* Incrementally update the IP checksum. */
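+ /* (RFC 1624 incremental update: decrementing TTL lowers the TTL/protocol
+    16-bit word by 0x0100, so 0x0100 is added back into the one's-complement
+    checksum; the branch below handles the end-around carry.) */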
+ if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100)) {
+ IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1));
+ } else {
+ IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100)));
+ }
+
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()),
+ ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr())));
+
+ IP_STATS_INC(ip.fw);
+ MIB2_STATS_INC(mib2.ipforwdatagrams);
+ IP_STATS_INC(ip.xmit);
+
+ PERF_STOP("ip4_forward");
+ /* don't fragment if interface has mtu set to 0 [loopif] */
+ if (netif->mtu && (p->tot_len > netif->mtu)) {
+ if ((IPH_OFFSET(iphdr) & PP_NTOHS(IP_DF)) == 0) {
+#if IP_FRAG
+ ip4_frag(p, netif, ip4_current_dest_addr());
+#else /* IP_FRAG */
+ /* @todo: send ICMP Destination Unreachable code 13 "Communication administratively prohibited"? */
+#endif /* IP_FRAG */
+ } else {
+#if LWIP_ICMP
+ /* send ICMP Destination Unreachable code 4: "Fragmentation Needed and DF Set" */
+ icmp_dest_unreach(p, ICMP_DUR_FRAG);
+#endif /* LWIP_ICMP */
+ }
+ return;
+ }
+ /* transmit pbuf on chosen interface */
+ netif->output(netif, p, ip4_current_dest_addr());
+ return;
+return_noroute:
+ MIB2_STATS_INC(mib2.ipoutnoroutes);
+}
+#endif /* IP_FORWARD */
+
+/** Return true if the current input packet should be accepted on this netif */
+static int
+ip4_input_accept(struct netif *netif)
+{
+ LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n",
+ ip4_addr_get_u32(ip4_current_dest_addr()), ip4_addr_get_u32(netif_ip4_addr(netif)),
+ ip4_addr_get_u32(ip4_current_dest_addr()) & ip4_addr_get_u32(netif_ip4_netmask(netif)),
+ ip4_addr_get_u32(netif_ip4_addr(netif)) & ip4_addr_get_u32(netif_ip4_netmask(netif)),
+ ip4_addr_get_u32(ip4_current_dest_addr()) & ~ip4_addr_get_u32(netif_ip4_netmask(netif))));
+
+ /* interface is up and configured? */
+ if ((netif_is_up(netif)) && (!ip4_addr_isany_val(*netif_ip4_addr(netif)))) {
+ /* unicast to this interface address? */
+ if (ip4_addr_cmp(ip4_current_dest_addr(), netif_ip4_addr(netif)) ||
+ /* or broadcast on this interface network address? */
+ ip4_addr_isbroadcast(ip4_current_dest_addr(), netif)
+#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF
+ || (ip4_addr_get_u32(ip4_current_dest_addr()) == PP_HTONL(IPADDR_LOOPBACK))
+#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */
+ ) {
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_input: packet accepted on interface %c%c\n",
+ netif->name[0], netif->name[1]));
+ /* accept on this netif */
+ return 1;
+ }
+#if LWIP_AUTOIP
+ /* connections to link-local addresses must persist after changing
+ the netif's address (RFC3927 ch. 1.9) */
+ if (autoip_accept_packet(netif, ip4_current_dest_addr())) {
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_input: LLA packet accepted on interface %c%c\n",
+ netif->name[0], netif->name[1]));
+ /* accept on this netif */
+ return 1;
+ }
+#endif /* LWIP_AUTOIP */
+ }
+ return 0;
+}
+
+/**
+ * This function is called by the network interface device driver when
+ * an IP packet is received. The function performs the basic checks of the
+ * IP header, such as the packet size being at least as large as the header
+ * size, etc. If the packet was not destined for us, the packet is
+ * forwarded (using ip4_forward). The IP checksum is always checked.
+ *
+ * Finally, the packet is sent to the upper layer protocol input function.
+ *
+ * @param p the received IP packet (p->payload points to IP header)
+ * @param inp the netif on which this packet was received
+ * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't
+ * processed, but currently always returns ERR_OK)
+ */
+err_t
+ip4_input(struct pbuf *p, struct netif *inp)
+{
+ const struct ip_hdr *iphdr;
+ struct netif *netif;
+ u16_t iphdr_hlen;
+ u16_t iphdr_len;
+#if IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP
+ int check_ip_src = 1;
+#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP */
+#if LWIP_RAW
+ raw_input_state_t raw_status;
+#endif /* LWIP_RAW */
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ IP_STATS_INC(ip.recv);
+ MIB2_STATS_INC(mib2.ipinreceives);
+
+ /* identify the IP header */
+ iphdr = (struct ip_hdr *)p->payload;
+ if (IPH_V(iphdr) != 4) {
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", (u16_t)IPH_V(iphdr)));
+ ip4_debug_print(p);
+ pbuf_free(p);
+ IP_STATS_INC(ip.err);
+ IP_STATS_INC(ip.drop);
+ MIB2_STATS_INC(mib2.ipinhdrerrors);
+ return ERR_OK;
+ }
+
+#ifdef LWIP_HOOK_IP4_INPUT
+ if (LWIP_HOOK_IP4_INPUT(p, inp)) {
+ /* the packet has been eaten */
+ return ERR_OK;
+ }
+#endif
+
+ /* obtain IP header length in bytes */
+ iphdr_hlen = IPH_HL_BYTES(iphdr);
+ /* obtain ip length in bytes */
+ iphdr_len = lwip_ntohs(IPH_LEN(iphdr));
+
+ /* Trim pbuf. This is especially required for packets < 60 bytes. */
+ if (iphdr_len < p->tot_len) {
+ pbuf_realloc(p, iphdr_len);
+ }
+
+ /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */
+ if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len) || (iphdr_hlen < IP_HLEN)) {
+ if (iphdr_hlen < IP_HLEN) {
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("ip4_input: short IP header (%"U16_F" bytes) received, IP packet dropped\n", iphdr_hlen));
+ }
+ if (iphdr_hlen > p->len) {
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n",
+ iphdr_hlen, p->len));
+ }
+ if (iphdr_len > p->tot_len) {
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n",
+ iphdr_len, p->tot_len));
+ }
+ /* free (drop) packet pbufs */
+ pbuf_free(p);
+ IP_STATS_INC(ip.lenerr);
+ IP_STATS_INC(ip.drop);
+ MIB2_STATS_INC(mib2.ipindiscards);
+ return ERR_OK;
+ }
+
+ /* verify checksum */
+#if CHECKSUM_CHECK_IP
+ IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_IP) {
+ if (inet_chksum(iphdr, iphdr_hlen) != 0) {
+
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen)));
+ ip4_debug_print(p);
+ pbuf_free(p);
+ IP_STATS_INC(ip.chkerr);
+ IP_STATS_INC(ip.drop);
+ MIB2_STATS_INC(mib2.ipinhdrerrors);
+ return ERR_OK;
+ }
+ }
+#endif
+
+ /* copy IP addresses to aligned ip_addr_t */
+ ip_addr_copy_from_ip4(ip_data.current_iphdr_dest, iphdr->dest);
+ ip_addr_copy_from_ip4(ip_data.current_iphdr_src, iphdr->src);
+
+ /* match packet against an interface, i.e. is this packet for us? */
+ if (ip4_addr_ismulticast(ip4_current_dest_addr())) {
+#if LWIP_IGMP
+ if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, ip4_current_dest_addr()))) {
+ /* IGMP snooping switches need 0.0.0.0 to be allowed as source address (RFC 4541) */
+ ip4_addr_t allsystems;
+ IP4_ADDR(&allsystems, 224, 0, 0, 1);
+ if (ip4_addr_cmp(ip4_current_dest_addr(), &allsystems) &&
+ ip4_addr_isany(ip4_current_src_addr())) {
+ check_ip_src = 0;
+ }
+ netif = inp;
+ } else {
+ netif = NULL;
+ }
+#else /* LWIP_IGMP */
+ if ((netif_is_up(inp)) && (!ip4_addr_isany_val(*netif_ip4_addr(inp)))) {
+ netif = inp;
+ } else {
+ netif = NULL;
+ }
+#endif /* LWIP_IGMP */
+ } else {
+ /* start trying with inp. if that's not acceptable, start walking the
+ list of configured netifs. */
+ if (ip4_input_accept(inp)) {
+ netif = inp;
+ } else {
+ netif = NULL;
+#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF
+ /* Packets sent to the loopback address must not be accepted on an
+ * interface that does not have the loopback address assigned to it,
+ * unless a non-loopback interface is used for loopback traffic. */
+ if (!ip4_addr_isloopback(ip4_current_dest_addr()))
+#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */
+ {
+#if !LWIP_SINGLE_NETIF
+ NETIF_FOREACH(netif) {
+ if (netif == inp) {
+ /* we checked that before already */
+ continue;
+ }
+ if (ip4_input_accept(netif)) {
+ break;
+ }
+ }
+#endif /* !LWIP_SINGLE_NETIF */
+ }
+ }
+ }
+
+#if IP_ACCEPT_LINK_LAYER_ADDRESSING
+ /* Pass DHCP messages regardless of destination address. DHCP traffic is addressed
+ * using link layer addressing (such as Ethernet MAC) so we must not filter on IP.
+ * (According to RFC 1542 section 3.1.1, referred to by RFC 2131.)
+ *
+ * If you want to accept private broadcast communication while a netif is down,
+ * define LWIP_IP_ACCEPT_UDP_PORT(dst_port), e.g.:
+ *
+ * #define LWIP_IP_ACCEPT_UDP_PORT(dst_port) ((dst_port) == PP_NTOHS(12345))
+ */
+ if (netif == NULL) {
+ /* destination port is the DHCP client port (or a user-accepted port)? */
+ if (IPH_PROTO(iphdr) == IP_PROTO_UDP) {
+ const struct udp_hdr *udphdr = (const struct udp_hdr *)((const u8_t *)iphdr + iphdr_hlen);
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: UDP packet to DHCP client port %"U16_F"\n",
+ lwip_ntohs(udphdr->dest)));
+ if (IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(udphdr->dest)) {
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: DHCP packet accepted.\n"));
+ netif = inp;
+ check_ip_src = 0;
+ }
+ }
+ }
+#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */
+
+ /* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */
+#if LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING
+ if (check_ip_src
+#if IP_ACCEPT_LINK_LAYER_ADDRESSING
+ /* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1122: 3.2.1.3/a) */
+ && !ip4_addr_isany_val(*ip4_current_src_addr())
+#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */
+ )
+#endif /* LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING */
+ {
+ if ((ip4_addr_isbroadcast(ip4_current_src_addr(), inp)) ||
+ (ip4_addr_ismulticast(ip4_current_src_addr()))) {
+ /* packet source is not valid */
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip4_input: packet source is not valid.\n"));
+ /* free (drop) packet pbufs */
+ pbuf_free(p);
+ IP_STATS_INC(ip.drop);
+ MIB2_STATS_INC(mib2.ipinaddrerrors);
+ MIB2_STATS_INC(mib2.ipindiscards);
+ return ERR_OK;
+ }
+ }
+
+ /* packet not for us? */
+ if (netif == NULL) {
+ /* packet not for us, route or discard */
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: packet not for us.\n"));
+#if IP_FORWARD
+ /* non-broadcast packet? */
+ if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp)) {
+ /* try to forward IP packet on (other) interfaces */
+ ip4_forward(p, (struct ip_hdr *)p->payload, inp);
+ } else
+#endif /* IP_FORWARD */
+ {
+ IP_STATS_INC(ip.drop);
+ MIB2_STATS_INC(mib2.ipinaddrerrors);
+ MIB2_STATS_INC(mib2.ipindiscards);
+ }
+ pbuf_free(p);
+ return ERR_OK;
+ }
+ /* packet consists of multiple fragments? */
+ if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) {
+#if IP_REASSEMBLY /* packet fragment reassembly code present? */
+ LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip4_reass()\n",
+ lwip_ntohs(IPH_ID(iphdr)), p->tot_len, lwip_ntohs(IPH_LEN(iphdr)), (u16_t)!!(IPH_OFFSET(iphdr) & PP_HTONS(IP_MF)), (u16_t)((lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK) * 8)));
+ /* reassemble the packet*/
+ p = ip4_reass(p);
+ /* packet not fully reassembled yet? */
+ if (p == NULL) {
+ return ERR_OK;
+ }
+ iphdr = (const struct ip_hdr *)p->payload;
+#else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */
+ pbuf_free(p);
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n",
+ lwip_ntohs(IPH_OFFSET(iphdr))));
+ IP_STATS_INC(ip.opterr);
+ IP_STATS_INC(ip.drop);
+ /* unsupported protocol feature */
+ MIB2_STATS_INC(mib2.ipinunknownprotos);
+ return ERR_OK;
+#endif /* IP_REASSEMBLY */
+ }
+
+#if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? */
+
+#if LWIP_IGMP
+ /* there is an extra "router alert" option in IGMP messages which we allow for but do not police */
+ if ((iphdr_hlen > IP_HLEN) && (IPH_PROTO(iphdr) != IP_PROTO_IGMP)) {
+#else
+ if (iphdr_hlen > IP_HLEN) {
+#endif /* LWIP_IGMP */
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n"));
+ pbuf_free(p);
+ IP_STATS_INC(ip.opterr);
+ IP_STATS_INC(ip.drop);
+ /* unsupported protocol feature */
+ MIB2_STATS_INC(mib2.ipinunknownprotos);
+ return ERR_OK;
+ }
+#endif /* IP_OPTIONS_ALLOWED == 0 */
+
+ /* send to upper layers */
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_input: \n"));
+ ip4_debug_print(p);
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len));
+
+ ip_data.current_netif = netif;
+ ip_data.current_input_netif = inp;
+ ip_data.current_ip4_header = iphdr;
+ ip_data.current_ip_header_tot_len = IPH_HL_BYTES(iphdr);
+
+#if LWIP_RAW
+ /* raw input did not eat the packet? */
+ raw_status = raw_input(p, inp);
+ if (raw_status != RAW_INPUT_EATEN)
+#endif /* LWIP_RAW */
+ {
+ pbuf_remove_header(p, iphdr_hlen); /* Move to payload, no check necessary. */
+
+ switch (IPH_PROTO(iphdr)) {
+#if LWIP_UDP
+ case IP_PROTO_UDP:
+#if LWIP_UDPLITE
+ case IP_PROTO_UDPLITE:
+#endif /* LWIP_UDPLITE */
+ MIB2_STATS_INC(mib2.ipindelivers);
+ udp_input(p, inp);
+ break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+ case IP_PROTO_TCP:
+ MIB2_STATS_INC(mib2.ipindelivers);
+ tcp_input(p, inp);
+ break;
+#endif /* LWIP_TCP */
+#if LWIP_ICMP
+ case IP_PROTO_ICMP:
+ MIB2_STATS_INC(mib2.ipindelivers);
+ icmp_input(p, inp);
+ break;
+#endif /* LWIP_ICMP */
+#if LWIP_IGMP
+ case IP_PROTO_IGMP:
+ igmp_input(p, inp, ip4_current_dest_addr());
+ break;
+#endif /* LWIP_IGMP */
+ default:
+#if LWIP_RAW
+ if (raw_status == RAW_INPUT_DELIVERED) {
+ MIB2_STATS_INC(mib2.ipindelivers);
+ } else
+#endif /* LWIP_RAW */
+ {
+#if LWIP_ICMP
+ /* send ICMP destination protocol unreachable unless it was a broadcast */
+ if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) &&
+ !ip4_addr_ismulticast(ip4_current_dest_addr())) {
+ pbuf_header_force(p, (s16_t)iphdr_hlen); /* Move to ip header, no check necessary. */
+ icmp_dest_unreach(p, ICMP_DUR_PROTO);
+ }
+#endif /* LWIP_ICMP */
+
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", (u16_t)IPH_PROTO(iphdr)));
+
+ IP_STATS_INC(ip.proterr);
+ IP_STATS_INC(ip.drop);
+ MIB2_STATS_INC(mib2.ipinunknownprotos);
+ }
+ pbuf_free(p);
+ break;
+ }
+ }
+
+ /* @todo: this is not really necessary... */
+ ip_data.current_netif = NULL;
+ ip_data.current_input_netif = NULL;
+ ip_data.current_ip4_header = NULL;
+ ip_data.current_ip_header_tot_len = 0;
+ ip4_addr_set_any(ip4_current_src_addr());
+ ip4_addr_set_any(ip4_current_dest_addr());
+
+ return ERR_OK;
+}
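+
+/* A sketch (not upstream code) of how this function is reached: for a raw IP
+ * device the port passes ip4_input (or tcpip_input in threaded builds) as the
+ * input callback of netif_add(); Ethernet devices use ethernet_input(), which
+ * calls ip4_input() after stripping the Ethernet header. All names below are
+ * placeholders:
+ *
+ *   netif_add(&my_netif, &ipaddr, &netmask, &gw, state,
+ *             example_netif_init, ip4_input);
+ */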
+
+/**
+ * Sends an IP packet on a network interface. This function constructs
+ * the IP header and calculates the IP header checksum. If the source
+ * IP address is NULL, the IP address of the outgoing network
+ * interface is filled in as source address.
+ * If the destination IP address is LWIP_IP_HDRINCL, p is assumed to already
+ * include an IP header and p->payload points to it instead of the data.
+ *
+ * @param p the packet to send (p->payload points to the data, e.g. next
+ protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
+ IP header and p->payload points to that IP header)
+ * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the
+ * IP address of the netif used to send is used as source address)
+ * @param dest the destination IP address to send the packet to
+ * @param ttl the TTL value to be set in the IP header
+ * @param tos the TOS value to be set in the IP header
+ * @param proto the PROTOCOL to be set in the IP header
+ * @param netif the netif on which to send this packet
+ * @return ERR_OK if the packet was sent OK
+ * ERR_BUF if p doesn't have enough space for IP/LINK headers
+ * returns errors returned by netif->output
+ *
+ * @note ip_id: RFC791 "some host may be able to simply use
+ * unique identifiers independent of destination"
+ */
+err_t
+ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
+ u8_t ttl, u8_t tos,
+ u8_t proto, struct netif *netif)
+{
+#if IP_OPTIONS_SEND
+ return ip4_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0);
+}
+
+/**
+ * Same as ip_output_if() but with the possibility to include IP options:
+ *
+ * @param ip_options pointer to the IP options, copied into the IP header
+ * @param optlen length of ip_options
+ */
+err_t
+ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
+ u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options,
+ u16_t optlen)
+{
+#endif /* IP_OPTIONS_SEND */
+ const ip4_addr_t *src_used = src;
+ if (dest != LWIP_IP_HDRINCL) {
+ if (ip4_addr_isany(src)) {
+ src_used = netif_ip4_addr(netif);
+ }
+ }
+
+#if IP_OPTIONS_SEND
+ return ip4_output_if_opt_src(p, src_used, dest, ttl, tos, proto, netif,
+ ip_options, optlen);
+#else /* IP_OPTIONS_SEND */
+ return ip4_output_if_src(p, src_used, dest, ttl, tos, proto, netif);
+#endif /* IP_OPTIONS_SEND */
+}
+
+/**
+ * Same as ip_output_if() but 'src' address is not replaced by netif address
+ * when it is 'any'.
+ */
+err_t
+ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
+ u8_t ttl, u8_t tos,
+ u8_t proto, struct netif *netif)
+{
+#if IP_OPTIONS_SEND
+ return ip4_output_if_opt_src(p, src, dest, ttl, tos, proto, netif, NULL, 0);
+}
+
+/**
+ * Same as ip_output_if_opt() but 'src' address is not replaced by netif address
+ * when it is 'any'.
+ */
+err_t
+ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
+ u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options,
+ u16_t optlen)
+{
+#endif /* IP_OPTIONS_SEND */
+ struct ip_hdr *iphdr;
+ ip4_addr_t dest_addr;
+#if CHECKSUM_GEN_IP_INLINE
+ u32_t chk_sum = 0;
+#endif /* CHECKSUM_GEN_IP_INLINE */
+
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
+
+ MIB2_STATS_INC(mib2.ipoutrequests);
+
+ /* Should the IP header be generated or is it already included in p? */
+ if (dest != LWIP_IP_HDRINCL) {
+ u16_t ip_hlen = IP_HLEN;
+#if IP_OPTIONS_SEND
+ u16_t optlen_aligned = 0;
+ if (optlen != 0) {
+#if CHECKSUM_GEN_IP_INLINE
+ int i;
+#endif /* CHECKSUM_GEN_IP_INLINE */
+ if (optlen > (IP_HLEN_MAX - IP_HLEN)) {
+ /* optlen too long */
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: optlen too long\n"));
+ IP_STATS_INC(ip.err);
+ MIB2_STATS_INC(mib2.ipoutdiscards);
+ return ERR_VAL;
+ }
+ /* round up to a multiple of 4 */
+ optlen_aligned = (u16_t)((optlen + 3) & ~3);
+ ip_hlen = (u16_t)(ip_hlen + optlen_aligned);
+ /* First write in the IP options */
+ if (pbuf_add_header(p, optlen_aligned)) {
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: not enough room for IP options in pbuf\n"));
+ IP_STATS_INC(ip.err);
+ MIB2_STATS_INC(mib2.ipoutdiscards);
+ return ERR_BUF;
+ }
+ MEMCPY(p->payload, ip_options, optlen);
+ if (optlen < optlen_aligned) {
+ /* zero the remaining bytes */
+ memset(((char *)p->payload) + optlen, 0, (size_t)(optlen_aligned - optlen));
+ }
+#if CHECKSUM_GEN_IP_INLINE
+ for (i = 0; i < optlen_aligned / 2; i++) {
+ chk_sum += ((u16_t *)p->payload)[i];
+ }
+#endif /* CHECKSUM_GEN_IP_INLINE */
+ }
+#endif /* IP_OPTIONS_SEND */
+ /* generate IP header */
+ if (pbuf_add_header(p, IP_HLEN)) {
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: not enough room for IP header in pbuf\n"));
+
+ IP_STATS_INC(ip.err);
+ MIB2_STATS_INC(mib2.ipoutdiscards);
+ return ERR_BUF;
+ }
+
+ iphdr = (struct ip_hdr *)p->payload;
+ LWIP_ASSERT("check that first pbuf can hold struct ip_hdr",
+ (p->len >= sizeof(struct ip_hdr)));
+
+ IPH_TTL_SET(iphdr, ttl);
+ IPH_PROTO_SET(iphdr, proto);
+#if CHECKSUM_GEN_IP_INLINE
+ chk_sum += PP_NTOHS(proto | (ttl << 8));
+#endif /* CHECKSUM_GEN_IP_INLINE */
+
+ /* dest cannot be NULL here */
+ ip4_addr_copy(iphdr->dest, *dest);
+#if CHECKSUM_GEN_IP_INLINE
+ chk_sum += ip4_addr_get_u32(&iphdr->dest) & 0xFFFF;
+ chk_sum += ip4_addr_get_u32(&iphdr->dest) >> 16;
+#endif /* CHECKSUM_GEN_IP_INLINE */
+
+ IPH_VHL_SET(iphdr, 4, ip_hlen / 4);
+ IPH_TOS_SET(iphdr, tos);
+#if CHECKSUM_GEN_IP_INLINE
+ chk_sum += PP_NTOHS(tos | (iphdr->_v_hl << 8));
+#endif /* CHECKSUM_GEN_IP_INLINE */
+ IPH_LEN_SET(iphdr, lwip_htons(p->tot_len));
+#if CHECKSUM_GEN_IP_INLINE
+ chk_sum += iphdr->_len;
+#endif /* CHECKSUM_GEN_IP_INLINE */
+ IPH_OFFSET_SET(iphdr, 0);
+ IPH_ID_SET(iphdr, lwip_htons(ip_id));
+#if CHECKSUM_GEN_IP_INLINE
+ chk_sum += iphdr->_id;
+#endif /* CHECKSUM_GEN_IP_INLINE */
+ ++ip_id;
+
+ if (src == NULL) {
+ ip4_addr_copy(iphdr->src, *IP4_ADDR_ANY4);
+ } else {
+ /* src cannot be NULL here */
+ ip4_addr_copy(iphdr->src, *src);
+ }
+
+#if CHECKSUM_GEN_IP_INLINE
+ chk_sum += ip4_addr_get_u32(&iphdr->src) & 0xFFFF;
+ chk_sum += ip4_addr_get_u32(&iphdr->src) >> 16;
+ chk_sum = (chk_sum >> 16) + (chk_sum & 0xFFFF);
+ chk_sum = (chk_sum >> 16) + chk_sum;
+ chk_sum = ~chk_sum;
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) {
+ iphdr->_chksum = (u16_t)chk_sum; /* network order */
+ }
+#if LWIP_CHECKSUM_CTRL_PER_NETIF
+ else {
+ IPH_CHKSUM_SET(iphdr, 0);
+ }
+#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/
+#else /* CHECKSUM_GEN_IP_INLINE */
+ IPH_CHKSUM_SET(iphdr, 0);
+#if CHECKSUM_GEN_IP
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) {
+ IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen));
+ }
+#endif /* CHECKSUM_GEN_IP */
+#endif /* CHECKSUM_GEN_IP_INLINE */
+ } else {
+ /* IP header already included in p */
+ if (p->len < IP_HLEN) {
+ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: LWIP_IP_HDRINCL but pbuf is too short\n"));
+ IP_STATS_INC(ip.err);
+ MIB2_STATS_INC(mib2.ipoutdiscards);
+ return ERR_BUF;
+ }
+ iphdr = (struct ip_hdr *)p->payload;
+ ip4_addr_copy(dest_addr, iphdr->dest);
+ dest = &dest_addr;
+ }
+
+ IP_STATS_INC(ip.xmit);
+
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num));
+ ip4_debug_print(p);
+
+#if ENABLE_LOOPBACK
+ if (ip4_addr_cmp(dest, netif_ip4_addr(netif))
+#if !LWIP_HAVE_LOOPIF
+ || ip4_addr_isloopback(dest)
+#endif /* !LWIP_HAVE_LOOPIF */
+ ) {
+ /* Packet to self, enqueue it for loopback */
+    LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()\n"));
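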
+ return netif_loop_output(netif, p);
+ }
+#if LWIP_MULTICAST_TX_OPTIONS
+ if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) {
+ netif_loop_output(netif, p);
+ }
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+#endif /* ENABLE_LOOPBACK */
+#if IP_FRAG
+ /* don't fragment if interface has mtu set to 0 [loopif] */
+ if (netif->mtu && (p->tot_len > netif->mtu)) {
+ return ip4_frag(p, netif, dest);
+ }
+#endif /* IP_FRAG */
+
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: call netif->output()\n"));
+ return netif->output(netif, p, dest);
+}
+
+/**
+ * Simple interface to ip_output_if. It finds the outgoing network
+ * interface and calls upon ip_output_if to do the actual work.
+ *
+ * @param p the packet to send (p->payload points to the data, e.g. next
+ protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
+ IP header and p->payload points to that IP header)
+ * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the
+ * IP address of the netif used to send is used as source address)
+ * @param dest the destination IP address to send the packet to
+ * @param ttl the TTL value to be set in the IP header
+ * @param tos the TOS value to be set in the IP header
+ * @param proto the PROTOCOL to be set in the IP header
+ *
+ * @return ERR_RTE if no route is found
+ * see ip_output_if() for more return values
+ */
+err_t
+ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
+ u8_t ttl, u8_t tos, u8_t proto)
+{
+ struct netif *netif;
+
+ LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
+
+ if ((netif = ip4_route_src(src, dest)) == NULL) {
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest)));
+ IP_STATS_INC(ip.rterr);
+ return ERR_RTE;
+ }
+
+ return ip4_output_if(p, src, dest, ttl, tos, proto, netif);
+}
+
+#if LWIP_NETIF_USE_HINTS
+/** Like ip_output, but takes an addr_hint pointer that is passed on to netif->addr_hint
+ * before calling ip_output_if.
+ *
+ * @param p the packet to send (p->payload points to the data, e.g. next
+ protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
+ IP header and p->payload points to that IP header)
+ * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the
+ * IP address of the netif used to send is used as source address)
+ * @param dest the destination IP address to send the packet to
+ * @param ttl the TTL value to be set in the IP header
+ * @param tos the TOS value to be set in the IP header
+ * @param proto the PROTOCOL to be set in the IP header
+ * @param netif_hint netif output hint pointer set to netif->hint before
+ * calling ip_output_if()
+ *
+ * @return ERR_RTE if no route is found
+ * see ip_output_if() for more return values
+ */
+err_t
+ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest,
+ u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint)
+{
+ struct netif *netif;
+ err_t err;
+
+ LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
+
+ if ((netif = ip4_route_src(src, dest)) == NULL) {
+ LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest)));
+ IP_STATS_INC(ip.rterr);
+ return ERR_RTE;
+ }
+
+ NETIF_SET_HINTS(netif, netif_hint);
+ err = ip4_output_if(p, src, dest, ttl, tos, proto, netif);
+ NETIF_RESET_HINTS(netif);
+
+ return err;
+}
+#endif /* LWIP_NETIF_USE_HINTS*/
+
+#if IP_DEBUG
+/* Print an IP header by using LWIP_DEBUGF
+ * @param p an IP packet, p->payload pointing to the IP header
+ */
+void
+ip4_debug_print(struct pbuf *p)
+{
+ struct ip_hdr *iphdr = (struct ip_hdr *)p->payload;
+
+ LWIP_DEBUGF(IP_DEBUG, ("IP header:\n"));
+ LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n",
+ (u16_t)IPH_V(iphdr),
+ (u16_t)IPH_HL(iphdr),
+ (u16_t)IPH_TOS(iphdr),
+ lwip_ntohs(IPH_LEN(iphdr))));
+ LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n",
+ lwip_ntohs(IPH_ID(iphdr)),
+ (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 15 & 1),
+ (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 14 & 1),
+ (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 13 & 1),
+ (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK)));
+ LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n",
+ (u16_t)IPH_TTL(iphdr),
+ (u16_t)IPH_PROTO(iphdr),
+ lwip_ntohs(IPH_CHKSUM(iphdr))));
+ LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n",
+ ip4_addr1_16_val(iphdr->src),
+ ip4_addr2_16_val(iphdr->src),
+ ip4_addr3_16_val(iphdr->src),
+ ip4_addr4_16_val(iphdr->src)));
+ LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n",
+ ip4_addr1_16_val(iphdr->dest),
+ ip4_addr2_16_val(iphdr->dest),
+ ip4_addr3_16_val(iphdr->dest),
+ ip4_addr4_16_val(iphdr->dest)));
+ LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n"));
+}
+#endif /* IP_DEBUG */
+
+#endif /* LWIP_IPV4 */
diff --git a/lwip/src/core/ipv4/ip4_addr.c b/lwip/src/core/ipv4/ip4_addr.c
new file mode 100644
index 0000000..33204d1
--- /dev/null
+++ b/lwip/src/core/ipv4/ip4_addr.c
@@ -0,0 +1,321 @@
+/**
+ * @file
+ * This is the IPv4 address tools implementation.
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4
+
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+
+/* used by IP4_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */
+const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY);
+const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST);
+
+/**
+ * Determine if an address is a broadcast address on a network interface
+ *
+ * @param addr address to be checked
+ * @param netif the network interface against which the address is checked
+ * @return returns non-zero if the address is a broadcast address
+ */
+u8_t
+ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif)
+{
+ ip4_addr_t ipaddr;
+ ip4_addr_set_u32(&ipaddr, addr);
+
+ /* all ones (broadcast) or all zeroes (old skool broadcast) */
+ if ((~addr == IPADDR_ANY) ||
+ (addr == IPADDR_ANY)) {
+ return 1;
+ /* no broadcast support on this network interface? */
+ } else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) {
+ /* the given address cannot be a broadcast address
+ * nor can we check against any broadcast addresses */
+ return 0;
+ /* address matches network interface address exactly? => no broadcast */
+ } else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) {
+ return 0;
+ /* on the same (sub) network... */
+ } else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif))
+ /* ...and host identifier bits are all ones? =>... */
+ && ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) ==
+ (IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) {
+ /* => network broadcast address */
+ return 1;
+ } else {
+ return 0;
+ }
+}
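+
+/* A quick sketch of the cases above for a netif configured as 192.168.1.2/24:
+ * 255.255.255.255 and 0.0.0.0 yield 1 (all-ones/all-zeroes broadcast),
+ * 192.168.1.255 yields 1 (host bits all ones on the netif's subnet),
+ * 192.168.1.2 yields 0 (the interface address itself), and
+ * 10.0.0.255 yields 0 (host bits all ones, but on a different subnet). */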
+
+/** Checks if a netmask is valid (starting with ones, then only zeros)
+ *
+ * @param netmask the IPv4 netmask to check (in network byte order!)
+ * @return 1 if the netmask is valid, 0 if it is not
+ */
+u8_t
+ip4_addr_netmask_valid(u32_t netmask)
+{
+ u32_t mask;
+ u32_t nm_hostorder = lwip_htonl(netmask);
+
+ /* first, check for the first zero */
+ for (mask = 1UL << 31 ; mask != 0; mask >>= 1) {
+ if ((nm_hostorder & mask) == 0) {
+ break;
+ }
+ }
+  /* then check that no more one bits follow */
+ for (; mask != 0; mask >>= 1) {
+ if ((nm_hostorder & mask) != 0) {
+ /* there is a one after the first zero -> invalid */
+ return 0;
+ }
+ }
+  /* no one bits after the first zero -> valid */
+ return 1;
+}
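+
+/* A short sketch of the contiguity rule, with masks given in network byte
+ * order via PP_HTONL():
+ *
+ *   ip4_addr_netmask_valid(PP_HTONL(0xFFFFFF00UL)); // 255.255.255.0 -> 1
+ *   ip4_addr_netmask_valid(PP_HTONL(0xFF00FF00UL)); // 255.0.255.0   -> 0
+ *
+ * The second mask is rejected because a one follows the first zero.
+ */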
+
+/**
+ * Ascii internet address interpretation routine.
+ * The value returned is in network order.
+ *
+ * @param cp IP address in ascii representation (e.g. "127.0.0.1")
+ * @return ip address in network order
+ */
+u32_t
+ipaddr_addr(const char *cp)
+{
+ ip4_addr_t val;
+
+ if (ip4addr_aton(cp, &val)) {
+ return ip4_addr_get_u32(&val);
+ }
+ return (IPADDR_NONE);
+}
+
+/**
+ * Check whether "cp" is a valid ascii representation
+ * of an Internet address and convert to a binary address.
+ * Returns 1 if the address is valid, 0 if not.
+ * This replaces inet_addr, the return value from which
+ * cannot distinguish between failure and a local broadcast address.
+ *
+ * @param cp IP address in ascii representation (e.g. "127.0.0.1")
+ * @param addr pointer to which to save the ip address in network order
+ * @return 1 if cp could be converted to addr, 0 on failure
+ */
+int
+ip4addr_aton(const char *cp, ip4_addr_t *addr)
+{
+ u32_t val;
+ u8_t base;
+ char c;
+ u32_t parts[4];
+ u32_t *pp = parts;
+
+ c = *cp;
+ for (;;) {
+ /*
+ * Collect number up to ``.''.
+ * Values are specified as for C:
+ * 0x=hex, 0=octal, 1-9=decimal.
+ */
+ if (!lwip_isdigit(c)) {
+ return 0;
+ }
+ val = 0;
+ base = 10;
+ if (c == '0') {
+ c = *++cp;
+ if (c == 'x' || c == 'X') {
+ base = 16;
+ c = *++cp;
+ } else {
+ base = 8;
+ }
+ }
+ for (;;) {
+ if (lwip_isdigit(c)) {
+ val = (val * base) + (u32_t)(c - '0');
+ c = *++cp;
+ } else if (base == 16 && lwip_isxdigit(c)) {
+ val = (val << 4) | (u32_t)(c + 10 - (lwip_islower(c) ? 'a' : 'A'));
+ c = *++cp;
+ } else {
+ break;
+ }
+ }
+ if (c == '.') {
+ /*
+ * Internet format:
+ * a.b.c.d
+ * a.b.c (with c treated as 16 bits)
+ * a.b (with b treated as 24 bits)
+ */
+ if (pp >= parts + 3) {
+ return 0;
+ }
+ *pp++ = val;
+ c = *++cp;
+ } else {
+ break;
+ }
+ }
+ /*
+ * Check for trailing characters.
+ */
+ if (c != '\0' && !lwip_isspace(c)) {
+ return 0;
+ }
+ /*
+ * Concoct the address according to
+ * the number of parts specified.
+ */
+ switch (pp - parts + 1) {
+
+ case 0:
+ return 0; /* initial nondigit */
+
+ case 1: /* a -- 32 bits */
+ break;
+
+ case 2: /* a.b -- 8.24 bits */
+ if (val > 0xffffffUL) {
+ return 0;
+ }
+ if (parts[0] > 0xff) {
+ return 0;
+ }
+ val |= parts[0] << 24;
+ break;
+
+ case 3: /* a.b.c -- 8.8.16 bits */
+ if (val > 0xffff) {
+ return 0;
+ }
+ if ((parts[0] > 0xff) || (parts[1] > 0xff)) {
+ return 0;
+ }
+ val |= (parts[0] << 24) | (parts[1] << 16);
+ break;
+
+ case 4: /* a.b.c.d -- 8.8.8.8 bits */
+ if (val > 0xff) {
+ return 0;
+ }
+ if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) {
+ return 0;
+ }
+ val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8);
+ break;
+ default:
+ LWIP_ASSERT("unhandled", 0);
+ break;
+ }
+ if (addr) {
+ ip4_addr_set_u32(addr, lwip_htonl(val));
+ }
+ return 1;
+}
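+
+/* A sketch of the four accepted forms; each call below returns 1 and stores
+ * 10.1.2.3 in 'a' (an ip4_addr_t):
+ *
+ *   ip4addr_aton("10.1.2.3", &a);    // a.b.c.d
+ *   ip4addr_aton("10.1.515", &a);    // a.b.c: 515 = (2 << 8) | 3
+ *   ip4addr_aton("10.66051", &a);    // a.b:   66051 = 0x010203
+ *   ip4addr_aton("0x0a010203", &a);  // a:     one 32-bit value
+ *
+ * A string with more than four parts, e.g. "10.1.2.3.4", returns 0.
+ */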
+
+/**
+ * Convert numeric IP address into decimal dotted ASCII representation.
+ * returns ptr to static buffer; not reentrant!
+ *
+ * @param addr ip address in network order to convert
+ * @return pointer to a global static (!) buffer that holds the ASCII
+ * representation of addr
+ */
+char *
+ip4addr_ntoa(const ip4_addr_t *addr)
+{
+ static char str[IP4ADDR_STRLEN_MAX];
+ return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX);
+}
+
+/**
+ * Same as ip4addr_ntoa, but reentrant since a user-supplied buffer is used.
+ *
+ * @param addr ip address in network order to convert
+ * @param buf target buffer where the string is stored
+ * @param buflen length of buf
+ * @return either pointer to buf which now holds the ASCII
+ * representation of addr or NULL if buf was too small
+ */
+char *
+ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen)
+{
+ u32_t s_addr;
+ char inv[3];
+ char *rp;
+ u8_t *ap;
+ u8_t rem;
+ u8_t n;
+ u8_t i;
+ int len = 0;
+
+ s_addr = ip4_addr_get_u32(addr);
+
+ rp = buf;
+ ap = (u8_t *)&s_addr;
+ for (n = 0; n < 4; n++) {
+ i = 0;
+ do {
+ rem = *ap % (u8_t)10;
+ *ap /= (u8_t)10;
+ inv[i++] = (char)('0' + rem);
+ } while (*ap);
+ while (i--) {
+ if (len++ >= buflen) {
+ return NULL;
+ }
+ *rp++ = inv[i];
+ }
+ if (len++ >= buflen) {
+ return NULL;
+ }
+ *rp++ = '.';
+ ap++;
+ }
+ *--rp = 0;
+ return buf;
+}
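+
+/* Minimal usage sketch: a caller-supplied buffer avoids the shared static
+ * buffer that ip4addr_ntoa() returns:
+ *
+ *   char buf[IP4ADDR_STRLEN_MAX];
+ *   if (ip4addr_ntoa_r(netif_ip4_addr(netif), buf, sizeof(buf)) != NULL) {
+ *     // buf now holds e.g. "192.168.0.2"
+ *   }
+ */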
+
+#endif /* LWIP_IPV4 */
diff --git a/lwip/src/core/ipv4/ip4_frag.c b/lwip/src/core/ipv4/ip4_frag.c
new file mode 100644
index 0000000..a445530
--- /dev/null
+++ b/lwip/src/core/ipv4/ip4_frag.c
@@ -0,0 +1,894 @@
+/**
+ * @file
+ * This is the IPv4 packet segmentation and reassembly implementation.
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Jani Monoses <jani@iv.ro>
+ * Simon Goldschmidt
+ * original reassembly code by Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4
+
+#include "lwip/ip4_frag.h"
+#include "lwip/def.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/stats.h"
+#include "lwip/icmp.h"
+
+#include <string.h>
+
+#if IP_REASSEMBLY
+/**
+ * The IP reassembly code currently has the following limitations:
+ * - IP header options are not supported
+ * - fragments must not overlap (e.g. due to different routes),
+ * currently, overlapping or duplicate fragments are thrown away
+ * if IP_REASS_CHECK_OVERLAP=1 (the default)!
+ *
+ * @todo: work with IP header options
+ */
+
+/** Set this to 0 to turn off checking the fragments for overlapping
+ * regions. The code gets a little smaller. Only use this if you know that
+ * overlapping won't occur on your network! */
+#ifndef IP_REASS_CHECK_OVERLAP
+#define IP_REASS_CHECK_OVERLAP 1
+#endif /* IP_REASS_CHECK_OVERLAP */
+
+/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is
+ * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller.
+ * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA
+ * is set to 1, so that only one datagram can be reassembled at a time. */
+#ifndef IP_REASS_FREE_OLDEST
+#define IP_REASS_FREE_OLDEST 1
+#endif /* IP_REASS_FREE_OLDEST */
+
+#define IP_REASS_FLAG_LASTFRAG 0x01
+
+#define IP_REASS_VALIDATE_TELEGRAM_FINISHED 1
+#define IP_REASS_VALIDATE_PBUF_QUEUED 0
+#define IP_REASS_VALIDATE_PBUF_DROPPED -1
+
+/** This is a helper struct which holds the starting
+ * offset and the ending offset of this fragment to
+ * easily chain the fragments.
+ * It has the same packing requirements as the IP header, since it replaces
+ * the IP header in memory in incoming fragments (after copying it) to keep
+ * track of the various fragments. (-> If the IP header doesn't need packing,
+ * this struct doesn't need packing either.)
+ */
+#ifdef PACK_STRUCT_USE_INCLUDES
+# include "arch/bpstruct.h"
+#endif
+PACK_STRUCT_BEGIN
+struct ip_reass_helper {
+ PACK_STRUCT_FIELD(struct pbuf *next_pbuf);
+ PACK_STRUCT_FIELD(u16_t start);
+ PACK_STRUCT_FIELD(u16_t end);
+} PACK_STRUCT_STRUCT;
+PACK_STRUCT_END
+#ifdef PACK_STRUCT_USE_INCLUDES
+# include "arch/epstruct.h"
+#endif
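+
+/* Overlay sketch: once a fragment is queued, the first bytes of its
+ * p->payload no longer hold the IP header (a copy lives in struct
+ * ip_reassdata) but this helper. A fragment covering payload bytes
+ * 1480..2959 is stored as { next_pbuf, start = 1480, end = 2960 },
+ * i.e. 'end' is exclusive. */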
+
+#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \
+ (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \
+ ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \
+ IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0
+
+/* global variables */
+static struct ip_reassdata *reassdatagrams;
+static u16_t ip_reass_pbufcount;
+
+/* function prototypes */
+static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
+static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
+
+/**
+ * Reassembly timer base function
+ * for both NO_SYS == 0 and 1 (!).
+ *
+ * Should be called every 1000 msec (defined by IP_TMR_INTERVAL).
+ */
+void
+ip_reass_tmr(void)
+{
+ struct ip_reassdata *r, *prev = NULL;
+
+ r = reassdatagrams;
+ while (r != NULL) {
+ /* Decrement the timer. Once it reaches 0,
+ * clean up the incomplete fragment assembly */
+ if (r->timer > 0) {
+ r->timer--;
+ LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n", (u16_t)r->timer));
+ prev = r;
+ r = r->next;
+ } else {
+ /* reassembly timed out */
+ struct ip_reassdata *tmp;
+ LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n"));
+ tmp = r;
+ /* get the next pointer before freeing */
+ r = r->next;
+ /* free the helper struct and all enqueued pbufs */
+ ip_reass_free_complete_datagram(tmp, prev);
+ }
+ }
+}
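+
+/* Timing sketch: with the default IP_REASS_MAXAGE of 15 and the 1000 ms
+ * IP_TMR_INTERVAL, an incomplete datagram is discarded roughly 15 seconds
+ * after its first fragment arrived. */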
+
+/**
+ * Free a datagram (struct ip_reassdata) and all its pbufs.
+ * Updates the total count of enqueued pbufs (ip_reass_pbufcount),
+ * SNMP counters and sends an ICMP time exceeded packet.
+ *
+ * @param ipr datagram to free
+ * @param prev the previous datagram in the linked list
+ * @return the number of pbufs freed
+ */
+static int
+ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
+{
+ u16_t pbufs_freed = 0;
+ u16_t clen;
+ struct pbuf *p;
+ struct ip_reass_helper *iprh;
+
+ LWIP_ASSERT("prev != ipr", prev != ipr);
+ if (prev != NULL) {
+ LWIP_ASSERT("prev->next == ipr", prev->next == ipr);
+ }
+
+ MIB2_STATS_INC(mib2.ipreasmfails);
+#if LWIP_ICMP
+ iprh = (struct ip_reass_helper *)ipr->p->payload;
+ if (iprh->start == 0) {
+ /* The first fragment was received, send ICMP time exceeded. */
+ /* First, de-queue the first pbuf from r->p. */
+ p = ipr->p;
+ ipr->p = iprh->next_pbuf;
+ /* Then, copy the original header into it. */
+ SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN);
+ icmp_time_exceeded(p, ICMP_TE_FRAG);
+ clen = pbuf_clen(p);
+ LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
+ pbufs_freed = (u16_t)(pbufs_freed + clen);
+ pbuf_free(p);
+ }
+#endif /* LWIP_ICMP */
+
+ /* First, free all received pbufs. The individual pbufs need to be released
+ separately as they have not yet been chained */
+ p = ipr->p;
+ while (p != NULL) {
+ struct pbuf *pcur;
+ iprh = (struct ip_reass_helper *)p->payload;
+ pcur = p;
+ /* get the next pointer before freeing */
+ p = iprh->next_pbuf;
+ clen = pbuf_clen(pcur);
+ LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
+ pbufs_freed = (u16_t)(pbufs_freed + clen);
+ pbuf_free(pcur);
+ }
+ /* Then, unchain the struct ip_reassdata from the list and free it. */
+ ip_reass_dequeue_datagram(ipr, prev);
+ LWIP_ASSERT("ip_reass_pbufcount >= pbufs_freed", ip_reass_pbufcount >= pbufs_freed);
+ ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - pbufs_freed);
+
+ return pbufs_freed;
+}
+
+#if IP_REASS_FREE_OLDEST
+/**
+ * Free the oldest datagram to make room for enqueueing new fragments.
+ * The datagram 'fraghdr' belongs to is not freed!
+ *
+ * @param fraghdr IP header of the current fragment
+ * @param pbufs_needed number of pbufs needed to enqueue
+ * (used for freeing other datagrams if not enough space)
+ * @return the number of pbufs freed
+ */
+static int
+ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed)
+{
+ /* @todo Can't we simply remove the last datagram in the
+ * linked list behind reassdatagrams?
+ */
+ struct ip_reassdata *r, *oldest, *prev, *oldest_prev;
+ int pbufs_freed = 0, pbufs_freed_current;
+ int other_datagrams;
+
+ /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs,
+ * but don't free the datagram that 'fraghdr' belongs to! */
+ do {
+ oldest = NULL;
+ prev = NULL;
+ oldest_prev = NULL;
+ other_datagrams = 0;
+ r = reassdatagrams;
+ while (r != NULL) {
+ if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) {
+ /* Not the same datagram as fraghdr */
+ other_datagrams++;
+ if (oldest == NULL) {
+ oldest = r;
+ oldest_prev = prev;
+ } else if (r->timer <= oldest->timer) {
+ /* older than the previous oldest */
+ oldest = r;
+ oldest_prev = prev;
+ }
+ }
+ if (r->next != NULL) {
+ prev = r;
+ }
+ r = r->next;
+ }
+ if (oldest != NULL) {
+ pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev);
+ pbufs_freed += pbufs_freed_current;
+ }
+ } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1));
+ return pbufs_freed;
+}
+#endif /* IP_REASS_FREE_OLDEST */
+
+/**
+ * Enqueues a new fragment into the fragment queue
+ * @param fraghdr points to the new fragments IP hdr
+ * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space)
+ * @return A pointer to the queue location into which the fragment was enqueued
+ */
+static struct ip_reassdata *
+ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen)
+{
+ struct ip_reassdata *ipr;
+#if ! IP_REASS_FREE_OLDEST
+ LWIP_UNUSED_ARG(clen);
+#endif
+
+ /* No matching previous fragment found, allocate a new reassdata struct */
+ ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
+ if (ipr == NULL) {
+#if IP_REASS_FREE_OLDEST
+ if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) {
+ ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
+ }
+ if (ipr == NULL)
+#endif /* IP_REASS_FREE_OLDEST */
+ {
+ IPFRAG_STATS_INC(ip_frag.memerr);
+ LWIP_DEBUGF(IP_REASS_DEBUG, ("Failed to alloc reassdata struct\n"));
+ return NULL;
+ }
+ }
+ memset(ipr, 0, sizeof(struct ip_reassdata));
+ ipr->timer = IP_REASS_MAXAGE;
+
+ /* enqueue the new structure to the front of the list */
+ ipr->next = reassdatagrams;
+ reassdatagrams = ipr;
+ /* copy the ip header for later tests and input */
+ /* @todo: no ip options supported? */
+ SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN);
+ return ipr;
+}
+
+/**
+ * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs.
+ * @param ipr points to the queue entry to dequeue
+ */
+static void
+ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
+{
+ /* dequeue the reass struct */
+ if (reassdatagrams == ipr) {
+ /* it was the first in the list */
+ reassdatagrams = ipr->next;
+ } else {
+ /* it wasn't the first, so it must have a valid 'prev' */
+ LWIP_ASSERT("sanity check linked list", prev != NULL);
+ prev->next = ipr->next;
+ }
+
+ /* now we can free the ip_reassdata struct */
+ memp_free(MEMP_REASSDATA, ipr);
+}
+
+/**
+ * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list
+ * will grow over time as new fragments are received.
+ * Also checks that the datagram passes basic continuity checks (if the last
+ * fragment was received at least once).
+ * @param ipr points to the reassembly state
+ * @param new_p points to the pbuf for the current fragment
+ * @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet)
+ * @return see IP_REASS_VALIDATE_* defines
+ */
+static int
+ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last)
+{
+ struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL;
+ struct pbuf *q;
+ u16_t offset, len;
+ u8_t hlen;
+ struct ip_hdr *fraghdr;
+ int valid = 1;
+
+ /* Extract length and fragment offset from current fragment */
+ fraghdr = (struct ip_hdr *)new_p->payload;
+ len = lwip_ntohs(IPH_LEN(fraghdr));
+ hlen = IPH_HL_BYTES(fraghdr);
+ if (hlen > len) {
+ /* invalid datagram */
+ return IP_REASS_VALIDATE_PBUF_DROPPED;
+ }
+ len = (u16_t)(len - hlen);
+ offset = IPH_OFFSET_BYTES(fraghdr);
+
+ /* overwrite the fragment's ip header from the pbuf with our helper struct,
+ * and setup the embedded helper structure. */
+ /* make sure the struct ip_reass_helper fits into the IP header */
+ LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN",
+ sizeof(struct ip_reass_helper) <= IP_HLEN);
+ iprh = (struct ip_reass_helper *)new_p->payload;
+ iprh->next_pbuf = NULL;
+ iprh->start = offset;
+ iprh->end = (u16_t)(offset + len);
+ if (iprh->end < offset) {
+ /* u16_t overflow, cannot handle this */
+ return IP_REASS_VALIDATE_PBUF_DROPPED;
+ }
+
+ /* Iterate through until we either get to the end of the list (append),
+ * or we find one with a larger offset (insert). */
+ for (q = ipr->p; q != NULL;) {
+ iprh_tmp = (struct ip_reass_helper *)q->payload;
+ if (iprh->start < iprh_tmp->start) {
+ /* the new pbuf should be inserted before this */
+ iprh->next_pbuf = q;
+ if (iprh_prev != NULL) {
+ /* not the fragment with the lowest offset */
+#if IP_REASS_CHECK_OVERLAP
+ if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) {
+ /* fragment overlaps with previous or following, throw away */
+ return IP_REASS_VALIDATE_PBUF_DROPPED;
+ }
+#endif /* IP_REASS_CHECK_OVERLAP */
+ iprh_prev->next_pbuf = new_p;
+ if (iprh_prev->end != iprh->start) {
+ /* There is a fragment missing between the current
+ * and the previous fragment */
+ valid = 0;
+ }
+ } else {
+#if IP_REASS_CHECK_OVERLAP
+ if (iprh->end > iprh_tmp->start) {
+ /* fragment overlaps with following, throw away */
+ return IP_REASS_VALIDATE_PBUF_DROPPED;
+ }
+#endif /* IP_REASS_CHECK_OVERLAP */
+ /* fragment with the lowest offset */
+ ipr->p = new_p;
+ }
+ break;
+ } else if (iprh->start == iprh_tmp->start) {
+ /* received the same datagram twice: no need to keep the datagram */
+ return IP_REASS_VALIDATE_PBUF_DROPPED;
+#if IP_REASS_CHECK_OVERLAP
+ } else if (iprh->start < iprh_tmp->end) {
+ /* overlap: no need to keep the new datagram */
+ return IP_REASS_VALIDATE_PBUF_DROPPED;
+#endif /* IP_REASS_CHECK_OVERLAP */
+ } else {
+ /* Check if the fragments received so far have no holes. */
+ if (iprh_prev != NULL) {
+ if (iprh_prev->end != iprh_tmp->start) {
+ /* There is a fragment missing between the current
+ * and the previous fragment */
+ valid = 0;
+ }
+ }
+ }
+ q = iprh_tmp->next_pbuf;
+ iprh_prev = iprh_tmp;
+ }
+
+ /* If q is NULL, then we made it to the end of the list. Determine what to do now */
+ if (q == NULL) {
+ if (iprh_prev != NULL) {
+      /* this is (for now) the fragment with the highest offset:
+ * chain it to the last fragment */
+#if IP_REASS_CHECK_OVERLAP
+ LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
+#endif /* IP_REASS_CHECK_OVERLAP */
+ iprh_prev->next_pbuf = new_p;
+ if (iprh_prev->end != iprh->start) {
+ valid = 0;
+ }
+ } else {
+#if IP_REASS_CHECK_OVERLAP
+ LWIP_ASSERT("no previous fragment, this must be the first fragment!",
+ ipr->p == NULL);
+#endif /* IP_REASS_CHECK_OVERLAP */
+ /* this is the first fragment we ever received for this ip datagram */
+ ipr->p = new_p;
+ }
+ }
+
+ /* At this point, the validation part begins: */
+ /* If we already received the last fragment */
+ if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) {
+ /* and had no holes so far */
+ if (valid) {
+ /* then check if the rest of the fragments is here */
+ /* Check if the queue starts with the first datagram */
+ if ((ipr->p == NULL) || (((struct ip_reass_helper *)ipr->p->payload)->start != 0)) {
+ valid = 0;
+ } else {
+ /* and check that there are no holes after this datagram */
+ iprh_prev = iprh;
+ q = iprh->next_pbuf;
+ while (q != NULL) {
+ iprh = (struct ip_reass_helper *)q->payload;
+ if (iprh_prev->end != iprh->start) {
+ valid = 0;
+ break;
+ }
+ iprh_prev = iprh;
+ q = iprh->next_pbuf;
+ }
+        /* if still valid, all fragments have been received
+         * (because the fragment with MF==0 has already arrived) */
+ if (valid) {
+ LWIP_ASSERT("sanity check", ipr->p != NULL);
+ LWIP_ASSERT("sanity check",
+ ((struct ip_reass_helper *)ipr->p->payload) != iprh);
+ LWIP_ASSERT("validate_datagram:next_pbuf!=NULL",
+ iprh->next_pbuf == NULL);
+ }
+ }
+ }
+ /* If valid is 0 here, there are some fragments missing in the middle
+ * (since MF == 0 has already arrived). Such datagrams simply time out if
+ * no more fragments are received... */
+ return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED;
+ }
+  /* If we come here, not all fragments have been received yet! */
+ return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */
+}
+
+/**
+ * Reassembles incoming IP fragments into an IP datagram.
+ *
+ * @param p points to a pbuf chain of the fragment
+ * @return NULL if reassembly is incomplete, pointer to the fully reassembled datagram otherwise
+ */
+struct pbuf *
+ip4_reass(struct pbuf *p)
+{
+ struct pbuf *r;
+ struct ip_hdr *fraghdr;
+ struct ip_reassdata *ipr;
+ struct ip_reass_helper *iprh;
+ u16_t offset, len, clen;
+ u8_t hlen;
+ int valid;
+ int is_last;
+
+ IPFRAG_STATS_INC(ip_frag.recv);
+ MIB2_STATS_INC(mib2.ipreasmreqds);
+
+ fraghdr = (struct ip_hdr *)p->payload;
+
+ if (IPH_HL_BYTES(fraghdr) != IP_HLEN) {
+ LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: IP options currently not supported!\n"));
+ IPFRAG_STATS_INC(ip_frag.err);
+ goto nullreturn;
+ }
+
+ offset = IPH_OFFSET_BYTES(fraghdr);
+ len = lwip_ntohs(IPH_LEN(fraghdr));
+ hlen = IPH_HL_BYTES(fraghdr);
+ if (hlen > len) {
+ /* invalid datagram */
+ goto nullreturn;
+ }
+ len = (u16_t)(len - hlen);
+
+ /* Check if we are allowed to enqueue more datagrams. */
+ clen = pbuf_clen(p);
+ if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
+#if IP_REASS_FREE_OLDEST
+ if (!ip_reass_remove_oldest_datagram(fraghdr, clen) ||
+ ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS))
+#endif /* IP_REASS_FREE_OLDEST */
+ {
+ /* No datagram could be freed and still too many pbufs enqueued */
+ LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n",
+ ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS));
+ IPFRAG_STATS_INC(ip_frag.memerr);
+ /* @todo: send ICMP time exceeded here? */
+ /* drop this pbuf */
+ goto nullreturn;
+ }
+ }
+
+ /* Look for the datagram the fragment belongs to in the current datagram queue,
+ * remembering the previous in the queue for later dequeueing. */
+ for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) {
+ /* Check if the incoming fragment matches the one currently present
+ in the reassembly buffer. If so, we proceed with copying the
+ fragment into the buffer. */
+ if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) {
+ LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n",
+ lwip_ntohs(IPH_ID(fraghdr))));
+ IPFRAG_STATS_INC(ip_frag.cachehit);
+ break;
+ }
+ }
+
+ if (ipr == NULL) {
+ /* Enqueue a new datagram into the datagram queue */
+ ipr = ip_reass_enqueue_new_datagram(fraghdr, clen);
+ /* Bail if unable to enqueue */
+ if (ipr == NULL) {
+ goto nullreturn;
+ }
+ } else {
+ if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) &&
+ ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) {
+ /* ipr->iphdr is not the header from the first fragment, but fraghdr is
+ * -> copy fraghdr into ipr->iphdr since we want to have the header
+ * of the first fragment (for ICMP time exceeded and later, for copying
+ * all options, if supported)*/
+ SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN);
+ }
+ }
+
+  /* At this point, we have either created a new entry or are pointing
+   * to an existing one */
+
+ /* check for 'no more fragments', and update queue entry*/
+ is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0;
+ if (is_last) {
+ u16_t datagram_len = (u16_t)(offset + len);
+ if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) {
+ /* u16_t overflow, cannot handle this */
+ goto nullreturn_ipr;
+ }
+ }
+ /* find the right place to insert this pbuf */
+ /* @todo: trim pbufs if fragments are overlapping */
+ valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last);
+ if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) {
+ goto nullreturn_ipr;
+ }
+ /* if we come here, the pbuf has been enqueued */
+
+  /* Track the number of pbufs currently 'in-flight', in order to limit
+ the number of fragments that may be enqueued at any one time
+ (overflow checked by testing against IP_REASS_MAX_PBUFS) */
+ ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen);
+ if (is_last) {
+ u16_t datagram_len = (u16_t)(offset + len);
+ ipr->datagram_len = datagram_len;
+ ipr->flags |= IP_REASS_FLAG_LASTFRAG;
+ LWIP_DEBUGF(IP_REASS_DEBUG,
+ ("ip4_reass: last fragment seen, total len %"S16_F"\n",
+ ipr->datagram_len));
+ }
+
+ if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) {
+ struct ip_reassdata *ipr_prev;
+    /* the very last fragment (flag more fragments = 0) was received at least
+     * once AND all fragments have been received */
+ u16_t datagram_len = (u16_t)(ipr->datagram_len + IP_HLEN);
+
+ /* save the second pbuf before copying the header over the pointer */
+ r = ((struct ip_reass_helper *)ipr->p->payload)->next_pbuf;
+
+ /* copy the original ip header back to the first pbuf */
+ fraghdr = (struct ip_hdr *)(ipr->p->payload);
+ SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN);
+ IPH_LEN_SET(fraghdr, lwip_htons(datagram_len));
+ IPH_OFFSET_SET(fraghdr, 0);
+ IPH_CHKSUM_SET(fraghdr, 0);
+ /* @todo: do we need to set/calculate the correct checksum? */
+#if CHECKSUM_GEN_IP
+ IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) {
+ IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN));
+ }
+#endif /* CHECKSUM_GEN_IP */
+
+ p = ipr->p;
+
+ /* chain together the pbufs contained within the reass_data list. */
+ while (r != NULL) {
+ iprh = (struct ip_reass_helper *)r->payload;
+
+ /* hide the ip header for every succeeding fragment */
+ pbuf_remove_header(r, IP_HLEN);
+ pbuf_cat(p, r);
+ r = iprh->next_pbuf;
+ }
+
+ /* find the previous entry in the linked list */
+ if (ipr == reassdatagrams) {
+ ipr_prev = NULL;
+ } else {
+ for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
+ if (ipr_prev->next == ipr) {
+ break;
+ }
+ }
+ }
+
+    /* release the resources allocated for the fragment queue entry */
+ ip_reass_dequeue_datagram(ipr, ipr_prev);
+
+ /* and adjust the number of pbufs currently queued for reassembly. */
+ clen = pbuf_clen(p);
+ LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= clen);
+ ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - clen);
+
+ MIB2_STATS_INC(mib2.ipreasmoks);
+
+ /* Return the pbuf chain */
+ return p;
+ }
+ /* the datagram is not (yet?) reassembled completely */
+ LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount));
+ return NULL;
+
+nullreturn_ipr:
+ LWIP_ASSERT("ipr != NULL", ipr != NULL);
+ if (ipr->p == NULL) {
+ /* dropped pbuf after creating a new datagram entry: remove the entry, too */
+ LWIP_ASSERT("not firstalthough just enqueued", ipr == reassdatagrams);
+ ip_reass_dequeue_datagram(ipr, NULL);
+ }
+
+nullreturn:
+ LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: nullreturn\n"));
+ IPFRAG_STATS_INC(ip_frag.drop);
+ pbuf_free(p);
+ return NULL;
+}
+#endif /* IP_REASSEMBLY */
+
+#if IP_FRAG
+#if !LWIP_NETIF_TX_SINGLE_PBUF
+/** Allocate a new struct pbuf_custom_ref */
+static struct pbuf_custom_ref *
+ip_frag_alloc_pbuf_custom_ref(void)
+{
+ return (struct pbuf_custom_ref *)memp_malloc(MEMP_FRAG_PBUF);
+}
+
+/** Free a struct pbuf_custom_ref */
+static void
+ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref *p)
+{
+ LWIP_ASSERT("p != NULL", p != NULL);
+ memp_free(MEMP_FRAG_PBUF, p);
+}
+
+/** Free-callback function to free a 'struct pbuf_custom_ref', called by
+ * pbuf_free. */
+static void
+ipfrag_free_pbuf_custom(struct pbuf *p)
+{
+ struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref *)p;
+ LWIP_ASSERT("pcr != NULL", pcr != NULL);
+ LWIP_ASSERT("pcr == p", (void *)pcr == (void *)p);
+ if (pcr->original != NULL) {
+ pbuf_free(pcr->original);
+ }
+ ip_frag_free_pbuf_custom_ref(pcr);
+}
+#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
+
+/**
+ * Fragment an IP datagram if too large for the netif.
+ *
+ * Chop the datagram in MTU sized chunks and send them in order
+ * by pointing PBUF_REFs into p.
+ *
+ * @param p ip packet to send
+ * @param netif the netif on which to send
+ * @param dest destination ip address to which to send
+ *
+ * @return ERR_OK if sent successfully, err_t otherwise
+ */
+err_t
+ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest)
+{
+ struct pbuf *rambuf;
+#if !LWIP_NETIF_TX_SINGLE_PBUF
+ struct pbuf *newpbuf;
+ u16_t newpbuflen = 0;
+ u16_t left_to_copy;
+#endif
+ struct ip_hdr *original_iphdr;
+ struct ip_hdr *iphdr;
+ const u16_t nfb = (u16_t)((netif->mtu - IP_HLEN) / 8);
+ u16_t left, fragsize;
+ u16_t ofo;
+ int last;
+ u16_t poff = IP_HLEN;
+ u16_t tmp;
+ int mf_set;
+
+ original_iphdr = (struct ip_hdr *)p->payload;
+ iphdr = original_iphdr;
+ if (IPH_HL_BYTES(iphdr) != IP_HLEN) {
+ /* ip4_frag() does not support IP options */
+ return ERR_VAL;
+ }
+ LWIP_ERROR("ip4_frag(): pbuf too short", p->len >= IP_HLEN, return ERR_VAL);
+
+ /* Save original offset */
+ tmp = lwip_ntohs(IPH_OFFSET(iphdr));
+ ofo = tmp & IP_OFFMASK;
+ /* already fragmented? if so, the last fragment we create must have MF, too */
+ mf_set = tmp & IP_MF;
+
+ left = (u16_t)(p->tot_len - IP_HLEN);
+
+ while (left) {
+ /* Fill this fragment */
+ fragsize = LWIP_MIN(left, (u16_t)(nfb * 8));
+
+#if LWIP_NETIF_TX_SINGLE_PBUF
+ rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM);
+ if (rambuf == NULL) {
+ goto memerr;
+ }
+ LWIP_ASSERT("this needs a pbuf in one piece!",
+ (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
+ poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff);
+ /* make room for the IP header */
+ if (pbuf_add_header(rambuf, IP_HLEN)) {
+ pbuf_free(rambuf);
+ goto memerr;
+ }
+ /* fill in the IP header */
+ SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
+ iphdr = (struct ip_hdr *)rambuf->payload;
+#else /* LWIP_NETIF_TX_SINGLE_PBUF */
+ /* When not using a static buffer, create a chain of pbufs.
+ * The first will be a PBUF_RAM holding the link and IP header.
+ * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
+ * but limited to the size of an mtu.
+ */
+ rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM);
+ if (rambuf == NULL) {
+ goto memerr;
+ }
+ LWIP_ASSERT("this needs a pbuf in one piece!",
+ (rambuf->len >= (IP_HLEN)));
+ SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
+ iphdr = (struct ip_hdr *)rambuf->payload;
+
+ left_to_copy = fragsize;
+ while (left_to_copy) {
+ struct pbuf_custom_ref *pcr;
+ u16_t plen = (u16_t)(p->len - poff);
+ LWIP_ASSERT("p->len >= poff", p->len >= poff);
+ newpbuflen = LWIP_MIN(left_to_copy, plen);
+ /* Is this pbuf already empty? */
+ if (!newpbuflen) {
+ poff = 0;
+ p = p->next;
+ continue;
+ }
+ pcr = ip_frag_alloc_pbuf_custom_ref();
+ if (pcr == NULL) {
+ pbuf_free(rambuf);
+ goto memerr;
+ }
+ /* Mirror this pbuf, although we might not need all of it. */
+ newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc,
+ (u8_t *)p->payload + poff, newpbuflen);
+ if (newpbuf == NULL) {
+ ip_frag_free_pbuf_custom_ref(pcr);
+ pbuf_free(rambuf);
+ goto memerr;
+ }
+ pbuf_ref(p);
+ pcr->original = p;
+ pcr->pc.custom_free_function = ipfrag_free_pbuf_custom;
+
+ /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
+ * so that it is removed when pbuf_dechain is later called on rambuf.
+ */
+ pbuf_cat(rambuf, newpbuf);
+ left_to_copy = (u16_t)(left_to_copy - newpbuflen);
+ if (left_to_copy) {
+ poff = 0;
+ p = p->next;
+ }
+ }
+ poff = (u16_t)(poff + newpbuflen);
+#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
+
+ /* Correct header */
+ last = (left <= netif->mtu - IP_HLEN);
+
+ /* Set new offset and MF flag */
+ tmp = (IP_OFFMASK & (ofo));
+ if (!last || mf_set) {
+ /* the last fragment has MF set if the input frame had it */
+ tmp = tmp | IP_MF;
+ }
+ IPH_OFFSET_SET(iphdr, lwip_htons(tmp));
+ IPH_LEN_SET(iphdr, lwip_htons((u16_t)(fragsize + IP_HLEN)));
+ IPH_CHKSUM_SET(iphdr, 0);
+#if CHECKSUM_GEN_IP
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) {
+ IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));
+ }
+#endif /* CHECKSUM_GEN_IP */
+
+ /* No need for separate header pbuf - we allowed room for it in rambuf
+ * when allocated.
+ */
+ netif->output(netif, rambuf, dest);
+ IPFRAG_STATS_INC(ip_frag.xmit);
+
+ /* Unfortunately we can't reuse rambuf - the hardware may still be
+ * using the buffer. Instead we free it (and the ensuing chain) and
+ * recreate it next time round the loop. If we're lucky the hardware
+ * will have already sent the packet, the free will really free, and
+ * there will be zero memory penalty.
+ */
+
+ pbuf_free(rambuf);
+ left = (u16_t)(left - fragsize);
+ ofo = (u16_t)(ofo + nfb);
+ }
+ MIB2_STATS_INC(mib2.ipfragoks);
+ return ERR_OK;
+memerr:
+ MIB2_STATS_INC(mib2.ipfragfails);
+ return ERR_MEM;
+}
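+
+/* Sizing sketch: with an Ethernet mtu of 1500, nfb = (1500 - 20) / 8 = 185,
+ * so every non-final fragment carries 185 * 8 = 1480 payload bytes and 'ofo'
+ * advances by 185 eight-byte units per iteration. A 4000-byte payload thus
+ * leaves as fragments of 1480, 1480 and 1040 bytes. */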
+#endif /* IP_FRAG */
+
+#endif /* LWIP_IPV4 */
diff --git a/lwip/src/core/ipv6/dhcp6.c b/lwip/src/core/ipv6/dhcp6.c
new file mode 100644
index 0000000..7cf98a5
--- /dev/null
+++ b/lwip/src/core/ipv6/dhcp6.c
@@ -0,0 +1,812 @@
+/**
+ * @file
+ *
+ * @defgroup dhcp6 DHCPv6
+ * @ingroup ip6
+ * DHCPv6 client: IPv6 address autoconfiguration as per
+ * RFC 3315 (stateful DHCPv6) and
+ * RFC 3736 (stateless DHCPv6).
+ *
+ * For now, only stateless DHCPv6 is implemented!
+ *
+ * TODO:
+ * - enable/disable API to not always start when RA is received
+ * - stateful DHCPv6 (for now, only stateless DHCPv6 for DNS and NTP servers works)
+ * - create Client Identifier?
+ * - only start requests if a valid local address is available on the netif
+ * - only start information requests if required (not for every RA)
+ *
+ * dhcp6_enable_stateful() enables stateful DHCPv6 for a netif (stateless disabled)\n
+ * dhcp6_enable_stateless() enables stateless DHCPv6 for a netif (stateful disabled)\n
+ * dhcp6_disable() disables DHCPv6 for a netif
+ *
+ * When enabled, requests are only issued after receipt of RA with the
+ * corresponding bits set.
+ */
+
+/*
+ * Copyright (c) 2018 Simon Goldschmidt
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt <goldsimon@gmx.de>
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/dhcp6.h"
+#include "lwip/prot/dhcp6.h"
+#include "lwip/def.h"
+#include "lwip/udp.h"
+#include "lwip/dns.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+#ifndef LWIP_HOOK_DHCP6_APPEND_OPTIONS
+#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len)
+#endif
+#ifndef LWIP_HOOK_DHCP6_PARSE_OPTION
+#define LWIP_HOOK_DHCP6_PARSE_OPTION(netif, dhcp6, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0)
+#endif
+
+#if LWIP_DNS && LWIP_DHCP6_MAX_DNS_SERVERS
+#if DNS_MAX_SERVERS > LWIP_DHCP6_MAX_DNS_SERVERS
+#define LWIP_DHCP6_PROVIDE_DNS_SERVERS LWIP_DHCP6_MAX_DNS_SERVERS
+#else
+#define LWIP_DHCP6_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS
+#endif
+#else
+#define LWIP_DHCP6_PROVIDE_DNS_SERVERS 0
+#endif
+
+
+/** Option handling: options are parsed in dhcp6_parse_reply
+ * and saved in an array from which other functions can load them.
+ * This might be moved into the struct dhcp6 (not necessary, though, since
+ * lwIP is single-threaded and the array is only used while in the recv
+ * callback). */
+enum dhcp6_option_idx {
+ DHCP6_OPTION_IDX_CLI_ID = 0,
+ DHCP6_OPTION_IDX_SERVER_ID,
+#if LWIP_DHCP6_PROVIDE_DNS_SERVERS
+ DHCP6_OPTION_IDX_DNS_SERVER,
+ DHCP6_OPTION_IDX_DOMAIN_LIST,
+#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */
+#if LWIP_DHCP6_GET_NTP_SRV
+ DHCP6_OPTION_IDX_NTP_SERVER,
+#endif /* LWIP_DHCP6_GET_NTP_SRV */
+ DHCP6_OPTION_IDX_MAX
+};
+
+struct dhcp6_option_info {
+ u8_t option_given;
+ u16_t val_start;
+ u16_t val_length;
+};
+
+/** Holds the decoded option info, only valid while in dhcp6_recv. */
+struct dhcp6_option_info dhcp6_rx_options[DHCP6_OPTION_IDX_MAX];
+
+#define dhcp6_option_given(dhcp6, idx) (dhcp6_rx_options[idx].option_given != 0)
+#define dhcp6_got_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 1)
+#define dhcp6_clear_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 0)
+#define dhcp6_clear_all_options(dhcp6) (memset(dhcp6_rx_options, 0, sizeof(dhcp6_rx_options)))
+#define dhcp6_get_option_start(dhcp6, idx) (dhcp6_rx_options[idx].val_start)
+#define dhcp6_get_option_length(dhcp6, idx) (dhcp6_rx_options[idx].val_length)
+#define dhcp6_set_option(dhcp6, idx, start, len) do { dhcp6_rx_options[idx].val_start = (start); dhcp6_rx_options[idx].val_length = (len); }while(0)
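+
+/* Access sketch (only meaningful inside dhcp6_recv, while the parsed
+ * offsets are valid): an option's payload still lives in the rx pbuf and
+ * is addressed via its recorded offset and length:
+ *
+ *   if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER)) {
+ *     u16_t start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER);
+ *     u16_t len   = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER);
+ *     // copy out 'len' bytes at offset 'start' with pbuf_copy_partial()
+ *   }
+ */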
+
+
+const ip_addr_t dhcp6_All_DHCP6_Relay_Agents_and_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010002);
+const ip_addr_t dhcp6_All_DHCP6_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010003);
+
+static struct udp_pcb *dhcp6_pcb;
+static u8_t dhcp6_pcb_refcount;
+
+
+/* receive, unfold, parse and free incoming messages */
+static void dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port);
+
+/** Ensure DHCP PCB is allocated and bound */
+static err_t
+dhcp6_inc_pcb_refcount(void)
+{
+ if (dhcp6_pcb_refcount == 0) {
+ LWIP_ASSERT("dhcp6_inc_pcb_refcount(): memory leak", dhcp6_pcb == NULL);
+
+ /* allocate UDP PCB */
+ dhcp6_pcb = udp_new_ip6();
+
+ if (dhcp6_pcb == NULL) {
+ return ERR_MEM;
+ }
+
+ ip_set_option(dhcp6_pcb, SOF_BROADCAST);
+
+ /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */
+ udp_bind(dhcp6_pcb, IP6_ADDR_ANY, DHCP6_CLIENT_PORT);
+ udp_recv(dhcp6_pcb, dhcp6_recv, NULL);
+ }
+
+ dhcp6_pcb_refcount++;
+
+ return ERR_OK;
+}
+
+/** Free DHCP PCB if the last netif stops using it */
+static void
+dhcp6_dec_pcb_refcount(void)
+{
+ LWIP_ASSERT("dhcp6_pcb_refcount(): refcount error", (dhcp6_pcb_refcount > 0));
+ dhcp6_pcb_refcount--;
+
+ if (dhcp6_pcb_refcount == 0) {
+ udp_remove(dhcp6_pcb);
+ dhcp6_pcb = NULL;
+ }
+}
+
+/**
+ * @ingroup dhcp6
+ * Set a statically allocated struct dhcp6 to work with.
+ * Using this prevents the dhcp6 code from allocating it with mem_malloc.
+ *
+ * @param netif the netif for which to set the struct dhcp6
+ * @param dhcp6 (uninitialised) dhcp6 struct allocated by the application
+ */
+void
+dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6)
+{
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+ LWIP_ASSERT("dhcp6 != NULL", dhcp6 != NULL);
+ LWIP_ASSERT("netif already has a struct dhcp6 set", netif_dhcp6_data(netif) == NULL);
+
+ /* clear data structure */
+ memset(dhcp6, 0, sizeof(struct dhcp6));
+ /* dhcp6_set_state(&dhcp, DHCP6_STATE_OFF); */
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6);
+}
+
+/**
+ * @ingroup dhcp6
+ * Removes a struct dhcp6 from a netif.
+ *
+ * ATTENTION: Only use this when not using dhcp6_set_struct() to allocate the
+ * struct dhcp6 since the memory is passed back to the heap.
+ *
+ * @param netif the netif from which to remove the struct dhcp6
+ */
+void dhcp6_cleanup(struct netif *netif)
+{
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+
+ if (netif_dhcp6_data(netif) != NULL) {
+ mem_free(netif_dhcp6_data(netif));
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL);
+ }
+}
+
+static struct dhcp6*
+dhcp6_get_struct(struct netif *netif, const char *dbg_requester)
+{
+ struct dhcp6 *dhcp6 = netif_dhcp6_data(netif);
+ if (dhcp6 == NULL) {
+    LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: allocating new DHCPv6 client\n", dbg_requester));
+ dhcp6 = (struct dhcp6 *)mem_malloc(sizeof(struct dhcp6));
+ if (dhcp6 == NULL) {
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: could not allocate dhcp6\n", dbg_requester));
+ return NULL;
+ }
+
+ /* clear data structure, this implies DHCP6_STATE_OFF */
+ memset(dhcp6, 0, sizeof(struct dhcp6));
+ /* store this dhcp6 client in the netif */
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6);
+ } else {
+ /* already has DHCP6 client attached */
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("%s: using existing DHCPv6 client\n", dbg_requester));
+ }
+
+ if (!dhcp6->pcb_allocated) {
+ if (dhcp6_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP6 PCB is allocated */
+ mem_free(dhcp6);
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL);
+ return NULL;
+ }
+    LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: allocated dhcp6\n", dbg_requester));
+ dhcp6->pcb_allocated = 1;
+ }
+ return dhcp6;
+}
+
+/*
+ * Set the DHCPv6 state
+ * If the state changed, reset the number of tries.
+ */
+static void
+dhcp6_set_state(struct dhcp6 *dhcp6, u8_t new_state, const char *dbg_caller)
+{
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("DHCPv6 state: %d -> %d (%s)\n",
+ dhcp6->state, new_state, dbg_caller));
+ if (new_state != dhcp6->state) {
+ dhcp6->state = new_state;
+ dhcp6->tries = 0;
+ dhcp6->request_timeout = 0;
+ }
+}
+
+static int
+dhcp6_stateless_enabled(struct dhcp6 *dhcp6)
+{
+ if ((dhcp6->state == DHCP6_STATE_STATELESS_IDLE) ||
+ (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG)) {
+ return 1;
+ }
+ return 0;
+}
+
+/*static int
+dhcp6_stateful_enabled(struct dhcp6 *dhcp6)
+{
+ if (dhcp6->state == DHCP6_STATE_OFF) {
+ return 0;
+ }
+ if (dhcp6_stateless_enabled(dhcp6)) {
+ return 0;
+ }
+ return 1;
+}*/
+
+/**
+ * @ingroup dhcp6
+ * Enable stateful DHCPv6 on this netif
+ * Requests are sent on receipt of an RA message with the
+ * ND6_RA_FLAG_MANAGED_ADDR_CONFIG flag set.
+ *
+ * A struct dhcp6 will be allocated for this netif if not
+ * set via @ref dhcp6_set_struct before.
+ *
+ * @todo: stateful DHCPv6 not supported, yet
+ */
+err_t
+dhcp6_enable_stateful(struct netif *netif)
+{
+ LWIP_UNUSED_ARG(netif);
+  LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("stateful dhcp6 not implemented yet\n"));
+ return ERR_VAL;
+}
+
+/**
+ * @ingroup dhcp6
+ * Enable stateless DHCPv6 on this netif
+ * Requests are sent on receipt of an RA message with the
+ * ND6_RA_FLAG_OTHER_CONFIG flag set.
+ *
+ * A struct dhcp6 will be allocated for this netif if not
+ * set via @ref dhcp6_set_struct before.
+ */
+err_t
+dhcp6_enable_stateless(struct netif *netif)
+{
+ struct dhcp6 *dhcp6;
+
+  LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_enable_stateless(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
+
+ dhcp6 = dhcp6_get_struct(netif, "dhcp6_enable_stateless()");
+ if (dhcp6 == NULL) {
+ return ERR_MEM;
+ }
+ if (dhcp6_stateless_enabled(dhcp6)) {
+    LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 already enabled\n"));
+ return ERR_OK;
+ } else if (dhcp6->state != DHCP6_STATE_OFF) {
+ /* stateful running */
+ /* @todo: stop stateful once it is implemented */
+    LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): switching from stateful to stateless DHCPv6\n"));
+ }
+  LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 enabled\n"));
+ dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_enable_stateless");
+ return ERR_OK;
+}
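+
+/* Enabling sketch: an application typically calls this once after netif_add();
+ * no request is transmitted until a Router Advertisement with the
+ * "other config" flag is received:
+ *
+ *   dhcp6_enable_stateless(netif);
+ */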
+
+/**
+ * @ingroup dhcp6
+ * Disable stateful or stateless DHCPv6 on this netif.
+ * Frees the DHCPv6 UDP pcb again if this netif was the last one using it.
+ */
+void
+dhcp6_disable(struct netif *netif)
+{
+ struct dhcp6 *dhcp6;
+
+  LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_disable(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
+
+ dhcp6 = netif_dhcp6_data(netif);
+ if (dhcp6 != NULL) {
+ if (dhcp6->state != DHCP6_STATE_OFF) {
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_disable(): DHCPv6 disabled (old state: %s)\n",
+ (dhcp6_stateless_enabled(dhcp6) ? "stateless" : "stateful")));
+ dhcp6_set_state(dhcp6, DHCP6_STATE_OFF, "dhcp6_disable");
+ if (dhcp6->pcb_allocated != 0) {
+ dhcp6_dec_pcb_refcount(); /* free DHCPv6 PCB if not needed any more */
+ dhcp6->pcb_allocated = 0;
+ }
+ }
+ }
+}
+
+/**
+ * Create a DHCPv6 request, fill in common headers
+ *
+ * @param netif the netif under DHCPv6 control
+ * @param dhcp6 dhcp6 control struct
+ * @param message_type message type of the request
+ * @param opt_len_alloc option length to allocate
+ * @param options_out_len option length on exit
+ * @return a pbuf for the message
+ */
+static struct pbuf *
+dhcp6_create_msg(struct netif *netif, struct dhcp6 *dhcp6, u8_t message_type,
+ u16_t opt_len_alloc, u16_t *options_out_len)
+{
+ struct pbuf *p_out;
+ struct dhcp6_msg *msg_out;
+
+ LWIP_ERROR("dhcp6_create_msg: netif != NULL", (netif != NULL), return NULL;);
+ LWIP_ERROR("dhcp6_create_msg: dhcp6 != NULL", (dhcp6 != NULL), return NULL;);
+ p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp6_msg) + opt_len_alloc, PBUF_RAM);
+ if (p_out == NULL) {
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
+ ("dhcp6_create_msg(): could not allocate pbuf\n"));
+ return NULL;
+ }
+ LWIP_ASSERT("dhcp6_create_msg: check that first pbuf can hold struct dhcp6_msg",
+ (p_out->len >= sizeof(struct dhcp6_msg) + opt_len_alloc));
+
+ /* @todo: limit new xid for certain message types? */
+ /* reuse transaction identifier in retransmissions */
+ if (dhcp6->tries == 0) {
+ dhcp6->xid = LWIP_RAND() & 0xFFFFFF;
+ }
+
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE,
+ ("transaction id xid(%"X32_F")\n", dhcp6->xid));
+
+ msg_out = (struct dhcp6_msg *)p_out->payload;
+ memset(msg_out, 0, sizeof(struct dhcp6_msg) + opt_len_alloc);
+
+ msg_out->msgtype = message_type;
+ msg_out->transaction_id[0] = (u8_t)(dhcp6->xid >> 16);
+ msg_out->transaction_id[1] = (u8_t)(dhcp6->xid >> 8);
+ msg_out->transaction_id[2] = (u8_t)dhcp6->xid;
+ *options_out_len = 0;
+ return p_out;
+}
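+
+/* For reference, the fixed client/server header filled in above follows
+ * RFC 8415 section 8: a 1-byte msg-type followed by a 3-byte transaction id.
+ * E.g. an Information-request (msg-type 11) with xid 0x123456 is encoded as
+ * the bytes 0b 12 34 56, immediately followed by the options. */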
+
+static u16_t
+dhcp6_option_short(u16_t options_out_len, u8_t *options, u16_t value)
+{
+ options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8);
+ options[options_out_len++] = (u8_t) (value & 0x00ffU);
+ return options_out_len;
+}
+
+static u16_t
+dhcp6_option_optionrequest(u16_t options_out_len, u8_t *options, const u16_t *req_options,
+ u16_t num_req_options, u16_t max_len)
+{
+ size_t i;
+ u16_t ret;
+
+ LWIP_ASSERT("dhcp6_option_optionrequest: options_out_len + sizeof(struct dhcp6_msg) + addlen <= max_len",
+ sizeof(struct dhcp6_msg) + options_out_len + 4U + (2U * num_req_options) <= max_len);
+ LWIP_UNUSED_ARG(max_len);
+
+ ret = dhcp6_option_short(options_out_len, options, DHCP6_OPTION_ORO);
+ ret = dhcp6_option_short(ret, options, 2 * num_req_options);
+ for (i = 0; i < num_req_options; i++) {
+ ret = dhcp6_option_short(ret, options, req_options[i]);
+ }
+ return ret;
+}
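+
+/* Wire format produced by the two helpers above (Option Request Option,
+ * RFC 8415 section 21.7; all fields big-endian). E.g. requesting DNS
+ * servers (23) and SNTP servers (31) yields:
+ *
+ *   00 06   option code: OPTION_ORO (6)
+ *   00 04   option length: 2 * number of requested options
+ *   00 17   requested option: DNS recursive name servers (23)
+ *   00 1f   requested option: SNTP servers (31)
+ */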
+
+/* All options are added, shrink the pbuf to the required size */
+static void
+dhcp6_msg_finalize(u16_t options_out_len, struct pbuf *p_out)
+{
+ /* shrink the pbuf to the actual content length */
+ pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp6_msg) + options_out_len));
+}
+
+
+#if LWIP_IPV6_DHCP6_STATELESS
+static void
+dhcp6_information_request(struct netif *netif, struct dhcp6 *dhcp6)
+{
+ const u16_t requested_options[] = {DHCP6_OPTION_DNS_SERVERS, DHCP6_OPTION_DOMAIN_LIST, DHCP6_OPTION_SNTP_SERVERS};
+ u16_t msecs;
+ struct pbuf *p_out;
+ u16_t options_out_len;
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request()\n"));
+ /* create and initialize the DHCP message header */
+ p_out = dhcp6_create_msg(netif, dhcp6, DHCP6_INFOREQUEST, 4 + sizeof(requested_options), &options_out_len);
+ if (p_out != NULL) {
+ err_t err;
+ struct dhcp6_msg *msg_out = (struct dhcp6_msg *)p_out->payload;
+ u8_t *options = (u8_t *)(msg_out + 1);
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request: making request\n"));
+
+ options_out_len = dhcp6_option_optionrequest(options_out_len, options, requested_options,
+ LWIP_ARRAYSIZE(requested_options), p_out->len);
+ LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, DHCP6_STATE_REQUESTING_CONFIG, msg_out,
+ DHCP6_INFOREQUEST, options_out_len, p_out->len);
+ dhcp6_msg_finalize(options_out_len, p_out);
+
+ err = udp_sendto_if(dhcp6_pcb, p_out, &dhcp6_All_DHCP6_Relay_Agents_and_Servers, DHCP6_SERVER_PORT, netif);
+ pbuf_free(p_out);
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request: INFOREQUESTING -> %d\n", (int)err));
+ LWIP_UNUSED_ARG(err);
+ } else {
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp6_information_request: could not allocate DHCP6 request\n"));
+ }
+ dhcp6_set_state(dhcp6, DHCP6_STATE_REQUESTING_CONFIG, "dhcp6_information_request");
+ if (dhcp6->tries < 255) {
+ dhcp6->tries++;
+ }
+ msecs = (u16_t)((dhcp6->tries < 6 ? 1 << dhcp6->tries : 60) * 1000);
+ dhcp6->request_timeout = (u16_t)((msecs + DHCP6_TIMER_MSECS - 1) / DHCP6_TIMER_MSECS);
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request(): set request timeout %"U16_F" msecs\n", msecs));
+}
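+
+/* Worked example of the retransmission backoff computed above: the n-th
+ * transmission schedules the next retry after (1 << n) seconds for n < 6,
+ * capped at 60 s afterwards, i.e. 2 s, 4 s, 8 s, 16 s, 32 s, 60 s, 60 s, ...
+ * With DHCP6_TIMER_MSECS == 500, a 2 s timeout is stored as
+ * request_timeout == 4 ticks of dhcp6_tmr(). */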
+
+static err_t
+dhcp6_request_config(struct netif *netif, struct dhcp6 *dhcp6)
+{
+ /* stateless mode enabled and no request running? */
+ if (dhcp6->state == DHCP6_STATE_STATELESS_IDLE) {
+ /* send Information-request and wait for answer; setup receive timeout */
+ dhcp6_information_request(netif, dhcp6);
+ }
+
+ return ERR_OK;
+}
+
+static void
+dhcp6_abort_config_request(struct dhcp6 *dhcp6)
+{
+ if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) {
+ /* abort running request */
+ dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_abort_config_request");
+ }
+}
+
+/* Handle a REPLY to INFOREQUEST
+ * This parses DNS and NTP server addresses from the reply.
+ */
+static void
+dhcp6_handle_config_reply(struct netif *netif, struct pbuf *p_msg_in)
+{
+ struct dhcp6 *dhcp6 = netif_dhcp6_data(netif);
+
+ LWIP_UNUSED_ARG(dhcp6);
+ LWIP_UNUSED_ARG(p_msg_in);
+
+#if LWIP_DHCP6_PROVIDE_DNS_SERVERS
+ if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER)) {
+ ip_addr_t dns_addr;
+ ip6_addr_t *dns_addr6;
+ u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER);
+ u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER);
+ u16_t idx;
+ u8_t n;
+
+ memset(&dns_addr, 0, sizeof(dns_addr));
+ dns_addr6 = ip_2_ip6(&dns_addr);
+ for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_PROVIDE_DNS_SERVERS);
+ n++, idx += sizeof(struct ip6_addr_packed)) {
+ u16_t copied = pbuf_copy_partial(p_msg_in, dns_addr6, sizeof(struct ip6_addr_packed), idx);
+ if (copied != sizeof(struct ip6_addr_packed)) {
+ /* pbuf length mismatch */
+ return;
+ }
+ ip6_addr_assign_zone(dns_addr6, IP6_UNKNOWN, netif);
+ /* @todo: do we need a different offset than DHCP(v4)? */
+ dns_setserver(n, &dns_addr);
+ }
+ }
+  /* @todo: parse and set Domain Search List */
+#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */
+
+#if LWIP_DHCP6_GET_NTP_SRV
+ if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER)) {
+ ip_addr_t ntp_server_addrs[LWIP_DHCP6_MAX_NTP_SERVERS];
+ u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER);
+ u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER);
+ u16_t idx;
+ u8_t n;
+
+ for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_MAX_NTP_SERVERS);
+ n++, idx += sizeof(struct ip6_addr_packed)) {
+ u16_t copied;
+ ip6_addr_t *ntp_addr6 = ip_2_ip6(&ntp_server_addrs[n]);
+ ip_addr_set_zero_ip6(&ntp_server_addrs[n]);
+ copied = pbuf_copy_partial(p_msg_in, ntp_addr6, sizeof(struct ip6_addr_packed), idx);
+ if (copied != sizeof(struct ip6_addr_packed)) {
+ /* pbuf length mismatch */
+ return;
+ }
+ ip6_addr_assign_zone(ntp_addr6, IP6_UNKNOWN, netif);
+ }
+ dhcp6_set_ntp_servers(n, ntp_server_addrs);
+ }
+#endif /* LWIP_DHCP6_GET_NTP_SRV */
+}
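+
+/* Note on the loops above: both the DNS and the NTP server options carry a
+ * plain list of 16-byte IPv6 addresses, so op_len is a multiple of
+ * sizeof(struct ip6_addr_packed); e.g. an option of length 32 produces two
+ * dns_setserver() calls (indices 0 and 1). */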
+#endif /* LWIP_IPV6_DHCP6_STATELESS */
+
+/** This function is called from the nd6 module when an RA message is received.
+ * It triggers DHCPv6 requests (if enabled).
+ */
+void
+dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config)
+{
+ struct dhcp6 *dhcp6;
+
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+ dhcp6 = netif_dhcp6_data(netif);
+
+ LWIP_UNUSED_ARG(managed_addr_config);
+ LWIP_UNUSED_ARG(other_config);
+ LWIP_UNUSED_ARG(dhcp6);
+
+#if LWIP_IPV6_DHCP6_STATELESS
+ if (dhcp6 != NULL) {
+ if (dhcp6_stateless_enabled(dhcp6)) {
+ if (other_config) {
+ dhcp6_request_config(netif, dhcp6);
+ } else {
+ dhcp6_abort_config_request(dhcp6);
+ }
+ }
+ }
+#endif /* LWIP_IPV6_DHCP6_STATELESS */
+}
+
+/**
+ * Parse the DHCPv6 message and extract the DHCPv6 options.
+ *
+ * Extract the DHCPv6 options (offset + length) so that we can later easily
+ * check for them or extract the contents.
+ */
+static err_t
+dhcp6_parse_reply(struct pbuf *p, struct dhcp6 *dhcp6)
+{
+ u16_t offset;
+ u16_t offset_max;
+ u16_t options_idx;
+ struct dhcp6_msg *msg_in;
+
+ LWIP_UNUSED_ARG(dhcp6);
+
+ /* clear received options */
+ dhcp6_clear_all_options(dhcp6);
+ msg_in = (struct dhcp6_msg *)p->payload;
+
+ /* parse options */
+
+ options_idx = sizeof(struct dhcp6_msg);
+ /* parse options to the end of the received packet */
+ offset_max = p->tot_len;
+
+ offset = options_idx;
+ /* at least 4 byte to read? */
+ while ((offset + 4 <= offset_max)) {
+ u8_t op_len_buf[4];
+ u8_t *op_len;
+ u16_t op;
+ u16_t len;
+ u16_t val_offset = (u16_t)(offset + 4);
+ if (val_offset < offset) {
+ /* overflow */
+ return ERR_BUF;
+ }
+ /* copy option + length, might be split accross pbufs */
+ op_len = (u8_t *)pbuf_get_contiguous(p, op_len_buf, 4, 4, offset);
+ if (op_len == NULL) {
+ /* failed to get option and length */
+ return ERR_VAL;
+ }
+ op = (op_len[0] << 8) | op_len[1];
+ len = (op_len[2] << 8) | op_len[3];
+ offset = val_offset + len;
+ if (offset < val_offset) {
+ /* overflow */
+ return ERR_BUF;
+ }
+
+ switch (op) {
+ case (DHCP6_OPTION_CLIENTID):
+ dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID);
+ dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID, val_offset, len);
+ break;
+ case (DHCP6_OPTION_SERVERID):
+ dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID);
+ dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID, val_offset, len);
+ break;
+#if LWIP_DHCP6_PROVIDE_DNS_SERVERS
+ case (DHCP6_OPTION_DNS_SERVERS):
+ dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER);
+ dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER, val_offset, len);
+ break;
+ case (DHCP6_OPTION_DOMAIN_LIST):
+ dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST);
+ dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST, val_offset, len);
+ break;
+#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */
+#if LWIP_DHCP6_GET_NTP_SRV
+ case (DHCP6_OPTION_SNTP_SERVERS):
+ dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER);
+ dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER, val_offset, len);
+ break;
+#endif /* LWIP_DHCP6_GET_NTP_SRV*/
+ default:
+ LWIP_DEBUGF(DHCP6_DEBUG, ("skipping option %"U16_F" in options\n", op));
+ LWIP_HOOK_DHCP6_PARSE_OPTION(ip_current_netif(), dhcp6, dhcp6->state, msg_in,
+                                     msg_in->msgtype, op, len, p, val_offset);
+ break;
+ }
+ }
+ return ERR_OK;
+}
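+
+/* Each option handled above is a TLV as defined in RFC 8415 section 21.1:
+ * a 2-byte option code, a 2-byte option length and the option data. E.g. a
+ * reply carrying one DNS server is parsed as
+ *
+ *   00 17   option code: DNS recursive name servers (23)
+ *   00 10   option length: 16
+ *   <16 bytes of IPv6 address data>
+ */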
+
+static void
+dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port)
+{
+ struct netif *netif = ip_current_input_netif();
+ struct dhcp6 *dhcp6 = netif_dhcp6_data(netif);
+ struct dhcp6_msg *reply_msg = (struct dhcp6_msg *)p->payload;
+ u8_t msg_type;
+ u32_t xid;
+
+ LWIP_UNUSED_ARG(arg);
+
+ /* Caught DHCPv6 message from netif that does not have DHCPv6 enabled? -> not interested */
+ if ((dhcp6 == NULL) || (dhcp6->pcb_allocated == 0)) {
+ goto free_pbuf_and_return;
+ }
+
+ LWIP_ERROR("invalid server address type", IP_IS_V6(addr), goto free_pbuf_and_return;);
+
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_recv(pbuf = %p) from DHCPv6 server %s port %"U16_F"\n", (void *)p,
+ ipaddr_ntoa(addr), port));
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len));
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len));
+ /* prevent warnings about unused arguments */
+ LWIP_UNUSED_ARG(pcb);
+ LWIP_UNUSED_ARG(addr);
+ LWIP_UNUSED_ARG(port);
+
+ if (p->len < sizeof(struct dhcp6_msg)) {
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCPv6 reply message or pbuf too short\n"));
+ goto free_pbuf_and_return;
+ }
+
+ /* match transaction ID against what we expected */
+ xid = reply_msg->transaction_id[0] << 16;
+ xid |= reply_msg->transaction_id[1] << 8;
+ xid |= reply_msg->transaction_id[2];
+ if (xid != dhcp6->xid) {
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING,
+ ("transaction id mismatch reply_msg->xid(%"X32_F")!= dhcp6->xid(%"X32_F")\n", xid, dhcp6->xid));
+ goto free_pbuf_and_return;
+ }
+  /* can the option fields be parsed? */
+ if (dhcp6_parse_reply(p, dhcp6) != ERR_OK) {
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
+ ("problem unfolding DHCPv6 message - too short on memory?\n"));
+ goto free_pbuf_and_return;
+ }
+
+ /* read DHCP message type */
+ msg_type = reply_msg->msgtype;
+ /* message type is DHCP6 REPLY? */
+ if (msg_type == DHCP6_REPLY) {
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("DHCP6_REPLY received\n"));
+#if LWIP_IPV6_DHCP6_STATELESS
+ /* in info-requesting state? */
+ if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) {
+ dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_recv");
+ dhcp6_handle_config_reply(netif, p);
+ } else
+#endif /* LWIP_IPV6_DHCP6_STATELESS */
+ {
+ /* @todo: handle reply in other states? */
+ }
+ } else {
+ /* @todo: handle other message types */
+ }
+
+free_pbuf_and_return:
+ pbuf_free(p);
+}
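+
+/* The transaction id checked above is the 24-bit big-endian value taken from
+ * the reply header, e.g. the bytes 12 34 56 yield xid 0x123456; it must match
+ * the xid generated in dhcp6_create_msg() for the outstanding request. */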
+
+/**
+ * A DHCPv6 request has timed out.
+ *
+ * The timer that was started with the DHCPv6 request has
+ * timed out, indicating no response was received in time.
+ */
+static void
+dhcp6_timeout(struct netif *netif, struct dhcp6 *dhcp6)
+{
+  LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout()\n"));
+
+ LWIP_UNUSED_ARG(netif);
+ LWIP_UNUSED_ARG(dhcp6);
+
+#if LWIP_IPV6_DHCP6_STATELESS
+ /* back-off period has passed, or server selection timed out */
+ if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) {
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout(): retrying information request\n"));
+ dhcp6_information_request(netif, dhcp6);
+ }
+#endif /* LWIP_IPV6_DHCP6_STATELESS */
+}
+
+/**
+ * DHCPv6 timeout handling (this function must be called every 500ms,
+ * see @ref DHCP6_TIMER_MSECS).
+ *
+ * A DHCPv6 server is expected to respond within a short period of time.
+ * This timer checks whether an outstanding DHCPv6 request is timed out.
+ */
+void
+dhcp6_tmr(void)
+{
+ struct netif *netif;
+ /* loop through netif's */
+ NETIF_FOREACH(netif) {
+ struct dhcp6 *dhcp6 = netif_dhcp6_data(netif);
+ /* only act on DHCPv6 configured interfaces */
+ if (dhcp6 != NULL) {
+ /* timer is active (non zero), and is about to trigger now */
+ if (dhcp6->request_timeout > 1) {
+ dhcp6->request_timeout--;
+ } else if (dhcp6->request_timeout == 1) {
+ dhcp6->request_timeout--;
+ /* { dhcp6->request_timeout == 0 } */
+ LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_tmr(): request timeout\n"));
+ /* this client's request timeout triggered */
+ dhcp6_timeout(netif, dhcp6);
+ }
+ }
+ }
+}
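+
+/* In default configurations this function is driven by lwIP's cyclic timer
+ * table (timeouts.c), so no application code is needed. A bare-metal sketch
+ * that drives it by hand could look like this (wait_ms() is a hypothetical
+ * platform delay routine):
+ *
+ *   for (;;) {
+ *     wait_ms(DHCP6_TIMER_MSECS);
+ *     dhcp6_tmr();
+ *   }
+ */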
+
+#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */
diff --git a/lwip/src/core/ipv6/ethip6.c b/lwip/src/core/ipv6/ethip6.c
new file mode 100644
index 0000000..fec8b28
--- /dev/null
+++ b/lwip/src/core/ipv6/ethip6.c
@@ -0,0 +1,123 @@
+/**
+ * @file
+ *
+ * Ethernet output for IPv6. Uses ND tables for link-layer addressing.
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 && LWIP_ETHERNET
+
+#include "lwip/ethip6.h"
+#include "lwip/nd6.h"
+#include "lwip/pbuf.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/icmp6.h"
+#include "lwip/prot/ethernet.h"
+#include "netif/ethernet.h"
+
+#include <string.h>
+
+/**
+ * Resolve and fill-in Ethernet address header for outgoing IPv6 packet.
+ *
+ * For IPv6 multicast, corresponding Ethernet addresses
+ * are selected and the packet is transmitted on the link.
+ *
+ * For unicast addresses, ask the ND6 module what to do. It will either let us
+ * send the packet right away, or queue the packet for later itself, unless
+ * an error occurs.
+ *
+ * @todo anycast addresses
+ *
+ * @param netif The lwIP network interface which the IP packet will be sent on.
+ * @param q The pbuf(s) containing the IP packet to be sent.
+ * @param ip6addr The IP address of the packet destination.
+ *
+ * @return
+ * - ERR_OK or the return value of @ref nd6_get_next_hop_addr_or_queue.
+ */
+err_t
+ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr)
+{
+ struct eth_addr dest;
+ const u8_t *hwaddr;
+ err_t result;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* The destination IP address must be properly zoned from here on down. */
+ IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif);
+
+ /* multicast destination IP address? */
+ if (ip6_addr_ismulticast(ip6addr)) {
+ /* Hash IP multicast address to MAC address.*/
+ dest.addr[0] = 0x33;
+ dest.addr[1] = 0x33;
+ dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0];
+ dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1];
+ dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2];
+ dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3];
+
+ /* Send out. */
+ return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6);
+ }
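+
+    /* Examples of the RFC 2464 mapping above: the all-nodes address ff02::1
+     * maps to 33:33:00:00:00:01, and a solicited-node address such as
+     * ff02::1:ff12:3456 maps to 33:33:ff:12:34:56 (the low-order 32 bits of
+     * the IPv6 address are copied into the MAC address). */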
+
+ /* We have a unicast destination IP address */
+ /* @todo anycast? */
+
+ /* Ask ND6 what to do with the packet. */
+ result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr);
+ if (result != ERR_OK) {
+ return result;
+ }
+
+ /* If no hardware address is returned, nd6 has queued the packet for later. */
+ if (hwaddr == NULL) {
+ return ERR_OK;
+ }
+
+ /* Send out the packet using the returned hardware address. */
+ SMEMCPY(dest.addr, hwaddr, 6);
+ return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6);
+}
+
+#endif /* LWIP_IPV6 && LWIP_ETHERNET */
diff --git a/lwip/src/core/ipv6/icmp6.c b/lwip/src/core/ipv6/icmp6.c
new file mode 100644
index 0000000..167738a
--- /dev/null
+++ b/lwip/src/core/ipv6/icmp6.c
@@ -0,0 +1,425 @@
+/**
+ * @file
+ *
+ * IPv6 version of ICMP, as per RFC 4443.
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/icmp6.h"
+#include "lwip/prot/icmp6.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/pbuf.h"
+#include "lwip/netif.h"
+#include "lwip/nd6.h"
+#include "lwip/mld6.h"
+#include "lwip/ip.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+#if LWIP_ICMP6_DATASIZE == 0
+#undef LWIP_ICMP6_DATASIZE
+#define LWIP_ICMP6_DATASIZE 8
+#endif
+
+/* Forward declarations */
+static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type);
+static void icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data,
+ u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr);
+static void icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data,
+ u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr, struct netif *netif);
+
+
+/**
+ * Process an input ICMPv6 message. Called by ip6_input.
+ *
+ * Will generate a reply for echo requests. Other messages are forwarded
+ * to nd6_input, or mld6_input.
+ *
+ * @param p the icmpv6 packet, p->payload pointing to the icmpv6 header
+ * @param inp the netif on which this packet was received
+ */
+void
+icmp6_input(struct pbuf *p, struct netif *inp)
+{
+ struct icmp6_hdr *icmp6hdr;
+ struct pbuf *r;
+ const ip6_addr_t *reply_src;
+
+ ICMP6_STATS_INC(icmp6.recv);
+
+ /* Check that ICMPv6 header fits in payload */
+ if (p->len < sizeof(struct icmp6_hdr)) {
+ /* drop short packets */
+ pbuf_free(p);
+ ICMP6_STATS_INC(icmp6.lenerr);
+ ICMP6_STATS_INC(icmp6.drop);
+ return;
+ }
+
+ icmp6hdr = (struct icmp6_hdr *)p->payload;
+
+#if CHECKSUM_CHECK_ICMP6
+ IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) {
+ if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(),
+ ip6_current_dest_addr()) != 0) {
+ /* Checksum failed */
+ pbuf_free(p);
+ ICMP6_STATS_INC(icmp6.chkerr);
+ ICMP6_STATS_INC(icmp6.drop);
+ return;
+ }
+ }
+#endif /* CHECKSUM_CHECK_ICMP6 */
+
+ switch (icmp6hdr->type) {
+ case ICMP6_TYPE_NA: /* Neighbor advertisement */
+ case ICMP6_TYPE_NS: /* Neighbor solicitation */
+ case ICMP6_TYPE_RA: /* Router advertisement */
+ case ICMP6_TYPE_RD: /* Redirect */
+ case ICMP6_TYPE_PTB: /* Packet too big */
+ nd6_input(p, inp);
+ return;
+ case ICMP6_TYPE_RS:
+#if LWIP_IPV6_FORWARD
+ /* @todo implement router functionality */
+#endif
+ break;
+#if LWIP_IPV6_MLD
+ case ICMP6_TYPE_MLQ:
+ case ICMP6_TYPE_MLR:
+ case ICMP6_TYPE_MLD:
+ mld6_input(p, inp);
+ return;
+#endif
+ case ICMP6_TYPE_EREQ:
+#if !LWIP_MULTICAST_PING
+ /* multicast destination address? */
+ if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
+ /* drop */
+ pbuf_free(p);
+ ICMP6_STATS_INC(icmp6.drop);
+ return;
+ }
+#endif /* LWIP_MULTICAST_PING */
+
+ /* Allocate reply. */
+ r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM);
+ if (r == NULL) {
+ /* drop */
+ pbuf_free(p);
+ ICMP6_STATS_INC(icmp6.memerr);
+ return;
+ }
+
+ /* Copy echo request. */
+ if (pbuf_copy(r, p) != ERR_OK) {
+ /* drop */
+ pbuf_free(p);
+ pbuf_free(r);
+ ICMP6_STATS_INC(icmp6.err);
+ return;
+ }
+
+ /* Determine reply source IPv6 address. */
+#if LWIP_MULTICAST_PING
+ if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
+ reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr()));
+ if (reply_src == NULL) {
+ /* drop */
+ pbuf_free(p);
+ pbuf_free(r);
+ ICMP6_STATS_INC(icmp6.rterr);
+ return;
+ }
+ }
+ else
+#endif /* LWIP_MULTICAST_PING */
+ {
+ reply_src = ip6_current_dest_addr();
+ }
+
+ /* Set fields in reply. */
+ ((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP;
+ ((struct icmp6_echo_hdr *)(r->payload))->chksum = 0;
+#if CHECKSUM_GEN_ICMP6
+ IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) {
+ ((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r,
+ IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr());
+ }
+#endif /* CHECKSUM_GEN_ICMP6 */
+
+ /* Send reply. */
+ ICMP6_STATS_INC(icmp6.xmit);
+ ip6_output_if(r, reply_src, ip6_current_src_addr(),
+ LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp);
+ pbuf_free(r);
+
+ break;
+ default:
+ ICMP6_STATS_INC(icmp6.proterr);
+ ICMP6_STATS_INC(icmp6.drop);
+ break;
+ }
+
+ pbuf_free(p);
+}
+
+
+/**
+ * Send an icmpv6 'destination unreachable' packet.
+ *
+ * This function must be used only in direct response to a packet that is being
+ * received right now. Otherwise, address zones would be lost.
+ *
+ * @param p the input packet for which the 'unreachable' should be sent,
+ * p->payload pointing to the IPv6 header
+ * @param c ICMPv6 code for the unreachable type
+ */
+void
+icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c)
+{
+ icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR);
+}
+
+/**
+ * Send an icmpv6 'packet too big' packet.
+ *
+ * This function must be used only in direct response to a packet that is being
+ * received right now. Otherwise, address zones would be lost.
+ *
+ * @param p the input packet for which the 'packet too big' should be sent,
+ * p->payload pointing to the IPv6 header
+ * @param mtu the maximum mtu that we can accept
+ */
+void
+icmp6_packet_too_big(struct pbuf *p, u32_t mtu)
+{
+ icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB);
+}
+
+/**
+ * Send an icmpv6 'time exceeded' packet.
+ *
+ * This function must be used only in direct response to a packet that is being
+ * received right now. Otherwise, address zones would be lost.
+ *
+ * @param p the input packet for which the 'time exceeded' should be sent,
+ * p->payload pointing to the IPv6 header
+ * @param c ICMPv6 code for the time exceeded type
+ */
+void
+icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c)
+{
+ icmp6_send_response(p, c, 0, ICMP6_TYPE_TE);
+}
+
+/**
+ * Send an icmpv6 'time exceeded' packet, with explicit source and destination
+ * addresses.
+ *
+ * This function may be used to send a response sometime after receiving the
+ * packet for which this response is meant. The provided source and destination
+ * addresses are used primarily to retain their zone information.
+ *
+ * @param p the input packet for which the 'time exceeded' should be sent,
+ * p->payload pointing to the IPv6 header
+ * @param c ICMPv6 code for the time exceeded type
+ * @param src_addr source address of the original packet, with zone information
+ * @param dest_addr destination address of the original packet, with zone
+ * information
+ */
+void
+icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c,
+ const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr)
+{
+ icmp6_send_response_with_addrs(p, c, 0, ICMP6_TYPE_TE, src_addr, dest_addr);
+}
+
+/**
+ * Send an icmpv6 'parameter problem' packet.
+ *
+ * This function must be used only in direct response to a packet that is being
+ * received right now. Otherwise, address zones would be lost and the calculated
+ * offset would be wrong (calculated against ip6_current_header()).
+ *
+ * @param p the input packet for which the 'param problem' should be sent,
+ * p->payload pointing to the IP header
+ * @param c ICMPv6 code for the param problem type
+ * @param pointer the pointer to the byte where the parameter is found
+ */
+void
+icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer)
+{
+ u32_t pointer_u32 = (u32_t)((const u8_t *)pointer - (const u8_t *)ip6_current_header());
+ icmp6_send_response(p, c, pointer_u32, ICMP6_TYPE_PP);
+}
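+
+/* Example of the pointer computation above: if the offending byte is the
+ * Next Header field of the IPv6 header itself, the resulting offset is 6,
+ * that field's position within the header (RFC 8200 / RFC 4443). */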
+
+/**
+ * Send an ICMPv6 packet in response to an incoming packet.
+ * The packet is sent *to* ip_current_src_addr() on ip_current_netif().
+ *
+ * @param p the input packet for which the response should be sent,
+ * p->payload pointing to the IPv6 header
+ * @param code Code of the ICMPv6 header
+ * @param data Additional 32-bit parameter in the ICMPv6 header
+ * @param type Type of the ICMPv6 header
+ */
+static void
+icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type)
+{
+ const struct ip6_addr *reply_src, *reply_dest;
+ struct netif *netif = ip_current_netif();
+
+ LWIP_ASSERT("icmpv6 packet not a direct response", netif != NULL);
+ reply_dest = ip6_current_src_addr();
+
+ /* Select an address to use as source. */
+ reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest));
+ if (reply_src == NULL) {
+ ICMP6_STATS_INC(icmp6.rterr);
+ return;
+ }
+ icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, reply_dest, netif);
+}
+
+/**
+ * Send an ICMPv6 packet in response to an incoming packet.
+ *
+ * Call this function if the packet is NOT sent as a direct response to an
+ * incoming packet, but rather sometime later (e.g. for a fragment reassembly
+ * timeout). The caller must provide the zoned source and destination addresses
+ * from the original packet with the src_addr and dest_addr parameters. The
+ * reason for this approach is that while the addresses themselves are part of
+ * the original packet, their zone information is not, thus possibly resulting
+ * in a link-local response being sent over the wrong link.
+ *
+ * @param p the input packet for which the response should be sent,
+ * p->payload pointing to the IPv6 header
+ * @param code Code of the ICMPv6 header
+ * @param data Additional 32-bit parameter in the ICMPv6 header
+ * @param type Type of the ICMPv6 header
+ * @param src_addr original source address
+ * @param dest_addr original destination address
+ */
+static void
+icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, u8_t type,
+ const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr)
+{
+ const struct ip6_addr *reply_src, *reply_dest;
+ struct netif *netif;
+
+ /* Get the destination address and netif for this ICMP message. */
+ LWIP_ASSERT("must provide both source and destination", src_addr != NULL);
+ LWIP_ASSERT("must provide both source and destination", dest_addr != NULL);
+
+ /* Special case, as ip6_current_xxx is either NULL, or points
+ to a different packet than the one that expired. */
+ IP6_ADDR_ZONECHECK(src_addr);
+ IP6_ADDR_ZONECHECK(dest_addr);
+ /* Swap source and destination for the reply. */
+ reply_dest = src_addr;
+ reply_src = dest_addr;
+ netif = ip6_route(reply_src, reply_dest);
+ if (netif == NULL) {
+ ICMP6_STATS_INC(icmp6.rterr);
+ return;
+ }
+ icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src,
+ reply_dest, netif);
+}
+
+/**
+ * Send an ICMPv6 packet (with src/dst address and netif given).
+ *
+ * @param p the input packet for which the response should be sent,
+ * p->payload pointing to the IPv6 header
+ * @param code Code of the ICMPv6 header
+ * @param data Additional 32-bit parameter in the ICMPv6 header
+ * @param type Type of the ICMPv6 header
+ * @param reply_src source address of the packet to send
+ * @param reply_dest destination address of the packet to send
+ * @param netif netif to send the packet
+ */
+static void
+icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, u8_t type,
+ const ip6_addr_t *reply_src, const ip6_addr_t *reply_dest, struct netif *netif)
+{
+ struct pbuf *q;
+ struct icmp6_hdr *icmp6hdr;
+
+ /* ICMPv6 header + IPv6 header + data */
+ q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE,
+ PBUF_RAM);
+ if (q == NULL) {
+ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMPv6 packet.\n"));
+ ICMP6_STATS_INC(icmp6.memerr);
+ return;
+ }
+ LWIP_ASSERT("check that first pbuf can hold icmp 6message",
+ (q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE)));
+
+ icmp6hdr = (struct icmp6_hdr *)q->payload;
+ icmp6hdr->type = type;
+ icmp6hdr->code = code;
+ icmp6hdr->data = lwip_htonl(data);
+
+ /* copy fields from original packet */
+ SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload,
+ IP6_HLEN + LWIP_ICMP6_DATASIZE);
+
+ /* calculate checksum */
+ icmp6hdr->chksum = 0;
+#if CHECKSUM_GEN_ICMP6
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
+ icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len,
+ reply_src, reply_dest);
+ }
+#endif /* CHECKSUM_GEN_ICMP6 */
+
+ ICMP6_STATS_INC(icmp6.xmit);
+ ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif);
+ pbuf_free(q);
+}
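+
+/* Layout of the response built above, assuming the default
+ * LWIP_ICMP6_DATASIZE of 8:
+ *
+ *   [ 8-byte ICMPv6 header: type, code, checksum, 32-bit data field ]
+ *   [ 40-byte IPv6 header of the offending packet                   ]
+ *   [ first 8 bytes of the offending packet's payload               ]
+ */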
+
+#endif /* LWIP_ICMP6 && LWIP_IPV6 */
diff --git a/lwip/src/core/ipv6/inet6.c b/lwip/src/core/ipv6/inet6.c
new file mode 100644
index 0000000..d9a992c
--- /dev/null
+++ b/lwip/src/core/ipv6/inet6.c
@@ -0,0 +1,53 @@
+/**
+ * @file
+ *
+ * INET v6 addresses.
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/inet.h"
+
+/** This variable is initialized by the system to contain the wildcard IPv6 address.
+ */
+const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
+
+#endif /* LWIP_IPV6 */
diff --git a/lwip/src/core/ipv6/ip6.c b/lwip/src/core/ipv6/ip6.c
new file mode 100644
index 0000000..eda11dc
--- /dev/null
+++ b/lwip/src/core/ipv6/ip6.c
@@ -0,0 +1,1492 @@
+/**
+ * @file
+ *
+ * IPv6 layer.
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/netif.h"
+#include "lwip/ip.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/ip6_frag.h"
+#include "lwip/icmp6.h"
+#include "lwip/priv/raw_priv.h"
+#include "lwip/udp.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/dhcp6.h"
+#include "lwip/nd6.h"
+#include "lwip/mld6.h"
+#include "lwip/debug.h"
+#include "lwip/stats.h"
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/**
+ * Finds the appropriate network interface for a given IPv6 address. It tries to select
+ * a netif following a sequence of heuristics:
+ * 1) if there is only 1 netif, return it
+ * 2) if the destination is a zoned address, match its zone to a netif
+ * 3) if either the source or destination address is a scoped address,
+ * match the source address's zone (if set) or address (if not) to a netif
+ * 4) tries to match the destination subnet to a configured address
+ * 5) tries to find a router-announced route
+ * 6) tries to match the (unscoped) source address to the netif
+ * 7) returns the default netif, if configured
+ *
+ * Note that each of the two given addresses may or may not be properly zoned.
+ *
+ * @param src the source IPv6 address, if known
+ * @param dest the destination IPv6 address for which to find the route
+ * @return the netif on which to send to reach dest
+ */
+struct netif *
+ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest)
+{
+#if LWIP_SINGLE_NETIF
+ LWIP_UNUSED_ARG(src);
+ LWIP_UNUSED_ARG(dest);
+#else /* LWIP_SINGLE_NETIF */
+ struct netif *netif;
+ s8_t i;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* If single netif configuration, fast return. */
+ if ((netif_list != NULL) && (netif_list->next == NULL)) {
+ if (!netif_is_up(netif_list) || !netif_is_link_up(netif_list) ||
+ (ip6_addr_has_zone(dest) && !ip6_addr_test_zone(dest, netif_list))) {
+ return NULL;
+ }
+ return netif_list;
+ }
+
+#if LWIP_IPV6_SCOPES
+ /* Special processing for zoned destination addresses. This includes link-
+ * local unicast addresses and interface/link-local multicast addresses. Use
+ * the zone to find a matching netif. If the address is not zoned, then there
+ * is technically no "wrong" netif to choose, and we leave routing to other
+ * rules; in most cases this should be the scoped-source rule below. */
+ if (ip6_addr_has_zone(dest)) {
+ IP6_ADDR_ZONECHECK(dest);
+ /* Find a netif based on the zone. For custom mappings, one zone may map
+ * to multiple netifs, so find one that can actually send a packet. */
+ NETIF_FOREACH(netif) {
+ if (ip6_addr_test_zone(dest, netif) &&
+ netif_is_up(netif) && netif_is_link_up(netif)) {
+ return netif;
+ }
+ }
+    /* No matching netif found. Do not try to route to a different netif,
+ * as that would be a zone violation, resulting in any packets sent to
+ * that netif being dropped on output. */
+ return NULL;
+ }
+#endif /* LWIP_IPV6_SCOPES */
+
+ /* Special processing for scoped source and destination addresses. If we get
+ * here, the destination address does not have a zone, so either way we need
+ * to look at the source address, which may or may not have a zone. If it
+ * does, the zone is restrictive: there is (typically) only one matching
+ * netif for it, and we should avoid routing to any other netif as that would
+ * result in guaranteed zone violations. For scoped source addresses that do
+ * not have a zone, use (only) a netif that has that source address locally
+ * assigned. This case also applies to the loopback source address, which has
+ * an implied link-local scope. If only the destination address is scoped
+ * (but, again, not zoned), we still want to use only the source address to
+ * determine its zone because that's most likely what the user/application
+ * wants, regardless of whether the source address is scoped. Finally, some
+ * of this story also applies if scoping is disabled altogether. */
+#if LWIP_IPV6_SCOPES
+ if (ip6_addr_has_scope(dest, IP6_UNKNOWN) ||
+ ip6_addr_has_scope(src, IP6_UNICAST) ||
+#else /* LWIP_IPV6_SCOPES */
+ if (ip6_addr_islinklocal(dest) || ip6_addr_ismulticast_iflocal(dest) ||
+ ip6_addr_ismulticast_linklocal(dest) || ip6_addr_islinklocal(src) ||
+#endif /* LWIP_IPV6_SCOPES */
+ ip6_addr_isloopback(src)) {
+#if LWIP_IPV6_SCOPES
+ if (ip6_addr_has_zone(src)) {
+ /* Find a netif matching the source zone (relatively cheap). */
+ NETIF_FOREACH(netif) {
+ if (netif_is_up(netif) && netif_is_link_up(netif) &&
+ ip6_addr_test_zone(src, netif)) {
+ return netif;
+ }
+ }
+ } else
+#endif /* LWIP_IPV6_SCOPES */
+ {
+ /* Find a netif matching the source address (relatively expensive). */
+ NETIF_FOREACH(netif) {
+ if (!netif_is_up(netif) || !netif_is_link_up(netif)) {
+ continue;
+ }
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) &&
+ ip6_addr_cmp_zoneless(src, netif_ip6_addr(netif, i))) {
+ return netif;
+ }
+ }
+ }
+ }
+ /* Again, do not use any other netif in this case, as that could result in
+ * zone boundary violations. */
+ return NULL;
+ }
+
+ /* We come here only if neither source nor destination is scoped. */
+ IP6_ADDR_ZONECHECK(src);
+
+#ifdef LWIP_HOOK_IP6_ROUTE
+ netif = LWIP_HOOK_IP6_ROUTE(src, dest);
+ if (netif != NULL) {
+ return netif;
+ }
+#endif
+
+ /* See if the destination subnet matches a configured address. In accordance
+ * with RFC 5942, dynamically configured addresses do not have an implied
+ * local subnet, and thus should be considered /128 assignments. However, as
+ * such, the destination address may still match a local address, and so we
+ * still need to check for exact matches here. By (lwIP) policy, statically
+ * configured addresses do always have an implied local /64 subnet. */
+ NETIF_FOREACH(netif) {
+ if (!netif_is_up(netif) || !netif_is_link_up(netif)) {
+ continue;
+ }
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) &&
+ ip6_addr_netcmp(dest, netif_ip6_addr(netif, i)) &&
+ (netif_ip6_addr_isstatic(netif, i) ||
+ ip6_addr_nethostcmp(dest, netif_ip6_addr(netif, i)))) {
+ return netif;
+ }
+ }
+ }
+
+ /* Get the netif for a suitable router-announced route. */
+ netif = nd6_find_route(dest);
+ if (netif != NULL) {
+ return netif;
+ }
+
+ /* Try with the netif that matches the source address. Given the earlier rule
+ * for scoped source addresses, this applies to unscoped addresses only. */
+ if (!ip6_addr_isany(src)) {
+ NETIF_FOREACH(netif) {
+ if (!netif_is_up(netif) || !netif_is_link_up(netif)) {
+ continue;
+ }
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) &&
+ ip6_addr_cmp(src, netif_ip6_addr(netif, i))) {
+ return netif;
+ }
+ }
+ }
+ }
+
+#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF
+ /* loopif is disabled, loopback traffic is passed through any netif */
+ if (ip6_addr_isloopback(dest)) {
+ /* don't check for link on loopback traffic */
+ if (netif_default != NULL && netif_is_up(netif_default)) {
+ return netif_default;
+ }
+ /* default netif is not up, just use any netif for loopback traffic */
+ NETIF_FOREACH(netif) {
+ if (netif_is_up(netif)) {
+ return netif;
+ }
+ }
+ return NULL;
+ }
+#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */
+#endif /* !LWIP_SINGLE_NETIF */
+
+ /* no matching netif found, use default netif, if up */
+ if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) {
+ return NULL;
+ }
+ return netif_default;
+}
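+
+/* When the source address is not (yet) known, callers pass the unspecified
+ * address so that only the destination-based rules apply; e.g. ip6_forward()
+ * (when LWIP_IPV6_FORWARD is enabled) routes with
+ *
+ *   netif = ip6_route(IP6_ADDR_ANY6, ip6_current_dest_addr());
+ */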
+
+/**
+ * @ingroup ip6
+ * Select the best IPv6 source address for a given destination IPv6 address.
+ *
+ * This implementation follows RFC 6724 Sec. 5 to the following extent:
+ * - Rules 1, 2, 3: fully implemented
+ * - Rules 4, 5, 5.5: not applicable
+ * - Rule 6: not implemented
+ * - Rule 7: not applicable
+ * - Rule 8: limited to "prefer /64 subnet match over non-match"
+ *
+ * For Rule 2, we deliberately deviate from RFC 6724 Sec. 3.1 by considering
+ * ULAs to be of smaller scope than global addresses, to avoid that a preferred
+ * ULA is picked over a deprecated global address when given a global address
+ * as destination, as that would likely result in broken two-way communication.
+ *
+ * As long as temporary addresses are not supported (as used in Rule 7), a
+ * proper implementation of Rule 8 would obviate the need to implement Rule 6.
+ *
+ * @param netif the netif on which to send a packet
+ * @param dest the destination we are trying to reach (possibly not properly
+ * zoned)
+ * @return the most suitable source address to use, or NULL if no suitable
+ * source address is found
+ */
+const ip_addr_t *
+ip6_select_source_address(struct netif *netif, const ip6_addr_t *dest)
+{
+ const ip_addr_t *best_addr;
+ const ip6_addr_t *cand_addr;
+ s8_t dest_scope, cand_scope;
+ s8_t best_scope = IP6_MULTICAST_SCOPE_RESERVED;
+ u8_t i, cand_pref, cand_bits;
+ u8_t best_pref = 0;
+ u8_t best_bits = 0;
+
+  /* Start by determining the scope of the given destination address. These
+   * tests are (hopefully) roughly ordered by likelihood of a match. */
+ if (ip6_addr_isglobal(dest)) {
+ dest_scope = IP6_MULTICAST_SCOPE_GLOBAL;
+ } else if (ip6_addr_islinklocal(dest) || ip6_addr_isloopback(dest)) {
+ dest_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL;
+ } else if (ip6_addr_isuniquelocal(dest)) {
+ dest_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL;
+ } else if (ip6_addr_ismulticast(dest)) {
+ dest_scope = ip6_addr_multicast_scope(dest);
+ } else if (ip6_addr_issitelocal(dest)) {
+ dest_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL;
+ } else {
+ /* no match, consider scope global */
+ dest_scope = IP6_MULTICAST_SCOPE_GLOBAL;
+ }
+
+ best_addr = NULL;
+
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ /* Consider only valid (= preferred and deprecated) addresses. */
+ if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) {
+ continue;
+ }
+ /* Determine the scope of this candidate address. Same ordering idea. */
+ cand_addr = netif_ip6_addr(netif, i);
+ if (ip6_addr_isglobal(cand_addr)) {
+ cand_scope = IP6_MULTICAST_SCOPE_GLOBAL;
+ } else if (ip6_addr_islinklocal(cand_addr)) {
+ cand_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL;
+ } else if (ip6_addr_isuniquelocal(cand_addr)) {
+ cand_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL;
+ } else if (ip6_addr_issitelocal(cand_addr)) {
+ cand_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL;
+ } else {
+ /* no match, treat as low-priority global scope */
+ cand_scope = IP6_MULTICAST_SCOPE_RESERVEDF;
+ }
+ cand_pref = ip6_addr_ispreferred(netif_ip6_addr_state(netif, i));
+ /* @todo compute the actual common bits, for longest matching prefix. */
+ /* We cannot count on the destination address having a proper zone
+ * assignment, so do not compare zones in this case. */
+ cand_bits = ip6_addr_netcmp_zoneless(cand_addr, dest); /* just 1 or 0 for now */
+ if (cand_bits && ip6_addr_nethostcmp(cand_addr, dest)) {
+ return netif_ip_addr6(netif, i); /* Rule 1 */
+ }
+ if ((best_addr == NULL) || /* no alternative yet */
+ ((cand_scope < best_scope) && (cand_scope >= dest_scope)) ||
+ ((cand_scope > best_scope) && (best_scope < dest_scope)) || /* Rule 2 */
+ ((cand_scope == best_scope) && ((cand_pref > best_pref) || /* Rule 3 */
+ ((cand_pref == best_pref) && (cand_bits > best_bits))))) { /* Rule 8 */
+ /* We found a new "winning" candidate. */
+ best_addr = netif_ip_addr6(netif, i);
+ best_scope = cand_scope;
+ best_pref = cand_pref;
+ best_bits = cand_bits;
+ }
+ }
+
+ return best_addr; /* may be NULL */
+}
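+
+/* Example of the rule interplay above: for a link-local destination such as
+ * fe80::1, a link-local candidate wins over a global one (Rule 2), and among
+ * two candidates of equal scope a preferred address beats a deprecated one
+ * (Rule 3). */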
+
+#if LWIP_IPV6_FORWARD
+/**
+ * Forwards an IPv6 packet. It finds an appropriate route for the
+ * packet, decrements the HL value of the packet, and outputs
+ * the packet on the appropriate interface.
+ *
+ * @param p the packet to forward (p->payload points to IP header)
+ * @param iphdr the IPv6 header of the input packet
+ * @param inp the netif on which this packet was received
+ */
+static void
+ip6_forward(struct pbuf *p, struct ip6_hdr *iphdr, struct netif *inp)
+{
+ struct netif *netif;
+
+ /* do not forward link-local or loopback addresses */
+ if (ip6_addr_islinklocal(ip6_current_dest_addr()) ||
+ ip6_addr_isloopback(ip6_current_dest_addr())) {
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding link-local address.\n"));
+ IP6_STATS_INC(ip6.rterr);
+ IP6_STATS_INC(ip6.drop);
+ return;
+ }
+
+ /* Find network interface where to forward this IP packet to. */
+ netif = ip6_route(IP6_ADDR_ANY6, ip6_current_dest_addr());
+ if (netif == NULL) {
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n",
+ IP6_ADDR_BLOCK1(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK2(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK3(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK4(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK5(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK6(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK7(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK8(ip6_current_dest_addr())));
+#if LWIP_ICMP6
+ /* Don't send ICMP messages in response to ICMP messages */
+ if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) {
+ icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE);
+ }
+#endif /* LWIP_ICMP6 */
+ IP6_STATS_INC(ip6.rterr);
+ IP6_STATS_INC(ip6.drop);
+ return;
+ }
+#if LWIP_IPV6_SCOPES
+ /* Do not forward packets with a zoned (e.g., link-local) source address
+ * outside of their zone. We determined the zone a bit earlier, so we know
+ * that the address is properly zoned here, so we can safely use has_zone.
+ * Also skip packets with a loopback source address (link-local implied). */
+ if ((ip6_addr_has_zone(ip6_current_src_addr()) &&
+ !ip6_addr_test_zone(ip6_current_src_addr(), netif)) ||
+ ip6_addr_isloopback(ip6_current_src_addr())) {
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding packet beyond its source address zone.\n"));
+ IP6_STATS_INC(ip6.rterr);
+ IP6_STATS_INC(ip6.drop);
+ return;
+ }
+#endif /* LWIP_IPV6_SCOPES */
+ /* Do not forward packets onto the same network interface on which
+ * they arrived. */
+ if (netif == inp) {
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not bouncing packets back on incoming interface.\n"));
+ IP6_STATS_INC(ip6.rterr);
+ IP6_STATS_INC(ip6.drop);
+ return;
+ }
+
+ /* decrement HL */
+ IP6H_HOPLIM_SET(iphdr, IP6H_HOPLIM(iphdr) - 1);
+ /* send ICMP6 if HL == 0 */
+ if (IP6H_HOPLIM(iphdr) == 0) {
+#if LWIP_ICMP6
+ /* Don't send ICMP messages in response to ICMP messages */
+ if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) {
+ icmp6_time_exceeded(p, ICMP6_TE_HL);
+ }
+#endif /* LWIP_ICMP6 */
+ IP6_STATS_INC(ip6.drop);
+ return;
+ }
+
+ if (netif->mtu && (p->tot_len > netif->mtu)) {
+#if LWIP_ICMP6
+ /* Don't send ICMP messages in response to ICMP messages */
+ if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) {
+ icmp6_packet_too_big(p, netif->mtu);
+ }
+#endif /* LWIP_ICMP6 */
+ IP6_STATS_INC(ip6.drop);
+ return;
+ }
+
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: forwarding packet to %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n",
+ IP6_ADDR_BLOCK1(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK2(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK3(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK4(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK5(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK6(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK7(ip6_current_dest_addr()),
+ IP6_ADDR_BLOCK8(ip6_current_dest_addr())));
+
+ /* transmit pbuf on chosen interface */
+ netif->output_ip6(netif, p, ip6_current_dest_addr());
+ IP6_STATS_INC(ip6.fw);
+ IP6_STATS_INC(ip6.xmit);
+ return;
+}
+#endif /* LWIP_IPV6_FORWARD */
+
+/** Return true if the current input packet should be accepted on this netif */
+static int
+ip6_input_accept(struct netif *netif)
+{
+ /* interface is up? */
+ if (netif_is_up(netif)) {
+ u8_t i;
+ /* unicast to this interface address? address configured? */
+ /* If custom scopes are used, the destination zone will be tested as
+ * part of the local-address comparison, but we need to test the source
+ * scope as well (e.g., is this interface on the same link?). */
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) &&
+ ip6_addr_cmp(ip6_current_dest_addr(), netif_ip6_addr(netif, i))
+#if IPV6_CUSTOM_SCOPES
+ && (!ip6_addr_has_zone(ip6_current_src_addr()) ||
+ ip6_addr_test_zone(ip6_current_src_addr(), netif))
+#endif /* IPV6_CUSTOM_SCOPES */
+ ) {
+ /* accept on this netif */
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * This function is called by the network interface device driver when
+ * an IPv6 packet is received. The function performs basic checks on the
+ * IP header, such as verifying that the packet size is at least as large
+ * as the header size. If the packet was not destined for us, the packet
+ * is forwarded (using ip6_forward).
+ *
+ * Finally, the packet is sent to the upper layer protocol input function.
+ *
+ * @param p the received IPv6 packet (p->payload points to IPv6 header)
+ * @param inp the netif on which this packet was received
+ * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't
+ * processed, but currently always returns ERR_OK)
+ */
+err_t
+ip6_input(struct pbuf *p, struct netif *inp)
+{
+ struct ip6_hdr *ip6hdr;
+ struct netif *netif;
+ const u8_t *nexth;
+ u16_t hlen, hlen_tot; /* the current header length */
+#if 0 /*IP_ACCEPT_LINK_LAYER_ADDRESSING*/
+ @todo
+ int check_ip_src=1;
+#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */
+#if LWIP_RAW
+ raw_input_state_t raw_status;
+#endif /* LWIP_RAW */
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ IP6_STATS_INC(ip6.recv);
+
+ /* identify the IP header */
+ ip6hdr = (struct ip6_hdr *)p->payload;
+ if (IP6H_V(ip6hdr) != 6) {
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IPv6 packet dropped due to bad version number %"U32_F"\n",
+ IP6H_V(ip6hdr)));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.err);
+ IP6_STATS_INC(ip6.drop);
+ return ERR_OK;
+ }
+
+#ifdef LWIP_HOOK_IP6_INPUT
+ if (LWIP_HOOK_IP6_INPUT(p, inp)) {
+ /* the packet has been eaten */
+ return ERR_OK;
+ }
+#endif
+
+ /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */
+ if ((IP6_HLEN > p->len) || (IP6H_PLEN(ip6hdr) > (p->tot_len - IP6_HLEN))) {
+ if (IP6_HLEN > p->len) {
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("IPv6 header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n",
+ (u16_t)IP6_HLEN, p->len));
+ }
+ if ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len) {
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("IPv6 (plen %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n",
+ (u16_t)(IP6H_PLEN(ip6hdr) + IP6_HLEN), p->tot_len));
+ }
+ /* free (drop) packet pbufs */
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.lenerr);
+ IP6_STATS_INC(ip6.drop);
+ return ERR_OK;
+ }
+
+ /* Trim pbuf. This should have been done at the netif layer,
+   * but we'll do it anyway just to be sure that it's done. */
+ pbuf_realloc(p, (u16_t)(IP6_HLEN + IP6H_PLEN(ip6hdr)));
+
+ /* copy IP addresses to aligned ip6_addr_t */
+ ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_dest, ip6hdr->dest);
+ ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_src, ip6hdr->src);
+
+ /* Don't accept virtual IPv4 mapped IPv6 addresses.
+ * Don't accept multicast source addresses. */
+ if (ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_dest)) ||
+ ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_src)) ||
+ ip6_addr_ismulticast(ip_2_ip6(&ip_data.current_iphdr_src))) {
+ /* free (drop) packet pbufs */
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.err);
+ IP6_STATS_INC(ip6.drop);
+ return ERR_OK;
+ }
+
+ /* Set the appropriate zone identifier on the addresses. */
+ ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_dest), IP6_UNKNOWN, inp);
+ ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_src), IP6_UNICAST, inp);
+
+ /* current header pointer. */
+ ip_data.current_ip6_header = ip6hdr;
+
+ /* In netif, used in case we need to send ICMPv6 packets back. */
+ ip_data.current_netif = inp;
+ ip_data.current_input_netif = inp;
+
+ /* match packet against an interface, i.e. is this packet for us? */
+ if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
+ /* Always joined to multicast if-local and link-local all-nodes group. */
+ if (ip6_addr_isallnodes_iflocal(ip6_current_dest_addr()) ||
+ ip6_addr_isallnodes_linklocal(ip6_current_dest_addr())) {
+ netif = inp;
+ }
+#if LWIP_IPV6_MLD
+ else if (mld6_lookfor_group(inp, ip6_current_dest_addr())) {
+ netif = inp;
+ }
+#else /* LWIP_IPV6_MLD */
+ else if (ip6_addr_issolicitednode(ip6_current_dest_addr())) {
+ u8_t i;
+ /* Filter solicited node packets when MLD is not enabled
+ * (for Neighbor discovery). */
+ netif = NULL;
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) &&
+ ip6_addr_cmp_solicitednode(ip6_current_dest_addr(), netif_ip6_addr(inp, i))) {
+ netif = inp;
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: solicited node packet accepted on interface %c%c\n",
+ netif->name[0], netif->name[1]));
+ break;
+ }
+ }
+ }
+#endif /* LWIP_IPV6_MLD */
+ else {
+ netif = NULL;
+ }
+ } else {
+    /* start trying with inp. If that's not acceptable, start walking the
+       list of configured netifs. */
+ if (ip6_input_accept(inp)) {
+ netif = inp;
+ } else {
+ netif = NULL;
+#if !IPV6_CUSTOM_SCOPES
+ /* Shortcut: stop looking for other interfaces if either the source or
+ * the destination has a scope constrained to this interface. Custom
+ * scopes may break the 1:1 link/interface mapping, however. */
+ if (ip6_addr_islinklocal(ip6_current_dest_addr()) ||
+ ip6_addr_islinklocal(ip6_current_src_addr())) {
+ goto netif_found;
+ }
+#endif /* !IPV6_CUSTOM_SCOPES */
+#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF
+ /* The loopback address is to be considered link-local. Packets to it
+ * should be dropped on other interfaces, as per RFC 4291 Sec. 2.5.3.
+ * Its implied scope means packets *from* the loopback address should
+ * not be accepted on other interfaces, either. These requirements
+ * cannot be implemented in the case that loopback traffic is sent
+ * across a non-loopback interface, however. */
+ if (ip6_addr_isloopback(ip6_current_dest_addr()) ||
+ ip6_addr_isloopback(ip6_current_src_addr())) {
+ goto netif_found;
+ }
+#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */
+#if !LWIP_SINGLE_NETIF
+ NETIF_FOREACH(netif) {
+ if (netif == inp) {
+ /* we checked that before already */
+ continue;
+ }
+ if (ip6_input_accept(netif)) {
+ break;
+ }
+ }
+#endif /* !LWIP_SINGLE_NETIF */
+ }
+netif_found:
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet accepted on interface %c%c\n",
+              netif ? netif->name[0] : 'X', netif ? netif->name[1] : 'X'));
+ }
+
+ /* "::" packet source address? (used in duplicate address detection) */
+ if (ip6_addr_isany(ip6_current_src_addr()) &&
+ (!ip6_addr_issolicitednode(ip6_current_dest_addr()))) {
+ /* packet source is not valid */
+ /* free (drop) packet pbufs */
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with src ANY_ADDRESS dropped\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ }
+
+ /* packet not for us? */
+ if (netif == NULL) {
+ /* packet not for us, route or discard */
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_TRACE, ("ip6_input: packet not for us.\n"));
+#if LWIP_IPV6_FORWARD
+ /* non-multicast packet? */
+ if (!ip6_addr_ismulticast(ip6_current_dest_addr())) {
+ /* try to forward IP packet on (other) interfaces */
+ ip6_forward(p, ip6hdr, inp);
+ }
+#endif /* LWIP_IPV6_FORWARD */
+ pbuf_free(p);
+ goto ip6_input_cleanup;
+ }
+
+ /* current netif pointer. */
+ ip_data.current_netif = netif;
+
+ /* Save next header type. */
+ nexth = &IP6H_NEXTH(ip6hdr);
+
+ /* Init header length. */
+ hlen = hlen_tot = IP6_HLEN;
+
+ /* Move to payload. */
+ pbuf_remove_header(p, IP6_HLEN);
+
+ /* Process known option extension headers, if present. */
+ while (*nexth != IP6_NEXTH_NONE)
+ {
+ switch (*nexth) {
+ case IP6_NEXTH_HOPBYHOP:
+ {
+ s32_t opt_offset;
+ struct ip6_hbh_hdr *hbh_hdr;
+ struct ip6_opt_hdr *opt_hdr;
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header\n"));
+
+ /* Get and check the header length, while staying in packet bounds. */
+ hbh_hdr = (struct ip6_hbh_hdr *)p->payload;
+
+ /* Get next header type. */
+ nexth = &IP6_HBH_NEXTH(hbh_hdr);
+
+ /* Get the header length. */
+ hlen = (u16_t)(8 * (1 + hbh_hdr->_hlen));
+
+ if ((p->len < 8) || (hlen > p->len)) {
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n",
+ hlen, p->len));
+ /* free (drop) packet pbufs */
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.lenerr);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ }
+
+ hlen_tot = (u16_t)(hlen_tot + hlen);
+
+ /* The extended option header starts right after Hop-by-Hop header. */
+ opt_offset = IP6_HBH_HLEN;
+ while (opt_offset < hlen)
+ {
+ s32_t opt_dlen = 0;
+
+ opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + opt_offset);
+
+ switch (IP6_OPT_TYPE(opt_hdr)) {
+ /* @todo: process IPV6 Hop-by-Hop option data */
+ case IP6_PAD1_OPTION:
+          /* PAD1 option doesn't have length and value fields */
+ opt_dlen = -1;
+ break;
+ case IP6_PADN_OPTION:
+ opt_dlen = IP6_OPT_DLEN(opt_hdr);
+ break;
+ case IP6_ROUTER_ALERT_OPTION:
+ opt_dlen = IP6_OPT_DLEN(opt_hdr);
+ break;
+ case IP6_JUMBO_OPTION:
+ opt_dlen = IP6_OPT_DLEN(opt_hdr);
+ break;
+ default:
+          /* Check the two MSB of the Hop-by-Hop option type. */
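+          /* Per RFC 8200 Sec. 4.2, the two high-order bits of an
+           * unrecognized option type select the required action:
+           * 00 skip the option, 01 discard the packet, 10 discard and
+           * send an ICMP Parameter Problem, 11 discard and send the ICMP
+           * message only if the destination was not a multicast address. */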
+ switch (IP6_OPT_TYPE_ACTION(opt_hdr)) {
+ case 1:
+ /* Discard the packet. */
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ case 2:
+ /* Send ICMP Parameter Problem */
+ icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr);
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ case 3:
+ /* Send ICMP Parameter Problem if destination address is not a multicast address */
+ if (!ip6_addr_ismulticast(ip6_current_dest_addr())) {
+ icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr);
+ }
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ default:
+ /* Skip over this option. */
+ opt_dlen = IP6_OPT_DLEN(opt_hdr);
+ break;
+ }
+ break;
+ }
+
+ /* Adjust the offset to move to the next extended option header */
+ opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen;
+ }
+ pbuf_remove_header(p, hlen);
+ break;
+ }
+ case IP6_NEXTH_DESTOPTS:
+ {
+ s32_t opt_offset;
+ struct ip6_dest_hdr *dest_hdr;
+ struct ip6_opt_hdr *opt_hdr;
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Destination options header\n"));
+
+ dest_hdr = (struct ip6_dest_hdr *)p->payload;
+
+ /* Get next header type. */
+ nexth = &IP6_DEST_NEXTH(dest_hdr);
+
+ /* Get the header length. */
+      hlen = (u16_t)(8 * (1 + dest_hdr->_hlen));
+ if ((p->len < 8) || (hlen > p->len)) {
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n",
+ hlen, p->len));
+ /* free (drop) packet pbufs */
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.lenerr);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ }
+
+ hlen_tot = (u16_t)(hlen_tot + hlen);
+
+ /* The extended option header starts right after Destination header. */
+ opt_offset = IP6_DEST_HLEN;
+ while (opt_offset < hlen)
+ {
+ s32_t opt_dlen = 0;
+
+ opt_hdr = (struct ip6_opt_hdr *)((u8_t *)dest_hdr + opt_offset);
+
+ switch (IP6_OPT_TYPE(opt_hdr))
+ {
+ /* @todo: process IPV6 Destination option data */
+ case IP6_PAD1_OPTION:
+          /* PAD1 option doesn't have length and value fields */
+ opt_dlen = -1;
+ break;
+ case IP6_PADN_OPTION:
+ opt_dlen = IP6_OPT_DLEN(opt_hdr);
+ break;
+ case IP6_ROUTER_ALERT_OPTION:
+ opt_dlen = IP6_OPT_DLEN(opt_hdr);
+ break;
+ case IP6_JUMBO_OPTION:
+ opt_dlen = IP6_OPT_DLEN(opt_hdr);
+ break;
+ case IP6_HOME_ADDRESS_OPTION:
+ opt_dlen = IP6_OPT_DLEN(opt_hdr);
+ break;
+ default:
+          /* Check the two MSB of the destination option type. */
+ switch (IP6_OPT_TYPE_ACTION(opt_hdr))
+ {
+ case 1:
+ /* Discard the packet. */
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ case 2:
+ /* Send ICMP Parameter Problem */
+ icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr);
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ case 3:
+ /* Send ICMP Parameter Problem if destination address is not a multicast address */
+ if (!ip6_addr_ismulticast(ip6_current_dest_addr())) {
+ icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr);
+ }
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ default:
+ /* Skip over this option. */
+ opt_dlen = IP6_OPT_DLEN(opt_hdr);
+ break;
+ }
+ break;
+ }
+
+ /* Adjust the offset to move to the next extended option header */
+ opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen;
+ }
+
+ pbuf_remove_header(p, hlen);
+ break;
+ }
+ case IP6_NEXTH_ROUTING:
+ {
+ struct ip6_rout_hdr *rout_hdr;
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Routing header\n"));
+
+ rout_hdr = (struct ip6_rout_hdr *)p->payload;
+
+ /* Get next header type. */
+ nexth = &IP6_ROUT_NEXTH(rout_hdr);
+
+ /* Get the header length. */
+      hlen = (u16_t)(8 * (1 + rout_hdr->_hlen));
+
+ if ((p->len < 8) || (hlen > p->len)) {
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n",
+ hlen, p->len));
+ /* free (drop) packet pbufs */
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.lenerr);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ }
+
+ /* Skip over this header. */
+ hlen_tot = (u16_t)(hlen_tot + hlen);
+
+      /* if the Segments Left value in the routing header is 0, ignore this header */
+ if (IP6_ROUT_SEG_LEFT(rout_hdr)) {
+        /* The length field of the routing header must be even */
+ if (rout_hdr->_hlen & 0x1) {
+ /* Discard and send parameter field error */
+ icmp6_param_problem(p, ICMP6_PP_FIELD, &rout_hdr->_hlen);
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ }
+
+ switch (IP6_ROUT_TYPE(rout_hdr))
+ {
+        /* @todo: process routing by the type */
+ case IP6_ROUT_TYPE2:
+ break;
+ case IP6_ROUT_RPL:
+ break;
+ default:
+ /* Discard unrecognized routing type and send parameter field error */
+ icmp6_param_problem(p, ICMP6_PP_FIELD, &IP6_ROUT_TYPE(rout_hdr));
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ }
+ }
+
+ pbuf_remove_header(p, hlen);
+ break;
+ }
+ case IP6_NEXTH_FRAGMENT:
+ {
+ struct ip6_frag_hdr *frag_hdr;
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header\n"));
+
+ frag_hdr = (struct ip6_frag_hdr *)p->payload;
+
+ /* Get next header type. */
+ nexth = &IP6_FRAG_NEXTH(frag_hdr);
+
+ /* Fragment Header length. */
+ hlen = 8;
+
+ /* Make sure this header fits in current pbuf. */
+ if (hlen > p->len) {
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n",
+ hlen, p->len));
+ /* free (drop) packet pbufs */
+ pbuf_free(p);
+ IP6_FRAG_STATS_INC(ip6_frag.lenerr);
+ IP6_FRAG_STATS_INC(ip6_frag.drop);
+ goto ip6_input_cleanup;
+ }
+
+ hlen_tot = (u16_t)(hlen_tot + hlen);
+
+ /* check payload length is multiple of 8 octets when mbit is set */
+ if (IP6_FRAG_MBIT(frag_hdr) && (IP6H_PLEN(ip6hdr) & 0x7)) {
+ /* ipv6 payload length is not multiple of 8 octets */
+ icmp6_param_problem(p, ICMP6_PP_FIELD, LWIP_PACKED_CAST(const void *, &ip6hdr->_plen));
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid payload length dropped\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ }
+
+ /* Offset == 0 and more_fragments == 0? */
+ if ((frag_hdr->_fragment_offset &
+ PP_HTONS(IP6_FRAG_OFFSET_MASK | IP6_FRAG_MORE_FLAG)) == 0) {
+ /* This is a 1-fragment packet. Skip this header and continue. */
+ pbuf_remove_header(p, hlen);
+ } else {
+#if LWIP_IPV6_REASS
+ /* reassemble the packet */
+ ip_data.current_ip_header_tot_len = hlen_tot;
+ p = ip6_reass(p);
+ /* packet not fully reassembled yet? */
+ if (p == NULL) {
+ goto ip6_input_cleanup;
+ }
+
+      /* Returned p points to the IPv6 header.
+ * Update all our variables and pointers and continue. */
+ ip6hdr = (struct ip6_hdr *)p->payload;
+ nexth = &IP6H_NEXTH(ip6hdr);
+ hlen = hlen_tot = IP6_HLEN;
+ pbuf_remove_header(p, IP6_HLEN);
+
+#else /* LWIP_IPV6_REASS */
+ /* free (drop) packet pbufs */
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header dropped (with LWIP_IPV6_REASS==0)\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.opterr);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+#endif /* LWIP_IPV6_REASS */
+ }
+ break;
+ }
+ default:
+ goto options_done;
+ }
+
+ if (*nexth == IP6_NEXTH_HOPBYHOP) {
+ /* Hop-by-Hop header comes only as a first option */
+ icmp6_param_problem(p, ICMP6_PP_HEADER, nexth);
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header dropped (only valid as a first option)\n"));
+ pbuf_free(p);
+ IP6_STATS_INC(ip6.drop);
+ goto ip6_input_cleanup;
+ }
+ }
+
+options_done:
+
+ /* send to upper layers */
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: \n"));
+ ip6_debug_print(p);
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len));
+
+ ip_data.current_ip_header_tot_len = hlen_tot;
+
+#if LWIP_RAW
+ /* p points to IPv6 header again for raw_input. */
+ pbuf_add_header_force(p, hlen_tot);
+ /* raw input did not eat the packet? */
+ raw_status = raw_input(p, inp);
+ if (raw_status != RAW_INPUT_EATEN)
+ {
+ /* Point to payload. */
+ pbuf_remove_header(p, hlen_tot);
+#else /* LWIP_RAW */
+ {
+#endif /* LWIP_RAW */
+ switch (*nexth) {
+ case IP6_NEXTH_NONE:
+ pbuf_free(p);
+ break;
+#if LWIP_UDP
+ case IP6_NEXTH_UDP:
+#if LWIP_UDPLITE
+ case IP6_NEXTH_UDPLITE:
+#endif /* LWIP_UDPLITE */
+ udp_input(p, inp);
+ break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+ case IP6_NEXTH_TCP:
+ tcp_input(p, inp);
+ break;
+#endif /* LWIP_TCP */
+#if LWIP_ICMP6
+ case IP6_NEXTH_ICMP6:
+ icmp6_input(p, inp);
+ break;
+#endif /* LWIP_ICMP6 */
+ default:
+#if LWIP_RAW
+ if (raw_status == RAW_INPUT_DELIVERED) {
+ /* @todo: ipv6 mib in-delivers? */
+ } else
+#endif /* LWIP_RAW */
+ {
+#if LWIP_ICMP6
+ /* p points to IPv6 header again for raw_input. */
+ pbuf_add_header_force(p, hlen_tot);
+ /* send ICMP parameter problem unless it was a multicast or ICMPv6 */
+ if ((!ip6_addr_ismulticast(ip6_current_dest_addr())) &&
+ (IP6H_NEXTH(ip6hdr) != IP6_NEXTH_ICMP6)) {
+ icmp6_param_problem(p, ICMP6_PP_HEADER, nexth);
+ }
+#endif /* LWIP_ICMP6 */
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_input: Unsupported transport protocol %"U16_F"\n", (u16_t)IP6H_NEXTH(ip6hdr)));
+ IP6_STATS_INC(ip6.proterr);
+ IP6_STATS_INC(ip6.drop);
+ }
+ pbuf_free(p);
+ break;
+ }
+ }
+
+ip6_input_cleanup:
+ ip_data.current_netif = NULL;
+ ip_data.current_input_netif = NULL;
+ ip_data.current_ip6_header = NULL;
+ ip_data.current_ip_header_tot_len = 0;
+ ip6_addr_set_zero(ip6_current_src_addr());
+ ip6_addr_set_zero(ip6_current_dest_addr());
+
+ return ERR_OK;
+}
+
+
+/**
+ * Sends an IPv6 packet on a network interface. This function constructs
+ * the IPv6 header. If the source IPv6 address is NULL, the IPv6 "ANY" address is
+ * used as source (usually during network startup). If the source IPv6 address is
+ * IP6_ADDR_ANY, the most appropriate IPv6 address of the outgoing network
+ * interface is filled in as source address. If the destination IPv6 address is
+ * LWIP_IP_HDRINCL, p is assumed to already include an IPv6 header and
+ * p->payload points to it instead of the data.
+ *
+ * @param p the packet to send (p->payload points to the data, e.g. next
+ protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
+ IPv6 header and p->payload points to that IPv6 header)
+ * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an
+ * IP address of the netif is selected and used as source address.
+ * if src == NULL, IP6_ADDR_ANY is used as source) (src is possibly not
+ * properly zoned)
+ * @param dest the destination IPv6 address to send the packet to (possibly not
+ * properly zoned)
+ * @param hl the Hop Limit value to be set in the IPv6 header
+ * @param tc the Traffic Class value to be set in the IPv6 header
+ * @param nexth the Next Header to be set in the IPv6 header
+ * @param netif the netif on which to send this packet
+ * @return ERR_OK if the packet was sent OK
+ * ERR_BUF if p doesn't have enough space for IPv6/LINK headers
+ * returns errors returned by netif->output_ip6
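+ *
+ * Illustrative sketch (not part of the original sources; my_len and the
+ * UDP next-header value are arbitrary choices): send a prepared payload,
+ * letting the stack pick the source address:
+ *
+ *   struct pbuf *q = pbuf_alloc(PBUF_IP, my_len, PBUF_RAM);
+ *   if (q != NULL) {
+ *     // ... fill q->payload ...
+ *     err_t err = ip6_output_if(q, IP6_ADDR_ANY6, &dest, 64, 0,
+ *                               IP6_NEXTH_UDP, netif);
+ *     pbuf_free(q);  // the stack does not take ownership of q
+ *   }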
+ */
+err_t
+ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest,
+ u8_t hl, u8_t tc,
+ u8_t nexth, struct netif *netif)
+{
+ const ip6_addr_t *src_used = src;
+ if (dest != LWIP_IP_HDRINCL) {
+ if (src != NULL && ip6_addr_isany(src)) {
+ src_used = ip_2_ip6(ip6_select_source_address(netif, dest));
+ if ((src_used == NULL) || ip6_addr_isany(src_used)) {
+ /* No appropriate source address was found for this packet. */
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: No suitable source address for packet.\n"));
+ IP6_STATS_INC(ip6.rterr);
+ return ERR_RTE;
+ }
+ }
+ }
+ return ip6_output_if_src(p, src_used, dest, hl, tc, nexth, netif);
+}
+
+/**
+ * Same as ip6_output_if() but 'src' address is not replaced by netif address
+ * when it is 'any'.
+ */
+err_t
+ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest,
+ u8_t hl, u8_t tc,
+ u8_t nexth, struct netif *netif)
+{
+ struct ip6_hdr *ip6hdr;
+ ip6_addr_t dest_addr;
+
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
+
+ /* Should the IPv6 header be generated or is it already included in p? */
+ if (dest != LWIP_IP_HDRINCL) {
+#if LWIP_IPV6_SCOPES
+ /* If the destination address is scoped but lacks a zone, add a zone now,
+ * based on the outgoing interface. The lower layers (e.g., nd6) absolutely
+ * require addresses to be properly zoned for correctness. In some cases,
+ * earlier attempts will have been made to add a zone to the destination,
+ * but this function is the only one that is called in all (other) cases,
+ * so we must do this here. */
+ if (ip6_addr_lacks_zone(dest, IP6_UNKNOWN)) {
+ ip6_addr_copy(dest_addr, *dest);
+ ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif);
+ dest = &dest_addr;
+ }
+#endif /* LWIP_IPV6_SCOPES */
+
+ /* generate IPv6 header */
+ if (pbuf_add_header(p, IP6_HLEN)) {
+ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: not enough room for IPv6 header in pbuf\n"));
+ IP6_STATS_INC(ip6.err);
+ return ERR_BUF;
+ }
+
+ ip6hdr = (struct ip6_hdr *)p->payload;
+ LWIP_ASSERT("check that first pbuf can hold struct ip6_hdr",
+ (p->len >= sizeof(struct ip6_hdr)));
+
+ IP6H_HOPLIM_SET(ip6hdr, hl);
+ IP6H_NEXTH_SET(ip6hdr, nexth);
+
+ /* dest cannot be NULL here */
+ ip6_addr_copy_to_packed(ip6hdr->dest, *dest);
+
+ IP6H_VTCFL_SET(ip6hdr, 6, tc, 0);
+ IP6H_PLEN_SET(ip6hdr, (u16_t)(p->tot_len - IP6_HLEN));
+
+ if (src == NULL) {
+ src = IP6_ADDR_ANY6;
+ }
+ /* src cannot be NULL here */
+ ip6_addr_copy_to_packed(ip6hdr->src, *src);
+
+ } else {
+ /* IP header already included in p */
+ ip6hdr = (struct ip6_hdr *)p->payload;
+ ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest);
+ ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif);
+ dest = &dest_addr;
+ }
+
+ IP6_STATS_INC(ip6.xmit);
+
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num));
+ ip6_debug_print(p);
+
+#if ENABLE_LOOPBACK
+ {
+ int i;
+#if !LWIP_HAVE_LOOPIF
+ if (ip6_addr_isloopback(dest)) {
+ return netif_loop_output(netif, p);
+ }
+#endif /* !LWIP_HAVE_LOOPIF */
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) &&
+ ip6_addr_cmp(dest, netif_ip6_addr(netif, i))) {
+ /* Packet to self, enqueue it for loopback */
+ LWIP_DEBUGF(IP6_DEBUG, ("netif_loop_output()\n"));
+ return netif_loop_output(netif, p);
+ }
+ }
+ }
+#if LWIP_MULTICAST_TX_OPTIONS
+ if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) {
+ netif_loop_output(netif, p);
+ }
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+#endif /* ENABLE_LOOPBACK */
+#if LWIP_IPV6_FRAG
+ /* don't fragment if interface has mtu set to 0 [loopif] */
+ if (netif_mtu6(netif) && (p->tot_len > nd6_get_destination_mtu(dest, netif))) {
+ return ip6_frag(p, netif, dest);
+ }
+#endif /* LWIP_IPV6_FRAG */
+
+ LWIP_DEBUGF(IP6_DEBUG, ("netif->output_ip6()\n"));
+ return netif->output_ip6(netif, p, dest);
+}
+
+/**
+ * Simple interface to ip6_output_if. It finds the outgoing network
+ * interface and calls upon ip6_output_if to do the actual work.
+ *
+ * @param p the packet to send (p->payload points to the data, e.g. next
+ protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
+ IPv6 header and p->payload points to that IPv6 header)
+ * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an
+ * IP address of the netif is selected and used as source address.
+ * if src == NULL, IP6_ADDR_ANY is used as source)
+ * @param dest the destination IPv6 address to send the packet to
+ * @param hl the Hop Limit value to be set in the IPv6 header
+ * @param tc the Traffic Class value to be set in the IPv6 header
+ * @param nexth the Next Header to be set in the IPv6 header
+ *
+ * @return ERR_RTE if no route is found
+ *         see ip6_output_if() for more return values
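+ *
+ * Illustrative sketch (not part of the original sources): same call as in
+ * the ip6_output_if() example above, but with the route looked up
+ * internally via ip6_route():
+ *
+ *   err = ip6_output(q, IP6_ADDR_ANY6, &dest, 64, 0, IP6_NEXTH_UDP);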
+ */
+err_t
+ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest,
+ u8_t hl, u8_t tc, u8_t nexth)
+{
+ struct netif *netif;
+ struct ip6_hdr *ip6hdr;
+ ip6_addr_t src_addr, dest_addr;
+
+ LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
+
+ if (dest != LWIP_IP_HDRINCL) {
+ netif = ip6_route(src, dest);
+ } else {
+ /* IP header included in p, read addresses. */
+ ip6hdr = (struct ip6_hdr *)p->payload;
+ ip6_addr_copy_from_packed(src_addr, ip6hdr->src);
+ ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest);
+ netif = ip6_route(&src_addr, &dest_addr);
+ }
+
+ if (netif == NULL) {
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n",
+ IP6_ADDR_BLOCK1(dest),
+ IP6_ADDR_BLOCK2(dest),
+ IP6_ADDR_BLOCK3(dest),
+ IP6_ADDR_BLOCK4(dest),
+ IP6_ADDR_BLOCK5(dest),
+ IP6_ADDR_BLOCK6(dest),
+ IP6_ADDR_BLOCK7(dest),
+ IP6_ADDR_BLOCK8(dest)));
+ IP6_STATS_INC(ip6.rterr);
+ return ERR_RTE;
+ }
+
+ return ip6_output_if(p, src, dest, hl, tc, nexth, netif);
+}
+
+
+#if LWIP_NETIF_USE_HINTS
+/** Like ip6_output, but takes a netif_hint pointer that is passed on to netif->hints
+ * before calling ip6_output_if.
+ *
+ * @param p the packet to send (p->payload points to the data, e.g. next
+ protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
+ IPv6 header and p->payload points to that IPv6 header)
+ * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an
+ * IP address of the netif is selected and used as source address.
+ * if src == NULL, IP6_ADDR_ANY is used as source)
+ * @param dest the destination IPv6 address to send the packet to
+ * @param hl the Hop Limit value to be set in the IPv6 header
+ * @param tc the Traffic Class value to be set in the IPv6 header
+ * @param nexth the Next Header to be set in the IPv6 header
+ * @param netif_hint netif output hint pointer set to netif->hints before
+ * calling ip6_output_if()
+ *
+ * @return ERR_RTE if no route is found
+ *         see ip6_output_if() for more return values
+ */
+err_t
+ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest,
+ u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint)
+{
+ struct netif *netif;
+ struct ip6_hdr *ip6hdr;
+ ip6_addr_t src_addr, dest_addr;
+ err_t err;
+
+ LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p);
+
+ if (dest != LWIP_IP_HDRINCL) {
+ netif = ip6_route(src, dest);
+ } else {
+ /* IP header included in p, read addresses. */
+ ip6hdr = (struct ip6_hdr *)p->payload;
+ ip6_addr_copy_from_packed(src_addr, ip6hdr->src);
+ ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest);
+ netif = ip6_route(&src_addr, &dest_addr);
+ }
+
+ if (netif == NULL) {
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n",
+ IP6_ADDR_BLOCK1(dest),
+ IP6_ADDR_BLOCK2(dest),
+ IP6_ADDR_BLOCK3(dest),
+ IP6_ADDR_BLOCK4(dest),
+ IP6_ADDR_BLOCK5(dest),
+ IP6_ADDR_BLOCK6(dest),
+ IP6_ADDR_BLOCK7(dest),
+ IP6_ADDR_BLOCK8(dest)));
+ IP6_STATS_INC(ip6.rterr);
+ return ERR_RTE;
+ }
+
+ NETIF_SET_HINTS(netif, netif_hint);
+ err = ip6_output_if(p, src, dest, hl, tc, nexth, netif);
+ NETIF_RESET_HINTS(netif);
+
+ return err;
+}
+#endif /* LWIP_NETIF_USE_HINTS*/
+
+#if LWIP_IPV6_MLD
+/**
+ * Add a hop-by-hop options header with a router alert option and padding.
+ *
+ * Used by MLD when sending a Multicast listener report/done message.
+ *
+ * @param p the packet to which we will prepend the options header
+ * @param nexth the next header protocol number (e.g. IP6_NEXTH_ICMP6)
+ * @param value the value of the router alert option data (e.g. IP6_ROUTER_ALERT_VALUE_MLD)
+ * @return ERR_OK if hop-by-hop header was added, ERR_* otherwise
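+ *
+ * Illustrative sketch (not part of the original sources): MLD-style usage,
+ * prepending the router alert and then sending the packet with
+ * nexth == IP6_NEXTH_HOPBYHOP so receivers parse the option:
+ *
+ *   if (ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6,
+ *                              IP6_ROUTER_ALERT_VALUE_MLD) != ERR_OK) {
+ *     pbuf_free(p);
+ *     return;
+ *   }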
+ */
+err_t
+ip6_options_add_hbh_ra(struct pbuf *p, u8_t nexth, u8_t value)
+{
+ u8_t *opt_data;
+ u32_t offset = 0;
+ struct ip6_hbh_hdr *hbh_hdr;
+ struct ip6_opt_hdr *opt_hdr;
+
+ /* fixed 4 bytes for router alert option and 2 bytes padding */
+ const u8_t hlen = (sizeof(struct ip6_opt_hdr) * 2) + IP6_ROUTER_ALERT_DLEN;
+ /* Move pointer to make room for hop-by-hop options header. */
+ if (pbuf_add_header(p, sizeof(struct ip6_hbh_hdr) + hlen)) {
+ LWIP_DEBUGF(IP6_DEBUG, ("ip6_options: no space for options header\n"));
+ IP6_STATS_INC(ip6.err);
+ return ERR_BUF;
+ }
+
+ /* Set fields of Hop-by-Hop header */
+ hbh_hdr = (struct ip6_hbh_hdr *)p->payload;
+ IP6_HBH_NEXTH(hbh_hdr) = nexth;
+ hbh_hdr->_hlen = 0;
+ offset = IP6_HBH_HLEN;
+
+ /* Set router alert options to Hop-by-Hop extended option header */
+ opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset);
+ IP6_OPT_TYPE(opt_hdr) = IP6_ROUTER_ALERT_OPTION;
+ IP6_OPT_DLEN(opt_hdr) = IP6_ROUTER_ALERT_DLEN;
+ offset += IP6_OPT_HLEN;
+
+ /* Set router alert option data */
+ opt_data = (u8_t *)hbh_hdr + offset;
+ opt_data[0] = value;
+ opt_data[1] = 0;
+ offset += IP6_OPT_DLEN(opt_hdr);
+
+  /* add 2 bytes of padding to bring the Hop-by-Hop header length to 8 bytes */
+ opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset);
+ IP6_OPT_TYPE(opt_hdr) = IP6_PADN_OPTION;
+ IP6_OPT_DLEN(opt_hdr) = 0;
+
+ return ERR_OK;
+}
+#endif /* LWIP_IPV6_MLD */
+
+#if IP6_DEBUG
+/** Print an IPv6 header using LWIP_DEBUGF.
+ * @param p an IPv6 packet, p->payload pointing to the IPv6 header
+ */
+void
+ip6_debug_print(struct pbuf *p)
+{
+ struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload;
+
+ LWIP_DEBUGF(IP6_DEBUG, ("IPv6 header:\n"));
+ LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(IP6_DEBUG, ("| %2"U16_F" | %3"U16_F" | %7"U32_F" | (ver, class, flow)\n",
+ IP6H_V(ip6hdr),
+ IP6H_TC(ip6hdr),
+ IP6H_FL(ip6hdr)));
+ LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(IP6_DEBUG, ("| %5"U16_F" | %3"U16_F" | %3"U16_F" | (plen, nexth, hopl)\n",
+ IP6H_PLEN(ip6hdr),
+ IP6H_NEXTH(ip6hdr),
+ IP6H_HOPLIM(ip6hdr)));
+ LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (src)\n",
+ IP6_ADDR_BLOCK1(&(ip6hdr->src)),
+ IP6_ADDR_BLOCK2(&(ip6hdr->src)),
+ IP6_ADDR_BLOCK3(&(ip6hdr->src)),
+ IP6_ADDR_BLOCK4(&(ip6hdr->src))));
+ LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n",
+ IP6_ADDR_BLOCK5(&(ip6hdr->src)),
+ IP6_ADDR_BLOCK6(&(ip6hdr->src)),
+ IP6_ADDR_BLOCK7(&(ip6hdr->src)),
+ IP6_ADDR_BLOCK8(&(ip6hdr->src))));
+ LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (dest)\n",
+ IP6_ADDR_BLOCK1(&(ip6hdr->dest)),
+ IP6_ADDR_BLOCK2(&(ip6hdr->dest)),
+ IP6_ADDR_BLOCK3(&(ip6hdr->dest)),
+ IP6_ADDR_BLOCK4(&(ip6hdr->dest))));
+ LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n",
+ IP6_ADDR_BLOCK5(&(ip6hdr->dest)),
+ IP6_ADDR_BLOCK6(&(ip6hdr->dest)),
+ IP6_ADDR_BLOCK7(&(ip6hdr->dest)),
+ IP6_ADDR_BLOCK8(&(ip6hdr->dest))));
+ LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n"));
+}
+#endif /* IP6_DEBUG */
+
+#endif /* LWIP_IPV6 */
diff --git a/lwip/src/core/ipv6/ip6_addr.c b/lwip/src/core/ipv6/ip6_addr.c
new file mode 100644
index 0000000..687c02f
--- /dev/null
+++ b/lwip/src/core/ipv6/ip6_addr.c
@@ -0,0 +1,343 @@
+/**
+ * @file
+ *
+ * IPv6 addresses.
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ * Functions for handling IPv6 addresses.
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/ip_addr.h"
+#include "lwip/def.h"
+
+#include <string.h>
+
+#if LWIP_IPV4
+#include "lwip/ip4_addr.h" /* for ip6addr_aton to handle IPv4-mapped addresses */
+#endif /* LWIP_IPV4 */
+
+/* used by IP6_ADDR_ANY(6) in ip6_addr.h */
+const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul);
+
+#define lwip_xchar(i) ((char)((i) < 10 ? '0' + (i) : 'A' + (i) - 10))
+
+/**
+ * Check whether "cp" is a valid ascii representation
+ * of an IPv6 address and convert to a binary address.
+ * Returns 1 if the address is valid, 0 if not.
+ *
+ * @param cp IPv6 address in ascii representation (e.g. "FF01::1")
+ * @param addr pointer to which to save the ip address in network order
+ * @return 1 if cp could be converted to addr, 0 on failure
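+ *
+ * Illustrative usage (not part of the original sources):
+ *
+ *   ip6_addr_t a;
+ *   if (ip6addr_aton("2001:db8::1", &a)) {
+ *     // a now holds the address in network byte order
+ *   }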
+ */
+int
+ip6addr_aton(const char *cp, ip6_addr_t *addr)
+{
+ u32_t addr_index, zero_blocks, current_block_index, current_block_value;
+ const char *s;
+#if LWIP_IPV4
+ int check_ipv4_mapped = 0;
+#endif /* LWIP_IPV4 */
+
+  /* Count the number of colons, to determine how many blocks a "::"
+     sequence has to expand to (zero_blocks may be 1 even if there is
+     no "::" sequence) */
+ zero_blocks = 8;
+ for (s = cp; *s != 0; s++) {
+ if (*s == ':') {
+ zero_blocks--;
+#if LWIP_IPV4
+ } else if (*s == '.') {
+      if ((zero_blocks == 5) || (zero_blocks == 2)) {
+ check_ipv4_mapped = 1;
+ /* last block could be the start of an IPv4 address */
+ zero_blocks--;
+ } else {
+ /* invalid format */
+ return 0;
+ }
+ break;
+#endif /* LWIP_IPV4 */
+ } else if (!lwip_isxdigit(*s)) {
+ break;
+ }
+ }
+
+ /* parse each block */
+ addr_index = 0;
+ current_block_index = 0;
+ current_block_value = 0;
+ for (s = cp; *s != 0; s++) {
+ if (*s == ':') {
+ if (addr) {
+ if (current_block_index & 0x1) {
+ addr->addr[addr_index++] |= current_block_value;
+ }
+ else {
+ addr->addr[addr_index] = current_block_value << 16;
+ }
+ }
+ current_block_index++;
+#if LWIP_IPV4
+ if (check_ipv4_mapped) {
+ if (current_block_index == 6) {
+ ip4_addr_t ip4;
+ int ret = ip4addr_aton(s + 1, &ip4);
+ if (ret) {
+ if (addr) {
+ addr->addr[3] = lwip_htonl(ip4.addr);
+ current_block_index++;
+ goto fix_byte_order_and_return;
+ }
+ return 1;
+ }
+ }
+ }
+#endif /* LWIP_IPV4 */
+ current_block_value = 0;
+ if (current_block_index > 7) {
+ /* address too long! */
+ return 0;
+ }
+ if (s[1] == ':') {
+ if (s[2] == ':') {
+ /* invalid format: three successive colons */
+ return 0;
+ }
+ s++;
+ /* "::" found, set zeros */
+ while (zero_blocks > 0) {
+ zero_blocks--;
+ if (current_block_index & 0x1) {
+ addr_index++;
+ } else {
+ if (addr) {
+ addr->addr[addr_index] = 0;
+ }
+ }
+ current_block_index++;
+ if (current_block_index > 7) {
+ /* address too long! */
+ return 0;
+ }
+ }
+ }
+ } else if (lwip_isxdigit(*s)) {
+ /* add current digit */
+ current_block_value = (current_block_value << 4) +
+ (lwip_isdigit(*s) ? (u32_t)(*s - '0') :
+ (u32_t)(10 + (lwip_islower(*s) ? *s - 'a' : *s - 'A')));
+ } else {
+ /* unexpected digit, space? CRLF? */
+ break;
+ }
+ }
+
+ if (addr) {
+ if (current_block_index & 0x1) {
+ addr->addr[addr_index++] |= current_block_value;
+ }
+ else {
+ addr->addr[addr_index] = current_block_value << 16;
+ }
+#if LWIP_IPV4
+fix_byte_order_and_return:
+#endif
+ /* convert to network byte order. */
+ for (addr_index = 0; addr_index < 4; addr_index++) {
+ addr->addr[addr_index] = lwip_htonl(addr->addr[addr_index]);
+ }
+
+ ip6_addr_clear_zone(addr);
+ }
+
+ if (current_block_index != 7) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * Convert numeric IPv6 address into ASCII representation.
+ * returns ptr to static buffer; not reentrant!
+ *
+ * @param addr ip6 address in network order to convert
+ * @return pointer to a global static (!) buffer that holds the ASCII
+ * representation of addr
+ */
+char *
+ip6addr_ntoa(const ip6_addr_t *addr)
+{
+ static char str[40];
+ return ip6addr_ntoa_r(addr, str, 40);
+}
+
+/**
+ * Same as ip6addr_ntoa, but reentrant since a user-supplied buffer is used.
+ *
+ * @param addr ip6 address in network order to convert
+ * @param buf target buffer where the string is stored
+ * @param buflen length of buf
+ * @return either pointer to buf which now holds the ASCII
+ * representation of addr or NULL if buf was too small
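+ *
+ * Illustrative usage (not part of the original sources); 46 bytes is
+ * assumed here as enough for any textual IPv6 address, including the
+ * IPv4-mapped form:
+ *
+ *   char str[46];
+ *   if (ip6addr_ntoa_r(&a, str, sizeof(str)) != NULL) {
+ *     LWIP_DEBUGF(IP6_DEBUG, ("addr: %s\n", str));
+ *   }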
+ */
+char *
+ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen)
+{
+ u32_t current_block_index, current_block_value, next_block_value;
+ s32_t i;
+ u8_t zero_flag, empty_block_flag;
+
+#if LWIP_IPV4
+ if (ip6_addr_isipv4mappedipv6(addr)) {
+ /* This is an IPv4 mapped address */
+ ip4_addr_t addr4;
+ char *ret;
+#define IP4MAPPED_HEADER "::FFFF:"
+ char *buf_ip4 = buf + sizeof(IP4MAPPED_HEADER) - 1;
+ int buflen_ip4 = buflen - sizeof(IP4MAPPED_HEADER) + 1;
+ if (buflen < (int)sizeof(IP4MAPPED_HEADER)) {
+ return NULL;
+ }
+ memcpy(buf, IP4MAPPED_HEADER, sizeof(IP4MAPPED_HEADER));
+ addr4.addr = addr->addr[3];
+ ret = ip4addr_ntoa_r(&addr4, buf_ip4, buflen_ip4);
+ if (ret != buf_ip4) {
+ return NULL;
+ }
+ return buf;
+ }
+#endif /* LWIP_IPV4 */
+ i = 0;
+  empty_block_flag = 0; /* used to indicate a zero chain for "::" */
+
+ for (current_block_index = 0; current_block_index < 8; current_block_index++) {
+ /* get the current 16-bit block */
+ current_block_value = lwip_htonl(addr->addr[current_block_index >> 1]);
+ if ((current_block_index & 0x1) == 0) {
+ current_block_value = current_block_value >> 16;
+ }
+ current_block_value &= 0xffff;
+
+ /* Check for empty block. */
+ if (current_block_value == 0) {
+ if (current_block_index == 7 && empty_block_flag == 1) {
+ /* special case, we must render a ':' for the last block. */
+ buf[i++] = ':';
+ if (i >= buflen) {
+ return NULL;
+ }
+ break;
+ }
+ if (empty_block_flag == 0) {
+ /* generate empty block "::", but only if more than one contiguous zero block,
+         * according to the current formatting suggestions of RFC 5952. */
+ next_block_value = lwip_htonl(addr->addr[(current_block_index + 1) >> 1]);
+ if ((current_block_index & 0x1) == 0x01) {
+ next_block_value = next_block_value >> 16;
+ }
+ next_block_value &= 0xffff;
+ if (next_block_value == 0) {
+ empty_block_flag = 1;
+ buf[i++] = ':';
+ if (i >= buflen) {
+ return NULL;
+ }
+ continue; /* move on to next block. */
+ }
+ } else if (empty_block_flag == 1) {
+ /* move on to next block. */
+ continue;
+ }
+ } else if (empty_block_flag == 1) {
+ /* Set this flag value so we don't produce multiple empty blocks. */
+ empty_block_flag = 2;
+ }
+
+ if (current_block_index > 0) {
+ buf[i++] = ':';
+ if (i >= buflen) {
+ return NULL;
+ }
+ }
+
+ if ((current_block_value & 0xf000) == 0) {
+ zero_flag = 1;
+ } else {
+ buf[i++] = lwip_xchar(((current_block_value & 0xf000) >> 12));
+ zero_flag = 0;
+ if (i >= buflen) {
+ return NULL;
+ }
+ }
+
+ if (((current_block_value & 0xf00) == 0) && (zero_flag)) {
+ /* do nothing */
+ } else {
+ buf[i++] = lwip_xchar(((current_block_value & 0xf00) >> 8));
+ zero_flag = 0;
+ if (i >= buflen) {
+ return NULL;
+ }
+ }
+
+ if (((current_block_value & 0xf0) == 0) && (zero_flag)) {
+ /* do nothing */
+ }
+ else {
+ buf[i++] = lwip_xchar(((current_block_value & 0xf0) >> 4));
+ zero_flag = 0;
+ if (i >= buflen) {
+ return NULL;
+ }
+ }
+
+ buf[i++] = lwip_xchar((current_block_value & 0xf));
+ if (i >= buflen) {
+ return NULL;
+ }
+ }
+
+ buf[i] = 0;
+
+ return buf;
+}
+
+#endif /* LWIP_IPV6 */
diff --git a/lwip/src/core/ipv6/ip6_frag.c b/lwip/src/core/ipv6/ip6_frag.c
new file mode 100644
index 0000000..d6c5d22
--- /dev/null
+++ b/lwip/src/core/ipv6/ip6_frag.c
@@ -0,0 +1,862 @@
+/**
+ * @file
+ *
+ * IPv6 fragmentation and reassembly.
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+#include "lwip/opt.h"
+#include "lwip/ip6_frag.h"
+#include "lwip/ip6.h"
+#include "lwip/icmp6.h"
+#include "lwip/nd6.h"
+#include "lwip/ip.h"
+
+#include "lwip/pbuf.h"
+#include "lwip/memp.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */
+
+
+/** By setting this to 0, you can turn off checking the fragments for overlapping
+ * regions. The code gets a little smaller. Only use this if you know that
+ * overlapping won't occur on your network! */
+#ifndef IP_REASS_CHECK_OVERLAP
+#define IP_REASS_CHECK_OVERLAP 1
+#endif /* IP_REASS_CHECK_OVERLAP */
+
+/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is
+ * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller.
+ * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA
+ * is set to 1, so that only one datagram can be reassembled at a time. */
+#ifndef IP_REASS_FREE_OLDEST
+#define IP_REASS_FREE_OLDEST 1
+#endif /* IP_REASS_FREE_OLDEST */
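+
+/* Illustrative lwipopts.h sketch (not part of the original sources): both
+ * options above default to 1; disabling them shrinks the code at the cost
+ * of robustness, e.g.:
+ *
+ *   #define IP_REASS_CHECK_OVERLAP 0
+ *   #define IP_REASS_FREE_OLDEST   0
+ */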
+
+#if IPV6_FRAG_COPYHEADER
+/* The number of bytes we need to "borrow" from (i.e., overwrite in) the header
+ * that precedes the fragment header for reassembly purposes. */
+#define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN))
+#endif
+
+#define IP_REASS_FLAG_LASTFRAG 0x01
+
+/** This is a helper struct which holds the starting
+ * offset and the ending offset of this fragment to
+ * easily chain the fragments.
+ * It has the same packing requirements as the IPv6 header, since it replaces
+ * the Fragment Header in memory in incoming fragments to keep
+ * track of the various fragments.
+ */
+#ifdef PACK_STRUCT_USE_INCLUDES
+# include "arch/bpstruct.h"
+#endif
+PACK_STRUCT_BEGIN
+struct ip6_reass_helper {
+ PACK_STRUCT_FIELD(struct pbuf *next_pbuf);
+ PACK_STRUCT_FIELD(u16_t start);
+ PACK_STRUCT_FIELD(u16_t end);
+} PACK_STRUCT_STRUCT;
+PACK_STRUCT_END
+#ifdef PACK_STRUCT_USE_INCLUDES
+# include "arch/epstruct.h"
+#endif
+
+/* static variables */
+static struct ip6_reassdata *reassdatagrams;
+static u16_t ip6_reass_pbufcount;
+
+/* Forward declarations. */
+static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr);
+#if IP_REASS_FREE_OLDEST
+static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed);
+#endif /* IP_REASS_FREE_OLDEST */
+
+void
+ip6_reass_tmr(void)
+{
+ struct ip6_reassdata *r, *tmp;
+
+#if !IPV6_FRAG_COPYHEADER
+ LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1",
+ sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN);
+#endif /* !IPV6_FRAG_COPYHEADER */
+
+ r = reassdatagrams;
+ while (r != NULL) {
+ /* Decrement the timer. Once it reaches 0,
+ * clean up the incomplete fragment assembly */
+ if (r->timer > 0) {
+ r->timer--;
+ r = r->next;
+ } else {
+ /* reassembly timed out */
+ tmp = r;
+ /* get the next pointer before freeing */
+ r = r->next;
+ /* free the helper struct and all enqueued pbufs */
+ ip6_reass_free_complete_datagram(tmp);
+ }
+ }
+}
+
+/**
+ * Free a datagram (struct ip6_reassdata) and all its pbufs.
+ * Updates the total count of enqueued pbufs (ip6_reass_pbufcount),
+ * sends an ICMP time exceeded packet.
+ *
+ * @param ipr datagram to free
+ */
+static void
+ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr)
+{
+ struct ip6_reassdata *prev;
+ u16_t pbufs_freed = 0;
+ u16_t clen;
+ struct pbuf *p;
+ struct ip6_reass_helper *iprh;
+
+#if LWIP_ICMP6
+ iprh = (struct ip6_reass_helper *)ipr->p->payload;
+ if (iprh->start == 0) {
+ /* The first fragment was received, send ICMP time exceeded. */
+ /* First, de-queue the first pbuf from r->p. */
+ p = ipr->p;
+ ipr->p = iprh->next_pbuf;
+ /* Restore the part that we've overwritten with our helper structure, or we
+ * might send garbage (and disclose a pointer) in the ICMPv6 reply. */
+ MEMCPY(p->payload, ipr->orig_hdr, sizeof(iprh));
+ /* Then, move back to the original ipv6 header (we are now pointing to Fragment header).
+ This cannot fail since we already checked when receiving this fragment. */
+ if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr))) {
+ LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0);
+ }
+ else {
+ /* Reconstruct the zoned source and destination addresses, so that we do
+ * not end up sending the ICMP response over the wrong link. */
+ ip6_addr_t src_addr, dest_addr;
+ ip6_addr_copy_from_packed(src_addr, IPV6_FRAG_SRC(ipr));
+ ip6_addr_set_zone(&src_addr, ipr->src_zone);
+ ip6_addr_copy_from_packed(dest_addr, IPV6_FRAG_DEST(ipr));
+ ip6_addr_set_zone(&dest_addr, ipr->dest_zone);
+ /* Send the actual ICMP response. */
+ icmp6_time_exceeded_with_addrs(p, ICMP6_TE_FRAG, &src_addr, &dest_addr);
+ }
+ clen = pbuf_clen(p);
+ LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
+ pbufs_freed = (u16_t)(pbufs_freed + clen);
+ pbuf_free(p);
+ }
+#endif /* LWIP_ICMP6 */
+
+ /* First, free all received pbufs. The individual pbufs need to be released
+ separately as they have not yet been chained */
+ p = ipr->p;
+ while (p != NULL) {
+ struct pbuf *pcur;
+ iprh = (struct ip6_reass_helper *)p->payload;
+ pcur = p;
+ /* get the next pointer before freeing */
+ p = iprh->next_pbuf;
+ clen = pbuf_clen(pcur);
+ LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
+ pbufs_freed = (u16_t)(pbufs_freed + clen);
+ pbuf_free(pcur);
+ }
+
+ /* Then, unchain the struct ip6_reassdata from the list and free it. */
+ if (ipr == reassdatagrams) {
+ reassdatagrams = ipr->next;
+ } else {
+ prev = reassdatagrams;
+ while (prev != NULL) {
+ if (prev->next == ipr) {
+ break;
+ }
+ prev = prev->next;
+ }
+ if (prev != NULL) {
+ prev->next = ipr->next;
+ }
+ }
+ memp_free(MEMP_IP6_REASSDATA, ipr);
+
+ /* Finally, update number of pbufs in reassembly queue */
+ LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed);
+ ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - pbufs_freed);
+}
+
+#if IP_REASS_FREE_OLDEST
+/**
+ * Free the oldest datagram to make room for enqueueing new fragments.
+ * The datagram ipr is not freed!
+ *
+ * @param ipr ip6_reassdata for the current fragment
+ * @param pbufs_needed number of pbufs needed to enqueue
+ * (used for freeing other datagrams if not enough space)
+ */
+static void
+ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed)
+{
+ struct ip6_reassdata *r, *oldest;
+
+  /* Free datagrams until we are allowed to enqueue 'pbufs_needed' pbufs,
+ * but don't free the current datagram! */
+ do {
+ r = oldest = reassdatagrams;
+ while (r != NULL) {
+ if (r != ipr) {
+ if (r->timer <= oldest->timer) {
+ /* older than the previous oldest */
+ oldest = r;
+ }
+ }
+ r = r->next;
+ }
+ if (oldest == ipr) {
+ /* nothing to free, ipr is the only element on the list */
+ return;
+ }
+ if (oldest != NULL) {
+ ip6_reass_free_complete_datagram(oldest);
+ }
+ } while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL));
+}
+#endif /* IP_REASS_FREE_OLDEST */
+
+/**
+ * Reassembles incoming IPv6 fragments into an IPv6 datagram.
+ *
+ * @param p points to the IPv6 Fragment Header
+ * @return NULL if reassembly is incomplete, pbuf pointing to
+ * IPv6 Header if reassembly is complete
+ */
+struct pbuf *
+ip6_reass(struct pbuf *p)
+{
+ struct ip6_reassdata *ipr, *ipr_prev;
+  struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL;
+ struct ip6_frag_hdr *frag_hdr;
+ u16_t offset, len, start, end;
+ ptrdiff_t hdrdiff;
+ u16_t clen;
+ u8_t valid = 1;
+ struct pbuf *q, *next_pbuf;
+
+ IP6_FRAG_STATS_INC(ip6_frag.recv);
+
+ /* ip6_frag_hdr must be in the first pbuf, not chained. Checked by caller. */
+ LWIP_ASSERT("IPv6 fragment header does not fit in first pbuf",
+ p->len >= sizeof(struct ip6_frag_hdr));
+
+ frag_hdr = (struct ip6_frag_hdr *) p->payload;
+
+ clen = pbuf_clen(p);
+
+ offset = lwip_ntohs(frag_hdr->_fragment_offset);
+
+ /* Calculate fragment length from IPv6 payload length.
+ * Adjust for headers before Fragment Header.
+ * And finally adjust by Fragment Header length. */
+ len = lwip_ntohs(ip6_current_header()->_plen);
+ hdrdiff = (u8_t*)p->payload - (const u8_t*)ip6_current_header();
+ LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff <= 0xFFFF);
+ LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff >= IP6_HLEN);
+ hdrdiff -= IP6_HLEN;
+ hdrdiff += IP6_FRAG_HLEN;
+ if (hdrdiff > len) {
+ IP6_FRAG_STATS_INC(ip6_frag.proterr);
+ goto nullreturn;
+ }
+ len = (u16_t)(len - hdrdiff);
+ start = (offset & IP6_FRAG_OFFSET_MASK);
+ if (start > (0xFFFF - len)) {
+ /* u16_t overflow, cannot handle this */
+ IP6_FRAG_STATS_INC(ip6_frag.proterr);
+ goto nullreturn;
+ }
+
+ /* Look for the datagram the fragment belongs to in the current datagram queue,
+ * remembering the previous in the queue for later dequeueing. */
+ for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) {
+ /* Check if the incoming fragment matches the one currently present
+ in the reassembly buffer. If so, we proceed with copying the
+ fragment into the buffer. */
+ if ((frag_hdr->_identification == ipr->identification) &&
+ ip6_addr_cmp_packed(ip6_current_src_addr(), &(IPV6_FRAG_SRC(ipr)), ipr->src_zone) &&
+ ip6_addr_cmp_packed(ip6_current_dest_addr(), &(IPV6_FRAG_DEST(ipr)), ipr->dest_zone)) {
+ IP6_FRAG_STATS_INC(ip6_frag.cachehit);
+ break;
+ }
+ ipr_prev = ipr;
+ }
+
+ if (ipr == NULL) {
+ /* Enqueue a new datagram into the datagram queue */
+ ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA);
+ if (ipr == NULL) {
+#if IP_REASS_FREE_OLDEST
+ /* Make room and try again. */
+ ip6_reass_remove_oldest_datagram(ipr, clen);
+ ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA);
+ if (ipr != NULL) {
+ /* re-search ipr_prev since it might have been removed */
+ for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
+ if (ipr_prev->next == ipr) {
+ break;
+ }
+ }
+ } else
+#endif /* IP_REASS_FREE_OLDEST */
+ {
+ IP6_FRAG_STATS_INC(ip6_frag.memerr);
+ goto nullreturn;
+ }
+ }
+
+ memset(ipr, 0, sizeof(struct ip6_reassdata));
+ ipr->timer = IPV6_REASS_MAXAGE;
+
+ /* enqueue the new structure to the front of the list */
+ ipr->next = reassdatagrams;
+ reassdatagrams = ipr;
+
+ /* Use the current IPv6 header for src/dest address reference.
+ * Eventually, we will replace it when we get the first fragment
+     * (which might be this one; in any case, that is done later). */
+    /* need to use the non-const pointer here: */
+ ipr->iphdr = ip_data.current_ip6_header;
+#if IPV6_FRAG_COPYHEADER
+ MEMCPY(&ipr->src, &ip6_current_header()->src, sizeof(ipr->src));
+ MEMCPY(&ipr->dest, &ip6_current_header()->dest, sizeof(ipr->dest));
+#endif /* IPV6_FRAG_COPYHEADER */
+#if LWIP_IPV6_SCOPES
+ /* Also store the address zone information.
+ * @todo It is possible that due to netif destruction and recreation, the
+ * stored zones end up resolving to a different interface. In that case, we
+ * risk sending a "time exceeded" ICMP response over the wrong link.
+ * Ideally, netif destruction would clean up matching pending reassembly
+ * structures, but custom zone mappings would make that non-trivial. */
+ ipr->src_zone = ip6_addr_zone(ip6_current_src_addr());
+ ipr->dest_zone = ip6_addr_zone(ip6_current_dest_addr());
+#endif /* LWIP_IPV6_SCOPES */
+ /* copy the fragmented packet id. */
+ ipr->identification = frag_hdr->_identification;
+
+ /* copy the nexth field */
+ ipr->nexth = frag_hdr->_nexth;
+ }
+
+ /* Check if we are allowed to enqueue more datagrams. */
+ if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
+#if IP_REASS_FREE_OLDEST
+ ip6_reass_remove_oldest_datagram(ipr, clen);
+ if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) {
+ /* re-search ipr_prev since it might have been removed */
+ for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
+ if (ipr_prev->next == ipr) {
+ break;
+ }
+ }
+ } else
+#endif /* IP_REASS_FREE_OLDEST */
+ {
+ /* @todo: send ICMPv6 time exceeded here? */
+ /* drop this pbuf */
+ IP6_FRAG_STATS_INC(ip6_frag.memerr);
+ goto nullreturn;
+ }
+ }
+
+ /* Overwrite Fragment Header with our own helper struct. */
+#if IPV6_FRAG_COPYHEADER
+ if (IPV6_FRAG_REQROOM > 0) {
+ /* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4).
+ This cannot fail since we already checked when receiving this fragment. */
+ u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM);
+ LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */
+ LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0);
+ }
+#else /* IPV6_FRAG_COPYHEADER */
+ LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1",
+ sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN);
+#endif /* IPV6_FRAG_COPYHEADER */
+
+ /* Prepare the pointer to the helper structure, and its initial values.
+ * Do not yet write to the structure itself, as we still have to make a
+ * backup of the original data, and we should not do that until we know for
+ * sure that we are going to add this packet to the list. */
+ iprh = (struct ip6_reass_helper *)p->payload;
+ next_pbuf = NULL;
+ end = (u16_t)(start + len);
+
+ /* find the right place to insert this pbuf */
+ /* Iterate through until we either get to the end of the list (append),
+   * or we find one with a larger offset (insert). */
+ for (q = ipr->p; q != NULL;) {
+ iprh_tmp = (struct ip6_reass_helper*)q->payload;
+ if (start < iprh_tmp->start) {
+#if IP_REASS_CHECK_OVERLAP
+ if (end > iprh_tmp->start) {
+ /* fragment overlaps with following, throw away */
+ IP6_FRAG_STATS_INC(ip6_frag.proterr);
+ goto nullreturn;
+ }
+ if (iprh_prev != NULL) {
+ if (start < iprh_prev->end) {
+ /* fragment overlaps with previous, throw away */
+ IP6_FRAG_STATS_INC(ip6_frag.proterr);
+ goto nullreturn;
+ }
+ }
+#endif /* IP_REASS_CHECK_OVERLAP */
+ /* the new pbuf should be inserted before this */
+ next_pbuf = q;
+ if (iprh_prev != NULL) {
+ /* not the fragment with the lowest offset */
+ iprh_prev->next_pbuf = p;
+ } else {
+ /* fragment with the lowest offset */
+ ipr->p = p;
+ }
+ break;
+ } else if (start == iprh_tmp->start) {
+ /* received the same datagram twice: no need to keep the datagram */
+ goto nullreturn;
+#if IP_REASS_CHECK_OVERLAP
+ } else if (start < iprh_tmp->end) {
+ /* overlap: no need to keep the new datagram */
+ IP6_FRAG_STATS_INC(ip6_frag.proterr);
+ goto nullreturn;
+#endif /* IP_REASS_CHECK_OVERLAP */
+ } else {
+ /* Check if the fragments received so far have no gaps. */
+ if (iprh_prev != NULL) {
+ if (iprh_prev->end != iprh_tmp->start) {
+ /* There is a fragment missing between the current
+ * and the previous fragment */
+ valid = 0;
+ }
+ }
+ }
+ q = iprh_tmp->next_pbuf;
+ iprh_prev = iprh_tmp;
+ }
+
+ /* If q is NULL, then we made it to the end of the list. Determine what to do now */
+ if (q == NULL) {
+ if (iprh_prev != NULL) {
+ /* this is (for now) the fragment with the highest offset:
+ * chain it to the last fragment */
+#if IP_REASS_CHECK_OVERLAP
+ LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= start);
+#endif /* IP_REASS_CHECK_OVERLAP */
+ iprh_prev->next_pbuf = p;
+ if (iprh_prev->end != start) {
+ valid = 0;
+ }
+ } else {
+#if IP_REASS_CHECK_OVERLAP
+ LWIP_ASSERT("no previous fragment, this must be the first fragment!",
+ ipr->p == NULL);
+#endif /* IP_REASS_CHECK_OVERLAP */
+ /* this is the first fragment we ever received for this ip datagram */
+ ipr->p = p;
+ }
+ }
+
+ /* Track the number of pbufs currently 'in-flight', in order to limit
+ the number of fragments that may be enqueued at any one time */
+ ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount + clen);
+
+ /* Remember IPv6 header if this is the first fragment. */
+ if (start == 0) {
+ /* need to use the non-const pointer here: */
+ ipr->iphdr = ip_data.current_ip6_header;
+ /* Make a backup of the part of the packet data that we are about to
+ * overwrite, so that we can restore the original later. */
+ MEMCPY(ipr->orig_hdr, p->payload, sizeof(*iprh));
+ /* For IPV6_FRAG_COPYHEADER there is no need to copy src/dst again, as they
+ * will be the same as they were. With LWIP_IPV6_SCOPES, the same applies
+ * to the source/destination zones. */
+ }
+ /* Only after the backup do we get to fill in the actual helper structure. */
+ iprh->next_pbuf = next_pbuf;
+ iprh->start = start;
+ iprh->end = end;
+
+ /* If this is the last fragment, calculate total packet length. */
+ if ((offset & IP6_FRAG_MORE_FLAG) == 0) {
+ ipr->datagram_len = iprh->end;
+ }
+
+ /* Additional validity tests: we have received first and last fragment. */
+ iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload;
+ if (iprh_tmp->start != 0) {
+ valid = 0;
+ }
+ if (ipr->datagram_len == 0) {
+ valid = 0;
+ }
+
+ /* Final validity test: no gaps between current and last fragment. */
+ iprh_prev = iprh;
+ q = iprh->next_pbuf;
+ while ((q != NULL) && valid) {
+ iprh = (struct ip6_reass_helper*)q->payload;
+ if (iprh_prev->end != iprh->start) {
+ valid = 0;
+ break;
+ }
+ iprh_prev = iprh;
+ q = iprh->next_pbuf;
+ }
+
+ if (valid) {
+ /* All fragments have been received */
+ struct ip6_hdr* iphdr_ptr;
+
+ /* chain together the pbufs contained within the ip6_reassdata list. */
+ iprh = (struct ip6_reass_helper*) ipr->p->payload;
+ while (iprh != NULL) {
+ next_pbuf = iprh->next_pbuf;
+ if (next_pbuf != NULL) {
+ /* Save next helper struct (will be hidden in next step). */
+ iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload;
+
+ /* hide the fragment header for every succeeding fragment */
+ pbuf_remove_header(next_pbuf, IP6_FRAG_HLEN);
+#if IPV6_FRAG_COPYHEADER
+ if (IPV6_FRAG_REQROOM > 0) {
+ /* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */
+ u8_t hdrerr = pbuf_remove_header(next_pbuf, IPV6_FRAG_REQROOM);
+ LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */
+ LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0);
+ }
+#endif
+ pbuf_cat(ipr->p, next_pbuf);
+ }
+ else {
+ iprh_tmp = NULL;
+ }
+
+ iprh = iprh_tmp;
+ }
+
+ /* Get the first pbuf. */
+ p = ipr->p;
+
+#if IPV6_FRAG_COPYHEADER
+ if (IPV6_FRAG_REQROOM > 0) {
+ u8_t hdrerr;
+ /* Restore (only) the bytes that we overwrote beyond the fragment header.
+ * Those bytes may belong to either the IPv6 header or an extension
+ * header placed before the fragment header. */
+ MEMCPY(p->payload, ipr->orig_hdr, IPV6_FRAG_REQROOM);
+ /* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */
+ hdrerr = pbuf_remove_header(p, IPV6_FRAG_REQROOM);
+ LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */
+ LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0);
+ }
+#endif
+
+ /* We need to get rid of the fragment header itself, which is somewhere in
+ * the middle of the packet (but still in the first pbuf of the chain).
+ * Getting rid of the header is required by RFC 2460 Sec. 4.5 and necessary
+ * in order to be able to reassemble packets that are close to full size
+ * (i.e., around 65535 bytes). We simply move up all the headers before the
+ * fragment header, including the IPv6 header, and adjust the payload start
+ * accordingly. This works because all these headers are in the first pbuf
+ * of the chain, and because the caller adjusts all its pointers on
+ * successful reassembly. */
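+ /* Sketch of the move for a datagram without extension headers:
+ * before: [ IPv6 header (40) ][ frag hdr (8) ][ data ... ]
+ * after: [ 8 stale bytes ][ IPv6 header (40) ][ data ... ]
+ * ipr->iphdr keeps pointing at the stale bytes; iphdr_ptr (below) points
+ * at the moved IPv6 header. */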
+ MEMMOVE((u8_t*)ipr->iphdr + sizeof(struct ip6_frag_hdr), ipr->iphdr,
+ (size_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr));
+
+ /* This is where the IPv6 header is now. */
+ iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->iphdr +
+ sizeof(struct ip6_frag_hdr));
+
+ /* Adjust datagram length by adding header lengths. */
+ ipr->datagram_len = (u16_t)(ipr->datagram_len + ((u8_t*)p->payload - (u8_t*)iphdr_ptr)
+ - IP6_HLEN);
+
+ /* Set payload length in ip header. */
+ iphdr_ptr->_plen = lwip_htons(ipr->datagram_len);
+
+ /* With the fragment header gone, we now need to adjust the next-header
+ * field of whatever header was originally before it. Since the packet made
+ * it through the original header processing routines at least up to the
+ * fragment header, we do not need any further sanity checks here. */
+ if (IP6H_NEXTH(iphdr_ptr) == IP6_NEXTH_FRAGMENT) {
+ iphdr_ptr->_nexth = ipr->nexth;
+ } else {
+ u8_t *ptr = (u8_t *)iphdr_ptr + IP6_HLEN;
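+ /* Walk the extension header chain: in every extension header, byte 0
+ * holds the next-header value and byte 1 the header length in 8-octet
+ * units, not counting the first 8 octets. */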
+ while (*ptr != IP6_NEXTH_FRAGMENT) {
+ ptr += 8 * (1 + ptr[1]);
+ }
+ *ptr = ipr->nexth;
+ }
+
+ /* release the resources allocated for the fragment queue entry */
+ if (reassdatagrams == ipr) {
+ /* it was the first in the list */
+ reassdatagrams = ipr->next;
+ } else {
+ /* it wasn't the first, so it must have a valid 'prev' */
+ LWIP_ASSERT("sanity check linked list", ipr_prev != NULL);
+ ipr_prev->next = ipr->next;
+ }
+ memp_free(MEMP_IP6_REASSDATA, ipr);
+
+ /* adjust the number of pbufs currently queued for reassembly. */
+ clen = pbuf_clen(p);
+ LWIP_ASSERT("ip6_reass_pbufcount >= clen", ip6_reass_pbufcount >= clen);
+ ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - clen);
+
+ /* Move pbuf back to IPv6 header. This should never fail. */
+ if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) {
+ LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0);
+ pbuf_free(p);
+ return NULL;
+ }
+
+ /* Return the pbuf chain */
+ return p;
+ }
+ /* the datagram is not (yet?) reassembled completely */
+ return NULL;
+
+nullreturn:
+ IP6_FRAG_STATS_INC(ip6_frag.drop);
+ pbuf_free(p);
+ return NULL;
+}
+
+#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */
+
+#if LWIP_IPV6 && LWIP_IPV6_FRAG
+
+#if !LWIP_NETIF_TX_SINGLE_PBUF
+/** Allocate a new struct pbuf_custom_ref */
+static struct pbuf_custom_ref*
+ip6_frag_alloc_pbuf_custom_ref(void)
+{
+ return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF);
+}
+
+/** Free a struct pbuf_custom_ref */
+static void
+ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p)
+{
+ LWIP_ASSERT("p != NULL", p != NULL);
+ memp_free(MEMP_FRAG_PBUF, p);
+}
+
+/** Free-callback function to free a 'struct pbuf_custom_ref', called by
+ * pbuf_free. */
+static void
+ip6_frag_free_pbuf_custom(struct pbuf *p)
+{
+ struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p;
+ LWIP_ASSERT("pcr != NULL", pcr != NULL);
+ LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p);
+ if (pcr->original != NULL) {
+ pbuf_free(pcr->original);
+ }
+ ip6_frag_free_pbuf_custom_ref(pcr);
+}
+#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
+
+/**
+ * Fragment an IPv6 datagram if too large for the netif or path MTU.
+ *
+ * Chop the datagram into MTU-sized chunks and send them in order
+ * by pointing PBUF_REFs into p
+ *
+ * @param p ipv6 packet to send
+ * @param netif the netif on which to send
+ * @param dest destination ipv6 address to which to send
+ *
+ * @return ERR_OK if sent successfully, an err_t error code otherwise
+ */
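+/* Note: with LWIP_IPV6_FRAG enabled, this is normally invoked from the IPv6
+ * output path when a datagram exceeds the destination MTU; applications do
+ * not usually call it directly. */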
+err_t
+ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest)
+{
+ struct ip6_hdr *original_ip6hdr;
+ struct ip6_hdr *ip6hdr;
+ struct ip6_frag_hdr *frag_hdr;
+ struct pbuf *rambuf;
+#if !LWIP_NETIF_TX_SINGLE_PBUF
+ struct pbuf *newpbuf;
+ u16_t newpbuflen = 0;
+ u16_t left_to_copy;
+#endif
+ static u32_t identification;
+ u16_t left, cop;
+ const u16_t mtu = nd6_get_destination_mtu(dest, netif);
+ const u16_t nfb = (u16_t)((mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK);
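+ /* nfb: payload bytes per fragment, rounded down to a multiple of 8 by the
+ * offset mask, as fragment offsets are expressed in 8-octet units. */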
+ u16_t fragment_offset = 0;
+ u16_t last;
+ u16_t poff = IP6_HLEN;
+
+ identification++;
+
+ original_ip6hdr = (struct ip6_hdr *)p->payload;
+
+ /* @todo we assume there are no options in the unfragmentable part (IPv6 header). */
+ LWIP_ASSERT("p->tot_len >= IP6_HLEN", p->tot_len >= IP6_HLEN);
+ left = (u16_t)(p->tot_len - IP6_HLEN);
+
+ while (left) {
+ last = (left <= nfb);
+
+ /* Fill this fragment */
+ cop = last ? left : nfb;
+
+#if LWIP_NETIF_TX_SINGLE_PBUF
+ rambuf = pbuf_alloc(PBUF_IP, cop + IP6_FRAG_HLEN, PBUF_RAM);
+ if (rambuf == NULL) {
+ IP6_FRAG_STATS_INC(ip6_frag.memerr);
+ return ERR_MEM;
+ }
+ LWIP_ASSERT("this needs a pbuf in one piece!",
+ (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
+ poff += pbuf_copy_partial(p, (u8_t*)rambuf->payload + IP6_FRAG_HLEN, cop, poff);
+ /* make room for the IP header */
+ if (pbuf_add_header(rambuf, IP6_HLEN)) {
+ pbuf_free(rambuf);
+ IP6_FRAG_STATS_INC(ip6_frag.memerr);
+ return ERR_MEM;
+ }
+ /* fill in the IP header */
+ SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN);
+ ip6hdr = (struct ip6_hdr *)rambuf->payload;
+ frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN);
+#else
+ /* When not using a static buffer, create a chain of pbufs.
+ * The first will be a PBUF_RAM holding the link, IPv6, and Fragment header.
+ * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
+ * but limited to the size of an mtu.
+ */
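+ /* Resulting chain for one fragment (sketch):
+ * rambuf (PBUF_RAM, IPv6 hdr + frag hdr) -> PBUF_REF into p -> PBUF_REF into p -> ...
+ */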
+ rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM);
+ if (rambuf == NULL) {
+ IP6_FRAG_STATS_INC(ip6_frag.memerr);
+ return ERR_MEM;
+ }
+ LWIP_ASSERT("this needs a pbuf in one piece!",
+ (p->len >= (IP6_HLEN)));
+ SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN);
+ ip6hdr = (struct ip6_hdr *)rambuf->payload;
+ frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN);
+
+ /* Can just adjust p directly for needed offset. */
+ p->payload = (u8_t *)p->payload + poff;
+ p->len = (u16_t)(p->len - poff);
+ p->tot_len = (u16_t)(p->tot_len - poff);
+
+ left_to_copy = cop;
+ while (left_to_copy) {
+ struct pbuf_custom_ref *pcr;
+ newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len;
+ /* Is this pbuf already empty? */
+ if (!newpbuflen) {
+ p = p->next;
+ continue;
+ }
+ pcr = ip6_frag_alloc_pbuf_custom_ref();
+ if (pcr == NULL) {
+ pbuf_free(rambuf);
+ IP6_FRAG_STATS_INC(ip6_frag.memerr);
+ return ERR_MEM;
+ }
+ /* Mirror this pbuf, although we might not need all of it. */
+ newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen);
+ if (newpbuf == NULL) {
+ ip6_frag_free_pbuf_custom_ref(pcr);
+ pbuf_free(rambuf);
+ IP6_FRAG_STATS_INC(ip6_frag.memerr);
+ return ERR_MEM;
+ }
+ pbuf_ref(p);
+ pcr->original = p;
+ pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom;
+
+ /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
+ * so that it is removed when pbuf_dechain is later called on rambuf.
+ */
+ pbuf_cat(rambuf, newpbuf);
+ left_to_copy = (u16_t)(left_to_copy - newpbuflen);
+ if (left_to_copy) {
+ p = p->next;
+ }
+ }
+ poff = newpbuflen;
+#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
+
+ /* Set headers */
+ frag_hdr->_nexth = original_ip6hdr->_nexth;
+ frag_hdr->reserved = 0;
+ frag_hdr->_fragment_offset = lwip_htons((u16_t)((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG)));
+ frag_hdr->_identification = lwip_htonl(identification);
+
+ IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT);
+ IP6H_PLEN_SET(ip6hdr, (u16_t)(cop + IP6_FRAG_HLEN));
+
+ /* No need for separate header pbuf - we allowed room for it in rambuf
+ * when allocated.
+ */
+ IP6_FRAG_STATS_INC(ip6_frag.xmit);
+ netif->output_ip6(netif, rambuf, dest);
+
+ /* Unfortunately we can't reuse rambuf - the hardware may still be
+ * using the buffer. Instead we free it (and the ensuing chain) and
+ * recreate it next time round the loop. If we're lucky the hardware
+ * will have already sent the packet, the free will really free, and
+ * there will be zero memory penalty.
+ */
+
+ pbuf_free(rambuf);
+ left = (u16_t)(left - cop);
+ fragment_offset = (u16_t)(fragment_offset + cop);
+ }
+ return ERR_OK;
+}
+
+#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */
diff --git a/lwip/src/core/ipv6/mld6.c b/lwip/src/core/ipv6/mld6.c
new file mode 100644
index 0000000..6387d46
--- /dev/null
+++ b/lwip/src/core/ipv6/mld6.c
@@ -0,0 +1,626 @@
+/**
+ * @file
+ * Multicast listener discovery
+ *
+ * @defgroup mld6 MLD6
+ * @ingroup ip6
+ * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710.
+ * No support for MLDv2.\n
+ * Note: The allnodes (ff01::1, ff02::1) group is assumed to be received by your
+ * netif since it must always be received for correct IPv6 operation (e.g. SLAAC).
+ * Ensure the netif filters are configured accordingly!\n
+ * The netif flags also need NETIF_FLAG_MLD6 flag set to enable MLD6 on a
+ * netif ("netif->flags |= NETIF_FLAG_MLD6;").\n
+ * To be called from TCPIP thread.
+ */
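+/* Typical use from the TCPIP thread (sketch; "netif" and the group address
+ * are supplied by the application):
+ *
+ *   ip6_addr_t group;
+ *   netif->flags |= NETIF_FLAG_MLD6;
+ *   ip6addr_aton("ff02::fb", &group);
+ *   mld6_joingroup_netif(netif, &group);
+ */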
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+/* Based on igmp.c implementation of igmp v2 protocol */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/mld6.h"
+#include "lwip/prot/mld6.h"
+#include "lwip/icmp6.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/ip.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/pbuf.h"
+#include "lwip/netif.h"
+#include "lwip/memp.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+
+/*
+ * MLD constants
+ */
+#define MLD6_HL 1
+#define MLD6_JOIN_DELAYING_MEMBER_TMR_MS (500)
+
+#define MLD6_GROUP_NON_MEMBER 0
+#define MLD6_GROUP_DELAYING_MEMBER 1
+#define MLD6_GROUP_IDLE_MEMBER 2
+
+/* Forward declarations. */
+static struct mld_group *mld6_new_group(struct netif *ifp, const ip6_addr_t *addr);
+static err_t mld6_remove_group(struct netif *netif, struct mld_group *group);
+static void mld6_delayed_report(struct mld_group *group, u16_t maxresp);
+static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type);
+
+
+/**
+ * Stop MLD processing on interface
+ *
+ * @param netif network interface on which to stop MLD processing
+ */
+err_t
+mld6_stop(struct netif *netif)
+{
+ struct mld_group *group = netif_mld6_data(netif);
+
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, NULL);
+
+ while (group != NULL) {
+ struct mld_group *next = group->next; /* avoid use-after-free below */
+
+ /* disable the group at the MAC level */
+ if (netif->mld_mac_filter != NULL) {
+ netif->mld_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER);
+ }
+
+ /* free group */
+ memp_free(MEMP_MLD6_GROUP, group);
+
+ /* move to "next" */
+ group = next;
+ }
+ return ERR_OK;
+}
+
+/**
+ * Report MLD memberships for this interface
+ *
+ * @param netif network interface on which to report MLD memberships
+ */
+void
+mld6_report_groups(struct netif *netif)
+{
+ struct mld_group *group = netif_mld6_data(netif);
+
+ while (group != NULL) {
+ mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS);
+ group = group->next;
+ }
+}
+
+/**
+ * Search for a group that is joined on a netif
+ *
+ * @param ifp the network interface for which to look
+ * @param addr the group ipv6 address to search for
+ * @return a struct mld_group* if the group has been found,
+ * NULL if the group wasn't found.
+ */
+struct mld_group *
+mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr)
+{
+ struct mld_group *group = netif_mld6_data(ifp);
+
+ while (group != NULL) {
+ if (ip6_addr_cmp(&(group->group_address), addr)) {
+ return group;
+ }
+ group = group->next;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * create a new group
+ *
+ * @param ifp the network interface for which to create
+ * @param addr the ipv6 address of the new group
+ * @return a struct mld_group*,
+ * NULL on memory error.
+ */
+static struct mld_group *
+mld6_new_group(struct netif *ifp, const ip6_addr_t *addr)
+{
+ struct mld_group *group;
+
+ group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP);
+ if (group != NULL) {
+ ip6_addr_set(&(group->group_address), addr);
+ group->timer = 0; /* Not running */
+ group->group_state = MLD6_GROUP_IDLE_MEMBER;
+ group->last_reporter_flag = 0;
+ group->use = 0;
+ group->next = netif_mld6_data(ifp);
+
+ netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group);
+ }
+
+ return group;
+}
+
+/**
+ * Remove a group from the mld_group_list, but do not free it yet
+ *
+ * @param netif the netif from whose group list to remove the group
+ * @param group the group to remove
+ * @return ERR_OK if group was removed from the list, an err_t otherwise
+ */
+static err_t
+mld6_remove_group(struct netif *netif, struct mld_group *group)
+{
+ err_t err = ERR_OK;
+
+ /* Is it the first group? */
+ if (netif_mld6_data(netif) == group) {
+ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group->next);
+ } else {
+ /* look for group further down the list */
+ struct mld_group *tmpGroup;
+ for (tmpGroup = netif_mld6_data(netif); tmpGroup != NULL; tmpGroup = tmpGroup->next) {
+ if (tmpGroup->next == group) {
+ tmpGroup->next = group->next;
+ break;
+ }
+ }
+ /* Group not found */
+ if (tmpGroup == NULL) {
+ err = ERR_ARG;
+ }
+ }
+
+ return err;
+}
+
+
+/**
+ * Process an input MLD message. Called by icmp6_input.
+ *
+ * @param p the mld packet, p->payload pointing to the icmpv6 header
+ * @param inp the netif on which this packet was received
+ */
+void
+mld6_input(struct pbuf *p, struct netif *inp)
+{
+ struct mld_header *mld_hdr;
+ struct mld_group *group;
+
+ MLD6_STATS_INC(mld6.recv);
+
+ /* Check that mld header fits in packet. */
+ if (p->len < sizeof(struct mld_header)) {
+ /* @todo debug message */
+ pbuf_free(p);
+ MLD6_STATS_INC(mld6.lenerr);
+ MLD6_STATS_INC(mld6.drop);
+ return;
+ }
+
+ mld_hdr = (struct mld_header *)p->payload;
+
+ switch (mld_hdr->type) {
+ case ICMP6_TYPE_MLQ: /* Multicast listener query. */
+ /* Is it a general query? */
+ if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) &&
+ ip6_addr_isany(&(mld_hdr->multicast_address))) {
+ MLD6_STATS_INC(mld6.rx_general);
+ /* Report all groups, except all nodes group, and if-local groups. */
+ group = netif_mld6_data(inp);
+ while (group != NULL) {
+ if ((!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) &&
+ (!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) {
+ mld6_delayed_report(group, mld_hdr->max_resp_delay);
+ }
+ group = group->next;
+ }
+ } else {
+ /* Have we joined this group?
+ * We use IP6 destination address to have a memory aligned copy.
+ * mld_hdr->multicast_address should be the same. */
+ MLD6_STATS_INC(mld6.rx_group);
+ group = mld6_lookfor_group(inp, ip6_current_dest_addr());
+ if (group != NULL) {
+ /* Schedule a report. */
+ mld6_delayed_report(group, mld_hdr->max_resp_delay);
+ }
+ }
+ break; /* ICMP6_TYPE_MLQ */
+ case ICMP6_TYPE_MLR: /* Multicast listener report. */
+ /* Have we joined this group?
+ * We use IP6 destination address to have a memory aligned copy.
+ * mld_hdr->multicast_address should be the same. */
+ MLD6_STATS_INC(mld6.rx_report);
+ group = mld6_lookfor_group(inp, ip6_current_dest_addr());
+ if (group != NULL) {
+ /* If we are waiting to report, cancel it. */
+ if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) {
+ group->timer = 0; /* stopped */
+ group->group_state = MLD6_GROUP_IDLE_MEMBER;
+ group->last_reporter_flag = 0;
+ }
+ }
+ break; /* ICMP6_TYPE_MLR */
+ case ICMP6_TYPE_MLD: /* Multicast listener done. */
+ /* Do nothing, router will query us. */
+ break; /* ICMP6_TYPE_MLD */
+ default:
+ MLD6_STATS_INC(mld6.proterr);
+ MLD6_STATS_INC(mld6.drop);
+ break;
+ }
+
+ pbuf_free(p);
+}
+
+/**
+ * @ingroup mld6
+ * Join a group on one or all network interfaces.
+ *
+ * If the group is to be joined on all interfaces, the given group address must
+ * not have a zone set (i.e., it must have its zone index set to IP6_NO_ZONE).
+ * If the group is to be joined on one particular interface, the given group
+ * address may or may not have a zone set.
+ *
+ * @param srcaddr ipv6 address (zoned) of the network interface which should
+ * join a new group. If IP6_ADDR_ANY6, join on all netifs
+ * @param groupaddr the ipv6 address of the group to join (possibly but not
+ * necessarily zoned)
+ * @return ERR_OK if group was joined on the netif(s), an err_t otherwise
+ */
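+/* For example (sketch), to join a group on every netif:
+ *
+ *   mld6_joingroup(IP6_ADDR_ANY6, &group);
+ */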
+err_t
+mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr)
+{
+ err_t err = ERR_VAL; /* no matching interface */
+ struct netif *netif;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* loop through netif's */
+ NETIF_FOREACH(netif) {
+ /* Should we join this interface ? */
+ if (ip6_addr_isany(srcaddr) ||
+ netif_get_ip6_addr_match(netif, srcaddr) >= 0) {
+ err = mld6_joingroup_netif(netif, groupaddr);
+ if (err != ERR_OK) {
+ return err;
+ }
+ }
+ }
+
+ return err;
+}
+
+/**
+ * @ingroup mld6
+ * Join a group on a network interface.
+ *
+ * @param netif the network interface which should join a new group.
+ * @param groupaddr the ipv6 address of the group to join (possibly but not
+ * necessarily zoned)
+ * @return ERR_OK if group was joined on the netif, an err_t otherwise
+ */
+err_t
+mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr)
+{
+ struct mld_group *group;
+#if LWIP_IPV6_SCOPES
+ ip6_addr_t ip6addr;
+
+ /* If the address has a particular scope but no zone set, use the netif to
+ * set one now. Within the mld6 module, all addresses are properly zoned. */
+ if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) {
+ ip6_addr_set(&ip6addr, groupaddr);
+ ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif);
+ groupaddr = &ip6addr;
+ }
+ IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif);
+#endif /* LWIP_IPV6_SCOPES */
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* find group or create a new one if not found */
+ group = mld6_lookfor_group(netif, groupaddr);
+
+ if (group == NULL) {
+ /* Joining a new group. Create a new group entry. */
+ group = mld6_new_group(netif, groupaddr);
+ if (group == NULL) {
+ return ERR_MEM;
+ }
+
+ /* Activate this address on the MAC layer. */
+ if (netif->mld_mac_filter != NULL) {
+ netif->mld_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER);
+ }
+
+ /* Report our membership. */
+ MLD6_STATS_INC(mld6.tx_report);
+ mld6_send(netif, group, ICMP6_TYPE_MLR);
+ mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS);
+ }
+
+ /* Increment group use */
+ group->use++;
+ return ERR_OK;
+}
+
+/**
+ * @ingroup mld6
+ * Leave a group on one or all network interfaces.
+ *
+ * Zoning of address follows the same rules as @ref mld6_joingroup.
+ *
+ * @param srcaddr ipv6 address (zoned) of the network interface which should
+ * leave the group. If IP6_ADDR_ANY6, leave on all netifs
+ * @param groupaddr the ipv6 address of the group to leave (possibly, but not
+ * necessarily zoned)
+ * @return ERR_OK if group was left on the netif(s), an err_t otherwise
+ */
+err_t
+mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr)
+{
+ err_t err = ERR_VAL; /* no matching interface */
+ struct netif *netif;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* loop through netif's */
+ NETIF_FOREACH(netif) {
+ /* Should we leave this interface ? */
+ if (ip6_addr_isany(srcaddr) ||
+ netif_get_ip6_addr_match(netif, srcaddr) >= 0) {
+ err_t res = mld6_leavegroup_netif(netif, groupaddr);
+ if (err != ERR_OK) {
+ /* Store this result if we have not yet gotten a success */
+ err = res;
+ }
+ }
+ }
+
+ return err;
+}
+
+/**
+ * @ingroup mld6
+ * Leave a group on a network interface.
+ *
+ * @param netif the network interface which should leave the group.
+ * @param groupaddr the ipv6 address of the group to leave (possibly, but not
+ * necessarily zoned)
+ * @return ERR_OK if group was left on the netif, an err_t otherwise
+ */
+err_t
+mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr)
+{
+ struct mld_group *group;
+#if LWIP_IPV6_SCOPES
+ ip6_addr_t ip6addr;
+
+ if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) {
+ ip6_addr_set(&ip6addr, groupaddr);
+ ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif);
+ groupaddr = &ip6addr;
+ }
+ IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif);
+#endif /* LWIP_IPV6_SCOPES */
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* find group */
+ group = mld6_lookfor_group(netif, groupaddr);
+
+ if (group != NULL) {
+ /* Leave if there is no other use of the group */
+ if (group->use <= 1) {
+ /* Remove the group from the list */
+ mld6_remove_group(netif, group);
+
+ /* If we are the last reporter for this group */
+ if (group->last_reporter_flag) {
+ MLD6_STATS_INC(mld6.tx_leave);
+ mld6_send(netif, group, ICMP6_TYPE_MLD);
+ }
+
+ /* Disable the group at the MAC level */
+ if (netif->mld_mac_filter != NULL) {
+ netif->mld_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER);
+ }
+
+ /* free group struct */
+ memp_free(MEMP_MLD6_GROUP, group);
+ } else {
+ /* Decrement group use */
+ group->use--;
+ }
+
+ /* Left group */
+ return ERR_OK;
+ }
+
+ /* Group not found */
+ return ERR_VAL;
+}
+
+
+/**
+ * Periodic timer for mld processing. Must be called every
+ * MLD6_TMR_INTERVAL milliseconds (100).
+ *
+ * When a delaying member expires, a membership report is sent.
+ */
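+/* When the default lwIP timer implementation (timeouts.c) is used, this is
+ * driven automatically as one of the stack's cyclic timers, so no manual
+ * scheduling is needed. */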
+void
+mld6_tmr(void)
+{
+ struct netif *netif;
+
+ NETIF_FOREACH(netif) {
+ struct mld_group *group = netif_mld6_data(netif);
+
+ while (group != NULL) {
+ if (group->timer > 0) {
+ group->timer--;
+ if (group->timer == 0) {
+ /* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */
+ if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) {
+ MLD6_STATS_INC(mld6.tx_report);
+ mld6_send(netif, group, ICMP6_TYPE_MLR);
+ group->group_state = MLD6_GROUP_IDLE_MEMBER;
+ }
+ }
+ }
+ group = group->next;
+ }
+ }
+}
+
+/**
+ * Schedule a delayed membership report for a group
+ *
+ * @param group the mld_group for which "delaying" membership report
+ * should be sent
+ * @param maxresp_in the max resp delay provided in the query
+ */
+static void
+mld6_delayed_report(struct mld_group *group, u16_t maxresp_in)
+{
+ /* Convert maxresp from milliseconds to tmr ticks */
+ u16_t maxresp = maxresp_in / MLD6_TMR_INTERVAL;
+ if (maxresp == 0) {
+ maxresp = 1;
+ }
+
+#ifdef LWIP_RAND
+ /* Randomize maxresp. (if LWIP_RAND is supported) */
+ maxresp = (u16_t)(LWIP_RAND() % maxresp);
+ if (maxresp == 0) {
+ maxresp = 1;
+ }
+#endif /* LWIP_RAND */
+
+ /* Apply timer value if no report has been scheduled already. */
+ if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) ||
+ ((group->group_state == MLD6_GROUP_DELAYING_MEMBER) &&
+ ((group->timer == 0) || (maxresp < group->timer)))) {
+ group->timer = maxresp;
+ group->group_state = MLD6_GROUP_DELAYING_MEMBER;
+ }
+}
+
+/**
+ * Send a MLD message (report or done).
+ *
+ * An IPv6 hop-by-hop options header with a router alert option
+ * is prepended.
+ *
+ * @param netif the netif on which to send the message
+ * @param group the group to report or quit
+ * @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done)
+ */
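+/* On-wire layout of the message built here (sketch):
+ *
+ *   | IPv6 header | hop-by-hop header with router alert | MLD header |
+ *
+ * The hop-by-hop header is filled in by ip6_options_add_hbh_ra() and the
+ * IPv6 header is added by ip6_output_if(). */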
+static void
+mld6_send(struct netif *netif, struct mld_group *group, u8_t type)
+{
+ struct mld_header *mld_hdr;
+ struct pbuf *p;
+ const ip6_addr_t *src_addr;
+
+ /* Allocate a packet. Size is MLD header + IPv6 Hop-by-hop options header. */
+ p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + MLD6_HBH_HLEN, PBUF_RAM);
+ if (p == NULL) {
+ MLD6_STATS_INC(mld6.memerr);
+ return;
+ }
+
+ /* Move to make room for Hop-by-hop options header. */
+ if (pbuf_remove_header(p, MLD6_HBH_HLEN)) {
+ pbuf_free(p);
+ MLD6_STATS_INC(mld6.lenerr);
+ return;
+ }
+
+ /* Select our source address. */
+ if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) {
+ /* This is a special case, when we are performing duplicate address detection.
+ * We must join the multicast group, but we don't have a valid address yet. */
+ src_addr = IP6_ADDR_ANY6;
+ } else {
+ /* Use link-local address as source address. */
+ src_addr = netif_ip6_addr(netif, 0);
+ }
+
+ /* MLD message header pointer. */
+ mld_hdr = (struct mld_header *)p->payload;
+
+ /* Set fields. */
+ mld_hdr->type = type;
+ mld_hdr->code = 0;
+ mld_hdr->chksum = 0;
+ mld_hdr->max_resp_delay = 0;
+ mld_hdr->reserved = 0;
+ ip6_addr_copy_to_packed(mld_hdr->multicast_address, group->group_address);
+
+#if CHECKSUM_GEN_ICMP6
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
+ mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len,
+ src_addr, &(group->group_address));
+ }
+#endif /* CHECKSUM_GEN_ICMP6 */
+
+ /* Add hop-by-hop headers options: router alert with MLD value. */
+ ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD);
+
+ if (type == ICMP6_TYPE_MLR) {
+ /* Remember we were the last to report */
+ group->last_reporter_flag = 1;
+ }
+
+ /* Send the packet out. */
+ MLD6_STATS_INC(mld6.xmit);
+ ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address),
+ MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, netif);
+ pbuf_free(p);
+}
+
+#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */
diff --git a/lwip/src/core/ipv6/nd6.c b/lwip/src/core/ipv6/nd6.c
new file mode 100644
index 0000000..db0c132
--- /dev/null
+++ b/lwip/src/core/ipv6/nd6.c
@@ -0,0 +1,2434 @@
+/**
+ * @file
+ *
+ * Neighbor discovery and stateless address autoconfiguration for IPv6.
+ * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862
+ * (Address autoconfiguration).
+ */
+
+/*
+ * Copyright (c) 2010 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/nd6.h"
+#include "lwip/priv/nd6_priv.h"
+#include "lwip/prot/nd6.h"
+#include "lwip/prot/icmp6.h"
+#include "lwip/pbuf.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/icmp6.h"
+#include "lwip/mld6.h"
+#include "lwip/dhcp6.h"
+#include "lwip/ip.h"
+#include "lwip/stats.h"
+#include "lwip/dns.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+#if LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK
+#error LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK
+#endif
+
+/* Neighbor discovery tables: neighbor and destination caches, prefix and router lists. */
+struct nd6_neighbor_cache_entry neighbor_cache[LWIP_ND6_NUM_NEIGHBORS];
+struct nd6_destination_cache_entry destination_cache[LWIP_ND6_NUM_DESTINATIONS];
+struct nd6_prefix_list_entry prefix_list[LWIP_ND6_NUM_PREFIXES];
+struct nd6_router_list_entry default_router_list[LWIP_ND6_NUM_ROUTERS];
+
+/* Default values, can be updated by a RA message. */
+u32_t reachable_time = LWIP_ND6_REACHABLE_TIME;
+u32_t retrans_timer = LWIP_ND6_RETRANS_TIMER; /* @todo implement this value in timer */
+
+/* Index for cache entries. */
+static u8_t nd6_cached_neighbor_index;
+static netif_addr_idx_t nd6_cached_destination_index;
+
+/* Multicast address holder. */
+static ip6_addr_t multicast_address;
+
+static u8_t nd6_tmr_rs_reduction;
+
+/* Static buffer to parse RA packet options */
+union ra_options {
+ struct lladdr_option lladdr;
+ struct mtu_option mtu;
+ struct prefix_option prefix;
+#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS
+ struct rdnss_option rdnss;
+#endif
+};
+static union ra_options nd6_ra_buffer;
+
+/* Forward declarations. */
+static s8_t nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr);
+static s8_t nd6_new_neighbor_cache_entry(void);
+static void nd6_free_neighbor_cache_entry(s8_t i);
+static s16_t nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr);
+static s16_t nd6_new_destination_cache_entry(void);
+static int nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif);
+static s8_t nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif);
+static s8_t nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif);
+static s8_t nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif);
+static s8_t nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif);
+static s8_t nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif);
+static s8_t nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif);
+static err_t nd6_queue_packet(s8_t neighbor_index, struct pbuf *q);
+
+#define ND6_SEND_FLAG_MULTICAST_DEST 0x01
+#define ND6_SEND_FLAG_ALLNODES_DEST 0x02
+#define ND6_SEND_FLAG_ANY_SRC 0x04
+static void nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags);
+static void nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags);
+static void nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags);
+#if LWIP_IPV6_SEND_ROUTER_SOLICIT
+static err_t nd6_send_rs(struct netif *netif);
+#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */
+
+#if LWIP_ND6_QUEUEING
+static void nd6_free_q(struct nd6_q_entry *q);
+#else /* LWIP_ND6_QUEUEING */
+#define nd6_free_q(q) pbuf_free(q)
+#endif /* LWIP_ND6_QUEUEING */
+static void nd6_send_q(s8_t i);
+
+
+/**
+ * A local address has been determined to be a duplicate. Take the appropriate
+ * action(s) on the address and the interface as a whole.
+ *
+ * @param netif the netif that owns the address
+ * @param addr_idx the index of the address detected to be a duplicate
+ */
+static void
+nd6_duplicate_addr_detected(struct netif *netif, s8_t addr_idx)
+{
+
+ /* Mark the address as duplicate, but leave its lifetimes alone. If this was
+ * a manually assigned address, it will remain in existence as duplicate, and
+ * as such be unusable for any practical purposes until manual intervention.
+ * If this was an autogenerated address, the address will follow normal
+ * expiration rules, and thus disappear once its valid lifetime expires. */
+ netif_ip6_addr_set_state(netif, addr_idx, IP6_ADDR_DUPLICATED);
+
+#if LWIP_IPV6_AUTOCONFIG
+ /* If the affected address was the link-local address that we use to generate
+ * all other addresses, then we should not continue to use those derived
+ * addresses either, so mark them as duplicate as well. For autoconfig-only
+ * setups, this will make the interface effectively unusable, approaching the
+ * intention of RFC 4862 Sec. 5.4.5. @todo implement the full requirements */
+ if (addr_idx == 0) {
+ s8_t i;
+ for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) &&
+ !netif_ip6_addr_isstatic(netif, i)) {
+ netif_ip6_addr_set_state(netif, i, IP6_ADDR_DUPLICATED);
+ }
+ }
+ }
+#endif /* LWIP_IPV6_AUTOCONFIG */
+}
+
+#if LWIP_IPV6_AUTOCONFIG
+/**
+ * We received a router advertisement that contains a prefix with the
+ * autoconfiguration flag set. Add or update an associated autogenerated
+ * address.
+ *
+ * @param netif the netif on which the router advertisement arrived
+ * @param prefix_opt a pointer to the prefix option data
+ * @param prefix_addr an aligned copy of the prefix address
+ */
+static void
+nd6_process_autoconfig_prefix(struct netif *netif,
+ struct prefix_option *prefix_opt, const ip6_addr_t *prefix_addr)
+{
+ ip6_addr_t ip6addr;
+ u32_t valid_life, pref_life;
+ u8_t addr_state;
+ s8_t i, free_idx;
+
+ /* The caller already checks RFC 4862 Sec. 5.5.3 points (a) and (b). We do
+ * the rest, starting with checks for (c) and (d) here. */
+ valid_life = lwip_htonl(prefix_opt->valid_lifetime);
+ pref_life = lwip_htonl(prefix_opt->preferred_lifetime);
+ if (pref_life > valid_life || prefix_opt->prefix_length != 64) {
+ return; /* silently ignore this prefix for autoconfiguration purposes */
+ }
+
+ /* If an autogenerated address already exists for this prefix, update its
+ * lifetimes. An address is considered autogenerated if 1) it is not static
+ * (i.e., manually assigned), and 2) there is an advertised autoconfiguration
+ * prefix for it (the one we are processing here). This does not necessarily
+ * exclude the possibility that the address was actually assigned by, say,
+ * DHCPv6. If that distinction becomes important in the future, more state
+ * must be kept. As explained elsewhere we also update lifetimes of tentative
+ * and duplicate addresses. Skip address slot 0 (the link-local address). */
+ for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ addr_state = netif_ip6_addr_state(netif, i);
+ if (!ip6_addr_isinvalid(addr_state) && !netif_ip6_addr_isstatic(netif, i) &&
+ ip6_addr_netcmp(prefix_addr, netif_ip6_addr(netif, i))) {
+ /* Update the valid lifetime, as per RFC 4862 Sec. 5.5.3 point (e).
+ * The valid lifetime will never drop to zero as a result of this. */
+ u32_t remaining_life = netif_ip6_addr_valid_life(netif, i);
+ if (valid_life > ND6_2HRS || valid_life > remaining_life) {
+ netif_ip6_addr_set_valid_life(netif, i, valid_life);
+ } else if (remaining_life > ND6_2HRS) {
+ netif_ip6_addr_set_valid_life(netif, i, ND6_2HRS);
+ }
+ LWIP_ASSERT("bad valid lifetime", !netif_ip6_addr_isstatic(netif, i));
+ /* Update the preferred lifetime. No bounds checks are needed here. In
+ * rare cases the advertisement may un-deprecate the address, though.
+ * Deprecation is left to the timer code where it is handled anyway. */
+ if (pref_life > 0 && addr_state == IP6_ADDR_DEPRECATED) {
+ netif_ip6_addr_set_state(netif, i, IP6_ADDR_PREFERRED);
+ }
+ netif_ip6_addr_set_pref_life(netif, i, pref_life);
+ return; /* there should be at most one matching address */
+ }
+ }
+
+ /* No autogenerated address exists for this prefix yet. See if we can add a
+ * new one. However, if IPv6 autoconfiguration is administratively disabled,
+ * do not generate new addresses, but do keep updating lifetimes for existing
+ * addresses. Also, when adding new addresses, we must protect explicitly
+ * against a valid lifetime of zero, because again, we use that as a special
+ * value. The generated address would otherwise expire immediately anyway.
+ * Finally, the original link-local address must be usable at all. We start
+ * creating addresses even if the link-local address is still in tentative
+ * state though, and deal with the fallout of that upon DAD collision. */
+ addr_state = netif_ip6_addr_state(netif, 0);
+ if (!netif->ip6_autoconfig_enabled || valid_life == IP6_ADDR_LIFE_STATIC ||
+ ip6_addr_isinvalid(addr_state) || ip6_addr_isduplicated(addr_state)) {
+ return;
+ }
+
+ /* Construct the new address that we intend to use, and then see if that
+ * address really does not exist. It might have been added manually, after
+ * all. As a side effect, find a free slot. Note that we cannot use
+ * netif_add_ip6_address() here, as it would return ERR_OK if the address
+ * already did exist, resulting in that address being given lifetimes. */
+ IP6_ADDR(&ip6addr, prefix_addr->addr[0], prefix_addr->addr[1],
+ netif_ip6_addr(netif, 0)->addr[2], netif_ip6_addr(netif, 0)->addr[3]);
+ ip6_addr_assign_zone(&ip6addr, IP6_UNICAST, netif);
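+ /* e.g. (sketch): prefix 2001:db8:1:1::/64 combined with link-local
+ * fe80::123:45ff:fe67:89ab yields 2001:db8:1:1:123:45ff:fe67:89ab. */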
+
+ free_idx = 0;
+ for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) {
+ if (ip6_addr_cmp(&ip6addr, netif_ip6_addr(netif, i))) {
+ return; /* formed address already exists */
+ }
+ } else if (free_idx == 0) {
+ free_idx = i;
+ }
+ }
+ if (free_idx == 0) {
+ return; /* no address slots available, try again on next advertisement */
+ }
+
+ /* Assign the new address to the interface. */
+ ip_addr_copy_from_ip6(netif->ip6_addr[free_idx], ip6addr);
+ netif_ip6_addr_set_valid_life(netif, free_idx, valid_life);
+ netif_ip6_addr_set_pref_life(netif, free_idx, pref_life);
+ netif_ip6_addr_set_state(netif, free_idx, IP6_ADDR_TENTATIVE);
+}
+#endif /* LWIP_IPV6_AUTOCONFIG */
+
+/**
+ * Process an incoming neighbor discovery message
+ *
+ * @param p the nd packet, p->payload pointing to the icmpv6 header
+ * @param inp the netif on which this packet was received
+ */
+void
+nd6_input(struct pbuf *p, struct netif *inp)
+{
+ u8_t msg_type;
+ s8_t i;
+ s16_t dest_idx;
+
+ ND6_STATS_INC(nd6.recv);
+
+ msg_type = *((u8_t *)p->payload);
+ switch (msg_type) {
+ case ICMP6_TYPE_NA: /* Neighbor Advertisement. */
+ {
+ struct na_header *na_hdr;
+ struct lladdr_option *lladdr_opt;
+ ip6_addr_t target_address;
+
+ /* Check that na header fits in packet. */
+ if (p->len < (sizeof(struct na_header))) {
+ /* @todo debug message */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ na_hdr = (struct na_header *)p->payload;
+
+ /* Create an aligned, zoned copy of the target address. */
+ ip6_addr_copy_from_packed(target_address, na_hdr->target_address);
+ ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp);
+
+ /* Check a subset of the other RFC 4861 Sec. 7.1.2 requirements. */
+ if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || na_hdr->code != 0 ||
+ ip6_addr_ismulticast(&target_address)) {
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.proterr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ /* @todo RFC MUST: if IP destination is multicast, Solicited flag is zero */
+ /* @todo RFC MUST: all included options have a length greater than zero */
+
+ /* Unsolicited NA? */
+ if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
+ /* This is an unsolicited NA.
+ * link-layer changed?
+ * part of DAD mechanism? */
+
+#if LWIP_IPV6_DUP_DETECT_ATTEMPTS
+ /* If the target address matches this netif, it is a DAD response. */
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) &&
+ !ip6_addr_isduplicated(netif_ip6_addr_state(inp, i)) &&
+ ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) {
+ /* We are using a duplicate address. */
+ nd6_duplicate_addr_detected(inp, i);
+
+ pbuf_free(p);
+ return;
+ }
+ }
+#endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */
+
+ /* Check that link-layer address option also fits in packet. */
+ if (p->len < (sizeof(struct na_header) + 2)) {
+ /* @todo debug message */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header));
+
+ if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) {
+ /* @todo debug message */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ /* This is an unsolicited NA, most likely there was a LLADDR change. */
+ i = nd6_find_neighbor_cache_entry(&target_address);
+ if (i >= 0) {
+ if (na_hdr->flags & ND6_FLAG_OVERRIDE) {
+ MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len);
+ }
+ }
+ } else {
+ /* This is a solicited NA.
+ * neighbor address resolution response?
+ * neighbor unreachability detection response? */
+
+ /* Find the cache entry corresponding to this na. */
+ i = nd6_find_neighbor_cache_entry(&target_address);
+ if (i < 0) {
+ /* We no longer care about this target address. drop it. */
+ pbuf_free(p);
+ return;
+ }
+
+ /* Update cache entry. */
+ if ((na_hdr->flags & ND6_FLAG_OVERRIDE) ||
+ (neighbor_cache[i].state == ND6_INCOMPLETE)) {
+ /* Check that link-layer address option also fits in packet. */
+ if (p->len < (sizeof(struct na_header) + 2)) {
+ /* @todo debug message */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header));
+
+ if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) {
+ /* @todo debug message */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len);
+ }
+
+ neighbor_cache[i].netif = inp;
+ neighbor_cache[i].state = ND6_REACHABLE;
+ neighbor_cache[i].counter.reachable_time = reachable_time;
+
+ /* Send queued packets, if any. */
+ if (neighbor_cache[i].q != NULL) {
+ nd6_send_q(i);
+ }
+ }
+
+ break; /* ICMP6_TYPE_NA */
+ }
+ case ICMP6_TYPE_NS: /* Neighbor solicitation. */
+ {
+ struct ns_header *ns_hdr;
+ struct lladdr_option *lladdr_opt;
+ ip6_addr_t target_address;
+ u8_t accepted;
+
+ /* Check that ns header fits in packet. */
+ if (p->len < sizeof(struct ns_header)) {
+ /* @todo debug message */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ ns_hdr = (struct ns_header *)p->payload;
+
+ /* Create an aligned, zoned copy of the target address. */
+ ip6_addr_copy_from_packed(target_address, ns_hdr->target_address);
+ ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp);
+
+ /* Check a subset of the other RFC 4861 Sec. 7.1.1 requirements. */
+ if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ns_hdr->code != 0 ||
+ ip6_addr_ismulticast(&target_address)) {
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.proterr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ /* @todo RFC MUST: all included options have a length greater than zero */
+ /* @todo RFC MUST: if IP source is 'any', destination is solicited-node multicast address */
+ /* @todo RFC MUST: if IP source is 'any', there is no source LL address option */
+
+ /* Check if there is a link-layer address provided. Only point to it if in this buffer. */
+ if (p->len >= (sizeof(struct ns_header) + 2)) {
+ lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header));
+ if (p->len < (sizeof(struct ns_header) + (lladdr_opt->length << 3))) {
+ lladdr_opt = NULL;
+ }
+ } else {
+ lladdr_opt = NULL;
+ }
+
+ /* Check if the target address is configured on the receiving netif. */
+ accepted = 0;
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) {
+ if ((ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) ||
+ (ip6_addr_istentative(netif_ip6_addr_state(inp, i)) &&
+ ip6_addr_isany(ip6_current_src_addr()))) &&
+ ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) {
+ accepted = 1;
+ break;
+ }
+ }
+
+ /* NS not for us? */
+ if (!accepted) {
+ pbuf_free(p);
+ return;
+ }
+
+ /* Check for ANY address in src (DAD algorithm). */
+ if (ip6_addr_isany(ip6_current_src_addr())) {
+ /* Sender is validating this address. */
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) {
+ if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) &&
+ ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) {
+ /* Send a NA back so that the sender does not use this address. */
+ nd6_send_na(inp, netif_ip6_addr(inp, i), ND6_FLAG_OVERRIDE | ND6_SEND_FLAG_ALLNODES_DEST);
+ if (ip6_addr_istentative(netif_ip6_addr_state(inp, i))) {
+ /* We shouldn't use this address either. */
+ nd6_duplicate_addr_detected(inp, i);
+ }
+ }
+ }
+ } else {
+ /* Sender is trying to resolve our address. */
+ /* Verify that they included their own link-layer address. */
+ if (lladdr_opt == NULL) {
+ /* Not a valid message. */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.proterr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ i = nd6_find_neighbor_cache_entry(ip6_current_src_addr());
+ if (i >= 0) {
+ /* We already have a record for the solicitor. */
+ if (neighbor_cache[i].state == ND6_INCOMPLETE) {
+ neighbor_cache[i].netif = inp;
+ MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len);
+
+ /* Delay probe in case we get confirmation of reachability from upper layer (TCP). */
+ neighbor_cache[i].state = ND6_DELAY;
+ neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL;
+ }
+ } else {
+ /* Add their IPv6 address and link-layer address to neighbor cache.
+ * We will need it at least to send a unicast NA message, but most
+ * likely we will also be communicating with this node soon. */
+ i = nd6_new_neighbor_cache_entry();
+ if (i < 0) {
+ /* We couldn't assign a cache entry for this neighbor.
+ * we won't be able to reply. drop it. */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.memerr);
+ return;
+ }
+ neighbor_cache[i].netif = inp;
+ MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len);
+ ip6_addr_set(&(neighbor_cache[i].next_hop_address), ip6_current_src_addr());
+
+ /* Receiving a message does not prove reachability: only in one direction.
+ * Delay probe in case we get confirmation of reachability from upper layer (TCP). */
+ neighbor_cache[i].state = ND6_DELAY;
+ neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL;
+ }
+
+ /* Send back a NA for us. Allocate the reply pbuf. */
+ nd6_send_na(inp, &target_address, ND6_FLAG_SOLICITED | ND6_FLAG_OVERRIDE);
+ }
+
+ break; /* ICMP6_TYPE_NS */
+ }
+ case ICMP6_TYPE_RA: /* Router Advertisement. */
+ {
+ struct ra_header *ra_hdr;
+ u8_t *buffer; /* Used to copy options. */
+ u16_t offset;
+#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS
+ /* There can be multiple RDNSS options per RA */
+ u8_t rdnss_server_idx = 0;
+#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */
+
+ /* Check that RA header fits in packet. */
+ if (p->len < sizeof(struct ra_header)) {
+ /* @todo debug message */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ ra_hdr = (struct ra_header *)p->payload;
+
+ /* Check a subset of the other RFC 4861 Sec. 6.1.2 requirements. */
+ if (!ip6_addr_islinklocal(ip6_current_src_addr()) ||
+ IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ra_hdr->code != 0) {
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.proterr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ /* @todo RFC MUST: all included options have a length greater than zero */
+
+ /* If we are sending RS messages, stop. */
+#if LWIP_IPV6_SEND_ROUTER_SOLICIT
+ /* ensure at least one solicitation is sent (see RFC 4861, ch. 6.3.7) */
+ if ((inp->rs_count < LWIP_ND6_MAX_MULTICAST_SOLICIT) ||
+ (nd6_send_rs(inp) == ERR_OK)) {
+ inp->rs_count = 0;
+ } else {
+ inp->rs_count = 1;
+ }
+#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */
+
+ /* Get the matching default router entry. */
+ i = nd6_get_router(ip6_current_src_addr(), inp);
+ if (i < 0) {
+ /* Create a new router entry. */
+ i = nd6_new_router(ip6_current_src_addr(), inp);
+ }
+
+ if (i < 0) {
+ /* Could not create a new router entry. */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.memerr);
+ return;
+ }
+
+ /* Re-set invalidation timer. */
+ default_router_list[i].invalidation_timer = lwip_htons(ra_hdr->router_lifetime);
+
+ /* Re-set default timer values. */
+#if LWIP_ND6_ALLOW_RA_UPDATES
+ if (ra_hdr->retrans_timer > 0) {
+ retrans_timer = lwip_htonl(ra_hdr->retrans_timer);
+ }
+ if (ra_hdr->reachable_time > 0) {
+ reachable_time = lwip_htonl(ra_hdr->reachable_time);
+ }
+#endif /* LWIP_ND6_ALLOW_RA_UPDATES */
+
+ /* @todo set default hop limit... */
+ /* ra_hdr->current_hop_limit;*/
+
+ /* Update flags in local entry (incl. preference). */
+ default_router_list[i].flags = ra_hdr->flags;
+
+#if LWIP_IPV6_DHCP6
+ /* Trigger DHCPv6 if enabled */
+ dhcp6_nd6_ra_trigger(inp, ra_hdr->flags & ND6_RA_FLAG_MANAGED_ADDR_CONFIG,
+ ra_hdr->flags & ND6_RA_FLAG_OTHER_CONFIG);
+#endif
+
+ /* Offset to options. */
+ offset = sizeof(struct ra_header);
+
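+ /* Each ND option is a TLV: byte 0 is the option type, byte 1 its length
+ * in units of 8 octets (including the type and length bytes), followed by
+ * the option data (RFC 4861 Sec. 4.6). */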
+ /* Process each option. */
+ while ((p->tot_len - offset) >= 2) {
+ u8_t option_type;
+ u16_t option_len;
+ int option_len8 = pbuf_try_get_at(p, offset + 1);
+ if (option_len8 <= 0) {
+ /* read beyond end or zero length */
+ goto lenerr_drop_free_return;
+ }
+ option_len = ((u8_t)option_len8) << 3;
+ if (option_len > p->tot_len - offset) {
+ /* short packet (option does not fit in) */
+ goto lenerr_drop_free_return;
+ }
+ if (p->len == p->tot_len) {
+ /* no need to copy from contiguous pbuf */
+ buffer = &((u8_t*)p->payload)[offset];
+ } else {
+ /* check if this option fits into our buffer */
+ if (option_len > sizeof(nd6_ra_buffer)) {
+ option_type = pbuf_get_at(p, offset);
+ /* invalid option length */
+ if (option_type != ND6_OPTION_TYPE_RDNSS) {
+ goto lenerr_drop_free_return;
+ }
+ /* we allow RDNSS option to be longer - we'll just drop some servers */
+ option_len = sizeof(nd6_ra_buffer);
+ }
+ buffer = (u8_t*)&nd6_ra_buffer;
+ option_len = pbuf_copy_partial(p, &nd6_ra_buffer, option_len, offset);
+ }
+ option_type = buffer[0];
+ switch (option_type) {
+ case ND6_OPTION_TYPE_SOURCE_LLADDR:
+ {
+ struct lladdr_option *lladdr_opt;
+ if (option_len < sizeof(struct lladdr_option)) {
+ goto lenerr_drop_free_return;
+ }
+ lladdr_opt = (struct lladdr_option *)buffer;
+ if ((default_router_list[i].neighbor_entry != NULL) &&
+ (default_router_list[i].neighbor_entry->state == ND6_INCOMPLETE)) {
+ SMEMCPY(default_router_list[i].neighbor_entry->lladdr, lladdr_opt->addr, inp->hwaddr_len);
+ default_router_list[i].neighbor_entry->state = ND6_REACHABLE;
+ default_router_list[i].neighbor_entry->counter.reachable_time = reachable_time;
+ }
+ break;
+ }
+ case ND6_OPTION_TYPE_MTU:
+ {
+ struct mtu_option *mtu_opt;
+ u32_t mtu32;
+ if (option_len < sizeof(struct mtu_option)) {
+ goto lenerr_drop_free_return;
+ }
+ mtu_opt = (struct mtu_option *)buffer;
+ mtu32 = lwip_htonl(mtu_opt->mtu);
+ if ((mtu32 >= 1280) && (mtu32 <= 0xffff)) {
+#if LWIP_ND6_ALLOW_RA_UPDATES
+ if (inp->mtu) {
+ /* don't set the mtu for IPv6 higher than the netif driver supports */
+ inp->mtu6 = LWIP_MIN(inp->mtu, (u16_t)mtu32);
+ } else {
+ inp->mtu6 = (u16_t)mtu32;
+ }
+#endif /* LWIP_ND6_ALLOW_RA_UPDATES */
+ }
+ break;
+ }
+ case ND6_OPTION_TYPE_PREFIX_INFO:
+ {
+ struct prefix_option *prefix_opt;
+ ip6_addr_t prefix_addr;
+ if (option_len < sizeof(struct prefix_option)) {
+ goto lenerr_drop_free_return;
+ }
+
+ prefix_opt = (struct prefix_option *)buffer;
+
+ /* Get a memory-aligned copy of the prefix. */
+ ip6_addr_copy_from_packed(prefix_addr, prefix_opt->prefix);
+ ip6_addr_assign_zone(&prefix_addr, IP6_UNICAST, inp);
+
+ if (!ip6_addr_islinklocal(&prefix_addr)) {
+ if ((prefix_opt->flags & ND6_PREFIX_FLAG_ON_LINK) &&
+ (prefix_opt->prefix_length == 64)) {
+ /* Add to on-link prefix list. */
+ u32_t valid_life;
+ s8_t prefix;
+
+ valid_life = lwip_htonl(prefix_opt->valid_lifetime);
+
+ /* find cache entry for this prefix. */
+ prefix = nd6_get_onlink_prefix(&prefix_addr, inp);
+ if (prefix < 0 && valid_life > 0) {
+ /* Create a new cache entry. */
+ prefix = nd6_new_onlink_prefix(&prefix_addr, inp);
+ }
+ if (prefix >= 0) {
+ prefix_list[prefix].invalidation_timer = valid_life;
+ }
+ }
+#if LWIP_IPV6_AUTOCONFIG
+ if (prefix_opt->flags & ND6_PREFIX_FLAG_AUTONOMOUS) {
+ /* Perform processing for autoconfiguration. */
+ nd6_process_autoconfig_prefix(inp, prefix_opt, &prefix_addr);
+ }
+#endif /* LWIP_IPV6_AUTOCONFIG */
+ }
+
+ break;
+ }
+ case ND6_OPTION_TYPE_ROUTE_INFO:
+ /* @todo implement preferred routes.
+ struct route_option * route_opt;
+ route_opt = (struct route_option *)buffer;*/
+
+ break;
+#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS
+ case ND6_OPTION_TYPE_RDNSS:
+ {
+ u8_t num, n;
+ u16_t copy_offset = offset + SIZEOF_RDNSS_OPTION_BASE;
+ struct rdnss_option * rdnss_opt;
+ if (option_len < SIZEOF_RDNSS_OPTION_BASE) {
+ goto lenerr_drop_free_return;
+ }
+
+ rdnss_opt = (struct rdnss_option *)buffer;
+ num = (rdnss_opt->length - 1) / 2;
+ for (n = 0; (rdnss_server_idx < DNS_MAX_SERVERS) && (n < num); n++) {
+ ip_addr_t rdnss_address;
+
+        /* Copy directly from the pbuf to get an aligned, zoned copy of the address. */
+ if (pbuf_copy_partial(p, &rdnss_address, sizeof(ip6_addr_p_t), copy_offset) == sizeof(ip6_addr_p_t)) {
+ IP_SET_TYPE_VAL(rdnss_address, IPADDR_TYPE_V6);
+ ip6_addr_assign_zone(ip_2_ip6(&rdnss_address), IP6_UNKNOWN, inp);
+
+          if (lwip_htonl(rdnss_opt->lifetime) > 0) {
+ /* TODO implement Lifetime > 0 */
+ dns_setserver(rdnss_server_idx++, &rdnss_address);
+ } else {
+ /* TODO implement DNS removal in dns.c */
+ u8_t s;
+ for (s = 0; s < DNS_MAX_SERVERS; s++) {
+ const ip_addr_t *addr = dns_getserver(s);
+              if (ip_addr_cmp(addr, &rdnss_address)) {
+ dns_setserver(s, NULL);
+ }
+ }
+ }
+ }
+ }
+ break;
+ }
+#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */
+ default:
+ /* Unrecognized option, abort. */
+ ND6_STATS_INC(nd6.proterr);
+ break;
+ }
+ /* option length is checked earlier to be non-zero to make sure loop ends */
+ offset += 8 * (u8_t)option_len8;
+ }
+
+ break; /* ICMP6_TYPE_RA */
+ }
+ case ICMP6_TYPE_RD: /* Redirect */
+ {
+ struct redirect_header *redir_hdr;
+ struct lladdr_option *lladdr_opt;
+ ip6_addr_t destination_address, target_address;
+
+ /* Check that Redir header fits in packet. */
+ if (p->len < sizeof(struct redirect_header)) {
+ /* @todo debug message */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ redir_hdr = (struct redirect_header *)p->payload;
+
+ /* Create an aligned, zoned copy of the destination address. */
+ ip6_addr_copy_from_packed(destination_address, redir_hdr->destination_address);
+ ip6_addr_assign_zone(&destination_address, IP6_UNICAST, inp);
+
+ /* Check a subset of the other RFC 4861 Sec. 8.1 requirements. */
+ if (!ip6_addr_islinklocal(ip6_current_src_addr()) ||
+ IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM ||
+ redir_hdr->code != 0 || ip6_addr_ismulticast(&destination_address)) {
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.proterr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ /* @todo RFC MUST: IP source address equals first-hop router for destination_address */
+ /* @todo RFC MUST: ICMP target address is either link-local address or same as destination_address */
+ /* @todo RFC MUST: all included options have a length greater than zero */
+
+ if (p->len >= (sizeof(struct redirect_header) + 2)) {
+ lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct redirect_header));
+ if (p->len < (sizeof(struct redirect_header) + (lladdr_opt->length << 3))) {
+ lladdr_opt = NULL;
+ }
+ } else {
+ lladdr_opt = NULL;
+ }
+
+ /* Find dest address in cache */
+ dest_idx = nd6_find_destination_cache_entry(&destination_address);
+ if (dest_idx < 0) {
+ /* Destination not in cache, drop packet. */
+ pbuf_free(p);
+ return;
+ }
+
+ /* Create an aligned, zoned copy of the target address. */
+ ip6_addr_copy_from_packed(target_address, redir_hdr->target_address);
+ ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp);
+
+ /* Set the new target address. */
+ ip6_addr_copy(destination_cache[dest_idx].next_hop_addr, target_address);
+
+ /* If Link-layer address of other router is given, try to add to neighbor cache. */
+ if (lladdr_opt != NULL) {
+ if (lladdr_opt->type == ND6_OPTION_TYPE_TARGET_LLADDR) {
+ i = nd6_find_neighbor_cache_entry(&target_address);
+ if (i < 0) {
+ i = nd6_new_neighbor_cache_entry();
+ if (i >= 0) {
+ neighbor_cache[i].netif = inp;
+ MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len);
+ ip6_addr_copy(neighbor_cache[i].next_hop_address, target_address);
+
+ /* Receiving a message does not prove reachability: only in one direction.
+ * Delay probe in case we get confirmation of reachability from upper layer (TCP). */
+ neighbor_cache[i].state = ND6_DELAY;
+ neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL;
+ }
+ }
+ if (i >= 0) {
+ if (neighbor_cache[i].state == ND6_INCOMPLETE) {
+ MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len);
+ /* Receiving a message does not prove reachability: only in one direction.
+ * Delay probe in case we get confirmation of reachability from upper layer (TCP). */
+ neighbor_cache[i].state = ND6_DELAY;
+ neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL;
+ }
+ }
+ }
+ }
+ break; /* ICMP6_TYPE_RD */
+ }
+ case ICMP6_TYPE_PTB: /* Packet too big */
+ {
+ struct icmp6_hdr *icmp6hdr; /* Packet too big message */
+ struct ip6_hdr *ip6hdr; /* IPv6 header of the packet which caused the error */
+ u32_t pmtu;
+ ip6_addr_t destination_address;
+
+ /* Check that ICMPv6 header + IPv6 header fit in payload */
+ if (p->len < (sizeof(struct icmp6_hdr) + IP6_HLEN)) {
+ /* drop short packets */
+ pbuf_free(p);
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ return;
+ }
+
+ icmp6hdr = (struct icmp6_hdr *)p->payload;
+ ip6hdr = (struct ip6_hdr *)((u8_t*)p->payload + sizeof(struct icmp6_hdr));
+
+ /* Create an aligned, zoned copy of the destination address. */
+ ip6_addr_copy_from_packed(destination_address, ip6hdr->dest);
+ ip6_addr_assign_zone(&destination_address, IP6_UNKNOWN, inp);
+
+ /* Look for entry in destination cache. */
+ dest_idx = nd6_find_destination_cache_entry(&destination_address);
+ if (dest_idx < 0) {
+ /* Destination not in cache, drop packet. */
+ pbuf_free(p);
+ return;
+ }
+
+ /* Change the Path MTU. */
+ pmtu = lwip_htonl(icmp6hdr->data);
+ destination_cache[dest_idx].pmtu = (u16_t)LWIP_MIN(pmtu, 0xFFFF);
+
+ break; /* ICMP6_TYPE_PTB */
+ }
+
+ default:
+ ND6_STATS_INC(nd6.proterr);
+ ND6_STATS_INC(nd6.drop);
+ break; /* default */
+ }
+
+ pbuf_free(p);
+ return;
+lenerr_drop_free_return:
+ ND6_STATS_INC(nd6.lenerr);
+ ND6_STATS_INC(nd6.drop);
+ pbuf_free(p);
+}
+
+
+/**
+ * Periodic timer for Neighbor discovery functions:
+ *
+ * - Update neighbor reachability states
+ * - Update destination cache entries age
+ * - Update invalidation timers of default routers and on-link prefixes
+ * - Update lifetimes of our addresses
+ * - Perform duplicate address detection (DAD) for our addresses
+ * - Send router solicitations
+ */
+void
+nd6_tmr(void)
+{
+ s8_t i;
+ struct netif *netif;
+
+ /* Process neighbor entries. */
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+ switch (neighbor_cache[i].state) {
+ case ND6_INCOMPLETE:
+ if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) &&
+ (!neighbor_cache[i].isrouter)) {
+ /* Retries exceeded. */
+ nd6_free_neighbor_cache_entry(i);
+ } else {
+ /* Send a NS for this entry. */
+ neighbor_cache[i].counter.probes_sent++;
+ nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST);
+ }
+ break;
+ case ND6_REACHABLE:
+ /* Send queued packets, if any are left. Should have been sent already. */
+ if (neighbor_cache[i].q != NULL) {
+ nd6_send_q(i);
+ }
+ if (neighbor_cache[i].counter.reachable_time <= ND6_TMR_INTERVAL) {
+ /* Change to stale state. */
+ neighbor_cache[i].state = ND6_STALE;
+ neighbor_cache[i].counter.stale_time = 0;
+ } else {
+ neighbor_cache[i].counter.reachable_time -= ND6_TMR_INTERVAL;
+ }
+ break;
+ case ND6_STALE:
+ neighbor_cache[i].counter.stale_time++;
+ break;
+ case ND6_DELAY:
+ if (neighbor_cache[i].counter.delay_time <= 1) {
+ /* Change to PROBE state. */
+ neighbor_cache[i].state = ND6_PROBE;
+ neighbor_cache[i].counter.probes_sent = 0;
+ } else {
+ neighbor_cache[i].counter.delay_time--;
+ }
+ break;
+ case ND6_PROBE:
+ if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) &&
+ (!neighbor_cache[i].isrouter)) {
+ /* Retries exceeded. */
+ nd6_free_neighbor_cache_entry(i);
+ } else {
+ /* Send a NS for this entry. */
+ neighbor_cache[i].counter.probes_sent++;
+ nd6_send_neighbor_cache_probe(&neighbor_cache[i], 0);
+ }
+ break;
+ case ND6_NO_ENTRY:
+ default:
+ /* Do nothing. */
+ break;
+ }
+ }
+
+ /* Process destination entries. */
+ for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) {
+ destination_cache[i].age++;
+ }
+
+ /* Process router entries. */
+ for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) {
+ if (default_router_list[i].neighbor_entry != NULL) {
+ /* Active entry. */
+ if (default_router_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) {
+ /* No more than 1 second remaining. Clear this entry. Also clear any of
+ * its destination cache entries, as per RFC 4861 Sec. 5.3 and 6.3.5. */
+ s8_t j;
+ for (j = 0; j < LWIP_ND6_NUM_DESTINATIONS; j++) {
+ if (ip6_addr_cmp(&destination_cache[j].next_hop_addr,
+ &default_router_list[i].neighbor_entry->next_hop_address)) {
+ ip6_addr_set_any(&destination_cache[j].destination_addr);
+ }
+ }
+ default_router_list[i].neighbor_entry->isrouter = 0;
+ default_router_list[i].neighbor_entry = NULL;
+ default_router_list[i].invalidation_timer = 0;
+ default_router_list[i].flags = 0;
+ } else {
+ default_router_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000;
+ }
+ }
+ }
+
+ /* Process prefix entries. */
+ for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) {
+ if (prefix_list[i].netif != NULL) {
+ if (prefix_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) {
+ /* Entry timed out, remove it */
+ prefix_list[i].invalidation_timer = 0;
+ prefix_list[i].netif = NULL;
+ } else {
+ prefix_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000;
+ }
+ }
+ }
+
+ /* Process our own addresses, updating address lifetimes and/or DAD state. */
+ NETIF_FOREACH(netif) {
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) {
+ u8_t addr_state;
+#if LWIP_IPV6_ADDRESS_LIFETIMES
+ /* Step 1: update address lifetimes (valid and preferred). */
+ addr_state = netif_ip6_addr_state(netif, i);
+ /* RFC 4862 is not entirely clear as to whether address lifetimes affect
+ * tentative addresses, and is even less clear as to what should happen
+ * with duplicate addresses. We choose to track and update lifetimes for
+ * both those types, although for different reasons:
+ * - for tentative addresses, the line of thought of Sec. 5.7 combined
+ * with the potentially long period that an address may be in tentative
+ * state (due to the interface being down) suggests that lifetimes
+ * should be independent of external factors which would include DAD;
+ * - for duplicate addresses, retiring them early could result in a new
+ * but unwanted attempt at marking them as valid, while retiring them
+ * late/never could clog up address slots on the netif.
+ * As a result, we may end up expiring addresses of either type here.
+ */
+ if (!ip6_addr_isinvalid(addr_state) &&
+ !netif_ip6_addr_isstatic(netif, i)) {
+ u32_t life = netif_ip6_addr_valid_life(netif, i);
+ if (life <= ND6_TMR_INTERVAL / 1000) {
+ /* The address has expired. */
+ netif_ip6_addr_set_valid_life(netif, i, 0);
+ netif_ip6_addr_set_pref_life(netif, i, 0);
+ netif_ip6_addr_set_state(netif, i, IP6_ADDR_INVALID);
+ } else {
+ if (!ip6_addr_life_isinfinite(life)) {
+ life -= ND6_TMR_INTERVAL / 1000;
+ LWIP_ASSERT("bad valid lifetime", life != IP6_ADDR_LIFE_STATIC);
+ netif_ip6_addr_set_valid_life(netif, i, life);
+ }
+ /* The address is still here. Update the preferred lifetime too. */
+ life = netif_ip6_addr_pref_life(netif, i);
+ if (life <= ND6_TMR_INTERVAL / 1000) {
+ /* This case must also trigger if 'life' was already zero, so as to
+ * deal correctly with advertised preferred-lifetime reductions. */
+ netif_ip6_addr_set_pref_life(netif, i, 0);
+ if (addr_state == IP6_ADDR_PREFERRED)
+ netif_ip6_addr_set_state(netif, i, IP6_ADDR_DEPRECATED);
+ } else if (!ip6_addr_life_isinfinite(life)) {
+ life -= ND6_TMR_INTERVAL / 1000;
+ netif_ip6_addr_set_pref_life(netif, i, life);
+ }
+ }
+ }
+ /* The address state may now have changed, so reobtain it next. */
+#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */
+ /* Step 2: update DAD state. */
+ addr_state = netif_ip6_addr_state(netif, i);
+ if (ip6_addr_istentative(addr_state)) {
+ if ((addr_state & IP6_ADDR_TENTATIVE_COUNT_MASK) >= LWIP_IPV6_DUP_DETECT_ATTEMPTS) {
+ /* No NA received in response. Mark address as valid. For dynamic
+ * addresses with an expired preferred lifetime, the state is set to
+ * deprecated right away. That should almost never happen, though. */
+ addr_state = IP6_ADDR_PREFERRED;
+#if LWIP_IPV6_ADDRESS_LIFETIMES
+ if (!netif_ip6_addr_isstatic(netif, i) &&
+ netif_ip6_addr_pref_life(netif, i) == 0) {
+ addr_state = IP6_ADDR_DEPRECATED;
+ }
+#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */
+ netif_ip6_addr_set_state(netif, i, addr_state);
+ } else if (netif_is_up(netif) && netif_is_link_up(netif)) {
+ /* tentative: set next state by increasing by one */
+ netif_ip6_addr_set_state(netif, i, addr_state + 1);
+ /* Send a NS for this address. Use the unspecified address as source
+ * address in all cases (RFC 4862 Sec. 5.4.2), not in the least
+ * because as it is, we only consider multicast replies for DAD. */
+ nd6_send_ns(netif, netif_ip6_addr(netif, i),
+ ND6_SEND_FLAG_MULTICAST_DEST | ND6_SEND_FLAG_ANY_SRC);
+ }
+ }
+ }
+ }
+
+#if LWIP_IPV6_SEND_ROUTER_SOLICIT
+ /* Send router solicitation messages, if necessary. */
+ if (!nd6_tmr_rs_reduction) {
+ nd6_tmr_rs_reduction = (ND6_RTR_SOLICITATION_INTERVAL / ND6_TMR_INTERVAL) - 1;
+ NETIF_FOREACH(netif) {
+ if ((netif->rs_count > 0) && netif_is_up(netif) &&
+ netif_is_link_up(netif) &&
+ !ip6_addr_isinvalid(netif_ip6_addr_state(netif, 0)) &&
+ !ip6_addr_isduplicated(netif_ip6_addr_state(netif, 0))) {
+ if (nd6_send_rs(netif) == ERR_OK) {
+ netif->rs_count--;
+ }
+ }
+ }
+ } else {
+ nd6_tmr_rs_reduction--;
+ }
+#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */
+
+}
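+
+/* Note on usage: ports normally do not call nd6_tmr() directly; the lwIP
+ * timer infrastructure fires it every ND6_TMR_INTERVAL milliseconds from the
+ * tcpip thread. A minimal sketch of a re-arming handler (the helper name is
+ * illustrative, not part of this file):
+ *
+ *   static void nd6_timer(void *arg)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     nd6_tmr();
+ *     sys_timeout(ND6_TMR_INTERVAL, nd6_timer, NULL);
+ *   }
+ */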
+
+/** Send a neighbor solicitation message for a specific neighbor cache entry
+ *
+ * @param entry the neighbor cache entry for which to send the message
+ * @param flags one of ND6_SEND_FLAG_*
+ */
+static void
+nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags)
+{
+ nd6_send_ns(entry->netif, &entry->next_hop_address, flags);
+}
+
+/**
+ * Send a neighbor solicitation message
+ *
+ * @param netif the netif on which to send the message
+ * @param target_addr the IPv6 target address for the ND message
+ * @param flags one of ND6_SEND_FLAG_*
+ */
+static void
+nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags)
+{
+ struct ns_header *ns_hdr;
+ struct pbuf *p;
+ const ip6_addr_t *src_addr;
+ u16_t lladdr_opt_len;
+
+ LWIP_ASSERT("target address is required", target_addr != NULL);
+
+ if (!(flags & ND6_SEND_FLAG_ANY_SRC) &&
+      ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) {
+ /* Use link-local address as source address. */
+ src_addr = netif_ip6_addr(netif, 0);
+ /* calculate option length (in 8-byte-blocks) */
+ lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3;
+ } else {
+ src_addr = IP6_ADDR_ANY6;
+ /* Option "MUST NOT be included when the source IP address is the unspecified address." */
+ lladdr_opt_len = 0;
+ }
+
+ /* Allocate a packet. */
+ p = pbuf_alloc(PBUF_IP, sizeof(struct ns_header) + (lladdr_opt_len << 3), PBUF_RAM);
+ if (p == NULL) {
+ ND6_STATS_INC(nd6.memerr);
+ return;
+ }
+
+ /* Set fields. */
+ ns_hdr = (struct ns_header *)p->payload;
+
+ ns_hdr->type = ICMP6_TYPE_NS;
+ ns_hdr->code = 0;
+ ns_hdr->chksum = 0;
+ ns_hdr->reserved = 0;
+ ip6_addr_copy_to_packed(ns_hdr->target_address, *target_addr);
+
+ if (lladdr_opt_len != 0) {
+ struct lladdr_option *lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header));
+ lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR;
+ lladdr_opt->length = (u8_t)lladdr_opt_len;
+ SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len);
+ }
+
+ /* Generate the solicited node address for the target address. */
+ if (flags & ND6_SEND_FLAG_MULTICAST_DEST) {
+ ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]);
+ ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif);
+ target_addr = &multicast_address;
+ }
+
+#if CHECKSUM_GEN_ICMP6
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
+ ns_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr,
+ target_addr);
+ }
+#endif /* CHECKSUM_GEN_ICMP6 */
+
+ /* Send the packet out. */
+ ND6_STATS_INC(nd6.xmit);
+ ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, target_addr,
+ ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif);
+ pbuf_free(p);
+}
+
+/**
+ * Send a neighbor advertisement message
+ *
+ * @param netif the netif on which to send the message
+ * @param target_addr the IPv6 target address for the ND message
+ * @param flags one of ND6_SEND_FLAG_*
+ */
+static void
+nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags)
+{
+ struct na_header *na_hdr;
+ struct lladdr_option *lladdr_opt;
+ struct pbuf *p;
+ const ip6_addr_t *src_addr;
+ const ip6_addr_t *dest_addr;
+ u16_t lladdr_opt_len;
+
+ LWIP_ASSERT("target address is required", target_addr != NULL);
+
+  /* Use the target address as the source address (rather than, e.g., the
+   * netif's link-local address). */
+  src_addr = target_addr;
+
+ /* Allocate a packet. */
+  lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3;
+ p = pbuf_alloc(PBUF_IP, sizeof(struct na_header) + (lladdr_opt_len << 3), PBUF_RAM);
+ if (p == NULL) {
+ ND6_STATS_INC(nd6.memerr);
+ return;
+ }
+
+ /* Set fields. */
+ na_hdr = (struct na_header *)p->payload;
+ lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header));
+
+ na_hdr->type = ICMP6_TYPE_NA;
+ na_hdr->code = 0;
+ na_hdr->chksum = 0;
+ na_hdr->flags = flags & 0xf0;
+ na_hdr->reserved[0] = 0;
+ na_hdr->reserved[1] = 0;
+ na_hdr->reserved[2] = 0;
+ ip6_addr_copy_to_packed(na_hdr->target_address, *target_addr);
+
+ lladdr_opt->type = ND6_OPTION_TYPE_TARGET_LLADDR;
+ lladdr_opt->length = (u8_t)lladdr_opt_len;
+ SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len);
+
+ /* Generate the solicited node address for the target address. */
+ if (flags & ND6_SEND_FLAG_MULTICAST_DEST) {
+ ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]);
+ ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif);
+ dest_addr = &multicast_address;
+ } else if (flags & ND6_SEND_FLAG_ALLNODES_DEST) {
+ ip6_addr_set_allnodes_linklocal(&multicast_address);
+ ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif);
+ dest_addr = &multicast_address;
+ } else {
+ dest_addr = ip6_current_src_addr();
+ }
+
+#if CHECKSUM_GEN_ICMP6
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
+ na_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr,
+ dest_addr);
+ }
+#endif /* CHECKSUM_GEN_ICMP6 */
+
+ /* Send the packet out. */
+ ND6_STATS_INC(nd6.xmit);
+ ip6_output_if(p, src_addr, dest_addr,
+ ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif);
+ pbuf_free(p);
+}
+
+#if LWIP_IPV6_SEND_ROUTER_SOLICIT
+/**
+ * Send a router solicitation message
+ *
+ * @param netif the netif on which to send the message
+ */
+static err_t
+nd6_send_rs(struct netif *netif)
+{
+ struct rs_header *rs_hdr;
+ struct lladdr_option *lladdr_opt;
+ struct pbuf *p;
+ const ip6_addr_t *src_addr;
+ err_t err;
+ u16_t lladdr_opt_len = 0;
+
+ /* Link-local source address, or unspecified address? */
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) {
+ src_addr = netif_ip6_addr(netif, 0);
+ } else {
+ src_addr = IP6_ADDR_ANY6;
+ }
+
+ /* Generate the all routers target address. */
+ ip6_addr_set_allrouters_linklocal(&multicast_address);
+ ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif);
+
+ /* Allocate a packet. */
+ if (src_addr != IP6_ADDR_ANY6) {
+    lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3;
+ }
+ p = pbuf_alloc(PBUF_IP, sizeof(struct rs_header) + (lladdr_opt_len << 3), PBUF_RAM);
+ if (p == NULL) {
+ ND6_STATS_INC(nd6.memerr);
+ return ERR_BUF;
+ }
+
+ /* Set fields. */
+ rs_hdr = (struct rs_header *)p->payload;
+
+ rs_hdr->type = ICMP6_TYPE_RS;
+ rs_hdr->code = 0;
+ rs_hdr->chksum = 0;
+ rs_hdr->reserved = 0;
+
+ if (src_addr != IP6_ADDR_ANY6) {
+ /* Include our hw address. */
+ lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct rs_header));
+ lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR;
+ lladdr_opt->length = (u8_t)lladdr_opt_len;
+ SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len);
+ }
+
+#if CHECKSUM_GEN_ICMP6
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
+ rs_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr,
+ &multicast_address);
+ }
+#endif /* CHECKSUM_GEN_ICMP6 */
+
+ /* Send the packet out. */
+ ND6_STATS_INC(nd6.xmit);
+
+ err = ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, &multicast_address,
+ ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif);
+ pbuf_free(p);
+
+ return err;
+}
+#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */
+
+/**
+ * Search for a neighbor cache entry
+ *
+ * @param ip6addr the IPv6 address of the neighbor
+ * @return The neighbor cache entry index that matched, -1 if no
+ * entry is found
+ */
+static s8_t
+nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr)
+{
+ s8_t i;
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+ if (ip6_addr_cmp(ip6addr, &(neighbor_cache[i].next_hop_address))) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+/**
+ * Create a new neighbor cache entry.
+ *
+ * If no unused entry is found, an old entry is recycled according to an
+ * ad-hoc "age" heuristic.
+ *
+ * @return The neighbor cache entry index that was created, -1 if no
+ * entry could be created
+ */
+static s8_t
+nd6_new_neighbor_cache_entry(void)
+{
+ s8_t i;
+ s8_t j;
+ u32_t time;
+
+ /* First, try to find an empty entry. */
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+ if (neighbor_cache[i].state == ND6_NO_ENTRY) {
+ return i;
+ }
+ }
+
+  /* We need to recycle an entry. In general, do not recycle an entry that is a router. */
+
+ /* Next, try to find a Stale entry. */
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+ if ((neighbor_cache[i].state == ND6_STALE) &&
+ (!neighbor_cache[i].isrouter)) {
+ nd6_free_neighbor_cache_entry(i);
+ return i;
+ }
+ }
+
+ /* Next, try to find a Probe entry. */
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+ if ((neighbor_cache[i].state == ND6_PROBE) &&
+ (!neighbor_cache[i].isrouter)) {
+ nd6_free_neighbor_cache_entry(i);
+ return i;
+ }
+ }
+
+ /* Next, try to find a Delayed entry. */
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+ if ((neighbor_cache[i].state == ND6_DELAY) &&
+ (!neighbor_cache[i].isrouter)) {
+ nd6_free_neighbor_cache_entry(i);
+ return i;
+ }
+ }
+
+ /* Next, try to find the oldest reachable entry. */
+ time = 0xfffffffful;
+ j = -1;
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+ if ((neighbor_cache[i].state == ND6_REACHABLE) &&
+ (!neighbor_cache[i].isrouter)) {
+ if (neighbor_cache[i].counter.reachable_time < time) {
+ j = i;
+ time = neighbor_cache[i].counter.reachable_time;
+ }
+ }
+ }
+ if (j >= 0) {
+ nd6_free_neighbor_cache_entry(j);
+ return j;
+ }
+
+ /* Next, find oldest incomplete entry without queued packets. */
+ time = 0;
+ j = -1;
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+    if ((neighbor_cache[i].q == NULL) &&
+ (neighbor_cache[i].state == ND6_INCOMPLETE) &&
+ (!neighbor_cache[i].isrouter)) {
+ if (neighbor_cache[i].counter.probes_sent >= time) {
+ j = i;
+ time = neighbor_cache[i].counter.probes_sent;
+ }
+ }
+ }
+ if (j >= 0) {
+ nd6_free_neighbor_cache_entry(j);
+ return j;
+ }
+
+ /* Next, find oldest incomplete entry with queued packets. */
+ time = 0;
+ j = -1;
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+ if ((neighbor_cache[i].state == ND6_INCOMPLETE) &&
+ (!neighbor_cache[i].isrouter)) {
+ if (neighbor_cache[i].counter.probes_sent >= time) {
+ j = i;
+ time = neighbor_cache[i].counter.probes_sent;
+ }
+ }
+ }
+ if (j >= 0) {
+ nd6_free_neighbor_cache_entry(j);
+ return j;
+ }
+
+ /* No more entries to try. */
+ return -1;
+}
+
+/**
+ * Will free any resources associated with a neighbor cache
+ * entry, and will mark it as unused.
+ *
+ * @param i the neighbor cache entry index to free
+ */
+static void
+nd6_free_neighbor_cache_entry(s8_t i)
+{
+ if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) {
+ return;
+ }
+ if (neighbor_cache[i].isrouter) {
+ /* isrouter needs to be cleared before deleting a neighbor cache entry */
+ return;
+ }
+
+ /* Free any queued packets. */
+ if (neighbor_cache[i].q != NULL) {
+ nd6_free_q(neighbor_cache[i].q);
+ neighbor_cache[i].q = NULL;
+ }
+
+ neighbor_cache[i].state = ND6_NO_ENTRY;
+ neighbor_cache[i].isrouter = 0;
+ neighbor_cache[i].netif = NULL;
+ neighbor_cache[i].counter.reachable_time = 0;
+ ip6_addr_set_zero(&(neighbor_cache[i].next_hop_address));
+}
+
+/**
+ * Search for a destination cache entry
+ *
+ * @param ip6addr the IPv6 address of the destination
+ * @return The destination cache entry index that matched, -1 if no
+ * entry is found
+ */
+static s16_t
+nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr)
+{
+ s16_t i;
+
+ IP6_ADDR_ZONECHECK(ip6addr);
+
+ for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) {
+ if (ip6_addr_cmp(ip6addr, &(destination_cache[i].destination_addr))) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+/**
+ * Create a new destination cache entry. If no unused entry is found,
+ * the oldest entry is recycled.
+ *
+ * @return The destination cache entry index that was created, -1 if no
+ * entry was created
+ */
+static s16_t
+nd6_new_destination_cache_entry(void)
+{
+ s16_t i, j;
+ u32_t age;
+
+ /* Find an empty entry. */
+ for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) {
+ if (ip6_addr_isany(&(destination_cache[i].destination_addr))) {
+ return i;
+ }
+ }
+
+ /* Find oldest entry. */
+ age = 0;
+ j = LWIP_ND6_NUM_DESTINATIONS - 1;
+ for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) {
+    if (destination_cache[i].age > age) {
+      age = destination_cache[i].age;
+      j = i;
+ }
+ }
+
+ return j;
+}
+
+/**
+ * Clear the destination cache.
+ *
+ * This operation may be necessary for consistency in light of changing
+ * local addresses and/or use of the gateway hook.
+ */
+void
+nd6_clear_destination_cache(void)
+{
+ int i;
+
+ for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) {
+ ip6_addr_set_any(&destination_cache[i].destination_addr);
+ }
+}
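+
+/* Usage sketch: code that changes routing-relevant state (e.g. a custom
+ * LWIP_HOOK_ND6_GET_GW implementation switching to a different gateway) can
+ * force re-resolution of all destinations with a single call:
+ *
+ *   nd6_clear_destination_cache();  // next output re-runs next-hop selection
+ */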
+
+/**
+ * Determine whether an address matches an on-link prefix or the subnet of a
+ * statically assigned address.
+ *
+ * @param ip6addr the IPv6 address to match
+ * @return 1 if the address is on-link, 0 otherwise
+ */
+static int
+nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif)
+{
+ s8_t i;
+
+ /* Check to see if the address matches an on-link prefix. */
+ for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) {
+ if ((prefix_list[i].netif == netif) &&
+ (prefix_list[i].invalidation_timer > 0) &&
+ ip6_addr_netcmp(ip6addr, &(prefix_list[i].prefix))) {
+ return 1;
+ }
+ }
+ /* Check to see if address prefix matches a manually configured (= static)
+ * address. Static addresses have an implied /64 subnet assignment. Dynamic
+ * addresses (from autoconfiguration) have no implied subnet assignment, and
+ * are thus effectively /128 assignments. See RFC 5942 for more on this. */
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) &&
+ netif_ip6_addr_isstatic(netif, i) &&
+ ip6_addr_netcmp(ip6addr, netif_ip6_addr(netif, i))) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Select a default router for a destination.
+ *
+ * This function is used both for routing and for finding a next-hop target for
+ * a packet. In the former case, the given netif is NULL, and the returned
+ * router entry must be for a netif suitable for sending packets (up, link up).
+ * In the latter case, the given netif is not NULL and restricts router choice.
+ *
+ * @param ip6addr the destination address
+ * @param netif the netif for the outgoing packet, if known
+ * @return the default router entry index, or -1 if no suitable
+ * router is found
+ */
+static s8_t
+nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif)
+{
+ struct netif *router_netif;
+ s8_t i, j, valid_router;
+ static s8_t last_router;
+
+ LWIP_UNUSED_ARG(ip6addr); /* @todo match preferred routes!! (must implement ND6_OPTION_TYPE_ROUTE_INFO) */
+
+ /* @todo: implement default router preference */
+
+ /* Look for valid routers. A reachable router is preferred. */
+ valid_router = -1;
+ for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) {
+    /* Is the router netif both set and appropriate? */
+ if (default_router_list[i].neighbor_entry != NULL) {
+ router_netif = default_router_list[i].neighbor_entry->netif;
+ if ((router_netif != NULL) && (netif != NULL ? netif == router_netif :
+ (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) {
+ /* Is the router valid, i.e., reachable or probably reachable as per
+ * RFC 4861 Sec. 6.3.6? Note that we will never return a router that
+ * has no neighbor cache entry, due to the netif association tests. */
+ if (default_router_list[i].neighbor_entry->state != ND6_INCOMPLETE) {
+ /* Is the router known to be reachable? */
+ if (default_router_list[i].neighbor_entry->state == ND6_REACHABLE) {
+ return i; /* valid and reachable - done! */
+ } else if (valid_router < 0) {
+ valid_router = i; /* valid but not known to be reachable */
+ }
+ }
+ }
+ }
+ }
+ if (valid_router >= 0) {
+ return valid_router;
+ }
+
+ /* Look for any router for which we have any information at all. */
+ /* last_router is used for round-robin selection of incomplete routers, as
+ * recommended in RFC 4861 Sec. 6.3.6 point (2). Advance only when picking a
+ * route, to select the same router as next-hop target in the common case. */
+ if ((netif == NULL) && (++last_router >= LWIP_ND6_NUM_ROUTERS)) {
+ last_router = 0;
+ }
+ i = last_router;
+ for (j = 0; j < LWIP_ND6_NUM_ROUTERS; j++) {
+ if (default_router_list[i].neighbor_entry != NULL) {
+ router_netif = default_router_list[i].neighbor_entry->netif;
+ if ((router_netif != NULL) && (netif != NULL ? netif == router_netif :
+ (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) {
+ return i;
+ }
+ }
+ if (++i >= LWIP_ND6_NUM_ROUTERS) {
+ i = 0;
+ }
+ }
+
+ /* no suitable router found. */
+ return -1;
+}
+
+/**
+ * Find a router-announced route to the given destination. This route may be
+ * based on an on-link prefix or a default router.
+ *
+ * If a suitable route is found, the returned netif is guaranteed to be in a
+ * suitable state (up, link up) to be used for packet transmission.
+ *
+ * @param ip6addr the destination IPv6 address
+ * @return the netif to use for the destination, or NULL if none found
+ */
+struct netif *
+nd6_find_route(const ip6_addr_t *ip6addr)
+{
+ struct netif *netif;
+ s8_t i;
+
+ /* @todo decide if it makes sense to check the destination cache first */
+
+ /* Check if there is a matching on-link prefix. There may be multiple
+ * matches. Pick the first one that is associated with a suitable netif. */
+ for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) {
+ netif = prefix_list[i].netif;
+ if ((netif != NULL) && ip6_addr_netcmp(&prefix_list[i].prefix, ip6addr) &&
+ netif_is_up(netif) && netif_is_link_up(netif)) {
+ return netif;
+ }
+ }
+
+ /* No on-link prefix match. Find a router that can forward the packet. */
+ i = nd6_select_router(ip6addr, NULL);
+ if (i >= 0) {
+ LWIP_ASSERT("selected router must have a neighbor entry",
+ default_router_list[i].neighbor_entry != NULL);
+ return default_router_list[i].neighbor_entry->netif;
+ }
+
+ return NULL;
+}
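+
+/* Caller sketch: ip6_route() consults this as one of its route-selection
+ * steps; simplified, the pattern is:
+ *
+ *   struct netif *route_netif = nd6_find_route(dest);
+ *   if (route_netif != NULL) {
+ *     return route_netif;  // guaranteed up and link-up per the contract above
+ *   }
+ *   // otherwise fall through to the remaining routing rules
+ */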
+
+/**
+ * Find an entry for a default router.
+ *
+ * @param router_addr the IPv6 address of the router
+ * @param netif the netif on which the router is found, if known
+ * @return the index of the router entry, or -1 if not found
+ */
+static s8_t
+nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif)
+{
+ s8_t i;
+
+ IP6_ADDR_ZONECHECK_NETIF(router_addr, netif);
+
+ /* Look for router. */
+ for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) {
+ if ((default_router_list[i].neighbor_entry != NULL) &&
+ ((netif != NULL) ? netif == default_router_list[i].neighbor_entry->netif : 1) &&
+ ip6_addr_cmp(router_addr, &(default_router_list[i].neighbor_entry->next_hop_address))) {
+ return i;
+ }
+ }
+
+ /* router not found. */
+ return -1;
+}
+
+/**
+ * Create a new entry for a default router.
+ *
+ * @param router_addr the IPv6 address of the router
+ * @param netif the netif on which the router is connected, if known
+ * @return the index in the router table, or -1 if the entry could not be created
+ */
+static s8_t
+nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif)
+{
+ s8_t router_index;
+ s8_t free_router_index;
+ s8_t neighbor_index;
+
+ IP6_ADDR_ZONECHECK_NETIF(router_addr, netif);
+
+ /* Do we have a neighbor entry for this router? */
+ neighbor_index = nd6_find_neighbor_cache_entry(router_addr);
+ if (neighbor_index < 0) {
+ /* Create a neighbor entry for this router. */
+ neighbor_index = nd6_new_neighbor_cache_entry();
+ if (neighbor_index < 0) {
+ /* Could not create neighbor entry for this router. */
+ return -1;
+ }
+ ip6_addr_set(&(neighbor_cache[neighbor_index].next_hop_address), router_addr);
+ neighbor_cache[neighbor_index].netif = netif;
+ neighbor_cache[neighbor_index].q = NULL;
+ neighbor_cache[neighbor_index].state = ND6_INCOMPLETE;
+ neighbor_cache[neighbor_index].counter.probes_sent = 1;
+ nd6_send_neighbor_cache_probe(&neighbor_cache[neighbor_index], ND6_SEND_FLAG_MULTICAST_DEST);
+ }
+
+ /* Mark neighbor as router. */
+ neighbor_cache[neighbor_index].isrouter = 1;
+
+ /* Look for empty entry. */
+ free_router_index = LWIP_ND6_NUM_ROUTERS;
+ for (router_index = LWIP_ND6_NUM_ROUTERS - 1; router_index >= 0; router_index--) {
+ /* check if router already exists (this is a special case for 2 netifs on the same subnet
+ - e.g. wifi and cable) */
+    if (default_router_list[router_index].neighbor_entry == &(neighbor_cache[neighbor_index])) {
+ return router_index;
+ }
+ if (default_router_list[router_index].neighbor_entry == NULL) {
+ /* remember lowest free index to create a new entry */
+ free_router_index = router_index;
+ }
+ }
+ if (free_router_index < LWIP_ND6_NUM_ROUTERS) {
+ default_router_list[free_router_index].neighbor_entry = &(neighbor_cache[neighbor_index]);
+ return free_router_index;
+ }
+
+ /* Could not create a router entry. */
+
+ /* Mark neighbor entry as not-router. Entry might be useful as neighbor still. */
+ neighbor_cache[neighbor_index].isrouter = 0;
+
+ /* router not found. */
+ return -1;
+}
+
+/**
+ * Find the cached entry for an on-link prefix.
+ *
+ * @param prefix the IPv6 prefix that is on-link
+ * @param netif the netif on which the prefix is on-link
+ * @return the index in the prefix table, or -1 if not found
+ */
+static s8_t
+nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif)
+{
+ s8_t i;
+
+ /* Look for prefix in list. */
+ for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) {
+ if ((ip6_addr_netcmp(&(prefix_list[i].prefix), prefix)) &&
+ (prefix_list[i].netif == netif)) {
+ return i;
+ }
+ }
+
+ /* Entry not available. */
+ return -1;
+}
+
+/**
+ * Creates a new entry for an on-link prefix.
+ *
+ * @param prefix the IPv6 prefix that is on-link
+ * @param netif the netif on which the prefix is on-link
+ * @return the index in the prefix table, or -1 if it could not be created
+ */
+static s8_t
+nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif)
+{
+ s8_t i;
+
+ /* Create new entry. */
+ for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) {
+ if ((prefix_list[i].netif == NULL) ||
+ (prefix_list[i].invalidation_timer == 0)) {
+ /* Found empty prefix entry. */
+ prefix_list[i].netif = netif;
+ ip6_addr_set(&(prefix_list[i].prefix), prefix);
+ return i;
+ }
+ }
+
+ /* Entry not available. */
+ return -1;
+}
+
+/**
+ * Determine the next hop for a destination. Will determine if the
+ * destination is on-link, else a suitable on-link router is selected.
+ *
+ * The last entry index is cached for fast entry search.
+ *
+ * @param ip6addr the destination address
+ * @param netif the netif on which the packet will be sent
+ * @return the index of the neighbor cache entry for the next hop, ERR_RTE
+ *         if no suitable next hop was found, ERR_MEM if no cache entry
+ * could be created
+ */
+static s8_t
+nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif)
+{
+#ifdef LWIP_HOOK_ND6_GET_GW
+ const ip6_addr_t *next_hop_addr;
+#endif /* LWIP_HOOK_ND6_GET_GW */
+ s8_t i;
+ s16_t dst_idx;
+
+ IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif);
+
+#if LWIP_NETIF_HWADDRHINT
+ if (netif->hints != NULL) {
+ /* per-pcb cached entry was given */
+ netif_addr_idx_t addr_hint = netif->hints->addr_hint;
+ if (addr_hint < LWIP_ND6_NUM_DESTINATIONS) {
+ nd6_cached_destination_index = addr_hint;
+ }
+ }
+#endif /* LWIP_NETIF_HWADDRHINT */
+
+ /* Look for ip6addr in destination cache. */
+ if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) {
+ /* the cached entry index is the right one! */
+ /* do nothing. */
+ ND6_STATS_INC(nd6.cachehit);
+ } else {
+ /* Search destination cache. */
+ dst_idx = nd6_find_destination_cache_entry(ip6addr);
+ if (dst_idx >= 0) {
+ /* found destination entry. make it our new cached index. */
+ LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX);
+ nd6_cached_destination_index = (netif_addr_idx_t)dst_idx;
+ } else {
+ /* Not found. Create a new destination entry. */
+ dst_idx = nd6_new_destination_cache_entry();
+ if (dst_idx >= 0) {
+ /* got new destination entry. make it our new cached index. */
+ LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX);
+ nd6_cached_destination_index = (netif_addr_idx_t)dst_idx;
+ } else {
+ /* Could not create a destination cache entry. */
+ return ERR_MEM;
+ }
+
+ /* Copy dest address to destination cache. */
+ ip6_addr_set(&(destination_cache[nd6_cached_destination_index].destination_addr), ip6addr);
+
+ /* Now find the next hop. is it a neighbor? */
+ if (ip6_addr_islinklocal(ip6addr) ||
+ nd6_is_prefix_in_netif(ip6addr, netif)) {
+ /* Destination in local link. */
+ destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif);
+ ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, destination_cache[nd6_cached_destination_index].destination_addr);
+#ifdef LWIP_HOOK_ND6_GET_GW
+ } else if ((next_hop_addr = LWIP_HOOK_ND6_GET_GW(netif, ip6addr)) != NULL) {
+ /* Next hop for destination provided by hook function. */
+ destination_cache[nd6_cached_destination_index].pmtu = netif->mtu;
+ ip6_addr_set(&destination_cache[nd6_cached_destination_index].next_hop_addr, next_hop_addr);
+#endif /* LWIP_HOOK_ND6_GET_GW */
+ } else {
+ /* We need to select a router. */
+ i = nd6_select_router(ip6addr, netif);
+ if (i < 0) {
+ /* No router found. */
+ ip6_addr_set_any(&(destination_cache[nd6_cached_destination_index].destination_addr));
+ return ERR_RTE;
+ }
+ destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); /* Start with netif mtu, correct through ICMPv6 if necessary */
+ ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, default_router_list[i].neighbor_entry->next_hop_address);
+ }
+ }
+ }
+
+#if LWIP_NETIF_HWADDRHINT
+ if (netif->hints != NULL) {
+ /* per-pcb cached entry was given */
+ netif->hints->addr_hint = nd6_cached_destination_index;
+ }
+#endif /* LWIP_NETIF_HWADDRHINT */
+
+ /* Look in neighbor cache for the next-hop address. */
+ if (ip6_addr_cmp(&(destination_cache[nd6_cached_destination_index].next_hop_addr),
+ &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) {
+ /* Cache hit. */
+ /* Do nothing. */
+ ND6_STATS_INC(nd6.cachehit);
+ } else {
+ i = nd6_find_neighbor_cache_entry(&(destination_cache[nd6_cached_destination_index].next_hop_addr));
+ if (i >= 0) {
+ /* Found a matching record, make it new cached entry. */
+ nd6_cached_neighbor_index = i;
+ } else {
+ /* Neighbor not in cache. Make a new entry. */
+ i = nd6_new_neighbor_cache_entry();
+ if (i >= 0) {
+ /* got new neighbor entry. make it our new cached index. */
+ nd6_cached_neighbor_index = i;
+ } else {
+ /* Could not create a neighbor cache entry. */
+ return ERR_MEM;
+ }
+
+ /* Initialize fields. */
+ ip6_addr_copy(neighbor_cache[i].next_hop_address,
+ destination_cache[nd6_cached_destination_index].next_hop_addr);
+ neighbor_cache[i].isrouter = 0;
+ neighbor_cache[i].netif = netif;
+ neighbor_cache[i].state = ND6_INCOMPLETE;
+ neighbor_cache[i].counter.probes_sent = 1;
+ nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST);
+ }
+ }
+
+ /* Reset this destination's age. */
+ destination_cache[nd6_cached_destination_index].age = 0;
+
+ return nd6_cached_neighbor_index;
+}
+
+/**
+ * Queue a packet for a neighbor.
+ *
+ * @param neighbor_index the index in the neighbor cache table
+ * @param q packet to be queued
+ * @return ERR_OK if succeeded, ERR_MEM if out of memory
+ */
+static err_t
+nd6_queue_packet(s8_t neighbor_index, struct pbuf *q)
+{
+ err_t result = ERR_MEM;
+ struct pbuf *p;
+ int copy_needed = 0;
+#if LWIP_ND6_QUEUEING
+ struct nd6_q_entry *new_entry, *r;
+#endif /* LWIP_ND6_QUEUEING */
+
+ if ((neighbor_index < 0) || (neighbor_index >= LWIP_ND6_NUM_NEIGHBORS)) {
+ return ERR_ARG;
+ }
+
+  /* If q includes a pbuf that must be copied, we have to copy the whole chain
+ * into a new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */
+ p = q;
+ while (p) {
+ if (PBUF_NEEDS_COPY(p)) {
+ copy_needed = 1;
+ break;
+ }
+ p = p->next;
+ }
+ if (copy_needed) {
+ /* copy the whole packet into new pbufs */
+ p = pbuf_clone(PBUF_LINK, PBUF_RAM, q);
+ while ((p == NULL) && (neighbor_cache[neighbor_index].q != NULL)) {
+ /* Free oldest packet (as per RFC recommendation) */
+#if LWIP_ND6_QUEUEING
+ r = neighbor_cache[neighbor_index].q;
+ neighbor_cache[neighbor_index].q = r->next;
+ r->next = NULL;
+ nd6_free_q(r);
+#else /* LWIP_ND6_QUEUEING */
+ pbuf_free(neighbor_cache[neighbor_index].q);
+ neighbor_cache[neighbor_index].q = NULL;
+#endif /* LWIP_ND6_QUEUEING */
+ p = pbuf_clone(PBUF_LINK, PBUF_RAM, q);
+ }
+ } else {
+ /* referencing the old pbuf is enough */
+ p = q;
+ pbuf_ref(p);
+ }
+ /* packet was copied/ref'd? */
+ if (p != NULL) {
+ /* queue packet ... */
+#if LWIP_ND6_QUEUEING
+ /* allocate a new nd6 queue entry */
+ new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE);
+ if ((new_entry == NULL) && (neighbor_cache[neighbor_index].q != NULL)) {
+ /* Free oldest packet (as per RFC recommendation) */
+ r = neighbor_cache[neighbor_index].q;
+ neighbor_cache[neighbor_index].q = r->next;
+ r->next = NULL;
+ nd6_free_q(r);
+ new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE);
+ }
+ if (new_entry != NULL) {
+ new_entry->next = NULL;
+ new_entry->p = p;
+ if (neighbor_cache[neighbor_index].q != NULL) {
+ /* queue was already existent, append the new entry to the end */
+ r = neighbor_cache[neighbor_index].q;
+ while (r->next != NULL) {
+ r = r->next;
+ }
+ r->next = new_entry;
+ } else {
+ /* queue did not exist, first item in queue */
+ neighbor_cache[neighbor_index].q = new_entry;
+ }
+ LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index));
+ result = ERR_OK;
+ } else {
+ /* the pool MEMP_ND6_QUEUE is empty */
+ pbuf_free(p);
+ LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)p));
+ /* { result == ERR_MEM } through initialization */
+ }
+#else /* LWIP_ND6_QUEUEING */
+ /* Queue a single packet. If an older packet is already queued, free it as per RFC. */
+ if (neighbor_cache[neighbor_index].q != NULL) {
+ pbuf_free(neighbor_cache[neighbor_index].q);
+ }
+ neighbor_cache[neighbor_index].q = p;
+ LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index));
+ result = ERR_OK;
+#endif /* LWIP_ND6_QUEUEING */
+ } else {
+ LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)q));
+ /* { result == ERR_MEM } through initialization */
+ }
+
+ return result;
+}
+
+#if LWIP_ND6_QUEUEING
+/**
+ * Free a complete queue of nd6 q entries
+ *
+ * @param q a queue of nd6_q_entry to free
+ */
+static void
+nd6_free_q(struct nd6_q_entry *q)
+{
+ struct nd6_q_entry *r;
+ LWIP_ASSERT("q != NULL", q != NULL);
+ LWIP_ASSERT("q->p != NULL", q->p != NULL);
+ while (q) {
+ r = q;
+ q = q->next;
+ LWIP_ASSERT("r->p != NULL", (r->p != NULL));
+ pbuf_free(r->p);
+ memp_free(MEMP_ND6_QUEUE, r);
+ }
+}
+#endif /* LWIP_ND6_QUEUEING */
+
+/**
+ * Send queued packets for a neighbor
+ *
+ * @param i the neighbor to send packets to
+ */
+static void
+nd6_send_q(s8_t i)
+{
+ struct ip6_hdr *ip6hdr;
+ ip6_addr_t dest;
+#if LWIP_ND6_QUEUEING
+ struct nd6_q_entry *q;
+#endif /* LWIP_ND6_QUEUEING */
+
+ if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) {
+ return;
+ }
+
+#if LWIP_ND6_QUEUEING
+ while (neighbor_cache[i].q != NULL) {
+ /* remember first in queue */
+ q = neighbor_cache[i].q;
+ /* pop first item off the queue */
+ neighbor_cache[i].q = q->next;
+ /* Get ipv6 header. */
+ ip6hdr = (struct ip6_hdr *)(q->p->payload);
+ /* Create an aligned copy. */
+ ip6_addr_copy_from_packed(dest, ip6hdr->dest);
+ /* Restore the zone, if applicable. */
+ ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif);
+ /* send the queued IPv6 packet */
+ (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, q->p, &dest);
+ /* free the queued IP packet */
+ pbuf_free(q->p);
+ /* now queue entry can be freed */
+ memp_free(MEMP_ND6_QUEUE, q);
+ }
+#else /* LWIP_ND6_QUEUEING */
+ if (neighbor_cache[i].q != NULL) {
+ /* Get ipv6 header. */
+ ip6hdr = (struct ip6_hdr *)(neighbor_cache[i].q->payload);
+ /* Create an aligned copy. */
+ ip6_addr_copy_from_packed(dest, ip6hdr->dest);
+ /* Restore the zone, if applicable. */
+ ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif);
+ /* send the queued IPv6 packet */
+ (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, neighbor_cache[i].q, &dest);
+ /* free the queued IP packet */
+ pbuf_free(neighbor_cache[i].q);
+ neighbor_cache[i].q = NULL;
+ }
+#endif /* LWIP_ND6_QUEUEING */
+}
+
+/**
+ * A packet is to be transmitted to a specific IPv6 destination on a specific
+ * interface. Check if we can find the hardware address of the next hop to use
+ * for the packet. If so, give the hardware address to the caller, which should
+ * use it to send the packet right away. Otherwise, enqueue the packet for
+ * later transmission while looking up the hardware address, if possible.
+ *
+ * As such, this function returns one of three different possible results:
+ *
+ * - ERR_OK with a non-NULL 'hwaddrp': the caller should send the packet now.
+ * - ERR_OK with a NULL 'hwaddrp': the packet has been enqueued for later.
+ * - not ERR_OK: something went wrong; forward the error upward in the stack.
+ *
+ * @param netif The lwIP network interface on which the IP packet will be sent.
+ * @param q The pbuf(s) containing the IP packet to be sent.
+ * @param ip6addr The destination IPv6 address of the packet.
+ * @param hwaddrp On success, filled with a pointer to a HW address or NULL (meaning
+ * the packet has been queued).
+ * @return
+ * - ERR_OK on success, ERR_RTE if no route was found for the packet,
+ * or ERR_MEM if low memory conditions prohibit sending the packet at all.
+ */
+err_t
+nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp)
+{
+ s8_t i;
+
+ /* Get next hop record. */
+ i = nd6_get_next_hop_entry(ip6addr, netif);
+ if (i < 0) {
+ /* failed to get a next hop neighbor record. */
+ return i;
+ }
+
+ /* Now that we have a destination record, send or queue the packet. */
+ if (neighbor_cache[i].state == ND6_STALE) {
+ /* Switch to delay state. */
+ neighbor_cache[i].state = ND6_DELAY;
+ neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL;
+ }
+ /* @todo should we send or queue if PROBE? send for now, to let unicast NS pass. */
+ if ((neighbor_cache[i].state == ND6_REACHABLE) ||
+ (neighbor_cache[i].state == ND6_DELAY) ||
+ (neighbor_cache[i].state == ND6_PROBE)) {
+
+ /* Tell the caller to send out the packet now. */
+ *hwaddrp = neighbor_cache[i].lladdr;
+ return ERR_OK;
+ }
+
+ /* We should queue packet on this interface. */
+ *hwaddrp = NULL;
+ return nd6_queue_packet(i, q);
+}
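+
+/* Caller sketch, modeled on the Ethernet output path (ethip6_output());
+ * simplified here:
+ *
+ *   const u8_t *hwaddr;
+ *   err_t err = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr);
+ *   if (err != ERR_OK) {
+ *     return err;      // e.g. ERR_RTE (no route) or ERR_MEM
+ *   }
+ *   if (hwaddr == NULL) {
+ *     return ERR_OK;   // queued; sent from nd6_send_q() once ND resolves
+ *   }
+ *   // hwaddr points at the resolved link-layer address: transmit now
+ */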
+
+
+/**
+ * Get the Path MTU for a destination.
+ *
+ * @param ip6addr the destination address
+ * @param netif the netif on which the packet will be sent
+ * @return the Path MTU, if known, or the netif default MTU
+ */
+u16_t
+nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif)
+{
+ s16_t i;
+
+ i = nd6_find_destination_cache_entry(ip6addr);
+ if (i >= 0) {
+ if (destination_cache[i].pmtu > 0) {
+ return destination_cache[i].pmtu;
+ }
+ }
+
+ if (netif != NULL) {
+ return netif_mtu6(netif);
+ }
+
+  return 1280; /* minimum IPv6 link MTU (RFC 8200) */
+}
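+
+/* For example, TCP bounds its effective send MSS for IPv6 destinations with
+ * this value (see tcp_eff_send_mss_netif() in tcp.c); roughly:
+ *
+ *   u16_t mtu = nd6_get_destination_mtu(ip6addr, netif);
+ *   u16_t mss_max = (u16_t)(mtu - IP6_HLEN - TCP_HLEN);
+ */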
+
+
+#if LWIP_ND6_TCP_REACHABILITY_HINTS
+/**
+ * Provide the Neighbor discovery process with a hint that a
+ * destination is reachable. Called by tcp_receive when ACKs are
+ * received or sent (as per RFC). This is useful to avoid sending
+ * NS messages every 30 seconds.
+ *
+ * @param ip6addr the destination address which is known to be reachable
+ * by an upper layer protocol (TCP)
+ */
+void
+nd6_reachability_hint(const ip6_addr_t *ip6addr)
+{
+ s8_t i;
+ s16_t dst_idx;
+
+ /* Find destination in cache. */
+ if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) {
+ dst_idx = nd6_cached_destination_index;
+ ND6_STATS_INC(nd6.cachehit);
+ } else {
+ dst_idx = nd6_find_destination_cache_entry(ip6addr);
+ }
+ if (dst_idx < 0) {
+ return;
+ }
+
+ /* Find next hop neighbor in cache. */
+ if (ip6_addr_cmp(&(destination_cache[dst_idx].next_hop_addr), &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) {
+ i = nd6_cached_neighbor_index;
+ ND6_STATS_INC(nd6.cachehit);
+ } else {
+ i = nd6_find_neighbor_cache_entry(&(destination_cache[dst_idx].next_hop_addr));
+ }
+ if (i < 0) {
+ return;
+ }
+
+ /* For safety: don't set as reachable if we don't have a LL address yet. Misuse protection. */
+ if (neighbor_cache[i].state == ND6_INCOMPLETE || neighbor_cache[i].state == ND6_NO_ENTRY) {
+ return;
+ }
+
+ /* Set reachability state. */
+ neighbor_cache[i].state = ND6_REACHABLE;
+ neighbor_cache[i].counter.reachable_time = reachable_time;
+}
+#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */
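+
+/* For reference, the TCP input path feeds this hint roughly as follows when
+ * the option is enabled (a sketch of the tcp_in.c pattern, not verbatim):
+ *
+ *   #if LWIP_ND6_TCP_REACHABILITY_HINTS
+ *   if (ip_current_is_v6()) {
+ *     nd6_reachability_hint(ip6_current_src_addr());
+ *   }
+ *   #endif
+ */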
+
+/**
+ * Remove all prefix, neighbor_cache and router entries of the specified netif.
+ *
+ * @param netif points to a network interface
+ */
+void
+nd6_cleanup_netif(struct netif *netif)
+{
+ u8_t i;
+ s8_t router_index;
+ for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) {
+ if (prefix_list[i].netif == netif) {
+ prefix_list[i].netif = NULL;
+ }
+ }
+ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) {
+ if (neighbor_cache[i].netif == netif) {
+ for (router_index = 0; router_index < LWIP_ND6_NUM_ROUTERS; router_index++) {
+ if (default_router_list[router_index].neighbor_entry == &neighbor_cache[i]) {
+ default_router_list[router_index].neighbor_entry = NULL;
+ default_router_list[router_index].flags = 0;
+ }
+ }
+ neighbor_cache[i].isrouter = 0;
+ nd6_free_neighbor_cache_entry(i);
+ }
+ }
+ /* Clear the destination cache, since many entries may now have become
+ * invalid for one of several reasons. As destination cache entries have no
+ * netif association, use a sledgehammer approach (this can be improved). */
+ nd6_clear_destination_cache();
+}
+
+#if LWIP_IPV6_MLD
+/**
+ * The state of a local IPv6 address entry is about to change. If needed, join
+ * or leave the solicited-node multicast group for the address.
+ *
+ * @param netif The netif that owns the address.
+ * @param addr_idx The index of the address.
+ * @param new_state The new (IP6_ADDR_) state for the address.
+ */
+void
+nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state)
+{
+ u8_t old_state, old_member, new_member;
+
+ old_state = netif_ip6_addr_state(netif, addr_idx);
+
+ /* Determine whether we were, and should be, a member of the solicited-node
+ * multicast group for this address. For tentative addresses, the group is
+ * not joined until the address enters the TENTATIVE_1 (or VALID) state. */
+ old_member = (old_state != IP6_ADDR_INVALID && old_state != IP6_ADDR_DUPLICATED && old_state != IP6_ADDR_TENTATIVE);
+ new_member = (new_state != IP6_ADDR_INVALID && new_state != IP6_ADDR_DUPLICATED && new_state != IP6_ADDR_TENTATIVE);
+
+ if (old_member != new_member) {
+ ip6_addr_set_solicitednode(&multicast_address, netif_ip6_addr(netif, addr_idx)->addr[3]);
+ ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif);
+
+ if (new_member) {
+ mld6_joingroup_netif(netif, &multicast_address);
+ } else {
+ mld6_leavegroup_netif(netif, &multicast_address);
+ }
+ }
+}
+#endif /* LWIP_IPV6_MLD */
+
+/** Netif was added, set up, or reconnected (link up) */
+void
+nd6_restart_netif(struct netif *netif)
+{
+#if LWIP_IPV6_SEND_ROUTER_SOLICIT
+ /* Send Router Solicitation messages (see RFC 4861, ch. 6.3.7). */
+ netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT;
+#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */
+}
+
+#endif /* LWIP_IPV6 */
diff --git a/lwip/src/core/mem.c b/lwip/src/core/mem.c
new file mode 100644
index 0000000..315fb3c
--- /dev/null
+++ b/lwip/src/core/mem.c
@@ -0,0 +1,1017 @@
+/**
+ * @file
+ * Dynamic memory manager
+ *
+ * This is a lightweight replacement for the standard C library malloc().
+ *
+ * If you want to use the standard C library malloc() instead, define
+ * MEM_LIBC_MALLOC to 1 in your lwipopts.h
+ *
+ * To let mem_malloc() use pools (prevents fragmentation and is much faster than
+ * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
+ * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
+ * of pools like this (more pools can be added between _START and _END):
+ *
+ * Define three pools with sizes 256, 512, and 1512 bytes
+ * LWIP_MALLOC_MEMPOOL_START
+ * LWIP_MALLOC_MEMPOOL(20, 256)
+ * LWIP_MALLOC_MEMPOOL(10, 512)
+ * LWIP_MALLOC_MEMPOOL(5, 1512)
+ * LWIP_MALLOC_MEMPOOL_END
+ */
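+
+/*
+ * A matching lwipopts.h fragment enabling the pool-based allocator would look
+ * like this (a sketch using the standard option names referenced above):
+ *
+ *   #define MEM_USE_POOLS           1
+ *   #define MEMP_USE_CUSTOM_POOLS   1
+ */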
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ * Simon Goldschmidt
+ *
+ */
+
+#include "lwip/opt.h"
+#include "lwip/mem.h"
+#include "lwip/def.h"
+#include "lwip/sys.h"
+#include "lwip/stats.h"
+#include "lwip/err.h"
+
+#include <string.h>
+
+#if MEM_LIBC_MALLOC
+#include <stdlib.h> /* for malloc()/free() */
+#endif
+
+/* This is overridable for tests only... */
+#ifndef LWIP_MEM_ILLEGAL_FREE
+#define LWIP_MEM_ILLEGAL_FREE(msg) LWIP_ASSERT(msg, 0)
+#endif
+
+#define MEM_STATS_INC_LOCKED(x) SYS_ARCH_LOCKED(MEM_STATS_INC(x))
+#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y))
+#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y))
+
+#if MEM_OVERFLOW_CHECK
+#define MEM_SANITY_OFFSET MEM_SANITY_REGION_BEFORE_ALIGNED
+#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED)
+#else
+#define MEM_SANITY_OFFSET 0
+#define MEM_SANITY_OVERHEAD 0
+#endif
+
+#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
+/**
+ * Check if a mem element was victim of an overflow or underflow
+ * (e.g. the restricted area after/before it has been altered)
+ *
+ * @param p the mem element to check
+ * @param size allocated size of the element
+ * @param descr1 description of the element source shown on error
+ * @param descr2 description of the element source shown on error
+ */
+void
+mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2)
+{
+#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED
+ u16_t k;
+ u8_t *m;
+
+#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
+ m = (u8_t *)p + size;
+ for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) {
+ if (m[k] != 0xcd) {
+ char errstr[128];
+ snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2);
+ LWIP_ASSERT(errstr, 0);
+ }
+ }
+#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
+
+#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
+ m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
+ for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) {
+ if (m[k] != 0xcd) {
+ char errstr[128];
+ snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2);
+ LWIP_ASSERT(errstr, 0);
+ }
+ }
+#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */
+#else
+ LWIP_UNUSED_ARG(p);
+ LWIP_UNUSED_ARG(size);
+ LWIP_UNUSED_ARG(descr1);
+ LWIP_UNUSED_ARG(descr2);
+#endif
+}
+
+/**
+ * Initialize the restricted area of a mem element.
+ */
+void
+mem_overflow_init_raw(void *p, size_t size)
+{
+#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0
+ u8_t *m;
+#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
+ m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
+ memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED);
+#endif
+#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
+ m = (u8_t *)p + size;
+ memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED);
+#endif
+#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
+ LWIP_UNUSED_ARG(p);
+ LWIP_UNUSED_ARG(size);
+#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
+}
+#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */
+
+#if MEM_LIBC_MALLOC || MEM_USE_POOLS
+
+/** mem_init is not used when using pools instead of a heap or using
+ * C library malloc().
+ */
+void
+mem_init(void)
+{
+}
+
+/** mem_trim is not used when using pools instead of a heap or using
+ * C library malloc(): we can't free part of a pool element, and the stack
+ * does not support mem_trim() returning a different pointer.
+ */
+void *
+mem_trim(void *mem, mem_size_t size)
+{
+ LWIP_UNUSED_ARG(size);
+ return mem;
+}
+#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */
+
+#if MEM_LIBC_MALLOC
+/* lwIP heap implemented using C library malloc() */
+
+/* in case C library malloc() needs extra protection,
+ * allow these defines to be overridden.
+ */
+#ifndef mem_clib_free
+#define mem_clib_free free
+#endif
+#ifndef mem_clib_malloc
+#define mem_clib_malloc malloc
+#endif
+#ifndef mem_clib_calloc
+#define mem_clib_calloc calloc
+#endif
+
+#if LWIP_STATS && MEM_STATS
+#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t))
+#else
+#define MEM_LIBC_STATSHELPER_SIZE 0
+#endif
+
+/**
+ * Allocate a block of memory with a minimum of 'size' bytes.
+ *
+ * @param size is the minimum size of the requested block in bytes.
+ * @return pointer to allocated memory or NULL if no free memory was found.
+ *
+ * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
+ */
+void *
+mem_malloc(mem_size_t size)
+{
+ void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE);
+ if (ret == NULL) {
+ MEM_STATS_INC_LOCKED(err);
+ } else {
+ LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret);
+#if LWIP_STATS && MEM_STATS
+ *(mem_size_t *)ret = size;
+ ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE;
+ MEM_STATS_INC_USED_LOCKED(used, size);
+#endif
+ }
+ return ret;
+}
+
+/** Put memory back on the heap
+ *
+ * @param rmem is the pointer as returned by a previous call to mem_malloc()
+ */
+void
+mem_free(void *rmem)
+{
+ LWIP_ASSERT("rmem != NULL", (rmem != NULL));
+ LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
+#if LWIP_STATS && MEM_STATS
+ rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE;
+ MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem);
+#endif
+ mem_clib_free(rmem);
+}
+
+#elif MEM_USE_POOLS
+
+/* lwIP heap implemented with different sized pools */
+
+/**
+ * Allocate memory: determine the smallest pool that is big enough
+ * to contain an element of 'size' and get an element from that pool.
+ *
+ * @param size the size in bytes of the memory needed
+ * @return a pointer to the allocated memory or NULL if the pool is empty
+ */
+void *
+mem_malloc(mem_size_t size)
+{
+ void *ret;
+ struct memp_malloc_helper *element = NULL;
+ memp_t poolnr;
+ mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
+
+ for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
+ /* is this pool big enough to hold an element of the required size
+ plus a struct memp_malloc_helper that saves the pool this element came from? */
+ if (required_size <= memp_pools[poolnr]->size) {
+ element = (struct memp_malloc_helper *)memp_malloc(poolnr);
+ if (element == NULL) {
+ /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */
+#if MEM_USE_POOLS_TRY_BIGGER_POOL
+ /** Try a bigger pool if this one is empty! */
+ if (poolnr < MEMP_POOL_LAST) {
+ continue;
+ }
+#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
+ MEM_STATS_INC_LOCKED(err);
+ return NULL;
+ }
+ break;
+ }
+ }
+ if (poolnr > MEMP_POOL_LAST) {
+ LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
+ MEM_STATS_INC_LOCKED(err);
+ return NULL;
+ }
+
+ /* save the pool number this element came from */
+ element->poolnr = poolnr;
+ /* and return a pointer to the memory directly after the struct memp_malloc_helper */
+ ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
+
+#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS)
+ /* truncating to u16_t is safe because struct memp_desc::size is u16_t */
+ element->size = (u16_t)size;
+ MEM_STATS_INC_USED_LOCKED(used, element->size);
+#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */
+#if MEMP_OVERFLOW_CHECK
+ /* initialize unused memory (diff between requested size and selected pool's size) */
+ memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size);
+#endif /* MEMP_OVERFLOW_CHECK */
+ return ret;
+}
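+
+/* Layout sketch of a pool-backed allocation as built above: a
+ * struct memp_malloc_helper (recording poolnr and size) is placed in
+ * front of the user data, and mem_free() steps back over it to recover
+ * the pool number:
+ *
+ *   element from pool 'poolnr':
+ *   [ memp_malloc_helper | user data ... ]
+ *                        ^-- pointer returned by mem_malloc()
+ */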
+
+/**
+ * Free memory previously allocated by mem_malloc. Loads the pool number
+ * and calls memp_free with that pool number to put the element back into
+ * its pool
+ *
+ * @param rmem the memory element to free
+ */
+void
+mem_free(void *rmem)
+{
+ struct memp_malloc_helper *hmem;
+
+ LWIP_ASSERT("rmem != NULL", (rmem != NULL));
+ LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
+
+ /* get the original struct memp_malloc_helper */
+ /* cast through void* to get rid of alignment warnings */
+ hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));
+
+ LWIP_ASSERT("hmem != NULL", (hmem != NULL));
+ LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
+ LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
+
+ MEM_STATS_DEC_USED_LOCKED(used, hmem->size);
+#if MEMP_OVERFLOW_CHECK
+ {
+ u16_t i;
+ LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
+ hmem->size <= memp_pools[hmem->poolnr]->size);
+ /* check that unused memory remained untouched (diff between requested size and selected pool's size) */
+ for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
+ u8_t data = *((u8_t *)rmem + i);
+ LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
+ }
+ }
+#endif /* MEMP_OVERFLOW_CHECK */
+
+ /* and put it in the pool we saved earlier */
+ memp_free(hmem->poolnr, hmem);
+}
+
+#else /* MEM_USE_POOLS */
+/* lwIP replacement for your libc malloc() */
+
+/**
+ * The heap is made up as a list of structs of this type.
+ * This does not have to be aligned since for getting its size,
+ * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
+ */
+struct mem {
+ /** index (-> ram[next]) of the next struct */
+ mem_size_t next;
+ /** index (-> ram[prev]) of the previous struct */
+ mem_size_t prev;
+ /** 1: this area is used; 0: this area is unused */
+ u8_t used;
+#if MEM_OVERFLOW_CHECK
+ /** this keeps track of the user allocation size for guard checks */
+ mem_size_t user_size;
+#endif
+};
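+
+/* Heap layout sketch: the heap is a byte array (ram) carved into blocks,
+ * each headed by a struct mem whose next/prev members are byte offsets
+ * into ram rather than pointers:
+ *
+ *   ram: [hdr|user data...][hdr|user data...] ... [hdr]   <- ram_end
+ *         next offset --->  next offset ---> ...
+ *
+ * The final struct mem (ram_end) is always marked used and terminates
+ * the list. */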
+
+/** All allocated blocks will be at least MIN_SIZE bytes big!
+ * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
+ * larger values help prevent many tiny blocks from fragmenting the RAM. */
+#ifndef MIN_SIZE
+#define MIN_SIZE 12
+#endif /* MIN_SIZE */
+/* some alignment macros: we define them here for better source code layout */
+#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
+#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
+#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
+
+/** If you want to relocate the heap to external memory, simply define
+ * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
+ * If so, make sure the memory at that location is big enough (see below on
+ * how that space is calculated). */
+#ifndef LWIP_RAM_HEAP_POINTER
+/** the heap. we need one struct mem at the end and some room for alignment */
+LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM));
+#define LWIP_RAM_HEAP_POINTER ram_heap
+#endif /* LWIP_RAM_HEAP_POINTER */
+
+/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
+static u8_t *ram;
+/** the last entry, always unused! */
+static struct mem *ram_end;
+
+/** concurrent access protection */
+#if !NO_SYS
+static sys_mutex_t mem_mutex;
+#endif
+
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+
+static volatile u8_t mem_free_count;
+
+/* Allow mem_free from other (e.g. interrupt) context */
+#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free)
+#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free)
+#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free)
+#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
+#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc)
+#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc)
+#define LWIP_MEM_LFREE_VOLATILE volatile
+
+#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+
+/* Protect the heap only by using a mutex */
+#define LWIP_MEM_FREE_DECL_PROTECT()
+#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex)
+#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex)
+/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */
+#define LWIP_MEM_ALLOC_DECL_PROTECT()
+#define LWIP_MEM_ALLOC_PROTECT()
+#define LWIP_MEM_ALLOC_UNPROTECT()
+#define LWIP_MEM_LFREE_VOLATILE
+
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+
+/** pointer to the lowest free block, this is used for faster search */
+static struct mem * LWIP_MEM_LFREE_VOLATILE lfree;
+
+#if MEM_SANITY_CHECK
+static void mem_sanity(void);
+#define MEM_SANITY() mem_sanity()
+#else
+#define MEM_SANITY()
+#endif
+
+#if MEM_OVERFLOW_CHECK
+static void
+mem_overflow_init_element(struct mem *mem, mem_size_t user_size)
+{
+ void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
+ mem->user_size = user_size;
+ mem_overflow_init_raw(p, user_size);
+}
+
+static void
+mem_overflow_check_element(struct mem *mem)
+{
+ void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
+ mem_overflow_check_raw(p, mem->user_size, "heap", "");
+}
+#else /* MEM_OVERFLOW_CHECK */
+#define mem_overflow_init_element(mem, size)
+#define mem_overflow_check_element(mem)
+#endif /* MEM_OVERFLOW_CHECK */
+
+static struct mem *
+ptr_to_mem(mem_size_t ptr)
+{
+ return (struct mem *)(void *)&ram[ptr];
+}
+
+static mem_size_t
+mem_to_ptr(void *mem)
+{
+ return (mem_size_t)((u8_t *)mem - ram);
+}
+
+/**
+ * "Plug holes" by combining adjacent empty struct mems.
+ * After this function is through, there should not exist
+ * one empty struct mem pointing to another empty struct mem.
+ *
+ * @param mem this points to a struct mem which just has been freed
+ * @internal this function is only called by mem_free() and mem_trim()
+ *
+ * This assumes access to the heap is protected by the calling function
+ * already.
+ */
+static void
+plug_holes(struct mem *mem)
+{
+ struct mem *nmem;
+ struct mem *pmem;
+
+ LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
+ LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
+ LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);
+
+ /* plug hole forward */
+ LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);
+
+ nmem = ptr_to_mem(mem->next);
+ if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
+ /* if mem->next is unused and not end of ram, combine mem and mem->next */
+ if (lfree == nmem) {
+ lfree = mem;
+ }
+ mem->next = nmem->next;
+ if (nmem->next != MEM_SIZE_ALIGNED) {
+ ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem);
+ }
+ }
+
+ /* plug hole backward */
+ pmem = ptr_to_mem(mem->prev);
+ if (pmem != mem && pmem->used == 0) {
+ /* if mem->prev is unused, combine mem and mem->prev */
+ if (lfree == mem) {
+ lfree = pmem;
+ }
+ pmem->next = mem->next;
+ if (mem->next != MEM_SIZE_ALIGNED) {
+ ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem);
+ }
+ }
+}
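+
+/* Worked example for plug_holes() (illustrative sizes): freeing the middle
+ * block below, with both neighbors already free, merges all three into one
+ * block; the two interior struct mem headers are reclaimed as user space:
+ *
+ *   before: [free 16][freed 32][free 64]
+ *   after:  [free 16+32+64 + 2*SIZEOF_STRUCT_MEM]
+ */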
+
+/**
+ * Zero the heap and initialize start, end and lowest-free
+ */
+void
+mem_init(void)
+{
+ struct mem *mem;
+
+ LWIP_ASSERT("Sanity check alignment",
+ (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0);
+
+ /* align the heap */
+ ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
+ /* initialize the start of the heap */
+ mem = (struct mem *)(void *)ram;
+ mem->next = MEM_SIZE_ALIGNED;
+ mem->prev = 0;
+ mem->used = 0;
+ /* initialize the end of the heap */
+ ram_end = ptr_to_mem(MEM_SIZE_ALIGNED);
+ ram_end->used = 1;
+ ram_end->next = MEM_SIZE_ALIGNED;
+ ram_end->prev = MEM_SIZE_ALIGNED;
+ MEM_SANITY();
+
+ /* initialize the lowest-free pointer to the start of the heap */
+ lfree = (struct mem *)(void *)ram;
+
+ MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
+
+ if (sys_mutex_new(&mem_mutex) != ERR_OK) {
+ LWIP_ASSERT("failed to create mem_mutex", 0);
+ }
+}
+
+/* Check if a struct mem is correctly linked.
+ * If not, double-free is a possible reason.
+ */
+static int
+mem_link_valid(struct mem *mem)
+{
+ struct mem *nmem, *pmem;
+ mem_size_t rmem_idx;
+ rmem_idx = mem_to_ptr(mem);
+ nmem = ptr_to_mem(mem->next);
+ pmem = ptr_to_mem(mem->prev);
+ if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) ||
+ ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) ||
+ ((nmem != ram_end) && (nmem->prev != rmem_idx))) {
+ return 0;
+ }
+ return 1;
+}
+
+#if MEM_SANITY_CHECK
+static void
+mem_sanity(void)
+{
+ struct mem *mem;
+ u8_t last_used;
+
+ /* begin with first element here */
+ mem = (struct mem *)ram;
+ LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1));
+ last_used = mem->used;
+ LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0);
+ LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
+ LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next)));
+
+ /* check all elements before the end of the heap */
+ for (mem = ptr_to_mem(mem->next);
+ ((u8_t *)mem > ram) && (mem < ram_end);
+ mem = ptr_to_mem(mem->next)) {
+ LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem);
+ LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED);
+ LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED);
+ LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev) == ptr_to_mem(mem->prev)));
+ LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next)));
+
+ if (last_used == 0) {
+ /* 2 unused elements in a row? */
+ LWIP_ASSERT("heap element unused?", mem->used == 1);
+ } else {
+ LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1));
+ }
+
+ LWIP_ASSERT("heap element link valid", mem_link_valid(mem));
+
+ /* used/unused altering */
+ last_used = mem->used;
+ }
+ LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED));
+ LWIP_ASSERT("heap element used valid", mem->used == 1);
+ LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED);
+ LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED);
+}
+#endif /* MEM_SANITY_CHECK */
+
+/**
+ * Put a struct mem back on the heap
+ *
+ * @param rmem is the data portion of a struct mem as returned by a previous
+ * call to mem_malloc()
+ */
+void
+mem_free(void *rmem)
+{
+ struct mem *mem;
+ LWIP_MEM_FREE_DECL_PROTECT();
+
+ if (rmem == NULL) {
+ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
+ return;
+ }
+ if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) {
+ LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment");
+ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n"));
+ /* protect mem stats from concurrent access */
+ MEM_STATS_INC_LOCKED(illegal);
+ return;
+ }
+
+ /* Get the corresponding struct mem: */
+ /* cast through void* to get rid of alignment warnings */
+ mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
+
+ if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) {
+ LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory");
+ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
+ /* protect mem stats from concurrent access */
+ MEM_STATS_INC_LOCKED(illegal);
+ return;
+ }
+#if MEM_OVERFLOW_CHECK
+ mem_overflow_check_element(mem);
+#endif
+ /* protect the heap from concurrent access */
+ LWIP_MEM_FREE_PROTECT();
+ /* mem has to be in a used state */
+ if (!mem->used) {
+ LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free");
+ LWIP_MEM_FREE_UNPROTECT();
+ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n"));
+ /* protect mem stats from concurrent access */
+ MEM_STATS_INC_LOCKED(illegal);
+ return;
+ }
+
+ if (!mem_link_valid(mem)) {
+ LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free");
+ LWIP_MEM_FREE_UNPROTECT();
+ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n"));
+ /* protect mem stats from concurrent access */
+ MEM_STATS_INC_LOCKED(illegal);
+ return;
+ }
+
+ /* mem is now unused. */
+ mem->used = 0;
+
+ if (mem < lfree) {
+ /* the newly freed struct is now the lowest */
+ lfree = mem;
+ }
+
+ MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));
+
+ /* finally, see if prev or next are free also */
+ plug_holes(mem);
+ MEM_SANITY();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+ mem_free_count = 1;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+ LWIP_MEM_FREE_UNPROTECT();
+}
+
+/**
+ * Shrink memory returned by mem_malloc().
+ *
+ * @param rmem pointer to memory allocated by mem_malloc() that is to be shrunk
+ * @param new_size required size after shrinking (needs to be smaller than or
+ * equal to the previous size)
+ * @return for compatibility reasons: always == rmem at the moment,
+ * or NULL if new_size is > the old size, in which case rmem is NOT touched
+ * or freed!
+ */
+void *
+mem_trim(void *rmem, mem_size_t new_size)
+{
+ mem_size_t size, newsize;
+ mem_size_t ptr, ptr2;
+ struct mem *mem, *mem2;
+ /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
+ LWIP_MEM_FREE_DECL_PROTECT();
+
+ /* Expand the size of the allocated memory region so that we can
+ adjust for alignment. */
+ newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size);
+ if (newsize < MIN_SIZE_ALIGNED) {
+ /* every data block must be at least MIN_SIZE_ALIGNED long */
+ newsize = MIN_SIZE_ALIGNED;
+ }
+#if MEM_OVERFLOW_CHECK
+ newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
+#endif
+ if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) {
+ return NULL;
+ }
+
+ LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
+ (u8_t *)rmem < (u8_t *)ram_end);
+
+ if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
+ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
+ /* protect mem stats from concurrent access */
+ MEM_STATS_INC_LOCKED(illegal);
+ return rmem;
+ }
+ /* Get the corresponding struct mem ... */
+ /* cast through void* to get rid of alignment warnings */
+ mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
+#if MEM_OVERFLOW_CHECK
+ mem_overflow_check_element(mem);
+#endif
+ /* ... and its offset pointer */
+ ptr = mem_to_ptr(mem);
+
+ size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD));
+ LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
+ if (newsize > size) {
+ /* not supported */
+ return NULL;
+ }
+ if (newsize == size) {
+ /* No change in size, simply return */
+ return rmem;
+ }
+
+ /* protect the heap from concurrent access */
+ LWIP_MEM_FREE_PROTECT();
+
+ mem2 = ptr_to_mem(mem->next);
+ if (mem2->used == 0) {
+ /* The next struct is unused, we can simply move it a little */
+ mem_size_t next;
+ LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
+ /* remember the old next pointer */
+ next = mem2->next;
+ /* create new struct mem which is moved directly after the shrunk mem */
+ ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
+ if (lfree == mem2) {
+ lfree = ptr_to_mem(ptr2);
+ }
+ mem2 = ptr_to_mem(ptr2);
+ mem2->used = 0;
+ /* restore the next pointer */
+ mem2->next = next;
+ /* link it back to mem */
+ mem2->prev = ptr;
+ /* link mem to it */
+ mem->next = ptr2;
+ /* last thing to restore linked list: as we have moved mem2,
+ * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
+ * the end of the heap */
+ if (mem2->next != MEM_SIZE_ALIGNED) {
+ ptr_to_mem(mem2->next)->prev = ptr2;
+ }
+ MEM_STATS_DEC_USED(used, (size - newsize));
+ /* no need to plug holes, we've already done that */
+ } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
+ /* Next struct is used but there's room for another struct mem with
+ * at least MIN_SIZE_ALIGNED of data.
+ * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
+ * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
+ * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
+ * region that couldn't hold data, but when mem->next gets freed,
+ * the 2 regions would be combined, resulting in more free memory */
+ ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
+ LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
+ mem2 = ptr_to_mem(ptr2);
+ if (mem2 < lfree) {
+ lfree = mem2;
+ }
+ mem2->used = 0;
+ mem2->next = mem->next;
+ mem2->prev = ptr;
+ mem->next = ptr2;
+ if (mem2->next != MEM_SIZE_ALIGNED) {
+ ptr_to_mem(mem2->next)->prev = ptr2;
+ }
+ MEM_STATS_DEC_USED(used, (size - newsize));
+ /* the original mem->next is used, so no need to plug holes! */
+ }
+ /* else {
+ next struct mem is used but size between mem and mem2 is not big enough
+ to create another struct mem
+ -> don't do anything.
+ -> the remaining space stays unused since it is too small
+ } */
+#if MEM_OVERFLOW_CHECK
+ mem_overflow_init_element(mem, new_size);
+#endif
+ MEM_SANITY();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+ mem_free_count = 1;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+ LWIP_MEM_FREE_UNPROTECT();
+ return rmem;
+}
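+
+/* Usage sketch for mem_trim() (illustrative, not part of lwIP): allocate a
+ * worst-case buffer, then shrink it in place once the actual length is
+ * known. When shrinking, the returned pointer always equals the old one. */
+#if 0
+static void example_trim(void)
+{
+ char *buf = (char *)mem_malloc(512);
+ if (buf != NULL) {
+ mem_trim(buf, 100); /* returns buf; frees the tail back to the heap */
+ /* ... use the first 100 bytes of buf ..., then: */
+ mem_free(buf);
+ }
+}
+#endif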
+
+/**
+ * Allocate a block of memory with a minimum of 'size' bytes.
+ *
+ * @param size_in is the minimum size of the requested block in bytes.
+ * @return pointer to allocated memory or NULL if no free memory was found.
+ *
+ * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
+ */
+void *
+mem_malloc(mem_size_t size_in)
+{
+ mem_size_t ptr, ptr2, size;
+ struct mem *mem, *mem2;
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+ u8_t local_mem_free_count = 0;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+ LWIP_MEM_ALLOC_DECL_PROTECT();
+
+ if (size_in == 0) {
+ return NULL;
+ }
+
+ /* Expand the size of the allocated memory region so that we can
+ adjust for alignment. */
+ size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in);
+ if (size < MIN_SIZE_ALIGNED) {
+ /* every data block must be at least MIN_SIZE_ALIGNED long */
+ size = MIN_SIZE_ALIGNED;
+ }
+#if MEM_OVERFLOW_CHECK
+ size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
+#endif
+ if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) {
+ return NULL;
+ }
+
+ /* protect the heap from concurrent access */
+ sys_mutex_lock(&mem_mutex);
+ LWIP_MEM_ALLOC_PROTECT();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+ /* run as long as a mem_free disturbed mem_malloc or mem_trim */
+ do {
+ local_mem_free_count = 0;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+
+ /* Scan through the heap searching for a free block that is big enough,
+ * beginning with the lowest free block.
+ */
+ for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size;
+ ptr = ptr_to_mem(ptr)->next) {
+ mem = ptr_to_mem(ptr);
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+ mem_free_count = 0;
+ LWIP_MEM_ALLOC_UNPROTECT();
+ /* allow mem_free or mem_trim to run */
+ LWIP_MEM_ALLOC_PROTECT();
+ if (mem_free_count != 0) {
+ /* If mem_free or mem_trim have run, we have to restart since they
+ could have altered our current struct mem. */
+ local_mem_free_count = 1;
+ break;
+ }
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+
+ if ((!mem->used) &&
+ (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
+ /* mem is not used and at least perfect fit is possible:
+ * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
+
+ if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
+ /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
+ * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
+ * -> split large block, create empty remainder,
+ * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
+ * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
+ * struct mem would fit in but no data between mem2 and mem2->next
+ * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
+ * region that couldn't hold data, but when mem->next gets freed,
+ * the 2 regions would be combined, resulting in more free memory
+ */
+ ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size);
+ LWIP_ASSERT("invalid next ptr",ptr2 != MEM_SIZE_ALIGNED);
+ /* create mem2 struct */
+ mem2 = ptr_to_mem(ptr2);
+ mem2->used = 0;
+ mem2->next = mem->next;
+ mem2->prev = ptr;
+ /* and insert it between mem and mem->next */
+ mem->next = ptr2;
+ mem->used = 1;
+
+ if (mem2->next != MEM_SIZE_ALIGNED) {
+ ptr_to_mem(mem2->next)->prev = ptr2;
+ }
+ MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
+ } else {
+ /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
+ * be used at this point: if not, we would have 2 unused structs in a row and plug_holes
+ * should have taken care of this).
+ * -> near fit or exact fit: do not split, no mem2 creation
+ * also can't move mem->next directly behind mem, since mem->next
+ * will always be used at this point!
+ */
+ mem->used = 1;
+ MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem));
+ }
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+mem_malloc_adjust_lfree:
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+ if (mem == lfree) {
+ struct mem *cur = lfree;
+ /* Find next free block after mem and update lowest free pointer */
+ while (cur->used && cur != ram_end) {
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+ mem_free_count = 0;
+ LWIP_MEM_ALLOC_UNPROTECT();
+ /* prevent high interrupt latency... */
+ LWIP_MEM_ALLOC_PROTECT();
+ if (mem_free_count != 0) {
+ /* If mem_free or mem_trim have run, we have to restart since they
+ could have altered our current struct mem or lfree. */
+ goto mem_malloc_adjust_lfree;
+ }
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+ cur = ptr_to_mem(cur->next);
+ }
+ lfree = cur;
+ LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
+ }
+ LWIP_MEM_ALLOC_UNPROTECT();
+ sys_mutex_unlock(&mem_mutex);
+ LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
+ (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
+ LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
+ ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
+ LWIP_ASSERT("mem_malloc: sanity check alignment",
+ (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0);
+
+#if MEM_OVERFLOW_CHECK
+ mem_overflow_init_element(mem, size_in);
+#endif
+ MEM_SANITY();
+ return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET;
+ }
+ }
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+ /* if we got interrupted by a mem_free, try again */
+ } while (local_mem_free_count != 0);
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+ MEM_STATS_INC(err);
+ LWIP_MEM_ALLOC_UNPROTECT();
+ sys_mutex_unlock(&mem_mutex);
+ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
+ return NULL;
+}
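+
+/* Usage sketch (illustrative, not part of lwIP): mem_malloc()/mem_free()
+ * behave like malloc()/free() but draw from the lwIP heap and return
+ * MEM_ALIGNMENT-aligned memory. Note that the memory is NOT zeroed. */
+#if 0
+static void example_alloc(void)
+{
+ void *p = mem_malloc(128); /* NULL if the lwIP heap is exhausted */
+ if (p != NULL) {
+ memset(p, 0, 128); /* zero it manually, or use mem_calloc() */
+ mem_free(p);
+ }
+}
+#endif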
+
+#endif /* MEM_USE_POOLS */
+
+#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS)
+void *
+mem_calloc(mem_size_t count, mem_size_t size)
+{
+ return mem_clib_calloc(count, size);
+}
+
+#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
+/**
+ * Contiguously allocates enough space for count objects that are size bytes
+ * of memory each and returns a pointer to the allocated memory.
+ *
+ * The allocated memory is filled with bytes of value zero.
+ *
+ * @param count number of objects to allocate
+ * @param size size of the objects to allocate
+ * @return pointer to allocated memory / NULL pointer if there is an error
+ */
+void *
+mem_calloc(mem_size_t count, mem_size_t size)
+{
+ void *p;
+ size_t alloc_size = (size_t)count * (size_t)size;
+
+ if ((size_t)(mem_size_t)alloc_size != alloc_size) {
+ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size));
+ return NULL;
+ }
+
+ /* allocate 'count' objects of size 'size' */
+ p = mem_malloc((mem_size_t)alloc_size);
+ if (p) {
+ /* zero the memory */
+ memset(p, 0, alloc_size);
+ }
+ return p;
+}
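+
+/* Worked example of the overflow guard above, assuming a 16-bit mem_size_t:
+ * mem_calloc(300, 300) gives alloc_size = 90000, but (mem_size_t)90000
+ * truncates to 24464, so the comparison fails and NULL is returned instead
+ * of allocating a too-small block. */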
+#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
diff --git a/lwip/src/core/memp.c b/lwip/src/core/memp.c
new file mode 100644
index 0000000..352ce5a
--- /dev/null
+++ b/lwip/src/core/memp.c
@@ -0,0 +1,447 @@
+/**
+ * @file
+ * Dynamic pool memory manager
+ *
+ * lwIP has dedicated pools for many structures (netconn, protocol control blocks,
+ * packet buffers, ...). All these pools are managed here.
+ *
+ * @defgroup mempool Memory pools
+ * @ingroup infrastructure
+ * Custom memory pools
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/memp.h"
+#include "lwip/sys.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+/* Make sure we include everything we need for size calculation required by memp_std.h */
+#include "lwip/pbuf.h"
+#include "lwip/raw.h"
+#include "lwip/udp.h"
+#include "lwip/tcp.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/altcp.h"
+#include "lwip/ip4_frag.h"
+#include "lwip/netbuf.h"
+#include "lwip/api.h"
+#include "lwip/priv/tcpip_priv.h"
+#include "lwip/priv/api_msg.h"
+#include "lwip/priv/sockets_priv.h"
+#include "lwip/etharp.h"
+#include "lwip/igmp.h"
+#include "lwip/timeouts.h"
+/* needed by default MEMP_NUM_SYS_TIMEOUT */
+#include "netif/ppp/ppp_opts.h"
+#include "lwip/netdb.h"
+#include "lwip/dns.h"
+#include "lwip/priv/nd6_priv.h"
+#include "lwip/ip6_frag.h"
+#include "lwip/mld6.h"
+
+#define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc)
+#include "lwip/priv/memp_std.h"
+
+const struct memp_desc *const memp_pools[MEMP_MAX] = {
+#define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name,
+#include "lwip/priv/memp_std.h"
+};
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+#if MEMP_MEM_MALLOC && MEMP_OVERFLOW_CHECK >= 2
+#undef MEMP_OVERFLOW_CHECK
+/* MEMP_OVERFLOW_CHECK >= 2 does not work with MEMP_MEM_MALLOC, use 1 instead */
+#define MEMP_OVERFLOW_CHECK 1
+#endif
+
+#if MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC
+/**
+ * Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm".
+ */
+static int
+memp_sanity(const struct memp_desc *desc)
+{
+ struct memp *t, *h;
+
+ t = *desc->tab;
+ if (t != NULL) {
+ for (h = t->next; (t != NULL) && (h != NULL); t = t->next,
+ h = ((h->next != NULL) ? h->next->next : NULL)) {
+ if (t == h) {
+ return 0;
+ }
+ }
+ }
+
+ return 1;
+}
+#endif /* MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC */
+
+#if MEMP_OVERFLOW_CHECK
+/**
+ * Check if a memp element was victim of an overflow or underflow
+ * (e.g. the restricted area after/before it has been altered)
+ *
+ * @param p the memp element to check
+ * @param desc the pool p comes from
+ */
+static void
+memp_overflow_check_element(struct memp *p, const struct memp_desc *desc)
+{
+ mem_overflow_check_raw((u8_t *)p + MEMP_SIZE, desc->size, "pool ", desc->desc);
+}
+
+/**
+ * Initialize the restricted area of on memp element.
+ */
+static void
+memp_overflow_init_element(struct memp *p, const struct memp_desc *desc)
+{
+ mem_overflow_init_raw((u8_t *)p + MEMP_SIZE, desc->size);
+}
+
+#if MEMP_OVERFLOW_CHECK >= 2
+/**
+ * Do an overflow check for all elements in every pool.
+ *
+ * @see memp_overflow_check_element for a description of the check
+ */
+static void
+memp_overflow_check_all(void)
+{
+ u16_t i, j;
+ struct memp *p;
+ SYS_ARCH_DECL_PROTECT(old_level);
+ SYS_ARCH_PROTECT(old_level);
+
+ for (i = 0; i < MEMP_MAX; ++i) {
+ p = (struct memp *)LWIP_MEM_ALIGN(memp_pools[i]->base);
+ for (j = 0; j < memp_pools[i]->num; ++j) {
+ memp_overflow_check_element(p, memp_pools[i]);
+ p = LWIP_ALIGNMENT_CAST(struct memp *, ((u8_t *)p + MEMP_SIZE + memp_pools[i]->size + MEM_SANITY_REGION_AFTER_ALIGNED));
+ }
+ }
+ SYS_ARCH_UNPROTECT(old_level);
+}
+#endif /* MEMP_OVERFLOW_CHECK >= 2 */
+#endif /* MEMP_OVERFLOW_CHECK */
+
+/**
+ * Initialize custom memory pool.
+ * Related functions: memp_malloc_pool, memp_free_pool
+ *
+ * @param desc pool to initialize
+ */
+void
+memp_init_pool(const struct memp_desc *desc)
+{
+#if MEMP_MEM_MALLOC
+ LWIP_UNUSED_ARG(desc);
+#else
+ int i;
+ struct memp *memp;
+
+ *desc->tab = NULL;
+ memp = (struct memp *)LWIP_MEM_ALIGN(desc->base);
+#if MEMP_MEM_INIT
+ /* force memset on pool memory */
+ memset(memp, 0, (size_t)desc->num * (MEMP_SIZE + desc->size
+#if MEMP_OVERFLOW_CHECK
+ + MEM_SANITY_REGION_AFTER_ALIGNED
+#endif
+ ));
+#endif
+ /* create a linked list of memp elements */
+ for (i = 0; i < desc->num; ++i) {
+ memp->next = *desc->tab;
+ *desc->tab = memp;
+#if MEMP_OVERFLOW_CHECK
+ memp_overflow_init_element(memp, desc);
+#endif /* MEMP_OVERFLOW_CHECK */
+ /* cast through void* to get rid of alignment warnings */
+ memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size
+#if MEMP_OVERFLOW_CHECK
+ + MEM_SANITY_REGION_AFTER_ALIGNED
+#endif
+ );
+ }
+#if MEMP_STATS
+ desc->stats->avail = desc->num;
+#endif /* MEMP_STATS */
+#endif /* !MEMP_MEM_MALLOC */
+
+#if MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY)
+ desc->stats->name = desc->desc;
+#endif /* MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) */
+}
+
+/**
+ * Initializes lwIP built-in pools.
+ * Related functions: memp_malloc, memp_free
+ *
+ * Carves out memp_memory into linked lists for each pool-type.
+ */
+void
+memp_init(void)
+{
+ u16_t i;
+
+ /* for every pool: */
+ for (i = 0; i < LWIP_ARRAYSIZE(memp_pools); i++) {
+ memp_init_pool(memp_pools[i]);
+
+#if LWIP_STATS && MEMP_STATS
+ lwip_stats.memp[i] = memp_pools[i]->stats;
+#endif
+ }
+
+#if MEMP_OVERFLOW_CHECK >= 2
+ /* check everything a first time to see if it worked */
+ memp_overflow_check_all();
+#endif /* MEMP_OVERFLOW_CHECK >= 2 */
+}
+
+static void *
+#if !MEMP_OVERFLOW_CHECK
+do_memp_malloc_pool(const struct memp_desc *desc)
+#else
+do_memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line)
+#endif
+{
+ struct memp *memp;
+ SYS_ARCH_DECL_PROTECT(old_level);
+
+#if MEMP_MEM_MALLOC
+ memp = (struct memp *)mem_malloc(MEMP_SIZE + MEMP_ALIGN_SIZE(desc->size));
+ SYS_ARCH_PROTECT(old_level);
+#else /* MEMP_MEM_MALLOC */
+ SYS_ARCH_PROTECT(old_level);
+
+ memp = *desc->tab;
+#endif /* MEMP_MEM_MALLOC */
+
+ if (memp != NULL) {
+#if !MEMP_MEM_MALLOC
+#if MEMP_OVERFLOW_CHECK == 1
+ memp_overflow_check_element(memp, desc);
+#endif /* MEMP_OVERFLOW_CHECK */
+
+ *desc->tab = memp->next;
+#if MEMP_OVERFLOW_CHECK
+ memp->next = NULL;
+#endif /* MEMP_OVERFLOW_CHECK */
+#endif /* !MEMP_MEM_MALLOC */
+#if MEMP_OVERFLOW_CHECK
+ memp->file = file;
+ memp->line = line;
+#if MEMP_MEM_MALLOC
+ memp_overflow_init_element(memp, desc);
+#endif /* MEMP_MEM_MALLOC */
+#endif /* MEMP_OVERFLOW_CHECK */
+ LWIP_ASSERT("memp_malloc: memp properly aligned",
+ ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0);
+#if MEMP_STATS
+ desc->stats->used++;
+ if (desc->stats->used > desc->stats->max) {
+ desc->stats->max = desc->stats->used;
+ }
+#endif
+ SYS_ARCH_UNPROTECT(old_level);
+ /* cast through u8_t* to get rid of alignment warnings */
+ return ((u8_t *)memp + MEMP_SIZE);
+ } else {
+#if MEMP_STATS
+ desc->stats->err++;
+#endif
+ SYS_ARCH_UNPROTECT(old_level);
+ LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", desc->desc));
+ }
+
+ return NULL;
+}
+
+/**
+ * Get an element from a custom pool.
+ *
+ * @param desc the pool to get an element from
+ *
+ * @return a pointer to the allocated memory or a NULL pointer on error
+ */
+void *
+#if !MEMP_OVERFLOW_CHECK
+memp_malloc_pool(const struct memp_desc *desc)
+#else
+memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line)
+#endif
+{
+ LWIP_ASSERT("invalid pool desc", desc != NULL);
+ if (desc == NULL) {
+ return NULL;
+ }
+
+#if !MEMP_OVERFLOW_CHECK
+ return do_memp_malloc_pool(desc);
+#else
+ return do_memp_malloc_pool_fn(desc, file, line);
+#endif
+}
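+
+/* Custom-pool usage sketch (illustrative, not part of lwIP): declare a
+ * private pool with LWIP_MEMPOOL_DECLARE (which creates the descriptor
+ * memp_<name>), initialize it once, then allocate and free elements. */
+#if 0
+struct my_msg { u16_t id; u16_t len; };
+LWIP_MEMPOOL_DECLARE(my_pool, 8, sizeof(struct my_msg), "MY_MSG pool")
+
+static void example_custom_pool(void)
+{
+ struct my_msg *msg;
+ memp_init_pool(&memp_my_pool); /* once, at startup */
+ msg = (struct my_msg *)memp_malloc_pool(&memp_my_pool);
+ if (msg != NULL) {
+ msg->id = 1;
+ memp_free_pool(&memp_my_pool, msg);
+ }
+}
+#endif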
+
+/**
+ * Get an element from a specific pool.
+ *
+ * @param type the pool to get an element from
+ *
+ * @return a pointer to the allocated memory or a NULL pointer on error
+ */
+void *
+#if !MEMP_OVERFLOW_CHECK
+memp_malloc(memp_t type)
+#else
+memp_malloc_fn(memp_t type, const char *file, const int line)
+#endif
+{
+ void *memp;
+ LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;);
+
+#if MEMP_OVERFLOW_CHECK >= 2
+ memp_overflow_check_all();
+#endif /* MEMP_OVERFLOW_CHECK >= 2 */
+
+#if !MEMP_OVERFLOW_CHECK
+ memp = do_memp_malloc_pool(memp_pools[type]);
+#else
+ memp = do_memp_malloc_pool_fn(memp_pools[type], file, line);
+#endif
+
+ return memp;
+}
+
+static void
+do_memp_free_pool(const struct memp_desc *desc, void *mem)
+{
+ struct memp *memp;
+ SYS_ARCH_DECL_PROTECT(old_level);
+
+ LWIP_ASSERT("memp_free: mem properly aligned",
+ ((mem_ptr_t)mem % MEM_ALIGNMENT) == 0);
+
+ /* cast through void* to get rid of alignment warnings */
+ memp = (struct memp *)(void *)((u8_t *)mem - MEMP_SIZE);
+
+ SYS_ARCH_PROTECT(old_level);
+
+#if MEMP_OVERFLOW_CHECK == 1
+ memp_overflow_check_element(memp, desc);
+#endif /* MEMP_OVERFLOW_CHECK */
+
+#if MEMP_STATS
+ desc->stats->used--;
+#endif
+
+#if MEMP_MEM_MALLOC
+ LWIP_UNUSED_ARG(desc);
+ SYS_ARCH_UNPROTECT(old_level);
+ mem_free(memp);
+#else /* MEMP_MEM_MALLOC */
+ memp->next = *desc->tab;
+ *desc->tab = memp;
+
+#if MEMP_SANITY_CHECK
+ LWIP_ASSERT("memp sanity", memp_sanity(desc));
+#endif /* MEMP_SANITY_CHECK */
+
+ SYS_ARCH_UNPROTECT(old_level);
+#endif /* !MEMP_MEM_MALLOC */
+}
+
+/**
+ * Put a custom pool element back into its pool.
+ *
+ * @param desc the pool to put mem back into
+ * @param mem the memp element to free
+ */
+void
+memp_free_pool(const struct memp_desc *desc, void *mem)
+{
+ LWIP_ASSERT("invalid pool desc", desc != NULL);
+ if ((desc == NULL) || (mem == NULL)) {
+ return;
+ }
+
+ do_memp_free_pool(desc, mem);
+}
+
+/**
+ * Put an element back into its pool.
+ *
+ * @param type the pool to put mem back into
+ * @param mem the memp element to free
+ */
+void
+memp_free(memp_t type, void *mem)
+{
+#ifdef LWIP_HOOK_MEMP_AVAILABLE
+ struct memp *old_first;
+#endif
+
+ LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;);
+
+ if (mem == NULL) {
+ return;
+ }
+
+#if MEMP_OVERFLOW_CHECK >= 2
+ memp_overflow_check_all();
+#endif /* MEMP_OVERFLOW_CHECK >= 2 */
+
+#ifdef LWIP_HOOK_MEMP_AVAILABLE
+ old_first = *memp_pools[type]->tab;
+#endif
+
+ do_memp_free_pool(memp_pools[type], mem);
+
+#ifdef LWIP_HOOK_MEMP_AVAILABLE
+ if (old_first == NULL) {
+ LWIP_HOOK_MEMP_AVAILABLE(type);
+ }
+#endif
+}
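+
+/* Hook sketch (illustrative, not part of lwIP): LWIP_HOOK_MEMP_AVAILABLE(type)
+ * fires above only when a previously empty pool regains a free element, so an
+ * application can retry an allocation that failed earlier. A port could
+ * provide, e.g. via the file named by LWIP_HOOK_FILENAME:
+ *
+ *   #define LWIP_HOOK_MEMP_AVAILABLE(type) my_memp_available(type)
+ *   void my_memp_available(memp_t type);   (application-defined, hypothetical)
+ */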
diff --git a/lwip/src/core/netif.c b/lwip/src/core/netif.c
new file mode 100644
index 0000000..e432823
--- /dev/null
+++ b/lwip/src/core/netif.c
@@ -0,0 +1,1795 @@
+/**
+ * @file
+ * lwIP network interface abstraction
+ *
+ * @defgroup netif Network interface (NETIF)
+ * @ingroup callbackstyle_api
+ *
+ * @defgroup netif_ip4 IPv4 address handling
+ * @ingroup netif
+ *
+ * @defgroup netif_ip6 IPv6 address handling
+ * @ingroup netif
+ *
+ * @defgroup netif_cd Client data handling
+ * Store data (void*) on a netif for application usage.
+ * @see @ref LWIP_NUM_NETIF_CLIENT_DATA
+ * @ingroup netif
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ */
+
+#include "lwip/opt.h"
+
+#include <string.h> /* memset */
+#include <stdlib.h> /* atoi */
+
+#include "lwip/def.h"
+#include "lwip/ip_addr.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/netif.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/udp.h"
+#include "lwip/priv/raw_priv.h"
+#include "lwip/snmp.h"
+#include "lwip/igmp.h"
+#include "lwip/etharp.h"
+#include "lwip/stats.h"
+#include "lwip/sys.h"
+#include "lwip/ip.h"
+#if ENABLE_LOOPBACK
+#if LWIP_NETIF_LOOPBACK_MULTITHREADING
+#include "lwip/tcpip.h"
+#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */
+#endif /* ENABLE_LOOPBACK */
+
+#include <netif/ethernet.h>
+
+#if LWIP_AUTOIP
+#include "lwip/autoip.h"
+#endif /* LWIP_AUTOIP */
+#if LWIP_DHCP
+#include "lwip/dhcp.h"
+#endif /* LWIP_DHCP */
+#if LWIP_IPV6_DHCP6
+#include "lwip/dhcp6.h"
+#endif /* LWIP_IPV6_DHCP6 */
+#if LWIP_IPV6_MLD
+#include "lwip/mld6.h"
+#endif /* LWIP_IPV6_MLD */
+#if LWIP_IPV6
+#include "lwip/nd6.h"
+#endif
+
+#if LWIP_NETIF_STATUS_CALLBACK
+#define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0)
+#else
+#define NETIF_STATUS_CALLBACK(n)
+#endif /* LWIP_NETIF_STATUS_CALLBACK */
+
+#if LWIP_NETIF_LINK_CALLBACK
+#define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0)
+#else
+#define NETIF_LINK_CALLBACK(n)
+#endif /* LWIP_NETIF_LINK_CALLBACK */
+
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+static netif_ext_callback_t *ext_callback;
+#endif
+
+#if !LWIP_SINGLE_NETIF
+struct netif *netif_list;
+#endif /* !LWIP_SINGLE_NETIF */
+struct netif *netif_default;
+
+#define netif_index_to_num(index) ((index) - 1)
+static u8_t netif_num;
+
+#if LWIP_NUM_NETIF_CLIENT_DATA > 0
+static u8_t netif_client_id;
+#endif
+
+#define NETIF_REPORT_TYPE_IPV4 0x01
+#define NETIF_REPORT_TYPE_IPV6 0x02
+static void netif_issue_reports(struct netif *netif, u8_t report_type);
+
+#if LWIP_IPV6
+static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr);
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4
+static err_t netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr);
+#endif /* LWIP_IPV4 */
+
+#if LWIP_HAVE_LOOPIF
+#if LWIP_IPV4
+static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr);
+#endif
+#if LWIP_IPV6
+static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr);
+#endif
+
+
+static struct netif loop_netif;
+
+/**
+ * Initialize a lwip network interface structure for a loopback interface
+ *
+ * @param netif the lwip network interface structure for this loopif
+ * @return ERR_OK if the loopif is initialized
+ * ERR_MEM if private data couldn't be allocated
+ */
+static err_t
+netif_loopif_init(struct netif *netif)
+{
+ LWIP_ASSERT("netif_loopif_init: invalid netif", netif != NULL);
+
+ /* initialize the snmp variables and counters inside the struct netif
+ * ifSpeed: no assumption can be made!
+ */
+ MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0);
+
+ netif->name[0] = 'l';
+ netif->name[1] = 'o';
+#if LWIP_IPV4
+ netif->output = netif_loop_output_ipv4;
+#endif
+#if LWIP_IPV6
+ netif->output_ip6 = netif_loop_output_ipv6;
+#endif
+#if LWIP_LOOPIF_MULTICAST
+ netif_set_flags(netif, NETIF_FLAG_IGMP);
+#endif
+ NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_DISABLE_ALL);
+ return ERR_OK;
+}
+#endif /* LWIP_HAVE_LOOPIF */
+
+void
+netif_init(void)
+{
+#if LWIP_HAVE_LOOPIF
+#if LWIP_IPV4
+#define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw,
+ ip4_addr_t loop_ipaddr, loop_netmask, loop_gw;
+ IP4_ADDR(&loop_gw, 127, 0, 0, 1);
+ IP4_ADDR(&loop_ipaddr, 127, 0, 0, 1);
+ IP4_ADDR(&loop_netmask, 255, 0, 0, 0);
+#else /* LWIP_IPV4 */
+#define LOOPIF_ADDRINIT
+#endif /* LWIP_IPV4 */
+
+#if NO_SYS
+ netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input);
+#else /* NO_SYS */
+ netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input);
+#endif /* NO_SYS */
+
+#if LWIP_IPV6
+ IP_ADDR6_HOST(loop_netif.ip6_addr, 0, 0, 0, 0x00000001UL);
+ loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID;
+#endif /* LWIP_IPV6 */
+
+ netif_set_link_up(&loop_netif);
+ netif_set_up(&loop_netif);
+
+#endif /* LWIP_HAVE_LOOPIF */
+}
+
+/**
+ * @ingroup lwip_nosys
+ * Forwards a received packet for input processing with
+ * ethernet_input() or ip_input() depending on netif flags.
+ * Don't call directly, pass to netif_add() and call
+ * netif->input().
+ * Only works if the netif driver correctly sets
+ * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag!
+ */
+err_t
+netif_input(struct pbuf *p, struct netif *inp)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ASSERT("netif_input: invalid pbuf", p != NULL);
+ LWIP_ASSERT("netif_input: invalid netif", inp != NULL);
+
+#if LWIP_ETHERNET
+ if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) {
+ return ethernet_input(p, inp);
+ } else
+#endif /* LWIP_ETHERNET */
+ return ip_input(p, inp);
+}
+
+/**
+ * @ingroup netif
+ * Add a network interface to the list of lwIP netifs.
+ *
+ * Same as @ref netif_add but without IPv4 addresses
+ */
+struct netif *
+netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input)
+{
+ return netif_add(netif,
+#if LWIP_IPV4
+ NULL, NULL, NULL,
+#endif /* LWIP_IPV4*/
+ state, init, input);
+}
+
+/**
+ * @ingroup netif
+ * Add a network interface to the list of lwIP netifs.
+ *
+ * @param netif a pre-allocated netif structure
+ * @param ipaddr IP address for the new netif
+ * @param netmask network mask for the new netif
+ * @param gw default gateway IP address for the new netif
+ * @param state opaque data passed to the new netif
+ * @param init callback function that initializes the interface
+ * @param input callback function that is called to pass
+ * ingress packets up in the protocol layer stack.\n
+ * It is recommended to use a function that passes the input directly
+ * to the stack (netif_input(), NO_SYS=1 mode) or via sending a
+ * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n
+ * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET
+ * to decide whether to forward to ethernet_input() or ip_input().
+ * In other words, the functions only work when the netif
+ * driver is implemented correctly!\n
+ * Most members of struct netif should be initialized by the
+ * netif init function = netif driver (init parameter of this function).\n
+ * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after
+ * setting the MAC address in struct netif.hwaddr
+ * (IPv6 requires a link-local address).
+ *
+ * @return netif, or NULL if failed.
+ */
+struct netif *
+netif_add(struct netif *netif,
+#if LWIP_IPV4
+ const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw,
+#endif /* LWIP_IPV4 */
+ void *state, netif_init_fn init, netif_input_fn input)
+{
+#if LWIP_IPV6
+ s8_t i;
+#endif
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_SINGLE_NETIF
+ if (netif_default != NULL) {
+ LWIP_ASSERT("single netif already set", 0);
+ return NULL;
+ }
+#endif
+
+ LWIP_ERROR("netif_add: invalid netif", netif != NULL, return NULL);
+ LWIP_ERROR("netif_add: No init function given", init != NULL, return NULL);
+
+#if LWIP_IPV4
+ if (ipaddr == NULL) {
+ ipaddr = ip_2_ip4(IP4_ADDR_ANY);
+ }
+ if (netmask == NULL) {
+ netmask = ip_2_ip4(IP4_ADDR_ANY);
+ }
+ if (gw == NULL) {
+ gw = ip_2_ip4(IP4_ADDR_ANY);
+ }
+
+ /* reset new interface configuration state */
+ ip_addr_set_zero_ip4(&netif->ip_addr);
+ ip_addr_set_zero_ip4(&netif->netmask);
+ ip_addr_set_zero_ip4(&netif->gw);
+ netif->output = netif_null_output_ip4;
+#endif /* LWIP_IPV4 */
+#if LWIP_IPV6
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ ip_addr_set_zero_ip6(&netif->ip6_addr[i]);
+ netif->ip6_addr_state[i] = IP6_ADDR_INVALID;
+#if LWIP_IPV6_ADDRESS_LIFETIMES
+ netif->ip6_addr_valid_life[i] = IP6_ADDR_LIFE_STATIC;
+ netif->ip6_addr_pref_life[i] = IP6_ADDR_LIFE_STATIC;
+#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */
+ }
+ netif->output_ip6 = netif_null_output_ip6;
+#endif /* LWIP_IPV6 */
+ NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL);
+ netif->mtu = 0;
+ netif->flags = 0;
+#ifdef netif_get_client_data
+ memset(netif->client_data, 0, sizeof(netif->client_data));
+#endif /* LWIP_NUM_NETIF_CLIENT_DATA */
+#if LWIP_IPV6
+#if LWIP_IPV6_AUTOCONFIG
+ /* IPv6 address autoconfiguration not enabled by default */
+ netif->ip6_autoconfig_enabled = 0;
+#endif /* LWIP_IPV6_AUTOCONFIG */
+ nd6_restart_netif(netif);
+#endif /* LWIP_IPV6 */
+#if LWIP_NETIF_STATUS_CALLBACK
+ netif->status_callback = NULL;
+#endif /* LWIP_NETIF_STATUS_CALLBACK */
+#if LWIP_NETIF_LINK_CALLBACK
+ netif->link_callback = NULL;
+#endif /* LWIP_NETIF_LINK_CALLBACK */
+#if LWIP_IGMP
+ netif->igmp_mac_filter = NULL;
+#endif /* LWIP_IGMP */
+#if LWIP_IPV6 && LWIP_IPV6_MLD
+ netif->mld_mac_filter = NULL;
+#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */
+#if ENABLE_LOOPBACK
+ netif->loop_first = NULL;
+ netif->loop_last = NULL;
+#endif /* ENABLE_LOOPBACK */
+
+ /* remember netif specific state information data */
+ netif->state = state;
+ netif->num = netif_num;
+ netif->input = input;
+
+ NETIF_RESET_HINTS(netif);
+#if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS
+ netif->loop_cnt_current = 0;
+#endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */
+
+#if LWIP_IPV4
+ netif_set_addr(netif, ipaddr, netmask, gw);
+#endif /* LWIP_IPV4 */
+
+ /* call user specified initialization function for netif */
+ if (init(netif) != ERR_OK) {
+ return NULL;
+ }
+#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES
+ /* Initialize the MTU for IPv6 to the one set by the netif driver.
+ This can be updated later by RA. */
+ netif->mtu6 = netif->mtu;
+#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */
+
+#if !LWIP_SINGLE_NETIF
+ /* Assign a unique netif number in the range [0..254], so that (num+1) can
+ serve as an interface index that fits in a u8_t.
+ We assume that the new netif has not yet been added to the list here.
+ This algorithm is O(n^2), but that should be OK for lwIP.
+ */
+ {
+ struct netif *netif2;
+ int num_netifs;
+ do {
+ if (netif->num == 255) {
+ netif->num = 0;
+ }
+ num_netifs = 0;
+ for (netif2 = netif_list; netif2 != NULL; netif2 = netif2->next) {
+ LWIP_ASSERT("netif already added", netif2 != netif);
+ num_netifs++;
+ LWIP_ASSERT("too many netifs, max. supported number is 255", num_netifs <= 255);
+ if (netif2->num == netif->num) {
+ netif->num++;
+ break;
+ }
+ }
+ } while (netif2 != NULL);
+ }
+ if (netif->num == 254) {
+ netif_num = 0;
+ } else {
+ netif_num = (u8_t)(netif->num + 1);
+ }
+
+ /* add this netif to the list */
+ netif->next = netif_list;
+ netif_list = netif;
+#endif /* "LWIP_SINGLE_NETIF */
+ mib2_netif_added(netif);
+
+#if LWIP_IGMP
+ /* start IGMP processing */
+ if (netif->flags & NETIF_FLAG_IGMP) {
+ igmp_start(netif);
+ }
+#endif /* LWIP_IGMP */
+
+ LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP",
+ netif->name[0], netif->name[1]));
+#if LWIP_IPV4
+ LWIP_DEBUGF(NETIF_DEBUG, (" addr "));
+ ip4_addr_debug_print(NETIF_DEBUG, ipaddr);
+ LWIP_DEBUGF(NETIF_DEBUG, (" netmask "));
+ ip4_addr_debug_print(NETIF_DEBUG, netmask);
+ LWIP_DEBUGF(NETIF_DEBUG, (" gw "));
+ ip4_addr_debug_print(NETIF_DEBUG, gw);
+#endif /* LWIP_IPV4 */
+ LWIP_DEBUGF(NETIF_DEBUG, ("\n"));
+
+ netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_ADDED, NULL);
+
+ return netif;
+}
+
+static void
+netif_do_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr)
+{
+#if LWIP_TCP
+ tcp_netif_ip_addr_changed(old_addr, new_addr);
+#endif /* LWIP_TCP */
+#if LWIP_UDP
+ udp_netif_ip_addr_changed(old_addr, new_addr);
+#endif /* LWIP_UDP */
+#if LWIP_RAW
+ raw_netif_ip_addr_changed(old_addr, new_addr);
+#endif /* LWIP_RAW */
+}
+
+#if LWIP_IPV4
+static int
+netif_do_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr, ip_addr_t *old_addr)
+{
+ LWIP_ASSERT("invalid pointer", ipaddr != NULL);
+ LWIP_ASSERT("invalid pointer", old_addr != NULL);
+
+ /* address is actually being changed? */
+ if (ip4_addr_cmp(ipaddr, netif_ip4_addr(netif)) == 0) {
+ ip_addr_t new_addr;
+ *ip_2_ip4(&new_addr) = *ipaddr;
+ IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4);
+
+ ip_addr_copy(*old_addr, *netif_ip_addr4(netif));
+
+ LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n"));
+ netif_do_ip_addr_changed(old_addr, &new_addr);
+
+ mib2_remove_ip4(netif);
+ mib2_remove_route_ip4(0, netif);
+ /* set new IP address to netif */
+ ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr);
+ IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4);
+ mib2_add_ip4(netif);
+ mib2_add_route_ip4(0, netif);
+
+ netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4);
+
+ NETIF_STATUS_CALLBACK(netif);
+ return 1; /* address changed */
+ }
+ return 0; /* address unchanged */
+}
+
+/**
+ * @ingroup netif_ip4
+ * Change the IP address of a network interface
+ *
+ * @param netif the network interface to change
+ * @param ipaddr the new IP address
+ *
+ * @note call netif_set_addr() if you also want to change netmask and
+ * default gateway
+ */
+void
+netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr)
+{
+ ip_addr_t old_addr;
+
+ LWIP_ERROR("netif_set_ipaddr: invalid netif", netif != NULL, return);
+
+ /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
+ if (ipaddr == NULL) {
+ ipaddr = IP4_ADDR_ANY4;
+ }
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ netif_ext_callback_args_t args;
+ args.ipv4_changed.old_address = &old_addr;
+ netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_ADDRESS_CHANGED, &args);
+#endif
+ }
+}
+
+static int
+netif_do_set_netmask(struct netif *netif, const ip4_addr_t *netmask, ip_addr_t *old_nm)
+{
+ /* address is actually being changed? */
+ if (ip4_addr_cmp(netmask, netif_ip4_netmask(netif)) == 0) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ LWIP_ASSERT("invalid pointer", old_nm != NULL);
+ ip_addr_copy(*old_nm, *netif_ip_netmask4(netif));
+#else
+ LWIP_UNUSED_ARG(old_nm);
+#endif
+ mib2_remove_route_ip4(0, netif);
+ /* set new netmask to netif */
+ ip4_addr_set(ip_2_ip4(&netif->netmask), netmask);
+ IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4);
+ mib2_add_route_ip4(0, netif);
+ LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ netif->name[0], netif->name[1],
+ ip4_addr1_16(netif_ip4_netmask(netif)),
+ ip4_addr2_16(netif_ip4_netmask(netif)),
+ ip4_addr3_16(netif_ip4_netmask(netif)),
+ ip4_addr4_16(netif_ip4_netmask(netif))));
+ return 1; /* netmask changed */
+ }
+ return 0; /* netmask unchanged */
+}
+
+/**
+ * @ingroup netif_ip4
+ * Change the netmask of a network interface
+ *
+ * @param netif the network interface to change
+ * @param netmask the new netmask
+ *
+ * @note call netif_set_addr() if you also want to change ip address and
+ * default gateway
+ */
+void
+netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask)
+{
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ ip_addr_t old_nm_val;
+ ip_addr_t *old_nm = &old_nm_val;
+#else
+ ip_addr_t *old_nm = NULL;
+#endif
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("netif_set_netmask: invalid netif", netif != NULL, return);
+
+ /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
+ if (netmask == NULL) {
+ netmask = IP4_ADDR_ANY4;
+ }
+
+ if (netif_do_set_netmask(netif, netmask, old_nm)) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ netif_ext_callback_args_t args;
+ args.ipv4_changed.old_netmask = old_nm;
+ netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_NETMASK_CHANGED, &args);
+#endif
+ }
+}
+
+static int
+netif_do_set_gw(struct netif *netif, const ip4_addr_t *gw, ip_addr_t *old_gw)
+{
+ /* address is actually being changed? */
+ if (ip4_addr_cmp(gw, netif_ip4_gw(netif)) == 0) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ LWIP_ASSERT("invalid pointer", old_gw != NULL);
+ ip_addr_copy(*old_gw, *netif_ip_gw4(netif));
+#else
+ LWIP_UNUSED_ARG(old_gw);
+#endif
+
+ ip4_addr_set(ip_2_ip4(&netif->gw), gw);
+ IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4);
+ LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
+ netif->name[0], netif->name[1],
+ ip4_addr1_16(netif_ip4_gw(netif)),
+ ip4_addr2_16(netif_ip4_gw(netif)),
+ ip4_addr3_16(netif_ip4_gw(netif)),
+ ip4_addr4_16(netif_ip4_gw(netif))));
+ return 1; /* gateway changed */
+ }
+ return 0; /* gateway unchanged */
+}
+
+/**
+ * @ingroup netif_ip4
+ * Change the default gateway for a network interface
+ *
+ * @param netif the network interface to change
+ * @param gw the new default gateway
+ *
+ * @note call netif_set_addr() if you also want to change ip address and netmask
+ */
+void
+netif_set_gw(struct netif *netif, const ip4_addr_t *gw)
+{
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ ip_addr_t old_gw_val;
+ ip_addr_t *old_gw = &old_gw_val;
+#else
+ ip_addr_t *old_gw = NULL;
+#endif
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("netif_set_gw: invalid netif", netif != NULL, return);
+
+ /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
+ if (gw == NULL) {
+ gw = IP4_ADDR_ANY4;
+ }
+
+ if (netif_do_set_gw(netif, gw, old_gw)) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ netif_ext_callback_args_t args;
+ args.ipv4_changed.old_gw = old_gw;
+ netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_GATEWAY_CHANGED, &args);
+#endif
+ }
+}
+
+/**
+ * @ingroup netif_ip4
+ * Change IP address configuration for a network interface (including netmask
+ * and default gateway).
+ *
+ * @param netif the network interface to change
+ * @param ipaddr the new IP address
+ * @param netmask the new netmask
+ * @param gw the new default gateway
+ */
+void
+netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask,
+ const ip4_addr_t *gw)
+{
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ netif_nsc_reason_t change_reason = LWIP_NSC_NONE;
+ netif_ext_callback_args_t cb_args;
+ ip_addr_t old_nm_val;
+ ip_addr_t old_gw_val;
+ ip_addr_t *old_nm = &old_nm_val;
+ ip_addr_t *old_gw = &old_gw_val;
+#else
+ ip_addr_t *old_nm = NULL;
+ ip_addr_t *old_gw = NULL;
+#endif
+ ip_addr_t old_addr;
+ int remove;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
+ if (ipaddr == NULL) {
+ ipaddr = IP4_ADDR_ANY4;
+ }
+ if (netmask == NULL) {
+ netmask = IP4_ADDR_ANY4;
+ }
+ if (gw == NULL) {
+ gw = IP4_ADDR_ANY4;
+ }
+
+ remove = ip4_addr_isany(ipaddr);
+ if (remove) {
+ /* when removing an address, we have to remove it *before* changing netmask/gw
+ to ensure that tcp RST segment can be sent correctly */
+ if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED;
+ cb_args.ipv4_changed.old_address = &old_addr;
+#endif
+ }
+ }
+ if (netif_do_set_netmask(netif, netmask, old_nm)) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ change_reason |= LWIP_NSC_IPV4_NETMASK_CHANGED;
+ cb_args.ipv4_changed.old_netmask = old_nm;
+#endif
+ }
+ if (netif_do_set_gw(netif, gw, old_gw)) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ change_reason |= LWIP_NSC_IPV4_GATEWAY_CHANGED;
+ cb_args.ipv4_changed.old_gw = old_gw;
+#endif
+ }
+ if (!remove) {
+ /* set ipaddr last to ensure netmask/gw have been set when status callback is called */
+ if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED;
+ cb_args.ipv4_changed.old_address = &old_addr;
+#endif
+ }
+ }
+
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ if (change_reason != LWIP_NSC_NONE) {
+ change_reason |= LWIP_NSC_IPV4_SETTINGS_CHANGED;
+ netif_invoke_ext_callback(netif, change_reason, &cb_args);
+ }
+#endif
+}
+#endif /* LWIP_IPV4 */
+
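+/* Usage sketch (illustrative, not part of this file): reconfiguring all
+ * IPv4 settings in one call fires a single combined ext callback
+ * (LWIP_NSC_IPV4_SETTINGS_CHANGED plus the individual change flags)
+ * instead of three separate ones. 'my_netif' is a placeholder.
+ *
+ *   ip4_addr_t ip, mask, gw;
+ *   IP4_ADDR(&ip,   192, 168,   1,  10);
+ *   IP4_ADDR(&mask, 255, 255, 255,   0);
+ *   IP4_ADDR(&gw,   192, 168,   1,   1);
+ *   netif_set_addr(&my_netif, &ip, &mask, &gw);
+ */
+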
+/**
+ * @ingroup netif
+ * Remove a network interface from the list of lwIP netifs.
+ *
+ * @param netif the network interface to remove
+ */
+void
+netif_remove(struct netif *netif)
+{
+#if LWIP_IPV6
+ int i;
+#endif
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (netif == NULL) {
+ return;
+ }
+
+ netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_REMOVED, NULL);
+
+#if LWIP_IPV4
+ if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) {
+ netif_do_ip_addr_changed(netif_ip_addr4(netif), NULL);
+ }
+
+#if LWIP_IGMP
+ /* stop IGMP processing */
+ if (netif->flags & NETIF_FLAG_IGMP) {
+ igmp_stop(netif);
+ }
+#endif /* LWIP_IGMP */
+#endif /* LWIP_IPV4 */
+
+#if LWIP_IPV6
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) {
+ netif_do_ip_addr_changed(netif_ip_addr6(netif, i), NULL);
+ }
+ }
+#if LWIP_IPV6_MLD
+ /* stop MLD processing */
+ mld6_stop(netif);
+#endif /* LWIP_IPV6_MLD */
+#endif /* LWIP_IPV6 */
+ if (netif_is_up(netif)) {
+ /* set netif down before removing (call callback function) */
+ netif_set_down(netif);
+ }
+
+ mib2_remove_ip4(netif);
+
+ /* this netif is default? */
+ if (netif_default == netif) {
+ /* reset default netif */
+ netif_set_default(NULL);
+ }
+#if !LWIP_SINGLE_NETIF
+ /* is it the first netif? */
+ if (netif_list == netif) {
+ netif_list = netif->next;
+ } else {
+ /* look for netif further down the list */
+ struct netif *tmp_netif;
+ NETIF_FOREACH(tmp_netif) {
+ if (tmp_netif->next == netif) {
+ tmp_netif->next = netif->next;
+ break;
+ }
+ }
+ if (tmp_netif == NULL) {
+ return; /* netif is not on the list */
+ }
+ }
+#endif /* !LWIP_SINGLE_NETIF */
+ mib2_netif_removed(netif);
+#if LWIP_NETIF_REMOVE_CALLBACK
+ if (netif->remove_callback) {
+ netif->remove_callback(netif);
+ }
+#endif /* LWIP_NETIF_REMOVE_CALLBACK */
+ LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") );
+}
+
+/**
+ * @ingroup netif
+ * Set a network interface as the default network interface
+ * (used to output all packets for which no specific route is found)
+ *
+ * @param netif the default network interface
+ */
+void
+netif_set_default(struct netif *netif)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (netif == NULL) {
+ /* remove default route */
+ mib2_remove_route_ip4(1, netif);
+ } else {
+ /* install default route */
+ mib2_add_route_ip4(1, netif);
+ }
+ netif_default = netif;
+ LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n",
+ netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\''));
+}
+
+/**
+ * @ingroup netif
+ * Bring an interface up, available for processing
+ * traffic.
+ */
+void
+netif_set_up(struct netif *netif)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("netif_set_up: invalid netif", netif != NULL, return);
+
+ if (!(netif->flags & NETIF_FLAG_UP)) {
+ netif_set_flags(netif, NETIF_FLAG_UP);
+
+ MIB2_COPY_SYSUPTIME_TO(&netif->ts);
+
+ NETIF_STATUS_CALLBACK(netif);
+
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ {
+ netif_ext_callback_args_t args;
+ args.status_changed.state = 1;
+ netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args);
+ }
+#endif
+
+ netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6);
+#if LWIP_IPV6
+ nd6_restart_netif(netif);
+#endif /* LWIP_IPV6 */
+ }
+}
+
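+/* Typical bring-up sequence (illustrative sketch; the driver init function
+ * 'my_driver_init' is a placeholder, tcpip_input is the usual input function
+ * in OS-mode ports):
+ *
+ *   struct netif my_netif;
+ *   netif_add(&my_netif, &ip, &mask, &gw, NULL, my_driver_init, tcpip_input);
+ *   netif_set_default(&my_netif);
+ *   netif_set_up(&my_netif);
+ *   // the driver calls netif_set_link_up() once the PHY reports link
+ */
+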
+/** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change
+ */
+static void
+netif_issue_reports(struct netif *netif, u8_t report_type)
+{
+ LWIP_ASSERT("netif_issue_reports: invalid netif", netif != NULL);
+
+ /* Only send reports when both link and admin states are up */
+ if (!(netif->flags & NETIF_FLAG_LINK_UP) ||
+ !(netif->flags & NETIF_FLAG_UP)) {
+ return;
+ }
+
+#if LWIP_IPV4
+ if ((report_type & NETIF_REPORT_TYPE_IPV4) &&
+ !ip4_addr_isany_val(*netif_ip4_addr(netif))) {
+#if LWIP_ARP
+ /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */
+ if (netif->flags & (NETIF_FLAG_ETHARP)) {
+ etharp_gratuitous(netif);
+ }
+#endif /* LWIP_ARP */
+
+#if LWIP_IGMP
+ /* resend IGMP memberships */
+ if (netif->flags & NETIF_FLAG_IGMP) {
+ igmp_report_groups(netif);
+ }
+#endif /* LWIP_IGMP */
+ }
+#endif /* LWIP_IPV4 */
+
+#if LWIP_IPV6
+ if (report_type & NETIF_REPORT_TYPE_IPV6) {
+#if LWIP_IPV6_MLD
+ /* send mld memberships */
+ mld6_report_groups(netif);
+#endif /* LWIP_IPV6_MLD */
+ }
+#endif /* LWIP_IPV6 */
+}
+
+/**
+ * @ingroup netif
+ * Bring an interface down, disabling any traffic processing.
+ */
+void
+netif_set_down(struct netif *netif)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("netif_set_down: invalid netif", netif != NULL, return);
+
+ if (netif->flags & NETIF_FLAG_UP) {
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ {
+ netif_ext_callback_args_t args;
+ args.status_changed.state = 0;
+ netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args);
+ }
+#endif
+
+ netif_clear_flags(netif, NETIF_FLAG_UP);
+ MIB2_COPY_SYSUPTIME_TO(&netif->ts);
+
+#if LWIP_IPV4 && LWIP_ARP
+ if (netif->flags & NETIF_FLAG_ETHARP) {
+ etharp_cleanup_netif(netif);
+ }
+#endif /* LWIP_IPV4 && LWIP_ARP */
+
+#if LWIP_IPV6
+ nd6_cleanup_netif(netif);
+#endif /* LWIP_IPV6 */
+
+ NETIF_STATUS_CALLBACK(netif);
+ }
+}
+
+#if LWIP_NETIF_STATUS_CALLBACK
+/**
+ * @ingroup netif
+ * Set callback to be called when interface is brought up/down or address is changed while up
+ */
+void
+netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (netif) {
+ netif->status_callback = status_callback;
+ }
+}
+#endif /* LWIP_NETIF_STATUS_CALLBACK */
+
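+/* Sketch of a status callback (assumes LWIP_NETIF_STATUS_CALLBACK==1;
+ * 'my_status_cb' is a placeholder name):
+ *
+ *   static void my_status_cb(struct netif *n)
+ *   {
+ *     if (netif_is_up(n)) {
+ *       LWIP_DEBUGF(NETIF_DEBUG, ("netif is up\n"));
+ *     }
+ *   }
+ *   ...
+ *   netif_set_status_callback(&my_netif, my_status_cb);
+ */
+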
+#if LWIP_NETIF_REMOVE_CALLBACK
+/**
+ * @ingroup netif
+ * Set callback to be called when the interface has been removed
+ */
+void
+netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (netif) {
+ netif->remove_callback = remove_callback;
+ }
+}
+#endif /* LWIP_NETIF_REMOVE_CALLBACK */
+
+/**
+ * @ingroup netif
+ * Called by a driver when its link goes up
+ */
+void
+netif_set_link_up(struct netif *netif)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("netif_set_link_up: invalid netif", netif != NULL, return);
+
+ if (!(netif->flags & NETIF_FLAG_LINK_UP)) {
+ netif_set_flags(netif, NETIF_FLAG_LINK_UP);
+
+#if LWIP_DHCP
+ dhcp_network_changed(netif);
+#endif /* LWIP_DHCP */
+
+#if LWIP_AUTOIP
+ autoip_network_changed(netif);
+#endif /* LWIP_AUTOIP */
+
+ netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6);
+#if LWIP_IPV6
+ nd6_restart_netif(netif);
+#endif /* LWIP_IPV6 */
+
+ NETIF_LINK_CALLBACK(netif);
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ {
+ netif_ext_callback_args_t args;
+ args.link_changed.state = 1;
+ netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args);
+ }
+#endif
+ }
+}
+
+/**
+ * @ingroup netif
+ * Called by a driver when its link goes down
+ */
+void
+netif_set_link_down(struct netif *netif)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("netif_set_link_down: invalid netif", netif != NULL, return);
+
+ if (netif->flags & NETIF_FLAG_LINK_UP) {
+ netif_clear_flags(netif, NETIF_FLAG_LINK_UP);
+ NETIF_LINK_CALLBACK(netif);
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ {
+ netif_ext_callback_args_t args;
+ args.link_changed.state = 0;
+ netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args);
+ }
+#endif
+ }
+}
+
+#if LWIP_NETIF_LINK_CALLBACK
+/**
+ * @ingroup netif
+ * Set callback to be called when link is brought up/down
+ */
+void
+netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (netif) {
+ netif->link_callback = link_callback;
+ }
+}
+#endif /* LWIP_NETIF_LINK_CALLBACK */
+
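+/* A link callback is registered the same way (assumes
+ * LWIP_NETIF_LINK_CALLBACK==1); it fires from netif_set_link_up() and
+ * netif_set_link_down(). 'my_link_cb' is a placeholder:
+ *
+ *   netif_set_link_callback(&my_netif, my_link_cb);
+ */
+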
+#if ENABLE_LOOPBACK
+/**
+ * @ingroup netif
+ * Send an IP packet to be received on the same netif (loopif-like).
+ * The pbuf is simply copied and handed back to netif->input.
+ * In multithreaded mode, this is done directly since netif->input must put
+ * the packet on a queue.
+ * In callback mode, the packet is put on an internal queue and is fed to
+ * netif->input by netif_poll().
+ *
+ * @param netif the lwip network interface structure
+ * @param p the (IP) packet to 'send'
+ * @return ERR_OK if the packet has been sent
+ * ERR_MEM if the pbuf used to copy the packet couldn't be allocated
+ */
+err_t
+netif_loop_output(struct netif *netif, struct pbuf *p)
+{
+ struct pbuf *r;
+ err_t err;
+ struct pbuf *last;
+#if LWIP_LOOPBACK_MAX_PBUFS
+ u16_t clen = 0;
+#endif /* LWIP_LOOPBACK_MAX_PBUFS */
+ /* If we have a loopif, SNMP counters are adjusted for it,
+ * if not they are adjusted for 'netif'. */
+#if MIB2_STATS
+#if LWIP_HAVE_LOOPIF
+ struct netif *stats_if = &loop_netif;
+#else /* LWIP_HAVE_LOOPIF */
+ struct netif *stats_if = netif;
+#endif /* LWIP_HAVE_LOOPIF */
+#endif /* MIB2_STATS */
+#if LWIP_NETIF_LOOPBACK_MULTITHREADING
+ u8_t schedule_poll = 0;
+#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */
+ SYS_ARCH_DECL_PROTECT(lev);
+
+ LWIP_ASSERT("netif_loop_output: invalid netif", netif != NULL);
+ LWIP_ASSERT("netif_loop_output: invalid pbuf", p != NULL);
+
+ /* Allocate a new pbuf */
+ r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM);
+ if (r == NULL) {
+ LINK_STATS_INC(link.memerr);
+ LINK_STATS_INC(link.drop);
+ MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards);
+ return ERR_MEM;
+ }
+#if LWIP_LOOPBACK_MAX_PBUFS
+ clen = pbuf_clen(r);
+ /* check for overflow or too many pbuf on queue */
+ if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) ||
+ ((netif->loop_cnt_current + clen) > LWIP_MIN(LWIP_LOOPBACK_MAX_PBUFS, 0xFFFF))) {
+ pbuf_free(r);
+ LINK_STATS_INC(link.memerr);
+ LINK_STATS_INC(link.drop);
+ MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards);
+ return ERR_MEM;
+ }
+ netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current + clen);
+#endif /* LWIP_LOOPBACK_MAX_PBUFS */
+
+ /* Copy the whole pbuf queue p into the single pbuf r */
+ if ((err = pbuf_copy(r, p)) != ERR_OK) {
+ pbuf_free(r);
+ LINK_STATS_INC(link.memerr);
+ LINK_STATS_INC(link.drop);
+ MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards);
+ return err;
+ }
+
+ /* Put the packet on a linked list which gets emptied through calling
+ netif_poll(). */
+
+ /* let last point to the last pbuf in chain r */
+ for (last = r; last->next != NULL; last = last->next) {
+ /* nothing to do here, just get to the last pbuf */
+ }
+
+ SYS_ARCH_PROTECT(lev);
+ if (netif->loop_first != NULL) {
+ LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL);
+ netif->loop_last->next = r;
+ netif->loop_last = last;
+ } else {
+ netif->loop_first = r;
+ netif->loop_last = last;
+#if LWIP_NETIF_LOOPBACK_MULTITHREADING
+ /* No existing packets queued, schedule poll */
+ schedule_poll = 1;
+#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */
+ }
+ SYS_ARCH_UNPROTECT(lev);
+
+ LINK_STATS_INC(link.xmit);
+ MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len);
+ MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts);
+
+#if LWIP_NETIF_LOOPBACK_MULTITHREADING
+ /* For multithreading environment, schedule a call to netif_poll */
+ if (schedule_poll) {
+ tcpip_try_callback((tcpip_callback_fn)netif_poll, netif);
+ }
+#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */
+
+ return ERR_OK;
+}
+
+#if LWIP_HAVE_LOOPIF
+#if LWIP_IPV4
+static err_t
+netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr)
+{
+ LWIP_UNUSED_ARG(addr);
+ return netif_loop_output(netif, p);
+}
+#endif /* LWIP_IPV4 */
+
+#if LWIP_IPV6
+static err_t
+netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr)
+{
+ LWIP_UNUSED_ARG(addr);
+ return netif_loop_output(netif, p);
+}
+#endif /* LWIP_IPV6 */
+#endif /* LWIP_HAVE_LOOPIF */
+
+
+/**
+ * Call netif_poll() in the main loop of your application. This is to prevent
+ * reentering non-reentrant functions like tcp_input(). Packets passed to
+ * netif_loop_output() are put on a list that is passed to netif->input() by
+ * netif_poll().
+ */
+void
+netif_poll(struct netif *netif)
+{
+ /* If we have a loopif, SNMP counters are adjusted for it,
+ * if not they are adjusted for 'netif'. */
+#if MIB2_STATS
+#if LWIP_HAVE_LOOPIF
+ struct netif *stats_if = &loop_netif;
+#else /* LWIP_HAVE_LOOPIF */
+ struct netif *stats_if = netif;
+#endif /* LWIP_HAVE_LOOPIF */
+#endif /* MIB2_STATS */
+ SYS_ARCH_DECL_PROTECT(lev);
+
+ LWIP_ASSERT("netif_poll: invalid netif", netif != NULL);
+
+ /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */
+ SYS_ARCH_PROTECT(lev);
+ while (netif->loop_first != NULL) {
+ struct pbuf *in, *in_end;
+#if LWIP_LOOPBACK_MAX_PBUFS
+ u8_t clen = 1;
+#endif /* LWIP_LOOPBACK_MAX_PBUFS */
+
+ in = in_end = netif->loop_first;
+ while (in_end->len != in_end->tot_len) {
+ LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL);
+ in_end = in_end->next;
+#if LWIP_LOOPBACK_MAX_PBUFS
+ clen++;
+#endif /* LWIP_LOOPBACK_MAX_PBUFS */
+ }
+#if LWIP_LOOPBACK_MAX_PBUFS
+ /* adjust the number of pbufs on queue */
+ LWIP_ASSERT("netif->loop_cnt_current underflow",
+ ((netif->loop_cnt_current - clen) < netif->loop_cnt_current));
+ netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current - clen);
+#endif /* LWIP_LOOPBACK_MAX_PBUFS */
+
+ /* 'in_end' now points to the last pbuf from 'in' */
+ if (in_end == netif->loop_last) {
+ /* this was the last pbuf in the list */
+ netif->loop_first = netif->loop_last = NULL;
+ } else {
+ /* pop the pbuf off the list */
+ netif->loop_first = in_end->next;
+ LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL);
+ }
+ /* De-queue the pbuf from its successors on the 'loop_' list. */
+ in_end->next = NULL;
+ SYS_ARCH_UNPROTECT(lev);
+
+ in->if_idx = netif_get_index(netif);
+
+ LINK_STATS_INC(link.recv);
+ MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len);
+ MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts);
+ /* loopback packets are always IP packets! */
+ if (ip_input(in, netif) != ERR_OK) {
+ pbuf_free(in);
+ }
+ SYS_ARCH_PROTECT(lev);
+ }
+ SYS_ARCH_UNPROTECT(lev);
+}
+
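+/* Sketch of a NO_SYS-style main loop draining the loopback queue
+ * (assumes ENABLE_LOOPBACK and LWIP_NETIF_LOOPBACK_MULTITHREADING==0;
+ * 'my_driver_poll' is a placeholder for driver RX handling):
+ *
+ *   while (1) {
+ *     my_driver_poll(&my_netif);
+ *     sys_check_timeouts();
+ *     netif_poll(&my_netif);   // feed queued loopback packets to input
+ *   }
+ */
+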
+#if !LWIP_NETIF_LOOPBACK_MULTITHREADING
+/**
+ * Calls netif_poll() for every netif on the netif_list.
+ */
+void
+netif_poll_all(void)
+{
+ struct netif *netif;
+ /* loop through netifs */
+ NETIF_FOREACH(netif) {
+ netif_poll(netif);
+ }
+}
+#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */
+#endif /* ENABLE_LOOPBACK */
+
+#if LWIP_NUM_NETIF_CLIENT_DATA > 0
+/**
+ * @ingroup netif_cd
+ * Allocate an index to store data in the client_data member of struct netif.
+ * The returned value is an index into that array.
+ * @see LWIP_NUM_NETIF_CLIENT_DATA
+ */
+u8_t
+netif_alloc_client_data_id(void)
+{
+ u8_t result = netif_client_id;
+ netif_client_id++;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_NUM_NETIF_CLIENT_DATA > 256
+#error LWIP_NUM_NETIF_CLIENT_DATA must be <= 256
+#endif
+ LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA);
+ return (u8_t)(result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX);
+}
+#endif
+
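+/* Sketch of client-data usage (assumes LWIP_NUM_NETIF_CLIENT_DATA > 0;
+ * 'my_state_ptr' is a placeholder):
+ *
+ *   static u8_t my_id;                        // allocate once at startup
+ *   my_id = netif_alloc_client_data_id();
+ *   netif_set_client_data(&my_netif, my_id, my_state_ptr);
+ *   void *p = netif_get_client_data(&my_netif, my_id);
+ */
+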
+#if LWIP_IPV6
+/**
+ * @ingroup netif_ip6
+ * Change an IPv6 address of a network interface
+ *
+ * @param netif the network interface to change
+ * @param addr_idx index of the IPv6 address
+ * @param addr6 the new IPv6 address
+ *
+ * @note call netif_ip6_addr_set_state() to set the address valid/tentative
+ */
+void
+netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ASSERT("netif_ip6_addr_set: invalid netif", netif != NULL);
+ LWIP_ASSERT("netif_ip6_addr_set: invalid addr6", addr6 != NULL);
+
+ netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1],
+ addr6->addr[2], addr6->addr[3]);
+}
+
+/*
+ * Change an IPv6 address of a network interface (internal version taking 4 * u32_t)
+ *
+ * @param netif the network interface to change
+ * @param addr_idx index of the IPv6 address
+ * @param i0 word0 of the new IPv6 address
+ * @param i1 word1 of the new IPv6 address
+ * @param i2 word2 of the new IPv6 address
+ * @param i3 word3 of the new IPv6 address
+ */
+void
+netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3)
+{
+ ip_addr_t old_addr;
+ ip_addr_t new_ipaddr;
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+ LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES);
+
+ ip6_addr_copy(*ip_2_ip6(&old_addr), *netif_ip6_addr(netif, addr_idx));
+ IP_SET_TYPE_VAL(old_addr, IPADDR_TYPE_V6);
+
+ /* address is actually being changed? */
+ if ((ip_2_ip6(&old_addr)->addr[0] != i0) || (ip_2_ip6(&old_addr)->addr[1] != i1) ||
+ (ip_2_ip6(&old_addr)->addr[2] != i2) || (ip_2_ip6(&old_addr)->addr[3] != i3)) {
+ LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n"));
+
+ IP_ADDR6(&new_ipaddr, i0, i1, i2, i3);
+ ip6_addr_assign_zone(ip_2_ip6(&new_ipaddr), IP6_UNICAST, netif);
+
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) {
+ netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr);
+ }
+ /* @todo: remove/readd mib2 ip6 entries? */
+
+ ip_addr_copy(netif->ip6_addr[addr_idx], new_ipaddr);
+
+ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) {
+ netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6);
+ NETIF_STATUS_CALLBACK(netif);
+ }
+
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ {
+ netif_ext_callback_args_t args;
+ args.ipv6_set.addr_index = addr_idx;
+ args.ipv6_set.old_address = &old_addr;
+ netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_SET, &args);
+ }
+#endif
+ }
+
+ LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n",
+ addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)),
+ netif_ip6_addr_state(netif, addr_idx)));
+}
+
+/**
+ * @ingroup netif_ip6
+ * Change the state of an IPv6 address of a network interface
+ * (INVALID, TENTATIVE, PREFERRED, DEPRECATED, where TENTATIVE
+ * includes the number of checks done, see ip6_addr.h)
+ *
+ * @param netif the network interface to change
+ * @param addr_idx index of the IPv6 address
+ * @param state the new IPv6 address state
+ */
+void
+netif_ip6_addr_set_state(struct netif *netif, s8_t addr_idx, u8_t state)
+{
+ u8_t old_state;
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ASSERT("netif != NULL", netif != NULL);
+ LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES);
+
+ old_state = netif_ip6_addr_state(netif, addr_idx);
+ /* state is actually being changed? */
+ if (old_state != state) {
+ u8_t old_valid = old_state & IP6_ADDR_VALID;
+ u8_t new_valid = state & IP6_ADDR_VALID;
+ LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n"));
+
+#if LWIP_IPV6_MLD
+ /* Reevaluate solicited-node multicast group membership. */
+ if (netif->flags & NETIF_FLAG_MLD6) {
+ nd6_adjust_mld_membership(netif, addr_idx, state);
+ }
+#endif /* LWIP_IPV6_MLD */
+
+ if (old_valid && !new_valid) {
+ /* address about to be removed by setting invalid */
+ netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL);
+ /* @todo: remove mib2 ip6 entries? */
+ }
+ netif->ip6_addr_state[addr_idx] = state;
+
+ if (!old_valid && new_valid) {
+ /* address added by setting valid */
+ /* This is a good moment to check that the address is properly zoned. */
+ IP6_ADDR_ZONECHECK_NETIF(netif_ip6_addr(netif, addr_idx), netif);
+ /* @todo: add mib2 ip6 entries? */
+ netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6);
+ }
+ if ((old_state & ~IP6_ADDR_TENTATIVE_COUNT_MASK) !=
+ (state & ~IP6_ADDR_TENTATIVE_COUNT_MASK)) {
+ /* address state has changed -> call the callback function */
+ NETIF_STATUS_CALLBACK(netif);
+ }
+
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+ {
+ netif_ext_callback_args_t args;
+ args.ipv6_addr_state_changed.addr_index = addr_idx;
+ args.ipv6_addr_state_changed.old_state = old_state;
+ args.ipv6_addr_state_changed.address = netif_ip_addr6(netif, addr_idx);
+ netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_ADDR_STATE_CHANGED, &args);
+ }
+#endif
+ }
+ LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n",
+ addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)),
+ netif_ip6_addr_state(netif, addr_idx)));
+}
+
+/**
+ * Checks if a specific local address is present on the netif and returns its
+ * index. Depending on its state, it may or may not be assigned to the
+ * interface (as per RFC terminology).
+ *
+ * The given address may or may not be zoned (i.e., have a zone index other
+ * than IP6_NO_ZONE). If the address is zoned, it must have the correct zone
+ * for the given netif, or no match will be found.
+ *
+ * @param netif the netif to check
+ * @param ip6addr the IPv6 address to find
+ * @return >= 0: address found, this is its index
+ * -1: address not found on this netif
+ */
+s8_t
+netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr)
+{
+ s8_t i;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ASSERT("netif_get_ip6_addr_match: invalid netif", netif != NULL);
+ LWIP_ASSERT("netif_get_ip6_addr_match: invalid ip6addr", ip6addr != NULL);
+
+#if LWIP_IPV6_SCOPES
+ if (ip6_addr_has_zone(ip6addr) && !ip6_addr_test_zone(ip6addr, netif)) {
+ return -1; /* wrong zone, no match */
+ }
+#endif /* LWIP_IPV6_SCOPES */
+
+ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) &&
+ ip6_addr_cmp_zoneless(netif_ip6_addr(netif, i), ip6addr)) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+/**
+ * @ingroup netif_ip6
+ * Create a link-local IPv6 address on a netif (stored in slot 0)
+ *
+ * @param netif the netif to create the address on
+ * @param from_mac_48bit if != 0, assume hwaddr is a 48-bit MAC address (std conversion)
+ * if == 0, use hwaddr directly as interface ID
+ */
+void
+netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit)
+{
+ u8_t i, addr_index;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ASSERT("netif_create_ip6_linklocal_address: invalid netif", netif != NULL);
+
+ /* Link-local prefix. */
+ ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul);
+ ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0;
+
+ /* Generate interface ID. */
+ if (from_mac_48bit) {
+    /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to an EUI-64 interface ID and invert the Universal/Local bit. */
+ ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) |
+ ((u32_t)(netif->hwaddr[1]) << 16) |
+ ((u32_t)(netif->hwaddr[2]) << 8) |
+ (0xff));
+ ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((u32_t)(0xfeul << 24) |
+ ((u32_t)(netif->hwaddr[3]) << 16) |
+ ((u32_t)(netif->hwaddr[4]) << 8) |
+ (netif->hwaddr[5]));
+ } else {
+ /* Use hwaddr directly as interface ID. */
+ ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0;
+ ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0;
+
+ addr_index = 3;
+ for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) {
+ if (i == 4) {
+ addr_index--;
+ }
+ ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= lwip_htonl(((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03)));
+ }
+ }
+
+ /* Set a link-local zone. Even though the zone is implied by the owning
+ * netif, setting the zone anyway has two important conceptual advantages:
+ * 1) it avoids the need for a ton of exceptions in internal code, allowing
+ * e.g. ip6_addr_cmp() to be used on local addresses;
+ * 2) the properly zoned address is visible externally, e.g. when any outside
+ * code enumerates available addresses or uses one to bind a socket.
+ * Any external code unaware of address scoping is likely to just ignore the
+ * zone field, so this should not create any compatibility problems. */
+ ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[0]), IP6_UNICAST, netif);
+
+ /* Set address state. */
+#if LWIP_IPV6_DUP_DETECT_ATTEMPTS
+ /* Will perform duplicate address detection (DAD). */
+ netif_ip6_addr_set_state(netif, 0, IP6_ADDR_TENTATIVE);
+#else
+ /* Consider address valid. */
+ netif_ip6_addr_set_state(netif, 0, IP6_ADDR_PREFERRED);
+#endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */
+}
+
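+/* Worked example of the EUI-64 conversion above (illustrative): a MAC of
+ * 00:11:22:33:44:55 yields the interface ID 02:11:22:ff:fe:33:44:55
+ * (Universal/Local bit inverted, ff:fe inserted in the middle), i.e. the
+ * link-local address fe80::211:22ff:fe33:4455. */
+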
+/**
+ * @ingroup netif_ip6
+ * This function allows for the easy addition of a new IPv6 address to an interface.
+ * It takes care of finding an empty slot and then sets the address tentative
+ * (to make sure that all the subsequent processing happens).
+ *
+ * @param netif netif to add the address on
+ * @param ip6addr address to add
+ * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here
+ */
+err_t
+netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx)
+{
+ s8_t i;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ASSERT("netif_add_ip6_address: invalid netif", netif != NULL);
+ LWIP_ASSERT("netif_add_ip6_address: invalid ip6addr", ip6addr != NULL);
+
+ i = netif_get_ip6_addr_match(netif, ip6addr);
+ if (i >= 0) {
+ /* Address already added */
+ if (chosen_idx != NULL) {
+ *chosen_idx = i;
+ }
+ return ERR_OK;
+ }
+
+ /* Find a free slot. The first one is reserved for link-local addresses. */
+ for (i = ip6_addr_islinklocal(ip6addr) ? 0 : 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+ if (ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) {
+ ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr);
+ ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[i]), IP6_UNICAST, netif);
+ netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE);
+ if (chosen_idx != NULL) {
+ *chosen_idx = i;
+ }
+ return ERR_OK;
+ }
+ }
+
+ if (chosen_idx != NULL) {
+ *chosen_idx = -1;
+ }
+ return ERR_VAL;
+}
+
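+/* Usage sketch (illustrative): adding a static address; it starts out
+ * tentative until duplicate address detection completes. 2001:db8::/32 is
+ * the documentation prefix, used here only as an example:
+ *
+ *   ip6_addr_t ip6;
+ *   s8_t idx;
+ *   ip6addr_aton("2001:db8::1", &ip6);
+ *   if (netif_add_ip6_address(&my_netif, &ip6, &idx) == ERR_OK) {
+ *     // address stored at netif->ip6_addr[idx]
+ *   }
+ */
+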
+/** Dummy IPv6 output function for netifs not supporting IPv6
+ */
+static err_t
+netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr)
+{
+ LWIP_UNUSED_ARG(netif);
+ LWIP_UNUSED_ARG(p);
+ LWIP_UNUSED_ARG(ipaddr);
+
+ return ERR_IF;
+}
+#endif /* LWIP_IPV6 */
+
+#if LWIP_IPV4
+/** Dummy IPv4 output function for netifs not supporting IPv4
+ */
+static err_t
+netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr)
+{
+ LWIP_UNUSED_ARG(netif);
+ LWIP_UNUSED_ARG(p);
+ LWIP_UNUSED_ARG(ipaddr);
+
+ return ERR_IF;
+}
+#endif /* LWIP_IPV4 */
+
+/**
+* @ingroup netif
+* Return the interface index for the netif with name
+* or NETIF_NO_INDEX if not found/on error
+*
+* @param name the name of the netif
+*/
+u8_t
+netif_name_to_index(const char *name)
+{
+ struct netif *netif = netif_find(name);
+ if (netif != NULL) {
+ return netif_get_index(netif);
+ }
+ /* No name found, return invalid index */
+ return NETIF_NO_INDEX;
+}
+
+/**
+* @ingroup netif
+* Return the interface name for the netif matching index
+* or NULL if not found/on error
+*
+* @param idx the interface index of the netif
+* @param name char buffer of at least NETIF_NAMESIZE bytes
+*/
+char *
+netif_index_to_name(u8_t idx, char *name)
+{
+ struct netif *netif = netif_get_by_index(idx);
+
+ if (netif != NULL) {
+ name[0] = netif->name[0];
+ name[1] = netif->name[1];
+ lwip_itoa(&name[2], NETIF_NAMESIZE - 2, netif_index_to_num(idx));
+ return name;
+ }
+ return NULL;
+}
+
+/**
+* @ingroup netif
+* Return the interface for the netif index
+*
+* @param idx index of netif to find
+*/
+struct netif *
+netif_get_by_index(u8_t idx)
+{
+ struct netif *netif;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (idx != NETIF_NO_INDEX) {
+ NETIF_FOREACH(netif) {
+ if (idx == netif_get_index(netif)) {
+ return netif; /* found! */
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * @ingroup netif
+ * Find a network interface by searching for its name
+ *
+ * @param name the name of the netif (like netif->name) plus concatenated number
+ * in ascii representation (e.g. 'en0')
+ */
+struct netif *
+netif_find(const char *name)
+{
+ struct netif *netif;
+ u8_t num;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (name == NULL) {
+ return NULL;
+ }
+
+ num = (u8_t)atoi(&name[2]);
+
+ NETIF_FOREACH(netif) {
+ if (num == netif->num &&
+ name[0] == netif->name[0] &&
+ name[1] == netif->name[1]) {
+ LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1]));
+ return netif;
+ }
+ }
+ LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1]));
+ return NULL;
+}
+
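+/* Lookup sketch (illustrative): a netif name is the two-character
+ * netif->name plus the number in ASCII, e.g. "en0":
+ *
+ *   struct netif *n = netif_find("en0");
+ *   u8_t idx = netif_name_to_index("en0");   // NETIF_NO_INDEX if absent
+ *   char buf[NETIF_NAMESIZE];
+ *   char *name = netif_index_to_name(idx, buf);
+ */
+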
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+/**
+ * @ingroup netif
+ * Add extended netif events listener
+ * @param callback pointer to listener structure
+ * @param fn callback function
+ */
+void
+netif_add_ext_callback(netif_ext_callback_t *callback, netif_ext_callback_fn fn)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ASSERT("callback must be != NULL", callback != NULL);
+ LWIP_ASSERT("fn must be != NULL", fn != NULL);
+
+ callback->callback_fn = fn;
+ callback->next = ext_callback;
+ ext_callback = callback;
+}
+
+/**
+ * @ingroup netif
+ * Remove extended netif events listener
+ * @param callback pointer to listener structure
+ */
+void
+netif_remove_ext_callback(netif_ext_callback_t* callback)
+{
+ netif_ext_callback_t *last, *iter;
+
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ASSERT("callback must be != NULL", callback != NULL);
+
+ if (ext_callback == NULL) {
+ return;
+ }
+
+ if (callback == ext_callback) {
+ ext_callback = ext_callback->next;
+ } else {
+ last = ext_callback;
+ for (iter = ext_callback->next; iter != NULL; last = iter, iter = iter->next) {
+ if (iter == callback) {
+ LWIP_ASSERT("last != NULL", last != NULL);
+ last->next = callback->next;
+ callback->next = NULL;
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * Invoke extended netif status event
+ * @param netif netif that is affected by change
+ * @param reason change reason
+ * @param args depends on reason, see reason description
+ */
+void
+netif_invoke_ext_callback(struct netif *netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t *args)
+{
+ netif_ext_callback_t *callback = ext_callback;
+
+ LWIP_ASSERT("netif must be != NULL", netif != NULL);
+
+ while (callback != NULL) {
+ callback->callback_fn(netif, reason, args);
+ callback = callback->next;
+ }
+}
+#endif /* LWIP_NETIF_EXT_STATUS_CALLBACK */
diff --git a/lwip/src/core/pbuf.c b/lwip/src/core/pbuf.c
new file mode 100644
index 0000000..a209e0c
--- /dev/null
+++ b/lwip/src/core/pbuf.c
@@ -0,0 +1,1514 @@
+/**
+ * @file
+ * Packet buffer management
+ */
+
+/**
+ * @defgroup pbuf Packet buffers (PBUF)
+ * @ingroup infrastructure
+ *
+ * Packets are built from the pbuf data structure. It supports dynamic
+ * memory allocation for packet contents or can reference externally
+ * managed packet contents both in RAM and ROM. Quick allocation for
+ * incoming packets is provided through pools with fixed sized pbufs.
+ *
+ * A packet may span over multiple pbufs, chained as a singly linked
+ * list. This is called a "pbuf chain".
+ *
+ * Multiple packets may be queued, also using this singly linked list.
+ * This is called a "packet queue".
+ *
+ * So, a packet queue consists of one or more pbuf chains, each of
+ * which consist of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
+ * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
+ *
+ * The difference between a pbuf chain and a packet queue is subtle but
+ * precise.
+ *
+ * The last pbuf of a packet has a ->tot_len field that equals the
+ * ->len field. It can be found by traversing the list. If the last
+ * pbuf of a packet has a ->next field other than NULL, more packets
+ * are on the queue.
+ *
+ * Therefore, when looping through the pbufs of a single packet, the
+ * loop end condition is (tot_len == p->len), NOT (next == NULL).
+ *
+ * Example of custom pbuf usage: @ref zerocopyrx
+ */
+
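+/* Traversal sketch matching the rule above (illustrative; 'process' is a
+ * placeholder for application code):
+ *
+ *   struct pbuf *q = p;
+ *   for (;;) {
+ *     process(q->payload, q->len);
+ *     if (q->tot_len == q->len) break;   // end of THIS packet
+ *     q = q->next;
+ *   }
+ */
+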
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/pbuf.h"
+#include "lwip/stats.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/sys.h"
+#include "lwip/netif.h"
+#if LWIP_TCP && TCP_QUEUE_OOSEQ
+#include "lwip/priv/tcp_priv.h"
+#endif
+#if LWIP_CHECKSUM_ON_COPY
+#include "lwip/inet_chksum.h"
+#endif
+
+#include <string.h>
+
+#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
+/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
+ aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
+#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)
+
+static const struct pbuf *
+pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset);
+
+#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
+#define PBUF_POOL_IS_EMPTY()
+#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
+
+#if !NO_SYS
+#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
+#include "lwip/tcpip.h"
+#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \
+ if (tcpip_try_callback(pbuf_free_ooseq_callback, NULL) != ERR_OK) { \
+ SYS_ARCH_PROTECT(old_level); \
+ pbuf_free_ooseq_pending = 0; \
+ SYS_ARCH_UNPROTECT(old_level); \
+ } } while(0)
+#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
+#endif /* !NO_SYS */
+
+volatile u8_t pbuf_free_ooseq_pending;
+#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()
+
+/**
+ * Attempt to reclaim some memory from queued out-of-sequence TCP segments
+ * if we run out of pool pbufs. It's better to give priority to new packets
+ * if we're running out.
+ *
+ * This must be done in the correct thread context therefore this function
+ * can only be used with NO_SYS=0 and through tcpip_callback.
+ */
+#if !NO_SYS
+static
+#endif /* !NO_SYS */
+void
+pbuf_free_ooseq(void)
+{
+ struct tcp_pcb *pcb;
+ SYS_ARCH_SET(pbuf_free_ooseq_pending, 0);
+
+ for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
+ if (pcb->ooseq != NULL) {
+ /** Free the ooseq pbufs of one PCB only */
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
+ tcp_free_ooseq(pcb);
+ return;
+ }
+ }
+}
+
+#if !NO_SYS
+/**
+ * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
+ */
+static void
+pbuf_free_ooseq_callback(void *arg)
+{
+ LWIP_UNUSED_ARG(arg);
+ pbuf_free_ooseq();
+}
+#endif /* !NO_SYS */
+
+/** Queue a call to pbuf_free_ooseq if not already queued. */
+static void
+pbuf_pool_is_empty(void)
+{
+#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
+ SYS_ARCH_SET(pbuf_free_ooseq_pending, 1);
+#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
+ u8_t queued;
+ SYS_ARCH_DECL_PROTECT(old_level);
+ SYS_ARCH_PROTECT(old_level);
+ queued = pbuf_free_ooseq_pending;
+ pbuf_free_ooseq_pending = 1;
+ SYS_ARCH_UNPROTECT(old_level);
+
+ if (!queued) {
+ /* queue a call to pbuf_free_ooseq if not already queued */
+ PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
+ }
+#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
+}
+#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
+
+/* Initialize members of struct pbuf after allocation */
+static void
+pbuf_init_alloced_pbuf(struct pbuf *p, void *payload, u16_t tot_len, u16_t len, pbuf_type type, u8_t flags)
+{
+ p->next = NULL;
+ p->payload = payload;
+ p->tot_len = tot_len;
+ p->len = len;
+ p->type_internal = (u8_t)type;
+ p->flags = flags;
+ p->ref = 1;
+ p->if_idx = NETIF_NO_INDEX;
+}
+
+/**
+ * @ingroup pbuf
+ * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
+ *
+ * The actual memory allocated for the pbuf is determined by the
+ * layer at which the pbuf is allocated and the requested size
+ * (from the length parameter).
+ *
+ * @param layer header size
+ * @param length size of the pbuf's payload
+ * @param type this parameter decides how and where the pbuf
+ * should be allocated as follows:
+ *
+ * - PBUF_RAM: buffer memory for pbuf is allocated as one large
+ * chunk. This includes protocol headers as well.
+ * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
+ * protocol headers. Additional headers must be prepended
+ * by allocating another pbuf and chain in to the front of
+ * the ROM pbuf. It is assumed that the memory used is really
+ * similar to ROM in that it is immutable and will not be
+ * changed. Memory which is dynamic should generally not
+ * be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
+ * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
+ * protocol headers. It is assumed that the pbuf is only
+ * being used in a single thread. If the pbuf gets queued,
+ * then pbuf_take should be called to copy the buffer.
+ * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
+ * the pbuf pool that is allocated during pbuf_init().
+ *
+ * @return the allocated pbuf. If multiple pbufs were allocated, this
+ * is the first pbuf of a pbuf chain.
+ */
+struct pbuf *
+pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
+{
+ struct pbuf *p;
+ u16_t offset = (u16_t)layer;
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));
+
+ switch (type) {
+ case PBUF_REF: /* fall through */
+ case PBUF_ROM:
+ p = pbuf_alloc_reference(NULL, length, type);
+ break;
+ case PBUF_POOL: {
+ struct pbuf *q, *last;
+ u16_t rem_len; /* remaining length */
+ p = NULL;
+ last = NULL;
+ rem_len = length;
+ do {
+ u16_t qlen;
+ q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
+ if (q == NULL) {
+ PBUF_POOL_IS_EMPTY();
+ /* free chain so far allocated */
+ if (p) {
+ pbuf_free(p);
+ }
+ /* bail out unsuccessfully */
+ return NULL;
+ }
+ qlen = LWIP_MIN(rem_len, (u16_t)(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)));
+ pbuf_init_alloced_pbuf(q, LWIP_MEM_ALIGN((void *)((u8_t *)q + SIZEOF_STRUCT_PBUF + offset)),
+ rem_len, qlen, type, 0);
+ LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
+ ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
+ LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
+ (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
+ if (p == NULL) {
+ /* allocated head of pbuf chain (into p) */
+ p = q;
+ } else {
+ /* make previous pbuf point to this pbuf */
+ last->next = q;
+ }
+ last = q;
+ rem_len = (u16_t)(rem_len - qlen);
+ offset = 0;
+ } while (rem_len > 0);
+ break;
+ }
+ case PBUF_RAM: {
+ u16_t payload_len = (u16_t)(LWIP_MEM_ALIGN_SIZE(offset) + LWIP_MEM_ALIGN_SIZE(length));
+ mem_size_t alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF) + payload_len);
+
+ /* bug #50040: Check for integer overflow when calculating alloc_len */
+ if ((payload_len < LWIP_MEM_ALIGN_SIZE(length)) ||
+ (alloc_len < LWIP_MEM_ALIGN_SIZE(length))) {
+ return NULL;
+ }
+
+ /* If pbuf is to be allocated in RAM, allocate memory for it. */
+ p = (struct pbuf *)mem_malloc(alloc_len);
+ if (p == NULL) {
+ return NULL;
+ }
+ pbuf_init_alloced_pbuf(p, LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)),
+ length, length, type, 0);
+ LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
+ ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
+ break;
+ }
+ default:
+ LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
+ return NULL;
+ }
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
+ return p;
+}
+
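+/* Allocation sketch (illustrative): a RAM pbuf with room for transport-layer
+ * headers on TX, and a pool chain for RX:
+ *
+ *   struct pbuf *tx = pbuf_alloc(PBUF_TRANSPORT, 128, PBUF_RAM);
+ *   struct pbuf *rx = pbuf_alloc(PBUF_RAW, 1500, PBUF_POOL);
+ *   if (tx != NULL) {
+ *     // ... fill tx->payload ...
+ *     pbuf_free(tx);
+ *   }
+ */
+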
+/**
+ * @ingroup pbuf
+ * Allocates a pbuf for referenced data.
+ * Referenced data can be volatile (PBUF_REF) or long-lived (PBUF_ROM).
+ *
+ * Only the pbuf structure itself is allocated here; the payload memory
+ * is referenced, not copied, so it must remain valid for the lifetime
+ * of the pbuf.
+ *
+ * @param payload referenced payload
+ * @param length size of the pbuf's payload
+ * @param type this parameter decides how and where the pbuf
+ * should be allocated as follows:
+ *
+ * - PBUF_ROM: It is assumed that the memory used is really
+ * similar to ROM in that it is immutable and will not be
+ * changed. Memory which is dynamic should generally not
+ * be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
+ * - PBUF_REF: It is assumed that the pbuf is only
+ * being used in a single thread. If the pbuf gets queued,
+ * then pbuf_take should be called to copy the buffer.
+ *
+ * @return the allocated pbuf.
+ */
+struct pbuf *
+pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type)
+{
+ struct pbuf *p;
+ LWIP_ASSERT("invalid pbuf_type", (type == PBUF_REF) || (type == PBUF_ROM));
+ /* only allocate memory for the pbuf structure */
+ p = (struct pbuf *)memp_malloc(MEMP_PBUF);
+ if (p == NULL) {
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("pbuf_alloc_reference: Could not allocate MEMP_PBUF for PBUF_%s.\n",
+ (type == PBUF_ROM) ? "ROM" : "REF"));
+ return NULL;
+ }
+ pbuf_init_alloced_pbuf(p, payload, length, length, type, 0);
+ return p;
+}
+
+
+#if LWIP_SUPPORT_CUSTOM_PBUF
+/**
+ * @ingroup pbuf
+ * Initialize a custom pbuf (already allocated).
+ * Example of custom pbuf usage: @ref zerocopyrx
+ *
+ * @param l header size
+ * @param length size of the pbuf's payload
+ * @param type type of the pbuf (only used to treat the pbuf accordingly, as
+ * this function allocates no memory)
+ * @param p pointer to the custom pbuf to initialize (already allocated)
+ * @param payload_mem pointer to the buffer that is used for payload and headers,
+ * must be at least big enough to hold 'length' plus the header size,
+ * may be NULL if set later.
+ * ATTENTION: The caller is responsible for correct alignment of this buffer!!
+ * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
+ * big enough to hold 'length' plus the header size
+ */
+struct pbuf *
+pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
+ void *payload_mem, u16_t payload_mem_len)
+{
+ u16_t offset = (u16_t)l;
+ void *payload;
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
+
+ if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
+ return NULL;
+ }
+
+ if (payload_mem != NULL) {
+ payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
+ } else {
+ payload = NULL;
+ }
+ pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM);
+ return &p->pbuf;
+}
+#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
+
+/**
+ * @ingroup pbuf
+ * Shrink a pbuf chain to a desired length.
+ *
+ * @param p pbuf to shrink.
+ * @param new_len desired new length of pbuf chain
+ *
+ * Depending on the desired length, the first few pbufs in a chain might
+ * be skipped and left unchanged. The new last pbuf in the chain will be
+ * resized, and any remaining pbufs will be freed.
+ *
+ * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
+ * @note May not be called on a packet queue.
+ *
+ * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
+ */
+void
+pbuf_realloc(struct pbuf *p, u16_t new_len)
+{
+ struct pbuf *q;
+ u16_t rem_len; /* remaining length */
+ u16_t shrink;
+
+ LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
+
+ /* desired length larger than current length? */
+ if (new_len >= p->tot_len) {
+ /* enlarging not yet supported */
+ return;
+ }
+
+  /* the pbuf chain shrinks by (p->tot_len - new_len) bytes
+   * (new_len is known to be smaller than p->tot_len at this point) */
+ shrink = (u16_t)(p->tot_len - new_len);
+
+ /* first, step over any pbufs that should remain in the chain */
+ rem_len = new_len;
+ q = p;
+ /* should this pbuf be kept? */
+ while (rem_len > q->len) {
+ /* decrease remaining length by pbuf length */
+ rem_len = (u16_t)(rem_len - q->len);
+ /* decrease total length indicator */
+ q->tot_len = (u16_t)(q->tot_len - shrink);
+ /* proceed to next pbuf in chain */
+ q = q->next;
+ LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
+ }
+ /* we have now reached the new last pbuf (in q) */
+ /* rem_len == desired length for pbuf q */
+
+ /* shrink allocated memory for PBUF_RAM */
+  /* (other types merely adjust their length fields) */
+ if (pbuf_match_allocsrc(q, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) && (rem_len != q->len)
+#if LWIP_SUPPORT_CUSTOM_PBUF
+ && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
+#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
+ ) {
+ /* reallocate and adjust the length of the pbuf that will be split */
+ q = (struct pbuf *)mem_trim(q, (mem_size_t)(((u8_t *)q->payload - (u8_t *)q) + rem_len));
+ LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
+ }
+ /* adjust length fields for new last pbuf */
+ q->len = rem_len;
+ q->tot_len = q->len;
+
+ /* any remaining pbufs in chain? */
+ if (q->next != NULL) {
+ /* free remaining pbufs in chain */
+ pbuf_free(q->next);
+ }
+  /* q is the last pbuf in the chain */
+ q->next = NULL;
+
+}
+
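+/* Shrink sketch (illustrative): trim an RX chain to the actual frame length
+ * reported by the hardware ('frame_len' is an assumed driver variable):
+ *
+ *   if (frame_len < p->tot_len) {
+ *     pbuf_realloc(p, frame_len);   // resizes last pbuf, frees the rest
+ *   }
+ */
+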
+/**
+ * Adjusts the payload pointer to reveal headers in the payload.
+ * @see pbuf_add_header.
+ *
+ * @param p pbuf to change the header size.
+ * @param header_size_increment Number of bytes to increment header size.
+ * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types
+ *
+ * @return non-zero on failure, zero on success.
+ *
+ */
+static u8_t
+pbuf_add_header_impl(struct pbuf *p, size_t header_size_increment, u8_t force)
+{
+ u16_t type_internal;
+ void *payload;
+ u16_t increment_magnitude;
+
+ LWIP_ASSERT("p != NULL", p != NULL);
+ if ((p == NULL) || (header_size_increment > 0xFFFF)) {
+ return 1;
+ }
+ if (header_size_increment == 0) {
+ return 0;
+ }
+
+ increment_magnitude = (u16_t)header_size_increment;
+ /* Do not allow tot_len to wrap as a result. */
+ if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) {
+ return 1;
+ }
+
+ type_internal = p->type_internal;
+
+ /* pbuf types containing payloads? */
+ if (type_internal & PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS) {
+ /* set new payload pointer */
+ payload = (u8_t *)p->payload - header_size_increment;
+ /* boundary check fails? */
+ if ((u8_t *)payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) {
+ LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE,
+ ("pbuf_add_header: failed as %p < %p (not enough space for new header size)\n",
+ (void *)payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF)));
+ /* bail out unsuccessfully */
+ return 1;
+ }
+ /* pbuf types referring to external payloads? */
+ } else {
+ /* hide a header in the payload? */
+ if (force) {
+ payload = (u8_t *)p->payload - header_size_increment;
+ } else {
+ /* cannot expand payload to front (yet!)
+ * bail out unsuccessfully */
+ return 1;
+ }
+ }
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_add_header: old %p new %p (%"U16_F")\n",
+ (void *)p->payload, (void *)payload, increment_magnitude));
+
+ /* modify pbuf fields */
+ p->payload = payload;
+ p->len = (u16_t)(p->len + increment_magnitude);
+ p->tot_len = (u16_t)(p->tot_len + increment_magnitude);
+
+ return 0;
+}
+
+/**
+ * Adjusts the payload pointer to reveal headers in the payload.
+ *
+ * Adjusts the ->payload pointer so that space for a header
+ * appears in the pbuf payload.
+ *
+ * The ->payload, ->tot_len and ->len fields are adjusted.
+ *
+ * @param p pbuf to change the header size.
+ * @param header_size_increment Number of bytes to increment header size which
+ * increases the size of the pbuf. New space is on the front.
+ * If header_size_increment is 0, this function does nothing and returns successfully.
+ *
+ * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
+ * the call will fail. A check is made that the increase in header size does
+ * not move the payload pointer in front of the start of the buffer.
+ *
+ * @return non-zero on failure, zero on success.
+ *
+ */
+u8_t
+pbuf_add_header(struct pbuf *p, size_t header_size_increment)
+{
+ return pbuf_add_header_impl(p, header_size_increment, 0);
+}
+
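+/*
+ * Usage sketch for pbuf_add_header() (illustrative): prepend space for an
+ * 8-byte application header to a PBUF_RAM pbuf allocated with enough layer
+ * headroom. 'payload_len' and 'my_hdr' are hypothetical.
+ *
+ *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, payload_len, PBUF_RAM);
+ *   if ((p != NULL) && (pbuf_add_header(p, 8) == 0)) {
+ *     MEMCPY(p->payload, my_hdr, 8);  // payload now starts at the new header
+ *   }
+ */
+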
+/**
+ * Same as @ref pbuf_add_header but does not check if 'header_size > 0' is allowed.
+ * This is used internally only, to allow PBUF_REF for RX.
+ */
+u8_t
+pbuf_add_header_force(struct pbuf *p, size_t header_size_increment)
+{
+ return pbuf_add_header_impl(p, header_size_increment, 1);
+}
+
+/**
+ * Adjusts the payload pointer to hide headers in the payload.
+ *
+ * Adjusts the ->payload pointer so that space for a header
+ * disappears in the pbuf payload.
+ *
+ * The ->payload, ->tot_len and ->len fields are adjusted.
+ *
+ * @param p pbuf to change the header size.
+ * @param header_size_decrement Number of bytes to decrement header size which
+ * decreases the size of the pbuf.
+ * If header_size_decrement is 0, this function does nothing and returns successfully.
+ * @return non-zero on failure, zero on success.
+ *
+ */
+u8_t
+pbuf_remove_header(struct pbuf *p, size_t header_size_decrement)
+{
+ void *payload;
+ u16_t increment_magnitude;
+
+ LWIP_ASSERT("p != NULL", p != NULL);
+ if ((p == NULL) || (header_size_decrement > 0xFFFF)) {
+ return 1;
+ }
+ if (header_size_decrement == 0) {
+ return 0;
+ }
+
+ increment_magnitude = (u16_t)header_size_decrement;
+ /* Check that we aren't going to move off the end of the pbuf */
+ LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;);
+
+ /* remember current payload pointer */
+ payload = p->payload;
+ LWIP_UNUSED_ARG(payload); /* only used in LWIP_DEBUGF below */
+
+ /* increase payload pointer (guarded by length check above) */
+ p->payload = (u8_t *)p->payload + header_size_decrement;
+ /* modify pbuf length fields */
+ p->len = (u16_t)(p->len - increment_magnitude);
+ p->tot_len = (u16_t)(p->tot_len - increment_magnitude);
+
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_remove_header: old %p new %p (%"U16_F")\n",
+ (void *)payload, (void *)p->payload, increment_magnitude));
+
+ return 0;
+}
+
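+/*
+ * Usage sketch for pbuf_remove_header() (illustrative): parse a header at
+ * the current payload pointer, then hide it so that p->payload points at
+ * the next layer. 'struct my_proto_hdr' is a hypothetical header type.
+ *
+ *   struct my_proto_hdr *hdr = (struct my_proto_hdr *)p->payload;
+ *   // ... inspect hdr fields ...
+ *   if (pbuf_remove_header(p, sizeof(struct my_proto_hdr)) != 0) {
+ *     return;  // pbuf too short for the header
+ *   }
+ */
+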
+static u8_t
+pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force)
+{
+  if (header_size_increment < 0) {
+    return pbuf_remove_header(p, (size_t)-header_size_increment);
+ } else {
+ return pbuf_add_header_impl(p, (size_t)header_size_increment, force);
+ }
+}
+
+/**
+ * Adjusts the payload pointer to hide or reveal headers in the payload.
+ *
+ * Adjusts the ->payload pointer so that space for a header
+ * (dis)appears in the pbuf payload.
+ *
+ * The ->payload, ->tot_len and ->len fields are adjusted.
+ *
+ * @param p pbuf to change the header size.
+ * @param header_size_increment Number of bytes to increment header size which
+ * increases the size of the pbuf. New space is on the front.
+ * (Using a negative value decreases the header size.)
+ * If header_size_increment is 0, this function does nothing and returns successfully.
+ *
+ * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
+ * the call will fail. A check is made that the increase in header size does
+ * not move the payload pointer in front of the start of the buffer.
+ * @return non-zero on failure, zero on success.
+ *
+ */
+u8_t
+pbuf_header(struct pbuf *p, s16_t header_size_increment)
+{
+ return pbuf_header_impl(p, header_size_increment, 0);
+}
+
+/**
+ * Same as pbuf_header but does not check if 'header_size > 0' is allowed.
+ * This is used internally only, to allow PBUF_REF for RX.
+ */
+u8_t
+pbuf_header_force(struct pbuf *p, s16_t header_size_increment)
+{
+ return pbuf_header_impl(p, header_size_increment, 1);
+}
+
+/** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len)
+ *
+ * @param q pbufs to operate on
+ * @param size The number of bytes to remove from the beginning of the pbuf list.
+ * While size >= p->len, pbufs are freed.
+ *        ATTENTION: this works in the opposite direction from @ref pbuf_header, and
+ *        takes a u16_t, not an s16_t!
+ * @return the new head pbuf
+ */
+struct pbuf *
+pbuf_free_header(struct pbuf *q, u16_t size)
+{
+ struct pbuf *p = q;
+ u16_t free_left = size;
+ while (free_left && p) {
+ if (free_left >= p->len) {
+ struct pbuf *f = p;
+ free_left = (u16_t)(free_left - p->len);
+ p = p->next;
+ f->next = 0;
+ pbuf_free(f);
+ } else {
+ pbuf_remove_header(p, free_left);
+ free_left = 0;
+ }
+ }
+ return p;
+}
+
+/**
+ * @ingroup pbuf
+ * Dereference a pbuf chain or queue and deallocate any no-longer-used
+ * pbufs at the head of this chain or queue.
+ *
+ * Decrements the pbuf reference count. If it reaches zero, the pbuf is
+ * deallocated.
+ *
+ * For a pbuf chain, this is repeated for each pbuf in the chain,
+ * up to the first pbuf which has a non-zero reference count after
+ * decrementing. So, when all reference counts are one, the whole
+ * chain is freed.
+ *
+ * @param p The pbuf (chain) to be dereferenced.
+ *
+ * @return the number of pbufs that were de-allocated
+ * from the head of the chain.
+ *
+ * @note MUST NOT be called on a packet queue (Not verified to work yet).
+ * @note the reference counter of a pbuf equals the number of pointers
+ * that refer to the pbuf (or into the pbuf).
+ *
+ * @internal examples:
+ *
+ * Assuming existing chains a->b->c with the following reference
+ * counts, calling pbuf_free(a) results in:
+ *
+ * 1->2->3 becomes ...1->3
+ * 3->3->3 becomes 2->3->3
+ * 1->1->2 becomes ......1
+ * 2->1->1 becomes 1->1->1
+ * 1->1->1 becomes .......
+ *
+ */
+u8_t
+pbuf_free(struct pbuf *p)
+{
+ u8_t alloc_src;
+ struct pbuf *q;
+ u8_t count;
+
+ if (p == NULL) {
+ LWIP_ASSERT("p != NULL", p != NULL);
+ /* if assertions are disabled, proceed with debug output */
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("pbuf_free(p == NULL) was called.\n"));
+ return 0;
+ }
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));
+
+ PERF_START;
+
+ count = 0;
+ /* de-allocate all consecutive pbufs from the head of the chain that
+ * obtain a zero reference count after decrementing*/
+ while (p != NULL) {
+ LWIP_PBUF_REF_T ref;
+ SYS_ARCH_DECL_PROTECT(old_level);
+    /* Since decrementing ref cannot be guaranteed to be a single machine operation,
+     * we must protect it. We store the new ref value in a local variable so that
+     * the check below does not need protection as well. */
+ SYS_ARCH_PROTECT(old_level);
+ /* all pbufs in a chain are referenced at least once */
+ LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
+ /* decrease reference count (number of pointers to pbuf) */
+ ref = --(p->ref);
+ SYS_ARCH_UNPROTECT(old_level);
+    /* is this pbuf no longer referenced? */
+ if (ref == 0) {
+ /* remember next pbuf in chain for next iteration */
+ q = p->next;
+ LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
+ alloc_src = pbuf_get_allocsrc(p);
+#if LWIP_SUPPORT_CUSTOM_PBUF
+ /* is this a custom pbuf? */
+ if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
+ struct pbuf_custom *pc = (struct pbuf_custom *)p;
+ LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
+ pc->custom_free_function(p);
+ } else
+#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
+ {
+ /* is this a pbuf from the pool? */
+ if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) {
+ memp_free(MEMP_PBUF_POOL, p);
+ /* is this a ROM or RAM referencing pbuf? */
+ } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF) {
+ memp_free(MEMP_PBUF, p);
+ /* type == PBUF_RAM */
+ } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) {
+ mem_free(p);
+ } else {
+ /* @todo: support freeing other types */
+ LWIP_ASSERT("invalid pbuf type", 0);
+ }
+ }
+ count++;
+ /* proceed to next pbuf */
+ p = q;
+      /* p->ref > 0: this pbuf is still referenced */
+      /* (and so are the remaining pbufs in the chain) */
+ } else {
+ LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, (u16_t)ref));
+ /* stop walking through the chain */
+ p = NULL;
+ }
+ }
+ PERF_STOP("pbuf_free");
+ /* return number of de-allocated pbufs */
+ return count;
+}
+
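+/*
+ * Reference-counting sketch (illustrative): how pbuf_ref()/pbuf_free()
+ * pair up when a pbuf is both processed locally and placed on a queue.
+ *
+ *   struct pbuf *p = pbuf_alloc(PBUF_RAW, 64, PBUF_RAM);  // ref == 1
+ *   pbuf_ref(p);        // ref == 2: a queue takes a second reference
+ *   pbuf_free(p);       // ref == 1: our reference dropped, pbuf lives on
+ *   pbuf_free(p);       // ref == 0: the queue is done, pbuf is deallocated
+ */
+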
+/**
+ * Count number of pbufs in a chain
+ *
+ * @param p first pbuf of chain
+ * @return the number of pbufs in a chain
+ */
+u16_t
+pbuf_clen(const struct pbuf *p)
+{
+ u16_t len;
+
+ len = 0;
+ while (p != NULL) {
+ ++len;
+ p = p->next;
+ }
+ return len;
+}
+
+/**
+ * @ingroup pbuf
+ * Increment the reference count of the pbuf.
+ *
+ * @param p pbuf to increase reference counter of
+ *
+ */
+void
+pbuf_ref(struct pbuf *p)
+{
+ /* pbuf given? */
+ if (p != NULL) {
+ SYS_ARCH_SET(p->ref, (LWIP_PBUF_REF_T)(p->ref + 1));
+ LWIP_ASSERT("pbuf ref overflow", p->ref > 0);
+ }
+}
+
+/**
+ * @ingroup pbuf
+ * Concatenate two pbufs (each may be a pbuf chain) and take over
+ * the caller's reference of the tail pbuf.
+ *
+ * @note The caller MAY NOT reference the tail pbuf afterwards.
+ * Use pbuf_chain() for that purpose.
+ *
+ * This function explicitly does not check for tot_len overflow, so that
+ * queueing overly long pbuf chains does not fail. This can produce invalid
+ * pbufs, so handle with care!
+ *
+ * @see pbuf_chain()
+ */
+void
+pbuf_cat(struct pbuf *h, struct pbuf *t)
+{
+ struct pbuf *p;
+
+ LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)",
+ ((h != NULL) && (t != NULL)), return;);
+
+ /* proceed to last pbuf of chain */
+ for (p = h; p->next != NULL; p = p->next) {
+ /* add total length of second chain to all totals of first chain */
+ p->tot_len = (u16_t)(p->tot_len + t->tot_len);
+ }
+ /* { p is last pbuf of first h chain, p->next == NULL } */
+ LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
+ LWIP_ASSERT("p->next == NULL", p->next == NULL);
+ /* add total length of second chain to last pbuf total of first chain */
+ p->tot_len = (u16_t)(p->tot_len + t->tot_len);
+ /* chain last pbuf of head (p) with first of tail (t) */
+ p->next = t;
+  /* p->next now references t, but the caller will drop its reference to t,
+   * so the net change to the reference count of t is zero.
+   */
+}
+
+/**
+ * @ingroup pbuf
+ * Chain two pbufs (or pbuf chains) together.
+ *
+ * The caller MUST call pbuf_free(t) once it has stopped
+ * using it. Use pbuf_cat() instead if you no longer use t.
+ *
+ * @param h head pbuf (chain)
+ * @param t tail pbuf (chain)
+ * @note The pbufs MUST belong to the same packet.
+ * @note MAY NOT be called on a packet queue.
+ *
+ * The ->tot_len fields of all pbufs of the head chain are adjusted.
+ * The ->next field of the last pbuf of the head chain is adjusted.
+ * The ->ref field of the first pbuf of the tail chain is adjusted.
+ *
+ */
+void
+pbuf_chain(struct pbuf *h, struct pbuf *t)
+{
+ pbuf_cat(h, t);
+ /* t is now referenced by h */
+ pbuf_ref(t);
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
+}
+
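+/*
+ * Sketch contrasting pbuf_cat() and pbuf_chain() (illustrative):
+ *
+ *   pbuf_chain(h, t);   // h ...-> t; caller still holds its reference to t
+ *   pbuf_free(t);       // caller drops its reference; h keeps t alive
+ *
+ *   pbuf_cat(h, t2);    // reference to t2 is handed over to the chain:
+ *                       // the caller MUST NOT use or free t2 afterwards
+ */
+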
+/**
+ * Dechains the first pbuf from its succeeding pbufs in the chain.
+ *
+ * Makes p->tot_len field equal to p->len.
+ * @param p pbuf to dechain
+ * @return remainder of the pbuf chain, or NULL if it was de-allocated.
+ * @note May not be called on a packet queue.
+ */
+struct pbuf *
+pbuf_dechain(struct pbuf *p)
+{
+ struct pbuf *q;
+ u8_t tail_gone = 1;
+ /* tail */
+ q = p->next;
+ /* pbuf has successor in chain? */
+ if (q != NULL) {
+    /* assert tot_len invariant: (p->tot_len == p->len + (p->next ? p->next->tot_len : 0)) */
+ LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len);
+ /* enforce invariant if assertion is disabled */
+ q->tot_len = (u16_t)(p->tot_len - p->len);
+ /* decouple pbuf from remainder */
+ p->next = NULL;
+ /* total length of pbuf p is its own length only */
+ p->tot_len = p->len;
+ /* q is no longer referenced by p, free it */
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q));
+ tail_gone = pbuf_free(q);
+ if (tail_gone > 0) {
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
+ ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q));
+ }
+ /* return remaining tail or NULL if deallocated */
+ }
+  /* assert tot_len invariant: (p->tot_len == p->len + (p->next ? p->next->tot_len : 0)) */
+ LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
+ return ((tail_gone > 0) ? NULL : q);
+}
+
+/**
+ * @ingroup pbuf
+ * Create PBUF_RAM copies of pbufs.
+ *
+ * Used to queue packets on behalf of the lwIP stack, such as
+ * ARP based queueing.
+ *
+ * @note Only one packet is copied, no packet queue!
+ *
+ * @param p_to pbuf destination of the copy
+ * @param p_from pbuf source of the copy
+ *
+ * @return ERR_OK if pbuf was copied
+ * ERR_ARG if one of the pbufs is NULL or p_to is not big
+ * enough to hold p_from
+ */
+err_t
+pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from)
+{
+ size_t offset_to = 0, offset_from = 0, len;
+
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n",
+ (const void *)p_to, (const void *)p_from));
+
+ /* is the target big enough to hold the source? */
+ LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) &&
+ (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;);
+
+ /* iterate through pbuf chain */
+ do {
+ /* copy one part of the original chain */
+ if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
+ /* complete current p_from fits into current p_to */
+ len = p_from->len - offset_from;
+ } else {
+ /* current p_from does not fit into current p_to */
+ len = p_to->len - offset_to;
+ }
+ MEMCPY((u8_t *)p_to->payload + offset_to, (u8_t *)p_from->payload + offset_from, len);
+ offset_to += len;
+ offset_from += len;
+ LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
+ LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
+ if (offset_from >= p_from->len) {
+ /* on to next p_from (if any) */
+ offset_from = 0;
+ p_from = p_from->next;
+ }
+ if (offset_to == p_to->len) {
+ /* on to next p_to (if any) */
+ offset_to = 0;
+ p_to = p_to->next;
+ LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL), return ERR_ARG;);
+ }
+
+ if ((p_from != NULL) && (p_from->len == p_from->tot_len)) {
+ /* don't copy more than one packet! */
+ LWIP_ERROR("pbuf_copy() does not allow packet queues!",
+ (p_from->next == NULL), return ERR_VAL;);
+ }
+ if ((p_to != NULL) && (p_to->len == p_to->tot_len)) {
+ /* don't copy more than one packet! */
+ LWIP_ERROR("pbuf_copy() does not allow packet queues!",
+ (p_to->next == NULL), return ERR_VAL;);
+ }
+ } while (p_from);
+ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n"));
+ return ERR_OK;
+}
+
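+/*
+ * Usage sketch for pbuf_copy() (illustrative): copy a (possibly chained)
+ * pbuf into a freshly allocated target of the same total length.
+ *
+ *   struct pbuf *q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
+ *   if (q != NULL) {
+ *     if (pbuf_copy(q, p) != ERR_OK) {
+ *       pbuf_free(q);  // target not big enough or arguments invalid
+ *       q = NULL;
+ *     }
+ *   }
+ */
+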
+/**
+ * @ingroup pbuf
+ * Copy (part of) the contents of a packet buffer
+ * to an application supplied buffer.
+ *
+ * @param buf the pbuf from which to copy data
+ * @param dataptr the application supplied buffer
+ * @param len length of data to copy (dataptr must be big enough). No more
+ * than buf->tot_len will be copied, irrespective of len
+ * @param offset offset into the packet buffer from where to begin copying len bytes
+ * @return the number of bytes copied, or 0 on failure
+ */
+u16_t
+pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
+{
+ const struct pbuf *p;
+ u16_t left = 0;
+ u16_t buf_copy_len;
+ u16_t copied_total = 0;
+
+ LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
+ LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);
+
+  /* Note: some systems use byte copy if dataptr or one of the pbuf payload pointers is unaligned. */
+ for (p = buf; len != 0 && p != NULL; p = p->next) {
+ if ((offset != 0) && (offset >= p->len)) {
+ /* don't copy from this buffer -> on to the next */
+ offset = (u16_t)(offset - p->len);
+ } else {
+ /* copy from this buffer. maybe only partially. */
+ buf_copy_len = (u16_t)(p->len - offset);
+ if (buf_copy_len > len) {
+ buf_copy_len = len;
+ }
+ /* copy the necessary parts of the buffer */
+ MEMCPY(&((char *)dataptr)[left], &((char *)p->payload)[offset], buf_copy_len);
+ copied_total = (u16_t)(copied_total + buf_copy_len);
+ left = (u16_t)(left + buf_copy_len);
+ len = (u16_t)(len - buf_copy_len);
+ offset = 0;
+ }
+ }
+ return copied_total;
+}
+
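+/*
+ * Usage sketch for pbuf_copy_partial() (illustrative): copy up to 32 bytes
+ * starting at offset 10 into a stack buffer.
+ *
+ *   u8_t buf[32];
+ *   u16_t copied = pbuf_copy_partial(p, buf, sizeof(buf), 10);
+ *   // 'copied' may be less than sizeof(buf) if the pbuf chain is shorter
+ */
+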
+/**
+ * @ingroup pbuf
+ * Get part of a pbuf's payload as contiguous memory. The returned memory is
+ * either a pointer into the pbuf's payload or, if split over multiple pbufs,
+ * a copy into the user-supplied buffer.
+ *
+ * @param p the pbuf from which to copy data
+ * @param buffer the application supplied buffer
+ * @param bufsize size of the application supplied buffer
+ * @param len length of data to copy (buffer must be big enough). No more
+ *            than p->tot_len will be copied, irrespective of len
+ * @param offset offset into the packet buffer from where to begin copying len bytes
+ * @return the number of bytes copied, or 0 on failure
+ */
+void *
+pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset)
+{
+ const struct pbuf *q;
+ u16_t out_offset;
+
+ LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;);
+ LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;);
+ LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;);
+
+ q = pbuf_skip_const(p, offset, &out_offset);
+ if (q != NULL) {
+ if (q->len >= (out_offset + len)) {
+ /* all data in this pbuf, return zero-copy */
+ return (u8_t *)q->payload + out_offset;
+ }
+ /* need to copy */
+ if (pbuf_copy_partial(q, buffer, len, out_offset) != len) {
+ /* copying failed: pbuf is too short */
+ return NULL;
+ }
+ return buffer;
+ }
+ /* pbuf is too short (offset does not fit in) */
+ return NULL;
+}
+
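+/*
+ * Usage sketch for pbuf_get_contiguous() (illustrative): read a 4-byte
+ * field at offset 20 without caring whether it crosses a pbuf boundary.
+ *
+ *   u8_t storage[4];
+ *   const u8_t *field = (const u8_t *)pbuf_get_contiguous(p, storage,
+ *                                                         sizeof(storage), 4, 20);
+ *   if (field != NULL) {
+ *     // 'field' points into the pbuf (zero-copy) or into 'storage' (copied)
+ *   }
+ */
+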
+#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+/**
+ * This method modifies a 'pbuf chain', so that its total length is
+ * smaller than 64K. The remainder of the original pbuf chain is stored
+ * in *rest.
+ * This function never creates new pbufs, but splits an existing chain
+ * in two parts. The tot_len of the first part (the modified chain) is
+ * kept smaller than 64K by construction.
+ * 'packet queues' are not supported by this function.
+ *
+ * @param p the pbuf queue to be split
+ * @param rest pointer to store the remainder (after the first 64K)
+ */
+void pbuf_split_64k(struct pbuf *p, struct pbuf **rest)
+{
+ *rest = NULL;
+ if ((p != NULL) && (p->next != NULL)) {
+ u16_t tot_len_front = p->len;
+ struct pbuf *i = p;
+ struct pbuf *r = p->next;
+
+ /* continue until the total length (summed up as u16_t) overflows */
+ while ((r != NULL) && ((u16_t)(tot_len_front + r->len) >= tot_len_front)) {
+ tot_len_front = (u16_t)(tot_len_front + r->len);
+ i = r;
+ r = r->next;
+ }
+ /* i now points to last packet of the first segment. Set next
+ pointer to NULL */
+ i->next = NULL;
+
+ if (r != NULL) {
+ /* Update the tot_len field in the first part */
+ for (i = p; i != NULL; i = i->next) {
+ i->tot_len = (u16_t)(i->tot_len - r->tot_len);
+ LWIP_ASSERT("tot_len/len mismatch in last pbuf",
+ (i->next != NULL) || (i->tot_len == i->len));
+ }
+ if (p->flags & PBUF_FLAG_TCP_FIN) {
+ r->flags |= PBUF_FLAG_TCP_FIN;
+ }
+
+ /* tot_len field in rest does not need modifications */
+ /* reference counters do not need modifications */
+ *rest = r;
+ }
+ }
+}
+#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+
+/* Actual implementation of pbuf_skip() but returning const pointer... */
+static const struct pbuf *
+pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset)
+{
+ u16_t offset_left = in_offset;
+ const struct pbuf *q = in;
+
+ /* get the correct pbuf */
+ while ((q != NULL) && (q->len <= offset_left)) {
+ offset_left = (u16_t)(offset_left - q->len);
+ q = q->next;
+ }
+ if (out_offset != NULL) {
+ *out_offset = offset_left;
+ }
+ return q;
+}
+
+/**
+ * @ingroup pbuf
+ * Skip a number of bytes at the start of a pbuf
+ *
+ * @param in input pbuf
+ * @param in_offset offset to skip
+ * @param out_offset resulting offset in the returned pbuf
+ * @return the pbuf in the queue where the offset is
+ */
+struct pbuf *
+pbuf_skip(struct pbuf *in, u16_t in_offset, u16_t *out_offset)
+{
+ const struct pbuf *out = pbuf_skip_const(in, in_offset, out_offset);
+ return LWIP_CONST_CAST(struct pbuf *, out);
+}
+
+/**
+ * @ingroup pbuf
+ * Copy application supplied data into a pbuf.
+ * This function can only be used to copy the equivalent of buf->tot_len data.
+ *
+ * @param buf pbuf to fill with data
+ * @param dataptr application supplied data buffer
+ * @param len length of the application supplied data buffer
+ *
+ * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
+ */
+err_t
+pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len)
+{
+ struct pbuf *p;
+ size_t buf_copy_len;
+ size_t total_copy_len = len;
+ size_t copied_total = 0;
+
+ LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;);
+ LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
+ LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;);
+
+ if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) {
+ return ERR_ARG;
+ }
+
+  /* Note: some systems use byte copy if dataptr or one of the pbuf payload pointers is unaligned. */
+ for (p = buf; total_copy_len != 0; p = p->next) {
+ LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL);
+ buf_copy_len = total_copy_len;
+ if (buf_copy_len > p->len) {
+ /* this pbuf cannot hold all remaining data */
+ buf_copy_len = p->len;
+ }
+ /* copy the necessary parts of the buffer */
+ MEMCPY(p->payload, &((const char *)dataptr)[copied_total], buf_copy_len);
+ total_copy_len -= buf_copy_len;
+ copied_total += buf_copy_len;
+ }
+ LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len);
+ return ERR_OK;
+}
+
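+/*
+ * Usage sketch for pbuf_take() (illustrative): fill a newly allocated pbuf
+ * with application data before sending. 'upcb' is a hypothetical,
+ * already-connected UDP PCB.
+ *
+ *   const char msg[] = "hello";
+ *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, sizeof(msg), PBUF_RAM);
+ *   if (p != NULL) {
+ *     if (pbuf_take(p, msg, sizeof(msg)) == ERR_OK) {
+ *       udp_send(upcb, p);
+ *     }
+ *     pbuf_free(p);
+ *   }
+ */
+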
+/**
+ * @ingroup pbuf
+ * Same as pbuf_take() but puts data at an offset
+ *
+ * @param buf pbuf to fill with data
+ * @param dataptr application supplied data buffer
+ * @param len length of the application supplied data buffer
+ * @param offset offset in pbuf where to copy dataptr to
+ *
+ * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
+ */
+err_t
+pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset)
+{
+ u16_t target_offset;
+ struct pbuf *q = pbuf_skip(buf, offset, &target_offset);
+
+ /* return requested data if pbuf is OK */
+ if ((q != NULL) && (q->tot_len >= target_offset + len)) {
+ u16_t remaining_len = len;
+ const u8_t *src_ptr = (const u8_t *)dataptr;
+ /* copy the part that goes into the first pbuf */
+ u16_t first_copy_len;
+ LWIP_ASSERT("check pbuf_skip result", target_offset < q->len);
+ first_copy_len = (u16_t)LWIP_MIN(q->len - target_offset, len);
+ MEMCPY(((u8_t *)q->payload) + target_offset, dataptr, first_copy_len);
+ remaining_len = (u16_t)(remaining_len - first_copy_len);
+ src_ptr += first_copy_len;
+ if (remaining_len > 0) {
+ return pbuf_take(q->next, src_ptr, remaining_len);
+ }
+ return ERR_OK;
+ }
+ return ERR_MEM;
+}
+
+/**
+ * @ingroup pbuf
+ * Creates a single pbuf out of a queue of pbufs.
+ *
+ * @remark: Either the source pbuf 'p' is freed by this function or the original
+ * pbuf 'p' is returned, therefore the caller has to check the result!
+ *
+ * @param p the source pbuf
+ * @param layer pbuf_layer of the new pbuf
+ *
+ * @return a new, single pbuf (p->next is NULL)
+ * or the old pbuf if allocation fails
+ */
+struct pbuf *
+pbuf_coalesce(struct pbuf *p, pbuf_layer layer)
+{
+ struct pbuf *q;
+ if (p->next == NULL) {
+ return p;
+ }
+ q = pbuf_clone(layer, PBUF_RAM, p);
+ if (q == NULL) {
+ /* @todo: what do we do now? */
+ return p;
+ }
+ pbuf_free(p);
+ return q;
+}
+
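+/*
+ * Usage sketch for pbuf_coalesce() (illustrative): flatten a chain so the
+ * payload can be accessed as one contiguous block. Note the result must be
+ * assigned back, and may still be chained if allocation failed.
+ *
+ *   p = pbuf_coalesce(p, PBUF_RAW);
+ *   if (p->next == NULL) {
+ *     // payload is now contiguous: p->len == p->tot_len
+ *   }
+ */
+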
+/**
+ * @ingroup pbuf
+ * Allocates a new pbuf of same length (via pbuf_alloc()) and copies the source
+ * pbuf into this new pbuf (using pbuf_copy()).
+ *
+ * @param layer pbuf_layer of the new pbuf
+ * @param type this parameter decides how and where the pbuf should be allocated
+ * (@see pbuf_alloc())
+ * @param p the source pbuf
+ *
+ * @return a new pbuf or NULL if allocation fails
+ */
+struct pbuf *
+pbuf_clone(pbuf_layer layer, pbuf_type type, struct pbuf *p)
+{
+ struct pbuf *q;
+ err_t err;
+ q = pbuf_alloc(layer, p->tot_len, type);
+ if (q == NULL) {
+ return NULL;
+ }
+ err = pbuf_copy(q, p);
+ LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */
+ LWIP_ASSERT("pbuf_copy failed", err == ERR_OK);
+ return q;
+}
+
+#if LWIP_CHECKSUM_ON_COPY
+/**
+ * Copies data into a single pbuf (*not* into a pbuf queue!) and updates
+ * the checksum while copying
+ *
+ * @param p the pbuf to copy data into
+ * @param start_offset offset of p->payload where to copy the data to
+ * @param dataptr data to copy into the pbuf
+ * @param len length of data to copy into the pbuf
+ * @param chksum pointer to the checksum which is updated
+ * @return ERR_OK if successful, another error if the data does not fit
+ * within the (first) pbuf (no pbuf queues!)
+ */
+err_t
+pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr,
+ u16_t len, u16_t *chksum)
+{
+ u32_t acc;
+ u16_t copy_chksum;
+ char *dst_ptr;
+ LWIP_ASSERT("p != NULL", p != NULL);
+ LWIP_ASSERT("dataptr != NULL", dataptr != NULL);
+ LWIP_ASSERT("chksum != NULL", chksum != NULL);
+ LWIP_ASSERT("len != 0", len != 0);
+
+ if ((start_offset >= p->len) || (start_offset + len > p->len)) {
+ return ERR_ARG;
+ }
+
+ dst_ptr = ((char *)p->payload) + start_offset;
+ copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len);
+ if ((start_offset & 1) != 0) {
+ copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum);
+ }
+ acc = *chksum;
+ acc += copy_chksum;
+ *chksum = FOLD_U32T(acc);
+ return ERR_OK;
+}
+#endif /* LWIP_CHECKSUM_ON_COPY */
+
+/**
+ * @ingroup pbuf
+ * Get one byte from the specified position in a pbuf
+ * WARNING: returns zero for offset >= p->tot_len
+ *
+ * @param p pbuf to parse
+ * @param offset offset into p of the byte to return
+ * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len
+ */
+u8_t
+pbuf_get_at(const struct pbuf *p, u16_t offset)
+{
+ int ret = pbuf_try_get_at(p, offset);
+ if (ret >= 0) {
+ return (u8_t)ret;
+ }
+ return 0;
+}
+
+/**
+ * @ingroup pbuf
+ * Get one byte from the specified position in a pbuf
+ *
+ * @param p pbuf to parse
+ * @param offset offset into p of the byte to return
+ * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len
+ */
+int
+pbuf_try_get_at(const struct pbuf *p, u16_t offset)
+{
+ u16_t q_idx;
+ const struct pbuf *q = pbuf_skip_const(p, offset, &q_idx);
+
+ /* return requested data if pbuf is OK */
+ if ((q != NULL) && (q->len > q_idx)) {
+ return ((u8_t *)q->payload)[q_idx];
+ }
+ return -1;
+}
+
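+/*
+ * Usage sketch for pbuf_try_get_at() (illustrative): prefer this over
+ * pbuf_get_at() when a zero byte must be distinguished from "out of range".
+ *
+ *   int b = pbuf_try_get_at(p, 13);
+ *   if (b >= 0) {
+ *     u8_t byte = (u8_t)b;  // valid byte at offset 13 (may be zero)
+ *   }
+ */
+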
+/**
+ * @ingroup pbuf
+ * Put one byte to the specified position in a pbuf
+ * WARNING: silently ignores offset >= p->tot_len
+ *
+ * @param p pbuf to fill
+ * @param offset offset into p of the byte to write
+ * @param data byte to write at an offset into p
+ */
+void
+pbuf_put_at(struct pbuf *p, u16_t offset, u8_t data)
+{
+ u16_t q_idx;
+ struct pbuf *q = pbuf_skip(p, offset, &q_idx);
+
+ /* write requested data if pbuf is OK */
+ if ((q != NULL) && (q->len > q_idx)) {
+ ((u8_t *)q->payload)[q_idx] = data;
+ }
+}
+
+/**
+ * @ingroup pbuf
+ * Compare pbuf contents at specified offset with memory s2, both of length n
+ *
+ * @param p pbuf to compare
+ * @param offset offset into p at which to start comparing
+ * @param s2 buffer to compare
+ * @param n length of buffer to compare
+ * @return zero if equal, nonzero otherwise
+ * (0xffff if p is too short, diffoffset+1 otherwise)
+ */
+u16_t
+pbuf_memcmp(const struct pbuf *p, u16_t offset, const void *s2, u16_t n)
+{
+ u16_t start = offset;
+ const struct pbuf *q = p;
+ u16_t i;
+
+ /* pbuf long enough to perform check? */
+ if (p->tot_len < (offset + n)) {
+ return 0xffff;
+ }
+
+ /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. */
+ while ((q != NULL) && (q->len <= start)) {
+ start = (u16_t)(start - q->len);
+ q = q->next;
+ }
+
+ /* return requested data if pbuf is OK */
+ for (i = 0; i < n; i++) {
+ /* We know pbuf_get_at() succeeds because of p->tot_len check above. */
+ u8_t a = pbuf_get_at(q, (u16_t)(start + i));
+ u8_t b = ((const u8_t *)s2)[i];
+ if (a != b) {
+ return (u16_t)LWIP_MIN(i + 1, 0xFFFF);
+ }
+ }
+ return 0;
+}
+
+/**
+ * @ingroup pbuf
+ * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset
+ * start_offset.
+ *
+ * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
+ * return value 'not found'
+ * @param mem search for the contents of this buffer
+ * @param mem_len length of 'mem'
+ * @param start_offset offset into p at which to start searching
+ * @return the index where mem was found, or 0xFFFF if it was not found in p
+ */
+u16_t
+pbuf_memfind(const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset)
+{
+ u16_t i;
+ u16_t max_cmp_start = (u16_t)(p->tot_len - mem_len);
+ if (p->tot_len >= mem_len + start_offset) {
+ for (i = start_offset; i <= max_cmp_start; i++) {
+ u16_t plus = pbuf_memcmp(p, i, mem, mem_len);
+ if (plus == 0) {
+ return i;
+ }
+ }
+ }
+ return 0xFFFF;
+}
+
+/**
+ * Find occurrence of substr in pbuf p, starting at offset 0.
+ * WARNING: in contrast to strstr(), this one does not stop at the first \0 in
+ * the pbuf/source string!
+ *
+ * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
+ * return value 'not found'
+ * @param substr string to search for in p, maximum length is 0xFFFE
+ * @return 0xFFFF if substr was not found in p or the index where it was found
+ */
+u16_t
+pbuf_strstr(const struct pbuf *p, const char *substr)
+{
+ size_t substr_len;
+ if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) {
+ return 0xFFFF;
+ }
+ substr_len = strlen(substr);
+ if (substr_len >= 0xFFFF) {
+ return 0xFFFF;
+ }
+ return pbuf_memfind(p, substr, (u16_t)substr_len, 0);
+}
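+
+/*
+ * Usage sketch for pbuf_strstr() (illustrative): locate the end of an
+ * HTTP header block inside a received pbuf chain.
+ *
+ *   u16_t pos = pbuf_strstr(p, "\r\n\r\n");
+ *   if (pos != 0xFFFF) {
+ *     // headers end at offset pos; the body starts at pos + 4
+ *   }
+ */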
diff --git a/lwip/src/core/raw.c b/lwip/src/core/raw.c
new file mode 100644
index 0000000..3b34544
--- /dev/null
+++ b/lwip/src/core/raw.c
@@ -0,0 +1,671 @@
+/**
+ * @file
+ * Implementation of raw protocol PCBs for low-level handling of
+ * different types of protocols besides (or overriding) those
+ * already available in lwIP.\n
+ * See also @ref raw_raw
+ *
+ * @defgroup raw_raw RAW
+ * @ingroup callbackstyle_api
+ * Implementation of raw protocol PCBs for low-level handling of
+ * different types of protocols besides (or overriding) those
+ * already available in lwIP.\n
+ * @see @ref api
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/memp.h"
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/raw.h"
+#include "lwip/priv/raw_priv.h"
+#include "lwip/stats.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/inet_chksum.h"
+
+#include <string.h>
+
+/** The list of RAW PCBs */
+static struct raw_pcb *raw_pcbs;
+
+static u8_t
+raw_input_local_match(struct raw_pcb *pcb, u8_t broadcast)
+{
+ LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */
+
+ /* check if PCB is bound to specific netif */
+ if ((pcb->netif_idx != NETIF_NO_INDEX) &&
+ (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) {
+ return 0;
+ }
+
+#if LWIP_IPV4 && LWIP_IPV6
+ /* Dual-stack: PCBs listening to any IP type also listen to any IP address */
+ if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
+#if IP_SOF_BROADCAST_RECV
+ if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) {
+ return 0;
+ }
+#endif /* IP_SOF_BROADCAST_RECV */
+ return 1;
+ }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+ /* Only need to check PCB if incoming IP version matches PCB IP version */
+ if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) {
+#if LWIP_IPV4
+ /* Special case: IPv4 broadcast: receive all broadcasts
+ * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */
+ if (broadcast != 0) {
+#if IP_SOF_BROADCAST_RECV
+ if (ip_get_option(pcb, SOF_BROADCAST))
+#endif /* IP_SOF_BROADCAST_RECV */
+ {
+ if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) {
+ return 1;
+ }
+ }
+ } else
+#endif /* LWIP_IPV4 */
+ /* Handle IPv4 and IPv6: catch all or exact match */
+ if (ip_addr_isany(&pcb->local_ip) ||
+ ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Determine if an incoming IP packet is covered by a RAW PCB
+ * and if so, pass it to a user-provided receive callback function.
+ *
+ * Given an incoming IP datagram (as a chain of pbufs) this function
+ * finds a corresponding RAW PCB and calls the corresponding receive
+ * callback function.
+ *
+ * @param p pbuf to be demultiplexed to a RAW PCB.
+ * @param inp network interface on which the datagram was received.
+ * @return - RAW_INPUT_EATEN if the packet has been eaten by a RAW PCB receive
+ *           callback function. The caller MAY NOT reference the
+ *           packet any longer, and MAY NOT call pbuf_free().
+ * @return - RAW_INPUT_DELIVERED if the packet was passed to a receive callback
+ *           but not eaten (pbuf is still referenced by the caller).
+ * @return - RAW_INPUT_NONE if no matching PCB was found (pbuf is still
+ *           referenced by the caller).
+ *
+ */
+raw_input_state_t
+raw_input(struct pbuf *p, struct netif *inp)
+{
+ struct raw_pcb *pcb, *prev;
+ s16_t proto;
+ raw_input_state_t ret = RAW_INPUT_NONE;
+ u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif());
+
+ LWIP_UNUSED_ARG(inp);
+
+#if LWIP_IPV6
+#if LWIP_IPV4
+ if (IP_HDR_GET_VERSION(p->payload) == 6)
+#endif /* LWIP_IPV4 */
+ {
+ struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload;
+ proto = IP6H_NEXTH(ip6hdr);
+ }
+#if LWIP_IPV4
+ else
+#endif /* LWIP_IPV4 */
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4
+ {
+ proto = IPH_PROTO((struct ip_hdr *)p->payload);
+ }
+#endif /* LWIP_IPV4 */
+
+ prev = NULL;
+ pcb = raw_pcbs;
+ /* loop through all raw pcbs until the packet is eaten by one */
+ /* this allows multiple pcbs to match against the packet by design */
+ while (pcb != NULL) {
+ if ((pcb->protocol == proto) && raw_input_local_match(pcb, broadcast) &&
+ (((pcb->flags & RAW_FLAGS_CONNECTED) == 0) ||
+ ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) {
+ /* receive callback function available? */
+ if (pcb->recv != NULL) {
+ u8_t eaten;
+#ifndef LWIP_NOASSERT
+ void *old_payload = p->payload;
+#endif
+ ret = RAW_INPUT_DELIVERED;
+          /* pass the packet to the receive callback; did it eat the packet? */
+ eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr());
+ if (eaten != 0) {
+ /* receive function ate the packet */
+ p = NULL;
+ if (prev != NULL) {
+            /* move the pcb to the front of raw_pcbs so that it is
+               found faster next time */
+ prev->next = pcb->next;
+ pcb->next = raw_pcbs;
+ raw_pcbs = pcb;
+ }
+ return RAW_INPUT_EATEN;
+ } else {
+ /* sanity-check that the receive callback did not alter the pbuf */
+ LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet",
+ p->payload == old_payload);
+ }
+ }
+ /* no receive callback function was set for this raw PCB */
+ }
+    /* this PCB did not eat the packet: try the next one */
+ prev = pcb;
+ pcb = pcb->next;
+ }
+ return ret;
+}
+
+/**
+ * @ingroup raw_raw
+ * Bind a RAW PCB.
+ *
+ * @param pcb RAW PCB to be bound with a local address ipaddr.
+ * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to
+ * bind to all local interfaces.
+ *
+ * @return lwIP error code.
+ * - ERR_OK. Successful. No error occurred.
+ * - ERR_USE. The specified IP address is already bound to by
+ * another RAW PCB.
+ *
+ * @see raw_disconnect()
+ */
+err_t
+raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ if ((pcb == NULL) || (ipaddr == NULL)) {
+ return ERR_VAL;
+ }
+ ip_addr_set_ipaddr(&pcb->local_ip, ipaddr);
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+ /* If the given IP address should have a zone but doesn't, assign one now.
+ * This is legacy support: scope-aware callers should always provide properly
+ * zoned source addresses. */
+ if (IP_IS_V6(&pcb->local_ip) &&
+ ip6_addr_lacks_zone(ip_2_ip6(&pcb->local_ip), IP6_UNKNOWN)) {
+ ip6_addr_select_zone(ip_2_ip6(&pcb->local_ip), ip_2_ip6(&pcb->local_ip));
+ }
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+ return ERR_OK;
+}
+
+/**
+ * @ingroup raw_raw
+ * Bind a RAW PCB to a specific netif.
+ * After calling this function, all packets received via this PCB
+ * are guaranteed to have come in via the specified netif, and all
+ * outgoing packets will go out via the specified netif.
+ *
+ * @param pcb RAW PCB to be bound with netif.
+ * @param netif netif to bind to. Can be NULL.
+ *
+ * @see raw_disconnect()
+ */
+void
+raw_bind_netif(struct raw_pcb *pcb, const struct netif *netif)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ if (netif != NULL) {
+ pcb->netif_idx = netif_get_index(netif);
+ } else {
+ pcb->netif_idx = NETIF_NO_INDEX;
+ }
+}
+
+/**
+ * @ingroup raw_raw
+ * Connect a RAW PCB. This function is required by upper layers
+ * of lwIP. Using the raw API, you could use raw_sendto() instead.
+ * This will associate the RAW PCB with the remote address.
+ *
+ * @param pcb RAW PCB to be connected with remote address ipaddr and port.
+ * @param ipaddr remote IP address to connect with.
+ *
+ * @return lwIP error code
+ *
+ * @see raw_disconnect() and raw_sendto()
+ */
+err_t
+raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ if ((pcb == NULL) || (ipaddr == NULL)) {
+ return ERR_VAL;
+ }
+ ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr);
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+ /* If the given IP address should have a zone but doesn't, assign one now,
+ * using the bound address to make a more informed decision when possible. */
+ if (IP_IS_V6(&pcb->remote_ip) &&
+ ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) {
+ ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip));
+ }
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+ raw_set_flags(pcb, RAW_FLAGS_CONNECTED);
+ return ERR_OK;
+}
+
+/**
+ * @ingroup raw_raw
+ * Disconnect a RAW PCB.
+ *
+ * @param pcb the raw pcb to disconnect.
+ */
+void
+raw_disconnect(struct raw_pcb *pcb)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ /* reset remote address association */
+#if LWIP_IPV4 && LWIP_IPV6
+ if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
+ ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE);
+ } else {
+#endif
+ ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip);
+#if LWIP_IPV4 && LWIP_IPV6
+ }
+#endif
+ pcb->netif_idx = NETIF_NO_INDEX;
+ /* mark PCB as unconnected */
+ raw_clear_flags(pcb, RAW_FLAGS_CONNECTED);
+}
+
+/**
+ * @ingroup raw_raw
+ * Set the callback function for received packets that match the
+ * raw PCB's protocol and binding.
+ *
+ * The callback function MUST either
+ * - eat the packet by calling pbuf_free() and returning non-zero. The
+ * packet will not be passed to other raw PCBs or other protocol layers.
+ * - not free the packet, and return zero. The packet will be matched
+ * against further PCBs and/or forwarded to another protocol layers.
+ */
+void
+raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ /* remember recv() callback and user data */
+ pcb->recv = recv;
+ pcb->recv_arg = recv_arg;
+}
+
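+/*
+ * Sketch of a conforming receive callback (illustrative): it either eats
+ * the packet (frees it and returns non-zero) or leaves it untouched and
+ * returns zero. 'want_this_packet()' is a hypothetical filter.
+ *
+ *   static u8_t my_raw_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p,
+ *                           const ip_addr_t *addr)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     LWIP_UNUSED_ARG(pcb);
+ *     LWIP_UNUSED_ARG(addr);
+ *     if (want_this_packet(p)) {
+ *       pbuf_free(p);   // eaten: no other PCB or protocol layer sees it
+ *       return 1;
+ *     }
+ *     return 0;         // not eaten: matching continues
+ *   }
+ */
+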
+/**
+ * @ingroup raw_raw
+ * Send the raw IP packet to the given address. An IP header will be prepended
+ * to the packet, unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that
+ * case, the packet must include an IP header, which will then be sent as is.
+ *
+ * @param pcb the raw pcb which to send
+ * @param p the IP payload to send
+ * @param ipaddr the destination address of the IP packet
+ *
+ */
+err_t
+raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr)
+{
+ struct netif *netif;
+ const ip_addr_t *src_ip;
+
+ if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) {
+ return ERR_VAL;
+ }
+
+ LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n"));
+
+ if (pcb->netif_idx != NETIF_NO_INDEX) {
+ netif = netif_get_by_index(pcb->netif_idx);
+ } else {
+#if LWIP_MULTICAST_TX_OPTIONS
+ netif = NULL;
+ if (ip_addr_ismulticast(ipaddr)) {
+ /* For multicast-destined packets, use the user-provided interface index to
+ * determine the outgoing interface, if an interface index is set and a
+ * matching netif can be found. Otherwise, fall back to regular routing. */
+ netif = netif_get_by_index(pcb->mcast_ifindex);
+ }
+
+ if (netif == NULL)
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+ {
+ netif = ip_route(&pcb->local_ip, ipaddr);
+ }
+ }
+
+ if (netif == NULL) {
+ LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to "));
+ ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ipaddr);
+ return ERR_RTE;
+ }
+
+ if (ip_addr_isany(&pcb->local_ip) || ip_addr_ismulticast(&pcb->local_ip)) {
+ /* use outgoing network interface IP address as source address */
+ src_ip = ip_netif_get_local_ip(netif, ipaddr);
+#if LWIP_IPV6
+ if (src_ip == NULL) {
+ return ERR_RTE;
+ }
+#endif /* LWIP_IPV6 */
+ } else {
+ /* use RAW PCB local IP address as source address */
+ src_ip = &pcb->local_ip;
+ }
+
+ return raw_sendto_if_src(pcb, p, ipaddr, netif, src_ip);
+}
+
+/**
+ * @ingroup raw_raw
+ * Send the raw IP packet to the given address, using a particular outgoing
+ * netif and source IP address. An IP header will be prepended to the packet,
+ * unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that case, the
+ * packet must include an IP header, which will then be sent as is.
+ *
+ * @param pcb RAW PCB used to send the data
+ * @param p chain of pbufs to be sent
+ * @param dst_ip destination IP address
+ * @param netif the netif used for sending
+ * @param src_ip source IP address
+ */
+err_t
+raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip,
+ struct netif *netif, const ip_addr_t *src_ip)
+{
+ err_t err;
+ struct pbuf *q; /* q will be sent down the stack */
+ u16_t header_size;
+ u8_t ttl;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if ((pcb == NULL) || (dst_ip == NULL) || (netif == NULL) || (src_ip == NULL) ||
+ !IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) {
+ return ERR_VAL;
+ }
+
+ header_size = (
+#if LWIP_IPV4 && LWIP_IPV6
+ IP_IS_V6(dst_ip) ? IP6_HLEN : IP_HLEN);
+#elif LWIP_IPV4
+ IP_HLEN);
+#else
+ IP6_HLEN);
+#endif
+
+ /* Handle the HDRINCL option as an exception: none of the code below applies
+ * to this case, and sending the packet needs to be done differently too. */
+ if (pcb->flags & RAW_FLAGS_HDRINCL) {
+ /* A full header *must* be present in the first pbuf of the chain, as the
+ * output routines may access its fields directly. */
+ if (p->len < header_size) {
+ return ERR_VAL;
+ }
+ /* @todo multicast loop support, if at all desired for this scenario.. */
+ NETIF_SET_HINTS(netif, &pcb->netif_hints);
+ err = ip_output_if_hdrincl(p, src_ip, dst_ip, netif);
+ NETIF_RESET_HINTS(netif);
+ return err;
+ }
+
+ /* packet too large to add an IP header without causing an overflow? */
+ if ((u16_t)(p->tot_len + header_size) < p->tot_len) {
+ return ERR_MEM;
+ }
+ /* not enough space to add an IP header to first pbuf in given p chain? */
+ if (pbuf_add_header(p, header_size)) {
+ /* allocate header in new pbuf */
+ q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM);
+ /* new header pbuf could not be allocated? */
+ if (q == NULL) {
+ LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n"));
+ return ERR_MEM;
+ }
+ if (p->tot_len != 0) {
+ /* chain header q in front of given pbuf p */
+ pbuf_chain(q, p);
+ }
+ /* { first pbuf q points to header pbuf } */
+ LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p));
+ } else {
+ /* first pbuf q equals given pbuf */
+ q = p;
+ if (pbuf_remove_header(q, header_size)) {
+ LWIP_ASSERT("Can't restore header we just removed!", 0);
+ return ERR_MEM;
+ }
+ }
+
+#if IP_SOF_BROADCAST
+ if (IP_IS_V4(dst_ip)) {
+ /* broadcast filter? */
+ if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(dst_ip, netif)) {
+ LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb));
+      /* free the temporary header pbuf allocated above */
+ if (q != p) {
+ pbuf_free(q);
+ }
+ return ERR_VAL;
+ }
+ }
+#endif /* IP_SOF_BROADCAST */
+
+ /* Multicast Loop? */
+#if LWIP_MULTICAST_TX_OPTIONS
+ if (((pcb->flags & RAW_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) {
+ q->flags |= PBUF_FLAG_MCASTLOOP;
+ }
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+
+#if LWIP_IPV6
+ /* If requested, based on the IPV6_CHECKSUM socket option per RFC3542,
+ compute the checksum and update the checksum in the payload. */
+ if (IP_IS_V6(dst_ip) && pcb->chksum_reqd) {
+ u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(dst_ip));
+ LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2));
+ SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t));
+ }
+#endif
+
+ /* Determine TTL to use */
+#if LWIP_MULTICAST_TX_OPTIONS
+ ttl = (ip_addr_ismulticast(dst_ip) ? raw_get_multicast_ttl(pcb) : pcb->ttl);
+#else /* LWIP_MULTICAST_TX_OPTIONS */
+ ttl = pcb->ttl;
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+
+ NETIF_SET_HINTS(netif, &pcb->netif_hints);
+ err = ip_output_if(q, src_ip, dst_ip, ttl, pcb->tos, pcb->protocol, netif);
+ NETIF_RESET_HINTS(netif);
+
+ /* did we chain a header earlier? */
+ if (q != p) {
+ /* free the header */
+ pbuf_free(q);
+ }
+ return err;
+}
+
+/**
+ * @ingroup raw_raw
+ * Send the raw IP packet to the address given by raw_connect()
+ *
+ * @param pcb the raw pcb which to send
+ * @param p the IP payload to send
+ *
+ */
+err_t
+raw_send(struct raw_pcb *pcb, struct pbuf *p)
+{
+ return raw_sendto(pcb, p, &pcb->remote_ip);
+}
+
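+/*
+ * Usage sketch (illustrative): send a prebuilt ICMP payload to a connected
+ * peer. 'dest' and 'echo_req' are hypothetical; the IP header is prepended
+ * by the stack since RAW_FLAGS_HDRINCL is not set.
+ *
+ *   struct raw_pcb *rp = raw_new(IP_PROTO_ICMP);
+ *   if (rp != NULL) {
+ *     raw_connect(rp, &dest);
+ *     struct pbuf *p = pbuf_alloc(PBUF_IP, sizeof(echo_req), PBUF_RAM);
+ *     if (p != NULL) {
+ *       pbuf_take(p, &echo_req, sizeof(echo_req));
+ *       raw_send(rp, p);
+ *       pbuf_free(p);
+ *     }
+ *   }
+ */
+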
+/**
+ * @ingroup raw_raw
+ * Remove a RAW PCB.
+ *
+ * @param pcb RAW PCB to be removed. The PCB is removed from the list of
+ * RAW PCBs and the data structure is freed from memory.
+ *
+ * @see raw_new()
+ */
+void
+raw_remove(struct raw_pcb *pcb)
+{
+ struct raw_pcb *pcb2;
+ LWIP_ASSERT_CORE_LOCKED();
+ /* pcb to be removed is first in list? */
+ if (raw_pcbs == pcb) {
+ /* make list start at 2nd pcb */
+ raw_pcbs = raw_pcbs->next;
+ /* pcb not 1st in list */
+ } else {
+ for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) {
+ /* find pcb in raw_pcbs list */
+ if (pcb2->next != NULL && pcb2->next == pcb) {
+ /* remove pcb from list */
+ pcb2->next = pcb->next;
+ break;
+ }
+ }
+ }
+ memp_free(MEMP_RAW_PCB, pcb);
+}
+
+/**
+ * @ingroup raw_raw
+ * Create a RAW PCB.
+ *
+ * @return The RAW PCB which was created. NULL if the PCB data structure
+ * could not be allocated.
+ *
+ * @param proto the protocol number of the IP payload (e.g. IP_PROTO_ICMP)
+ *
+ * @see raw_remove()
+ */
+struct raw_pcb *
+raw_new(u8_t proto)
+{
+ struct raw_pcb *pcb;
+
+ LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n"));
+ LWIP_ASSERT_CORE_LOCKED();
+
+ pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB);
+ /* could allocate RAW PCB? */
+ if (pcb != NULL) {
+ /* initialize PCB to all zeroes */
+ memset(pcb, 0, sizeof(struct raw_pcb));
+ pcb->protocol = proto;
+ pcb->ttl = RAW_TTL;
+#if LWIP_MULTICAST_TX_OPTIONS
+ raw_set_multicast_ttl(pcb, RAW_TTL);
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+ pcb->next = raw_pcbs;
+ raw_pcbs = pcb;
+ }
+ return pcb;
+}
+
+/**
+ * @ingroup raw_raw
+ * Create a RAW PCB for specific IP type.
+ *
+ * @return The RAW PCB which was created. NULL if the PCB data structure
+ * could not be allocated.
+ *
+ * @param type IP address type, see @ref lwip_ip_addr_type definitions.
+ * If you want to listen to IPv4 and IPv6 (dual-stack) packets,
+ * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE.
+ * @param proto the protocol number (next header) of the IPv6 packet payload
+ * (e.g. IP6_NEXTH_ICMP6)
+ *
+ * @see raw_remove()
+ */
+struct raw_pcb *
+raw_new_ip_type(u8_t type, u8_t proto)
+{
+ struct raw_pcb *pcb;
+ LWIP_ASSERT_CORE_LOCKED();
+ pcb = raw_new(proto);
+#if LWIP_IPV4 && LWIP_IPV6
+ if (pcb != NULL) {
+ IP_SET_TYPE_VAL(pcb->local_ip, type);
+ IP_SET_TYPE_VAL(pcb->remote_ip, type);
+ }
+#else /* LWIP_IPV4 && LWIP_IPV6 */
+ LWIP_UNUSED_ARG(type);
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ return pcb;
+}
+
+/** This function is called from netif.c when address is changed
+ *
+ * @param old_addr IP address of the netif before change
+ * @param new_addr IP address of the netif after change
+ */
+void raw_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr)
+{
+ struct raw_pcb *rpcb;
+
+ if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) {
+ for (rpcb = raw_pcbs; rpcb != NULL; rpcb = rpcb->next) {
+ /* PCB bound to current local interface address? */
+ if (ip_addr_cmp(&rpcb->local_ip, old_addr)) {
+ /* The PCB is bound to the old ipaddr and
+ * is set to bound to the new one instead */
+ ip_addr_copy(rpcb->local_ip, *new_addr);
+ }
+ }
+ }
+}
+
+#endif /* LWIP_RAW */
diff --git a/lwip/src/core/stats.c b/lwip/src/core/stats.c
new file mode 100644
index 0000000..34e9b27
--- /dev/null
+++ b/lwip/src/core/stats.c
@@ -0,0 +1,169 @@
+/**
+ * @file
+ * Statistics module
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_STATS /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/stats.h"
+#include "lwip/mem.h"
+#include "lwip/debug.h"
+
+#include <string.h>
+
+struct stats_ lwip_stats;
+
+void
+stats_init(void)
+{
+#ifdef LWIP_DEBUG
+#if MEM_STATS
+ lwip_stats.mem.name = "MEM";
+#endif /* MEM_STATS */
+#endif /* LWIP_DEBUG */
+}
+
+#if LWIP_STATS_DISPLAY
+void
+stats_display_proto(struct stats_proto *proto, const char *name)
+{
+ LWIP_PLATFORM_DIAG(("\n%s\n\t", name));
+ LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit));
+ LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv));
+ LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw));
+ LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop));
+ LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr));
+ LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr));
+ LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr));
+ LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr));
+ LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr));
+ LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr));
+ LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err));
+ LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit));
+}
+
+#if IGMP_STATS || MLD6_STATS
+void
+stats_display_igmp(struct stats_igmp *igmp, const char *name)
+{
+ LWIP_PLATFORM_DIAG(("\n%s\n\t", name));
+ LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit));
+ LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv));
+ LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop));
+ LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr));
+ LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr));
+ LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr));
+ LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr));
+ LWIP_PLATFORM_DIAG(("rx_v1: %"STAT_COUNTER_F"\n\t", igmp->rx_v1));
+ LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group));
+ LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", igmp->rx_general));
+ LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report));
+ LWIP_PLATFORM_DIAG(("tx_join: %"STAT_COUNTER_F"\n\t", igmp->tx_join));
+ LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave));
+ LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n", igmp->tx_report));
+}
+#endif /* IGMP_STATS || MLD6_STATS */
+
+#if MEM_STATS || MEMP_STATS
+void
+stats_display_mem(struct stats_mem *mem, const char *name)
+{
+ LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name));
+ LWIP_PLATFORM_DIAG(("avail: %"MEM_SIZE_F"\n\t", mem->avail));
+ LWIP_PLATFORM_DIAG(("used: %"MEM_SIZE_F"\n\t", mem->used));
+ LWIP_PLATFORM_DIAG(("max: %"MEM_SIZE_F"\n\t", mem->max));
+ LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n", mem->err));
+}
+
+#if MEMP_STATS
+void
+stats_display_memp(struct stats_mem *mem, int idx)
+{
+ if (idx < MEMP_MAX) {
+ stats_display_mem(mem, mem->name);
+ }
+}
+#endif /* MEMP_STATS */
+#endif /* MEM_STATS || MEMP_STATS */
+
+#if SYS_STATS
+void
+stats_display_sys(struct stats_sys *sys)
+{
+ LWIP_PLATFORM_DIAG(("\nSYS\n\t"));
+ LWIP_PLATFORM_DIAG(("sem.used: %"STAT_COUNTER_F"\n\t", sys->sem.used));
+ LWIP_PLATFORM_DIAG(("sem.max: %"STAT_COUNTER_F"\n\t", sys->sem.max));
+ LWIP_PLATFORM_DIAG(("sem.err: %"STAT_COUNTER_F"\n\t", sys->sem.err));
+ LWIP_PLATFORM_DIAG(("mutex.used: %"STAT_COUNTER_F"\n\t", sys->mutex.used));
+ LWIP_PLATFORM_DIAG(("mutex.max: %"STAT_COUNTER_F"\n\t", sys->mutex.max));
+ LWIP_PLATFORM_DIAG(("mutex.err: %"STAT_COUNTER_F"\n\t", sys->mutex.err));
+ LWIP_PLATFORM_DIAG(("mbox.used: %"STAT_COUNTER_F"\n\t", sys->mbox.used));
+ LWIP_PLATFORM_DIAG(("mbox.max: %"STAT_COUNTER_F"\n\t", sys->mbox.max));
+ LWIP_PLATFORM_DIAG(("mbox.err: %"STAT_COUNTER_F"\n", sys->mbox.err));
+}
+#endif /* SYS_STATS */
+
+void
+stats_display(void)
+{
+ s16_t i;
+
+ LINK_STATS_DISPLAY();
+ ETHARP_STATS_DISPLAY();
+ IPFRAG_STATS_DISPLAY();
+ IP6_FRAG_STATS_DISPLAY();
+ IP_STATS_DISPLAY();
+ ND6_STATS_DISPLAY();
+ IP6_STATS_DISPLAY();
+ IGMP_STATS_DISPLAY();
+ MLD6_STATS_DISPLAY();
+ ICMP_STATS_DISPLAY();
+ ICMP6_STATS_DISPLAY();
+ UDP_STATS_DISPLAY();
+ TCP_STATS_DISPLAY();
+ MEM_STATS_DISPLAY();
+ for (i = 0; i < MEMP_MAX; i++) {
+ MEMP_STATS_DISPLAY(i);
+ }
+ SYS_STATS_DISPLAY();
+}
+#endif /* LWIP_STATS_DISPLAY */
+
+#endif /* LWIP_STATS */
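+
+/* Illustrative usage sketch (not part of the lwIP sources): with
+ * LWIP_STATS==1 and LWIP_STATS_DISPLAY==1 set in lwipopts.h, an application
+ * can dump every enabled counter from a debug hook of its own, e.g. a
+ * hypothetical shell command:
+ *
+ *   void my_shell_cmd_lwipstats(void)  // hypothetical helper
+ *   {
+ *     stats_display();                 // prints via LWIP_PLATFORM_DIAG
+ *   }
+ */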
+
diff --git a/lwip/src/core/sys.c b/lwip/src/core/sys.c
new file mode 100644
index 0000000..5f08352
--- /dev/null
+++ b/lwip/src/core/sys.c
@@ -0,0 +1,148 @@
+/**
+ * @file
+ * lwIP Operating System abstraction
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+/**
+ * @defgroup sys_layer Porting (system abstraction layer)
+ * @ingroup lwip
+ *
+ * @defgroup sys_os OS abstraction layer
+ * @ingroup sys_layer
+ * No need to implement functions in this section in NO_SYS mode.
+ * The OS-specific code should be implemented in arch/sys_arch.h
+ * and sys_arch.c of your port.
+ *
+ * The operating system emulation layer provides a common interface
+ * between the lwIP code and the underlying operating system kernel. The
+ * general idea is that porting lwIP to new architectures requires only
+ * small changes to a few header files and a new sys_arch
+ * implementation. It is also possible to do a sys_arch implementation
+ * that does not rely on any underlying operating system.
+ *
+ * The sys_arch provides semaphores, mailboxes and mutexes to lwIP. For the full
+ * lwIP functionality, support for multiple threads can be implemented in the
+ * sys_arch, but this is not required for the basic lwIP
+ * functionality. Timer scheduling is implemented in lwIP, but can instead be
+ * provided by the sys_arch port (LWIP_TIMERS_CUSTOM==1).
+ *
+ * In addition to the source file providing the functionality of sys_arch,
+ * the OS emulation layer must provide several header files defining
+ * macros used throughout lwIP. The files required and the macros they
+ * must define are listed below the sys_arch description.
+ *
+ * Since lwIP 1.4.0, semaphore, mutex and mailbox functions are prototyped in a way that
+ * allows either pointers or actual OS structures to be used. This way, memory
+ * required for such types can be either allocated in place (globally or on the
+ * stack) or on the heap (allocated internally in the "*_new()" functions).
+ *
+ * Note:
+ * -----
+ * Be careful when using mem_malloc() in sys_arch: if malloc() is mapped to
+ * mem_malloc(), you can run into a circular call problem. In mem.c,
+ * mem_init() tries to allocate a semaphore using mem_malloc(), which of
+ * course cannot work when sys_arch itself relies on mem_malloc().
+ *
+ * @defgroup sys_sem Semaphores
+ * @ingroup sys_os
+ * Semaphores can be either counting or binary - lwIP works with both
+ * kinds.
+ * Semaphores are represented by the type "sys_sem_t" which is typedef'd
+ * in the sys_arch.h file. Mailboxes are equivalently represented by the
+ * type "sys_mbox_t". Mutexes are represented by the type "sys_mutex_t".
+ * lwIP does not place any restrictions on how these types are represented
+ * internally.
+ *
+ * @defgroup sys_mutex Mutexes
+ * @ingroup sys_os
+ * Mutexes are recommended to correctly handle priority inversion,
+ * especially if you use LWIP_CORE_LOCKING.
+ *
+ * @defgroup sys_mbox Mailboxes
+ * @ingroup sys_os
+ * Mailboxes should be implemented as a queue which allows multiple messages
+ * to be posted (implementing as a rendez-vous point where only one message can be
+ * posted at a time can have a highly negative impact on performance). A message
+ * in a mailbox is just a pointer, nothing more.
+ *
+ * @defgroup sys_time Time
+ * @ingroup sys_layer
+ *
+ * @defgroup sys_prot Critical sections
+ * @ingroup sys_layer
+ * Used to protect short regions of code against concurrent access.
+ * - Your system is a bare-metal system (probably with an RTOS)
+ * and interrupts are under your control:
+ * Implement this as LockInterrupts() / UnlockInterrupts()
+ * - Your system uses an RTOS with deferred interrupt handling from a
+ * worker thread: Implement as a global mutex or lock/unlock scheduler
+ * - Your system uses a high-level OS with e.g. POSIX signals:
+ * Implement as a global mutex
+ *
+ * @defgroup sys_misc Misc
+ * @ingroup sys_os
+ */
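+
+/* Illustrative sketch (not part of this file): how lwIP core code uses the
+ * sys_prot critical-section macros that a port must provide. On a bare-metal
+ * port these typically map to interrupt masking:
+ *
+ *   SYS_ARCH_DECL_PROTECT(lev);
+ *   SYS_ARCH_PROTECT(lev);       // e.g. save and disable interrupts
+ *   shared_counter++;            // short critical region
+ *   SYS_ARCH_UNPROTECT(lev);     // restore previous interrupt state
+ *
+ * 'shared_counter' is a hypothetical variable used only for illustration.
+ */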
+
+#include "lwip/opt.h"
+
+#include "lwip/sys.h"
+
+/* Most of the functions defined in sys.h must be implemented in the
+ * architecture-dependent file sys_arch.c */
+
+#if !NO_SYS
+
+#ifndef sys_msleep
+/**
+ * Sleep for some ms. Timeouts are NOT processed while sleeping.
+ *
+ * @param ms number of milliseconds to sleep
+ */
+void
+sys_msleep(u32_t ms)
+{
+ if (ms > 0) {
+ sys_sem_t delaysem;
+ err_t err = sys_sem_new(&delaysem, 0);
+ if (err == ERR_OK) {
+ sys_arch_sem_wait(&delaysem, ms);
+ sys_sem_free(&delaysem);
+ }
+ }
+}
+#endif /* sys_msleep */
+
+#endif /* !NO_SYS */
diff --git a/lwip/src/core/tcp.c b/lwip/src/core/tcp.c
new file mode 100644
index 0000000..bd7d64e
--- /dev/null
+++ b/lwip/src/core/tcp.c
@@ -0,0 +1,2686 @@
+/**
+ * @file
+ * Transmission Control Protocol for IP
+ * See also @ref tcp_raw
+ *
+ * @defgroup tcp_raw TCP
+ * @ingroup callbackstyle_api
+ * Transmission Control Protocol for IP\n
+ * @see @ref api
+ *
+ * Common functions for the TCP implementation, such as functions
+ * for manipulating the data structures and the TCP timer functions. TCP functions
+ * related to input and output are found in tcp_in.c and tcp_out.c respectively.\n
+ *
+ * TCP connection setup
+ * --------------------
+ * The functions used for setting up connections are similar to those of
+ * the sequential API and of the BSD socket API. A new TCP connection
+ * identifier (i.e., a protocol control block - PCB) is created with the
+ * tcp_new() function. This PCB can then be either set to listen for new
+ * incoming connections or be explicitly connected to another host.
+ * - tcp_new()
+ * - tcp_bind()
+ * - tcp_listen() and tcp_listen_with_backlog()
+ * - tcp_accept()
+ * - tcp_connect()
+ *
+ * Sending TCP data
+ * ----------------
+ * TCP data is sent by enqueueing the data with a call to tcp_write() and
+ * triggering to send by calling tcp_output(). When the data is successfully
+ * transmitted to the remote host, the application will be notified with a
+ * call to a specified callback function.
+ * - tcp_write()
+ * - tcp_output()
+ * - tcp_sent()
+ *
+ * Receiving TCP data
+ * ------------------
+ * TCP data reception is callback based - an application specified
+ * callback function is called when new data arrives. When the
+ * application has taken the data, it has to call the tcp_recved()
+ * function to indicate that TCP can increase the advertised receive
+ * window.
+ * - tcp_recv()
+ * - tcp_recved()
+ *
+ * Application polling
+ * -------------------
+ * When a connection is idle (i.e., no data is either transmitted or
+ * received), lwIP will repeatedly poll the application by calling a
+ * specified callback function. This can be used either as a watchdog
+ * timer for killing connections that have stayed idle for too long, or
+ * as a method of waiting for memory to become available. For instance,
+ * if a call to tcp_write() has failed because memory wasn't available,
+ * the application may use the polling functionality to call tcp_write()
+ * again when the connection has been idle for a while.
+ * - tcp_poll()
+ *
+ * Closing and aborting connections
+ * --------------------------------
+ * - tcp_close()
+ * - tcp_abort()
+ * - tcp_err()
+ *
+ */
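+
+/* Illustrative sketch (not part of the lwIP sources): a minimal raw-API
+ * listener wiring the calls above together. Names prefixed my_ are
+ * hypothetical application code (my_recv would be a recv callback as
+ * described under "Receiving TCP data"):
+ *
+ *   static err_t my_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
+ *   {
+ *     if (err != ERR_OK) {
+ *       return err;
+ *     }
+ *     tcp_recv(newpcb, my_recv);
+ *     return ERR_OK;
+ *   }
+ *
+ *   void my_listen_init(void)
+ *   {
+ *     struct tcp_pcb *pcb = tcp_new();
+ *     if ((pcb != NULL) && (tcp_bind(pcb, IP_ANY_TYPE, 7) == ERR_OK)) {
+ *       struct tcp_pcb *lpcb = tcp_listen(pcb);  // frees pcb on success
+ *       if (lpcb != NULL) {
+ *         tcp_accept(lpcb, my_accept);
+ *       }
+ *     }
+ *   }
+ */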
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/tcp.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/debug.h"
+#include "lwip/stats.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/nd6.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+#ifndef TCP_LOCAL_PORT_RANGE_START
+/* From http://www.iana.org/assignments/port-numbers:
+ "The Dynamic and/or Private Ports are those from 49152 through 65535" */
+#define TCP_LOCAL_PORT_RANGE_START 0xc000
+#define TCP_LOCAL_PORT_RANGE_END 0xffff
+#define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START))
+#endif
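+/* For example, TCP_ENSURE_LOCAL_PORT_RANGE(0x1234) masks off the bits above
+   the range start (0x1234 & 0x3fff == 0x1234) and adds 0xc000, giving 0xd234;
+   any 16-bit input is thus mapped into [0xc000, 0xffff]. */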
+
+#if LWIP_TCP_KEEPALIVE
+#define TCP_KEEP_DUR(pcb) ((pcb)->keep_cnt * (pcb)->keep_intvl)
+#define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl)
+#else /* LWIP_TCP_KEEPALIVE */
+#define TCP_KEEP_DUR(pcb) TCP_MAXIDLE
+#define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT
+#endif /* LWIP_TCP_KEEPALIVE */
+
+/* As initial send MSS, we use TCP_MSS but limit it to 536. */
+#if TCP_MSS > 536
+#define INITIAL_MSS 536
+#else
+#define INITIAL_MSS TCP_MSS
+#endif
+
+static const char *const tcp_state_str[] = {
+ "CLOSED",
+ "LISTEN",
+ "SYN_SENT",
+ "SYN_RCVD",
+ "ESTABLISHED",
+ "FIN_WAIT_1",
+ "FIN_WAIT_2",
+ "CLOSE_WAIT",
+ "CLOSING",
+ "LAST_ACK",
+ "TIME_WAIT"
+};
+
+/* last local TCP port */
+static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START;
+
+/* Incremented every coarse-grained timer shot (typically every 500 ms). */
+u32_t tcp_ticks;
+static const u8_t tcp_backoff[13] =
+{ 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7};
+/* Persist timer backoff: number of tcp_slowtmr() ticks between probes, per slot */
+static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 };
+
+/* The TCP PCB lists. */
+
+/** List of all TCP PCBs bound but not yet (connected || listening) */
+struct tcp_pcb *tcp_bound_pcbs;
+/** List of all TCP PCBs in LISTEN state */
+union tcp_listen_pcbs_t tcp_listen_pcbs;
+/** List of all TCP PCBs that are in a state in which
+ * they accept or send data. */
+struct tcp_pcb *tcp_active_pcbs;
+/** List of all TCP PCBs in TIME-WAIT state */
+struct tcp_pcb *tcp_tw_pcbs;
+
+/** An array with all (non-temporary) PCB lists, mainly used for smaller code size */
+struct tcp_pcb **const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs,
+ &tcp_active_pcbs, &tcp_tw_pcbs
+};
+
+u8_t tcp_active_pcbs_changed;
+
+/** Timer counter to handle calling slow-timer from tcp_tmr() */
+static u8_t tcp_timer;
+static u8_t tcp_timer_ctr;
+static u16_t tcp_new_port(void);
+
+static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb);
+#if LWIP_TCP_PCB_NUM_EXT_ARGS
+static void tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args);
+#endif
+
+/**
+ * Initialize this module.
+ */
+void
+tcp_init(void)
+{
+#ifdef LWIP_RAND
+ tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND());
+#endif /* LWIP_RAND */
+}
+
+/** Free a tcp pcb */
+void
+tcp_free(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("tcp_free: LISTEN", pcb->state != LISTEN);
+#if LWIP_TCP_PCB_NUM_EXT_ARGS
+ tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args);
+#endif
+ memp_free(MEMP_TCP_PCB, pcb);
+}
+
+/** Free a tcp listen pcb */
+static void
+tcp_free_listen(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("tcp_free_listen: !LISTEN", pcb->state != LISTEN);
+#if LWIP_TCP_PCB_NUM_EXT_ARGS
+ tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args);
+#endif
+ memp_free(MEMP_TCP_PCB_LISTEN, pcb);
+}
+
+/**
+ * Called periodically to dispatch TCP timers.
+ */
+void
+tcp_tmr(void)
+{
+ /* Call tcp_fasttmr() every 250 ms */
+ tcp_fasttmr();
+
+ if (++tcp_timer & 1) {
+ /* Call tcp_slowtmr() every 500 ms, i.e., every other timer
+ tcp_tmr() is called. */
+ tcp_slowtmr();
+ }
+}
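+
+/* Illustrative sketch (not part of this file): in a NO_SYS configuration that
+ * bypasses the lwIP timeout subsystem, an application main loop could drive
+ * this timer itself (sys_now() is the port's millisecond clock; my_netif_poll
+ * is a hypothetical driver-input function):
+ *
+ *   u32_t last = sys_now();
+ *   for (;;) {
+ *     my_netif_poll();
+ *     if ((u32_t)(sys_now() - last) >= TCP_TMR_INTERVAL) {
+ *       last += TCP_TMR_INTERVAL;
+ *       tcp_tmr();   // 250 ms fast/slow timer dispatch
+ *     }
+ *   }
+ *
+ * Most applications instead use sys_check_timeouts(), which ends up calling
+ * tcp_tmr() via the timeouts module.
+ */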
+
+#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG
+/** Called when a listen pcb is closed. Iterates one pcb list and removes the
+ * closed listener pcb from pcb->listener if matching.
+ */
+static void
+tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb)
+{
+ struct tcp_pcb *pcb;
+
+ LWIP_ASSERT("tcp_remove_listener: invalid listener", lpcb != NULL);
+
+ for (pcb = list; pcb != NULL; pcb = pcb->next) {
+ if (pcb->listener == lpcb) {
+ pcb->listener = NULL;
+ }
+ }
+}
+#endif
+
+/** Called when a listen pcb is closed. Iterates all pcb lists and removes the
+ * closed listener pcb from pcb->listener if matching.
+ */
+static void
+tcp_listen_closed(struct tcp_pcb *pcb)
+{
+#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG
+ size_t i;
+ LWIP_ASSERT("pcb != NULL", pcb != NULL);
+ LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN);
+ for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) {
+ tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen *)pcb);
+ }
+#endif
+ LWIP_UNUSED_ARG(pcb);
+}
+
+#if TCP_LISTEN_BACKLOG
+/** @ingroup tcp_raw
+ * Delay accepting a connection with respect to the listen backlog:
+ * the number of outstanding connections is increased until
+ * tcp_backlog_accepted() is called.
+ *
+ * ATTENTION: the caller is responsible for calling tcp_backlog_accepted()
+ * or else the backlog feature will get out of sync!
+ *
+ * @param pcb the connection pcb which is not fully accepted yet
+ */
+void
+tcp_backlog_delayed(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("pcb != NULL", pcb != NULL);
+ LWIP_ASSERT_CORE_LOCKED();
+ if ((pcb->flags & TF_BACKLOGPEND) == 0) {
+ if (pcb->listener != NULL) {
+ pcb->listener->accepts_pending++;
+ LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0);
+ tcp_set_flags(pcb, TF_BACKLOGPEND);
+ }
+ }
+}
+
+/** @ingroup tcp_raw
+ * Called when a connection that was delayed with tcp_backlog_delayed() is
+ * finally accepted (or closed/aborted): decreases the number of outstanding
+ * connections again.
+ *
+ * ATTENTION: this must be paired with an earlier call to tcp_backlog_delayed()
+ * or else the backlog feature will get out of sync!
+ *
+ * @param pcb the connection pcb which is now fully accepted (or closed/aborted)
+ */
+void
+tcp_backlog_accepted(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("pcb != NULL", pcb != NULL);
+ LWIP_ASSERT_CORE_LOCKED();
+ if ((pcb->flags & TF_BACKLOGPEND) != 0) {
+ if (pcb->listener != NULL) {
+ LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0);
+ pcb->listener->accepts_pending--;
+ tcp_clear_flags(pcb, TF_BACKLOGPEND);
+ }
+ }
+}
+#endif /* TCP_LISTEN_BACKLOG */
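+
+/* Illustrative sketch (not part of the lwIP sources): the typical pairing of
+ * the two calls above in an accept callback that defers the connection to a
+ * worker (my_* names are hypothetical):
+ *
+ *   static err_t my_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
+ *   {
+ *     tcp_backlog_delayed(newpcb);   // keep the backlog slot occupied...
+ *     my_queue_for_worker(newpcb);
+ *     return ERR_OK;
+ *   }
+ *
+ *   static void my_worker_takes_over(struct tcp_pcb *pcb)
+ *   {
+ *     tcp_backlog_accepted(pcb);     // ...and release it once taken over
+ *   }
+ */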
+
+/**
+ * Closes the TX side of a connection held by the PCB.
+ * For tcp_close(), a RST is sent if the application didn't receive all data
+ * (tcp_recved() not called for all data passed to recv callback).
+ *
+ * Listening pcbs are freed and may not be referenced any more.
+ * Connection pcbs are freed if not yet connected and may not be referenced
+ * any more. If a connection is established (at least SYN received or in
+ * a closing state), the connection is closed, and put in a closing state.
+ * The pcb is then automatically freed in tcp_slowtmr(). It is therefore
+ * unsafe to reference it.
+ *
+ * @param pcb the tcp_pcb to close
+ * @return ERR_OK if connection has been closed
+ * another err_t if closing failed and pcb is not freed
+ */
+static err_t
+tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data)
+{
+ LWIP_ASSERT("tcp_close_shutdown: invalid pcb", pcb != NULL);
+
+ if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) {
+ if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) {
+ /* Not all data received by application, send RST to tell the remote
+ side about this. */
+ LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED);
+
+ /* don't call tcp_abort here: we must not deallocate the pcb since
+ that might not be expected when calling tcp_close */
+ tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
+ pcb->local_port, pcb->remote_port);
+
+ tcp_pcb_purge(pcb);
+ TCP_RMV_ACTIVE(pcb);
+ /* Deallocate the pcb since we already sent a RST for it */
+ if (tcp_input_pcb == pcb) {
+ /* prevent using a deallocated pcb: free it from tcp_input later */
+ tcp_trigger_input_pcb_close();
+ } else {
+ tcp_free(pcb);
+ }
+ return ERR_OK;
+ }
+ }
+
+ /* - states which free the pcb are handled here,
+ - states which send FIN and change state are handled in tcp_close_shutdown_fin() */
+ switch (pcb->state) {
+ case CLOSED:
+ /* Closing a pcb in the CLOSED state might seem erroneous,
+ * however, it is in this state once allocated and as yet unused
+ * and the user needs some way to free it should the need arise.
+ * Calling tcp_close() with a pcb that has already been closed, (i.e. twice)
+ * or for a pcb that has been used and then entered the CLOSED state
+ * is erroneous, but this should never happen as the pcb has in those cases
+ * been freed, and so any remaining handles are bogus. */
+ if (pcb->local_port != 0) {
+ TCP_RMV(&tcp_bound_pcbs, pcb);
+ }
+ tcp_free(pcb);
+ break;
+ case LISTEN:
+ tcp_listen_closed(pcb);
+ tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb);
+ tcp_free_listen(pcb);
+ break;
+ case SYN_SENT:
+ TCP_PCB_REMOVE_ACTIVE(pcb);
+ tcp_free(pcb);
+ MIB2_STATS_INC(mib2.tcpattemptfails);
+ break;
+ default:
+ return tcp_close_shutdown_fin(pcb);
+ }
+ return ERR_OK;
+}
+
+static err_t
+tcp_close_shutdown_fin(struct tcp_pcb *pcb)
+{
+ err_t err;
+ LWIP_ASSERT("pcb != NULL", pcb != NULL);
+
+ switch (pcb->state) {
+ case SYN_RCVD:
+ err = tcp_send_fin(pcb);
+ if (err == ERR_OK) {
+ tcp_backlog_accepted(pcb);
+ MIB2_STATS_INC(mib2.tcpattemptfails);
+ pcb->state = FIN_WAIT_1;
+ }
+ break;
+ case ESTABLISHED:
+ err = tcp_send_fin(pcb);
+ if (err == ERR_OK) {
+ MIB2_STATS_INC(mib2.tcpestabresets);
+ pcb->state = FIN_WAIT_1;
+ }
+ break;
+ case CLOSE_WAIT:
+ err = tcp_send_fin(pcb);
+ if (err == ERR_OK) {
+ MIB2_STATS_INC(mib2.tcpestabresets);
+ pcb->state = LAST_ACK;
+ }
+ break;
+ default:
+ /* Has already been closed, do nothing. */
+ return ERR_OK;
+ }
+
+ if (err == ERR_OK) {
+ /* To ensure all data has been sent when tcp_close returns, we have
+ to make sure tcp_output doesn't fail.
+ Since we don't really have to ensure all data has been sent when tcp_close
+ returns (unsent data is sent from tcp timer functions, also), we don't care
+ for the return value of tcp_output for now. */
+ tcp_output(pcb);
+ } else if (err == ERR_MEM) {
+ /* Mark this pcb for closing. Closing is retried from tcp_tmr. */
+ tcp_set_flags(pcb, TF_CLOSEPEND);
+ /* We have to return ERR_OK from here to indicate to the callers that this
+ pcb should not be used any more as it will be freed soon via tcp_tmr.
+       This is OK here since sending FIN does not guarantee a time frame for
+ actually freeing the pcb, either (it is left in closure states for
+ remote ACK or timeout) */
+ return ERR_OK;
+ }
+ return err;
+}
+
+/**
+ * @ingroup tcp_raw
+ * Closes the connection held by the PCB.
+ *
+ * Listening pcbs are freed and may not be referenced any more.
+ * Connection pcbs are freed if not yet connected and may not be referenced
+ * any more. If a connection is established (at least SYN received or in
+ * a closing state), the connection is closed, and put in a closing state.
+ * The pcb is then automatically freed in tcp_slowtmr(). It is therefore
+ * unsafe to reference it (unless an error is returned).
+ *
+ * The function may return ERR_MEM if no memory
+ * was available for closing the connection. If so, the application
+ * should wait and try again either by using the acknowledgment
+ * callback or the polling functionality. If the close succeeds, the
+ * function returns ERR_OK.
+ *
+ * @param pcb the tcp_pcb to close
+ * @return ERR_OK if connection has been closed
+ * another err_t if closing failed and pcb is not freed
+ */
+err_t
+tcp_close(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("tcp_close: invalid pcb", pcb != NULL, return ERR_ARG);
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in "));
+
+ tcp_debug_print_state(pcb->state);
+
+ if (pcb->state != LISTEN) {
+ /* Set a flag not to receive any more data... */
+ tcp_set_flags(pcb, TF_RXCLOSED);
+ }
+ /* ... and close */
+ return tcp_close_shutdown(pcb, 1);
+}
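+
+/* Illustrative sketch (not part of the lwIP sources): retrying a close that
+ * failed with ERR_MEM from the poll callback, as suggested above. my_state
+ * and my_poll are hypothetical application names:
+ *
+ *   static err_t my_poll(void *arg, struct tcp_pcb *tpcb)
+ *   {
+ *     struct my_state *s = (struct my_state *)arg;
+ *     if (s->close_pending && (tcp_close(tpcb) == ERR_OK)) {
+ *       s->close_pending = 0;  // pcb will be freed by the stack: stop using it
+ *     }
+ *     return ERR_OK;
+ *   }
+ */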
+
+/**
+ * @ingroup tcp_raw
+ * Causes all or part of a full-duplex connection of this PCB to be shut down.
+ * This doesn't deallocate the PCB unless shutting down both sides!
+ * Shutting down both sides is the same as calling tcp_close, so if it succeeds
+ * (i.e. returns ERR_OK), the PCB must not be referenced any more!
+ *
+ * @param pcb PCB to shutdown
+ * @param shut_rx shut down receive side if this is != 0
+ * @param shut_tx shut down send side if this is != 0
+ * @return ERR_OK if shutdown succeeded (or the PCB has already been shut down)
+ * another err_t on error.
+ */
+err_t
+tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("tcp_shutdown: invalid pcb", pcb != NULL, return ERR_ARG);
+
+ if (pcb->state == LISTEN) {
+ return ERR_CONN;
+ }
+ if (shut_rx) {
+ /* shut down the receive side: set a flag not to receive any more data... */
+ tcp_set_flags(pcb, TF_RXCLOSED);
+ if (shut_tx) {
+ /* shutting down the tx AND rx side is the same as closing for the raw API */
+ return tcp_close_shutdown(pcb, 1);
+ }
+ /* ... and free buffered data */
+ if (pcb->refused_data != NULL) {
+ pbuf_free(pcb->refused_data);
+ pcb->refused_data = NULL;
+ }
+ }
+ if (shut_tx) {
+ /* This can't happen twice since if it succeeds, the pcb's state is changed.
+ Only close in these states as the others directly deallocate the PCB */
+ switch (pcb->state) {
+ case SYN_RCVD:
+ case ESTABLISHED:
+ case CLOSE_WAIT:
+ return tcp_close_shutdown(pcb, (u8_t)shut_rx);
+ default:
+ /* Not (yet?) connected, cannot shutdown the TX side as that would bring us
+ into CLOSED state, where the PCB is deallocated. */
+ return ERR_CONN;
+ }
+ }
+ return ERR_OK;
+}
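+
+/* Illustrative sketch (not part of the lwIP sources): half-closing only the
+ * TX side once the last request has been written, similar to BSD
+ * shutdown(fd, SHUT_WR):
+ *
+ *   if (tcp_shutdown(pcb, 0, 1) == ERR_OK) {
+ *     // FIN is queued; the RX side stays open for the peer's response
+ *   }
+ */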
+
+/**
+ * Abandons a connection and optionally sends a RST to the remote
+ * host. Deletes the local protocol control block. This is done when
+ * a connection is killed because of shortage of memory.
+ *
+ * @param pcb the tcp_pcb to abort
+ * @param reset boolean to indicate whether a reset should be sent
+ */
+void
+tcp_abandon(struct tcp_pcb *pcb, int reset)
+{
+ u32_t seqno, ackno;
+#if LWIP_CALLBACK_API
+ tcp_err_fn errf;
+#endif /* LWIP_CALLBACK_API */
+ void *errf_arg;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("tcp_abandon: invalid pcb", pcb != NULL, return);
+
+ /* pcb->state LISTEN not allowed here */
+ LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs",
+ pcb->state != LISTEN);
+ /* Figure out on which TCP PCB list we are, and remove us. If we
+ are in an active state, call the receive function associated with
+ the PCB with a NULL argument, and send an RST to the remote end. */
+ if (pcb->state == TIME_WAIT) {
+ tcp_pcb_remove(&tcp_tw_pcbs, pcb);
+ tcp_free(pcb);
+ } else {
+ int send_rst = 0;
+ u16_t local_port = 0;
+ enum tcp_state last_state;
+ seqno = pcb->snd_nxt;
+ ackno = pcb->rcv_nxt;
+#if LWIP_CALLBACK_API
+ errf = pcb->errf;
+#endif /* LWIP_CALLBACK_API */
+ errf_arg = pcb->callback_arg;
+ if (pcb->state == CLOSED) {
+ if (pcb->local_port != 0) {
+ /* bound, not yet opened */
+ TCP_RMV(&tcp_bound_pcbs, pcb);
+ }
+ } else {
+ send_rst = reset;
+ local_port = pcb->local_port;
+ TCP_PCB_REMOVE_ACTIVE(pcb);
+ }
+ if (pcb->unacked != NULL) {
+ tcp_segs_free(pcb->unacked);
+ }
+ if (pcb->unsent != NULL) {
+ tcp_segs_free(pcb->unsent);
+ }
+#if TCP_QUEUE_OOSEQ
+ if (pcb->ooseq != NULL) {
+ tcp_segs_free(pcb->ooseq);
+ }
+#endif /* TCP_QUEUE_OOSEQ */
+ tcp_backlog_accepted(pcb);
+ if (send_rst) {
+ LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n"));
+ tcp_rst(pcb, seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port);
+ }
+ last_state = pcb->state;
+ tcp_free(pcb);
+ TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT);
+ }
+}
+
+/**
+ * @ingroup tcp_raw
+ * Aborts the connection by sending a RST (reset) segment to the remote
+ * host. The pcb is deallocated. This function never fails.
+ *
+ * ATTENTION: When calling this from one of the TCP callbacks, make
+ * sure you always return ERR_ABRT (and never return ERR_ABRT otherwise),
+ * or you will risk accessing deallocated memory or memory leaks!
+ *
+ * @param pcb the tcp pcb to abort
+ */
+void
+tcp_abort(struct tcp_pcb *pcb)
+{
+ tcp_abandon(pcb, 1);
+}
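+
+/* Illustrative sketch (not part of the lwIP sources): aborting from within a
+ * recv callback. ERR_ABRT must be returned so the core knows the pcb is gone
+ * (my_recv and my_is_garbage are hypothetical application names):
+ *
+ *   static err_t my_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+ *   {
+ *     if ((p != NULL) && my_is_garbage(p)) {
+ *       pbuf_free(p);
+ *       tcp_abort(tpcb);
+ *       return ERR_ABRT;  // pcb has been deallocated
+ *     }
+ *     ...                 // normal processing elided
+ *   }
+ */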
+
+/**
+ * @ingroup tcp_raw
+ * Binds the connection to a local port number and IP address. If the
+ * IP address is not given (i.e., ipaddr == IP_ANY_TYPE), the connection is
+ * bound to all local IP addresses.
+ * If another connection is bound to the same port, the function will
+ * return ERR_USE, otherwise ERR_OK is returned.
+ *
+ * @param pcb the tcp_pcb to bind (no check is done whether this pcb is
+ * already bound!)
+ * @param ipaddr the local ip address to bind to (use IPx_ADDR_ANY to bind
+ *        to any local address)
+ * @param port the local port to bind to
+ * @return ERR_USE if the port is already in use
+ * ERR_VAL if bind failed because the PCB is not in a valid state
+ * ERR_OK if bound
+ */
+err_t
+tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port)
+{
+ int i;
+ int max_pcb_list = NUM_TCP_PCB_LISTS;
+ struct tcp_pcb *cpcb;
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+ ip_addr_t zoned_ipaddr;
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_IPV4
+ /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
+ if (ipaddr == NULL) {
+ ipaddr = IP4_ADDR_ANY;
+ }
+#else /* LWIP_IPV4 */
+ LWIP_ERROR("tcp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG);
+#endif /* LWIP_IPV4 */
+
+ LWIP_ERROR("tcp_bind: invalid pcb", pcb != NULL, return ERR_ARG);
+
+ LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL);
+
+#if SO_REUSE
+ /* Unless the REUSEADDR flag is set,
+ we have to check the pcbs in TIME-WAIT state, also.
+ We do not dump TIME_WAIT pcb's; they can still be matched by incoming
+ packets using both local and remote IP addresses and ports to distinguish.
+ */
+ if (ip_get_option(pcb, SOF_REUSEADDR)) {
+ max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT;
+ }
+#endif /* SO_REUSE */
+
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+ /* If the given IP address should have a zone but doesn't, assign one now.
+ * This is legacy support: scope-aware callers should always provide properly
+ * zoned source addresses. Do the zone selection before the address-in-use
+ * check below; as such we have to make a temporary copy of the address. */
+ if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNICAST)) {
+ ip_addr_copy(zoned_ipaddr, *ipaddr);
+ ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr));
+ ipaddr = &zoned_ipaddr;
+ }
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+
+ if (port == 0) {
+ port = tcp_new_port();
+ if (port == 0) {
+ return ERR_BUF;
+ }
+ } else {
+ /* Check if the address already is in use (on all lists) */
+ for (i = 0; i < max_pcb_list; i++) {
+ for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) {
+ if (cpcb->local_port == port) {
+#if SO_REUSE
+ /* Omit checking for the same port if both pcbs have REUSEADDR set.
+ For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in
+ tcp_connect. */
+ if (!ip_get_option(pcb, SOF_REUSEADDR) ||
+ !ip_get_option(cpcb, SOF_REUSEADDR))
+#endif /* SO_REUSE */
+ {
+ /* @todo: check accept_any_ip_version */
+ if ((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) &&
+ (ip_addr_isany(&cpcb->local_ip) ||
+ ip_addr_isany(ipaddr) ||
+ ip_addr_cmp(&cpcb->local_ip, ipaddr))) {
+ return ERR_USE;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (!ip_addr_isany(ipaddr)
+#if LWIP_IPV4 && LWIP_IPV6
+ || (IP_GET_TYPE(ipaddr) != IP_GET_TYPE(&pcb->local_ip))
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ ) {
+ ip_addr_set(&pcb->local_ip, ipaddr);
+ }
+ pcb->local_port = port;
+ TCP_REG(&tcp_bound_pcbs, pcb);
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port));
+ return ERR_OK;
+}
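+
+/* Illustrative sketch (not part of the lwIP sources): binding a fresh pcb to
+ * port 80 on any local address and handling the ERR_USE case:
+ *
+ *   struct tcp_pcb *pcb = tcp_new();
+ *   if (pcb != NULL) {
+ *     if (tcp_bind(pcb, IP_ANY_TYPE, 80) == ERR_USE) {
+ *       tcp_close(pcb);  // port taken: free the never-connected pcb
+ *       pcb = NULL;
+ *     }
+ *   }
+ */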
+
+/**
+ * @ingroup tcp_raw
+ * Binds the connection to a netif and IP address.
+ * After calling this function, all packets received via this PCB
+ * are guaranteed to have come in via the specified netif, and all
+ * outgoing packets will go out via the specified netif.
+ *
+ * @param pcb the tcp_pcb to bind.
+ * @param netif the netif to bind to. Can be NULL.
+ */
+void
+tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ if (netif != NULL) {
+ pcb->netif_idx = netif_get_index(netif);
+ } else {
+ pcb->netif_idx = NETIF_NO_INDEX;
+ }
+}
+
+#if LWIP_CALLBACK_API
+/**
+ * Default accept callback if no accept callback is specified by the user.
+ */
+static err_t
+tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err)
+{
+ LWIP_UNUSED_ARG(arg);
+ LWIP_UNUSED_ARG(err);
+
+ LWIP_ASSERT("tcp_accept_null: invalid pcb", pcb != NULL);
+
+ tcp_abort(pcb);
+
+ return ERR_ABRT;
+}
+#endif /* LWIP_CALLBACK_API */
+
+/**
+ * @ingroup tcp_raw
+ * Set the state of the connection to be LISTEN, which means that it
+ * is able to accept incoming connections. The protocol control block
+ * is reallocated in order to consume less memory. Setting the
+ * connection to LISTEN is an irreversible process.
+ * When an incoming connection is accepted, the function specified with
+ * the tcp_accept() function will be called. The pcb has to be bound
+ * to a local port with the tcp_bind() function.
+ *
+ * The tcp_listen() function returns a new connection identifier, and
+ * the one passed as an argument to the function will be
+ * deallocated. The reason for this behavior is that less memory is
+ * needed for a connection that is listening, so tcp_listen() will
+ * reclaim the memory needed for the original connection and allocate a
+ * new smaller memory block for the listening connection.
+ *
+ * tcp_listen() may return NULL if no memory was available for the
+ * listening connection. If so, the memory associated with the pcb
+ * passed as an argument to tcp_listen() will not be deallocated.
+ *
+ * The backlog limits the number of outstanding connections
+ * in the listen queue to the value specified by the backlog argument.
+ * To use it, you need to set TCP_LISTEN_BACKLOG=1 in your lwipopts.h.
+ *
+ * @param pcb the original tcp_pcb
+ * @param backlog the incoming connections queue limit
+ * @return tcp_pcb used for listening, consumes less memory.
+ *
+ * @note The original tcp_pcb is freed. This function therefore has to be
+ * called like this:
+ * tpcb = tcp_listen_with_backlog(tpcb, backlog);
+ */
+struct tcp_pcb *
+tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ return tcp_listen_with_backlog_and_err(pcb, backlog, NULL);
+}
+
+/**
+ * @ingroup tcp_raw
+ * Set the state of the connection to be LISTEN, which means that it
+ * is able to accept incoming connections. The protocol control block
+ * is reallocated in order to consume less memory. Setting the
+ * connection to LISTEN is an irreversible process.
+ *
+ * @param pcb the original tcp_pcb
+ * @param backlog the incoming connections queue limit
+ * @param err when NULL is returned, this contains the error reason
+ * @return tcp_pcb used for listening, consumes less memory.
+ *
+ * @note The original tcp_pcb is freed. This function therefore has to be
+ * called like this:
+ * tpcb = tcp_listen_with_backlog_and_err(tpcb, backlog, &err);
+ */
+struct tcp_pcb *
+tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err)
+{
+ struct tcp_pcb_listen *lpcb = NULL;
+ err_t res;
+
+ LWIP_UNUSED_ARG(backlog);
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("tcp_listen_with_backlog_and_err: invalid pcb", pcb != NULL, res = ERR_ARG; goto done);
+ LWIP_ERROR("tcp_listen_with_backlog_and_err: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done);
+
+ /* already listening? */
+ if (pcb->state == LISTEN) {
+ lpcb = (struct tcp_pcb_listen *)pcb;
+ res = ERR_ALREADY;
+ goto done;
+ }
+#if SO_REUSE
+ if (ip_get_option(pcb, SOF_REUSEADDR)) {
+ /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage
+ is declared (listen-/connection-pcb), we have to make sure now that
+ this port is only used once for every local IP. */
+ for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) {
+ if ((lpcb->local_port == pcb->local_port) &&
+ ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) {
+ /* this address/port is already used */
+ lpcb = NULL;
+ res = ERR_USE;
+ goto done;
+ }
+ }
+ }
+#endif /* SO_REUSE */
+ lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN);
+ if (lpcb == NULL) {
+ res = ERR_MEM;
+ goto done;
+ }
+ lpcb->callback_arg = pcb->callback_arg;
+ lpcb->local_port = pcb->local_port;
+ lpcb->state = LISTEN;
+ lpcb->prio = pcb->prio;
+ lpcb->so_options = pcb->so_options;
+ lpcb->netif_idx = NETIF_NO_INDEX;
+ lpcb->ttl = pcb->ttl;
+ lpcb->tos = pcb->tos;
+#if LWIP_IPV4 && LWIP_IPV6
+ IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type);
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ ip_addr_copy(lpcb->local_ip, pcb->local_ip);
+ if (pcb->local_port != 0) {
+ TCP_RMV(&tcp_bound_pcbs, pcb);
+ }
+#if LWIP_TCP_PCB_NUM_EXT_ARGS
+ /* copy over ext_args to listening pcb */
+ memcpy(&lpcb->ext_args, &pcb->ext_args, sizeof(pcb->ext_args));
+#endif
+ tcp_free(pcb);
+#if LWIP_CALLBACK_API
+ lpcb->accept = tcp_accept_null;
+#endif /* LWIP_CALLBACK_API */
+#if TCP_LISTEN_BACKLOG
+ lpcb->accepts_pending = 0;
+ tcp_backlog_set(lpcb, backlog);
+#endif /* TCP_LISTEN_BACKLOG */
+ TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb);
+ res = ERR_OK;
+done:
+ if (err != NULL) {
+ *err = res;
+ }
+ return (struct tcp_pcb *)lpcb;
+}
+
+/**
+ * Update the state that tracks the available window space to advertise.
+ *
+ * Returns how much extra window would be advertised if we sent an
+ * update now.
+ */
+u32_t
+tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb)
+{
+ u32_t new_right_edge;
+
+ LWIP_ASSERT("tcp_update_rcv_ann_wnd: invalid pcb", pcb != NULL);
+ new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd;
+
+ if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) {
+ /* we can advertise more window */
+ pcb->rcv_ann_wnd = pcb->rcv_wnd;
+ return new_right_edge - pcb->rcv_ann_right_edge;
+ } else {
+ if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) {
+ /* Can happen due to other end sending out of advertised window,
+ * but within actual available (but not yet advertised) window */
+ pcb->rcv_ann_wnd = 0;
+ } else {
+ /* keep the right edge of window constant */
+ u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt;
+#if !LWIP_WND_SCALE
+ LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff);
+#endif
+ pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd;
+ }
+ return 0;
+ }
+}
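+
+/* Worked example for the threshold above (hypothetical numbers): with
+ * TCP_WND == 5840 and mss == 1460, an update is advertised once the right
+ * edge can move by at least LWIP_MIN(5840 / 2, 1460) == 1460 bytes,
+ * i.e. one full-sized segment. */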
+
+/**
+ * @ingroup tcp_raw
+ * This function should be called by the application when it has
+ * processed the data. The purpose is to advertise a larger window
+ * when the data has been processed.
+ *
+ * @param pcb the tcp_pcb for which data is read
+ * @param len the amount of bytes that have been read by the application
+ */
+void
+tcp_recved(struct tcp_pcb *pcb, u16_t len)
+{
+ u32_t wnd_inflation;
+ tcpwnd_size_t rcv_wnd;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("tcp_recved: invalid pcb", pcb != NULL, return);
+
+ /* pcb->state LISTEN not allowed here */
+ LWIP_ASSERT("don't call tcp_recved for listen-pcbs",
+ pcb->state != LISTEN);
+
+ rcv_wnd = (tcpwnd_size_t)(pcb->rcv_wnd + len);
+ if ((rcv_wnd > TCP_WND_MAX(pcb)) || (rcv_wnd < pcb->rcv_wnd)) {
+ /* window got too big or tcpwnd_size_t overflow */
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: window got too big or tcpwnd_size_t overflow\n"));
+ pcb->rcv_wnd = TCP_WND_MAX(pcb);
+ } else {
+ pcb->rcv_wnd = rcv_wnd;
+ }
+
+ wnd_inflation = tcp_update_rcv_ann_wnd(pcb);
+
+ /* If the change in the right edge of window is significant (default
+ * watermark is TCP_WND/4), then send an explicit update now.
+ * Otherwise wait for a packet to be sent in the normal course of
+ * events (or more window to be available later) */
+ if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) {
+ tcp_ack_now(pcb);
+ tcp_output(pcb);
+ }
+
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n",
+ len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd)));
+}
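+
+/* Illustrative sketch (not part of the lwIP sources): a recv callback that
+ * consumes data immediately and re-opens the window right away (my_consume
+ * is a hypothetical application function):
+ *
+ *   static err_t my_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+ *   {
+ *     if (p == NULL) {               // remote side closed the connection
+ *       return tcp_close(tpcb);
+ *     }
+ *     my_consume(p);                 // copy out the payload
+ *     tcp_recved(tpcb, p->tot_len);  // data processed: grow the window again
+ *     pbuf_free(p);
+ *     return ERR_OK;
+ *   }
+ */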
+
+/**
+ * Allocate a new local TCP port.
+ *
+ * @return a new (free) local TCP port number
+ */
+static u16_t
+tcp_new_port(void)
+{
+ u8_t i;
+ u16_t n = 0;
+ struct tcp_pcb *pcb;
+
+again:
+ tcp_port++;
+ if (tcp_port == TCP_LOCAL_PORT_RANGE_END) {
+ tcp_port = TCP_LOCAL_PORT_RANGE_START;
+ }
+ /* Check all PCB lists. */
+ for (i = 0; i < NUM_TCP_PCB_LISTS; i++) {
+ for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) {
+ if (pcb->local_port == tcp_port) {
+ n++;
+ if (n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) {
+ return 0;
+ }
+ goto again;
+ }
+ }
+ }
+ return tcp_port;
+}
+
+/**
+ * @ingroup tcp_raw
+ * Connects to another host. The function given as the "connected"
+ * argument will be called when the connection has been established.
+ * Sets up the pcb to connect to the remote host and sends the
+ * initial SYN segment which opens the connection.
+ *
+ * The tcp_connect() function returns immediately; it does not wait for
+ * the connection to be properly setup. Instead, it will call the
+ * function specified as the fourth argument (the "connected" argument)
+ * when the connection is established. If the connection could not be
+ * properly established, either because the other host refused the
+ * connection or because the other host didn't answer, the "err"
+ * callback function of this pcb (registered with tcp_err, see below)
+ * will be called.
+ *
+ * The tcp_connect() function can return ERR_MEM if no memory is
+ * available for enqueueing the SYN segment. If the SYN indeed was
+ * enqueued successfully, the tcp_connect() function returns ERR_OK.
+ *
+ * @param pcb the tcp_pcb used to establish the connection
+ * @param ipaddr the remote ip address to connect to
+ * @param port the remote tcp port to connect to
+ * @param connected callback function to call when connected (on error,
+ *            the err callback will be called)
+ * @return ERR_VAL if invalid arguments are given
+ * ERR_OK if connect request has been sent
+ * other err_t values if connect request couldn't be sent
+ */
+err_t
+tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port,
+ tcp_connected_fn connected)
+{
+ struct netif *netif = NULL;
+ err_t ret;
+ u32_t iss;
+ u16_t old_local_port;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("tcp_connect: invalid pcb", pcb != NULL, return ERR_ARG);
+ LWIP_ERROR("tcp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG);
+
+ LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN);
+
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port));
+ ip_addr_set(&pcb->remote_ip, ipaddr);
+ pcb->remote_port = port;
+
+ if (pcb->netif_idx != NETIF_NO_INDEX) {
+ netif = netif_get_by_index(pcb->netif_idx);
+ } else {
+ /* check if we have a route to the remote host */
+ netif = ip_route(&pcb->local_ip, &pcb->remote_ip);
+ }
+ if (netif == NULL) {
+ /* Don't even try to send a SYN packet if we have no route since that will fail. */
+ return ERR_RTE;
+ }
+
+ /* check if local IP has been assigned to pcb, if not, get one */
+ if (ip_addr_isany(&pcb->local_ip)) {
+ const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, ipaddr);
+ if (local_ip == NULL) {
+ return ERR_RTE;
+ }
+ ip_addr_copy(pcb->local_ip, *local_ip);
+ }
+
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+ /* If the given IP address should have a zone but doesn't, assign one now.
+ * Given that we already have the target netif, this is easy and cheap. */
+ if (IP_IS_V6(&pcb->remote_ip) &&
+ ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST)) {
+ ip6_addr_assign_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST, netif);
+ }
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+
+ old_local_port = pcb->local_port;
+ if (pcb->local_port == 0) {
+ pcb->local_port = tcp_new_port();
+ if (pcb->local_port == 0) {
+ return ERR_BUF;
+ }
+ } else {
+#if SO_REUSE
+ if (ip_get_option(pcb, SOF_REUSEADDR)) {
+ /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure
+ now that the 5-tuple is unique. */
+ struct tcp_pcb *cpcb;
+ int i;
+ /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */
+ for (i = 2; i < NUM_TCP_PCB_LISTS; i++) {
+ for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) {
+ if ((cpcb->local_port == pcb->local_port) &&
+ (cpcb->remote_port == port) &&
+ ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) &&
+ ip_addr_cmp(&cpcb->remote_ip, ipaddr)) {
+ /* linux returns EISCONN here, but ERR_USE should be OK for us */
+ return ERR_USE;
+ }
+ }
+ }
+ }
+#endif /* SO_REUSE */
+ }
+
+ iss = tcp_next_iss(pcb);
+ pcb->rcv_nxt = 0;
+ pcb->snd_nxt = iss;
+ pcb->lastack = iss - 1;
+ pcb->snd_wl2 = iss - 1;
+ pcb->snd_lbb = iss - 1;
+ /* Start with a window that does not need scaling. When window scaling is
+ enabled and used, the window is enlarged when both sides agree on scaling. */
+ pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND);
+ pcb->rcv_ann_right_edge = pcb->rcv_nxt;
+ pcb->snd_wnd = TCP_WND;
+ /* As initial send MSS, we use TCP_MSS but limit it to 536.
+ The send MSS is updated when an MSS option is received. */
+ pcb->mss = INITIAL_MSS;
+#if TCP_CALCULATE_EFF_SEND_MSS
+ pcb->mss = tcp_eff_send_mss_netif(pcb->mss, netif, &pcb->remote_ip);
+#endif /* TCP_CALCULATE_EFF_SEND_MSS */
+ pcb->cwnd = 1;
+#if LWIP_CALLBACK_API
+ pcb->connected = connected;
+#else /* LWIP_CALLBACK_API */
+ LWIP_UNUSED_ARG(connected);
+#endif /* LWIP_CALLBACK_API */
+
+ /* Send a SYN together with the MSS option. */
+ ret = tcp_enqueue_flags(pcb, TCP_SYN);
+ if (ret == ERR_OK) {
+    /* SYN segment was enqueued, change the pcb's state now */
+ pcb->state = SYN_SENT;
+ if (old_local_port != 0) {
+ TCP_RMV(&tcp_bound_pcbs, pcb);
+ }
+ TCP_REG_ACTIVE(pcb);
+ MIB2_STATS_INC(mib2.tcpactiveopens);
+
+ tcp_output(pcb);
+ }
+ return ret;
+}
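+
+/* Illustrative sketch (not part of the lwIP sources): opening an active
+ * connection. my_connected is a hypothetical callback; the peer address is
+ * just an example:
+ *
+ *   static err_t my_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
+ *   {
+ *     // err is ERR_OK here; failures are reported via the tcp_err() callback
+ *     return tcp_write(tpcb, "hello", 5, TCP_WRITE_FLAG_COPY);
+ *   }
+ *
+ *   ip_addr_t dst;
+ *   IP_ADDR4(&dst, 192, 168, 1, 10);
+ *   tcp_connect(pcb, &dst, 80, my_connected);  // pcb from tcp_new()
+ */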
+
+/**
+ * Called every 500 ms and implements the retransmission timer and the timer that
+ * removes PCBs that have been in TIME-WAIT for enough time. It also increments
+ * various timers such as the inactivity timer in each PCB.
+ *
+ * Automatically called from tcp_tmr().
+ */
+void
+tcp_slowtmr(void)
+{
+ struct tcp_pcb *pcb, *prev;
+ tcpwnd_size_t eff_wnd;
+ u8_t pcb_remove; /* flag if a PCB should be removed */
+ u8_t pcb_reset; /* flag if a RST should be sent when removing */
+ err_t err;
+
+ err = ERR_OK;
+
+ ++tcp_ticks;
+ ++tcp_timer_ctr;
+
+tcp_slowtmr_start:
+ /* Steps through all of the active PCBs. */
+ prev = NULL;
+ pcb = tcp_active_pcbs;
+ if (pcb == NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n"));
+ }
+ while (pcb != NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n"));
+ LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED);
+ LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN);
+ LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT);
+ if (pcb->last_timer == tcp_timer_ctr) {
+ /* skip this pcb, we have already processed it */
+ prev = pcb;
+ pcb = pcb->next;
+ continue;
+ }
+ pcb->last_timer = tcp_timer_ctr;
+
+ pcb_remove = 0;
+ pcb_reset = 0;
+
+ if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) {
+ ++pcb_remove;
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n"));
+ } else if (pcb->nrtx >= TCP_MAXRTX) {
+ ++pcb_remove;
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n"));
+ } else {
+ if (pcb->persist_backoff > 0) {
+ LWIP_ASSERT("tcp_slowtimr: persist ticking with in-flight data", pcb->unacked == NULL);
+ LWIP_ASSERT("tcp_slowtimr: persist ticking with empty send buffer", pcb->unsent != NULL);
+ if (pcb->persist_probe >= TCP_MAXRTX) {
+ ++pcb_remove; /* max probes reached */
+ } else {
+ u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff - 1];
+ if (pcb->persist_cnt < backoff_cnt) {
+ pcb->persist_cnt++;
+ }
+ if (pcb->persist_cnt >= backoff_cnt) {
+ int next_slot = 1; /* increment timer to next slot */
+ /* If snd_wnd is zero, send 1 byte probes */
+ if (pcb->snd_wnd == 0) {
+ if (tcp_zero_window_probe(pcb) != ERR_OK) {
+ next_slot = 0; /* try probe again with current slot */
+ }
+ /* snd_wnd not fully closed, split unsent head and fill window */
+ } else {
+ if (tcp_split_unsent_seg(pcb, (u16_t)pcb->snd_wnd) == ERR_OK) {
+ if (tcp_output(pcb) == ERR_OK) {
+ /* sending will cancel persist timer, else retry with current slot */
+ next_slot = 0;
+ }
+ }
+ }
+ if (next_slot) {
+ pcb->persist_cnt = 0;
+ if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) {
+ pcb->persist_backoff++;
+ }
+ }
+ }
+ }
+ } else {
+ /* Increase the retransmission timer if it is running */
+ if ((pcb->rtime >= 0) && (pcb->rtime < 0x7FFF)) {
+ ++pcb->rtime;
+ }
+
+ if (pcb->rtime >= pcb->rto) {
+ /* Time for a retransmission. */
+ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F
+ " pcb->rto %"S16_F"\n",
+ pcb->rtime, pcb->rto));
+          /* If the prepare phase fails but we have unsent data and no unacked data,
+             still execute the backoff calculations below, as this means we somehow
+             failed to send a segment. */
+ if ((tcp_rexmit_rto_prepare(pcb) == ERR_OK) || ((pcb->unacked == NULL) && (pcb->unsent != NULL))) {
+ /* Double retransmission time-out unless we are trying to
+ * connect to somebody (i.e., we are in SYN_SENT). */
+ if (pcb->state != SYN_SENT) {
+ u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff) - 1);
+ int calc_rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[backoff_idx];
+ pcb->rto = (s16_t)LWIP_MIN(calc_rto, 0x7FFF);
+ }
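+          /* Worked example with hypothetical values: sa == 16, sv == 3 and
+             nrtx == 2 give calc_rto = ((16 >> 3) + 3) << tcp_backoff[2]
+             = 5 << 3 = 40 slow-timer ticks, i.e. 20 s at 500 ms per tick. */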
+
+ /* Reset the retransmission timer. */
+ pcb->rtime = 0;
+
+ /* Reduce congestion window and ssthresh. */
+ eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd);
+ pcb->ssthresh = eff_wnd >> 1;
+ if (pcb->ssthresh < (tcpwnd_size_t)(pcb->mss << 1)) {
+ pcb->ssthresh = (tcpwnd_size_t)(pcb->mss << 1);
+ }
+ pcb->cwnd = pcb->mss;
+ LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F
+ " ssthresh %"TCPWNDSIZE_F"\n",
+ pcb->cwnd, pcb->ssthresh));
+ pcb->bytes_acked = 0;
+
+ /* The following needs to be called AFTER cwnd is set to one
+ mss - STJ */
+ tcp_rexmit_rto_commit(pcb);
+ }
+ }
+ }
+ }
+ /* Check if this PCB has stayed too long in FIN-WAIT-2 */
+ if (pcb->state == FIN_WAIT_2) {
+      /* If this PCB is in FIN_WAIT_2 because of SHUT_WR, don't let it time out. */
+ if (pcb->flags & TF_RXCLOSED) {
+ /* PCB was fully closed (either through close() or SHUT_RDWR):
+ normal FIN-WAIT timeout handling. */
+ if ((u32_t)(tcp_ticks - pcb->tmr) >
+ TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) {
+ ++pcb_remove;
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n"));
+ }
+ }
+ }
+
+ /* Check if KEEPALIVE should be sent */
+ if (ip_get_option(pcb, SOF_KEEPALIVE) &&
+ ((pcb->state == ESTABLISHED) ||
+ (pcb->state == CLOSE_WAIT))) {
+ if ((u32_t)(tcp_ticks - pcb->tmr) >
+ (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. Aborting connection to "));
+ ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip);
+ LWIP_DEBUGF(TCP_DEBUG, ("\n"));
+
+ ++pcb_remove;
+ ++pcb_reset;
+ } else if ((u32_t)(tcp_ticks - pcb->tmr) >
+ (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb))
+ / TCP_SLOW_INTERVAL) {
+ err = tcp_keepalive(pcb);
+ if (err == ERR_OK) {
+ pcb->keep_cnt_sent++;
+ }
+ }
+ }
+
+    /* If this PCB has queued out-of-sequence data but has been
+       inactive for too long, drop the data (it will eventually
+       be retransmitted). */
+#if TCP_QUEUE_OOSEQ
+ if (pcb->ooseq != NULL &&
+ (tcp_ticks - pcb->tmr >= (u32_t)pcb->rto * TCP_OOSEQ_TIMEOUT)) {
+ LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n"));
+ tcp_free_ooseq(pcb);
+ }
+#endif /* TCP_QUEUE_OOSEQ */
+
+ /* Check if this PCB has stayed too long in SYN-RCVD */
+ if (pcb->state == SYN_RCVD) {
+ if ((u32_t)(tcp_ticks - pcb->tmr) >
+ TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) {
+ ++pcb_remove;
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n"));
+ }
+ }
+
+ /* Check if this PCB has stayed too long in LAST-ACK */
+ if (pcb->state == LAST_ACK) {
+ if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) {
+ ++pcb_remove;
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n"));
+ }
+ }
+
+ /* If the PCB should be removed, do it. */
+ if (pcb_remove) {
+ struct tcp_pcb *pcb2;
+#if LWIP_CALLBACK_API
+ tcp_err_fn err_fn = pcb->errf;
+#endif /* LWIP_CALLBACK_API */
+ void *err_arg;
+ enum tcp_state last_state;
+ tcp_pcb_purge(pcb);
+ /* Remove PCB from tcp_active_pcbs list. */
+ if (prev != NULL) {
+ LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs);
+ prev->next = pcb->next;
+ } else {
+ /* This PCB was the first. */
+ LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb);
+ tcp_active_pcbs = pcb->next;
+ }
+
+ if (pcb_reset) {
+ tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
+ pcb->local_port, pcb->remote_port);
+ }
+
+ err_arg = pcb->callback_arg;
+ last_state = pcb->state;
+ pcb2 = pcb;
+ pcb = pcb->next;
+ tcp_free(pcb2);
+
+ tcp_active_pcbs_changed = 0;
+ TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT);
+ if (tcp_active_pcbs_changed) {
+ goto tcp_slowtmr_start;
+ }
+ } else {
+ /* get the 'next' element now and work with 'prev' below (in case of abort) */
+ prev = pcb;
+ pcb = pcb->next;
+
+ /* We check if we should poll the connection. */
+ ++prev->polltmr;
+ if (prev->polltmr >= prev->pollinterval) {
+ prev->polltmr = 0;
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n"));
+ tcp_active_pcbs_changed = 0;
+ TCP_EVENT_POLL(prev, err);
+ if (tcp_active_pcbs_changed) {
+ goto tcp_slowtmr_start;
+ }
+ /* if err == ERR_ABRT, 'prev' is already deallocated */
+ if (err == ERR_OK) {
+ tcp_output(prev);
+ }
+ }
+ }
+ }
+
+
+ /* Steps through all of the TIME-WAIT PCBs. */
+ prev = NULL;
+ pcb = tcp_tw_pcbs;
+ while (pcb != NULL) {
+ LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT);
+ pcb_remove = 0;
+
+ /* Check if this PCB has stayed long enough in TIME-WAIT */
+ if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) {
+ ++pcb_remove;
+ }
+
+ /* If the PCB should be removed, do it. */
+ if (pcb_remove) {
+ struct tcp_pcb *pcb2;
+ tcp_pcb_purge(pcb);
+ /* Remove PCB from tcp_tw_pcbs list. */
+ if (prev != NULL) {
+ LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs);
+ prev->next = pcb->next;
+ } else {
+ /* This PCB was the first. */
+ LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb);
+ tcp_tw_pcbs = pcb->next;
+ }
+ pcb2 = pcb;
+ pcb = pcb->next;
+ tcp_free(pcb2);
+ } else {
+ prev = pcb;
+ pcb = pcb->next;
+ }
+ }
+}
+
+/**
+ * Called every TCP_FAST_INTERVAL (250 ms); processes data previously
+ * "refused" by the upper layer (application) and sends delayed ACKs or pending FINs.
+ *
+ * Automatically called from tcp_tmr().
+ */
+void
+tcp_fasttmr(void)
+{
+ struct tcp_pcb *pcb;
+
+ ++tcp_timer_ctr;
+
+tcp_fasttmr_start:
+ pcb = tcp_active_pcbs;
+
+ while (pcb != NULL) {
+ if (pcb->last_timer != tcp_timer_ctr) {
+ struct tcp_pcb *next;
+ pcb->last_timer = tcp_timer_ctr;
+ /* send delayed ACKs */
+ if (pcb->flags & TF_ACK_DELAY) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n"));
+ tcp_ack_now(pcb);
+ tcp_output(pcb);
+ tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
+ }
+ /* send pending FIN */
+ if (pcb->flags & TF_CLOSEPEND) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n"));
+ tcp_clear_flags(pcb, TF_CLOSEPEND);
+ tcp_close_shutdown_fin(pcb);
+ }
+
+ next = pcb->next;
+
+ /* If there is data which was previously "refused" by upper layer */
+ if (pcb->refused_data != NULL) {
+ tcp_active_pcbs_changed = 0;
+ tcp_process_refused_data(pcb);
+ if (tcp_active_pcbs_changed) {
+ /* application callback has changed the pcb list: restart the loop */
+ goto tcp_fasttmr_start;
+ }
+ }
+ pcb = next;
+ } else {
+ pcb = pcb->next;
+ }
+ }
+}
+
+/** Call tcp_output for all active pcbs that have TF_NAGLEMEMERR set */
+void
+tcp_txnow(void)
+{
+ struct tcp_pcb *pcb;
+
+ for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
+ if (pcb->flags & TF_NAGLEMEMERR) {
+ tcp_output(pcb);
+ }
+ }
+}
+
+/** Pass pcb->refused_data to the recv callback */
+err_t
+tcp_process_refused_data(struct tcp_pcb *pcb)
+{
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ struct pbuf *rest;
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+
+ LWIP_ERROR("tcp_process_refused_data: invalid pcb", pcb != NULL, return ERR_ARG);
+
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ while (pcb->refused_data != NULL)
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ {
+ err_t err;
+ u8_t refused_flags = pcb->refused_data->flags;
+ /* set pcb->refused_data to NULL in case the callback frees it and then
+ closes the pcb */
+ struct pbuf *refused_data = pcb->refused_data;
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ pbuf_split_64k(refused_data, &rest);
+ pcb->refused_data = rest;
+#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ pcb->refused_data = NULL;
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ /* Notify again application with data previously received. */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n"));
+ TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err);
+ if (err == ERR_OK) {
+ /* did refused_data include a FIN? */
+ if ((refused_flags & PBUF_FLAG_TCP_FIN)
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ && (rest == NULL)
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ ) {
+ /* correct rcv_wnd as the application won't call tcp_recved()
+ for the FIN's seqno */
+ if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) {
+ pcb->rcv_wnd++;
+ }
+ TCP_EVENT_CLOSED(pcb, err);
+ if (err == ERR_ABRT) {
+ return ERR_ABRT;
+ }
+ }
+ } else if (err == ERR_ABRT) {
+ /* if err == ERR_ABRT, 'pcb' is already deallocated */
+ /* Drop incoming packets because pcb is "full" (only if the incoming
+ segment contains data). */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n"));
+ return ERR_ABRT;
+ } else {
+ /* data is still refused, pbuf is still valid (go on for ACK-only packets) */
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ if (rest != NULL) {
+ pbuf_cat(refused_data, rest);
+ }
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ pcb->refused_data = refused_data;
+ return ERR_INPROGRESS;
+ }
+ }
+ return ERR_OK;
+}
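+
+/*
+ * A minimal sketch (not part of lwIP) of an application recv callback that
+ * drives the refused-data path above: returning an error other than ERR_OK
+ * or ERR_ABRT without freeing 'p' makes the core keep the pbuf in
+ * pcb->refused_data and offer it again later via tcp_process_refused_data().
+ * 'my_state_t', 'my_buffer_full' and 'my_consume' are hypothetical names.
+ *
+ * @code{.c}
+ * static err_t
+ * my_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+ * {
+ *   my_state_t *state = (my_state_t *)arg;
+ *   if (p == NULL) {
+ *     return tcp_close(tpcb);       // remote host closed the connection
+ *   }
+ *   if ((err != ERR_OK) || my_buffer_full(state)) {
+ *     return ERR_MEM;               // refuse: do NOT free p, core keeps it
+ *   }
+ *   my_consume(state, p);           // take ownership of the data ...
+ *   tcp_recved(tpcb, p->tot_len);   // ... re-open the receive window ...
+ *   pbuf_free(p);                   // ... and free the pbuf (ERR_OK contract)
+ *   return ERR_OK;
+ * }
+ * @endcode
+ */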
+
+/**
+ * Deallocates a list of TCP segments (tcp_seg structures).
+ *
+ * @param seg tcp_seg list of TCP segments to free
+ */
+void
+tcp_segs_free(struct tcp_seg *seg)
+{
+ while (seg != NULL) {
+ struct tcp_seg *next = seg->next;
+ tcp_seg_free(seg);
+ seg = next;
+ }
+}
+
+/**
+ * Frees a TCP segment (tcp_seg structure).
+ *
+ * @param seg single tcp_seg to free
+ */
+void
+tcp_seg_free(struct tcp_seg *seg)
+{
+ if (seg != NULL) {
+ if (seg->p != NULL) {
+ pbuf_free(seg->p);
+#if TCP_DEBUG
+ seg->p = NULL;
+#endif /* TCP_DEBUG */
+ }
+ memp_free(MEMP_TCP_SEG, seg);
+ }
+}
+
+/**
+ * @ingroup tcp
+ * Sets the priority of a connection.
+ *
+ * @param pcb the tcp_pcb to manipulate
+ * @param prio new priority
+ */
+void
+tcp_setprio(struct tcp_pcb *pcb, u8_t prio)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("tcp_setprio: invalid pcb", pcb != NULL, return);
+
+ pcb->prio = prio;
+}
+
+#if TCP_QUEUE_OOSEQ
+/**
+ * Returns a copy of the given TCP segment.
+ * The pbuf and data are not copied, only the pointers are.
+ *
+ * @param seg the old tcp_seg
+ * @return a copy of seg
+ */
+struct tcp_seg *
+tcp_seg_copy(struct tcp_seg *seg)
+{
+ struct tcp_seg *cseg;
+
+ LWIP_ASSERT("tcp_seg_copy: invalid seg", seg != NULL);
+
+ cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG);
+ if (cseg == NULL) {
+ return NULL;
+ }
+ SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg));
+ pbuf_ref(cseg->p);
+ return cseg;
+}
+#endif /* TCP_QUEUE_OOSEQ */
+
+#if LWIP_CALLBACK_API
+/**
+ * Default receive callback that is called if the user didn't register
+ * a recv callback for the pcb.
+ */
+err_t
+tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err)
+{
+ LWIP_UNUSED_ARG(arg);
+
+ LWIP_ERROR("tcp_recv_null: invalid pcb", pcb != NULL, return ERR_ARG);
+
+ if (p != NULL) {
+ tcp_recved(pcb, p->tot_len);
+ pbuf_free(p);
+ } else if (err == ERR_OK) {
+ return tcp_close(pcb);
+ }
+ return ERR_OK;
+}
+#endif /* LWIP_CALLBACK_API */
+
+/**
+ * Kills the oldest active connection that has a lower priority than 'prio'.
+ *
+ * @param prio minimum priority
+ */
+static void
+tcp_kill_prio(u8_t prio)
+{
+ struct tcp_pcb *pcb, *inactive;
+ u32_t inactivity;
+ u8_t mprio;
+
+ mprio = LWIP_MIN(TCP_PRIO_MAX, prio);
+
+ /* We want to kill connections with a lower prio, so bail out if
+ * supplied prio is 0 - there can never be a lower prio
+ */
+ if (mprio == 0) {
+ return;
+ }
+
+  /* We only want to kill connections with a lower prio, so decrement prio by one
+   * and start searching for the oldest connection with the same or lower priority than mprio.
+ * We want to find the connections with the lowest possible prio, and among
+ * these the one with the longest inactivity time.
+ */
+ mprio--;
+
+ inactivity = 0;
+ inactive = NULL;
+ for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
+ /* lower prio is always a kill candidate */
+ if ((pcb->prio < mprio) ||
+ /* longer inactivity is also a kill candidate */
+ ((pcb->prio == mprio) && ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity))) {
+ inactivity = tcp_ticks - pcb->tmr;
+ inactive = pcb;
+ mprio = pcb->prio;
+ }
+ }
+ if (inactive != NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n",
+ (void *)inactive, inactivity));
+ tcp_abort(inactive);
+ }
+}
+
+/**
+ * Kills the oldest connection that is in specific state.
+ * Called from tcp_alloc() for LAST_ACK and CLOSING if no more connections are available.
+ */
+static void
+tcp_kill_state(enum tcp_state state)
+{
+ struct tcp_pcb *pcb, *inactive;
+ u32_t inactivity;
+
+ LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK));
+
+ inactivity = 0;
+ inactive = NULL;
+ /* Go through the list of active pcbs and get the oldest pcb that is in state
+ CLOSING/LAST_ACK. */
+ for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
+ if (pcb->state == state) {
+ if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) {
+ inactivity = tcp_ticks - pcb->tmr;
+ inactive = pcb;
+ }
+ }
+ }
+ if (inactive != NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n",
+ tcp_state_str[state], (void *)inactive, inactivity));
+ /* Don't send a RST, since no data is lost. */
+ tcp_abandon(inactive, 0);
+ }
+}
+
+/**
+ * Kills the oldest connection that is in TIME_WAIT state.
+ * Called from tcp_alloc() if no more connections are available.
+ */
+static void
+tcp_kill_timewait(void)
+{
+ struct tcp_pcb *pcb, *inactive;
+ u32_t inactivity;
+
+ inactivity = 0;
+ inactive = NULL;
+ /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */
+ for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
+ if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) {
+ inactivity = tcp_ticks - pcb->tmr;
+ inactive = pcb;
+ }
+ }
+ if (inactive != NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n",
+ (void *)inactive, inactivity));
+ tcp_abort(inactive);
+ }
+}
+
+/* Called when allocating a pcb fails.
+ * In this case, we first handle all pcbs that are waiting to close: if we can
+ * now send the FIN (which failed before), the pcb might reach a state in which
+ * it can be freed.
+ */
+static void
+tcp_handle_closepend(void)
+{
+ struct tcp_pcb *pcb = tcp_active_pcbs;
+
+ while (pcb != NULL) {
+ struct tcp_pcb *next = pcb->next;
+ /* send pending FIN */
+ if (pcb->flags & TF_CLOSEPEND) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_handle_closepend: pending FIN\n"));
+ tcp_clear_flags(pcb, TF_CLOSEPEND);
+ tcp_close_shutdown_fin(pcb);
+ }
+ pcb = next;
+ }
+}
+
+/**
+ * Allocate a new tcp_pcb structure.
+ *
+ * @param prio priority for the new pcb
+ * @return a new tcp_pcb that initially is in state CLOSED
+ */
+struct tcp_pcb *
+tcp_alloc(u8_t prio)
+{
+ struct tcp_pcb *pcb;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
+ if (pcb == NULL) {
+ /* Try to send FIN for all pcbs stuck in TF_CLOSEPEND first */
+ tcp_handle_closepend();
+
+ /* Try killing oldest connection in TIME-WAIT. */
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n"));
+ tcp_kill_timewait();
+ /* Try to allocate a tcp_pcb again. */
+ pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
+ if (pcb == NULL) {
+ /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n"));
+ tcp_kill_state(LAST_ACK);
+ /* Try to allocate a tcp_pcb again. */
+ pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
+ if (pcb == NULL) {
+ /* Try killing oldest connection in CLOSING. */
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n"));
+ tcp_kill_state(CLOSING);
+ /* Try to allocate a tcp_pcb again. */
+ pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
+ if (pcb == NULL) {
+ /* Try killing oldest active connection with lower priority than the new one. */
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing oldest connection with prio lower than %d\n", prio));
+ tcp_kill_prio(prio);
+ /* Try to allocate a tcp_pcb again. */
+ pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
+ if (pcb != NULL) {
+ /* adjust err stats: memp_malloc failed multiple times before */
+ MEMP_STATS_DEC(err, MEMP_TCP_PCB);
+ }
+ }
+ if (pcb != NULL) {
+ /* adjust err stats: memp_malloc failed multiple times before */
+ MEMP_STATS_DEC(err, MEMP_TCP_PCB);
+ }
+ }
+ if (pcb != NULL) {
+ /* adjust err stats: memp_malloc failed multiple times before */
+ MEMP_STATS_DEC(err, MEMP_TCP_PCB);
+ }
+ }
+ if (pcb != NULL) {
+ /* adjust err stats: memp_malloc failed above */
+ MEMP_STATS_DEC(err, MEMP_TCP_PCB);
+ }
+ }
+ if (pcb != NULL) {
+ /* zero out the whole pcb, so there is no need to initialize members to zero */
+ memset(pcb, 0, sizeof(struct tcp_pcb));
+ pcb->prio = prio;
+ pcb->snd_buf = TCP_SND_BUF;
+ /* Start with a window that does not need scaling. When window scaling is
+ enabled and used, the window is enlarged when both sides agree on scaling. */
+ pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND);
+ pcb->ttl = TCP_TTL;
+ /* As initial send MSS, we use TCP_MSS but limit it to 536.
+ The send MSS is updated when an MSS option is received. */
+ pcb->mss = INITIAL_MSS;
+ pcb->rto = 3000 / TCP_SLOW_INTERVAL;
+ pcb->sv = 3000 / TCP_SLOW_INTERVAL;
+ pcb->rtime = -1;
+ pcb->cwnd = 1;
+ pcb->tmr = tcp_ticks;
+ pcb->last_timer = tcp_timer_ctr;
+
+    /* RFC 5681 recommends setting ssthresh arbitrarily high and gives an example
+ of using the largest advertised receive window. We've seen complications with
+ receiving TCPs that use window scaling and/or window auto-tuning where the
+ initial advertised window is very small and then grows rapidly once the
+ connection is established. To avoid these complications, we set ssthresh to the
+ largest effective cwnd (amount of in-flight data) that the sender can have. */
+ pcb->ssthresh = TCP_SND_BUF;
+
+#if LWIP_CALLBACK_API
+ pcb->recv = tcp_recv_null;
+#endif /* LWIP_CALLBACK_API */
+
+ /* Init KEEPALIVE timer */
+ pcb->keep_idle = TCP_KEEPIDLE_DEFAULT;
+
+#if LWIP_TCP_KEEPALIVE
+ pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT;
+ pcb->keep_cnt = TCP_KEEPCNT_DEFAULT;
+#endif /* LWIP_TCP_KEEPALIVE */
+ }
+ return pcb;
+}
+
+/**
+ * @ingroup tcp_raw
+ * Creates a new TCP protocol control block but doesn't place it on
+ * any of the TCP PCB lists.
+ * The pcb is not put on any list until binding using tcp_bind().
+ * If memory is not available for creating the new pcb, NULL is returned.
+ *
+ * @internal: Maybe there should be an idle TCP PCB list where these
+ * PCBs are put on. Port reservation using tcp_bind() is implemented, but
+ * allocated pcbs that are not bound can't be killed automatically when a pcb
+ * with a higher prio needs to be allocated (@see tcp_kill_prio())
+ *
+ * @return a new tcp_pcb that initially is in state CLOSED
+ */
+struct tcp_pcb *
+tcp_new(void)
+{
+ return tcp_alloc(TCP_PRIO_NORMAL);
+}
+
+/**
+ * @ingroup tcp_raw
+ * Creates a new TCP protocol control block but doesn't
+ * place it on any of the TCP PCB lists.
+ * The pcb is not put on any list until binding using tcp_bind().
+ *
+ * @param type IP address type, see @ref lwip_ip_addr_type definitions.
+ * If you want to listen to IPv4 and IPv6 (dual-stack) connections,
+ * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE.
+ * @return a new tcp_pcb that initially is in state CLOSED
+ */
+struct tcp_pcb *
+tcp_new_ip_type(u8_t type)
+{
+ struct tcp_pcb *pcb;
+ pcb = tcp_alloc(TCP_PRIO_NORMAL);
+#if LWIP_IPV4 && LWIP_IPV6
+ if (pcb != NULL) {
+ IP_SET_TYPE_VAL(pcb->local_ip, type);
+ IP_SET_TYPE_VAL(pcb->remote_ip, type);
+ }
+#else
+ LWIP_UNUSED_ARG(type);
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ return pcb;
+}
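+
+/*
+ * A minimal usage sketch (not part of lwIP; port 80 and the error handling
+ * are placeholders): creating a dual-stack listener as described above.
+ *
+ * @code{.c}
+ * struct tcp_pcb *pcb = tcp_new_ip_type(IPADDR_TYPE_ANY);
+ * if (pcb != NULL && tcp_bind(pcb, IP_ANY_TYPE, 80) == ERR_OK) {
+ *   // tcp_listen() returns a smaller listening pcb and frees the original
+ *   // one on success; on failure it returns NULL and the original remains.
+ *   pcb = tcp_listen(pcb);
+ * }
+ * @endcode
+ */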
+
+/**
+ * @ingroup tcp_raw
+ * Specifies the program specific state that should be passed to all
+ * other callback functions. The "pcb" argument is the current TCP
+ * connection control block, and the "arg" argument is the argument
+ * that will be passed to the callbacks.
+ *
+ * @param pcb tcp_pcb to set the callback argument
+ * @param arg void pointer argument to pass to callback functions
+ */
+void
+tcp_arg(struct tcp_pcb *pcb, void *arg)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ /* This function is allowed to be called for both listen pcbs and
+ connection pcbs. */
+ if (pcb != NULL) {
+ pcb->callback_arg = arg;
+ }
+}
+#if LWIP_CALLBACK_API
+
+/**
+ * @ingroup tcp_raw
+ * Sets the callback function that will be called when new data
+ * arrives. The callback function will be passed a NULL pbuf to
+ * indicate that the remote host has closed the connection. If the
+ * callback function returns ERR_OK or ERR_ABRT it must have
+ * freed the pbuf, otherwise it must not have freed it.
+ *
+ * @param pcb tcp_pcb to set the recv callback
+ * @param recv callback function to call for this pcb when data is received
+ */
+void
+tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ if (pcb != NULL) {
+ LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN);
+ pcb->recv = recv;
+ }
+}
+
+/**
+ * @ingroup tcp_raw
+ * Specifies the callback function that should be called when data has
+ * successfully been received (i.e., acknowledged) by the remote
+ * host. The len argument passed to the callback function gives the
+ * number of bytes that were acknowledged by the last acknowledgment.
+ *
+ * @param pcb tcp_pcb to set the sent callback
+ * @param sent callback function to call for this pcb when data is successfully sent
+ */
+void
+tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ if (pcb != NULL) {
+ LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN);
+ pcb->sent = sent;
+ }
+}
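+
+/*
+ * A minimal sketch (not part of lwIP) of a sent callback used for flow
+ * control: hand the next chunk to the stack once earlier data was ACKed.
+ * 'my_state_t' and 'my_send_more' are hypothetical names.
+ *
+ * @code{.c}
+ * static err_t
+ * my_sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
+ * {
+ *   my_state_t *state = (my_state_t *)arg;
+ *   state->in_flight -= len;        // 'len' bytes were acknowledged
+ *   my_send_more(state, tpcb);      // e.g. tcp_write() the next chunk
+ *   return ERR_OK;
+ * }
+ * @endcode
+ */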
+
+/**
+ * @ingroup tcp_raw
+ * Used to specify the function that should be called when a fatal error
+ * has occurred on the connection.
+ *
+ * If a connection is aborted because of an error, the application is
+ * alerted of this event by the err callback. Errors that might abort a
+ * connection include a shortage of memory. The callback
+ * function to be called is set using the tcp_err() function.
+ *
+ * @note The corresponding pcb is already freed when this callback is called!
+ *
+ * @param pcb tcp_pcb to set the err callback
+ * @param err callback function to call for this pcb when a fatal error
+ * has occurred on the connection
+ */
+void
+tcp_err(struct tcp_pcb *pcb, tcp_err_fn err)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ if (pcb != NULL) {
+ LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN);
+ pcb->errf = err;
+ }
+}
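+
+/*
+ * A minimal sketch (not part of lwIP) of an err callback. Because the pcb is
+ * already freed when this is called, the callback must only clean up the
+ * application state registered via tcp_arg() and must not call any tcp_*
+ * function on the dead connection. 'my_state_t' and 'my_state_free' are
+ * hypothetical names.
+ *
+ * @code{.c}
+ * static void
+ * my_err(void *arg, err_t err)
+ * {
+ *   my_state_t *state = (my_state_t *)arg;
+ *   LWIP_UNUSED_ARG(err);           // e.g. ERR_ABRT, ERR_RST or ERR_CLSD
+ *   if (state != NULL) {
+ *     state->pcb = NULL;            // forget the (already freed) pcb
+ *     my_state_free(state);
+ *   }
+ * }
+ * @endcode
+ */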
+
+/**
+ * @ingroup tcp_raw
+ * Used for specifying the function that should be called when a
+ * LISTENing connection has been connected to another host.
+ *
+ * @param pcb tcp_pcb to set the accept callback
+ * @param accept callback function to call for this pcb when LISTENing
+ * connection has been connected to another host
+ */
+void
+tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+ if ((pcb != NULL) && (pcb->state == LISTEN)) {
+ struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen *)pcb;
+ lpcb->accept = accept;
+ }
+}
+#endif /* LWIP_CALLBACK_API */
+
+
+/**
+ * @ingroup tcp_raw
+ * Specifies the polling interval and the callback function that should
+ * be called to poll the application. The interval is specified in
+ * number of TCP coarse grained timer shots, which typically occurs
+ * twice a second. An interval of 10 means that the application would
+ * be polled every 5 seconds.
+ *
+ * When a connection is idle (i.e., no data is either transmitted or
+ * received), lwIP will repeatedly poll the application by calling a
+ * specified callback function. This can be used either as a watchdog
+ * timer for killing connections that have stayed idle for too long, or
+ * as a method of waiting for memory to become available. For instance,
+ * if a call to tcp_write() has failed because memory wasn't available,
+ * the application may use the polling functionality to call tcp_write()
+ * again when the connection has been idle for a while.
+ */
+void
+tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("tcp_poll: invalid pcb", pcb != NULL, return);
+ LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN);
+
+#if LWIP_CALLBACK_API
+ pcb->poll = poll;
+#else /* LWIP_CALLBACK_API */
+ LWIP_UNUSED_ARG(poll);
+#endif /* LWIP_CALLBACK_API */
+ pcb->pollinterval = interval;
+}
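+
+/*
+ * A minimal sketch (not part of lwIP) of a poll callback that retries a
+ * write which previously failed with ERR_MEM and doubles as an idle
+ * watchdog. With the default 500 ms coarse timer, tcp_poll(pcb, my_poll, 10)
+ * invokes it roughly every 5 seconds. 'my_state_t' and its members are
+ * hypothetical names.
+ *
+ * @code{.c}
+ * static err_t
+ * my_poll(void *arg, struct tcp_pcb *tpcb)
+ * {
+ *   my_state_t *state = (my_state_t *)arg;
+ *   if (state->pending_len > 0) {
+ *     if (tcp_write(tpcb, state->pending, state->pending_len,
+ *                   TCP_WRITE_FLAG_COPY) == ERR_OK) {
+ *       state->pending_len = 0;     // queued; tcp_output() will send it
+ *     }
+ *   } else if (++state->idle_polls > 24) {
+ *     tcp_abort(tpcb);              // idle for ~2 minutes: kill it
+ *     return ERR_ABRT;              // tell the core the pcb is gone
+ *   }
+ *   return ERR_OK;
+ * }
+ * @endcode
+ */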
+
+/**
+ * Purges a TCP PCB. Removes any buffered data and frees the buffer memory
+ * (pcb->ooseq, pcb->unsent and pcb->unacked are freed).
+ *
+ * @param pcb tcp_pcb to purge. The pcb itself is not deallocated!
+ */
+void
+tcp_pcb_purge(struct tcp_pcb *pcb)
+{
+ LWIP_ERROR("tcp_pcb_purge: invalid pcb", pcb != NULL, return);
+
+ if (pcb->state != CLOSED &&
+ pcb->state != TIME_WAIT &&
+ pcb->state != LISTEN) {
+
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n"));
+
+ tcp_backlog_accepted(pcb);
+
+ if (pcb->refused_data != NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n"));
+ pbuf_free(pcb->refused_data);
+ pcb->refused_data = NULL;
+ }
+ if (pcb->unsent != NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n"));
+ }
+ if (pcb->unacked != NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n"));
+ }
+#if TCP_QUEUE_OOSEQ
+ if (pcb->ooseq != NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n"));
+ tcp_free_ooseq(pcb);
+ }
+#endif /* TCP_QUEUE_OOSEQ */
+
+ /* Stop the retransmission timer as it will expect data on unacked
+ queue if it fires */
+ pcb->rtime = -1;
+
+ tcp_segs_free(pcb->unsent);
+ tcp_segs_free(pcb->unacked);
+ pcb->unacked = pcb->unsent = NULL;
+#if TCP_OVERSIZE
+ pcb->unsent_oversize = 0;
+#endif /* TCP_OVERSIZE */
+ }
+}
+
+/**
+ * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first.
+ *
+ * @param pcblist PCB list to purge.
+ * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated!
+ */
+void
+tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("tcp_pcb_remove: invalid pcb", pcb != NULL);
+ LWIP_ASSERT("tcp_pcb_remove: invalid pcblist", pcblist != NULL);
+
+ TCP_RMV(pcblist, pcb);
+
+ tcp_pcb_purge(pcb);
+
+  /* if there is an outstanding delayed ACK, send it */
+ if ((pcb->state != TIME_WAIT) &&
+ (pcb->state != LISTEN) &&
+ (pcb->flags & TF_ACK_DELAY)) {
+ tcp_ack_now(pcb);
+ tcp_output(pcb);
+ }
+
+ if (pcb->state != LISTEN) {
+ LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL);
+ LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL);
+#if TCP_QUEUE_OOSEQ
+ LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL);
+#endif /* TCP_QUEUE_OOSEQ */
+ }
+
+ pcb->state = CLOSED;
+ /* reset the local port to prevent the pcb from being 'bound' */
+ pcb->local_port = 0;
+
+ LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane());
+}
+
+/**
+ * Calculates a new initial sequence number for new connections.
+ *
+ * @return u32_t pseudo random sequence number
+ */
+u32_t
+tcp_next_iss(struct tcp_pcb *pcb)
+{
+#ifdef LWIP_HOOK_TCP_ISN
+ LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL);
+ return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port);
+#else /* LWIP_HOOK_TCP_ISN */
+ static u32_t iss = 6510;
+
+ LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL);
+ LWIP_UNUSED_ARG(pcb);
+
+ iss += tcp_ticks; /* XXX */
+ return iss;
+#endif /* LWIP_HOOK_TCP_ISN */
+}
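+
+/*
+ * A sketch of how a port might replace the simple default above via the
+ * (real) LWIP_HOOK_TCP_ISN hook in lwipopts.h; 'my_tcp_isn' is a
+ * hypothetical implementation, e.g. an RFC 6528 style generator:
+ *
+ * @code{.c}
+ * // in lwipopts.h
+ * u32_t my_tcp_isn(const ip_addr_t *local_ip, u16_t local_port,
+ *                  const ip_addr_t *remote_ip, u16_t remote_port);
+ * #define LWIP_HOOK_TCP_ISN(local_ip, local_port, remote_ip, remote_port) \
+ *   my_tcp_isn((local_ip), (local_port), (remote_ip), (remote_port))
+ * @endcode
+ */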
+
+#if TCP_CALCULATE_EFF_SEND_MSS
+/**
+ * Calculates the effective send mss that can be used for a specific IP address
+ * by calculating the minimum of TCP_MSS and the mtu (if set) of the target
+ * netif (if not NULL).
+ */
+u16_t
+tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, const ip_addr_t *dest)
+{
+ u16_t mss_s;
+ u16_t mtu;
+
+ LWIP_UNUSED_ARG(dest); /* in case IPv6 is disabled */
+
+ LWIP_ASSERT("tcp_eff_send_mss_netif: invalid dst_ip", dest != NULL);
+
+#if LWIP_IPV6
+#if LWIP_IPV4
+ if (IP_IS_V6(dest))
+#endif /* LWIP_IPV4 */
+ {
+ /* First look in destination cache, to see if there is a Path MTU. */
+ mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif);
+ }
+#if LWIP_IPV4
+ else
+#endif /* LWIP_IPV4 */
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4
+ {
+ if (outif == NULL) {
+ return sendmss;
+ }
+ mtu = outif->mtu;
+ }
+#endif /* LWIP_IPV4 */
+
+ if (mtu != 0) {
+ u16_t offset;
+#if LWIP_IPV6
+#if LWIP_IPV4
+ if (IP_IS_V6(dest))
+#endif /* LWIP_IPV4 */
+ {
+ offset = IP6_HLEN + TCP_HLEN;
+ }
+#if LWIP_IPV4
+ else
+#endif /* LWIP_IPV4 */
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4
+ {
+ offset = IP_HLEN + TCP_HLEN;
+ }
+#endif /* LWIP_IPV4 */
+ mss_s = (mtu > offset) ? (u16_t)(mtu - offset) : 0;
+ /* RFC 1122, chap 4.2.2.6:
+ * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize
+ * We correct for TCP options in tcp_write(), and don't support IP options.
+ */
+ sendmss = LWIP_MIN(sendmss, mss_s);
+ }
+ return sendmss;
+}
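+
+/* Worked example for the clamping above: on an IPv4 netif with an Ethernet
+ * MTU of 1500, offset = IP_HLEN + TCP_HLEN = 20 + 20 = 40, so
+ * mss_s = 1500 - 40 = 1460 and the result is LWIP_MIN(sendmss, 1460).
+ * For IPv6, offset = IP6_HLEN + TCP_HLEN = 40 + 20 = 60, giving 1440. */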
+#endif /* TCP_CALCULATE_EFF_SEND_MSS */
+
+/** Helper function for tcp_netif_ip_addr_changed() that iterates a pcb list */
+static void
+tcp_netif_ip_addr_changed_pcblist(const ip_addr_t *old_addr, struct tcp_pcb *pcb_list)
+{
+ struct tcp_pcb *pcb;
+ pcb = pcb_list;
+
+ LWIP_ASSERT("tcp_netif_ip_addr_changed_pcblist: invalid old_addr", old_addr != NULL);
+
+ while (pcb != NULL) {
+ /* PCB bound to current local interface address? */
+ if (ip_addr_cmp(&pcb->local_ip, old_addr)
+#if LWIP_AUTOIP
+ /* connections to link-local addresses must persist (RFC3927 ch. 1.9) */
+ && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip)))
+#endif /* LWIP_AUTOIP */
+ ) {
+ /* this connection must be aborted */
+ struct tcp_pcb *next = pcb->next;
+ LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb));
+ tcp_abort(pcb);
+ pcb = next;
+ } else {
+ pcb = pcb->next;
+ }
+ }
+}
+
+/** This function is called from netif.c when the address is changed or the netif is removed
+ *
+ * @param old_addr IP address of the netif before change
+ * @param new_addr IP address of the netif after change or NULL if netif has been removed
+ */
+void
+tcp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr)
+{
+ struct tcp_pcb_listen *lpcb;
+
+ if (!ip_addr_isany(old_addr)) {
+ tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs);
+ tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs);
+
+ if (!ip_addr_isany(new_addr)) {
+ for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) {
+ /* PCB bound to current local interface address? */
+ if (ip_addr_cmp(&lpcb->local_ip, old_addr)) {
+ /* The PCB is listening to the old ipaddr and
+ * is set to listen to the new one instead */
+ ip_addr_copy(lpcb->local_ip, *new_addr);
+ }
+ }
+ }
+ }
+}
+
+const char *
+tcp_debug_state_str(enum tcp_state s)
+{
+ return tcp_state_str[s];
+}
+
+err_t
+tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port)
+{
+ if (pcb) {
+ if (local) {
+ if (addr) {
+ *addr = pcb->local_ip;
+ }
+ if (port) {
+ *port = pcb->local_port;
+ }
+ } else {
+ if (addr) {
+ *addr = pcb->remote_ip;
+ }
+ if (port) {
+ *port = pcb->remote_port;
+ }
+ }
+ return ERR_OK;
+ }
+ return ERR_VAL;
+}
+
+#if TCP_QUEUE_OOSEQ
+/* Free all ooseq pbufs (and possibly reset SACK state) */
+void
+tcp_free_ooseq(struct tcp_pcb *pcb)
+{
+ if (pcb->ooseq) {
+ tcp_segs_free(pcb->ooseq);
+ pcb->ooseq = NULL;
+#if LWIP_TCP_SACK_OUT
+ memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks));
+#endif /* LWIP_TCP_SACK_OUT */
+ }
+}
+#endif /* TCP_QUEUE_OOSEQ */
+
+#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG
+/**
+ * Print a tcp header for debugging purposes.
+ *
+ * @param tcphdr pointer to a struct tcp_hdr
+ */
+void
+tcp_debug_print(struct tcp_hdr *tcphdr)
+{
+ LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n"));
+ LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(TCP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n",
+ lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest)));
+ LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (seq no)\n",
+ lwip_ntohl(tcphdr->seqno)));
+ LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (ack no)\n",
+ lwip_ntohl(tcphdr->ackno)));
+ LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" | |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"| %5"U16_F" | (hdrlen, flags (",
+ TCPH_HDRLEN(tcphdr),
+ (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1),
+ (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1),
+ (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1),
+ (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1),
+ (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1),
+ (u16_t)(TCPH_FLAGS(tcphdr) & 1),
+ lwip_ntohs(tcphdr->wnd)));
+ tcp_debug_print_flags(TCPH_FLAGS(tcphdr));
+ LWIP_DEBUGF(TCP_DEBUG, ("), win)\n"));
+ LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(TCP_DEBUG, ("| 0x%04"X16_F" | %5"U16_F" | (chksum, urgp)\n",
+ lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp)));
+ LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
+}
+
+/**
+ * Print a tcp state for debugging purposes.
+ *
+ * @param s enum tcp_state to print
+ */
+void
+tcp_debug_print_state(enum tcp_state s)
+{
+ LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s]));
+}
+
+/**
+ * Print tcp flags for debugging purposes.
+ *
+ * @param flags tcp flags, all active flags are printed
+ */
+void
+tcp_debug_print_flags(u8_t flags)
+{
+ if (flags & TCP_FIN) {
+ LWIP_DEBUGF(TCP_DEBUG, ("FIN "));
+ }
+ if (flags & TCP_SYN) {
+ LWIP_DEBUGF(TCP_DEBUG, ("SYN "));
+ }
+ if (flags & TCP_RST) {
+ LWIP_DEBUGF(TCP_DEBUG, ("RST "));
+ }
+ if (flags & TCP_PSH) {
+ LWIP_DEBUGF(TCP_DEBUG, ("PSH "));
+ }
+ if (flags & TCP_ACK) {
+ LWIP_DEBUGF(TCP_DEBUG, ("ACK "));
+ }
+ if (flags & TCP_URG) {
+ LWIP_DEBUGF(TCP_DEBUG, ("URG "));
+ }
+ if (flags & TCP_ECE) {
+ LWIP_DEBUGF(TCP_DEBUG, ("ECE "));
+ }
+ if (flags & TCP_CWR) {
+ LWIP_DEBUGF(TCP_DEBUG, ("CWR "));
+ }
+ LWIP_DEBUGF(TCP_DEBUG, ("\n"));
+}
+
+/**
+ * Print all tcp_pcbs in every list for debugging purposes.
+ */
+void
+tcp_debug_print_pcbs(void)
+{
+ struct tcp_pcb *pcb;
+ struct tcp_pcb_listen *pcbl;
+
+ LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n"));
+ for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
+ LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ",
+ pcb->local_port, pcb->remote_port,
+ pcb->snd_nxt, pcb->rcv_nxt));
+ tcp_debug_print_state(pcb->state);
+ }
+
+ LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n"));
+ for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) {
+ LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port));
+ tcp_debug_print_state(pcbl->state);
+ }
+
+ LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n"));
+ for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
+ LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ",
+ pcb->local_port, pcb->remote_port,
+ pcb->snd_nxt, pcb->rcv_nxt));
+ tcp_debug_print_state(pcb->state);
+ }
+}
+
+/**
+ * Check state consistency of the tcp_pcb lists.
+ */
+s16_t
+tcp_pcbs_sane(void)
+{
+ struct tcp_pcb *pcb;
+ for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
+ LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED);
+ LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN);
+ LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT);
+ }
+ for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
+ LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT);
+ }
+ return 1;
+}
+#endif /* TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG */
+
+#if LWIP_TCP_PCB_NUM_EXT_ARGS
+/**
+ * @defgroup tcp_raw_extargs ext arguments
+ * @ingroup tcp_raw
+ * Additional data storage per tcp pcb\n
+ * @see @ref tcp_raw
+ *
+ * When LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb)
+ * includes a number of additional argument entries in an array.
+ *
+ * To support memory management, in addition to a 'void *', callbacks can be
+ * provided to manage transition from listening pcbs to connections and to
+ * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks).
+ *
+ * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get
+ * to store and load arguments from this index for a given pcb.
+ */
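+
+/*
+ * A minimal usage sketch (not part of lwIP): allocate one global index at
+ * startup, then attach per-connection data to each pcb. 'my_conn_t',
+ * 'my_arg_id' and 'my_destroy' are hypothetical names.
+ *
+ * @code{.c}
+ * static u8_t my_arg_id;
+ * static const struct tcp_ext_arg_callbacks my_callbacks = {
+ *   my_destroy, NULL                // destroy and passive_open callbacks
+ * };
+ *
+ * void my_init(void) {
+ *   my_arg_id = tcp_ext_arg_alloc_id();       // once, at init time
+ * }
+ *
+ * void my_attach(struct tcp_pcb *pcb, my_conn_t *conn) {
+ *   tcp_ext_arg_set_callbacks(pcb, my_arg_id, &my_callbacks);
+ *   tcp_ext_arg_set(pcb, my_arg_id, conn);
+ * }
+ * @endcode
+ */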
+
+static u8_t tcp_ext_arg_id;
+
+/**
+ * @ingroup tcp_raw_extargs
+ * Allocate an index to store data in ext_args member of struct tcp_pcb.
+ * Returned value is an index in mentioned array.
+ * The index is *global* over all pcbs!
+ *
+ * When @ref LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb)
+ * includes a number of additional argument entries in an array.
+ *
+ * To support memory management, in addition to a 'void *', callbacks can be
+ * provided to manage transition from listening pcbs to connections and to
+ * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks).
+ *
+ * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get
+ * to store and load arguments from this index for a given pcb.
+ *
+ * @return a unique index into struct tcp_pcb.ext_args
+ */
+u8_t
+tcp_ext_arg_alloc_id(void)
+{
+ u8_t result = tcp_ext_arg_id;
+ tcp_ext_arg_id++;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_TCP_PCB_NUM_EXT_ARGS >= 255
+#error LWIP_TCP_PCB_NUM_EXT_ARGS must be < 255
+#endif
+ LWIP_ASSERT("Increase LWIP_TCP_PCB_NUM_EXT_ARGS in lwipopts.h", result < LWIP_TCP_PCB_NUM_EXT_ARGS);
+ return result;
+}
+
+/**
+ * @ingroup tcp_raw_extargs
+ * Set callbacks for a given index of ext_args on the specified pcb.
+ *
+ * @param pcb tcp_pcb for which to set the callback
+ * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id)
+ * @param callbacks callback table (const since it is referenced, not copied!)
+ */
+void
+tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks)
+{
+ LWIP_ASSERT("pcb != NULL", pcb != NULL);
+ LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS);
+ LWIP_ASSERT("callbacks != NULL", callbacks != NULL);
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ pcb->ext_args[id].callbacks = callbacks;
+}
+
+/**
+ * @ingroup tcp_raw_extargs
+ * Set data for a given index of ext_args on the specified pcb.
+ *
+ * @param pcb tcp_pcb for which to set the data
+ * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id)
+ * @param arg data pointer to set
+ */
+void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg)
+{
+ LWIP_ASSERT("pcb != NULL", pcb != NULL);
+ LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS);
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ pcb->ext_args[id].data = arg;
+}
+
+/**
+ * @ingroup tcp_raw_extargs
+ * Get data for a given index of ext_args on the specified pcb.
+ *
+ * @param pcb tcp_pcb for which to get the data
+ * @param id ext_args index to get (allocated via @ref tcp_ext_arg_alloc_id)
+ * @return data pointer at the given index
+ */
+void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id)
+{
+ LWIP_ASSERT("pcb != NULL", pcb != NULL);
+ LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS);
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ return pcb->ext_args[id].data;
+}
+
+/** This function calls the "destroy" callback for all ext_args once a pcb is
+ * freed.
+ */
+static void
+tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args)
+{
+ int i;
+ LWIP_ASSERT("ext_args != NULL", ext_args != NULL);
+
+ for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) {
+ if (ext_args[i].callbacks != NULL) {
+ if (ext_args[i].callbacks->destroy != NULL) {
+ ext_args[i].callbacks->destroy((u8_t)i, ext_args[i].data);
+ }
+ }
+ }
+}
+
+/** This function calls the "passive_open" callback for all ext_args if a connection
+ * is in the process of being accepted. This is called just after the SYN is
+ * received and before a SYN/ACK is sent, to allow modifying the very first
+ * segment sent even on passive open. Naturally, the "accepted" callback of the
+ * pcb has not been called yet!
+ */
+err_t
+tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb)
+{
+ int i;
+ LWIP_ASSERT("lpcb != NULL", lpcb != NULL);
+ LWIP_ASSERT("cpcb != NULL", cpcb != NULL);
+
+ for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) {
+ if (lpcb->ext_args[i].callbacks != NULL) {
+ if (lpcb->ext_args[i].callbacks->passive_open != NULL) {
+ err_t err = lpcb->ext_args[i].callbacks->passive_open((u8_t)i, lpcb, cpcb);
+ if (err != ERR_OK) {
+ return err;
+ }
+ }
+ }
+ }
+ return ERR_OK;
+}
+#endif /* LWIP_TCP_PCB_NUM_EXT_ARGS */
+
+#endif /* LWIP_TCP */
diff --git a/lwip/src/core/tcp_in.c b/lwip/src/core/tcp_in.c
new file mode 100644
index 0000000..428a6f4
--- /dev/null
+++ b/lwip/src/core/tcp_in.c
@@ -0,0 +1,2178 @@
+/**
+ * @file
+ * Transmission Control Protocol, incoming traffic
+ *
+ * The input processing functions of the TCP layer.
+ *
+ * These functions are generally called in the order (ip_input() ->)
+ * tcp_input() -> tcp_process() -> tcp_receive() (-> application).
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/def.h"
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/stats.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#if LWIP_ND6_TCP_REACHABILITY_HINTS
+#include "lwip/nd6.h"
+#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/** Initial CWND calculation, as defined in RFC 2581 with the larger upper bound of RFC 3390 */
+#define LWIP_TCP_CALC_INITIAL_CWND(mss) ((tcpwnd_size_t)LWIP_MIN((4U * (mss)), LWIP_MAX((2U * (mss)), 4380U)))
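+
+/* Worked example: for mss = 1460 the macro yields
+   LWIP_MIN(4 * 1460, LWIP_MAX(2 * 1460, 4380)) = LWIP_MIN(5840, 4380) = 4380
+   (three full segments); for mss = 536 it yields LWIP_MIN(2144, 4380) = 2144. */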
+
+/* These variables are global to all functions involved in the input
+ processing of TCP segments. They are set by the tcp_input()
+ function. */
+static struct tcp_seg inseg;
+static struct tcp_hdr *tcphdr;
+static u16_t tcphdr_optlen;
+static u16_t tcphdr_opt1len;
+static u8_t *tcphdr_opt2;
+static u16_t tcp_optidx;
+static u32_t seqno, ackno;
+static tcpwnd_size_t recv_acked;
+static u16_t tcplen;
+static u8_t flags;
+
+static u8_t recv_flags;
+static struct pbuf *recv_data;
+
+struct tcp_pcb *tcp_input_pcb;
+
+/* Forward declarations. */
+static err_t tcp_process(struct tcp_pcb *pcb);
+static void tcp_receive(struct tcp_pcb *pcb);
+static void tcp_parseopt(struct tcp_pcb *pcb);
+
+static void tcp_listen_input(struct tcp_pcb_listen *pcb);
+static void tcp_timewait_input(struct tcp_pcb *pcb);
+
+static int tcp_input_delayed_close(struct tcp_pcb *pcb);
+
+#if LWIP_TCP_SACK_OUT
+static void tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right);
+static void tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq);
+#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT)
+static void tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq);
+#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */
+#endif /* LWIP_TCP_SACK_OUT */
+
+/**
+ * The initial input processing of TCP. It verifies the TCP header, demultiplexes
+ * the segment between the PCBs and passes it on to tcp_process(), which implements
+ * the TCP finite state machine. This function is called by the IP layer (in
+ * ip_input()).
+ *
+ * @param p received TCP segment to process (p->payload pointing to the TCP header)
+ * @param inp network interface on which this segment was received
+ */
+void
+tcp_input(struct pbuf *p, struct netif *inp)
+{
+ struct tcp_pcb *pcb, *prev;
+ struct tcp_pcb_listen *lpcb;
+#if SO_REUSE
+ struct tcp_pcb *lpcb_prev = NULL;
+ struct tcp_pcb_listen *lpcb_any = NULL;
+#endif /* SO_REUSE */
+ u8_t hdrlen_bytes;
+ err_t err;
+
+ LWIP_UNUSED_ARG(inp);
+ LWIP_ASSERT_CORE_LOCKED();
+ LWIP_ASSERT("tcp_input: invalid pbuf", p != NULL);
+
+ PERF_START;
+
+ TCP_STATS_INC(tcp.recv);
+ MIB2_STATS_INC(mib2.tcpinsegs);
+
+ tcphdr = (struct tcp_hdr *)p->payload;
+
+#if TCP_INPUT_DEBUG
+ tcp_debug_print(tcphdr);
+#endif
+
+ /* Check that TCP header fits in payload */
+ if (p->len < TCP_HLEN) {
+ /* drop short packets */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: short packet (%"U16_F" bytes) discarded\n", p->tot_len));
+ TCP_STATS_INC(tcp.lenerr);
+ goto dropped;
+ }
+
+ /* Don't even process incoming broadcasts/multicasts. */
+ if (ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()) ||
+ ip_addr_ismulticast(ip_current_dest_addr())) {
+ TCP_STATS_INC(tcp.proterr);
+ goto dropped;
+ }
+
+#if CHECKSUM_CHECK_TCP
+ IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_TCP) {
+ /* Verify TCP checksum. */
+ u16_t chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len,
+ ip_current_src_addr(), ip_current_dest_addr());
+ if (chksum != 0) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packet discarded due to failing checksum 0x%04"X16_F"\n",
+ chksum));
+ tcp_debug_print(tcphdr);
+ TCP_STATS_INC(tcp.chkerr);
+ goto dropped;
+ }
+ }
+#endif /* CHECKSUM_CHECK_TCP */
+
+ /* sanity-check header length */
+ hdrlen_bytes = TCPH_HDRLEN_BYTES(tcphdr);
+ if ((hdrlen_bytes < TCP_HLEN) || (hdrlen_bytes > p->tot_len)) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: invalid header length (%"U16_F")\n", (u16_t)hdrlen_bytes));
+ TCP_STATS_INC(tcp.lenerr);
+ goto dropped;
+ }
+
+ /* Move the payload pointer in the pbuf so that it points to the
+ TCP data instead of the TCP header. */
+ tcphdr_optlen = (u16_t)(hdrlen_bytes - TCP_HLEN);
+ tcphdr_opt2 = NULL;
+ if (p->len >= hdrlen_bytes) {
+ /* all options are in the first pbuf */
+ tcphdr_opt1len = tcphdr_optlen;
+ pbuf_remove_header(p, hdrlen_bytes); /* cannot fail */
+ } else {
+ u16_t opt2len;
+ /* TCP header fits into first pbuf, options don't - data is in the next pbuf */
+ /* there must be a next pbuf, due to hdrlen_bytes sanity check above */
+ LWIP_ASSERT("p->next != NULL", p->next != NULL);
+
+ /* advance over the TCP header (cannot fail) */
+ pbuf_remove_header(p, TCP_HLEN);
+
+ /* determine how long the first and second parts of the options are */
+ tcphdr_opt1len = p->len;
+ opt2len = (u16_t)(tcphdr_optlen - tcphdr_opt1len);
+
+ /* options continue in the next pbuf: set p to zero length and hide the
+ options in the next pbuf (adjusting p->tot_len) */
+ pbuf_remove_header(p, tcphdr_opt1len);
+
+ /* check that the options fit in the second pbuf */
+ if (opt2len > p->next->len) {
+ /* drop short packets */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: options overflow second pbuf (%"U16_F" bytes)\n", p->next->len));
+ TCP_STATS_INC(tcp.lenerr);
+ goto dropped;
+ }
+
+ /* remember the pointer to the second part of the options */
+ tcphdr_opt2 = (u8_t *)p->next->payload;
+
+ /* advance p->next to point after the options, and manually
+ adjust p->tot_len to keep it consistent with the changed p->next */
+ pbuf_remove_header(p->next, opt2len);
+ p->tot_len = (u16_t)(p->tot_len - opt2len);
+
+ LWIP_ASSERT("p->len == 0", p->len == 0);
+ LWIP_ASSERT("p->tot_len == p->next->tot_len", p->tot_len == p->next->tot_len);
+ }
+
+ /* Convert fields in TCP header to host byte order. */
+ tcphdr->src = lwip_ntohs(tcphdr->src);
+ tcphdr->dest = lwip_ntohs(tcphdr->dest);
+ seqno = tcphdr->seqno = lwip_ntohl(tcphdr->seqno);
+ ackno = tcphdr->ackno = lwip_ntohl(tcphdr->ackno);
+ tcphdr->wnd = lwip_ntohs(tcphdr->wnd);
+
+ flags = TCPH_FLAGS(tcphdr);
+ tcplen = p->tot_len;
+ if (flags & (TCP_FIN | TCP_SYN)) {
+ tcplen++;
+ if (tcplen < p->tot_len) {
+ /* u16_t overflow, cannot handle this */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: length u16_t overflow, cannot handle this\n"));
+ TCP_STATS_INC(tcp.lenerr);
+ goto dropped;
+ }
+ }
+
+ /* Demultiplex an incoming segment. First, we check if it is destined
+ for an active connection. */
+ prev = NULL;
+
+ for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
+ LWIP_ASSERT("tcp_input: active pcb->state != CLOSED", pcb->state != CLOSED);
+ LWIP_ASSERT("tcp_input: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT);
+ LWIP_ASSERT("tcp_input: active pcb->state != LISTEN", pcb->state != LISTEN);
+
+ /* check if PCB is bound to specific netif */
+ if ((pcb->netif_idx != NETIF_NO_INDEX) &&
+ (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) {
+ prev = pcb;
+ continue;
+ }
+
+ if (pcb->remote_port == tcphdr->src &&
+ pcb->local_port == tcphdr->dest &&
+ ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) &&
+ ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) {
+ /* Move this PCB to the front of the list so that subsequent
+ lookups will be faster (we exploit locality in TCP segment
+ arrivals). */
+ LWIP_ASSERT("tcp_input: pcb->next != pcb (before cache)", pcb->next != pcb);
+ if (prev != NULL) {
+ prev->next = pcb->next;
+ pcb->next = tcp_active_pcbs;
+ tcp_active_pcbs = pcb;
+ } else {
+ TCP_STATS_INC(tcp.cachehit);
+ }
+ LWIP_ASSERT("tcp_input: pcb->next != pcb (after cache)", pcb->next != pcb);
+ break;
+ }
+ prev = pcb;
+ }
+
+ if (pcb == NULL) {
+ /* If it did not go to an active connection, we check the connections
+ in the TIME-WAIT state. */
+ for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
+ LWIP_ASSERT("tcp_input: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT);
+
+ /* check if PCB is bound to specific netif */
+ if ((pcb->netif_idx != NETIF_NO_INDEX) &&
+ (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) {
+ continue;
+ }
+
+ if (pcb->remote_port == tcphdr->src &&
+ pcb->local_port == tcphdr->dest &&
+ ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) &&
+ ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) {
+ /* We don't really care enough to move this PCB to the front
+ of the list since we are not very likely to receive that
+ many segments for connections in TIME-WAIT. */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for TIME_WAITing connection.\n"));
+#ifdef LWIP_HOOK_TCP_INPACKET_PCB
+ if (LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, tcphdr_opt1len,
+ tcphdr_opt2, p) == ERR_OK)
+#endif
+ {
+ tcp_timewait_input(pcb);
+ }
+ pbuf_free(p);
+ return;
+ }
+ }
+
+ /* Finally, if we still did not get a match, we check all PCBs that
+ are LISTENing for incoming connections. */
+ prev = NULL;
+ for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) {
+ /* check if PCB is bound to specific netif */
+ if ((lpcb->netif_idx != NETIF_NO_INDEX) &&
+ (lpcb->netif_idx != netif_get_index(ip_data.current_input_netif))) {
+ prev = (struct tcp_pcb *)lpcb;
+ continue;
+ }
+
+ if (lpcb->local_port == tcphdr->dest) {
+ if (IP_IS_ANY_TYPE_VAL(lpcb->local_ip)) {
+ /* found an ANY TYPE (IPv4/IPv6) match */
+#if SO_REUSE
+ lpcb_any = lpcb;
+ lpcb_prev = prev;
+#else /* SO_REUSE */
+ break;
+#endif /* SO_REUSE */
+ } else if (IP_ADDR_PCB_VERSION_MATCH_EXACT(lpcb, ip_current_dest_addr())) {
+ if (ip_addr_cmp(&lpcb->local_ip, ip_current_dest_addr())) {
+ /* found an exact match */
+ break;
+ } else if (ip_addr_isany(&lpcb->local_ip)) {
+ /* found an ANY-match */
+#if SO_REUSE
+ lpcb_any = lpcb;
+ lpcb_prev = prev;
+#else /* SO_REUSE */
+ break;
+#endif /* SO_REUSE */
+ }
+ }
+ }
+ prev = (struct tcp_pcb *)lpcb;
+ }
+#if SO_REUSE
+ /* first try specific local IP */
+ if (lpcb == NULL) {
+ /* only pass to ANY if no specific local IP has been found */
+ lpcb = lpcb_any;
+ prev = lpcb_prev;
+ }
+#endif /* SO_REUSE */
+ if (lpcb != NULL) {
+ /* Move this PCB to the front of the list so that subsequent
+ lookups will be faster (we exploit locality in TCP segment
+ arrivals). */
+ if (prev != NULL) {
+ ((struct tcp_pcb_listen *)prev)->next = lpcb->next;
+ /* our successor is the remainder of the listening list */
+ lpcb->next = tcp_listen_pcbs.listen_pcbs;
+ /* put this listening pcb at the head of the listening list */
+ tcp_listen_pcbs.listen_pcbs = lpcb;
+ } else {
+ TCP_STATS_INC(tcp.cachehit);
+ }
+
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for LISTENing connection.\n"));
+#ifdef LWIP_HOOK_TCP_INPACKET_PCB
+ if (LWIP_HOOK_TCP_INPACKET_PCB((struct tcp_pcb *)lpcb, tcphdr, tcphdr_optlen,
+ tcphdr_opt1len, tcphdr_opt2, p) == ERR_OK)
+#endif
+ {
+ tcp_listen_input(lpcb);
+ }
+ pbuf_free(p);
+ return;
+ }
+ }
+
+#if TCP_INPUT_DEBUG
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("+-+-+-+-+-+-+-+-+-+-+-+-+-+- tcp_input: flags "));
+ tcp_debug_print_flags(TCPH_FLAGS(tcphdr));
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n"));
+#endif /* TCP_INPUT_DEBUG */
+
+
+#ifdef LWIP_HOOK_TCP_INPACKET_PCB
+ if ((pcb != NULL) && LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen,
+ tcphdr_opt1len, tcphdr_opt2, p) != ERR_OK) {
+ pbuf_free(p);
+ return;
+ }
+#endif
+ if (pcb != NULL) {
+ /* The incoming segment belongs to a connection. */
+#if TCP_INPUT_DEBUG
+ tcp_debug_print_state(pcb->state);
+#endif /* TCP_INPUT_DEBUG */
+
+ /* Set up a tcp_seg structure. */
+ inseg.next = NULL;
+ inseg.len = p->tot_len;
+ inseg.p = p;
+ inseg.tcphdr = tcphdr;
+
+ recv_data = NULL;
+ recv_flags = 0;
+ recv_acked = 0;
+
+ if (flags & TCP_PSH) {
+ p->flags |= PBUF_FLAG_PUSH;
+ }
+
+ /* If there is data which was previously "refused" by upper layer */
+ if (pcb->refused_data != NULL) {
+ if ((tcp_process_refused_data(pcb) == ERR_ABRT) ||
+ ((pcb->refused_data != NULL) && (tcplen > 0))) {
+ /* pcb has been aborted or refused data is still refused and the new
+ segment contains data */
+ if (pcb->rcv_ann_wnd == 0) {
+ /* this is a zero-window probe, we respond to it with current RCV.NXT
+ and drop the data segment */
+ tcp_send_empty_ack(pcb);
+ }
+ TCP_STATS_INC(tcp.drop);
+ MIB2_STATS_INC(mib2.tcpinerrs);
+ goto aborted;
+ }
+ }
+ tcp_input_pcb = pcb;
+ err = tcp_process(pcb);
+ /* A return value of ERR_ABRT means that tcp_abort() was called
+ and that the pcb has been freed. If so, we don't do anything. */
+ if (err != ERR_ABRT) {
+ if (recv_flags & TF_RESET) {
+ /* TF_RESET means that the connection was reset by the other
+ end. We then call the error callback to inform the
+ application that the connection is dead before we
+ deallocate the PCB. */
+ TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_RST);
+ tcp_pcb_remove(&tcp_active_pcbs, pcb);
+ tcp_free(pcb);
+ } else {
+ err = ERR_OK;
+ /* If the application has registered a "sent" function to be
+ called when new send buffer space is available, we call it
+ now. */
+ if (recv_acked > 0) {
+ u16_t acked16;
+#if LWIP_WND_SCALE
+ /* recv_acked is u32_t but the sent callback only takes a u16_t,
+ so we might have to call it multiple times. */
+ u32_t acked = recv_acked;
+ while (acked > 0) {
+ acked16 = (u16_t)LWIP_MIN(acked, 0xffffu);
+ acked -= acked16;
+#else
+ {
+ acked16 = recv_acked;
+#endif
+ TCP_EVENT_SENT(pcb, (u16_t)acked16, err);
+ if (err == ERR_ABRT) {
+ goto aborted;
+ }
+ }
+ recv_acked = 0;
+ }
+ if (tcp_input_delayed_close(pcb)) {
+ goto aborted;
+ }
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ while (recv_data != NULL) {
+ struct pbuf *rest = NULL;
+ pbuf_split_64k(recv_data, &rest);
+#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ if (recv_data != NULL) {
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+
+ LWIP_ASSERT("pcb->refused_data == NULL", pcb->refused_data == NULL);
+ if (pcb->flags & TF_RXCLOSED) {
+ /* received data although already closed -> abort (send RST) to
+ notify the remote host that not all data has been processed */
+ pbuf_free(recv_data);
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ if (rest != NULL) {
+ pbuf_free(rest);
+ }
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ tcp_abort(pcb);
+ goto aborted;
+ }
+
+ /* Notify application that data has been received. */
+ TCP_EVENT_RECV(pcb, recv_data, ERR_OK, err);
+ if (err == ERR_ABRT) {
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ if (rest != NULL) {
+ pbuf_free(rest);
+ }
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ goto aborted;
+ }
+
+ /* If the upper layer can't receive this data, store it */
+ if (err != ERR_OK) {
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ if (rest != NULL) {
+ pbuf_cat(recv_data, rest);
+ }
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ pcb->refused_data = recv_data;
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: keep incoming packet, because pcb is \"full\"\n"));
+#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
+ break;
+ } else {
+ /* Upper layer received the data, go on with the rest if > 64K */
+ recv_data = rest;
+#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
+ }
+ }
+
+ /* If a FIN segment was received, we call the callback
+ function with a NULL buffer to indicate EOF. */
+ if (recv_flags & TF_GOT_FIN) {
+ if (pcb->refused_data != NULL) {
+ /* Delay this if we have refused data. */
+ pcb->refused_data->flags |= PBUF_FLAG_TCP_FIN;
+ } else {
+ /* correct rcv_wnd as the application won't call tcp_recved()
+ for the FIN's seqno */
+ if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) {
+ pcb->rcv_wnd++;
+ }
+ TCP_EVENT_CLOSED(pcb, err);
+ if (err == ERR_ABRT) {
+ goto aborted;
+ }
+ }
+ }
+
+ tcp_input_pcb = NULL;
+ if (tcp_input_delayed_close(pcb)) {
+ goto aborted;
+ }
+ /* Try to send something out. */
+ tcp_output(pcb);
+#if TCP_INPUT_DEBUG
+#if TCP_DEBUG
+ tcp_debug_print_state(pcb->state);
+#endif /* TCP_DEBUG */
+#endif /* TCP_INPUT_DEBUG */
+ }
+ }
+ /* Jump target if pcb has been aborted in a callback (by calling tcp_abort()).
+ Below this line, 'pcb' may not be dereferenced! */
+aborted:
+ tcp_input_pcb = NULL;
+ recv_data = NULL;
+
+ /* give up our reference to inseg.p */
+ if (inseg.p != NULL) {
+ pbuf_free(inseg.p);
+ inseg.p = NULL;
+ }
+ } else {
+ /* If no matching PCB was found, send a TCP RST (reset) to the
+ sender. */
+ LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_input: no PCB match found, resetting.\n"));
+ if (!(TCPH_FLAGS(tcphdr) & TCP_RST)) {
+ TCP_STATS_INC(tcp.proterr);
+ TCP_STATS_INC(tcp.drop);
+ tcp_rst(NULL, ackno, seqno + tcplen, ip_current_dest_addr(),
+ ip_current_src_addr(), tcphdr->dest, tcphdr->src);
+ }
+ pbuf_free(p);
+ }
+
+ LWIP_ASSERT("tcp_input: tcp_pcbs_sane()", tcp_pcbs_sane());
+ PERF_STOP("tcp_input");
+ return;
+dropped:
+ TCP_STATS_INC(tcp.drop);
+ MIB2_STATS_INC(mib2.tcpinerrs);
+ pbuf_free(p);
+}
+
+/** Called from tcp_input to check for TF_CLOSED flag. This results in closing
+ * and deallocating a pcb at the correct place to ensure no one references it
+ * any more.
+ * @returns 1 if the pcb has been closed and deallocated, 0 otherwise
+ */
+static int
+tcp_input_delayed_close(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("tcp_input_delayed_close: invalid pcb", pcb != NULL);
+
+ if (recv_flags & TF_CLOSED) {
+ /* The connection has been closed and we will deallocate the
+ PCB. */
+ if (!(pcb->flags & TF_RXCLOSED)) {
+ /* Connection closed although the application has only shut down the
+ tx side: call the PCB's err callback and indicate the closure to
+ ensure the application doesn't continue using the PCB. */
+ TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_CLSD);
+ }
+ tcp_pcb_remove(&tcp_active_pcbs, pcb);
+ tcp_free(pcb);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Called by tcp_input() when a segment arrives for a listening
+ * connection.
+ *
+ * @param pcb the tcp_pcb_listen for which a segment arrived
+ *
+ * @note the segment which arrived is saved in global variables, therefore only the pcb
+ * involved is passed as a parameter to this function
+ */
+static void
+tcp_listen_input(struct tcp_pcb_listen *pcb)
+{
+ struct tcp_pcb *npcb;
+ u32_t iss;
+ err_t rc;
+
+ if (flags & TCP_RST) {
+ /* An incoming RST should be ignored. Return. */
+ return;
+ }
+
+ LWIP_ASSERT("tcp_listen_input: invalid pcb", pcb != NULL);
+
+  /* In the LISTEN state, we check for incoming SYN segments,
+     create a new PCB, and respond with a SYN|ACK. */
+ if (flags & TCP_ACK) {
+ /* For incoming segments with the ACK flag set, respond with a
+ RST. */
+ LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_listen_input: ACK in LISTEN, sending reset\n"));
+ tcp_rst((const struct tcp_pcb *)pcb, ackno, seqno + tcplen, ip_current_dest_addr(),
+ ip_current_src_addr(), tcphdr->dest, tcphdr->src);
+ } else if (flags & TCP_SYN) {
+ LWIP_DEBUGF(TCP_DEBUG, ("TCP connection request %"U16_F" -> %"U16_F".\n", tcphdr->src, tcphdr->dest));
+#if TCP_LISTEN_BACKLOG
+ if (pcb->accepts_pending >= pcb->backlog) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: listen backlog exceeded for port %"U16_F"\n", tcphdr->dest));
+ return;
+ }
+#endif /* TCP_LISTEN_BACKLOG */
+ npcb = tcp_alloc(pcb->prio);
+ /* If a new PCB could not be created (probably due to lack of memory),
+       we don't do anything, but rely on the sender to retransmit the
+ SYN at a time when we have more memory available. */
+ if (npcb == NULL) {
+ err_t err;
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: could not allocate PCB\n"));
+ TCP_STATS_INC(tcp.memerr);
+ TCP_EVENT_ACCEPT(pcb, NULL, pcb->callback_arg, ERR_MEM, err);
+ LWIP_UNUSED_ARG(err); /* err not useful here */
+ return;
+ }
+#if TCP_LISTEN_BACKLOG
+ pcb->accepts_pending++;
+ tcp_set_flags(npcb, TF_BACKLOGPEND);
+#endif /* TCP_LISTEN_BACKLOG */
+ /* Set up the new PCB. */
+ ip_addr_copy(npcb->local_ip, *ip_current_dest_addr());
+ ip_addr_copy(npcb->remote_ip, *ip_current_src_addr());
+ npcb->local_port = pcb->local_port;
+ npcb->remote_port = tcphdr->src;
+ npcb->state = SYN_RCVD;
+ npcb->rcv_nxt = seqno + 1;
+ npcb->rcv_ann_right_edge = npcb->rcv_nxt;
+ iss = tcp_next_iss(npcb);
+ npcb->snd_wl2 = iss;
+ npcb->snd_nxt = iss;
+ npcb->lastack = iss;
+ npcb->snd_lbb = iss;
+ npcb->snd_wl1 = seqno - 1;/* initialise to seqno-1 to force window update */
+ npcb->callback_arg = pcb->callback_arg;
+#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG
+ npcb->listener = pcb;
+#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */
+ /* inherit socket options */
+ npcb->so_options = pcb->so_options & SOF_INHERITED;
+ npcb->netif_idx = pcb->netif_idx;
+ /* Register the new PCB so that we can begin receiving segments
+ for it. */
+ TCP_REG_ACTIVE(npcb);
+
+ /* Parse any options in the SYN. */
+ tcp_parseopt(npcb);
+ npcb->snd_wnd = tcphdr->wnd;
+ npcb->snd_wnd_max = npcb->snd_wnd;
+
+#if TCP_CALCULATE_EFF_SEND_MSS
+ npcb->mss = tcp_eff_send_mss(npcb->mss, &npcb->local_ip, &npcb->remote_ip);
+#endif /* TCP_CALCULATE_EFF_SEND_MSS */
+
+ MIB2_STATS_INC(mib2.tcppassiveopens);
+
+#if LWIP_TCP_PCB_NUM_EXT_ARGS
+ if (tcp_ext_arg_invoke_callbacks_passive_open(pcb, npcb) != ERR_OK) {
+ tcp_abandon(npcb, 0);
+ return;
+ }
+#endif
+
+ /* Send a SYN|ACK together with the MSS option. */
+ rc = tcp_enqueue_flags(npcb, TCP_SYN | TCP_ACK);
+ if (rc != ERR_OK) {
+ tcp_abandon(npcb, 0);
+ return;
+ }
+ tcp_output(npcb);
+ }
+ return;
+}
+
+/**
+ * Called by tcp_input() when a segment arrives for a connection in
+ * TIME_WAIT.
+ *
+ * @param pcb the tcp_pcb for which a segment arrived
+ *
+ * @note the segment which arrived is saved in global variables, therefore only the pcb
+ * involved is passed as a parameter to this function
+ */
+static void
+tcp_timewait_input(struct tcp_pcb *pcb)
+{
+ /* RFC 1337: in TIME_WAIT, ignore RSTs; ACK FINs and any 'acceptable' segments */
+ /* RFC 793 3.9 Event Processing - Segment Arrives:
+ * - first check sequence number - we skip that one in TIME_WAIT (always
+ * acceptable since we only send ACKs)
+ * - second check the RST bit (... return) */
+ if (flags & TCP_RST) {
+ return;
+ }
+
+ LWIP_ASSERT("tcp_timewait_input: invalid pcb", pcb != NULL);
+
+ /* - fourth, check the SYN bit, */
+ if (flags & TCP_SYN) {
+ /* If an incoming segment is not acceptable, an acknowledgment
+ should be sent in reply */
+ if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd)) {
+ /* If the SYN is in the window it is an error, send a reset */
+ tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(),
+ ip_current_src_addr(), tcphdr->dest, tcphdr->src);
+ return;
+ }
+ } else if (flags & TCP_FIN) {
+ /* - eighth, check the FIN bit: Remain in the TIME-WAIT state.
+ Restart the 2 MSL time-wait timeout.*/
+ pcb->tmr = tcp_ticks;
+ }
+
+ if (tcplen > 0) {
+ /* Acknowledge data, FIN or out-of-window SYN */
+ tcp_ack_now(pcb);
+ tcp_output(pcb);
+ }
+ return;
+}
+
+/**
+ * Implements the TCP state machine. Called by tcp_input. In some
+ * states tcp_receive() is called to receive data. The tcp_seg
+ * argument will be freed by the caller (tcp_input()) unless the
+ * recv_data pointer in the pcb is set.
+ *
+ * @param pcb the tcp_pcb for which a segment arrived
+ *
+ * @note the segment which arrived is saved in global variables, therefore only the pcb
+ * involved is passed as a parameter to this function
+ */
+static err_t
+tcp_process(struct tcp_pcb *pcb)
+{
+ struct tcp_seg *rseg;
+ u8_t acceptable = 0;
+ err_t err;
+
+ err = ERR_OK;
+
+ LWIP_ASSERT("tcp_process: invalid pcb", pcb != NULL);
+
+ /* Process incoming RST segments. */
+ if (flags & TCP_RST) {
+ /* First, determine if the reset is acceptable. */
+ if (pcb->state == SYN_SENT) {
+ /* "In the SYN-SENT state (a RST received in response to an initial SYN),
+ the RST is acceptable if the ACK field acknowledges the SYN." */
+ if (ackno == pcb->snd_nxt) {
+ acceptable = 1;
+ }
+ } else {
+ /* "In all states except SYN-SENT, all reset (RST) segments are validated
+ by checking their SEQ-fields." */
+ if (seqno == pcb->rcv_nxt) {
+ acceptable = 1;
+ } else if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt,
+ pcb->rcv_nxt + pcb->rcv_wnd)) {
+ /* If the sequence number is inside the window, we send a challenge ACK
+ and wait for a re-send with matching sequence number.
+ This follows RFC 5961 section 3.2 and addresses CVE-2004-0230
+ (RST spoofing attack), which is present in RFC 793 RST handling. */
+ tcp_ack_now(pcb);
+ }
+ }
+
+ if (acceptable) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: Connection RESET\n"));
+ LWIP_ASSERT("tcp_input: pcb->state != CLOSED", pcb->state != CLOSED);
+ recv_flags |= TF_RESET;
+ tcp_clear_flags(pcb, TF_ACK_DELAY);
+ return ERR_RST;
+ } else {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n",
+ seqno, pcb->rcv_nxt));
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n",
+ seqno, pcb->rcv_nxt));
+ return ERR_OK;
+ }
+ }
+
+ if ((flags & TCP_SYN) && (pcb->state != SYN_SENT && pcb->state != SYN_RCVD)) {
+ /* Cope with new connection attempt after remote end crashed */
+ tcp_ack_now(pcb);
+ return ERR_OK;
+ }
+
+ if ((pcb->flags & TF_RXCLOSED) == 0) {
+ /* Update the PCB (in)activity timer unless rx is closed (see tcp_shutdown) */
+ pcb->tmr = tcp_ticks;
+ }
+ pcb->keep_cnt_sent = 0;
+ pcb->persist_probe = 0;
+
+ tcp_parseopt(pcb);
+
+ /* Do different things depending on the TCP state. */
+ switch (pcb->state) {
+ case SYN_SENT:
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("SYN-SENT: ackno %"U32_F" pcb->snd_nxt %"U32_F" unacked %"U32_F"\n", ackno,
+ pcb->snd_nxt, lwip_ntohl(pcb->unacked->tcphdr->seqno)));
+ /* received SYN ACK with expected sequence number? */
+ if ((flags & TCP_ACK) && (flags & TCP_SYN)
+ && (ackno == pcb->lastack + 1)) {
+ pcb->rcv_nxt = seqno + 1;
+ pcb->rcv_ann_right_edge = pcb->rcv_nxt;
+ pcb->lastack = ackno;
+ pcb->snd_wnd = tcphdr->wnd;
+ pcb->snd_wnd_max = pcb->snd_wnd;
+ pcb->snd_wl1 = seqno - 1; /* initialise to seqno - 1 to force window update */
+ pcb->state = ESTABLISHED;
+
+#if TCP_CALCULATE_EFF_SEND_MSS
+ pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip);
+#endif /* TCP_CALCULATE_EFF_SEND_MSS */
+
+ pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss);
+ LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SENT): cwnd %"TCPWNDSIZE_F
+ " ssthresh %"TCPWNDSIZE_F"\n",
+ pcb->cwnd, pcb->ssthresh));
+ LWIP_ASSERT("pcb->snd_queuelen > 0", (pcb->snd_queuelen > 0));
+ --pcb->snd_queuelen;
+ LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_process: SYN-SENT --queuelen %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen));
+ rseg = pcb->unacked;
+ if (rseg == NULL) {
+ /* might happen if tcp_output fails in tcp_rexmit_rto()
+ in which case the segment is on the unsent list */
+ rseg = pcb->unsent;
+ LWIP_ASSERT("no segment to free", rseg != NULL);
+ pcb->unsent = rseg->next;
+ } else {
+ pcb->unacked = rseg->next;
+ }
+ tcp_seg_free(rseg);
+
+ /* If there's nothing left to acknowledge, stop the retransmit
+ timer, otherwise reset it to start again */
+ if (pcb->unacked == NULL) {
+ pcb->rtime = -1;
+ } else {
+ pcb->rtime = 0;
+ pcb->nrtx = 0;
+ }
+
+ /* Call the user specified function to call when successfully
+ * connected. */
+ TCP_EVENT_CONNECTED(pcb, ERR_OK, err);
+ if (err == ERR_ABRT) {
+ return ERR_ABRT;
+ }
+ tcp_ack_now(pcb);
+ }
+ /* received ACK? possibly a half-open connection */
+ else if (flags & TCP_ACK) {
+ /* send a RST to bring the other side into a non-synchronized state. */
+ tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(),
+ ip_current_src_addr(), tcphdr->dest, tcphdr->src);
+ /* Resend SYN immediately (don't wait for rto timeout) to establish
+ connection faster, but do not send more SYNs than we otherwise would
+ have, or we might get caught in a loop on loopback interfaces. */
+ if (pcb->nrtx < TCP_SYNMAXRTX) {
+ pcb->rtime = 0;
+ tcp_rexmit_rto(pcb);
+ }
+ }
+ break;
+ case SYN_RCVD:
+ if (flags & TCP_ACK) {
+ /* expected ACK number? */
+ if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) {
+ pcb->state = ESTABLISHED;
+ LWIP_DEBUGF(TCP_DEBUG, ("TCP connection established %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest));
+#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG
+ if (pcb->listener == NULL) {
+ /* listen pcb might be closed by now */
+ err = ERR_VAL;
+ } else
+#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */
+ {
+#if LWIP_CALLBACK_API
+ LWIP_ASSERT("pcb->listener->accept != NULL", pcb->listener->accept != NULL);
+#endif
+ tcp_backlog_accepted(pcb);
+ /* Call the accept function. */
+ TCP_EVENT_ACCEPT(pcb->listener, pcb, pcb->callback_arg, ERR_OK, err);
+ }
+ if (err != ERR_OK) {
+ /* If the accept function returns with an error, we abort
+ * the connection. */
+ /* Already aborted? */
+ if (err != ERR_ABRT) {
+ tcp_abort(pcb);
+ }
+ return ERR_ABRT;
+ }
+ /* If there was any data contained within this ACK,
+ * we'd better pass it on to the application as well. */
+ tcp_receive(pcb);
+
+ /* Prevent ACK for SYN to generate a sent event */
+ if (recv_acked != 0) {
+ recv_acked--;
+ }
+
+ pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss);
+ LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SYN_RCVD): cwnd %"TCPWNDSIZE_F
+ " ssthresh %"TCPWNDSIZE_F"\n",
+ pcb->cwnd, pcb->ssthresh));
+
+ if (recv_flags & TF_GOT_FIN) {
+ tcp_ack_now(pcb);
+ pcb->state = CLOSE_WAIT;
+ }
+ } else {
+ /* incorrect ACK number, send RST */
+ tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(),
+ ip_current_src_addr(), tcphdr->dest, tcphdr->src);
+ }
+ } else if ((flags & TCP_SYN) && (seqno == pcb->rcv_nxt - 1)) {
+ /* Looks like another copy of the SYN - retransmit our SYN-ACK */
+ tcp_rexmit(pcb);
+ }
+ break;
+ case CLOSE_WAIT:
+ /* FALLTHROUGH */
+ case ESTABLISHED:
+ tcp_receive(pcb);
+ if (recv_flags & TF_GOT_FIN) { /* passive close */
+ tcp_ack_now(pcb);
+ pcb->state = CLOSE_WAIT;
+ }
+ break;
+ case FIN_WAIT_1:
+ tcp_receive(pcb);
+ if (recv_flags & TF_GOT_FIN) {
+ if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) &&
+ pcb->unsent == NULL) {
+ LWIP_DEBUGF(TCP_DEBUG,
+ ("TCP connection closed: FIN_WAIT_1 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest));
+ tcp_ack_now(pcb);
+ tcp_pcb_purge(pcb);
+ TCP_RMV_ACTIVE(pcb);
+ pcb->state = TIME_WAIT;
+ TCP_REG(&tcp_tw_pcbs, pcb);
+ } else {
+ tcp_ack_now(pcb);
+ pcb->state = CLOSING;
+ }
+ } else if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) &&
+ pcb->unsent == NULL) {
+ pcb->state = FIN_WAIT_2;
+ }
+ break;
+ case FIN_WAIT_2:
+ tcp_receive(pcb);
+ if (recv_flags & TF_GOT_FIN) {
+ LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: FIN_WAIT_2 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest));
+ tcp_ack_now(pcb);
+ tcp_pcb_purge(pcb);
+ TCP_RMV_ACTIVE(pcb);
+ pcb->state = TIME_WAIT;
+ TCP_REG(&tcp_tw_pcbs, pcb);
+ }
+ break;
+ case CLOSING:
+ tcp_receive(pcb);
+ if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: CLOSING %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest));
+ tcp_pcb_purge(pcb);
+ TCP_RMV_ACTIVE(pcb);
+ pcb->state = TIME_WAIT;
+ TCP_REG(&tcp_tw_pcbs, pcb);
+ }
+ break;
+ case LAST_ACK:
+ tcp_receive(pcb);
+ if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: LAST_ACK %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest));
+ /* bugfix #21699: don't set pcb->state to CLOSED here or we risk leaking segments */
+ recv_flags |= TF_CLOSED;
+ }
+ break;
+ default:
+ break;
+ }
+ return ERR_OK;
+}
+
+#if TCP_QUEUE_OOSEQ
+/**
+ * Insert segment into the list (segments covered with new one will be deleted)
+ *
+ * Called from tcp_receive()
+ */
+static void
+tcp_oos_insert_segment(struct tcp_seg *cseg, struct tcp_seg *next)
+{
+ struct tcp_seg *old_seg;
+
+ LWIP_ASSERT("tcp_oos_insert_segment: invalid cseg", cseg != NULL);
+
+ if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) {
+ /* received segment overlaps all following segments */
+ tcp_segs_free(next);
+ next = NULL;
+ } else {
+ /* delete some following segments
+ oos queue may have segments with FIN flag */
+ while (next &&
+ TCP_SEQ_GEQ((seqno + cseg->len),
+ (next->tcphdr->seqno + next->len))) {
+ /* cseg with FIN already processed */
+ if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) {
+ TCPH_SET_FLAG(cseg->tcphdr, TCP_FIN);
+ }
+ old_seg = next;
+ next = next->next;
+ tcp_seg_free(old_seg);
+ }
+ if (next &&
+ TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) {
+ /* We need to trim the incoming segment. */
+ cseg->len = (u16_t)(next->tcphdr->seqno - seqno);
+ pbuf_realloc(cseg->p, cseg->len);
+ }
+ }
+ cseg->next = next;
+}
+#endif /* TCP_QUEUE_OOSEQ */
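+
+/* Worked example for tcp_oos_insert_segment() above (editorial, assuming
+ * plain byte-stream sequence numbers): if cseg covers 100..299 while the
+ * queue holds segments 150..199 and 250..399, the loop frees 150..199
+ * (fully covered), the trim step cuts cseg down to 100..249 so that it
+ * abuts 250..399, and cseg->next is left pointing at the surviving
+ * 250..399 segment. */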
+
+/** Remove segments from a list if the incoming ACK acknowledges them */
+static struct tcp_seg *
+tcp_free_acked_segments(struct tcp_pcb *pcb, struct tcp_seg *seg_list, const char *dbg_list_name,
+ struct tcp_seg *dbg_other_seg_list)
+{
+ struct tcp_seg *next;
+ u16_t clen;
+
+ LWIP_UNUSED_ARG(dbg_list_name);
+ LWIP_UNUSED_ARG(dbg_other_seg_list);
+
+ while (seg_list != NULL &&
+ TCP_SEQ_LEQ(lwip_ntohl(seg_list->tcphdr->seqno) +
+ TCP_TCPLEN(seg_list), ackno)) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->%s\n",
+ lwip_ntohl(seg_list->tcphdr->seqno),
+ lwip_ntohl(seg_list->tcphdr->seqno) + TCP_TCPLEN(seg_list),
+ dbg_list_name));
+
+ next = seg_list;
+ seg_list = seg_list->next;
+
+ clen = pbuf_clen(next->p);
+ LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... ",
+ (tcpwnd_size_t)pcb->snd_queuelen));
+ LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= clen));
+
+ pcb->snd_queuelen = (u16_t)(pcb->snd_queuelen - clen);
+ recv_acked = (tcpwnd_size_t)(recv_acked + next->len);
+ tcp_seg_free(next);
+
+ LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing %s)\n",
+ (tcpwnd_size_t)pcb->snd_queuelen,
+ dbg_list_name));
+ if (pcb->snd_queuelen != 0) {
+ LWIP_ASSERT("tcp_receive: valid queue length",
+ seg_list != NULL || dbg_other_seg_list != NULL);
+ }
+ }
+ return seg_list;
+}
+
+/**
+ * Called by tcp_process. Checks if the given segment is an ACK for outstanding
+ * data, and if so frees the memory of the buffered data. Next, it places the
+ * segment on any of the receive queues (recv_data or pcb->ooseq). If the segment
+ * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed until
+ * it has been removed from the buffer.
+ *
+ * If the incoming segment constitutes an ACK for a segment that was used for RTT
+ * estimation, the RTT is estimated here as well.
+ *
+ * Called from tcp_process().
+ */
+static void
+tcp_receive(struct tcp_pcb *pcb)
+{
+ s16_t m;
+ u32_t right_wnd_edge;
+ int found_dupack = 0;
+
+ LWIP_ASSERT("tcp_receive: invalid pcb", pcb != NULL);
+ LWIP_ASSERT("tcp_receive: wrong state", pcb->state >= ESTABLISHED);
+
+ if (flags & TCP_ACK) {
+ right_wnd_edge = pcb->snd_wnd + pcb->snd_wl2;
+
+ /* Update window. */
+ if (TCP_SEQ_LT(pcb->snd_wl1, seqno) ||
+ (pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) ||
+ (pcb->snd_wl2 == ackno && (u32_t)SND_WND_SCALE(pcb, tcphdr->wnd) > pcb->snd_wnd)) {
+ pcb->snd_wnd = SND_WND_SCALE(pcb, tcphdr->wnd);
+ /* keep track of the biggest window announced by the remote host to calculate
+ the maximum segment size */
+ if (pcb->snd_wnd_max < pcb->snd_wnd) {
+ pcb->snd_wnd_max = pcb->snd_wnd;
+ }
+ pcb->snd_wl1 = seqno;
+ pcb->snd_wl2 = ackno;
+ LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"TCPWNDSIZE_F"\n", pcb->snd_wnd));
+#if TCP_WND_DEBUG
+ } else {
+ if (pcb->snd_wnd != (tcpwnd_size_t)SND_WND_SCALE(pcb, tcphdr->wnd)) {
+ LWIP_DEBUGF(TCP_WND_DEBUG,
+ ("tcp_receive: no window update lastack %"U32_F" ackno %"
+ U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n",
+ pcb->lastack, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2));
+ }
+#endif /* TCP_WND_DEBUG */
+ }
+
+ /* (From Stevens TCP/IP Illustrated Vol II, p970.) It's only a
+ * duplicate ack if:
+ * 1) It doesn't ACK new data
+ * 2) length of received packet is zero (i.e. no payload)
+ * 3) the advertised window hasn't changed
+ * 4) There is outstanding unacknowledged data (retransmission timer running)
+ * 5) The ACK is == biggest ACK sequence number so far seen (snd_una)
+ *
+ * If it passes all five, should process as a dupack:
+ * a) dupacks < 3: do nothing
+ * b) dupacks == 3: fast retransmit
+ * c) dupacks > 3: increase cwnd
+ *
+ * If it only passes 1-3, should reset dupack counter (and add to
+ * stats, which we don't do in lwIP)
+ *
+ * If it only passes 1, should reset dupack counter
+ *
+ */
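+ /* Illustrative example (editorial, not from the lwIP sources): with
+ * 1000 bytes outstanding (lastack == 5000, snd_nxt == 6000), three
+ * successive segments carrying ackno 5000, no payload and an unchanged
+ * window drive dupacks to 3 and trigger tcp_rexmit_fast() below; a
+ * fourth such ACK additionally inflates cwnd by one MSS. */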
+
+ /* Clause 1 */
+ if (TCP_SEQ_LEQ(ackno, pcb->lastack)) {
+ /* Clause 2 */
+ if (tcplen == 0) {
+ /* Clause 3 */
+ if (pcb->snd_wl2 + pcb->snd_wnd == right_wnd_edge) {
+ /* Clause 4 */
+ if (pcb->rtime >= 0) {
+ /* Clause 5 */
+ if (pcb->lastack == ackno) {
+ found_dupack = 1;
+ if ((u8_t)(pcb->dupacks + 1) > pcb->dupacks) {
+ ++pcb->dupacks;
+ }
+ if (pcb->dupacks > 3) {
+ /* Inflate the congestion window */
+ TCP_WND_INC(pcb->cwnd, pcb->mss);
+ }
+ if (pcb->dupacks >= 3) {
+ /* Do fast retransmit (checked via TF_INFR, not via dupacks count) */
+ tcp_rexmit_fast(pcb);
+ }
+ }
+ }
+ }
+ }
+ /* If Clause (1) or more is true, but not a duplicate ack, reset
+ * count of consecutive duplicate acks */
+ if (!found_dupack) {
+ pcb->dupacks = 0;
+ }
+ } else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) {
+ /* We come here when the ACK acknowledges new data. */
+ tcpwnd_size_t acked;
+
+ /* Reset the "IN Fast Retransmit" flag, since we are no longer
+ in fast retransmit. Also reset the congestion window to the
+ slow start threshold. */
+ if (pcb->flags & TF_INFR) {
+ tcp_clear_flags(pcb, TF_INFR);
+ pcb->cwnd = pcb->ssthresh;
+ pcb->bytes_acked = 0;
+ }
+
+ /* Reset the number of retransmissions. */
+ pcb->nrtx = 0;
+
+ /* Reset the retransmission time-out. */
+ pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv);
+
+ /* Record how much data this ACK acks */
+ acked = (tcpwnd_size_t)(ackno - pcb->lastack);
+
+ /* Reset the fast retransmit variables. */
+ pcb->dupacks = 0;
+ pcb->lastack = ackno;
+
+ /* Update the congestion control variables (cwnd and
+ ssthresh). */
+ if (pcb->state >= ESTABLISHED) {
+ if (pcb->cwnd < pcb->ssthresh) {
+ tcpwnd_size_t increase;
+ /* limit to 1 SMSS segment during period following RTO */
+ u8_t num_seg = (pcb->flags & TF_RTO) ? 1 : 2;
+ /* RFC 3465, section 2.2 Slow Start */
+ increase = LWIP_MIN(acked, (tcpwnd_size_t)(num_seg * pcb->mss));
+ TCP_WND_INC(pcb->cwnd, increase);
+ LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd));
+ } else {
+ /* RFC 3465, section 2.1 Congestion Avoidance */
+ TCP_WND_INC(pcb->bytes_acked, acked);
+ if (pcb->bytes_acked >= pcb->cwnd) {
+ pcb->bytes_acked = (tcpwnd_size_t)(pcb->bytes_acked - pcb->cwnd);
+ TCP_WND_INC(pcb->cwnd, pcb->mss);
+ }
+ LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd));
+ }
+ }
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n",
+ ackno,
+ pcb->unacked != NULL ?
+ lwip_ntohl(pcb->unacked->tcphdr->seqno) : 0,
+ pcb->unacked != NULL ?
+ lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked) : 0));
+
+ /* Remove segment from the unacknowledged list if the incoming
+ ACK acknowledges them. */
+ pcb->unacked = tcp_free_acked_segments(pcb, pcb->unacked, "unacked", pcb->unsent);
+ /* We go through the ->unsent list to see if any of the segments
+ on the list are acknowledged by the ACK. This may seem
+ strange since an "unsent" segment shouldn't be acked. The
+ rationale is that lwIP puts all outstanding segments on the
+ ->unsent list after a retransmission, so these segments may
+ in fact have been sent once. */
+ pcb->unsent = tcp_free_acked_segments(pcb, pcb->unsent, "unsent", pcb->unacked);
+
+ /* If there's nothing left to acknowledge, stop the retransmit
+ timer, otherwise reset it to start again */
+ if (pcb->unacked == NULL) {
+ pcb->rtime = -1;
+ } else {
+ pcb->rtime = 0;
+ }
+
+ pcb->polltmr = 0;
+
+#if TCP_OVERSIZE
+ if (pcb->unsent == NULL) {
+ pcb->unsent_oversize = 0;
+ }
+#endif /* TCP_OVERSIZE */
+
+#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS
+ if (ip_current_is_v6()) {
+ /* Inform neighbor reachability of forward progress. */
+ nd6_reachability_hint(ip6_current_src_addr());
+ }
+#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/
+
+ pcb->snd_buf = (tcpwnd_size_t)(pcb->snd_buf + recv_acked);
+ /* check if this ACK ends our retransmission of in-flight data */
+ if (pcb->flags & TF_RTO) {
+ /* RTO is done if
+ 1) both queues are empty or
+ 2) unacked is empty and unsent head contains data not part of RTO or
+ 3) unacked head contains data not part of RTO */
+ if (pcb->unacked == NULL) {
+ if ((pcb->unsent == NULL) ||
+ (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unsent->tcphdr->seqno)))) {
+ tcp_clear_flags(pcb, TF_RTO);
+ }
+ } else if (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unacked->tcphdr->seqno))) {
+ tcp_clear_flags(pcb, TF_RTO);
+ }
+ }
+ /* End of ACK for new data processing. */
+ } else {
+ /* Out of sequence ACK, didn't really ack anything */
+ tcp_send_empty_ack(pcb);
+ }
+
+ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: pcb->rttest %"U32_F" rtseq %"U32_F" ackno %"U32_F"\n",
+ pcb->rttest, pcb->rtseq, ackno));
+
+ /* RTT estimation calculations. This is done by checking if the
+ incoming segment acknowledges the segment we use to take a
+ round-trip time measurement. */
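+ /* Scaling sketch (editorial): following Van Jacobson's scheme, pcb->sa
+ * holds 8*SRTT and pcb->sv roughly 4*RTTVAR, both in TCP slow-timer
+ * ticks, so rto = (sa >> 3) + sv corresponds to SRTT + 4*RTTVAR as in
+ * RFC 6298. For example, sa == 40 and sv == 6 give rto == 11 ticks,
+ * i.e. about 5.5 s with the default TCP_SLOW_INTERVAL of 500 ms. */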
+ if (pcb->rttest && TCP_SEQ_LT(pcb->rtseq, ackno)) {
+ /* the difference shouldn't exceed 32K since these are TCP timer ticks
+ and a round trip shouldn't take that long... */
+ m = (s16_t)(tcp_ticks - pcb->rttest);
+
+ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: experienced rtt %"U16_F" ticks (%"U16_F" msec).\n",
+ m, (u16_t)(m * TCP_SLOW_INTERVAL)));
+
+ /* This is taken directly from VJ's original code in his paper */
+ m = (s16_t)(m - (pcb->sa >> 3));
+ pcb->sa = (s16_t)(pcb->sa + m);
+ if (m < 0) {
+ m = (s16_t) - m;
+ }
+ m = (s16_t)(m - (pcb->sv >> 2));
+ pcb->sv = (s16_t)(pcb->sv + m);
+ pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv);
+
+ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: RTO %"U16_F" (%"U16_F" milliseconds)\n",
+ pcb->rto, (u16_t)(pcb->rto * TCP_SLOW_INTERVAL)));
+
+ pcb->rttest = 0;
+ }
+ }
+
+ /* If the incoming segment contains data, we must process it
+ further unless the pcb already received a FIN.
+ (RFC 793, chapter 3.9, "SEGMENT ARRIVES" in states CLOSE-WAIT, CLOSING,
+ LAST-ACK and TIME-WAIT: "Ignore the segment text.") */
+ if ((tcplen > 0) && (pcb->state < CLOSE_WAIT)) {
+ /* This code basically does three things:
+
+ +) If the incoming segment contains data that is the next
+ in-sequence data, this data is passed to the application. This
+ might involve trimming the first edge of the data. The rcv_nxt
+ variable and the advertised window are adjusted.
+
+ +) If the incoming segment has data that is above the next
+ sequence number expected (->rcv_nxt), the segment is placed on
+ the ->ooseq queue. This is done by finding the appropriate
+ place in the ->ooseq queue (which is ordered by sequence
+ number) and trimming the segment at both ends if needed. An
+ immediate ACK is sent to indicate that we received an
+ out-of-sequence segment.
+
+ +) Finally, we check if the first segment on the ->ooseq queue
+ now is in sequence (i.e., if rcv_nxt >= ooseq->seqno). If
+ rcv_nxt > ooseq->seqno, we must trim the first edge of the
+ segment on ->ooseq before we adjust rcv_nxt. The data in the
+ segments that are now on sequence are chained onto the
+ incoming segment so that we only need to call the application
+ once.
+ */
+
+ /* First, we check if we must trim the first edge. We have to do
+ this if the sequence number of the incoming segment is less
+ than rcv_nxt, and the sequence number plus the length of the
+ segment is larger than rcv_nxt. */
+ /* if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) {
+ if (TCP_SEQ_LT(pcb->rcv_nxt, seqno + tcplen)) {*/
+ if (TCP_SEQ_BETWEEN(pcb->rcv_nxt, seqno + 1, seqno + tcplen - 1)) {
+ /* Trimming the first edge is done by pushing the payload
+ pointer in the pbuf downwards. This is somewhat tricky since
+ we do not want to discard the full contents of the pbuf up to
+ the new starting point of the data since we have to keep the
+ TCP header which is present in the first pbuf in the chain.
+
+ What is done is really quite a nasty hack: the first pbuf in
+ the pbuf chain is pointed to by inseg.p. Since we need to be
+ able to deallocate the whole pbuf, we cannot change this
+ inseg.p pointer to point to any of the later pbufs in the
+ chain. Instead, we point the ->payload pointer in the first
+ pbuf to data in one of the later pbufs. We also set the
+ inseg.data pointer to point to the right place. This way, the
+ ->p pointer will still point to the first pbuf, but the
+ ->p->payload pointer will point to data in another pbuf.
+
+ After we are done with adjusting the pbuf pointers we must
+ adjust the ->data pointer in the seg and the segment
+ length. */
+
+ struct pbuf *p = inseg.p;
+ u32_t off32 = pcb->rcv_nxt - seqno;
+ u16_t new_tot_len, off;
+ LWIP_ASSERT("inseg.p != NULL", inseg.p);
+ LWIP_ASSERT("insane offset!", (off32 < 0xffff));
+ off = (u16_t)off32;
+ LWIP_ASSERT("pbuf too short!", (((s32_t)inseg.p->tot_len) >= off));
+ inseg.len -= off;
+ new_tot_len = (u16_t)(inseg.p->tot_len - off);
+ while (p->len < off) {
+ off -= p->len;
+ /* all pbufs up to and including this one have len==0, so tot_len is equal */
+ p->tot_len = new_tot_len;
+ p->len = 0;
+ p = p->next;
+ }
+ /* cannot fail... */
+ pbuf_remove_header(p, off);
+ inseg.tcphdr->seqno = seqno = pcb->rcv_nxt;
+ } else {
+ if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) {
+ /* the whole segment is < rcv_nxt */
+ /* must be a duplicate of a packet that has already been correctly handled */
+
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: duplicate seqno %"U32_F"\n", seqno));
+ tcp_ack_now(pcb);
+ }
+ }
+
+ /* The sequence number must be within the window (above rcv_nxt
+ and below rcv_nxt + rcv_wnd) in order to be further
+ processed. */
+ if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt,
+ pcb->rcv_nxt + pcb->rcv_wnd - 1)) {
+ if (pcb->rcv_nxt == seqno) {
+ /* The incoming segment is the next in sequence. We check if
+ we have to trim the end of the segment and update rcv_nxt
+ and pass the data to the application. */
+ tcplen = TCP_TCPLEN(&inseg);
+
+ if (tcplen > pcb->rcv_wnd) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG,
+ ("tcp_receive: other end overran receive window"
+ "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n",
+ seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd));
+ if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) {
+ /* Must remove the FIN from the header as we're trimming
+ * that byte of sequence-space from the packet */
+ TCPH_FLAGS_SET(inseg.tcphdr, TCPH_FLAGS(inseg.tcphdr) & ~(unsigned int)TCP_FIN);
+ }
+ /* Adjust length of segment to fit in the window. */
+ TCPWND_CHECK16(pcb->rcv_wnd);
+ inseg.len = (u16_t)pcb->rcv_wnd;
+ if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) {
+ inseg.len -= 1;
+ }
+ pbuf_realloc(inseg.p, inseg.len);
+ tcplen = TCP_TCPLEN(&inseg);
+ LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n",
+ (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd));
+ }
+#if TCP_QUEUE_OOSEQ
+ /* Received in-sequence data, adjust ooseq data if:
+ - FIN has been received or
+ - inseq overlaps with ooseq */
+ if (pcb->ooseq != NULL) {
+ if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG,
+ ("tcp_receive: received in-order FIN, binning ooseq queue\n"));
+ /* Received in-order FIN means anything that was received
+ * out of order must now have been received in-order, so
+ * bin the ooseq queue */
+ while (pcb->ooseq != NULL) {
+ struct tcp_seg *old_ooseq = pcb->ooseq;
+ pcb->ooseq = pcb->ooseq->next;
+ tcp_seg_free(old_ooseq);
+ }
+ } else {
+ struct tcp_seg *next = pcb->ooseq;
+ /* Remove all segments on ooseq that are covered by inseg already.
+ * FIN is copied from ooseq to inseg if present. */
+ while (next &&
+ TCP_SEQ_GEQ(seqno + tcplen,
+ next->tcphdr->seqno + next->len)) {
+ struct tcp_seg *tmp;
+ /* inseg cannot have FIN here (already processed above) */
+ if ((TCPH_FLAGS(next->tcphdr) & TCP_FIN) != 0 &&
+ (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) == 0) {
+ TCPH_SET_FLAG(inseg.tcphdr, TCP_FIN);
+ tcplen = TCP_TCPLEN(&inseg);
+ }
+ tmp = next;
+ next = next->next;
+ tcp_seg_free(tmp);
+ }
+ /* Now trim right side of inseg if it overlaps with the first
+ * segment on ooseq */
+ if (next &&
+ TCP_SEQ_GT(seqno + tcplen,
+ next->tcphdr->seqno)) {
+ /* inseg cannot have FIN here (already processed above) */
+ inseg.len = (u16_t)(next->tcphdr->seqno - seqno);
+ if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) {
+ inseg.len -= 1;
+ }
+ pbuf_realloc(inseg.p, inseg.len);
+ tcplen = TCP_TCPLEN(&inseg);
+ LWIP_ASSERT("tcp_receive: segment not trimmed correctly to ooseq queue\n",
+ (seqno + tcplen) == next->tcphdr->seqno);
+ }
+ pcb->ooseq = next;
+ }
+ }
+#endif /* TCP_QUEUE_OOSEQ */
+
+ pcb->rcv_nxt = seqno + tcplen;
+
+ /* Update the receiver's (our) window. */
+ LWIP_ASSERT("tcp_receive: tcplen > rcv_wnd\n", pcb->rcv_wnd >= tcplen);
+ pcb->rcv_wnd -= tcplen;
+
+ tcp_update_rcv_ann_wnd(pcb);
+
+ /* If there is data in the segment, we make preparations to
+ pass this up to the application. The ->recv_data variable
+ is used for holding the pbuf that goes to the
+ application. The code for reassembling out-of-sequence data
+ chains its data on this pbuf as well.
+
+ If the segment was a FIN, we set the TF_GOT_FIN flag that will
+ be used to indicate to the application that the remote side has
+ closed its end of the connection. */
+ if (inseg.p->tot_len > 0) {
+ recv_data = inseg.p;
+ /* Since this pbuf now is the responsibility of the
+ application, we delete our reference to it so that we won't
+ (mistakenly) deallocate it. */
+ inseg.p = NULL;
+ }
+ if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: received FIN.\n"));
+ recv_flags |= TF_GOT_FIN;
+ }
+
+#if TCP_QUEUE_OOSEQ
+ /* We now check if we have segments on the ->ooseq queue that
+ are now in sequence. */
+ while (pcb->ooseq != NULL &&
+ pcb->ooseq->tcphdr->seqno == pcb->rcv_nxt) {
+
+ struct tcp_seg *cseg = pcb->ooseq;
+ seqno = pcb->ooseq->tcphdr->seqno;
+
+ pcb->rcv_nxt += TCP_TCPLEN(cseg);
+ LWIP_ASSERT("tcp_receive: ooseq tcplen > rcv_wnd\n",
+ pcb->rcv_wnd >= TCP_TCPLEN(cseg));
+ pcb->rcv_wnd -= TCP_TCPLEN(cseg);
+
+ tcp_update_rcv_ann_wnd(pcb);
+
+ if (cseg->p->tot_len > 0) {
+ /* Chain this pbuf onto the pbuf that we will pass to
+ the application. */
+ /* With window scaling, this can overflow recv_data->tot_len, but
+ that's not a problem since we explicitly fix that before passing
+ recv_data to the application. */
+ if (recv_data) {
+ pbuf_cat(recv_data, cseg->p);
+ } else {
+ recv_data = cseg->p;
+ }
+ cseg->p = NULL;
+ }
+ if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: dequeued FIN.\n"));
+ recv_flags |= TF_GOT_FIN;
+ if (pcb->state == ESTABLISHED) { /* force passive close or we can move to active close */
+ pcb->state = CLOSE_WAIT;
+ }
+ }
+
+ pcb->ooseq = cseg->next;
+ tcp_seg_free(cseg);
+ }
+#if LWIP_TCP_SACK_OUT
+ if (pcb->flags & TF_SACK) {
+ if (pcb->ooseq != NULL) {
+ /* Some segments may have been removed from ooseq, let's remove all SACKs that
+ describe anything before the new beginning of that list. */
+ tcp_remove_sacks_lt(pcb, pcb->ooseq->tcphdr->seqno);
+ } else if (LWIP_TCP_SACK_VALID(pcb, 0)) {
+ /* ooseq has been cleared. Nothing to SACK */
+ memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks));
+ }
+ }
+#endif /* LWIP_TCP_SACK_OUT */
+#endif /* TCP_QUEUE_OOSEQ */
+
+
+ /* Acknowledge the segment(s). */
+ tcp_ack(pcb);
+
+#if LWIP_TCP_SACK_OUT
+ if (LWIP_TCP_SACK_VALID(pcb, 0)) {
+ /* Normally the ACK for the data received could be piggy-backed on a data packet,
+ but lwIP currently does not support including SACKs in data packets. So we force
+ it to respond with an empty ACK packet (only if there is at least one SACK to be sent).
+ NOTE: tcp_send_empty_ack() on success clears the ACK flags (set by tcp_ack()) */
+ tcp_send_empty_ack(pcb);
+ }
+#endif /* LWIP_TCP_SACK_OUT */
+
+#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS
+ if (ip_current_is_v6()) {
+ /* Inform neighbor reachability of forward progress. */
+ nd6_reachability_hint(ip6_current_src_addr());
+ }
+#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/
+
+ } else {
+ /* We get here if the incoming segment is out-of-sequence. */
+
+#if TCP_QUEUE_OOSEQ
+ /* We queue the segment on the ->ooseq queue. */
+ if (pcb->ooseq == NULL) {
+ pcb->ooseq = tcp_seg_copy(&inseg);
+#if LWIP_TCP_SACK_OUT
+ if (pcb->flags & TF_SACK) {
+ /* All the SACKs should be invalid, so we can simply store the most recent one: */
+ pcb->rcv_sacks[0].left = seqno;
+ pcb->rcv_sacks[0].right = seqno + inseg.len;
+ }
+#endif /* LWIP_TCP_SACK_OUT */
+ } else {
+ /* If the queue is not empty, we walk through the queue and
+ try to find a place where the sequence number of the
+ incoming segment is between the sequence numbers of the
+ previous and the next segment on the ->ooseq queue. That is
+ the place where we put the incoming segment. If needed, we
+ trim the second edges of the previous and the incoming
+ segment so that it will fit into the sequence.
+
+ If the incoming segment has the same sequence number as a
+ segment on the ->ooseq queue, we discard the segment that
+ contains less data. */
+
+#if LWIP_TCP_SACK_OUT
+ /* This is the left edge of the lowest possible SACK range.
+ It may start before the newly received segment (possibly adjusted below). */
+ u32_t sackbeg = TCP_SEQ_LT(seqno, pcb->ooseq->tcphdr->seqno) ? seqno : pcb->ooseq->tcphdr->seqno;
+#endif /* LWIP_TCP_SACK_OUT */
+ struct tcp_seg *next, *prev = NULL;
+ for (next = pcb->ooseq; next != NULL; next = next->next) {
+ if (seqno == next->tcphdr->seqno) {
+ /* The sequence number of the incoming segment is the
+ same as the sequence number of the segment on
+ ->ooseq. We check the lengths to see which one to
+ discard. */
+ if (inseg.len > next->len) {
+ /* The incoming segment is larger than the old
+ segment. We replace some segments with the new
+ one. */
+ struct tcp_seg *cseg = tcp_seg_copy(&inseg);
+ if (cseg != NULL) {
+ if (prev != NULL) {
+ prev->next = cseg;
+ } else {
+ pcb->ooseq = cseg;
+ }
+ tcp_oos_insert_segment(cseg, next);
+ }
+ break;
+ } else {
+ /* Either the lengths are the same or the incoming
+ segment was smaller than the old one; in either
+ case, we ditch the incoming segment. */
+ break;
+ }
+ } else {
+ if (prev == NULL) {
+ if (TCP_SEQ_LT(seqno, next->tcphdr->seqno)) {
+ /* The sequence number of the incoming segment is lower
+ than the sequence number of the first segment on the
+ queue. We put the incoming segment first on the
+ queue. */
+ struct tcp_seg *cseg = tcp_seg_copy(&inseg);
+ if (cseg != NULL) {
+ pcb->ooseq = cseg;
+ tcp_oos_insert_segment(cseg, next);
+ }
+ break;
+ }
+ } else {
+ /*if (TCP_SEQ_LT(prev->tcphdr->seqno, seqno) &&
+ TCP_SEQ_LT(seqno, next->tcphdr->seqno)) {*/
+ if (TCP_SEQ_BETWEEN(seqno, prev->tcphdr->seqno + 1, next->tcphdr->seqno - 1)) {
+ /* The sequence number of the incoming segment is in
+ between the sequence numbers of the previous and
+ the next segment on ->ooseq. We trim the previous
+ segment, delete any next segments fully included in the received
+ segment and trim the received segment, if needed. */
+ struct tcp_seg *cseg = tcp_seg_copy(&inseg);
+ if (cseg != NULL) {
+ if (TCP_SEQ_GT(prev->tcphdr->seqno + prev->len, seqno)) {
+ /* We need to trim the prev segment. */
+ prev->len = (u16_t)(seqno - prev->tcphdr->seqno);
+ pbuf_realloc(prev->p, prev->len);
+ }
+ prev->next = cseg;
+ tcp_oos_insert_segment(cseg, next);
+ }
+ break;
+ }
+ }
+
+#if LWIP_TCP_SACK_OUT
+ /* The new segment goes after the 'next' one. If there is a "hole" in sequence numbers
+ between 'prev' and the beginning of 'next', we want to move sackbeg. */
+ if (prev != NULL && prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) {
+ sackbeg = next->tcphdr->seqno;
+ }
+#endif /* LWIP_TCP_SACK_OUT */
+
+ /* We don't use 'prev' below, so let's set it to current 'next'.
+ This way even if we break the loop below, 'prev' will be pointing
+ at the segment right in front of the newly added one. */
+ prev = next;
+
+ /* If the "next" segment is the last segment on the
+ ooseq queue, we add the incoming segment to the end
+ of the list. */
+ if (next->next == NULL &&
+ TCP_SEQ_GT(seqno, next->tcphdr->seqno)) {
+ if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) {
+ /* segment "next" already contains all data */
+ break;
+ }
+ next->next = tcp_seg_copy(&inseg);
+ if (next->next != NULL) {
+ if (TCP_SEQ_GT(next->tcphdr->seqno + next->len, seqno)) {
+ /* We need to trim the last segment. */
+ next->len = (u16_t)(seqno - next->tcphdr->seqno);
+ pbuf_realloc(next->p, next->len);
+ }
+ /* check if the remote side overruns our receive window */
+ if (TCP_SEQ_GT((u32_t)tcplen + seqno, pcb->rcv_nxt + (u32_t)pcb->rcv_wnd)) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG,
+ ("tcp_receive: other end overran receive window"
+ "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n",
+ seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd));
+ if (TCPH_FLAGS(next->next->tcphdr) & TCP_FIN) {
+ /* Must remove the FIN from the header as we're trimming
+ * that byte of sequence-space from the packet */
+ TCPH_FLAGS_SET(next->next->tcphdr, TCPH_FLAGS(next->next->tcphdr) & ~TCP_FIN);
+ }
+ /* Adjust length of segment to fit in the window. */
+ next->next->len = (u16_t)(pcb->rcv_nxt + pcb->rcv_wnd - seqno);
+ pbuf_realloc(next->next->p, next->next->len);
+ tcplen = TCP_TCPLEN(next->next);
+ LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n",
+ (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd));
+ }
+ }
+ break;
+ }
+ }
+ }
+
+#if LWIP_TCP_SACK_OUT
+ if (pcb->flags & TF_SACK) {
+ if (prev == NULL) {
+ /* The new segment is at the beginning. sackbeg should already be set properly.
+ We need to find the right edge. */
+ next = pcb->ooseq;
+ } else if (prev->next != NULL) {
+ /* The new segment was added after 'prev'. If there is a "hole" between 'prev' and 'prev->next',
+ we need to move sackbeg. After that we should find the right edge. */
+ next = prev->next;
+ if (prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) {
+ sackbeg = next->tcphdr->seqno;
+ }
+ } else {
+ next = NULL;
+ }
+ if (next != NULL) {
+ u32_t sackend = next->tcphdr->seqno;
+ for ( ; (next != NULL) && (sackend == next->tcphdr->seqno); next = next->next) {
+ sackend += next->len;
+ }
+ tcp_add_sack(pcb, sackbeg, sackend);
+ }
+ }
+#endif /* LWIP_TCP_SACK_OUT */
+ }
+#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT)
+ {
+ /* Check that the data on ooseq doesn't exceed one of the limits
+ and throw away everything above that limit. */
+#ifdef TCP_OOSEQ_BYTES_LIMIT
+ const u32_t ooseq_max_blen = TCP_OOSEQ_BYTES_LIMIT(pcb);
+ u32_t ooseq_blen = 0;
+#endif
+#ifdef TCP_OOSEQ_PBUFS_LIMIT
+ const u16_t ooseq_max_qlen = TCP_OOSEQ_PBUFS_LIMIT(pcb);
+ u16_t ooseq_qlen = 0;
+#endif
+ struct tcp_seg *next, *prev = NULL;
+ for (next = pcb->ooseq; next != NULL; prev = next, next = next->next) {
+ struct pbuf *p = next->p;
+ int stop_here = 0;
+#ifdef TCP_OOSEQ_BYTES_LIMIT
+ ooseq_blen += p->tot_len;
+ if (ooseq_blen > ooseq_max_blen) {
+ stop_here = 1;
+ }
+#endif
+#ifdef TCP_OOSEQ_PBUFS_LIMIT
+ ooseq_qlen += pbuf_clen(p);
+ if (ooseq_qlen > ooseq_max_qlen) {
+ stop_here = 1;
+ }
+#endif
+ if (stop_here) {
+#if LWIP_TCP_SACK_OUT
+ if (pcb->flags & TF_SACK) {
+ /* Let's remove all SACKs from next's seqno up. */
+ tcp_remove_sacks_gt(pcb, next->tcphdr->seqno);
+ }
+#endif /* LWIP_TCP_SACK_OUT */
+ /* too much ooseq data, dump this and everything after it */
+ tcp_segs_free(next);
+ if (prev == NULL) {
+ /* first ooseq segment is too much, dump the whole queue */
+ pcb->ooseq = NULL;
+ } else {
+ /* just dump 'next' and everything after it */
+ prev->next = NULL;
+ }
+ break;
+ }
+ }
+ }
+#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */
+#endif /* TCP_QUEUE_OOSEQ */
+
+ /* We send the ACK packet after we've (potentially) dealt with SACKs,
+ so they can be included in the acknowledgment. */
+ tcp_send_empty_ack(pcb);
+ }
+ } else {
+ /* The incoming segment is not within the window. */
+ tcp_send_empty_ack(pcb);
+ }
+ } else {
+ /* Segments with length 0 are taken care of here. Segments that
+ fall out of the window are ACKed. */
+ if (!TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd - 1)) {
+ tcp_ack_now(pcb);
+ }
+ }
+}
+
+static u8_t
+tcp_get_next_optbyte(void)
+{
+ u16_t optidx = tcp_optidx++;
+ if ((tcphdr_opt2 == NULL) || (optidx < tcphdr_opt1len)) {
+ u8_t *opts = (u8_t *)tcphdr + TCP_HLEN;
+ return opts[optidx];
+ } else {
+ u8_t idx = (u8_t)(optidx - tcphdr_opt1len);
+ return tcphdr_opt2[idx];
+ }
+}
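+
+/* Editorial note: tcp_get_next_optbyte() hides the fact that option bytes
+ * may be split across two pbufs: the first tcphdr_opt1len bytes follow the
+ * TCP header directly, the rest are reached through tcphdr_opt2. E.g. with
+ * tcphdr_opt1len == 4 and 8 option bytes in total, indices 0..3 come from
+ * the header pbuf and indices 4..7 from tcphdr_opt2. */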
+
+/**
+ * Parses the options contained in the incoming segment.
+ *
+ * Called from tcp_listen_input() and tcp_process().
+ * Currently, only the MSS option is supported!
+ *
+ * @param pcb the tcp_pcb for which a segment arrived
+ */
+static void
+tcp_parseopt(struct tcp_pcb *pcb)
+{
+ u8_t data;
+ u16_t mss;
+#if LWIP_TCP_TIMESTAMPS
+ u32_t tsval;
+#endif
+
+ LWIP_ASSERT("tcp_parseopt: invalid pcb", pcb != NULL);
+
+ /* Parse the TCP MSS option, if present. */
+ if (tcphdr_optlen != 0) {
+ for (tcp_optidx = 0; tcp_optidx < tcphdr_optlen; ) {
+ u8_t opt = tcp_get_next_optbyte();
+ switch (opt) {
+ case LWIP_TCP_OPT_EOL:
+ /* End of options. */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: EOL\n"));
+ return;
+ case LWIP_TCP_OPT_NOP:
+ /* NOP option. */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: NOP\n"));
+ break;
+ case LWIP_TCP_OPT_MSS:
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: MSS\n"));
+ if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_MSS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_MSS) > tcphdr_optlen) {
+ /* Bad length */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n"));
+ return;
+ }
+ /* An MSS option with the right option length. */
+ mss = (u16_t)(tcp_get_next_optbyte() << 8);
+ mss |= tcp_get_next_optbyte();
+ /* Limit the mss to the configured TCP_MSS and prevent division by zero */
+ pcb->mss = ((mss > TCP_MSS) || (mss == 0)) ? TCP_MSS : mss;
+ break;
+#if LWIP_WND_SCALE
+ case LWIP_TCP_OPT_WS:
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: WND_SCALE\n"));
+ if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_WS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_WS) > tcphdr_optlen) {
+ /* Bad length */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n"));
+ return;
+ }
+ /* A WND_SCALE option with the right option length. */
+ data = tcp_get_next_optbyte();
+ /* If syn was received with wnd scale option,
+ activate wnd scale opt, but only if this is not a retransmission */
+ if ((flags & TCP_SYN) && !(pcb->flags & TF_WND_SCALE)) {
+ pcb->snd_scale = data;
+ if (pcb->snd_scale > 14U) {
+ pcb->snd_scale = 14U;
+ }
+ pcb->rcv_scale = TCP_RCV_SCALE;
+ tcp_set_flags(pcb, TF_WND_SCALE);
+ /* window scaling is enabled, we can use the full receive window */
+ LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND));
+ LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND));
+ pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND;
+ }
+ break;
+#endif /* LWIP_WND_SCALE */
+#if LWIP_TCP_TIMESTAMPS
+ case LWIP_TCP_OPT_TS:
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: TS\n"));
+ if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_TS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_TS) > tcphdr_optlen) {
+ /* Bad length */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n"));
+ return;
+ }
+ /* TCP timestamp option with valid length */
+ tsval = tcp_get_next_optbyte();
+ tsval |= (tcp_get_next_optbyte() << 8);
+ tsval |= (tcp_get_next_optbyte() << 16);
+ tsval |= (tcp_get_next_optbyte() << 24);
+ if (flags & TCP_SYN) {
+ pcb->ts_recent = lwip_ntohl(tsval);
+ /* Enable sending timestamps in every segment now that we know
+ the remote host supports it. */
+ tcp_set_flags(pcb, TF_TIMESTAMP);
+ } else if (TCP_SEQ_BETWEEN(pcb->ts_lastacksent, seqno, seqno + tcplen)) {
+ pcb->ts_recent = lwip_ntohl(tsval);
+ }
+ /* Advance to next option (6 bytes already read) */
+ tcp_optidx += LWIP_TCP_OPT_LEN_TS - 6;
+ break;
+#endif /* LWIP_TCP_TIMESTAMPS */
+#if LWIP_TCP_SACK_OUT
+ case LWIP_TCP_OPT_SACK_PERM:
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: SACK_PERM\n"));
+ if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_SACK_PERM || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_SACK_PERM) > tcphdr_optlen) {
+ /* Bad length */
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n"));
+ return;
+ }
+ /* TCP SACK_PERM option with valid length */
+ if (flags & TCP_SYN) {
+ /* We only set it if we receive it in a SYN (or SYN+ACK) packet */
+ tcp_set_flags(pcb, TF_SACK);
+ }
+ break;
+#endif /* LWIP_TCP_SACK_OUT */
+ default:
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: other\n"));
+ data = tcp_get_next_optbyte();
+ if (data < 2) {
+ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n"));
+ /* If the length field is zero, the options are malformed
+ and we don't process them further. */
+ return;
+ }
+ /* All other options have a length field, so that we easily
+ can skip past them. */
+ tcp_optidx += data - 2;
+ }
+ }
+ }
+}
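+
+/* Wire-format example (editorial): an MSS option arrives as four bytes,
+ * kind 0x02, length 0x04, then the MSS in network byte order, so
+ * {0x02, 0x04, 0x05, 0xb4} advertises an MSS of 0x05b4 == 1460; the code
+ * above additionally clamps the value to the configured TCP_MSS. */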
+
+void
+tcp_trigger_input_pcb_close(void)
+{
+ recv_flags |= TF_CLOSED;
+}
+
+#if LWIP_TCP_SACK_OUT
+/**
+ * Called by tcp_receive() to add new SACK entry.
+ *
+ * The new SACK entry will be placed at the beginning of rcv_sacks[], as the newest one.
+ * Existing SACK entries will be "pushed back", to preserve their order.
+ * This is the behavior described in RFC 2018, section 4.
+ *
+ * @param pcb the tcp_pcb for which a segment arrived
+ * @param left the left side of the SACK (the first sequence number)
+ * @param right the right side of the SACK (the first sequence number past this SACK)
+ */
+static void
+tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right)
+{
+ u8_t i;
+ u8_t unused_idx;
+
+ if ((pcb->flags & TF_SACK) == 0 || !TCP_SEQ_LT(left, right)) {
+ return;
+ }
+
+ /* First, let's remove all SACKs that are no longer needed (because they overlap with the newest one),
+ while moving all other SACKs forward.
+ We run this loop for all entries, until we find the first invalid one.
+ There is no point checking after that. */
+ for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) {
+ /* We only want to use SACK at [i] if it doesn't overlap with left:right range.
+ It does not overlap if its right side is before the newly added SACK,
+ or if its left side is after the newly added SACK.
+ NOTE: The equality should not really happen, but it doesn't hurt. */
+ if (TCP_SEQ_LEQ(pcb->rcv_sacks[i].right, left) || TCP_SEQ_LEQ(right, pcb->rcv_sacks[i].left)) {
+ if (unused_idx != i) {
+ /* We don't need to copy if it's already in the right spot */
+ pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i];
+ }
+ ++unused_idx;
+ }
+ }
+
+ /* Now 'unused_idx' is the index of the first invalid SACK entry,
+ anywhere between 0 (no valid entries) and LWIP_TCP_MAX_SACK_NUM (all entries are valid).
+ We want to clear this and all following SACKs.
+ However, we will be adding another one in the front (and shifting everything else back).
+ So let's just iterate from the back, and set each entry to the one to the left if it's valid,
+ or to 0 if it is not. */
+ for (i = LWIP_TCP_MAX_SACK_NUM - 1; i > 0; --i) {
+ /* [i] is the index we are setting, and the value should be at index [i-1],
+ or 0 if that index is unused (>= unused_idx). */
+ if (i - 1 >= unused_idx) {
+ /* [i-1] is unused. Let's clear [i]. */
+ pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0;
+ } else {
+ pcb->rcv_sacks[i] = pcb->rcv_sacks[i - 1];
+ }
+ }
+
+ /* And now we can store the newest SACK */
+ pcb->rcv_sacks[0].left = left;
+ pcb->rcv_sacks[0].right = right;
+}
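+
+/* Worked example for tcp_add_sack() (editorial): with rcv_sacks holding
+ * [1000,2000) and [3000,4000) and a new block [1500,2500) arriving, the
+ * overlapping [1000,2000) entry is dropped, [3000,4000) is kept and
+ * shifted back, and the array becomes [1500,2500), [3000,4000): per
+ * RFC 2018 the most recently reported block always sits at index 0. */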
+
+/**
+ * Called to remove a range of SACKs.
+ *
+ * SACK entries will be removed or adjusted to not acknowledge any sequence
+ * numbers that are less than the 'seq' passed. It not only invalidates entries,
+ * but also moves all entries that are still valid to the beginning.
+ *
+ * @param pcb the tcp_pcb to modify
+ * @param seq the lowest sequence number to keep in SACK entries
+ */
+static void
+tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq)
+{
+ u8_t i;
+ u8_t unused_idx;
+
+ /* We run this loop for all entries, until we find the first invalid one.
+ There is no point checking after that. */
+ for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) {
+ /* We only want to use SACK at index [i] if its right side is > 'seq'. */
+ if (TCP_SEQ_GT(pcb->rcv_sacks[i].right, seq)) {
+ if (unused_idx != i) {
+ /* We only copy it if it's not in the right spot already. */
+ pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i];
+ }
+ /* NOTE: It is possible that its left side is < 'seq', in which case we should adjust it. */
+ if (TCP_SEQ_LT(pcb->rcv_sacks[unused_idx].left, seq)) {
+ pcb->rcv_sacks[unused_idx].left = seq;
+ }
+ ++unused_idx;
+ }
+ }
+
+ /* We also need to invalidate everything from 'unused_idx' till the end */
+ for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) {
+ pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0;
+ }
+}
+
+#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT)
+/**
+ * Called to remove a range of SACKs.
+ *
+ * SACK entries will be removed or adjusted to not acknowledge any sequence
+ * numbers that are greater than (or equal to) the 'seq' passed. It not only invalidates entries,
+ * but also moves all entries that are still valid to the beginning.
+ *
+ * @param pcb the tcp_pcb to modify
+ * @param seq the highest sequence number to keep in SACK entries
+ */
+static void
+tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq)
+{
+ u8_t i;
+ u8_t unused_idx;
+
+ /* We run this loop for all entries, until we find the first invalid one.
+ There is no point checking after that. */
+ for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) {
+ /* We only want to use SACK at index [i] if its left side is < 'seq'. */
+ if (TCP_SEQ_LT(pcb->rcv_sacks[i].left, seq)) {
+ if (unused_idx != i) {
+ /* We only copy it if it's not in the right spot already. */
+ pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i];
+ }
+ /* NOTE: It is possible that its right side is > 'seq', in which case we should adjust it. */
+ if (TCP_SEQ_GT(pcb->rcv_sacks[unused_idx].right, seq)) {
+ pcb->rcv_sacks[unused_idx].right = seq;
+ }
+ ++unused_idx;
+ }
+ }
+
+ /* We also need to invalidate everything from 'unused_idx' till the end */
+ for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) {
+ pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0;
+ }
+}
+#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */
+
+#endif /* LWIP_TCP_SACK_OUT */
+
+#endif /* LWIP_TCP */
diff --git a/lwip/src/core/tcp_out.c b/lwip/src/core/tcp_out.c
new file mode 100644
index 0000000..724df10
--- /dev/null
+++ b/lwip/src/core/tcp_out.c
@@ -0,0 +1,2190 @@
+/**
+ * @file
+ * Transmission Control Protocol, outgoing traffic
+ *
+ * The output functions of TCP.
+ *
+ * There are two distinct ways for TCP segments to get sent:
+ * - queued data: these are segments transferring data or segments containing
+ * SYN or FIN (which both count as one sequence number). They are created as
+ * struct @ref pbuf together with a struct tcp_seg and enqueued to the
+ * unsent list of the pcb. They are sent by tcp_output:
+ * - @ref tcp_write : creates data segments
+ * - @ref tcp_split_unsent_seg : splits a data segment
+ * - @ref tcp_enqueue_flags : creates SYN-only or FIN-only segments
+ * - @ref tcp_output / tcp_output_segment : finalize the tcp header
+ * (e.g. sequence numbers, options, checksum) and output to IP
+ * - the various tcp_rexmit functions shuffle around segments between the
+ * unsent and unacked lists to retransmit them
+ * - tcp_create_segment and tcp_pbuf_prealloc allocate pbuf and
+ * segment for these functions
+ * - direct send: these segments don't contain data but control the connection
+ * behaviour. They are created as pbuf only and sent directly without
+ * enqueueing them:
+ * - @ref tcp_send_empty_ack sends an ACK-only segment
+ * - @ref tcp_rst sends a RST segment
+ * - @ref tcp_keepalive sends a keepalive segment
+ * - @ref tcp_zero_window_probe sends a window probe segment
+ * - tcp_output_alloc_header allocates a header-only pbuf for these functions
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/stats.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#if LWIP_TCP_TIMESTAMPS
+#include "lwip/sys.h"
+#endif
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/* Allow adding custom TCP header options by defining this hook */
+#ifdef LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH
+#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, LWIP_TCP_OPT_LENGTH(flags))
+#else
+#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_TCP_OPT_LENGTH(flags)
+#endif
+
+/* Define some copy-macros for checksum-on-copy so that the code looks
+ nicer by preventing too many ifdef's. */
+#if TCP_CHECKSUM_ON_COPY
+#define TCP_DATA_COPY(dst, src, len, seg) do { \
+ tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \
+ len, &seg->chksum, &seg->chksum_swapped); \
+ seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0)
+#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \
+ tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped);
+#else /* TCP_CHECKSUM_ON_COPY*/
+#define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len)
+#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len)
+#endif /* TCP_CHECKSUM_ON_COPY*/
+
+/** Define this to 1 for an extra check that the output checksum is valid
+ * (useful when the checksum is generated by the application, not the stack) */
+#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK
+#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0
+#endif
+/* Allow overriding a sanity-check failure, from a warning to e.g. a hard failure */
+#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
+#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL
+#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg)
+#endif
+#endif
+
+#if TCP_OVERSIZE
+/** The size of segment pbufs created when TCP_OVERSIZE is enabled */
+#ifndef TCP_OVERSIZE_CALC_LENGTH
+#define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE)
+#endif
+#endif
+
+/* Forward declarations.*/
+static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif);
+
+/* tcp_route: common code that returns a fixed bound netif or calls ip_route */
+static struct netif *
+tcp_route(const struct tcp_pcb *pcb, const ip_addr_t *src, const ip_addr_t *dst)
+{
+ LWIP_UNUSED_ARG(src); /* in case IPv4-only and source-based routing is disabled */
+
+ if ((pcb != NULL) && (pcb->netif_idx != NETIF_NO_INDEX)) {
+ return netif_get_by_index(pcb->netif_idx);
+ } else {
+ return ip_route(src, dst);
+ }
+}
+
+/**
+ * Create a TCP segment with prefilled header.
+ *
+ * Called by @ref tcp_write, @ref tcp_enqueue_flags and @ref tcp_split_unsent_seg
+ *
+ * @param pcb Protocol control block for the TCP connection.
+ * @param p pbuf that is used to hold the TCP header.
+ * @param hdrflags TCP flags for header.
+ * @param seqno TCP sequence number of this packet
+ * @param optflags options to include in TCP header
+ * @return a new tcp_seg pointing to p, or NULL.
+ * The TCP header is filled in except ackno and wnd.
+ * p is freed on failure.
+ */
+static struct tcp_seg *
+tcp_create_segment(const struct tcp_pcb *pcb, struct pbuf *p, u8_t hdrflags, u32_t seqno, u8_t optflags)
+{
+ struct tcp_seg *seg;
+ u8_t optlen;
+
+ LWIP_ASSERT("tcp_create_segment: invalid pcb", pcb != NULL);
+ LWIP_ASSERT("tcp_create_segment: invalid pbuf", p != NULL);
+
+ optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb);
+
+ if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n"));
+ pbuf_free(p);
+ return NULL;
+ }
+ seg->flags = optflags;
+ seg->next = NULL;
+ seg->p = p;
+ LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen);
+ seg->len = p->tot_len - optlen;
+#if TCP_OVERSIZE_DBGCHECK
+ seg->oversize_left = 0;
+#endif /* TCP_OVERSIZE_DBGCHECK */
+#if TCP_CHECKSUM_ON_COPY
+ seg->chksum = 0;
+ seg->chksum_swapped = 0;
+ /* check optflags */
+ LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED",
+ (optflags & TF_SEG_DATA_CHECKSUMMED) == 0);
+#endif /* TCP_CHECKSUM_ON_COPY */
+
+ /* build TCP header */
+ if (pbuf_add_header(p, TCP_HLEN)) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n"));
+ TCP_STATS_INC(tcp.err);
+ tcp_seg_free(seg);
+ return NULL;
+ }
+ seg->tcphdr = (struct tcp_hdr *)seg->p->payload;
+ seg->tcphdr->src = lwip_htons(pcb->local_port);
+ seg->tcphdr->dest = lwip_htons(pcb->remote_port);
+ seg->tcphdr->seqno = lwip_htonl(seqno);
+ /* ackno is set in tcp_output */
+ TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), hdrflags);
+ /* wnd and chksum are set in tcp_output */
+ seg->tcphdr->urgp = 0;
+ return seg;
+}
+
+/**
+ * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end.
+ *
+ * This function is like pbuf_alloc(layer, length, PBUF_RAM) except
+ * there may be extra bytes available at the end.
+ *
+ * Called by @ref tcp_write
+ *
+ * @param layer flag to define header size.
+ * @param length size of the pbuf's payload.
+ * @param max_length maximum usable size of payload+oversize.
+ * @param oversize pointer to a u16_t that will receive the number of usable tail bytes.
+ * @param pcb The TCP connection that will enqueue the pbuf.
+ * @param apiflags API flags given to tcp_write.
+ * @param first_seg true when this pbuf will be used in the first enqueued segment.
+ */
+#if TCP_OVERSIZE
+static struct pbuf *
+tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length,
+ u16_t *oversize, const struct tcp_pcb *pcb, u8_t apiflags,
+ u8_t first_seg)
+{
+ struct pbuf *p;
+ u16_t alloc = length;
+
+ LWIP_ASSERT("tcp_pbuf_prealloc: invalid oversize", oversize != NULL);
+ LWIP_ASSERT("tcp_pbuf_prealloc: invalid pcb", pcb != NULL);
+
+#if LWIP_NETIF_TX_SINGLE_PBUF
+ LWIP_UNUSED_ARG(max_length);
+ LWIP_UNUSED_ARG(pcb);
+ LWIP_UNUSED_ARG(apiflags);
+ LWIP_UNUSED_ARG(first_seg);
+ alloc = max_length;
+#else /* LWIP_NETIF_TX_SINGLE_PBUF */
+ if (length < max_length) {
+ /* Should we allocate an oversized pbuf, or just the minimum
+ * length required? If tcp_write is going to be called again
+ * before this segment is transmitted, we want the oversized
+ * buffer. If the segment will be transmitted immediately, we can
+ * save memory by allocating only length. We use a simple
+ * heuristic based on the following information:
+ *
+ * Did the user set TCP_WRITE_FLAG_MORE?
+ *
+ * Will the Nagle algorithm defer transmission of this segment?
+ */
+ if ((apiflags & TCP_WRITE_FLAG_MORE) ||
+ (!(pcb->flags & TF_NODELAY) &&
+ (!first_seg ||
+ pcb->unsent != NULL ||
+ pcb->unacked != NULL))) {
+ alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length)));
+ }
+ }
+#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
+ p = pbuf_alloc(layer, alloc, PBUF_RAM);
+ if (p == NULL) {
+ return NULL;
+ }
+ LWIP_ASSERT("need unchained pbuf", p->next == NULL);
+ *oversize = p->len - length;
+ /* trim p->len to the currently used size */
+ p->len = p->tot_len = length;
+ return p;
+}
+#else /* TCP_OVERSIZE */
+#define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM)
+#endif /* TCP_OVERSIZE */
+
+#if TCP_CHECKSUM_ON_COPY
+/** Add a checksum of newly added data to the segment.
+ *
+ * Called by tcp_write and tcp_split_unsent_seg.
+ */
+static void
+tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum,
+ u8_t *seg_chksum_swapped)
+{
+ u32_t helper;
+ /* add chksum to old chksum and fold to u16_t */
+ helper = chksum + *seg_chksum;
+ chksum = FOLD_U32T(helper);
+ if ((len & 1) != 0) {
+ *seg_chksum_swapped = 1 - *seg_chksum_swapped;
+ chksum = SWAP_BYTES_IN_WORD(chksum);
+ }
+ *seg_chksum = chksum;
+}
+#endif /* TCP_CHECKSUM_ON_COPY */
+
+/** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen).
+ *
+ * @param pcb the tcp pcb to check for
+ * @param len length of data to send (checked against snd_buf)
+ * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise
+ */
+static err_t
+tcp_write_checks(struct tcp_pcb *pcb, u16_t len)
+{
+ LWIP_ASSERT("tcp_write_checks: invalid pcb", pcb != NULL);
+
+ /* connection is in invalid state for data transmission? */
+ if ((pcb->state != ESTABLISHED) &&
+ (pcb->state != CLOSE_WAIT) &&
+ (pcb->state != SYN_SENT) &&
+ (pcb->state != SYN_RCVD)) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n"));
+ return ERR_CONN;
+ } else if (len == 0) {
+ return ERR_OK;
+ }
+
+ /* fail on too much data */
+ if (len > pcb->snd_buf) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n",
+ len, pcb->snd_buf));
+ tcp_set_flags(pcb, TF_NAGLEMEMERR);
+ return ERR_MEM;
+ }
+
+ LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen));
+
+ /* If total number of pbufs on the unsent/unacked queues exceeds the
+ * configured maximum, return an error */
+ /* check for configured max queuelen and possible overflow */
+ if (pcb->snd_queuelen >= LWIP_MIN(TCP_SND_QUEUELEN, (TCP_SNDQUEUELEN_OVERFLOW + 1))) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n",
+ pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN));
+ TCP_STATS_INC(tcp.memerr);
+ tcp_set_flags(pcb, TF_NAGLEMEMERR);
+ return ERR_MEM;
+ }
+ if (pcb->snd_queuelen != 0) {
+ LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty",
+ pcb->unacked != NULL || pcb->unsent != NULL);
+ } else {
+ LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty",
+ pcb->unacked == NULL && pcb->unsent == NULL);
+ }
+ return ERR_OK;
+}
+
+/**
+ * @ingroup tcp_raw
+ * Write data for sending (but does not send it immediately).
+ *
+ * It waits in the expectation of more data being sent soon (as
+ * it can send the data more efficiently by combining it into larger
+ * segments).
+ * To prompt the system to send data now, call tcp_output() after
+ * calling tcp_write().
+ *
+ * This function enqueues the data pointed to by the argument dataptr. The length of
+ * the data is passed as the len parameter. The apiflags can be one or more of:
+ * - TCP_WRITE_FLAG_COPY: indicates that new memory should be allocated
+ * for the data to be copied into. If this flag is not given, no new memory
+ * is allocated and the data is only referenced by pointer. This also means
+ * that the memory behind dataptr must not change until the data is
+ * ACKed by the remote host
+ * - TCP_WRITE_FLAG_MORE: indicates that more data follows. If this is omitted,
+ * the PSH flag is set in the last segment created by this call to tcp_write.
+ * If this flag is given, the PSH flag is not set.
+ *
+ * The tcp_write() function will fail and return ERR_MEM if the length
+ * of the data exceeds the current send buffer size or if the length of
+ * the queue of outgoing segments is larger than the upper limit defined
+ * in lwipopts.h. The number of bytes available in the output queue can
+ * be retrieved with the tcp_sndbuf() function.
+ *
+ * The proper way to use this function is to call the function with at
+ * most tcp_sndbuf() bytes of data. If the function returns ERR_MEM,
+ * the application should wait until some of the currently enqueued
+ * data has been successfully received by the other host and try again.
+ *
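+ * A minimal usage sketch (illustrative only: 'data' and 'data_len' are
+ * assumed application variables; error handling beyond ERR_MEM is elided):
+ * @code{.c}
+ * u16_t send_len = LWIP_MIN(data_len, tcp_sndbuf(pcb));
+ * if (tcp_write(pcb, data, send_len, TCP_WRITE_FLAG_COPY) == ERR_OK) {
+ *   tcp_output(pcb);  // prompt the stack to transmit now
+ * }                   // on ERR_MEM: wait for the sent callback, then retry
+ * @endcode
+ *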
+ * @param pcb Protocol control block for the TCP connection to enqueue data for.
+ * @param arg Pointer to the data to be enqueued for sending.
+ * @param len Data length in bytes
+ * @param apiflags combination of the following flags:
+ * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack
+ * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will not be set on last segment sent.
+ * @return ERR_OK if enqueued, another err_t on error
+ */
+err_t
+tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags)
+{
+ struct pbuf *concat_p = NULL;
+ struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL;
+ u16_t pos = 0; /* position in 'arg' data */
+ u16_t queuelen;
+ u8_t optlen;
+ u8_t optflags = 0;
+#if TCP_OVERSIZE
+ u16_t oversize = 0;
+ u16_t oversize_used = 0;
+#if TCP_OVERSIZE_DBGCHECK
+ u16_t oversize_add = 0;
+#endif /* TCP_OVERSIZE_DBGCHECK*/
+#endif /* TCP_OVERSIZE */
+ u16_t extendlen = 0;
+#if TCP_CHECKSUM_ON_COPY
+ u16_t concat_chksum = 0;
+ u8_t concat_chksum_swapped = 0;
+ u16_t concat_chksummed = 0;
+#endif /* TCP_CHECKSUM_ON_COPY */
+ err_t err;
+ u16_t mss_local;
+
+ LWIP_ERROR("tcp_write: invalid pcb", pcb != NULL, return ERR_ARG);
+
+ /* don't allocate segments bigger than half the maximum window we ever received */
+ mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max / 2));
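+  /* fall back to the full MSS while snd_wnd_max is still zero (no window update seen yet) */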
+ mss_local = mss_local ? mss_local : pcb->mss;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_NETIF_TX_SINGLE_PBUF
+ /* Always copy to try to create single pbufs for TX */
+ apiflags |= TCP_WRITE_FLAG_COPY;
+#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
+
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n",
+ (void *)pcb, arg, len, (u16_t)apiflags));
+ LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)",
+ arg != NULL, return ERR_ARG;);
+
+ err = tcp_write_checks(pcb, len);
+ if (err != ERR_OK) {
+ return err;
+ }
+ queuelen = pcb->snd_queuelen;
+
+#if LWIP_TCP_TIMESTAMPS
+ if ((pcb->flags & TF_TIMESTAMP)) {
+ /* Make sure the timestamp option is only included in data segments if we
+ agreed about it with the remote host. */
+ optflags = TF_SEG_OPTS_TS;
+ optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(TF_SEG_OPTS_TS, pcb);
+ /* ensure that segments can hold at least one data byte... */
+ mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1);
+ } else
+#endif /* LWIP_TCP_TIMESTAMPS */
+ {
+ optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb);
+ }
+
+
+ /*
+ * TCP segmentation is done in three phases with increasing complexity:
+ *
+ * 1. Copy data directly into an oversized pbuf.
+ * 2. Chain a new pbuf to the end of pcb->unsent.
+ * 3. Create new segments.
+ *
+ * We may run out of memory at any point. In that case we must
+ * return ERR_MEM and not change anything in pcb. Therefore, all
+ * changes are recorded in local variables and committed at the end
+ * of the function. Some pcb fields are maintained in local copies:
+ *
+ * queuelen = pcb->snd_queuelen
+ * oversize = pcb->unsent_oversize
+ *
+ * These variables are set consistently by the phases:
+ *
+ * seg points to the last segment tampered with.
+ *
+ * pos records progress as data is segmented.
+ */
+
+ /* Find the tail of the unsent queue. */
+ if (pcb->unsent != NULL) {
+ u16_t space;
+ u16_t unsent_optlen;
+
+ /* @todo: this could be sped up by keeping last_unsent in the pcb */
+ for (last_unsent = pcb->unsent; last_unsent->next != NULL;
+ last_unsent = last_unsent->next);
+
+ /* Usable space at the end of the last unsent segment */
+ unsent_optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(last_unsent->flags, pcb);
+ LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen);
+ space = mss_local - (last_unsent->len + unsent_optlen);
+
+ /*
+ * Phase 1: Copy data directly into an oversized pbuf.
+ *
+ * The number of bytes copied is recorded in the oversize_used
+ * variable. The actual copying is done at the bottom of the
+ * function.
+ */
+#if TCP_OVERSIZE
+#if TCP_OVERSIZE_DBGCHECK
+ /* check that pcb->unsent_oversize matches last_unsent->oversize_left */
+ LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)",
+ pcb->unsent_oversize == last_unsent->oversize_left);
+#endif /* TCP_OVERSIZE_DBGCHECK */
+ oversize = pcb->unsent_oversize;
+ if (oversize > 0) {
+ LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space);
+ seg = last_unsent;
+ oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len));
+ pos += oversize_used;
+ oversize -= oversize_used;
+ space -= oversize_used;
+ }
+ /* now we are either finished or oversize is zero */
+ LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len));
+#endif /* TCP_OVERSIZE */
+
+#if !LWIP_NETIF_TX_SINGLE_PBUF
+ /*
+ * Phase 2: Chain a new pbuf to the end of pcb->unsent.
+ *
+ * As an exception when NOT copying the data, if the given data buffer
+ * directly follows the last unsent data buffer in memory, extend the last
+ * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation.
+ *
+ * We don't extend segments containing SYN/FIN flags or options
+ * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at
+ * the end.
+ *
+ * This phase is skipped for LWIP_NETIF_TX_SINGLE_PBUF as we could only execute
+ * it after rexmit puts a segment from unacked to unsent and at this point,
+ * oversize info is lost.
+ */
+ if ((pos < len) && (space > 0) && (last_unsent->len > 0)) {
+ u16_t seglen = LWIP_MIN(space, len - pos);
+ seg = last_unsent;
+
+ /* Create a pbuf with a copy or reference to seglen bytes. We
+ * can use PBUF_RAW here since the data appears in the middle of
+ * a segment. A header will never be prepended. */
+ if (apiflags & TCP_WRITE_FLAG_COPY) {
+ /* Data is copied */
+ if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n",
+ seglen));
+ goto memerr;
+ }
+#if TCP_OVERSIZE_DBGCHECK
+ oversize_add = oversize;
+#endif /* TCP_OVERSIZE_DBGCHECK */
+ TCP_DATA_COPY2(concat_p->payload, (const u8_t *)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped);
+#if TCP_CHECKSUM_ON_COPY
+ concat_chksummed += seglen;
+#endif /* TCP_CHECKSUM_ON_COPY */
+ queuelen += pbuf_clen(concat_p);
+ } else {
+ /* Data is not copied */
+ /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */
+ struct pbuf *p;
+ for (p = last_unsent->p; p->next != NULL; p = p->next);
+ if (((p->type_internal & (PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_DATA_VOLATILE)) == 0) &&
+ (const u8_t *)p->payload + p->len == (const u8_t *)arg) {
+ LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0);
+ extendlen = seglen;
+ } else {
+ if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
+ goto memerr;
+ }
+ /* reference the non-volatile payload data */
+ ((struct pbuf_rom *)concat_p)->payload = (const u8_t *)arg + pos;
+ queuelen += pbuf_clen(concat_p);
+ }
+#if TCP_CHECKSUM_ON_COPY
+ /* calculate the checksum of nocopy-data */
+ tcp_seg_add_chksum(~inet_chksum((const u8_t *)arg + pos, seglen), seglen,
+ &concat_chksum, &concat_chksum_swapped);
+ concat_chksummed += seglen;
+#endif /* TCP_CHECKSUM_ON_COPY */
+ }
+
+ pos += seglen;
+ }
+#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
+ } else {
+#if TCP_OVERSIZE
+ LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)",
+ pcb->unsent_oversize == 0);
+#endif /* TCP_OVERSIZE */
+ }
+
+ /*
+ * Phase 3: Create new segments.
+ *
+ * The new segments are chained together in the local 'queue'
+ * variable, ready to be appended to pcb->unsent.
+ */
+ while (pos < len) {
+ struct pbuf *p;
+ u16_t left = len - pos;
+ u16_t max_len = mss_local - optlen;
+ u16_t seglen = LWIP_MIN(left, max_len);
+#if TCP_CHECKSUM_ON_COPY
+ u16_t chksum = 0;
+ u8_t chksum_swapped = 0;
+#endif /* TCP_CHECKSUM_ON_COPY */
+
+ if (apiflags & TCP_WRITE_FLAG_COPY) {
+ /* If copy is set, memory should be allocated and data copied
+ * into pbuf */
+ if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen));
+ goto memerr;
+ }
+ LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen",
+ (p->len >= seglen));
+ TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t *)arg + pos, seglen, &chksum, &chksum_swapped);
+ } else {
+ /* Copy is not set: First allocate a pbuf for holding the data.
+ * Since the referenced data is available at least until it is
+ * sent out on the link (as it has to be ACKed by the remote
+ * party) we can safely use PBUF_ROM instead of PBUF_REF here.
+ */
+ struct pbuf *p2;
+#if TCP_OVERSIZE
+ LWIP_ASSERT("oversize == 0", oversize == 0);
+#endif /* TCP_OVERSIZE */
+ if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
+ goto memerr;
+ }
+#if TCP_CHECKSUM_ON_COPY
+ /* calculate the checksum of nocopy-data */
+ chksum = ~inet_chksum((const u8_t *)arg + pos, seglen);
+ if (seglen & 1) {
+ chksum_swapped = 1;
+ chksum = SWAP_BYTES_IN_WORD(chksum);
+ }
+#endif /* TCP_CHECKSUM_ON_COPY */
+ /* reference the non-volatile payload data */
+ ((struct pbuf_rom *)p2)->payload = (const u8_t *)arg + pos;
+
+ /* Second, allocate a pbuf for the headers. */
+ if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
+ /* If allocation fails, we have to deallocate the data pbuf as
+ * well. */
+ pbuf_free(p2);
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n"));
+ goto memerr;
+ }
+ /* Concatenate the headers and data pbufs together. */
+ pbuf_cat(p/*header*/, p2/*data*/);
+ }
+
+ queuelen += pbuf_clen(p);
+
+ /* Now that there are more segments queued, we check again if the
+ * length of the queue exceeds the configured maximum or
+ * overflows. */
+ if (queuelen > LWIP_MIN(TCP_SND_QUEUELEN, TCP_SNDQUEUELEN_OVERFLOW)) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n",
+ queuelen, (int)TCP_SND_QUEUELEN));
+ pbuf_free(p);
+ goto memerr;
+ }
+
+ if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) {
+ goto memerr;
+ }
+#if TCP_OVERSIZE_DBGCHECK
+ seg->oversize_left = oversize;
+#endif /* TCP_OVERSIZE_DBGCHECK */
+#if TCP_CHECKSUM_ON_COPY
+ seg->chksum = chksum;
+ seg->chksum_swapped = chksum_swapped;
+ seg->flags |= TF_SEG_DATA_CHECKSUMMED;
+#endif /* TCP_CHECKSUM_ON_COPY */
+
+ /* first segment of to-be-queued data? */
+ if (queue == NULL) {
+ queue = seg;
+ } else {
+ /* Attach the segment to the end of the queued segments */
+ LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL);
+ prev_seg->next = seg;
+ }
+ /* remember last segment of to-be-queued data for next iteration */
+ prev_seg = seg;
+
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n",
+ lwip_ntohl(seg->tcphdr->seqno),
+ lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg)));
+
+ pos += seglen;
+ }
+
+ /*
+ * All three segmentation phases were successful. We can commit the
+ * transaction.
+ */
+#if TCP_OVERSIZE_DBGCHECK
+ if ((last_unsent != NULL) && (oversize_add != 0)) {
+ last_unsent->oversize_left += oversize_add;
+ }
+#endif /* TCP_OVERSIZE_DBGCHECK */
+
+ /*
+ * Phase 1: If data has been added to the preallocated tail of
+ * last_unsent, we update the length fields of the pbuf chain.
+ */
+#if TCP_OVERSIZE
+ if (oversize_used > 0) {
+ struct pbuf *p;
+ /* Bump tot_len of whole chain, len of tail */
+ for (p = last_unsent->p; p; p = p->next) {
+ p->tot_len += oversize_used;
+ if (p->next == NULL) {
+ TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent);
+ p->len += oversize_used;
+ }
+ }
+ last_unsent->len += oversize_used;
+#if TCP_OVERSIZE_DBGCHECK
+ LWIP_ASSERT("last_unsent->oversize_left >= oversize_used",
+ last_unsent->oversize_left >= oversize_used);
+ last_unsent->oversize_left -= oversize_used;
+#endif /* TCP_OVERSIZE_DBGCHECK */
+ }
+ pcb->unsent_oversize = oversize;
+#endif /* TCP_OVERSIZE */
+
+ /*
+ * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we
+ * determined that the last ROM pbuf can be extended to include the new data.
+ */
+ if (concat_p != NULL) {
+ LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty",
+ (last_unsent != NULL));
+ pbuf_cat(last_unsent->p, concat_p);
+ last_unsent->len += concat_p->tot_len;
+ } else if (extendlen > 0) {
+ struct pbuf *p;
+ LWIP_ASSERT("tcp_write: extension of reference requires reference",
+ last_unsent != NULL && last_unsent->p != NULL);
+ for (p = last_unsent->p; p->next != NULL; p = p->next) {
+ p->tot_len += extendlen;
+ }
+ p->tot_len += extendlen;
+ p->len += extendlen;
+ last_unsent->len += extendlen;
+ }
+
+#if TCP_CHECKSUM_ON_COPY
+ if (concat_chksummed) {
+ LWIP_ASSERT("tcp_write: concat checksum needs concatenated data",
+ concat_p != NULL || extendlen > 0);
+    /* if the concat checksum was swapped, swap it back */
+ if (concat_chksum_swapped) {
+ concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum);
+ }
+ tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum,
+ &last_unsent->chksum_swapped);
+ last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED;
+ }
+#endif /* TCP_CHECKSUM_ON_COPY */
+
+ /*
+ * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that
+ * is harmless
+ */
+ if (last_unsent == NULL) {
+ pcb->unsent = queue;
+ } else {
+ last_unsent->next = queue;
+ }
+
+ /*
+ * Finally update the pcb state.
+ */
+ pcb->snd_lbb += len;
+ pcb->snd_buf -= len;
+ pcb->snd_queuelen = queuelen;
+
+ LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n",
+ pcb->snd_queuelen));
+ if (pcb->snd_queuelen != 0) {
+ LWIP_ASSERT("tcp_write: valid queue length",
+ pcb->unacked != NULL || pcb->unsent != NULL);
+ }
+
+ /* Set the PSH flag in the last segment that we enqueued. */
+ if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE) == 0)) {
+ TCPH_SET_FLAG(seg->tcphdr, TCP_PSH);
+ }
+
+ return ERR_OK;
+memerr:
+ tcp_set_flags(pcb, TF_NAGLEMEMERR);
+ TCP_STATS_INC(tcp.memerr);
+
+ if (concat_p != NULL) {
+ pbuf_free(concat_p);
+ }
+ if (queue != NULL) {
+ tcp_segs_free(queue);
+ }
+ if (pcb->snd_queuelen != 0) {
+ LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL ||
+ pcb->unsent != NULL);
+ }
+ LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen));
+ return ERR_MEM;
+}
+
+/**
+ * Split the segment at the head of the unsent queue. If the return value is
+ * not ERR_OK, the existing head remains intact.
+ *
+ * The split is accomplished by creating a new TCP segment and pbuf
+ * which hold the remainder payload after the split. The original
+ * pbuf is trimmed to the new length. This allows splitting of read-only
+ * pbufs.
+ *
+ * @param pcb the tcp_pcb for which to split the unsent head
+ * @param split the amount of payload to remain in the head
+ */
+err_t
+tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split)
+{
+ struct tcp_seg *seg = NULL, *useg = NULL;
+ struct pbuf *p = NULL;
+ u8_t optlen;
+ u8_t optflags;
+ u8_t split_flags;
+ u8_t remainder_flags;
+ u16_t remainder;
+ u16_t offset;
+#if TCP_CHECKSUM_ON_COPY
+ u16_t chksum = 0;
+ u8_t chksum_swapped = 0;
+ struct pbuf *q;
+#endif /* TCP_CHECKSUM_ON_COPY */
+
+ LWIP_ASSERT("tcp_split_unsent_seg: invalid pcb", pcb != NULL);
+
+ useg = pcb->unsent;
+ if (useg == NULL) {
+ return ERR_MEM;
+ }
+
+ if (split == 0) {
+ LWIP_ASSERT("Can't split segment into length 0", 0);
+ return ERR_VAL;
+ }
+
+ if (useg->len <= split) {
+ return ERR_OK;
+ }
+
+ LWIP_ASSERT("split <= mss", split <= pcb->mss);
+ LWIP_ASSERT("useg->len > 0", useg->len > 0);
+
+ /* We should check that we don't exceed TCP_SND_QUEUELEN but we need
+ * to split this packet so we may actually exceed the max value by
+ * one!
+ */
+  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_split_unsent_seg: queuelen: %u\n", (unsigned int)pcb->snd_queuelen));
+
+ optflags = useg->flags;
+#if TCP_CHECKSUM_ON_COPY
+ /* Remove since checksum is not stored until after tcp_create_segment() */
+ optflags &= ~TF_SEG_DATA_CHECKSUMMED;
+#endif /* TCP_CHECKSUM_ON_COPY */
+ optlen = LWIP_TCP_OPT_LENGTH(optflags);
+ remainder = useg->len - split;
+
+ /* Create new pbuf for the remainder of the split */
+ p = pbuf_alloc(PBUF_TRANSPORT, remainder + optlen, PBUF_RAM);
+ if (p == NULL) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("tcp_split_unsent_seg: could not allocate memory for pbuf remainder %u\n", remainder));
+ goto memerr;
+ }
+
+ /* Offset into the original pbuf is past TCP/IP headers, options, and split amount */
+ offset = useg->p->tot_len - useg->len + split;
+ /* Copy remainder into new pbuf, headers and options will not be filled out */
+  if (pbuf_copy_partial(useg->p, (u8_t *)p->payload + optlen, remainder, offset) != remainder) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("tcp_split_unsent_seg: could not copy pbuf remainder %u\n", remainder));
+ goto memerr;
+ }
+#if TCP_CHECKSUM_ON_COPY
+ /* calculate the checksum on remainder data */
+ tcp_seg_add_chksum(~inet_chksum((const u8_t *)p->payload + optlen, remainder), remainder,
+ &chksum, &chksum_swapped);
+#endif /* TCP_CHECKSUM_ON_COPY */
+
+ /* Options are created when calling tcp_output() */
+
+ /* Migrate flags from original segment */
+ split_flags = TCPH_FLAGS(useg->tcphdr);
+ remainder_flags = 0; /* ACK added in tcp_output() */
+
+ if (split_flags & TCP_PSH) {
+ split_flags &= ~TCP_PSH;
+ remainder_flags |= TCP_PSH;
+ }
+ if (split_flags & TCP_FIN) {
+ split_flags &= ~TCP_FIN;
+ remainder_flags |= TCP_FIN;
+ }
+ /* SYN should be left on split, RST should not be present with data */
+
+ seg = tcp_create_segment(pcb, p, remainder_flags, lwip_ntohl(useg->tcphdr->seqno) + split, optflags);
+ if (seg == NULL) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("tcp_split_unsent_seg: could not create new TCP segment\n"));
+ goto memerr;
+ }
+
+#if TCP_CHECKSUM_ON_COPY
+ seg->chksum = chksum;
+ seg->chksum_swapped = chksum_swapped;
+ seg->flags |= TF_SEG_DATA_CHECKSUMMED;
+#endif /* TCP_CHECKSUM_ON_COPY */
+
+ /* Remove this segment from the queue since trimming it may free pbufs */
+ pcb->snd_queuelen -= pbuf_clen(useg->p);
+
+ /* Trim the original pbuf into our split size. At this point our remainder segment must be setup
+ successfully because we are modifying the original segment */
+ pbuf_realloc(useg->p, useg->p->tot_len - remainder);
+ useg->len -= remainder;
+ TCPH_SET_FLAG(useg->tcphdr, split_flags);
+#if TCP_OVERSIZE_DBGCHECK
+ /* By trimming, realloc may have actually shrunk the pbuf, so clear oversize_left */
+ useg->oversize_left = 0;
+#endif /* TCP_OVERSIZE_DBGCHECK */
+
+ /* Add back to the queue with new trimmed pbuf */
+ pcb->snd_queuelen += pbuf_clen(useg->p);
+
+#if TCP_CHECKSUM_ON_COPY
+ /* The checksum on the split segment is now incorrect. We need to re-run it over the split */
+ useg->chksum = 0;
+ useg->chksum_swapped = 0;
+ q = useg->p;
+ offset = q->tot_len - useg->len; /* Offset due to exposed headers */
+
+ /* Advance to the pbuf where the offset ends */
+ while (q != NULL && offset > q->len) {
+ offset -= q->len;
+ q = q->next;
+ }
+ LWIP_ASSERT("Found start of payload pbuf", q != NULL);
+ /* Checksum the first payload pbuf accounting for offset, then other pbufs are all payload */
+ for (; q != NULL; offset = 0, q = q->next) {
+ tcp_seg_add_chksum(~inet_chksum((const u8_t *)q->payload + offset, q->len - offset), q->len - offset,
+ &useg->chksum, &useg->chksum_swapped);
+ }
+#endif /* TCP_CHECKSUM_ON_COPY */
+
+ /* Update number of segments on the queues. Note that length now may
+ * exceed TCP_SND_QUEUELEN! We don't have to touch pcb->snd_buf
+ * because the total amount of data is constant when packet is split */
+ pcb->snd_queuelen += pbuf_clen(seg->p);
+
+ /* Finally insert remainder into queue after split (which stays head) */
+ seg->next = useg->next;
+ useg->next = seg;
+
+#if TCP_OVERSIZE
+ /* If remainder is last segment on the unsent, ensure we clear the oversize amount
+ * because the remainder is always sized to the exact remaining amount */
+ if (seg->next == NULL) {
+ pcb->unsent_oversize = 0;
+ }
+#endif /* TCP_OVERSIZE */
+
+ return ERR_OK;
+memerr:
+ TCP_STATS_INC(tcp.memerr);
+
+ LWIP_ASSERT("seg == NULL", seg == NULL);
+ if (p != NULL) {
+ pbuf_free(p);
+ }
+
+ return ERR_MEM;
+}
+
+/**
+ * Called by tcp_close() to send a segment including FIN flag but not data.
+ * This FIN may be added to an existing segment or a new, otherwise empty
+ * segment is enqueued.
+ *
+ * @param pcb the tcp_pcb over which to send a segment
+ * @return ERR_OK if sent, another err_t otherwise
+ */
+err_t
+tcp_send_fin(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("tcp_send_fin: invalid pcb", pcb != NULL);
+
+ /* first, try to add the fin to the last unsent segment */
+ if (pcb->unsent != NULL) {
+ struct tcp_seg *last_unsent;
+ for (last_unsent = pcb->unsent; last_unsent->next != NULL;
+ last_unsent = last_unsent->next);
+
+ if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) {
+ /* no SYN/FIN/RST flag in the header, we can add the FIN flag */
+ TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN);
+ tcp_set_flags(pcb, TF_FIN);
+ return ERR_OK;
+ }
+ }
+ /* no data, no length, flags, copy=1, no optdata */
+ return tcp_enqueue_flags(pcb, TCP_FIN);
+}
+
+/**
+ * Enqueue SYN or FIN for transmission.
+ *
+ * Called by @ref tcp_connect, tcp_listen_input, and @ref tcp_close
+ * (via @ref tcp_send_fin)
+ *
+ * @param pcb Protocol control block for the TCP connection.
+ * @param flags TCP header flags to set in the outgoing segment.
+ * @return ERR_OK if enqueued, another err_t on error
+ */
+err_t
+tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags)
+{
+ struct pbuf *p;
+ struct tcp_seg *seg;
+ u8_t optflags = 0;
+ u8_t optlen = 0;
+
+ LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen));
+
+ LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)",
+ (flags & (TCP_SYN | TCP_FIN)) != 0);
+ LWIP_ASSERT("tcp_enqueue_flags: invalid pcb", pcb != NULL);
+
+ /* No need to check pcb->snd_queuelen if only SYN or FIN are allowed! */
+
+ /* Get options for this segment. This is a special case since this is the
+ only place where a SYN can be sent. */
+ if (flags & TCP_SYN) {
+ optflags = TF_SEG_OPTS_MSS;
+#if LWIP_WND_SCALE
+ if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) {
+ /* In a <SYN,ACK> (sent in state SYN_RCVD), the window scale option may only
+ be sent if we received a window scale option from the remote host. */
+ optflags |= TF_SEG_OPTS_WND_SCALE;
+ }
+#endif /* LWIP_WND_SCALE */
+#if LWIP_TCP_SACK_OUT
+ if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_SACK)) {
+ /* In a <SYN,ACK> (sent in state SYN_RCVD), the SACK_PERM option may only
+ be sent if we received a SACK_PERM option from the remote host. */
+ optflags |= TF_SEG_OPTS_SACK_PERM;
+ }
+#endif /* LWIP_TCP_SACK_OUT */
+ }
+#if LWIP_TCP_TIMESTAMPS
+ if ((pcb->flags & TF_TIMESTAMP) || ((flags & TCP_SYN) && (pcb->state != SYN_RCVD))) {
+ /* Make sure the timestamp option is only included in data segments if we
+ agreed about it with the remote host (and in active open SYN segments). */
+ optflags |= TF_SEG_OPTS_TS;
+ }
+#endif /* LWIP_TCP_TIMESTAMPS */
+ optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb);
+
+ /* Allocate pbuf with room for TCP header + options */
+ if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
+ tcp_set_flags(pcb, TF_NAGLEMEMERR);
+ TCP_STATS_INC(tcp.memerr);
+ return ERR_MEM;
+ }
+ LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen",
+ (p->len >= optlen));
+
+ /* Allocate memory for tcp_seg, and fill in fields. */
+ if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) {
+ tcp_set_flags(pcb, TF_NAGLEMEMERR);
+ TCP_STATS_INC(tcp.memerr);
+ return ERR_MEM;
+ }
+ LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0);
+ LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0);
+
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE,
+ ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n",
+ lwip_ntohl(seg->tcphdr->seqno),
+ lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg),
+ (u16_t)flags));
+
+ /* Now append seg to pcb->unsent queue */
+ if (pcb->unsent == NULL) {
+ pcb->unsent = seg;
+ } else {
+ struct tcp_seg *useg;
+ for (useg = pcb->unsent; useg->next != NULL; useg = useg->next);
+ useg->next = seg;
+ }
+#if TCP_OVERSIZE
+ /* The new unsent tail has no space */
+ pcb->unsent_oversize = 0;
+#endif /* TCP_OVERSIZE */
+
+ /* SYN and FIN bump the sequence number */
+ if ((flags & TCP_SYN) || (flags & TCP_FIN)) {
+ pcb->snd_lbb++;
+ /* optlen does not influence snd_buf */
+ }
+ if (flags & TCP_FIN) {
+ tcp_set_flags(pcb, TF_FIN);
+ }
+
+ /* update number of segments on the queues */
+ pcb->snd_queuelen += pbuf_clen(seg->p);
+ LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen));
+ if (pcb->snd_queuelen != 0) {
+ LWIP_ASSERT("tcp_enqueue_flags: invalid queue length",
+ pcb->unacked != NULL || pcb->unsent != NULL);
+ }
+
+ return ERR_OK;
+}
+
+#if LWIP_TCP_TIMESTAMPS
+/** Build a timestamp option (12 bytes long) at the specified options pointer.
+ *
+ * @param pcb tcp_pcb
+ * @param opts option pointer where to store the timestamp option
+ */
+static void
+tcp_build_timestamp_option(const struct tcp_pcb *pcb, u32_t *opts)
+{
+ LWIP_ASSERT("tcp_build_timestamp_option: invalid pcb", pcb != NULL);
+
+ /* Pad with two NOP options to make everything nicely aligned */
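+  /* 0x01 0x01 = two NOPs, 0x08 = option kind (timestamp), 0x0A = option length (10) */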
+ opts[0] = PP_HTONL(0x0101080A);
+ opts[1] = lwip_htonl(sys_now());
+ opts[2] = lwip_htonl(pcb->ts_recent);
+}
+#endif
+
+#if LWIP_TCP_SACK_OUT
+/**
+ * Calculates the number of SACK entries that should be generated.
+ * It takes into account whether TF_SACK flag is set,
+ * the number of SACK entries in tcp_pcb that are valid,
+ * as well as the available options size.
+ *
+ * @param pcb tcp_pcb
+ * @param optlen the length of other TCP options (in bytes)
+ * @return the number of SACK ranges that can be used
+ */
+static u8_t
+tcp_get_num_sacks(const struct tcp_pcb *pcb, u8_t optlen)
+{
+ u8_t num_sacks = 0;
+
+ LWIP_ASSERT("tcp_get_num_sacks: invalid pcb", pcb != NULL);
+
+ if (pcb->flags & TF_SACK) {
+ u8_t i;
+
+ /* The first SACK takes up 12 bytes (it includes SACK header and two NOP options),
+ each additional one - 8 bytes. */
+ optlen += 12;
+
+ /* Max options size = 40, number of SACK array entries = LWIP_TCP_MAX_SACK_NUM */
+ for (i = 0; (i < LWIP_TCP_MAX_SACK_NUM) && (optlen <= TCP_MAX_OPTION_BYTES) &&
+ LWIP_TCP_SACK_VALID(pcb, i); ++i) {
+ ++num_sacks;
+ optlen += 8;
+ }
+ }
+
+ return num_sacks;
+}
+
+/** Build a SACK option (12 or more bytes long) at the specified options pointer.
+ *
+ * @param pcb tcp_pcb
+ * @param opts option pointer where to store the SACK option
+ * @param num_sacks the number of SACKs to store
+ */
+static void
+tcp_build_sack_option(const struct tcp_pcb *pcb, u32_t *opts, u8_t num_sacks)
+{
+ u8_t i;
+
+ LWIP_ASSERT("tcp_build_sack_option: invalid pcb", pcb != NULL);
+ LWIP_ASSERT("tcp_build_sack_option: invalid opts", opts != NULL);
+
+ /* Pad with two NOP options to make everything nicely aligned.
+ We add the length (of just the SACK option, not the NOPs in front of it),
+ which is 2B of header, plus 8B for each SACK. */
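+  /* 0x01 0x01 = two NOPs, 0x05 = option kind (SACK), low byte = option length */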
+ *(opts++) = PP_HTONL(0x01010500 + 2 + num_sacks * 8);
+
+ for (i = 0; i < num_sacks; ++i) {
+ *(opts++) = lwip_htonl(pcb->rcv_sacks[i].left);
+ *(opts++) = lwip_htonl(pcb->rcv_sacks[i].right);
+ }
+}
+
+#endif
+
+#if LWIP_WND_SCALE
+/** Build a window scale option (3 bytes long) at the specified options pointer.
+ *
+ * @param opts option pointer where to store the window scale option
+ */
+static void
+tcp_build_wnd_scale_option(u32_t *opts)
+{
+ LWIP_ASSERT("tcp_build_wnd_scale_option: invalid opts", opts != NULL);
+
+ /* Pad with one NOP option to make everything nicely aligned */
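+  /* 0x01 = NOP, 0x03 = option kind (window scale), 0x03 = option length, low byte = shift count */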
+ opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE);
+}
+#endif
+
+/**
+ * @ingroup tcp_raw
+ * Find out what we can send and send it
+ *
+ * @param pcb Protocol control block for the TCP connection to send data
+ * @return ERR_OK if data has been sent or there was nothing to send,
+ *         another err_t on error
+ */
+err_t
+tcp_output(struct tcp_pcb *pcb)
+{
+ struct tcp_seg *seg, *useg;
+ u32_t wnd, snd_nxt;
+ err_t err;
+ struct netif *netif;
+#if TCP_CWND_DEBUG
+ s16_t i = 0;
+#endif /* TCP_CWND_DEBUG */
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ASSERT("tcp_output: invalid pcb", pcb != NULL);
+ /* pcb->state LISTEN not allowed here */
+ LWIP_ASSERT("don't call tcp_output for listen-pcbs",
+ pcb->state != LISTEN);
+
+ /* First, check if we are invoked by the TCP input processing
+ code. If so, we do not output anything. Instead, we rely on the
+     input processing code to call us when it is done. */
+ if (tcp_input_pcb == pcb) {
+ return ERR_OK;
+ }
+
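+  /* The usable send window is the minimum of the receiver's advertised
+     window and our congestion window. */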
+ wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd);
+
+ seg = pcb->unsent;
+
+ if (seg == NULL) {
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n",
+ (void *)pcb->unsent));
+ LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F
+ ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F
+ ", seg == NULL, ack %"U32_F"\n",
+ pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack));
+
+ /* If the TF_ACK_NOW flag is set and the ->unsent queue is empty, construct
+ * an empty ACK segment and send it. */
+ if (pcb->flags & TF_ACK_NOW) {
+ return tcp_send_empty_ack(pcb);
+ }
+ /* nothing to send: shortcut out of here */
+ goto output_done;
+ } else {
+ LWIP_DEBUGF(TCP_CWND_DEBUG,
+ ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F
+ ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n",
+ pcb->snd_wnd, pcb->cwnd, wnd,
+ lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len,
+ lwip_ntohl(seg->tcphdr->seqno), pcb->lastack));
+ }
+
+ netif = tcp_route(pcb, &pcb->local_ip, &pcb->remote_ip);
+ if (netif == NULL) {
+ return ERR_RTE;
+ }
+
+ /* If we don't have a local IP address, we get one from netif */
+ if (ip_addr_isany(&pcb->local_ip)) {
+ const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip);
+ if (local_ip == NULL) {
+ return ERR_RTE;
+ }
+ ip_addr_copy(pcb->local_ip, *local_ip);
+ }
+
+ /* Handle the current segment not fitting within the window */
+ if (lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd) {
+ /* We need to start the persistent timer when the next unsent segment does not fit
+ * within the remaining (could be 0) send window and RTO timer is not running (we
+ * have no in-flight data). If window is still too small after persist timer fires,
+ * then we split the segment. We don't consider the congestion window since a cwnd
+ * smaller than 1 SMSS implies in-flight data
+ */
+ if (wnd == pcb->snd_wnd && pcb->unacked == NULL && pcb->persist_backoff == 0) {
+ pcb->persist_cnt = 0;
+ pcb->persist_backoff = 1;
+ pcb->persist_probe = 0;
+ }
+ /* We need an ACK, but can't send data now, so send an empty ACK */
+ if (pcb->flags & TF_ACK_NOW) {
+ return tcp_send_empty_ack(pcb);
+ }
+ goto output_done;
+ }
+ /* Stop persist timer, above conditions are not active */
+ pcb->persist_backoff = 0;
+
+ /* useg should point to last segment on unacked queue */
+ useg = pcb->unacked;
+ if (useg != NULL) {
+ for (; useg->next != NULL; useg = useg->next);
+ }
+ /* data available and window allows it to be sent? */
+ while (seg != NULL &&
+ lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) {
+ LWIP_ASSERT("RST not expected here!",
+ (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0);
+    /* Stop sending if the Nagle algorithm would prevent it.
+     * Don't stop:
+     * - if tcp_write had a memory error before (prevent delayed ACK timeout) or
+     * - if FIN was already enqueued for this PCB (SYN is always alone in a segment -
+     *   either seg->next != NULL or pcb->unacked == NULL;
+     *   RST is not sent using tcp_write/tcp_output).
+ */
+ if ((tcp_do_output_nagle(pcb) == 0) &&
+ ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) {
+ break;
+ }
+#if TCP_CWND_DEBUG
+ LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n",
+ pcb->snd_wnd, pcb->cwnd, wnd,
+ lwip_ntohl(seg->tcphdr->seqno) + seg->len -
+ pcb->lastack,
+ lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i));
+ ++i;
+#endif /* TCP_CWND_DEBUG */
+
+ if (pcb->state != SYN_SENT) {
+ TCPH_SET_FLAG(seg->tcphdr, TCP_ACK);
+ }
+
+ err = tcp_output_segment(seg, pcb, netif);
+ if (err != ERR_OK) {
+ /* segment could not be sent, for whatever reason */
+ tcp_set_flags(pcb, TF_NAGLEMEMERR);
+ return err;
+ }
+#if TCP_OVERSIZE_DBGCHECK
+ seg->oversize_left = 0;
+#endif /* TCP_OVERSIZE_DBGCHECK */
+ pcb->unsent = seg->next;
+ if (pcb->state != SYN_SENT) {
+ tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
+ }
+ snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg);
+ if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) {
+ pcb->snd_nxt = snd_nxt;
+ }
+ /* put segment on unacknowledged list if length > 0 */
+ if (TCP_TCPLEN(seg) > 0) {
+ seg->next = NULL;
+ /* unacked list is empty? */
+ if (pcb->unacked == NULL) {
+ pcb->unacked = seg;
+ useg = seg;
+ /* unacked list is not empty? */
+ } else {
+ /* In the case of fast retransmit, the packet should not go to the tail
+ * of the unacked queue, but rather somewhere before it. We need to check for
+ * this case. -STJ Jul 27, 2004 */
+ if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) {
+ /* add segment to before tail of unacked list, keeping the list sorted */
+ struct tcp_seg **cur_seg = &(pcb->unacked);
+ while (*cur_seg &&
+ TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) {
+ cur_seg = &((*cur_seg)->next );
+ }
+ seg->next = (*cur_seg);
+ (*cur_seg) = seg;
+ } else {
+ /* add segment to tail of unacked list */
+ useg->next = seg;
+ useg = useg->next;
+ }
+ }
+ /* do not queue empty segments on the unacked list */
+ } else {
+ tcp_seg_free(seg);
+ }
+ seg = pcb->unsent;
+ }
+#if TCP_OVERSIZE
+ if (pcb->unsent == NULL) {
+ /* last unsent has been removed, reset unsent_oversize */
+ pcb->unsent_oversize = 0;
+ }
+#endif /* TCP_OVERSIZE */
+
+output_done:
+ tcp_clear_flags(pcb, TF_NAGLEMEMERR);
+ return ERR_OK;
+}
+
+/** Check if a segment's pbufs are used by someone else than TCP.
+ * This can happen on retransmission if the pbuf of this segment is still
+ * referenced by the netif driver due to deferred transmission.
+ * This is the case (only!) if someone down the TX call path called
+ * pbuf_ref() on one of the pbufs!
+ *
+ * @param seg the tcp segment to check
+ * @return 1 if ref != 1, 0 if ref == 1
+ */
+static int
+tcp_output_segment_busy(const struct tcp_seg *seg)
+{
+ LWIP_ASSERT("tcp_output_segment_busy: invalid seg", seg != NULL);
+
+ /* We only need to check the first pbuf here:
+ If a pbuf is queued for transmission, a driver calls pbuf_ref(),
+ which only changes the ref count of the first pbuf */
+ if (seg->p->ref != 1) {
+ /* other reference found */
+ return 1;
+ }
+ /* no other references found */
+ return 0;
+}
+
+/**
+ * Called by tcp_output() to actually send a TCP segment over IP.
+ *
+ * @param seg the tcp_seg to send
+ * @param pcb the tcp_pcb for the TCP connection used to send the segment
+ * @param netif the netif used to send the segment
+ */
+static err_t
+tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif)
+{
+ err_t err;
+ u16_t len;
+ u32_t *opts;
+#if TCP_CHECKSUM_ON_COPY
+ int seg_chksum_was_swapped = 0;
+#endif
+
+ LWIP_ASSERT("tcp_output_segment: invalid seg", seg != NULL);
+ LWIP_ASSERT("tcp_output_segment: invalid pcb", pcb != NULL);
+ LWIP_ASSERT("tcp_output_segment: invalid netif", netif != NULL);
+
+ if (tcp_output_segment_busy(seg)) {
+ /* This should not happen: rexmit functions should have checked this.
+ However, since this function modifies p->len, we must not continue in this case. */
+ LWIP_DEBUGF(TCP_RTO_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_output_segment: segment busy\n"));
+ return ERR_OK;
+ }
+
+ /* The TCP header has already been constructed, but the ackno and
+ wnd fields remain. */
+ seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt);
+
+ /* advertise our receive window size in this TCP segment */
+#if LWIP_WND_SCALE
+ if (seg->flags & TF_SEG_OPTS_WND_SCALE) {
+ /* The Window field in a SYN segment itself (the only type where we send
+ the window scale option) is never scaled. */
+ seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd));
+ } else
+#endif /* LWIP_WND_SCALE */
+ {
+ seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd)));
+ }
+
+ pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;
+
+ /* Add any requested options. NB MSS option is only set on SYN
+ packets, so ignore it here */
+ /* cast through void* to get rid of alignment warnings */
+ opts = (u32_t *)(void *)(seg->tcphdr + 1);
+ if (seg->flags & TF_SEG_OPTS_MSS) {
+ u16_t mss;
+#if TCP_CALCULATE_EFF_SEND_MSS
+ mss = tcp_eff_send_mss_netif(TCP_MSS, netif, &pcb->remote_ip);
+#else /* TCP_CALCULATE_EFF_SEND_MSS */
+ mss = TCP_MSS;
+#endif /* TCP_CALCULATE_EFF_SEND_MSS */
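+    /* MSS option layout: kind 2, length 4, followed by the 16-bit MSS value */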
+ *opts = TCP_BUILD_MSS_OPTION(mss);
+ opts += 1;
+ }
+#if LWIP_TCP_TIMESTAMPS
+ pcb->ts_lastacksent = pcb->rcv_nxt;
+
+ if (seg->flags & TF_SEG_OPTS_TS) {
+ tcp_build_timestamp_option(pcb, opts);
+ opts += 3;
+ }
+#endif
+#if LWIP_WND_SCALE
+ if (seg->flags & TF_SEG_OPTS_WND_SCALE) {
+ tcp_build_wnd_scale_option(opts);
+ opts += 1;
+ }
+#endif
+#if LWIP_TCP_SACK_OUT
+ if (seg->flags & TF_SEG_OPTS_SACK_PERM) {
+ /* Pad with two NOP options to make everything nicely aligned
+ * NOTE: When we send both timestamp and SACK_PERM options,
+ * we could use the first two NOPs before the timestamp to store SACK_PERM option,
+ * but that would complicate the code.
+ */
+ *(opts++) = PP_HTONL(0x01010402);
+ }
+#endif
+
+ /* Set retransmission timer running if it is not currently enabled
+ This must be set before checking the route. */
+ if (pcb->rtime < 0) {
+ pcb->rtime = 0;
+ }
+
+ if (pcb->rttest == 0) {
+ pcb->rttest = tcp_ticks;
+ pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno);
+
+ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq));
+ }
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n",
+ lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) +
+ seg->len));
+
+ len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload);
+ if (len == 0) {
+ /** Exclude retransmitted segments from this count. */
+ MIB2_STATS_INC(mib2.tcpoutsegs);
+ }
+
+ seg->p->len -= len;
+ seg->p->tot_len -= len;
+
+ seg->p->payload = seg->tcphdr;
+
+ seg->tcphdr->chksum = 0;
+
+#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS
+ opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(seg->p, seg->tcphdr, pcb, opts);
+#endif
+ LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(seg->tcphdr + 1)) + LWIP_TCP_OPT_LENGTH_SEGMENT(seg->flags, pcb));
+
+#if CHECKSUM_GEN_TCP
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) {
+#if TCP_CHECKSUM_ON_COPY
+ u32_t acc;
+#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
+ u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP,
+ seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip);
+#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
+ if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) {
+ LWIP_ASSERT("data included but not checksummed",
+ seg->p->tot_len == TCPH_HDRLEN_BYTES(seg->tcphdr));
+ }
+
+ /* rebuild TCP header checksum (TCP header changes for retransmissions!) */
+ acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP,
+ seg->p->tot_len, TCPH_HDRLEN_BYTES(seg->tcphdr), &pcb->local_ip, &pcb->remote_ip);
+ /* add payload checksum */
+ if (seg->chksum_swapped) {
+ seg_chksum_was_swapped = 1;
+ seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum);
+ seg->chksum_swapped = 0;
+ }
+ acc = (u16_t)~acc + seg->chksum;
+ seg->tcphdr->chksum = (u16_t)~FOLD_U32T(acc);
+#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
+ if (chksum_slow != seg->tcphdr->chksum) {
+ TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(
+ ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n",
+ seg->tcphdr->chksum, chksum_slow));
+ seg->tcphdr->chksum = chksum_slow;
+ }
+#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
+#else /* TCP_CHECKSUM_ON_COPY */
+ seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP,
+ seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip);
+#endif /* TCP_CHECKSUM_ON_COPY */
+ }
+#endif /* CHECKSUM_GEN_TCP */
+ TCP_STATS_INC(tcp.xmit);
+
+ NETIF_SET_HINTS(netif, &(pcb->netif_hints));
+ err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl,
+ pcb->tos, IP_PROTO_TCP, netif);
+ NETIF_RESET_HINTS(netif);
+
+#if TCP_CHECKSUM_ON_COPY
+ if (seg_chksum_was_swapped) {
+ /* if data is added to this segment later, chksum needs to be swapped,
+ so restore this now */
+ seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum);
+ seg->chksum_swapped = 1;
+ }
+#endif
+
+ return err;
+}
+
+/**
+ * Requeue all unacked segments for retransmission
+ *
+ * Called by tcp_slowtmr() for slow retransmission.
+ *
+ * @param pcb the tcp_pcb for which to re-enqueue all unacked segments
+ */
+err_t
+tcp_rexmit_rto_prepare(struct tcp_pcb *pcb)
+{
+ struct tcp_seg *seg;
+
+ LWIP_ASSERT("tcp_rexmit_rto_prepare: invalid pcb", pcb != NULL);
+
+ if (pcb->unacked == NULL) {
+ return ERR_VAL;
+ }
+
+ /* Move all unacked segments to the head of the unsent queue.
+ However, give up if any of the unsent pbufs are still referenced by the
+ netif driver due to deferred transmission. No point loading the link further
+ if it is struggling to flush its buffered writes. */
+ for (seg = pcb->unacked; seg->next != NULL; seg = seg->next) {
+ if (tcp_output_segment_busy(seg)) {
+ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n"));
+ return ERR_VAL;
+ }
+ }
+ if (tcp_output_segment_busy(seg)) {
+ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n"));
+ return ERR_VAL;
+ }
+ /* concatenate unsent queue after unacked queue */
+ seg->next = pcb->unsent;
+#if TCP_OVERSIZE_DBGCHECK
+ /* if last unsent changed, we need to update unsent_oversize */
+ if (pcb->unsent == NULL) {
+ pcb->unsent_oversize = seg->oversize_left;
+ }
+#endif /* TCP_OVERSIZE_DBGCHECK */
+ /* unsent queue is the concatenated queue (of unacked, unsent) */
+ pcb->unsent = pcb->unacked;
+ /* unacked queue is now empty */
+ pcb->unacked = NULL;
+
+ /* Mark RTO in-progress */
+ tcp_set_flags(pcb, TF_RTO);
+ /* Record the next byte following retransmit */
+ pcb->rto_end = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg);
+ /* Don't take any RTT measurements after retransmitting. */
+ pcb->rttest = 0;
+
+ return ERR_OK;
+}
+
+/**
+ * Send the segments requeued by tcp_rexmit_rto_prepare() and increment the
+ * retransmission counter.
+ *
+ * Called by tcp_slowtmr() for slow retransmission.
+ *
+ * @param pcb the tcp_pcb for which to send the requeued segments
+ */
+void
+tcp_rexmit_rto_commit(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("tcp_rexmit_rto_commit: invalid pcb", pcb != NULL);
+
+ /* increment number of retransmissions */
+ if (pcb->nrtx < 0xFF) {
+ ++pcb->nrtx;
+ }
+ /* Do the actual retransmission */
+ tcp_output(pcb);
+}
+
+/**
+ * Requeue all unacked segments for retransmission
+ *
+ * Called by tcp_process() only; tcp_slowtmr() uses the separate "prepare" and
+ * "commit" steps because it needs to do additional work between them.
+ *
+ * @param pcb the tcp_pcb for which to re-enqueue all unacked segments
+ */
+void
+tcp_rexmit_rto(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("tcp_rexmit_rto: invalid pcb", pcb != NULL);
+
+ if (tcp_rexmit_rto_prepare(pcb) == ERR_OK) {
+ tcp_rexmit_rto_commit(pcb);
+ }
+}
+
+/**
+ * Requeue the first unacked segment for retransmission
+ *
+ * Called by tcp_receive() for fast retransmit.
+ *
+ * @param pcb the tcp_pcb for which to retransmit the first unacked segment
+ */
+err_t
+tcp_rexmit(struct tcp_pcb *pcb)
+{
+ struct tcp_seg *seg;
+ struct tcp_seg **cur_seg;
+
+ LWIP_ASSERT("tcp_rexmit: invalid pcb", pcb != NULL);
+
+ if (pcb->unacked == NULL) {
+ return ERR_VAL;
+ }
+
+ seg = pcb->unacked;
+
+ /* Give up if the segment is still referenced by the netif driver
+ due to deferred transmission. */
+ if (tcp_output_segment_busy(seg)) {
+ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit busy\n"));
+ return ERR_VAL;
+ }
+
+ /* Move the first unacked segment to the unsent queue */
+ /* Keep the unsent queue sorted. */
+ pcb->unacked = seg->next;
+
+ cur_seg = &(pcb->unsent);
+ while (*cur_seg &&
+ TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) {
+ cur_seg = &((*cur_seg)->next );
+ }
+ seg->next = *cur_seg;
+ *cur_seg = seg;
+#if TCP_OVERSIZE
+ if (seg->next == NULL) {
+ /* the retransmitted segment is last in unsent, so reset unsent_oversize */
+ pcb->unsent_oversize = 0;
+ }
+#endif /* TCP_OVERSIZE */
+
+ if (pcb->nrtx < 0xFF) {
+ ++pcb->nrtx;
+ }
+
+ /* Don't take any rtt measurements after retransmitting. */
+ pcb->rttest = 0;
+
+ /* Do the actual retransmission. */
+ MIB2_STATS_INC(mib2.tcpretranssegs);
+ /* No need to call tcp_output: we are always called from tcp_input()
+ and thus tcp_output directly returns. */
+ return ERR_OK;
+}
+
+
+/**
+ * Handle retransmission after three dupacks received
+ *
+ * @param pcb the tcp_pcb for which to retransmit the first unacked segment
+ */
+void
+tcp_rexmit_fast(struct tcp_pcb *pcb)
+{
+ LWIP_ASSERT("tcp_rexmit_fast: invalid pcb", pcb != NULL);
+
+ if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) {
+ /* This is fast retransmit. Retransmit the first unacked segment. */
+ LWIP_DEBUGF(TCP_FR_DEBUG,
+ ("tcp_receive: dupacks %"U16_F" (%"U32_F
+ "), fast retransmit %"U32_F"\n",
+ (u16_t)pcb->dupacks, pcb->lastack,
+ lwip_ntohl(pcb->unacked->tcphdr->seqno)));
+ if (tcp_rexmit(pcb) == ERR_OK) {
+ /* Set ssthresh to half of the minimum of the current
+ * cwnd and the advertised window */
+ pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2;
+
+ /* The minimum value for ssthresh should be 2 MSS */
+ if (pcb->ssthresh < (2U * pcb->mss)) {
+ LWIP_DEBUGF(TCP_FR_DEBUG,
+ ("tcp_receive: The minimum value for ssthresh %"TCPWNDSIZE_F
+ " should be min 2 mss %"U16_F"...\n",
+ pcb->ssthresh, (u16_t)(2 * pcb->mss)));
+ pcb->ssthresh = 2 * pcb->mss;
+ }
+
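+      /* Inflate cwnd by the three segments that triggered the duplicate
+         ACKs (fast recovery, cf. RFC 5681). */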
+ pcb->cwnd = pcb->ssthresh + 3 * pcb->mss;
+ tcp_set_flags(pcb, TF_INFR);
+
+ /* Reset the retransmission timer to prevent immediate rto retransmissions */
+ pcb->rtime = 0;
+ }
+ }
+}
+
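+/** Allocate a pbuf of TCP_HLEN + optlen + datalen bytes at PBUF_IP level and
+ * prefill the TCP header: ports, seqno (passed in network byte order), ackno,
+ * header length and flags, and window. The checksum and urgent pointer are
+ * zeroed; option bytes themselves are filled in later by the caller. */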
+static struct pbuf *
+tcp_output_alloc_header_common(u32_t ackno, u16_t optlen, u16_t datalen,
+ u32_t seqno_be /* already in network byte order */,
+ u16_t src_port, u16_t dst_port, u8_t flags, u16_t wnd)
+{
+ struct tcp_hdr *tcphdr;
+ struct pbuf *p;
+
+ p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM);
+ if (p != NULL) {
+ LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr",
+ (p->len >= TCP_HLEN + optlen));
+ tcphdr = (struct tcp_hdr *)p->payload;
+ tcphdr->src = lwip_htons(src_port);
+ tcphdr->dest = lwip_htons(dst_port);
+ tcphdr->seqno = seqno_be;
+ tcphdr->ackno = lwip_htonl(ackno);
+ TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), flags);
+ tcphdr->wnd = lwip_htons(wnd);
+ tcphdr->chksum = 0;
+ tcphdr->urgp = 0;
+ }
+ return p;
+}
+
+/** Allocate a pbuf and create a tcphdr at p->payload, used for output
+ * functions other than the default tcp_output -> tcp_output_segment
+ * (e.g. tcp_send_empty_ack, etc.)
+ *
+ * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr)
+ * @param optlen length of header-options
+ * @param datalen length of tcp data to reserve in pbuf
+ * @param seqno_be seqno in network byte order (big-endian)
+ * @return pbuf with p->payload being the tcp_hdr
+ */
+static struct pbuf *
+tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen,
+ u32_t seqno_be /* already in network byte order */)
+{
+ struct pbuf *p;
+
+ LWIP_ASSERT("tcp_output_alloc_header: invalid pcb", pcb != NULL);
+
+ p = tcp_output_alloc_header_common(pcb->rcv_nxt, optlen, datalen,
+ seqno_be, pcb->local_port, pcb->remote_port, TCP_ACK,
+ TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd)));
+ if (p != NULL) {
+ /* If we're sending a packet, update the announced right window edge */
+ pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;
+ }
+ return p;
+}
+
+/* Fill in options for control segments */
+static void
+tcp_output_fill_options(const struct tcp_pcb *pcb, struct pbuf *p, u8_t optflags, u8_t num_sacks)
+{
+ struct tcp_hdr *tcphdr;
+ u32_t *opts;
+ u16_t sacks_len = 0;
+
+ LWIP_ASSERT("tcp_output_fill_options: invalid pbuf", p != NULL);
+
+ tcphdr = (struct tcp_hdr *)p->payload;
+ opts = (u32_t *)(void *)(tcphdr + 1);
+
+ /* NB. MSS and window scale options are only sent on SYNs, so ignore them here */
+
+#if LWIP_TCP_TIMESTAMPS
+ if (optflags & TF_SEG_OPTS_TS) {
+ tcp_build_timestamp_option(pcb, opts);
+ opts += 3;
+ }
+#endif
+
+#if LWIP_TCP_SACK_OUT
+ if (pcb && (num_sacks > 0)) {
+ tcp_build_sack_option(pcb, opts, num_sacks);
+ /* 1 word for SACKs header (including 2xNOP), and 2 words for each SACK */
+ sacks_len = 1 + num_sacks * 2;
+ opts += sacks_len;
+ }
+#else
+ LWIP_UNUSED_ARG(num_sacks);
+#endif
+
+#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS
+ opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, tcphdr, pcb, opts);
+#endif
+
+ LWIP_UNUSED_ARG(pcb);
+ LWIP_UNUSED_ARG(sacks_len);
+ LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(tcphdr + 1)) + sacks_len * 4 + LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb));
+ LWIP_UNUSED_ARG(optflags); /* for LWIP_NOASSERT */
+ LWIP_UNUSED_ARG(opts); /* for LWIP_NOASSERT */
+}
+
+/** Output a control segment pbuf to IP.
+ *
+ * Called from tcp_rst, tcp_send_empty_ack, tcp_keepalive and tcp_zero_window_probe,
+ * this function combines selecting a netif for transmission, generating the tcp
+ * header checksum and calling ip_output_if while handling netif hints and stats.
+ */
+static err_t
+tcp_output_control_segment(const struct tcp_pcb *pcb, struct pbuf *p,
+ const ip_addr_t *src, const ip_addr_t *dst)
+{
+ err_t err;
+ struct netif *netif;
+
+ LWIP_ASSERT("tcp_output_control_segment: invalid pbuf", p != NULL);
+
+ netif = tcp_route(pcb, src, dst);
+ if (netif == NULL) {
+ err = ERR_RTE;
+ } else {
+ u8_t ttl, tos;
+#if CHECKSUM_GEN_TCP
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) {
+ struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload;
+ tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len,
+ src, dst);
+ }
+#endif
+ if (pcb != NULL) {
+ NETIF_SET_HINTS(netif, LWIP_CONST_CAST(struct netif_hint*, &(pcb->netif_hints)));
+ ttl = pcb->ttl;
+ tos = pcb->tos;
+ } else {
+ /* Send output with hardcoded TTL/HL since we have no access to the pcb */
+ ttl = TCP_TTL;
+ tos = 0;
+ }
+ TCP_STATS_INC(tcp.xmit);
+ err = ip_output_if(p, src, dst, ttl, tos, IP_PROTO_TCP, netif);
+ NETIF_RESET_HINTS(netif);
+ }
+ pbuf_free(p);
+ return err;
+}
+
+/**
+ * Send a TCP RESET packet (empty segment with RST flag set) either to
+ * abort a connection or to show that there is no matching local connection
+ * for a received segment.
+ *
+ * Called by tcp_abort() (to abort a local connection), tcp_input() (if no
+ * matching local pcb was found), tcp_listen_input() (if incoming segment
+ * has ACK flag set) and tcp_process() (received segment in the wrong state)
+ *
+ * Since a RST segment is in most cases not sent for an active connection,
+ * tcp_rst() has a number of arguments that are taken from a tcp_pcb for
+ * most other segment output functions.
+ *
+ * @param pcb TCP pcb (may be NULL if no pcb is available)
+ * @param seqno the sequence number to use for the outgoing segment
+ * @param ackno the acknowledge number to use for the outgoing segment
+ * @param local_ip the local IP address to send the segment from
+ * @param remote_ip the remote IP address to send the segment to
+ * @param local_port the local TCP port to send the segment from
+ * @param remote_port the remote TCP port to send the segment to
+ */
+void
+tcp_rst(const struct tcp_pcb *pcb, u32_t seqno, u32_t ackno,
+ const ip_addr_t *local_ip, const ip_addr_t *remote_ip,
+ u16_t local_port, u16_t remote_port)
+{
+ struct pbuf *p;
+ u16_t wnd;
+ u8_t optlen;
+
+ LWIP_ASSERT("tcp_rst: invalid local_ip", local_ip != NULL);
+ LWIP_ASSERT("tcp_rst: invalid remote_ip", remote_ip != NULL);
+
+ optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb);
+
+#if LWIP_WND_SCALE
+ wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF));
+#else
+ wnd = PP_HTONS(TCP_WND);
+#endif
+
+ p = tcp_output_alloc_header_common(ackno, optlen, 0, lwip_htonl(seqno), local_port,
+ remote_port, TCP_RST | TCP_ACK, wnd);
+ if (p == NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n"));
+ return;
+ }
+ tcp_output_fill_options(pcb, p, 0, optlen);
+
+ MIB2_STATS_INC(mib2.tcpoutrsts);
+
+ tcp_output_control_segment(pcb, p, local_ip, remote_ip);
+ LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno));
+}
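+
+/* Illustrative sketch of how the stack itself calls tcp_rst() for an active
+   connection (roughly what tcp_abort()/tcp_abandon() do; not a new API):
+
+   tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt,
+           &pcb->local_ip, &pcb->remote_ip,
+           pcb->local_port, pcb->remote_port);
+*/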
+
+/**
+ * Send an ACK without data.
+ *
+ * @param pcb Protocol control block for the TCP connection to send the ACK
+ */
+err_t
+tcp_send_empty_ack(struct tcp_pcb *pcb)
+{
+ err_t err;
+ struct pbuf *p;
+ u8_t optlen, optflags = 0;
+ u8_t num_sacks = 0;
+
+ LWIP_ASSERT("tcp_send_empty_ack: invalid pcb", pcb != NULL);
+
+#if LWIP_TCP_TIMESTAMPS
+ if (pcb->flags & TF_TIMESTAMP) {
+ optflags = TF_SEG_OPTS_TS;
+ }
+#endif
+ optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb);
+
+#if LWIP_TCP_SACK_OUT
+ /* For now, SACKs are only sent with empty ACKs */
+ if ((num_sacks = tcp_get_num_sacks(pcb, optlen)) > 0) {
+    optlen += 4 + num_sacks * 8; /* 4 bytes for the header (including 2*NOP), plus 8 bytes for each SACK */
+ }
+#endif
+
+ p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt));
+ if (p == NULL) {
+ /* let tcp_fasttmr retry sending this ACK */
+ tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n"));
+ return ERR_BUF;
+ }
+ tcp_output_fill_options(pcb, p, optflags, num_sacks);
+
+#if LWIP_TCP_TIMESTAMPS
+ pcb->ts_lastacksent = pcb->rcv_nxt;
+#endif
+
+ LWIP_DEBUGF(TCP_OUTPUT_DEBUG,
+ ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt));
+ err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip);
+ if (err != ERR_OK) {
+ /* let tcp_fasttmr retry sending this ACK */
+ tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
+ } else {
+ /* remove ACK flags from the PCB, as we sent an empty ACK now */
+ tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
+ }
+
+ return err;
+}
+
+/**
+ * Send keepalive packets to keep a connection active although
+ * no data is sent over it.
+ *
+ * Called by tcp_slowtmr()
+ *
+ * @param pcb the tcp_pcb for which to send a keepalive packet
+ */
+err_t
+tcp_keepalive(struct tcp_pcb *pcb)
+{
+ err_t err;
+ struct pbuf *p;
+ u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb);
+
+ LWIP_ASSERT("tcp_keepalive: invalid pcb", pcb != NULL);
+
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to "));
+ ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip);
+ LWIP_DEBUGF(TCP_DEBUG, ("\n"));
+
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
+ tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent));
+
+ p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt - 1));
+ if (p == NULL) {
+ LWIP_DEBUGF(TCP_DEBUG,
+ ("tcp_keepalive: could not allocate memory for pbuf\n"));
+ return ERR_MEM;
+ }
+ tcp_output_fill_options(pcb, p, 0, optlen);
+ err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip);
+
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n",
+ pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err));
+ return err;
+}
+
+/**
+ * Send persist timer zero-window probes to keep a connection active
+ * when a window update is lost.
+ *
+ * Called by tcp_slowtmr()
+ *
+ * @param pcb the tcp_pcb for which to send a zero-window probe packet
+ */
+err_t
+tcp_zero_window_probe(struct tcp_pcb *pcb)
+{
+ err_t err;
+ struct pbuf *p;
+ struct tcp_hdr *tcphdr;
+ struct tcp_seg *seg;
+ u16_t len;
+ u8_t is_fin;
+ u32_t snd_nxt;
+ u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb);
+
+ LWIP_ASSERT("tcp_zero_window_probe: invalid pcb", pcb != NULL);
+
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to "));
+ ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip);
+ LWIP_DEBUGF(TCP_DEBUG, ("\n"));
+
+ LWIP_DEBUGF(TCP_DEBUG,
+ ("tcp_zero_window_probe: tcp_ticks %"U32_F
+ " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
+ tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent));
+
+  /* Only consider the unsent queue; the persist timer should be off when there is data in flight */
+ seg = pcb->unsent;
+ if (seg == NULL) {
+ /* Not expected, persist timer should be off when the send buffer is empty */
+ return ERR_OK;
+ }
+
+  /* Increment the probe count. NOTE: we record the probe even if it fails
+     to actually transmit due to an error. This ensures that memory exhaustion
+     or a routing problem doesn't leave a zero-window pcb as an indefinite zombie.
+     The RTO mechanism has similar behavior, see pcb->nrtx */
+ if (pcb->persist_probe < 0xFF) {
+ ++pcb->persist_probe;
+ }
+
+ is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0);
+ /* we want to send one seqno: either FIN or data (no options) */
+ len = is_fin ? 0 : 1;
+
+ p = tcp_output_alloc_header(pcb, optlen, len, seg->tcphdr->seqno);
+ if (p == NULL) {
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n"));
+ return ERR_MEM;
+ }
+ tcphdr = (struct tcp_hdr *)p->payload;
+
+ if (is_fin) {
+ /* FIN segment, no data */
+ TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN);
+ } else {
+    /* Data segment, copy in one byte from the head of the unsent queue */
+ char *d = ((char *)p->payload + TCP_HLEN);
+ /* Depending on whether the segment has already been sent (unacked) or not
+ (unsent), seg->p->payload points to the IP header or TCP header.
+ Ensure we copy the first TCP data byte: */
+ pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len);
+ }
+
+ /* The byte may be acknowledged without the window being opened. */
+ snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1;
+ if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) {
+ pcb->snd_nxt = snd_nxt;
+ }
+ tcp_output_fill_options(pcb, p, 0, optlen);
+
+ err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip);
+
+ LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F
+ " ackno %"U32_F" err %d.\n",
+ pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err));
+ return err;
+}
+#endif /* LWIP_TCP */
diff --git a/lwip/src/core/timeouts.c b/lwip/src/core/timeouts.c
new file mode 100644
index 0000000..f37acfe
--- /dev/null
+++ b/lwip/src/core/timeouts.c
@@ -0,0 +1,451 @@
+/**
+ * @file
+ * Stack-internal timers implementation.
+ * This file includes timer callbacks for stack-internal timers as well as
+ * functions to set up or stop timers and check for expired timers.
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ * Simon Goldschmidt
+ *
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/timeouts.h"
+#include "lwip/priv/tcp_priv.h"
+
+#include "lwip/def.h"
+#include "lwip/memp.h"
+#include "lwip/priv/tcpip_priv.h"
+
+#include "lwip/ip4_frag.h"
+#include "lwip/etharp.h"
+#include "lwip/dhcp.h"
+#include "lwip/autoip.h"
+#include "lwip/igmp.h"
+#include "lwip/dns.h"
+#include "lwip/nd6.h"
+#include "lwip/ip6_frag.h"
+#include "lwip/mld6.h"
+#include "lwip/dhcp6.h"
+#include "lwip/sys.h"
+#include "lwip/pbuf.h"
+
+#if LWIP_DEBUG_TIMERNAMES
+#define HANDLER(x) x, #x
+#else /* LWIP_DEBUG_TIMERNAMES */
+#define HANDLER(x) x
+#endif /* LWIP_DEBUG_TIMERNAMES */
+
+#define LWIP_MAX_TIMEOUT 0x7fffffff
+
+/* Evaluates to 1 if time 't' is before 'compare_to', taking u32_t wraparound into account */
+#define TIME_LESS_THAN(t, compare_to) ( (((u32_t)((t)-(compare_to))) > LWIP_MAX_TIMEOUT) ? 1 : 0 )
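+
+/* Example: TIME_LESS_THAN(10, 20) evaluates (u32_t)(10 - 20) = 0xfffffff6,
+   which is > LWIP_MAX_TIMEOUT, so the result is 1 (10 is earlier). Across a
+   rollover, TIME_LESS_THAN(0xfffffff0, 0x00000010) gives 0xffffffe0 -> 1, so
+   a timeout due just before the u32_t wraparound still sorts before one due
+   just after it. */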
+
+/** This array contains all stack-internal cyclic timers. To get the number of
+ * timers, use LWIP_ARRAYSIZE() */
+const struct lwip_cyclic_timer lwip_cyclic_timers[] = {
+#if LWIP_TCP
+  /* The TCP timer is a special case: it does not always have to run and
+     is started on demand from TCP via tcp_timer_needed() */
+ {TCP_TMR_INTERVAL, HANDLER(tcp_tmr)},
+#endif /* LWIP_TCP */
+#if LWIP_IPV4
+#if IP_REASSEMBLY
+ {IP_TMR_INTERVAL, HANDLER(ip_reass_tmr)},
+#endif /* IP_REASSEMBLY */
+#if LWIP_ARP
+ {ARP_TMR_INTERVAL, HANDLER(etharp_tmr)},
+#endif /* LWIP_ARP */
+#if LWIP_DHCP
+ {DHCP_COARSE_TIMER_MSECS, HANDLER(dhcp_coarse_tmr)},
+ {DHCP_FINE_TIMER_MSECS, HANDLER(dhcp_fine_tmr)},
+#endif /* LWIP_DHCP */
+#if LWIP_AUTOIP
+ {AUTOIP_TMR_INTERVAL, HANDLER(autoip_tmr)},
+#endif /* LWIP_AUTOIP */
+#if LWIP_IGMP
+ {IGMP_TMR_INTERVAL, HANDLER(igmp_tmr)},
+#endif /* LWIP_IGMP */
+#endif /* LWIP_IPV4 */
+#if LWIP_DNS
+ {DNS_TMR_INTERVAL, HANDLER(dns_tmr)},
+#endif /* LWIP_DNS */
+#if LWIP_IPV6
+ {ND6_TMR_INTERVAL, HANDLER(nd6_tmr)},
+#if LWIP_IPV6_REASS
+ {IP6_REASS_TMR_INTERVAL, HANDLER(ip6_reass_tmr)},
+#endif /* LWIP_IPV6_REASS */
+#if LWIP_IPV6_MLD
+ {MLD6_TMR_INTERVAL, HANDLER(mld6_tmr)},
+#endif /* LWIP_IPV6_MLD */
+#if LWIP_IPV6_DHCP6
+ {DHCP6_TIMER_MSECS, HANDLER(dhcp6_tmr)},
+#endif /* LWIP_IPV6_DHCP6 */
+#endif /* LWIP_IPV6 */
+};
+const int lwip_num_cyclic_timers = LWIP_ARRAYSIZE(lwip_cyclic_timers);
+
+#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM
+
+/** The one and only timeout list */
+static struct sys_timeo *next_timeout;
+
+static u32_t current_timeout_due_time;
+
+#if LWIP_TESTMODE
+struct sys_timeo**
+sys_timeouts_get_next_timeout(void)
+{
+ return &next_timeout;
+}
+#endif
+
+#if LWIP_TCP
+/** global variable that shows if the tcp timer is currently scheduled or not */
+static int tcpip_tcp_timer_active;
+
+/**
+ * Timer callback function that calls tcp_tmr() and reschedules itself.
+ *
+ * @param arg unused argument
+ */
+static void
+tcpip_tcp_timer(void *arg)
+{
+ LWIP_UNUSED_ARG(arg);
+
+ /* call TCP timer handler */
+ tcp_tmr();
+ /* timer still needed? */
+ if (tcp_active_pcbs || tcp_tw_pcbs) {
+ /* restart timer */
+ sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL);
+ } else {
+ /* disable timer */
+ tcpip_tcp_timer_active = 0;
+ }
+}
+
+/**
+ * Called from TCP_REG when registering a new PCB:
+ * the reason is to have the TCP timer running only when
+ * there are active (or time-wait) PCBs.
+ */
+void
+tcp_timer_needed(void)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* timer is off but needed again? */
+ if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) {
+ /* enable and start timer */
+ tcpip_tcp_timer_active = 1;
+ sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL);
+ }
+}
+#endif /* LWIP_TCP */
+
+static void
+#if LWIP_DEBUG_TIMERNAMES
+sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg, const char *handler_name)
+#else /* LWIP_DEBUG_TIMERNAMES */
+sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg)
+#endif
+{
+ struct sys_timeo *timeout, *t;
+
+ timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT);
+ if (timeout == NULL) {
+ LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL);
+ return;
+ }
+
+ timeout->next = NULL;
+ timeout->h = handler;
+ timeout->arg = arg;
+ timeout->time = abs_time;
+
+#if LWIP_DEBUG_TIMERNAMES
+ timeout->handler_name = handler_name;
+ LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p abs_time=%"U32_F" handler=%s arg=%p\n",
+ (void *)timeout, abs_time, handler_name, (void *)arg));
+#endif /* LWIP_DEBUG_TIMERNAMES */
+
+ if (next_timeout == NULL) {
+ next_timeout = timeout;
+ return;
+ }
+ if (TIME_LESS_THAN(timeout->time, next_timeout->time)) {
+ timeout->next = next_timeout;
+ next_timeout = timeout;
+ } else {
+ for (t = next_timeout; t != NULL; t = t->next) {
+ if ((t->next == NULL) || TIME_LESS_THAN(timeout->time, t->next->time)) {
+ timeout->next = t->next;
+ t->next = timeout;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * Timer callback function that calls cyclic->handler() and reschedules itself.
+ *
+ * @param arg unused argument
+ */
+#if !LWIP_TESTMODE
+static
+#endif
+void
+lwip_cyclic_timer(void *arg)
+{
+ u32_t now;
+ u32_t next_timeout_time;
+ const struct lwip_cyclic_timer *cyclic = (const struct lwip_cyclic_timer *)arg;
+
+#if LWIP_DEBUG_TIMERNAMES
+ LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: %s()\n", cyclic->handler_name));
+#endif
+ cyclic->handler();
+
+ now = sys_now();
+ next_timeout_time = (u32_t)(current_timeout_due_time + cyclic->interval_ms); /* overflow handled by TIME_LESS_THAN macro */
+ if (TIME_LESS_THAN(next_timeout_time, now)) {
+ /* timer would immediately expire again -> "overload" -> restart without any correction */
+#if LWIP_DEBUG_TIMERNAMES
+ sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg, cyclic->handler_name);
+#else
+ sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg);
+#endif
+
+ } else {
+ /* correct cyclic interval with handler execution delay and sys_check_timeouts jitter */
+#if LWIP_DEBUG_TIMERNAMES
+ sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg, cyclic->handler_name);
+#else
+ sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg);
+#endif
+ }
+}
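+
+/* Example of the rescheduling above: a 100 ms cyclic timer due at t=1000
+   whose handler finishes at t=1030 is re-armed for t=1100 (due time plus
+   interval), not t=1130, so handler latency does not accumulate. Only when
+   the next due time has already passed ("overload") is the timer restarted
+   relative to now. */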
+
+/** Initialize this module */
+void sys_timeouts_init(void)
+{
+ size_t i;
+ /* tcp_tmr() at index 0 is started on demand */
+ for (i = (LWIP_TCP ? 1 : 0); i < LWIP_ARRAYSIZE(lwip_cyclic_timers); i++) {
+    /* we have to cast away const (via LWIP_CONST_CAST) to get rid of the
+       const warning (this is OK as lwip_cyclic_timer() casts back to const*) */
+ sys_timeout(lwip_cyclic_timers[i].interval_ms, lwip_cyclic_timer, LWIP_CONST_CAST(void *, &lwip_cyclic_timers[i]));
+ }
+}
+
+/**
+ * Create a one-shot timer (aka timeout). Timeouts are processed in the
+ * following cases:
+ * - while waiting for a message using sys_timeouts_mbox_fetch()
+ * - by calling sys_check_timeouts() (NO_SYS==1 only)
+ *
+ * @param msecs time in milliseconds after that the timer should expire
+ * @param handler callback function to call when msecs have elapsed
+ * @param arg argument to pass to the callback function
+ */
+#if LWIP_DEBUG_TIMERNAMES
+void
+sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char *handler_name)
+#else /* LWIP_DEBUG_TIMERNAMES */
+void
+sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg)
+#endif /* LWIP_DEBUG_TIMERNAMES */
+{
+ u32_t next_timeout_time;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ASSERT("Timeout time too long, max is LWIP_UINT32_MAX/4 msecs", msecs <= (LWIP_UINT32_MAX / 4));
+
+ next_timeout_time = (u32_t)(sys_now() + msecs); /* overflow handled by TIME_LESS_THAN macro */
+
+#if LWIP_DEBUG_TIMERNAMES
+ sys_timeout_abs(next_timeout_time, handler, arg, handler_name);
+#else
+ sys_timeout_abs(next_timeout_time, handler, arg);
+#endif
+}
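+
+/* Minimal usage sketch (my_timeout/do_periodic_work are hypothetical names):
+   arm a one-shot timeout and re-arm it from the handler to make it periodic;
+   sys_untimeout(my_timeout, NULL) would cancel it while still pending.
+
+   static void my_timeout(void *arg)
+   {
+     LWIP_UNUSED_ARG(arg);
+     do_periodic_work();
+     sys_timeout(500, my_timeout, NULL);
+   }
+
+   sys_timeout(500, my_timeout, NULL);
+*/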
+
+/**
+ * Go through timeout list (for this task only) and remove the first matching
+ * entry (subsequent entries remain untouched), even though the timeout has not
+ * triggered yet.
+ *
+ * @param handler callback function that would be called by the timeout
+ * @param arg callback argument that would be passed to handler
+*/
+void
+sys_untimeout(sys_timeout_handler handler, void *arg)
+{
+ struct sys_timeo *prev_t, *t;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (next_timeout == NULL) {
+ return;
+ }
+
+ for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) {
+ if ((t->h == handler) && (t->arg == arg)) {
+ /* We have a match */
+ /* Unlink from previous in list */
+ if (prev_t == NULL) {
+ next_timeout = t->next;
+ } else {
+ prev_t->next = t->next;
+ }
+ memp_free(MEMP_SYS_TIMEOUT, t);
+ return;
+ }
+ }
+ return;
+}
+
+/**
+ * @ingroup lwip_nosys
+ * Handle timeouts for NO_SYS==1 (i.e. without using
+ * tcpip_thread/sys_timeouts_mbox_fetch()). Uses sys_now() to call timeout
+ * handler functions when timeouts expire.
+ *
+ * Must be called periodically from your main loop.
+ */
+void
+sys_check_timeouts(void)
+{
+ u32_t now;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ /* Process only timers expired at the start of the function. */
+ now = sys_now();
+
+ do {
+ struct sys_timeo *tmptimeout;
+ sys_timeout_handler handler;
+ void *arg;
+
+ PBUF_CHECK_FREE_OOSEQ();
+
+ tmptimeout = next_timeout;
+ if (tmptimeout == NULL) {
+ return;
+ }
+
+ if (TIME_LESS_THAN(now, tmptimeout->time)) {
+ return;
+ }
+
+ /* Timeout has expired */
+ next_timeout = tmptimeout->next;
+ handler = tmptimeout->h;
+ arg = tmptimeout->arg;
+ current_timeout_due_time = tmptimeout->time;
+#if LWIP_DEBUG_TIMERNAMES
+ if (handler != NULL) {
+ LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s t=%"U32_F" arg=%p\n",
+ tmptimeout->handler_name, sys_now() - tmptimeout->time, arg));
+ }
+#endif /* LWIP_DEBUG_TIMERNAMES */
+ memp_free(MEMP_SYS_TIMEOUT, tmptimeout);
+ if (handler != NULL) {
+ handler(arg);
+ }
+ LWIP_TCPIP_THREAD_ALIVE();
+
+ /* Repeat until all expired timers have been called */
+ } while (1);
+}
+
+/** Rebase the timeout times to the current time.
+ * This is necessary if sys_check_timeouts() hasn't been called for a long
+ * time (e.g. while saving energy) to prevent all timer functions of that
+ * period being called.
+ */
+void
+sys_restart_timeouts(void)
+{
+ u32_t now;
+ u32_t base;
+ struct sys_timeo *t;
+
+ if (next_timeout == NULL) {
+ return;
+ }
+
+ now = sys_now();
+ base = next_timeout->time;
+
+ for (t = next_timeout; t != NULL; t = t->next) {
+ t->time = (t->time - base) + now;
+ }
+}
+
+/** Return the time left before the next timeout is due. If no timeouts are
+ * enqueued, returns SYS_TIMEOUTS_SLEEPTIME_INFINITE (0xffffffff)
+ */
+u32_t
+sys_timeouts_sleeptime(void)
+{
+ u32_t now;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (next_timeout == NULL) {
+ return SYS_TIMEOUTS_SLEEPTIME_INFINITE;
+ }
+ now = sys_now();
+ if (TIME_LESS_THAN(next_timeout->time, now)) {
+ return 0;
+ } else {
+ u32_t ret = (u32_t)(next_timeout->time - now);
+ LWIP_ASSERT("invalid sleeptime", ret <= LWIP_MAX_TIMEOUT);
+ return ret;
+ }
+}
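+
+/* Minimal NO_SYS==1 main-loop sketch (the wait/poll helpers are hypothetical
+   names) combining sys_check_timeouts() with sys_timeouts_sleeptime() so the
+   loop sleeps only as long as no timeout is due:
+
+   while (1) {
+     u32_t sleep_ms = sys_timeouts_sleeptime();
+     if (sleep_ms == SYS_TIMEOUTS_SLEEPTIME_INFINITE) {
+       wait_for_packet();
+     } else {
+       wait_for_packet_or_timeout(sleep_ms);
+     }
+     poll_driver_and_feed_stack();
+     sys_check_timeouts();
+   }
+*/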
+
+#else /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */
+/* Satisfy the TCP code which calls this function */
+void
+tcp_timer_needed(void)
+{
+}
+#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */
diff --git a/lwip/src/core/udp.c b/lwip/src/core/udp.c
new file mode 100644
index 0000000..9d2cb4a
--- /dev/null
+++ b/lwip/src/core/udp.c
@@ -0,0 +1,1314 @@
+/**
+ * @file
+ * User Datagram Protocol module\n
+ * The code for the User Datagram Protocol UDP & UDPLite (RFC 3828).\n
+ * See also @ref udp_raw
+ *
+ * @defgroup udp_raw UDP
+ * @ingroup callbackstyle_api
+ * User Datagram Protocol module\n
+ * @see @ref api
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+/* @todo Check the use of '(struct udp_pcb).chksum_len_rx'!
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/udp.h"
+#include "lwip/def.h"
+#include "lwip/memp.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/ip_addr.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/netif.h"
+#include "lwip/icmp.h"
+#include "lwip/icmp6.h"
+#include "lwip/stats.h"
+#include "lwip/snmp.h"
+#include "lwip/dhcp.h"
+
+#include <string.h>
+
+#ifndef UDP_LOCAL_PORT_RANGE_START
+/* From http://www.iana.org/assignments/port-numbers:
+ "The Dynamic and/or Private Ports are those from 49152 through 65535" */
+#define UDP_LOCAL_PORT_RANGE_START 0xc000
+#define UDP_LOCAL_PORT_RANGE_END 0xffff
+#define UDP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~UDP_LOCAL_PORT_RANGE_START) + UDP_LOCAL_PORT_RANGE_START))
+#endif
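+
+/* Example: UDP_ENSURE_LOCAL_PORT_RANGE(0x1234) masks with
+   ~UDP_LOCAL_PORT_RANGE_START (0x3fff) and adds 0xc000, giving 0xd234;
+   any input value therefore lands in [0xc000, 0xffff]. */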
+
+/* last local UDP port */
+static u16_t udp_port = UDP_LOCAL_PORT_RANGE_START;
+
+/* The list of UDP PCBs */
+/* exported in udp.h (was static) */
+struct udp_pcb *udp_pcbs;
+
+/**
+ * Initialize this module.
+ */
+void
+udp_init(void)
+{
+#ifdef LWIP_RAND
+ udp_port = UDP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND());
+#endif /* LWIP_RAND */
+}
+
+/**
+ * Allocate a new local UDP port.
+ *
+ * @return a new (free) local UDP port number
+ */
+static u16_t
+udp_new_port(void)
+{
+ u16_t n = 0;
+ struct udp_pcb *pcb;
+
+again:
+ if (udp_port++ == UDP_LOCAL_PORT_RANGE_END) {
+ udp_port = UDP_LOCAL_PORT_RANGE_START;
+ }
+ /* Check all PCBs. */
+ for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) {
+ if (pcb->local_port == udp_port) {
+ if (++n > (UDP_LOCAL_PORT_RANGE_END - UDP_LOCAL_PORT_RANGE_START)) {
+ return 0;
+ }
+ goto again;
+ }
+ }
+ return udp_port;
+}
+
+/** Common code to see if the current input packet matches the pcb
+ * (current input packet is accessed via ip(4/6)_current_* macros)
+ *
+ * @param pcb pcb to check
+ * @param inp network interface on which the datagram was received (only used for IPv4)
+ * @param broadcast 1 if this is an IPv4 broadcast (global or subnet-only), 0 otherwise (only used for IPv4)
+ * @return 1 on match, 0 otherwise
+ */
+static u8_t
+udp_input_local_match(struct udp_pcb *pcb, struct netif *inp, u8_t broadcast)
+{
+ LWIP_UNUSED_ARG(inp); /* in IPv6 only case */
+ LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */
+
+ LWIP_ASSERT("udp_input_local_match: invalid pcb", pcb != NULL);
+ LWIP_ASSERT("udp_input_local_match: invalid netif", inp != NULL);
+
+ /* check if PCB is bound to specific netif */
+ if ((pcb->netif_idx != NETIF_NO_INDEX) &&
+ (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) {
+ return 0;
+ }
+
+ /* Dual-stack: PCBs listening to any IP type also listen to any IP address */
+ if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
+#if LWIP_IPV4 && IP_SOF_BROADCAST_RECV
+ if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) {
+ return 0;
+ }
+#endif /* LWIP_IPV4 && IP_SOF_BROADCAST_RECV */
+ return 1;
+ }
+
+ /* Only need to check PCB if incoming IP version matches PCB IP version */
+ if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) {
+#if LWIP_IPV4
+ /* Special case: IPv4 broadcast: all or broadcasts in my subnet
+ * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */
+ if (broadcast != 0) {
+#if IP_SOF_BROADCAST_RECV
+ if (ip_get_option(pcb, SOF_BROADCAST))
+#endif /* IP_SOF_BROADCAST_RECV */
+ {
+ if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) ||
+ ((ip4_current_dest_addr()->addr == IPADDR_BROADCAST)) ||
+ ip4_addr_netcmp(ip_2_ip4(&pcb->local_ip), ip4_current_dest_addr(), netif_ip4_netmask(inp))) {
+ return 1;
+ }
+ }
+ } else
+#endif /* LWIP_IPV4 */
+ /* Handle IPv4 and IPv6: all or exact match */
+ if (ip_addr_isany(&pcb->local_ip) || ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Process an incoming UDP datagram.
+ *
+ * Given an incoming UDP datagram (as a chain of pbufs) this function
+ * finds a corresponding UDP PCB and hands over the pbuf to the pcb's
+ * recv function. If no pcb is found or the datagram is incorrect, the
+ * pbuf is freed.
+ *
+ * @param p pbuf to be demultiplexed to a UDP PCB (p->payload pointing to the UDP header)
+ * @param inp network interface on which the datagram was received.
+ *
+ */
+void
+udp_input(struct pbuf *p, struct netif *inp)
+{
+ struct udp_hdr *udphdr;
+ struct udp_pcb *pcb, *prev;
+ struct udp_pcb *uncon_pcb;
+ u16_t src, dest;
+ u8_t broadcast;
+ u8_t for_us = 0;
+
+ LWIP_UNUSED_ARG(inp);
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ASSERT("udp_input: invalid pbuf", p != NULL);
+ LWIP_ASSERT("udp_input: invalid netif", inp != NULL);
+
+ PERF_START;
+
+ UDP_STATS_INC(udp.recv);
+
+ /* Check minimum length (UDP header) */
+ if (p->len < UDP_HLEN) {
+ /* drop short packets */
+ LWIP_DEBUGF(UDP_DEBUG,
+ ("udp_input: short UDP datagram (%"U16_F" bytes) discarded\n", p->tot_len));
+ UDP_STATS_INC(udp.lenerr);
+ UDP_STATS_INC(udp.drop);
+ MIB2_STATS_INC(mib2.udpinerrors);
+ pbuf_free(p);
+ goto end;
+ }
+
+ udphdr = (struct udp_hdr *)p->payload;
+
+ /* is broadcast packet ? */
+ broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif());
+
+ LWIP_DEBUGF(UDP_DEBUG, ("udp_input: received datagram of length %"U16_F"\n", p->tot_len));
+
+ /* convert src and dest ports to host byte order */
+ src = lwip_ntohs(udphdr->src);
+ dest = lwip_ntohs(udphdr->dest);
+
+ udp_debug_print(udphdr);
+
+ /* print the UDP source and destination */
+ LWIP_DEBUGF(UDP_DEBUG, ("udp ("));
+ ip_addr_debug_print_val(UDP_DEBUG, *ip_current_dest_addr());
+ LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", lwip_ntohs(udphdr->dest)));
+ ip_addr_debug_print_val(UDP_DEBUG, *ip_current_src_addr());
+ LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", lwip_ntohs(udphdr->src)));
+
+ pcb = NULL;
+ prev = NULL;
+ uncon_pcb = NULL;
+ /* Iterate through the UDP pcb list for a matching pcb.
+ * 'Perfect match' pcbs (connected to the remote port & ip address) are
+ * preferred. If no perfect match is found, the first unconnected pcb that
+ * matches the local port and ip address gets the datagram. */
+ for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) {
+ /* print the PCB local and remote address */
+ LWIP_DEBUGF(UDP_DEBUG, ("pcb ("));
+ ip_addr_debug_print_val(UDP_DEBUG, pcb->local_ip);
+ LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", pcb->local_port));
+ ip_addr_debug_print_val(UDP_DEBUG, pcb->remote_ip);
+ LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", pcb->remote_port));
+
+ /* compare PCB local addr+port to UDP destination addr+port */
+ if ((pcb->local_port == dest) &&
+ (udp_input_local_match(pcb, inp, broadcast) != 0)) {
+ if ((pcb->flags & UDP_FLAGS_CONNECTED) == 0) {
+ if (uncon_pcb == NULL) {
+ /* the first unconnected matching PCB */
+ uncon_pcb = pcb;
+#if LWIP_IPV4
+ } else if (broadcast && ip4_current_dest_addr()->addr == IPADDR_BROADCAST) {
+ /* global broadcast address (only valid for IPv4; match was checked before) */
+ if (!IP_IS_V4_VAL(uncon_pcb->local_ip) || !ip4_addr_cmp(ip_2_ip4(&uncon_pcb->local_ip), netif_ip4_addr(inp))) {
+ /* uncon_pcb does not match the input netif, check this pcb */
+ if (IP_IS_V4_VAL(pcb->local_ip) && ip4_addr_cmp(ip_2_ip4(&pcb->local_ip), netif_ip4_addr(inp))) {
+ /* better match */
+ uncon_pcb = pcb;
+ }
+ }
+#endif /* LWIP_IPV4 */
+ }
+#if SO_REUSE
+ else if (!ip_addr_isany(&pcb->local_ip)) {
+ /* prefer specific IPs over catch-all */
+ uncon_pcb = pcb;
+ }
+#endif /* SO_REUSE */
+ }
+
+ /* compare PCB remote addr+port to UDP source addr+port */
+ if ((pcb->remote_port == src) &&
+ (ip_addr_isany_val(pcb->remote_ip) ||
+ ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) {
+ /* the first fully matching PCB */
+ if (prev != NULL) {
+          /* move the pcb to the front of udp_pcbs so that it is
+             found faster next time */
+ prev->next = pcb->next;
+ pcb->next = udp_pcbs;
+ udp_pcbs = pcb;
+ } else {
+ UDP_STATS_INC(udp.cachehit);
+ }
+ break;
+ }
+ }
+
+ prev = pcb;
+ }
+ /* no fully matching pcb found? then look for an unconnected pcb */
+ if (pcb == NULL) {
+ pcb = uncon_pcb;
+ }
+
+ /* Check checksum if this is a match or if it was directed at us. */
+ if (pcb != NULL) {
+ for_us = 1;
+ } else {
+#if LWIP_IPV6
+ if (ip_current_is_v6()) {
+ for_us = netif_get_ip6_addr_match(inp, ip6_current_dest_addr()) >= 0;
+ }
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4
+ if (!ip_current_is_v6()) {
+ for_us = ip4_addr_cmp(netif_ip4_addr(inp), ip4_current_dest_addr());
+ }
+#endif /* LWIP_IPV4 */
+ }
+
+ if (for_us) {
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: calculating checksum\n"));
+#if CHECKSUM_CHECK_UDP
+ IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_UDP) {
+#if LWIP_UDPLITE
+ if (ip_current_header_proto() == IP_PROTO_UDPLITE) {
+ /* Do the UDP Lite checksum */
+ u16_t chklen = lwip_ntohs(udphdr->len);
+ if (chklen < sizeof(struct udp_hdr)) {
+ if (chklen == 0) {
+ /* For UDP-Lite, checksum length of 0 means checksum
+ over the complete packet (See RFC 3828 chap. 3.1) */
+ chklen = p->tot_len;
+ } else {
+ /* At least the UDP-Lite header must be covered by the
+ checksum! (Again, see RFC 3828 chap. 3.1) */
+ goto chkerr;
+ }
+ }
+ if (ip_chksum_pseudo_partial(p, IP_PROTO_UDPLITE,
+ p->tot_len, chklen,
+ ip_current_src_addr(), ip_current_dest_addr()) != 0) {
+ goto chkerr;
+ }
+ } else
+#endif /* LWIP_UDPLITE */
+ {
+ if (udphdr->chksum != 0) {
+ if (ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len,
+ ip_current_src_addr(),
+ ip_current_dest_addr()) != 0) {
+ goto chkerr;
+ }
+ }
+ }
+ }
+#endif /* CHECKSUM_CHECK_UDP */
+ if (pbuf_remove_header(p, UDP_HLEN)) {
+ /* Can we cope with this failing? Just assert for now */
+ LWIP_ASSERT("pbuf_remove_header failed\n", 0);
+ UDP_STATS_INC(udp.drop);
+ MIB2_STATS_INC(mib2.udpinerrors);
+ pbuf_free(p);
+ goto end;
+ }
+
+ if (pcb != NULL) {
+ MIB2_STATS_INC(mib2.udpindatagrams);
+#if SO_REUSE && SO_REUSE_RXTOALL
+ if (ip_get_option(pcb, SOF_REUSEADDR) &&
+ (broadcast || ip_addr_ismulticast(ip_current_dest_addr()))) {
+        /* pass a copy of broadcast or multicast packets to all matching pcbs
+           if SOF_REUSEADDR is set on the first match */
+ struct udp_pcb *mpcb;
+ for (mpcb = udp_pcbs; mpcb != NULL; mpcb = mpcb->next) {
+ if (mpcb != pcb) {
+ /* compare PCB local addr+port to UDP destination addr+port */
+ if ((mpcb->local_port == dest) &&
+ (udp_input_local_match(mpcb, inp, broadcast) != 0)) {
+ /* pass a copy of the packet to all local matches */
+ if (mpcb->recv != NULL) {
+ struct pbuf *q;
+ q = pbuf_clone(PBUF_RAW, PBUF_POOL, p);
+ if (q != NULL) {
+ mpcb->recv(mpcb->recv_arg, mpcb, q, ip_current_src_addr(), src);
+ }
+ }
+ }
+ }
+ }
+ }
+#endif /* SO_REUSE && SO_REUSE_RXTOALL */
+ /* callback */
+ if (pcb->recv != NULL) {
+ /* now the recv function is responsible for freeing p */
+ pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr(), src);
+ } else {
+ /* no recv function registered? then we have to free the pbuf! */
+ pbuf_free(p);
+ goto end;
+ }
+ } else {
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: not for us.\n"));
+
+#if LWIP_ICMP || LWIP_ICMP6
+ /* No match was found, send ICMP destination port unreachable unless
+ destination address was broadcast/multicast. */
+ if (!broadcast && !ip_addr_ismulticast(ip_current_dest_addr())) {
+ /* move payload pointer back to ip header */
+ pbuf_header_force(p, (s16_t)(ip_current_header_tot_len() + UDP_HLEN));
+ icmp_port_unreach(ip_current_is_v6(), p);
+ }
+#endif /* LWIP_ICMP || LWIP_ICMP6 */
+ UDP_STATS_INC(udp.proterr);
+ UDP_STATS_INC(udp.drop);
+ MIB2_STATS_INC(mib2.udpnoports);
+ pbuf_free(p);
+ }
+ } else {
+ pbuf_free(p);
+ }
+end:
+ PERF_STOP("udp_input");
+ return;
+#if CHECKSUM_CHECK_UDP
+chkerr:
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("udp_input: UDP (or UDP Lite) datagram discarded due to failing checksum\n"));
+ UDP_STATS_INC(udp.chkerr);
+ UDP_STATS_INC(udp.drop);
+ MIB2_STATS_INC(mib2.udpinerrors);
+ pbuf_free(p);
+ PERF_STOP("udp_input");
+#endif /* CHECKSUM_CHECK_UDP */
+}
+
+/**
+ * @ingroup udp_raw
+ * Sends the pbuf p using UDP. The pbuf is not deallocated.
+ *
+ *
+ * @param pcb UDP PCB used to send the data.
+ * @param p chain of pbuf's to be sent.
+ *
+ * The datagram will be sent to the current remote_ip & remote_port
+ * stored in pcb. If the pcb is not bound to a port, it will
+ * automatically be bound to a random port.
+ *
+ * @return lwIP error code.
+ * - ERR_OK. Successful. No error occurred.
+ * - ERR_MEM. Out of memory.
+ * - ERR_RTE. Could not find route to destination address.
+ * - ERR_VAL. No PCB or PCB is dual-stack
+ * - More errors could be returned by lower protocol layers.
+ *
+ * @see udp_disconnect() udp_sendto()
+ */
+err_t
+udp_send(struct udp_pcb *pcb, struct pbuf *p)
+{
+ LWIP_ERROR("udp_send: invalid pcb", pcb != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_send: invalid pbuf", p != NULL, return ERR_ARG);
+
+ if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) {
+ return ERR_VAL;
+ }
+
+  /* send the packet using the remote ip and port stored in the pcb */
+ return udp_sendto(pcb, p, &pcb->remote_ip, pcb->remote_port);
+}
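+
+/* Minimal raw-API sketch of the connected send path (address, port and
+   payload are illustrative; error checking omitted for brevity):
+
+   struct udp_pcb *pcb = udp_new();
+   ip_addr_t dst;
+   IP_ADDR4(&dst, 192, 168, 1, 10);
+   udp_connect(pcb, &dst, 7);
+   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 4, PBUF_RAM);
+   MEMCPY(p->payload, "ping", 4);
+   udp_send(pcb, p);
+   pbuf_free(p);
+*/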
+
+#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP
+/** @ingroup udp_raw
+ * Same as udp_send() but with checksum
+ */
+err_t
+udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p,
+ u8_t have_chksum, u16_t chksum)
+{
+ LWIP_ERROR("udp_send_chksum: invalid pcb", pcb != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_send_chksum: invalid pbuf", p != NULL, return ERR_ARG);
+
+ if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) {
+ return ERR_VAL;
+ }
+
+  /* send the packet using the remote ip and port stored in the pcb */
+ return udp_sendto_chksum(pcb, p, &pcb->remote_ip, pcb->remote_port,
+ have_chksum, chksum);
+}
+#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */
+
+/**
+ * @ingroup udp_raw
+ * Send data to a specified address using UDP.
+ *
+ * @param pcb UDP PCB used to send the data.
+ * @param p chain of pbuf's to be sent.
+ * @param dst_ip Destination IP address.
+ * @param dst_port Destination UDP port.
+ *
+ * dst_ip & dst_port are expected to be in the same byte order as in the pcb.
+ *
+ * If the PCB already has a remote address association, it will
+ * be restored after the data is sent.
+ *
+ * @return lwIP error code (@see udp_send for possible error codes)
+ *
+ * @see udp_disconnect() udp_send()
+ */
+err_t
+udp_sendto(struct udp_pcb *pcb, struct pbuf *p,
+ const ip_addr_t *dst_ip, u16_t dst_port)
+{
+#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP
+ return udp_sendto_chksum(pcb, p, dst_ip, dst_port, 0, 0);
+}
+
+/** @ingroup udp_raw
+ * Same as udp_sendto(), but with checksum */
+err_t
+udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip,
+ u16_t dst_port, u8_t have_chksum, u16_t chksum)
+{
+#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */
+ struct netif *netif;
+
+ LWIP_ERROR("udp_sendto: invalid pcb", pcb != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_sendto: invalid pbuf", p != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_sendto: invalid dst_ip", dst_ip != NULL, return ERR_ARG);
+
+ if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) {
+ return ERR_VAL;
+ }
+
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send\n"));
+
+ if (pcb->netif_idx != NETIF_NO_INDEX) {
+ netif = netif_get_by_index(pcb->netif_idx);
+ } else {
+#if LWIP_MULTICAST_TX_OPTIONS
+ netif = NULL;
+ if (ip_addr_ismulticast(dst_ip)) {
+ /* For IPv6, the interface to use for packets with a multicast destination
+ * is specified using an interface index. The same approach may be used for
+ * IPv4 as well, in which case it overrides the IPv4 multicast override
+ * address below. Here we have to look up the netif by going through the
+ * list, but by doing so we skip a route lookup. If the interface index has
+ * gone stale, we fall through and do the regular route lookup after all. */
+ if (pcb->mcast_ifindex != NETIF_NO_INDEX) {
+ netif = netif_get_by_index(pcb->mcast_ifindex);
+ }
+#if LWIP_IPV4
+ else
+#if LWIP_IPV6
+ if (IP_IS_V4(dst_ip))
+#endif /* LWIP_IPV6 */
+ {
+ /* IPv4 does not use source-based routing by default, so we use an
+ administratively selected interface for multicast by default.
+ However, this can be overridden by setting an interface address
+ in pcb->mcast_ip4 that is used for routing. If this routing lookup
+ fails, we try regular routing as though no override was set. */
+ if (!ip4_addr_isany_val(pcb->mcast_ip4) &&
+ !ip4_addr_cmp(&pcb->mcast_ip4, IP4_ADDR_BROADCAST)) {
+ netif = ip4_route_src(ip_2_ip4(&pcb->local_ip), &pcb->mcast_ip4);
+ }
+ }
+#endif /* LWIP_IPV4 */
+ }
+
+ if (netif == NULL)
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+ {
+ /* find the outgoing network interface for this packet */
+ netif = ip_route(&pcb->local_ip, dst_ip);
+ }
+ }
+
+ /* no outgoing network interface could be found? */
+ if (netif == NULL) {
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: No route to "));
+ ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, dst_ip);
+ LWIP_DEBUGF(UDP_DEBUG, ("\n"));
+ UDP_STATS_INC(udp.rterr);
+ return ERR_RTE;
+ }
+#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP
+ return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum);
+#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */
+ return udp_sendto_if(pcb, p, dst_ip, dst_port, netif);
+#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */
+}
+
+/**
+ * @ingroup udp_raw
+ * Send data to a specified address using UDP.
+ * The netif used for sending can be specified.
+ *
+ * This function exists mainly for DHCP, to be able to send UDP packets
+ * on a netif that is still down.
+ *
+ * @param pcb UDP PCB used to send the data.
+ * @param p chain of pbuf's to be sent.
+ * @param dst_ip Destination IP address.
+ * @param dst_port Destination UDP port.
+ * @param netif the netif used for sending.
+ *
+ * dst_ip & dst_port are expected to be in the same byte order as in the pcb.
+ *
+ * @return lwIP error code (@see udp_send for possible error codes)
+ *
+ * @see udp_disconnect() udp_send()
+ */
+err_t
+udp_sendto_if(struct udp_pcb *pcb, struct pbuf *p,
+ const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif)
+{
+#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP
+ return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0);
+}
+
+/** Same as udp_sendto_if(), but with checksum */
+err_t
+udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip,
+ u16_t dst_port, struct netif *netif, u8_t have_chksum,
+ u16_t chksum)
+{
+#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */
+ const ip_addr_t *src_ip;
+
+ LWIP_ERROR("udp_sendto_if: invalid pcb", pcb != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_sendto_if: invalid pbuf", p != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_sendto_if: invalid dst_ip", dst_ip != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_sendto_if: invalid netif", netif != NULL, return ERR_ARG);
+
+ if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) {
+ return ERR_VAL;
+ }
+
+ /* PCB local address is IP_ANY_ADDR or multicast? */
+#if LWIP_IPV6
+ if (IP_IS_V6(dst_ip)) {
+ if (ip6_addr_isany(ip_2_ip6(&pcb->local_ip)) ||
+ ip6_addr_ismulticast(ip_2_ip6(&pcb->local_ip))) {
+ src_ip = ip6_select_source_address(netif, ip_2_ip6(dst_ip));
+ if (src_ip == NULL) {
+ /* No suitable source address was found. */
+ return ERR_RTE;
+ }
+ } else {
+ /* use UDP PCB local IPv6 address as source address, if still valid. */
+ if (netif_get_ip6_addr_match(netif, ip_2_ip6(&pcb->local_ip)) < 0) {
+ /* Address isn't valid anymore. */
+ return ERR_RTE;
+ }
+ src_ip = &pcb->local_ip;
+ }
+ }
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4 && LWIP_IPV6
+ else
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+#if LWIP_IPV4
+ if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) ||
+ ip4_addr_ismulticast(ip_2_ip4(&pcb->local_ip))) {
+ /* if the local_ip is any or multicast
+ * use the outgoing network interface IP address as source address */
+ src_ip = netif_ip_addr4(netif);
+ } else {
+ /* check if UDP PCB local IP address is correct
+ * this could be an old address if netif->ip_addr has changed */
+ if (!ip4_addr_cmp(ip_2_ip4(&(pcb->local_ip)), netif_ip4_addr(netif))) {
+ /* local_ip doesn't match, drop the packet */
+ return ERR_RTE;
+ }
+ /* use UDP PCB local IP address as source address */
+ src_ip = &pcb->local_ip;
+ }
+#endif /* LWIP_IPV4 */
+#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP
+ return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum, src_ip);
+#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */
+ return udp_sendto_if_src(pcb, p, dst_ip, dst_port, netif, src_ip);
+#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */
+}
+
+/** @ingroup udp_raw
+ * Same as @ref udp_sendto_if, but with source address */
+err_t
+udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p,
+ const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, const ip_addr_t *src_ip)
+{
+#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP
+ return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0, src_ip);
+}
+
+/** Same as udp_sendto_if_src(), but with checksum */
+err_t
+udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip,
+ u16_t dst_port, struct netif *netif, u8_t have_chksum,
+ u16_t chksum, const ip_addr_t *src_ip)
+{
+#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */
+ struct udp_hdr *udphdr;
+ err_t err;
+ struct pbuf *q; /* q will be sent down the stack */
+ u8_t ip_proto;
+ u8_t ttl;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("udp_sendto_if_src: invalid pcb", pcb != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_sendto_if_src: invalid pbuf", p != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_sendto_if_src: invalid dst_ip", dst_ip != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_sendto_if_src: invalid src_ip", src_ip != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_sendto_if_src: invalid netif", netif != NULL, return ERR_ARG);
+
+ if (!IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) ||
+ !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) {
+ return ERR_VAL;
+ }
+
+#if LWIP_IPV4 && IP_SOF_BROADCAST
+ /* broadcast filter? */
+ if (!ip_get_option(pcb, SOF_BROADCAST) &&
+#if LWIP_IPV6
+ IP_IS_V4(dst_ip) &&
+#endif /* LWIP_IPV6 */
+ ip_addr_isbroadcast(dst_ip, netif)) {
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
+ ("udp_sendto_if: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb));
+ return ERR_VAL;
+ }
+#endif /* LWIP_IPV4 && IP_SOF_BROADCAST */
+
+ /* if the PCB is not yet bound to a port, bind it here */
+ if (pcb->local_port == 0) {
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send: not yet bound to a port, binding now\n"));
+ err = udp_bind(pcb, &pcb->local_ip, pcb->local_port);
+ if (err != ERR_OK) {
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: forced port bind failed\n"));
+ return err;
+ }
+ }
+
+ /* packet too large to add a UDP header without causing an overflow? */
+ if ((u16_t)(p->tot_len + UDP_HLEN) < p->tot_len) {
+ return ERR_MEM;
+ }
+  /* not enough space to add a UDP header to the first pbuf in the given p chain? */
+ if (pbuf_add_header(p, UDP_HLEN)) {
+ /* allocate header in a separate new pbuf */
+ q = pbuf_alloc(PBUF_IP, UDP_HLEN, PBUF_RAM);
+ /* new header pbuf could not be allocated? */
+ if (q == NULL) {
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: could not allocate header\n"));
+ return ERR_MEM;
+ }
+ if (p->tot_len != 0) {
+ /* chain header q in front of given pbuf p (only if p contains data) */
+ pbuf_chain(q, p);
+ }
+ /* first pbuf q points to header pbuf */
+ LWIP_DEBUGF(UDP_DEBUG,
+ ("udp_send: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p));
+ } else {
+ /* adding space for header within p succeeded */
+ /* first pbuf q equals given pbuf */
+ q = p;
+ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: added header in given pbuf %p\n", (void *)p));
+ }
+ LWIP_ASSERT("check that first pbuf can hold struct udp_hdr",
+ (q->len >= sizeof(struct udp_hdr)));
+ /* q now represents the packet to be sent */
+ udphdr = (struct udp_hdr *)q->payload;
+ udphdr->src = lwip_htons(pcb->local_port);
+ udphdr->dest = lwip_htons(dst_port);
+ /* in UDP, 0 checksum means 'no checksum' */
+ udphdr->chksum = 0x0000;
+
+ /* Multicast Loop? */
+#if LWIP_MULTICAST_TX_OPTIONS
+ if (((pcb->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) {
+ q->flags |= PBUF_FLAG_MCASTLOOP;
+ }
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+
+ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: sending datagram of length %"U16_F"\n", q->tot_len));
+
+#if LWIP_UDPLITE
+ /* UDP Lite protocol? */
+ if (pcb->flags & UDP_FLAGS_UDPLITE) {
+ u16_t chklen, chklen_hdr;
+ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE packet length %"U16_F"\n", q->tot_len));
+ /* set UDP message length in UDP header */
+ chklen_hdr = chklen = pcb->chksum_len_tx;
+ if ((chklen < sizeof(struct udp_hdr)) || (chklen > q->tot_len)) {
+ if (chklen != 0) {
+ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE pcb->chksum_len is illegal: %"U16_F"\n", chklen));
+ }
+ /* For UDP-Lite, checksum length of 0 means checksum
+ over the complete packet. (See RFC 3828 chap. 3.1)
+ At least the UDP-Lite header must be covered by the
+ checksum, therefore, if chksum_len has an illegal
+ value, we generate the checksum over the complete
+ packet to be safe. */
+ chklen_hdr = 0;
+ chklen = q->tot_len;
+ }
+ udphdr->len = lwip_htons(chklen_hdr);
+ /* calculate checksum */
+#if CHECKSUM_GEN_UDP
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) {
+#if LWIP_CHECKSUM_ON_COPY
+ if (have_chksum) {
+ chklen = UDP_HLEN;
+ }
+#endif /* LWIP_CHECKSUM_ON_COPY */
+ udphdr->chksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDPLITE,
+ q->tot_len, chklen, src_ip, dst_ip);
+#if LWIP_CHECKSUM_ON_COPY
+ if (have_chksum) {
+ u32_t acc;
+ acc = udphdr->chksum + (u16_t)~(chksum);
+ udphdr->chksum = FOLD_U32T(acc);
+ }
+#endif /* LWIP_CHECKSUM_ON_COPY */
+
+ /* chksum zero must become 0xffff, as zero means 'no checksum' */
+ if (udphdr->chksum == 0x0000) {
+ udphdr->chksum = 0xffff;
+ }
+ }
+#endif /* CHECKSUM_GEN_UDP */
+
+ ip_proto = IP_PROTO_UDPLITE;
+ } else
+#endif /* LWIP_UDPLITE */
+ { /* UDP */
+ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP packet length %"U16_F"\n", q->tot_len));
+ udphdr->len = lwip_htons(q->tot_len);
+ /* calculate checksum */
+#if CHECKSUM_GEN_UDP
+ IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) {
+ /* Checksum is mandatory over IPv6. */
+ if (IP_IS_V6(dst_ip) || (pcb->flags & UDP_FLAGS_NOCHKSUM) == 0) {
+ u16_t udpchksum;
+#if LWIP_CHECKSUM_ON_COPY
+ if (have_chksum) {
+ u32_t acc;
+ udpchksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDP,
+ q->tot_len, UDP_HLEN, src_ip, dst_ip);
+ acc = udpchksum + (u16_t)~(chksum);
+ udpchksum = FOLD_U32T(acc);
+ } else
+#endif /* LWIP_CHECKSUM_ON_COPY */
+ {
+ udpchksum = ip_chksum_pseudo(q, IP_PROTO_UDP, q->tot_len,
+ src_ip, dst_ip);
+ }
+
+ /* chksum zero must become 0xffff, as zero means 'no checksum' */
+ if (udpchksum == 0x0000) {
+ udpchksum = 0xffff;
+ }
+ udphdr->chksum = udpchksum;
+ }
+ }
+#endif /* CHECKSUM_GEN_UDP */
+ ip_proto = IP_PROTO_UDP;
+ }
+
+ /* Determine TTL to use */
+#if LWIP_MULTICAST_TX_OPTIONS
+ ttl = (ip_addr_ismulticast(dst_ip) ? udp_get_multicast_ttl(pcb) : pcb->ttl);
+#else /* LWIP_MULTICAST_TX_OPTIONS */
+ ttl = pcb->ttl;
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+
+ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP checksum 0x%04"X16_F"\n", udphdr->chksum));
+ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: ip_output_if (,,,,0x%02"X16_F",)\n", (u16_t)ip_proto));
+ /* output to IP */
+ NETIF_SET_HINTS(netif, &(pcb->netif_hints));
+ err = ip_output_if_src(q, src_ip, dst_ip, ttl, pcb->tos, ip_proto, netif);
+ NETIF_RESET_HINTS(netif);
+
+ /* @todo: must this be increased even if error occurred? */
+ MIB2_STATS_INC(mib2.udpoutdatagrams);
+
+ /* did we chain a separate header pbuf earlier? */
+ if (q != p) {
+ /* free the header pbuf */
+ pbuf_free(q);
+ q = NULL;
+ /* p is still referenced by the caller, and will live on */
+ }
+
+ UDP_STATS_INC(udp.xmit);
+ return err;
+}
+
+/**
+ * @ingroup udp_raw
+ * Bind a UDP PCB.
+ *
+ * @param pcb UDP PCB to be bound with a local address ipaddr and port.
+ * @param ipaddr local IP address to bind with. Use IP_ANY_TYPE to
+ * bind to all local interfaces.
+ * @param port local UDP port to bind with. Use 0 to automatically bind
+ * to a random port between UDP_LOCAL_PORT_RANGE_START and
+ * UDP_LOCAL_PORT_RANGE_END.
+ *
+ * ipaddr & port are expected to be in the same byte order as in the pcb.
+ *
+ * @return lwIP error code.
+ * - ERR_OK. Successful. No error occurred.
+ * - ERR_USE. The specified ipaddr and port are already bound to by
+ * another UDP PCB.
+ *
+ * @see udp_disconnect()
+ */
+err_t
+udp_bind(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port)
+{
+ struct udp_pcb *ipcb;
+ u8_t rebind;
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+ ip_addr_t zoned_ipaddr;
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_IPV4
+ /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
+ if (ipaddr == NULL) {
+ ipaddr = IP4_ADDR_ANY;
+ }
+#else /* LWIP_IPV4 */
+ LWIP_ERROR("udp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG);
+#endif /* LWIP_IPV4 */
+
+ LWIP_ERROR("udp_bind: invalid pcb", pcb != NULL, return ERR_ARG);
+
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_bind(ipaddr = "));
+ ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE, ipaddr);
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, (", port = %"U16_F")\n", port));
+
+ rebind = 0;
+ /* Check for double bind and rebind of the same pcb */
+ for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) {
+ /* is this UDP PCB already on active list? */
+ if (pcb == ipcb) {
+ rebind = 1;
+ break;
+ }
+ }
+
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+ /* If the given IP address should have a zone but doesn't, assign one now.
+ * This is legacy support: scope-aware callers should always provide properly
+ * zoned source addresses. Do the zone selection before the address-in-use
+ * check below; as such we have to make a temporary copy of the address. */
+ if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNKNOWN)) {
+ ip_addr_copy(zoned_ipaddr, *ipaddr);
+ ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr));
+ ipaddr = &zoned_ipaddr;
+ }
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+
+ /* no port specified? */
+ if (port == 0) {
+ port = udp_new_port();
+ if (port == 0) {
+ /* no more ports available in local range */
+ LWIP_DEBUGF(UDP_DEBUG, ("udp_bind: out of free UDP ports\n"));
+ return ERR_USE;
+ }
+ } else {
+ for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) {
+ if (pcb != ipcb) {
+        /* By default, we don't allow binding to a port that any other udp
+           PCB is already bound to, unless *all* PCBs with that port have the
+           REUSEADDR flag set. */
+#if SO_REUSE
+ if (!ip_get_option(pcb, SOF_REUSEADDR) ||
+ !ip_get_option(ipcb, SOF_REUSEADDR))
+#endif /* SO_REUSE */
+ {
+ /* port matches that of PCB in list and REUSEADDR not set -> reject */
+ if ((ipcb->local_port == port) &&
+ /* IP address matches or any IP used? */
+ (ip_addr_cmp(&ipcb->local_ip, ipaddr) || ip_addr_isany(ipaddr) ||
+ ip_addr_isany(&ipcb->local_ip))) {
+ /* other PCB already binds to this local IP and port */
+ LWIP_DEBUGF(UDP_DEBUG,
+ ("udp_bind: local port %"U16_F" already bound by another pcb\n", port));
+ return ERR_USE;
+ }
+ }
+ }
+ }
+ }
+
+ ip_addr_set_ipaddr(&pcb->local_ip, ipaddr);
+
+ pcb->local_port = port;
+ mib2_udp_bind(pcb);
+ /* pcb not active yet? */
+ if (rebind == 0) {
+ /* place the PCB on the active list if not already there */
+ pcb->next = udp_pcbs;
+ udp_pcbs = pcb;
+ }
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_bind: bound to "));
+ ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, pcb->local_ip);
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->local_port));
+ return ERR_OK;
+}
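+
+/* Minimal server-side sketch (echo_recv is a hypothetical callback name;
+   error checking omitted): bind to all local addresses on port 7 and
+   register a receive callback. The callback owns and must free the pbuf.
+
+   static void echo_recv(void *arg, struct udp_pcb *upcb, struct pbuf *p,
+                         const ip_addr_t *addr, u16_t port)
+   {
+     LWIP_UNUSED_ARG(arg);
+     udp_sendto(upcb, p, addr, port);
+     pbuf_free(p);
+   }
+
+   struct udp_pcb *pcb = udp_new_ip_type(IPADDR_TYPE_ANY);
+   udp_bind(pcb, IP_ANY_TYPE, 7);
+   udp_recv(pcb, echo_recv, NULL);
+*/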
+
+/**
+ * @ingroup udp_raw
+ * Bind a UDP PCB to a specific netif.
+ * After calling this function, all packets received via this PCB
+ * are guaranteed to have come in via the specified netif, and all
+ * outgoing packets will go out via the specified netif.
+ *
+ * @param pcb UDP PCB to be bound.
+ * @param netif netif to bind udp pcb to. Can be NULL.
+ *
+ * @see udp_disconnect()
+ */
+void
+udp_bind_netif(struct udp_pcb *pcb, const struct netif *netif)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ if (netif != NULL) {
+ pcb->netif_idx = netif_get_index(netif);
+ } else {
+ pcb->netif_idx = NETIF_NO_INDEX;
+ }
+}
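+
+/* A short sketch, assuming "netif" is an already configured struct netif and
+ * "pcb" a valid UDP PCB:
+ *
+ * @code{.c}
+ * udp_bind_netif(pcb, &netif);  // traffic restricted to this netif
+ * udp_bind_netif(pcb, NULL);    // lift the restriction again
+ * @endcode
+ */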
+
+/**
+ * @ingroup udp_raw
+ * Sets the remote end of the pcb. This function does not generate any
+ * network traffic, but only sets the remote address of the pcb.
+ *
+ * @param pcb UDP PCB to be connected with remote address ipaddr and port.
+ * @param ipaddr remote IP address to connect with.
+ * @param port remote UDP port to connect with.
+ *
+ * @return lwIP error code
+ *
+ * ipaddr & port are expected to be in the same byte order as in the pcb.
+ *
+ * The udp pcb is bound to an ephemeral local port if not already bound.
+ *
+ * @see udp_disconnect()
+ */
+err_t
+udp_connect(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port)
+{
+ struct udp_pcb *ipcb;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("udp_connect: invalid pcb", pcb != NULL, return ERR_ARG);
+ LWIP_ERROR("udp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG);
+
+ if (pcb->local_port == 0) {
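+ /* no local port set yet: let udp_bind() pick an ephemeral one */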
+ err_t err = udp_bind(pcb, &pcb->local_ip, pcb->local_port);
+ if (err != ERR_OK) {
+ return err;
+ }
+ }
+
+ ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr);
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+ /* If the given IP address should have a zone but doesn't, assign one now,
+ * using the bound address to make a more informed decision when possible. */
+ if (IP_IS_V6(&pcb->remote_ip) &&
+ ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) {
+ ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip));
+ }
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+
+ pcb->remote_port = port;
+ pcb->flags |= UDP_FLAGS_CONNECTED;
+
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_connect: connected to "));
+ ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
+ pcb->remote_ip);
+ LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->remote_port));
+
+ /* Insert UDP PCB into the list of active UDP PCBs. */
+ for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) {
+ if (pcb == ipcb) {
+ /* already on the list, just return */
+ return ERR_OK;
+ }
+ }
+ /* PCB not yet on the list, add PCB now */
+ pcb->next = udp_pcbs;
+ udp_pcbs = pcb;
+ return ERR_OK;
+}
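+
+/* A minimal sketch of a "connected" UDP client; the peer address and port are
+ * placeholders. udp_send() uses the remote address stored by udp_connect(),
+ * and udp_disconnect() clears it again:
+ *
+ * @code{.c}
+ * ip_addr_t peer;
+ * IP_ADDR4(&peer, 192, 168, 1, 10);
+ * if (udp_connect(pcb, &peer, 1234) == ERR_OK) {
+ *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 2, PBUF_RAM);
+ *   if (p != NULL) {
+ *     pbuf_take(p, "hi", 2);
+ *     udp_send(pcb, p);
+ *     pbuf_free(p);
+ *   }
+ *   udp_disconnect(pcb);
+ * }
+ * @endcode
+ */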
+
+/**
+ * @ingroup udp_raw
+ * Remove the remote end of the pcb. This function does not generate
+ * any network traffic, but only removes the remote address of the pcb.
+ *
+ * @param pcb the udp pcb to disconnect.
+ */
+void
+udp_disconnect(struct udp_pcb *pcb)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("udp_disconnect: invalid pcb", pcb != NULL, return);
+
+ /* reset remote address association */
+#if LWIP_IPV4 && LWIP_IPV6
+ if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
+ ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE);
+ } else {
+#endif
+ ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip);
+#if LWIP_IPV4 && LWIP_IPV6
+ }
+#endif
+ pcb->remote_port = 0;
+ pcb->netif_idx = NETIF_NO_INDEX;
+ /* mark PCB as unconnected */
+ udp_clear_flags(pcb, UDP_FLAGS_CONNECTED);
+}
+
+/**
+ * @ingroup udp_raw
+ * Set a receive callback for a UDP PCB.
+ * This callback will be called when receiving a datagram for the pcb.
+ *
+ * @param pcb the pcb for which to set the recv callback
+ * @param recv function pointer of the callback function
+ * @param recv_arg additional argument to pass to the callback function
+ */
+void
+udp_recv(struct udp_pcb *pcb, udp_recv_fn recv, void *recv_arg)
+{
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("udp_recv: invalid pcb", pcb != NULL, return);
+
+ /* remember recv() callback and user data */
+ pcb->recv = recv;
+ pcb->recv_arg = recv_arg;
+}
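+
+/* A minimal sketch of a receive callback; "echo_recv" is a hypothetical name.
+ * The callback takes ownership of the pbuf and must free it:
+ *
+ * @code{.c}
+ * static void
+ * echo_recv(void *arg, struct udp_pcb *upcb, struct pbuf *p,
+ *           const ip_addr_t *addr, u16_t port)
+ * {
+ *   LWIP_UNUSED_ARG(arg);
+ *   if (p != NULL) {
+ *     udp_sendto(upcb, p, addr, port);  // echo the datagram back
+ *     pbuf_free(p);
+ *   }
+ * }
+ *
+ * udp_recv(pcb, echo_recv, NULL);
+ * @endcode
+ */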
+
+/**
+ * @ingroup udp_raw
+ * Removes and deallocates the pcb.
+ *
+ * @param pcb UDP PCB to be removed. The PCB is removed from the list of
+ * UDP PCBs and the data structure is freed from memory.
+ *
+ * @see udp_new()
+ */
+void
+udp_remove(struct udp_pcb *pcb)
+{
+ struct udp_pcb *pcb2;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ LWIP_ERROR("udp_remove: invalid pcb", pcb != NULL, return);
+
+ mib2_udp_unbind(pcb);
+ /* pcb to be removed is first in list? */
+ if (udp_pcbs == pcb) {
+ /* make list start at 2nd pcb */
+ udp_pcbs = udp_pcbs->next;
+ /* pcb not 1st in list */
+ } else {
+ for (pcb2 = udp_pcbs; pcb2 != NULL; pcb2 = pcb2->next) {
+ /* find pcb in udp_pcbs list */
+ if (pcb2->next != NULL && pcb2->next == pcb) {
+ /* remove pcb from list */
+ pcb2->next = pcb->next;
+ break;
+ }
+ }
+ }
+ memp_free(MEMP_UDP_PCB, pcb);
+}
+
+/**
+ * @ingroup udp_raw
+ * Creates a new UDP pcb which can be used for UDP communication. The
+ * pcb is not active until it has either been bound to a local address
+ * or connected to a remote address.
+ *
+ * @return The UDP PCB which was created. NULL if the PCB data structure
+ * could not be allocated.
+ *
+ * @see udp_remove()
+ */
+struct udp_pcb *
+udp_new(void)
+{
+ struct udp_pcb *pcb;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ pcb = (struct udp_pcb *)memp_malloc(MEMP_UDP_PCB);
+ /* could allocate UDP PCB? */
+ if (pcb != NULL) {
+ /* UDP Lite: by initializing to all zeroes, chksum_len is set to 0,
+ * which means the checksum is generated over the whole datagram by
+ * default (recommended as the default by RFC 3828). */
+ /* initialize PCB to all zeroes */
+ memset(pcb, 0, sizeof(struct udp_pcb));
+ pcb->ttl = UDP_TTL;
+#if LWIP_MULTICAST_TX_OPTIONS
+ udp_set_multicast_ttl(pcb, UDP_TTL);
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+ }
+ return pcb;
+}
+
+/**
+ * @ingroup udp_raw
+ * Create a UDP PCB for specific IP type.
+ * The pcb is not active until it has either been bound to a local address
+ * or connected to a remote address.
+ *
+ * @param type IP address type, see @ref lwip_ip_addr_type definitions.
+ * If you want to listen to IPv4 and IPv6 (dual-stack) packets,
+ * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE.
+ * @return The UDP PCB which was created. NULL if the PCB data structure
+ * could not be allocated.
+ *
+ * @see udp_remove()
+ */
+struct udp_pcb *
+udp_new_ip_type(u8_t type)
+{
+ struct udp_pcb *pcb;
+
+ LWIP_ASSERT_CORE_LOCKED();
+
+ pcb = udp_new();
+#if LWIP_IPV4 && LWIP_IPV6
+ if (pcb != NULL) {
+ IP_SET_TYPE_VAL(pcb->local_ip, type);
+ IP_SET_TYPE_VAL(pcb->remote_ip, type);
+ }
+#else
+ LWIP_UNUSED_ARG(type);
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+ return pcb;
+}
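+
+/* A minimal dual-stack sketch: an ANY-type pcb bound to IP_ANY_TYPE accepts
+ * datagrams of either IP version (the port number is arbitrary):
+ *
+ * @code{.c}
+ * struct udp_pcb *pcb = udp_new_ip_type(IPADDR_TYPE_ANY);
+ * if (pcb != NULL) {
+ *   udp_bind(pcb, IP_ANY_TYPE, 5683);
+ * }
+ * @endcode
+ */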
+
+/** This function is called from netif.c when a netif's IP address has changed
+ *
+ * @param old_addr IP address of the netif before change
+ * @param new_addr IP address of the netif after change
+ */
+void udp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr)
+{
+ struct udp_pcb *upcb;
+
+ if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) {
+ for (upcb = udp_pcbs; upcb != NULL; upcb = upcb->next) {
+ /* PCB bound to current local interface address? */
+ if (ip_addr_cmp(&upcb->local_ip, old_addr)) {
+ /* The PCB is bound to the old ipaddr, so
+ * rebind it to the new one instead */
+ ip_addr_copy(upcb->local_ip, *new_addr);
+ }
+ }
+ }
+}
+
+#if UDP_DEBUG
+/**
+ * Print UDP header information for debug purposes.
+ *
+ * @param udphdr pointer to the udp header in memory.
+ */
+void
+udp_debug_print(struct udp_hdr *udphdr)
+{
+ LWIP_DEBUGF(UDP_DEBUG, ("UDP header:\n"));
+ LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n",
+ lwip_ntohs(udphdr->src), lwip_ntohs(udphdr->dest)));
+ LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n"));
+ LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | 0x%04"X16_F" | (len, chksum)\n",
+ lwip_ntohs(udphdr->len), lwip_ntohs(udphdr->chksum)));
+ LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n"));
+}
+#endif /* UDP_DEBUG */
+
+#endif /* LWIP_UDP */