blob: 2c8a401c05f1bb3183964f729cb8615f3ef82d66 [file] [log] [blame]
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1990 Mentat Inc. */
#pragma ident "%Z%%M% %I% %E% SMI"
const char udp_version[] = "%Z%%M% %I% %E% SMI";
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#include <sys/strsun.h>
#include <sys/time.h>
#define _SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/strsubr.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/policy.h>
#include <sys/ucred.h>
#include <sys/zone.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/vtrace.h>
#include <sys/debug.h>
#include <sys/isa_defs.h>
#include <sys/random.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet/udp.h>
#include <net/if.h>
#include <net/route.h>
#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ip6.h>
#include <inet/ip_ire.h>
#include <inet/ip_if.h>
#include <inet/ip_multi.h>
#include <inet/mi.h>
#include <inet/mib2.h>
#include <inet/nd.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/udp_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipsec_impl.h>
#include <inet/ipp_common.h>
/*
* The ipsec_info.h header file is here since it has the definition for the
* M_CTL message types used by IP to convey information to the ULP. The
* ipsec_info.h needs the pfkeyv2.h, hence the latter's presence.
*/
#include <net/pfkeyv2.h>
#include <inet/ipsec_info.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>
#include <rpc/pmap_prot.h>
/*
* Synchronization notes:
*
* UDP uses a combination of its internal perimeter, a global lock and
* a set of bind hash locks to protect its data structures. Please see
* the note above udp_mode_assertions for details about the internal
* perimeter.
*
* When a UDP endpoint is bound to a local port, it is inserted into
* a bind hash list. The list consists of an array of udp_fanout_t buckets.
* The size of the array is controlled by the udp_bind_fanout_size variable.
* This variable can be changed in /etc/system if the default value is
* not large enough. Each bind hash bucket is protected by a per bucket
* lock. It protects the udp_bind_hash and udp_ptpbhn fields in the udp_t
 * structure. A UDP endpoint is removed from the bind hash list only
* when it is being unbound or being closed. The per bucket lock also
* protects a UDP endpoint's state changes.
*
* Plumbing notes:
*
* Both udp and ip are merged, but the streams plumbing is kept unchanged
* in that udp is always pushed atop /dev/ip. This is done to preserve
* backwards compatibility for certain applications which rely on such
* plumbing geometry to do things such as issuing I_POP on the stream
* in order to obtain direct access to /dev/ip, etc.
*
 * All UDP processing happens in the /dev/ip instance; the udp module
* instance does not possess any state about the endpoint, and merely
* acts as a dummy module whose presence is to keep the streams plumbing
* appearance unchanged. At open time /dev/ip allocates a conn_t that
* happens to embed a udp_t. This stays dormant until the time udp is
* pushed, which indicates to /dev/ip that it must convert itself from
* an IP to a UDP endpoint.
*
* We only allow for the following plumbing cases:
*
* Normal:
* /dev/ip is first opened and later udp is pushed directly on top.
* This is the default action that happens when a udp socket or
* /dev/udp is opened. The conn_t created by /dev/ip instance is
* now shared and is marked with IPCL_UDP.
*
* SNMP-only:
* udp is pushed on top of a module other than /dev/ip. When this
* happens it will support only SNMP semantics. A new conn_t is
* allocated and marked with IPCL_UDPMOD.
*
* The above cases imply that we don't support any intermediate module to
* reside in between /dev/ip and udp -- in fact, we never supported such
* scenario in the past as the inter-layer communication semantics have
* always been private. Also note that the normal case allows for SNMP
* requests to be processed in addition to the rest of UDP operations.
*
* The normal case plumbing is depicted by the following diagram:
*
* +---------------+---------------+
* | | | udp
* | udp_wq | udp_rq |
* | | UDP_RD |
* | | |
* +---------------+---------------+
* | ^
* v |
* +---------------+---------------+
* | | | /dev/ip
* | ip_wq | ip_rq | conn_t
* | UDP_WR | |
* | | |
* +---------------+---------------+
*
* Messages arriving at udp_wq from above will end up in ip_wq before
* it gets processed, i.e. udp write entry points will advance udp_wq
* and use its q_next value as ip_wq in order to use the conn_t that
* is stored in its q_ptr. Likewise, messages generated by ip to the
* module above udp will appear as if they are originated from udp_rq,
* i.e. putnext() calls to the module above udp is done using the
* udp_rq instead of ip_rq in order to avoid udp_rput() which does
* nothing more than calling putnext().
*
* The above implies the following rule of thumb:
*
* 1. udp_t is obtained from conn_t, which is created by the /dev/ip
* instance and is stored in q_ptr of both ip_wq and ip_rq. There
* is no direct reference to conn_t from either udp_wq or udp_rq.
*
* 2. Write-side entry points of udp can obtain the conn_t via the
* Q_TO_CONN() macro, using the queue value obtain from UDP_WR().
*
* 3. While in /dev/ip context, putnext() to the module above udp can
* be done by supplying the queue value obtained from UDP_RD().
*
*/
static queue_t *UDP_WR(queue_t *);
static queue_t *UDP_RD(queue_t *);
/*
 * Named kstat counters for UDP events (presumably exported via
 * udp_kstat_init()/udp_ksp below -- the init body is not in this chunk).
 * Order and names must stay in sync with the udp_stat_t definition.
 */
udp_stat_t udp_statistics = {
	{ "udp_ip_send",		KSTAT_DATA_UINT64 },
	{ "udp_ip_ire_send",		KSTAT_DATA_UINT64 },
	{ "udp_ire_null",		KSTAT_DATA_UINT64 },
	{ "udp_drain",			KSTAT_DATA_UINT64 },
	{ "udp_sock_fallback",		KSTAT_DATA_UINT64 },
	{ "udp_rrw_busy",		KSTAT_DATA_UINT64 },
	{ "udp_rrw_msgcnt",		KSTAT_DATA_UINT64 },
	{ "udp_out_sw_cksum",		KSTAT_DATA_UINT64 },
	{ "udp_out_sw_cksum_bytes",	KSTAT_DATA_UINT64 },
	{ "udp_out_opt",		KSTAT_DATA_UINT64 },
	{ "udp_out_err_notconn",	KSTAT_DATA_UINT64 },
	{ "udp_out_err_output",		KSTAT_DATA_UINT64 },
	{ "udp_out_err_tudr",		KSTAT_DATA_UINT64 },
	{ "udp_in_pktinfo",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvdstaddr",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvopts",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvif",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvslla",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvucred",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvttl",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvhopopts",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvhoplimit",	KSTAT_DATA_UINT64 },
	{ "udp_in_recvdstopts",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvrtdstopts",	KSTAT_DATA_UINT64 },
	{ "udp_in_recvrthdr",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvpktinfo",		KSTAT_DATA_UINT64 },
	{ "udp_in_recvtclass",		KSTAT_DATA_UINT64 },
	{ "udp_in_timestamp",		KSTAT_DATA_UINT64 },
#ifdef DEBUG
	{ "udp_data_conn",		KSTAT_DATA_UINT64 },
	{ "udp_data_notconn",		KSTAT_DATA_UINT64 },
#endif
};
static kstat_t *udp_ksp;
struct kmem_cache *udp_cache;
/*
* Bind hash list size and hash function. It has to be a power of 2 for
* hashing.
*/
#define UDP_BIND_FANOUT_SIZE 512
#define UDP_BIND_HASH(lport) \
((ntohs((uint16_t)lport)) & (udp_bind_fanout_size - 1))
/* UDP bind fanout hash structure. */
typedef struct udp_fanout_s {
	udp_t *uf_udp;		/* head of this bucket's bind hash chain */
	kmutex_t uf_lock;	/* protects chain links and endpoint state */
	/*
	 * Pad each bucket to 64 bytes so adjacent bucket locks
	 * presumably land on separate cache lines (assumes an 8-byte
	 * pointer and kmutex_t on LP64, 4-byte on ILP32 -- TODO
	 * confirm kmutex_t size).
	 */
#if defined(_LP64) || defined(_I32LPx)
	char uf_pad[48];
#else
	char uf_pad[56];
#endif
} udp_fanout_t;
uint_t udp_bind_fanout_size = UDP_BIND_FANOUT_SIZE;
/* udp_fanout_t *udp_bind_fanout. */
static udp_fanout_t *udp_bind_fanout;
/*
* This controls the rate some ndd info report functions can be used
* by non-privileged users. It stores the last time such info is
* requested. When those report functions are called again, this
* is checked with the current time and compare with the ndd param
* udp_ndd_get_info_interval.
*/
static clock_t udp_last_ndd_get_info_time;
#define NDD_TOO_QUICK_MSG \
"ndd get info rate too high for non-privileged users, try again " \
"later.\n"
#define NDD_OUT_OF_BUF_MSG "<< Out of buffer >>\n"
/* Option processing attrs */
typedef struct udpattrs_s {
ip6_pkt_t *udpattr_ipp;
mblk_t *udpattr_mb;
boolean_t udpattr_credset;
} udpattrs_t;
static void udp_addr_req(queue_t *q, mblk_t *mp);
static void udp_bind(queue_t *q, mblk_t *mp);
static void udp_bind_hash_insert(udp_fanout_t *uf, udp_t *udp);
static void udp_bind_hash_remove(udp_t *udp, boolean_t caller_holds_lock);
static int udp_build_hdrs(queue_t *q, udp_t *udp);
static void udp_capability_req(queue_t *q, mblk_t *mp);
static int udp_close(queue_t *q);
static void udp_connect(queue_t *q, mblk_t *mp);
static void udp_disconnect(queue_t *q, mblk_t *mp);
static void udp_err_ack(queue_t *q, mblk_t *mp, t_scalar_t t_error,
int sys_error);
static void udp_err_ack_prim(queue_t *q, mblk_t *mp, int primitive,
t_scalar_t tlierr, int unixerr);
static int udp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp,
cred_t *cr);
static int udp_extra_priv_ports_add(queue_t *q, mblk_t *mp,
char *value, caddr_t cp, cred_t *cr);
static int udp_extra_priv_ports_del(queue_t *q, mblk_t *mp,
char *value, caddr_t cp, cred_t *cr);
static void udp_icmp_error(queue_t *q, mblk_t *mp);
static void udp_icmp_error_ipv6(queue_t *q, mblk_t *mp);
static void udp_info_req(queue_t *q, mblk_t *mp);
static mblk_t *udp_ip_bind_mp(udp_t *udp, t_scalar_t bind_prim,
t_scalar_t addr_length);
static int udp_open(queue_t *q, dev_t *devp, int flag, int sflag,
cred_t *credp);
static int udp_unitdata_opt_process(queue_t *q, mblk_t *mp,
int *errorp, udpattrs_t *udpattrs);
static boolean_t udp_opt_allow_udr_set(t_scalar_t level, t_scalar_t name);
static int udp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr);
static boolean_t udp_param_register(udpparam_t *udppa, int cnt);
static int udp_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
cred_t *cr);
static void udp_report_item(mblk_t *mp, udp_t *udp);
static void udp_rput(queue_t *q, mblk_t *mp);
static void udp_rput_other(queue_t *, mblk_t *);
static int udp_rinfop(queue_t *q, infod_t *dp);
static int udp_rrw(queue_t *q, struiod_t *dp);
static void udp_rput_bind_ack(queue_t *q, mblk_t *mp);
static int udp_status_report(queue_t *q, mblk_t *mp, caddr_t cp,
cred_t *cr);
static void udp_send_data(udp_t *udp, queue_t *q, mblk_t *mp, ipha_t *ipha);
static void udp_ud_err(queue_t *q, mblk_t *mp, uchar_t *destaddr,
t_scalar_t destlen, t_scalar_t err);
static void udp_unbind(queue_t *q, mblk_t *mp);
static in_port_t udp_update_next_port(udp_t *udp, in_port_t port,
boolean_t random);
static void udp_wput(queue_t *q, mblk_t *mp);
static mblk_t *udp_output_v4(conn_t *, mblk_t *mp, ipaddr_t v4dst,
uint16_t port, uint_t srcid, int *error);
static mblk_t *udp_output_v6(conn_t *connp, mblk_t *mp, sin6_t *sin6,
int *error);
static void udp_wput_other(queue_t *q, mblk_t *mp);
static void udp_wput_iocdata(queue_t *q, mblk_t *mp);
static void udp_output(conn_t *connp, mblk_t *mp, struct sockaddr *addr,
socklen_t addrlen);
static size_t udp_set_rcv_hiwat(udp_t *udp, size_t size);
static void udp_kstat_init(void);
static void udp_kstat_fini(void);
static int udp_kstat_update(kstat_t *kp, int rw);
static void udp_input_wrapper(void *arg, mblk_t *mp, void *arg2);
static void udp_rput_other_wrapper(void *arg, mblk_t *mp, void *arg2);
static void udp_wput_other_wrapper(void *arg, mblk_t *mp, void *arg2);
static void udp_resume_bind_cb(void *arg, mblk_t *mp, void *arg2);
static void udp_rcv_enqueue(queue_t *q, udp_t *udp, mblk_t *mp,
uint_t pkt_len);
static void udp_rcv_drain(queue_t *q, udp_t *udp, boolean_t closing);
static void udp_enter(conn_t *, mblk_t *, sqproc_t, uint8_t);
static void udp_exit(conn_t *);
static void udp_become_writer(conn_t *, mblk_t *, sqproc_t, uint8_t);
#ifdef DEBUG
static void udp_mode_assertions(udp_t *, int);
#endif /* DEBUG */
major_t UDP6_MAJ;
#define UDP6 "udp6"
#define UDP_RECV_HIWATER (56 * 1024)
#define UDP_RECV_LOWATER 128
#define UDP_XMIT_HIWATER (56 * 1024)
#define UDP_XMIT_LOWATER 1024
/* STREAMS module parameters: id/name, packet size limits, flow control */
static struct module_info udp_info = {
	UDP_MOD_ID, UDP_MOD_NAME, 1, INFPSZ, UDP_RECV_HIWATER, UDP_RECV_LOWATER
};

/* Read side; udp_rrw/udp_rinfop provide synchronous-streams read entry */
static struct qinit udp_rinit = {
	(pfi_t)udp_rput, NULL, udp_open, udp_close, NULL,
	&udp_info, NULL, udp_rrw, udp_rinfop, STRUIOT_STANDARD
};

/* Write side naming udp_wput (presumably installed when udp is pushed) */
static struct qinit udp_winit = {
	(pfi_t)udp_wput, NULL, NULL, NULL, NULL,
	&udp_info, NULL, NULL, NULL, STRUIOT_NONE
};

/* Pass-through write side used in the default streamtab below */
static struct qinit winit = {
	(pfi_t)putnext, NULL, NULL, NULL, NULL,
	&udp_info, NULL, NULL, NULL, STRUIOT_NONE
};

/* Support for just SNMP if UDP is not pushed directly over device IP */
struct qinit udp_snmp_rinit = {
	(pfi_t)putnext, NULL, udp_open, ip_snmpmod_close, NULL,
	&udp_info, NULL, NULL, NULL, STRUIOT_NONE
};

struct qinit udp_snmp_winit = {
	(pfi_t)ip_snmpmod_wput, NULL, udp_open, ip_snmpmod_close, NULL,
	&udp_info, NULL, NULL, NULL, STRUIOT_NONE
};

/* Default queue pair for udp opens */
struct streamtab udpinfo = {
	&udp_rinit, &winit
};
static sin_t sin_null; /* Zero address for quick clears */
static sin6_t sin6_null; /* Zero address for quick clears */
/* Hint not protected by any lock */
static in_port_t udp_g_next_port_to_try;
/*
* Extra privileged ports. In host byte order.
*/
#define UDP_NUM_EPRIV_PORTS 64
static int udp_g_num_epriv_ports = UDP_NUM_EPRIV_PORTS;
static in_port_t udp_g_epriv_ports[UDP_NUM_EPRIV_PORTS] = { 2049, 4045 };
/* Only modified during _init and _fini thus no locking is needed. */
static IDP udp_g_nd; /* Points to table of UDP ND variables. */
/* MIB-2 stuff for SNMP */
static mib2_udp_t udp_mib; /* SNMP fixed size info */
static kstat_t *udp_mibkp; /* kstat exporting udp_mib data */
#define UDP_MAXPACKET_IPV4 (IP_MAXPACKET - UDPH_SIZE - IP_SIMPLE_HDR_LENGTH)
/* Default structure copied into T_INFO_ACK messages */
static struct T_info_ack udp_g_t_info_ack_ipv4 = {
T_INFO_ACK,
UDP_MAXPACKET_IPV4, /* TSDU_size. Excl. headers */
T_INVALID, /* ETSU_size. udp does not support expedited data. */
T_INVALID, /* CDATA_size. udp does not support connect data. */
T_INVALID, /* DDATA_size. udp does not support disconnect data. */
sizeof (sin_t), /* ADDR_size. */
0, /* OPT_size - not initialized here */
UDP_MAXPACKET_IPV4, /* TIDU_size. Excl. headers */
T_CLTS, /* SERV_type. udp supports connection-less. */
TS_UNBND, /* CURRENT_state. This is set from udp_state. */
(XPG4_1|SENDZERO) /* PROVIDER_flag */
};
#define UDP_MAXPACKET_IPV6 (IP_MAXPACKET - UDPH_SIZE - IPV6_HDR_LEN)
static struct T_info_ack udp_g_t_info_ack_ipv6 = {
T_INFO_ACK,
UDP_MAXPACKET_IPV6, /* TSDU_size. Excl. headers */
T_INVALID, /* ETSU_size. udp does not support expedited data. */
T_INVALID, /* CDATA_size. udp does not support connect data. */
T_INVALID, /* DDATA_size. udp does not support disconnect data. */
sizeof (sin6_t), /* ADDR_size. */
0, /* OPT_size - not initialized here */
UDP_MAXPACKET_IPV6, /* TIDU_size. Excl. headers */
T_CLTS, /* SERV_type. udp supports connection-less. */
TS_UNBND, /* CURRENT_state. This is set from udp_state. */
(XPG4_1|SENDZERO) /* PROVIDER_flag */
};
/* largest UDP port number */
#define UDP_MAX_PORT 65535
/*
* Table of ND variables supported by udp. These are loaded into udp_g_nd
* in udp_open.
* All of these are alterable, within the min/max values given, at run time.
*/
/* BEGIN CSTYLED */
udpparam_t udp_param_arr[] = {
/*min max value name */
{ 0L, 256, 32, "udp_wroff_extra" },
{ 1L, 255, 255, "udp_ipv4_ttl" },
{ 0, IPV6_MAX_HOPS, IPV6_DEFAULT_HOPS, "udp_ipv6_hoplimit"},
{ 1024, (32 * 1024), 1024, "udp_smallest_nonpriv_port" },
{ 0, 1, 1, "udp_do_checksum" },
{ 1024, UDP_MAX_PORT, (32 * 1024), "udp_smallest_anon_port" },
{ 1024, UDP_MAX_PORT, UDP_MAX_PORT, "udp_largest_anon_port" },
{ UDP_XMIT_LOWATER, (1<<30), UDP_XMIT_HIWATER, "udp_xmit_hiwat"},
{ 0, (1<<30), UDP_XMIT_LOWATER, "udp_xmit_lowat"},
{ UDP_RECV_LOWATER, (1<<30), UDP_RECV_HIWATER, "udp_recv_hiwat"},
{ 65536, (1<<30), 2*1024*1024, "udp_max_buf"},
{ 100, 60000, 1000, "udp_ndd_get_info_interval"},
};
/* END CSTYLED */
/*
* The smallest anonymous port in the privileged port range which UDP
* looks for free port. Use in the option UDP_ANONPRIVBIND.
*/
static in_port_t udp_min_anonpriv_port = 512;
/* If set to 0, pick ephemeral port sequentially; otherwise randomly. */
uint32_t udp_random_anon_port = 1;
/*
* Hook functions to enable cluster networking.
* On non-clustered systems these vectors must always be NULL
*/
void (*cl_inet_bind)(uchar_t protocol, sa_family_t addr_family,
uint8_t *laddrp, in_port_t lport) = NULL;
void (*cl_inet_unbind)(uint8_t protocol, sa_family_t addr_family,
uint8_t *laddrp, in_port_t lport) = NULL;
typedef union T_primitives *t_primp_t;
/*
 * Append mp to the endpoint's internal message queue
 * (udp_mphead/udp_mptail), stashing the intended squeue handler in
 * b_prev and the squeue tag in b_queue so udp_switch_to_squeue() can
 * recover them later.  Caller must hold conn_lock.
 */
#define	UDP_ENQUEUE_MP(udp, mp, proc, tag) {			\
	ASSERT((mp)->b_prev == NULL && (mp)->b_queue == NULL);	\
	ASSERT(MUTEX_HELD(&(udp)->udp_connp->conn_lock));	\
	(mp)->b_queue = (queue_t *)((uintptr_t)tag);		\
	(mp)->b_prev = (mblk_t *)proc;				\
	if ((udp)->udp_mphead == NULL)				\
		(udp)->udp_mphead = (mp);			\
	else							\
		(udp)->udp_mptail->b_next = (mp);		\
	(udp)->udp_mptail = (mp);				\
	(udp)->udp_mpcount++;					\
}
/* Account for one more reader thread inside the endpoint (MT_HOT mode). */
#define	UDP_READERS_INCREF(udp) {				\
	ASSERT(MUTEX_HELD(&(udp)->udp_connp->conn_lock));	\
	(udp)->udp_reader_count++;				\
}

/*
 * Drop a reader reference; broadcast on conn_cv when the count drains
 * to zero so any waiter can observe the transition.
 */
#define	UDP_READERS_DECREF(udp) {				\
	ASSERT(MUTEX_HELD(&(udp)->udp_connp->conn_lock));	\
	(udp)->udp_reader_count--;				\
	if ((udp)->udp_reader_count == 0)			\
		cv_broadcast(&(udp)->udp_connp->conn_cv);	\
}

/* As above, but for threads accounted against the squeue (SQUEUE mode). */
#define	UDP_SQUEUE_DECREF(udp) {				\
	ASSERT(MUTEX_HELD(&(udp)->udp_connp->conn_lock));	\
	(udp)->udp_squeue_count--;				\
	if ((udp)->udp_squeue_count == 0)			\
		cv_broadcast(&(udp)->udp_connp->conn_cv);	\
}
/*
* Notes on UDP endpoint synchronization:
*
* UDP needs exclusive operation on a per endpoint basis, when executing
* functions that modify the endpoint state. udp_rput_other() deals with
* packets with IP options, and processing these packets end up having
* to update the endpoint's option related state. udp_wput_other() deals
* with control operations from the top, e.g. connect() that needs to
* update the endpoint state. These could be synchronized using locks,
* but the current version uses squeues for this purpose. squeues may
* give performance improvement for certain cases such as connected UDP
* sockets; thus the framework allows for using squeues.
*
* The perimeter routines are described as follows:
*
* udp_enter():
* Enter the UDP endpoint perimeter.
*
* udp_become_writer():
* Become exclusive on the UDP endpoint. Specifies a function
* that will be called exclusively either immediately or later
* when the perimeter is available exclusively.
*
* udp_exit():
* Exit the UDP perimeter.
*
* Entering UDP from the top or from the bottom must be done using
* udp_enter(). No lock must be held while attempting to enter the UDP
* perimeter. When finished, udp_exit() must be called to get out of
* the perimeter.
*
* UDP operates in either MT_HOT mode or in SQUEUE mode. In MT_HOT mode,
* multiple threads may enter a UDP endpoint concurrently. This is used
* for sending and/or receiving normal data. Control operations and other
* special cases call udp_become_writer() to become exclusive on a per
* endpoint basis and this results in transitioning to SQUEUE mode. squeue
* by definition serializes access to the conn_t. When there are no more
* pending messages on the squeue for the UDP connection, the endpoint
* reverts to MT_HOT mode. During the interregnum when not all MT threads
* of an endpoint have finished, messages are queued in the UDP endpoint
* and the UDP is in UDP_MT_QUEUED mode or UDP_QUEUED_SQUEUE mode.
*
* These modes have the following analogs:
*
* UDP_MT_HOT/udp_reader_count==0 none
* UDP_MT_HOT/udp_reader_count>0 RW_READ_LOCK
* UDP_MT_QUEUED RW_WRITE_WANTED
* UDP_SQUEUE or UDP_QUEUED_SQUEUE RW_WRITE_LOCKED
*
* Stable modes: UDP_MT_HOT, UDP_SQUEUE
* Transient modes: UDP_MT_QUEUED, UDP_QUEUED_SQUEUE
*
* While in stable modes, UDP keeps track of the number of threads
* operating on the endpoint. The udp_reader_count variable represents
* the number of threads entering the endpoint as readers while it is
* in UDP_MT_HOT mode. Transitioning to UDP_SQUEUE happens when there
* is only a single reader, i.e. when this counter drops to 1. Likewise,
* udp_squeue_count represents the number of threads operating on the
* endpoint's squeue while it is in UDP_SQUEUE mode. The mode transition
* to UDP_MT_HOT happens after the last thread exits the endpoint, i.e.
* when this counter drops to 0.
*
* The default mode is set to UDP_MT_HOT and UDP alternates between
* UDP_MT_HOT and UDP_SQUEUE as shown in the state transition below.
*
* Mode transition:
* ----------------------------------------------------------------
* old mode Event New mode
* ----------------------------------------------------------------
* UDP_MT_HOT Call to udp_become_writer() UDP_SQUEUE
* and udp_reader_count == 1
*
* UDP_MT_HOT Call to udp_become_writer() UDP_MT_QUEUED
* and udp_reader_count > 1
*
* UDP_MT_QUEUED udp_reader_count drops to zero UDP_QUEUED_SQUEUE
*
* UDP_QUEUED_SQUEUE All messages enqueued on the UDP_SQUEUE
* internal UDP queue successfully
* moved to squeue AND udp_squeue_count != 0
*
* UDP_QUEUED_SQUEUE All messages enqueued on the UDP_MT_HOT
* internal UDP queue successfully
* moved to squeue AND udp_squeue_count
* drops to zero
*
* UDP_SQUEUE udp_squeue_count drops to zero UDP_MT_HOT
* ----------------------------------------------------------------
*/
/*
 * Given a queue in the udp module instance, return the write queue of
 * the /dev/ip instance below it, whose q_ptr holds the shared conn_t.
 * The udp instance's own queues carry no q_ptr state; see the
 * "Plumbing notes" at the top of this file.
 */
static queue_t *
UDP_WR(queue_t *q)
{
	ASSERT(q->q_ptr == NULL && _OTHERQ(q)->q_ptr == NULL);
	ASSERT(WR(q)->q_next != NULL && WR(q)->q_next->q_ptr != NULL);
	ASSERT(IPCL_IS_UDP(Q_TO_CONN(WR(q)->q_next)));
	return (_WR(q)->q_next);
}
/*
 * Given a queue in the /dev/ip instance (whose q_ptr is the conn_t),
 * return the read queue of the udp module instance above it, suitable
 * for putnext() to the module sitting on top of udp.
 */
static queue_t *
UDP_RD(queue_t *q)
{
	ASSERT(q->q_ptr != NULL && _OTHERQ(q)->q_ptr != NULL);
	ASSERT(IPCL_IS_UDP(Q_TO_CONN(q)));
	ASSERT(RD(q)->q_next != NULL && RD(q)->q_next->q_ptr == NULL);
	return (_RD(q)->q_next);
}
#ifdef DEBUG
#define UDP_MODE_ASSERTIONS(udp, caller) udp_mode_assertions(udp, caller)
#else
#define UDP_MODE_ASSERTIONS(udp, caller)
#endif
/* Invariants */
#ifdef DEBUG
uint32_t udp_count[4];
/* Context of udp_mode_assertions */
#define UDP_ENTER 1
#define UDP_BECOME_WRITER 2
#define UDP_EXIT 3
/*
 * DEBUG-only invariant checks for the perimeter mode machine described
 * in the synchronization notes above.  The caller passes its context
 * (UDP_ENTER, UDP_BECOME_WRITER or UDP_EXIT) so entry-specific
 * relaxations apply; udp_count[] tallies how often each mode is seen.
 * Caller must hold conn_lock.
 */
static void
udp_mode_assertions(udp_t *udp, int caller)
{
	ASSERT(MUTEX_HELD(&udp->udp_connp->conn_lock));

	switch (udp->udp_mode) {
	case UDP_MT_HOT:
		/*
		 * Messages have not yet been enqueued on the internal queue,
		 * otherwise we would have switched to UDP_MT_QUEUED. Likewise
		 * by definition, there can't be any messages enqueued on the
		 * squeue. The UDP could be quiescent, so udp_reader_count
		 * could be zero at entry.
		 */
		ASSERT(udp->udp_mphead == NULL && udp->udp_mpcount == 0 &&
		    udp->udp_squeue_count == 0);
		ASSERT(caller == UDP_ENTER || udp->udp_reader_count != 0);
		udp_count[0]++;
		break;

	case UDP_MT_QUEUED:
		/*
		 * The last MT thread to exit the udp perimeter empties the
		 * internal queue and then switches the UDP to
		 * UDP_QUEUED_SQUEUE mode. Since we are still in UDP_MT_QUEUED
		 * mode, it means there must be at least 1 MT thread still in
		 * the perimeter and at least 1 message on the internal queue.
		 */
		ASSERT(udp->udp_reader_count >= 1 && udp->udp_mphead != NULL &&
		    udp->udp_mpcount != 0 && udp->udp_squeue_count == 0);
		udp_count[1]++;
		break;

	case UDP_QUEUED_SQUEUE:
		/*
		 * The switch has happened from MT to SQUEUE. So there can't
		 * any MT threads. Messages could still pile up on the internal
		 * queue until the transition is complete and we move to
		 * UDP_SQUEUE mode. We can't assert on nonzero udp_squeue_count
		 * since the squeue could drain any time.
		 */
		ASSERT(udp->udp_reader_count == 0);
		udp_count[2]++;
		break;

	case UDP_SQUEUE:
		/*
		 * The transition is complete. There can't be any messages on
		 * the internal queue. The udp could be quiescent or the squeue
		 * could drain any time, so we can't assert on nonzero
		 * udp_squeue_count during entry. Nor can we assert that
		 * udp_reader_count is zero, since, a reader thread could have
		 * directly become writer in line by calling udp_become_writer
		 * without going through the queued states.
		 */
		ASSERT(udp->udp_mphead == NULL && udp->udp_mpcount == 0);
		ASSERT(caller == UDP_ENTER || udp->udp_squeue_count != 0);
		udp_count[3]++;
		break;
	}
}
#endif
/*
 * Common body for udp_enter(): dispatch (connp, mp) according to the
 * current perimeter mode, or free the message if the conn is closing.
 *
 * MT_HOT:	run proc in the calling thread as a reader.
 * SQUEUE:	account a squeue reference and hand mp to the squeue.
 * MT_QUEUED /
 * QUEUED_SQUEUE: append mp to the internal queue to preserve ordering.
 */
#define	_UDP_ENTER(connp, mp, proc, tag) {				\
	udp_t *_udp = (connp)->conn_udp;				\
									\
	mutex_enter(&(connp)->conn_lock);				\
	if ((connp)->conn_state_flags & CONN_CLOSING) {			\
		mutex_exit(&(connp)->conn_lock);			\
		freemsg(mp);						\
	} else {							\
		UDP_MODE_ASSERTIONS(_udp, UDP_ENTER);			\
									\
		switch (_udp->udp_mode) {				\
		case UDP_MT_HOT:					\
			/* We can execute as reader right away. */	\
			UDP_READERS_INCREF(_udp);			\
			mutex_exit(&(connp)->conn_lock);		\
			(*(proc))(connp, mp, (connp)->conn_sqp);	\
			break;						\
									\
		case UDP_SQUEUE:					\
			/*						\
			 * We are in squeue mode, send the		\
			 * packet to the squeue				\
			 */						\
			_udp->udp_squeue_count++;			\
			CONN_INC_REF_LOCKED(connp);			\
			mutex_exit(&(connp)->conn_lock);		\
			squeue_enter((connp)->conn_sqp, mp, proc,	\
			    connp, tag);				\
			break;						\
									\
		case UDP_MT_QUEUED:					\
		case UDP_QUEUED_SQUEUE:					\
			/*						\
			 * Some messages may have been enqueued		\
			 * ahead of us.  Enqueue the new message	\
			 * at the tail of the internal queue to		\
			 * preserve message ordering.			\
			 */						\
			UDP_ENQUEUE_MP(_udp, mp, proc, tag);		\
			mutex_exit(&(connp)->conn_lock);		\
			break;						\
		}							\
	}								\
}
/*
 * Enter the UDP endpoint perimeter; see _UDP_ENTER above and the
 * synchronization notes for the per-mode dispatch.  No lock may be
 * held by the caller; mp is freed if the conn is closing.
 */
static void
udp_enter(conn_t *connp, mblk_t *mp, sqproc_t proc, uint8_t tag)
{
	_UDP_ENTER(connp, mp, proc, tag);
}
/*
 * Become exclusive (writer) on the endpoint and run proc via the
 * squeue.  If this thread is the only MT reader we switch to SQUEUE
 * mode immediately; otherwise the message is queued internally
 * (UDP_MT_QUEUED) until the remaining readers drain -- see the mode
 * transition table above.  Caller must not hold conn_lock.
 */
static void
udp_become_writer(conn_t *connp, mblk_t *mp, sqproc_t proc, uint8_t tag)
{
	udp_t *udp;

	udp = connp->conn_udp;

	mutex_enter(&connp->conn_lock);
	UDP_MODE_ASSERTIONS(udp, UDP_BECOME_WRITER);

	switch (udp->udp_mode) {
	case UDP_MT_HOT:
		if (udp->udp_reader_count == 1) {
			/*
			 * We are the only MT thread. Switch to squeue mode
			 * immediately.
			 */
			udp->udp_mode = UDP_SQUEUE;
			udp->udp_squeue_count = 1;
			CONN_INC_REF_LOCKED(connp);
			mutex_exit(&connp->conn_lock);
			squeue_enter(connp->conn_sqp, mp, proc, connp, tag);
			return;
		}
		/* FALLTHRU */
	case UDP_MT_QUEUED:
		/* Enqueue the packet internally in UDP */
		udp->udp_mode = UDP_MT_QUEUED;
		UDP_ENQUEUE_MP(udp, mp, proc, tag);
		mutex_exit(&connp->conn_lock);
		return;

	case UDP_SQUEUE:
	case UDP_QUEUED_SQUEUE:
		/*
		 * We are already exclusive. i.e. we are already
		 * writer. Simply call the desired function.
		 */
		udp->udp_squeue_count++;
		mutex_exit(&connp->conn_lock);
		(*proc)(connp, mp, connp->conn_sqp);
		return;
	}
}
/*
* Transition from MT mode to SQUEUE mode, when the last MT thread
* is exiting the UDP perimeter. Move all messages from the internal
* udp queue to the squeue. A better way would be to move all the
* messages in one shot, this needs more support from the squeue framework
*/
/*
 * Caller holds conn_lock with udp_mode == UDP_MT_QUEUED; the lock is
 * dropped and re-acquired around the squeue_fill() calls, hence the
 * outer while loop re-checking udp_mphead.
 */
static void
udp_switch_to_squeue(udp_t *udp)
{
	mblk_t *mp;
	mblk_t *mp_next;
	sqproc_t proc;
	uint8_t tag;
	conn_t *connp = udp->udp_connp;

	ASSERT(MUTEX_HELD(&connp->conn_lock));
	ASSERT(udp->udp_mode == UDP_MT_QUEUED);

	while (udp->udp_mphead != NULL) {
		/* Detach the whole internal queue before dropping the lock. */
		mp = udp->udp_mphead;
		udp->udp_mphead = NULL;
		udp->udp_mptail = NULL;
		udp->udp_mpcount = 0;
		udp->udp_mode = UDP_QUEUED_SQUEUE;
		mutex_exit(&connp->conn_lock);
		/*
		 * It is best not to hold any locks across the calls
		 * to squeue functions. Since we drop the lock we
		 * need to go back and check the udp_mphead once again
		 * after the squeue_fill and hence the while loop at
		 * the top of this function
		 */
		for (; mp != NULL; mp = mp_next) {
			mp_next = mp->b_next;
			/* Recover handler and tag stashed by UDP_ENQUEUE_MP */
			proc = (sqproc_t)mp->b_prev;
			tag = (uint8_t)((uintptr_t)mp->b_queue);
			mp->b_next = NULL;
			mp->b_prev = NULL;
			mp->b_queue = NULL;
			CONN_INC_REF(connp);
			udp->udp_squeue_count++;
			squeue_fill(connp->conn_sqp, mp, proc, connp,
			    tag);
		}
		mutex_enter(&connp->conn_lock);
	}
	/*
	 * udp_squeue_count of zero implies that the squeue has drained
	 * even before we arrived here (i.e. after the squeue_fill above)
	 */
	udp->udp_mode = (udp->udp_squeue_count != 0) ?
	    UDP_SQUEUE : UDP_MT_HOT;
}
/*
 * Common body for udp_exit(): drop this thread's reference on the
 * perimeter and perform any mode transition that falls out of it --
 * the last MT reader in UDP_MT_QUEUED moves the internal queue to the
 * squeue, and the last squeue thread returns the endpoint to MT_HOT.
 */
#define	_UDP_EXIT(connp) {						\
	udp_t *_udp = (connp)->conn_udp;				\
									\
	mutex_enter(&(connp)->conn_lock);				\
	UDP_MODE_ASSERTIONS(_udp, UDP_EXIT);				\
									\
	switch (_udp->udp_mode) {					\
	case UDP_MT_HOT:						\
		UDP_READERS_DECREF(_udp);				\
		mutex_exit(&(connp)->conn_lock);			\
		break;							\
									\
	case UDP_SQUEUE:						\
		UDP_SQUEUE_DECREF(_udp);				\
		if (_udp->udp_squeue_count == 0)			\
			_udp->udp_mode = UDP_MT_HOT;			\
		mutex_exit(&(connp)->conn_lock);			\
		break;							\
									\
	case UDP_MT_QUEUED:						\
		/*							\
		 * If this is the last MT thread, we need to		\
		 * switch to squeue mode				\
		 */							\
		UDP_READERS_DECREF(_udp);				\
		if (_udp->udp_reader_count == 0)			\
			udp_switch_to_squeue(_udp);			\
		mutex_exit(&(connp)->conn_lock);			\
		break;							\
									\
	case UDP_QUEUED_SQUEUE:						\
		UDP_SQUEUE_DECREF(_udp);				\
		/*							\
		 * Even if the udp_squeue_count drops to zero, we	\
		 * don't want to change udp_mode to UDP_MT_HOT here.	\
		 * The thread in udp_switch_to_squeue will take care	\
		 * of the transition to UDP_MT_HOT, after emptying	\
		 * any more new messages that have been enqueued in	\
		 * udp_mphead.						\
		 */							\
		mutex_exit(&(connp)->conn_lock);			\
		break;							\
	}								\
}
/*
 * Leave the perimeter entered via udp_enter()/udp_become_writer();
 * see _UDP_EXIT above for the mode transitions this may trigger.
 */
static void
udp_exit(conn_t *connp)
{
	_UDP_EXIT(connp);
}
/*
* Return the next anonymous port in the privileged port range for
* bind checking.
*
* Trusted Extension (TX) notes: TX allows administrator to mark or
* reserve ports as Multilevel ports (MLP). MLP has special function
 * on TX systems. Once a port is made MLP, it's not available as an
 * ordinary port. This creates "holes" in the port name space. It
 * may be necessary to skip the "holes" to find a suitable anon port.
*/
/*
 * Hand out the next candidate from the anonymous privileged range
 * [udp_min_anonpriv_port, IPPORT_RESERVED), walking downwards through
 * the (unlocked) static cursor and wrapping around at most once.
 * Returns 0 when the whole range has been exhausted.  On labeled
 * (Trusted Extensions) systems, ports reserved as MLPs are skipped
 * via tsol_next_port().
 */
static in_port_t
udp_get_next_priv_port(udp_t *udp)
{
	static in_port_t next_priv_port = IPPORT_RESERVED - 1;
	in_port_t mlpport;
	boolean_t wrapped = B_FALSE;

	for (;;) {
		if (next_priv_port < udp_min_anonpriv_port ||
		    next_priv_port >= IPPORT_RESERVED) {
			/* Walked off the bottom; restart at the top. */
			next_priv_port = IPPORT_RESERVED - 1;
			if (wrapped)
				return (0);	/* second pass: give up */
			wrapped = B_TRUE;
		}
		if (!is_system_labeled())
			break;
		mlpport = tsol_next_port(crgetzone(
		    udp->udp_connp->conn_cred), next_priv_port,
		    IPPROTO_UDP, B_FALSE);
		if (mlpport == 0)
			break;
		/* Candidate sits in an MLP "hole"; jump past it. */
		next_priv_port = mlpport;
	}
	return (next_priv_port--);
}
/* UDP bind hash report triggered via the Named Dispatch mechanism. */
/* ARGSUSED */
/*
 * ndd report callback: dump the bind hash table into mp->b_cont, one
 * bucket index followed by one line per endpoint (via
 * udp_report_item()).  Only endpoints visible to the caller's zone are
 * printed; the global zone sees everything.  Non-privileged callers
 * are rate-limited through udp_last_ndd_get_info_time against
 * udp_ndd_get_info_interval.
 */
static int
udp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
{
	udp_fanout_t *udpf;
	int i;
	zoneid_t zoneid;
	conn_t *connp;
	udp_t *udp;

	connp = Q_TO_CONN(q);
	udp = connp->conn_udp;

	/* Refer to comments in udp_status_report(). */
	if (cr == NULL || secpolicy_net_config(cr, B_TRUE) != 0) {
		if (ddi_get_lbolt() - udp_last_ndd_get_info_time <
		    drv_usectohz(udp_ndd_get_info_interval * 1000)) {
			(void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG);
			return (0);
		}
	}
	if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) {
		/* The following may work even if we cannot get a large buf. */
		(void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG);
		return (0);
	}
	(void) mi_mpprintf(mp,
	    "UDP " MI_COL_HDRPAD_STR
	/* 12345678[89ABCDEF] */
	    " zone lport src addr dest addr port state");
	/* 1234 12345 xxx.xxx.xxx.xxx xxx.xxx.xxx.xxx 12345 UNBOUND */

	zoneid = connp->conn_zoneid;

	for (i = 0; i < udp_bind_fanout_size; i++) {
		udpf = &udp_bind_fanout[i];
		mutex_enter(&udpf->uf_lock);

		/* Print the hash index. */
		udp = udpf->uf_udp;
		if (zoneid != GLOBAL_ZONEID) {
			/* skip to first entry in this zone; might be none */
			while (udp != NULL &&
			    udp->udp_connp->conn_zoneid != zoneid)
				udp = udp->udp_bind_hash;
		}
		if (udp != NULL) {
			uint_t print_len, buf_len;

			buf_len = mp->b_cont->b_datap->db_lim -
			    mp->b_cont->b_wptr;
			print_len = snprintf((char *)mp->b_cont->b_wptr,
			    buf_len, "%d\n", i);
			if (print_len < buf_len) {
				mp->b_cont->b_wptr += print_len;
			} else {
				/* Truncated: consume whatever space is left */
				mp->b_cont->b_wptr += buf_len;
			}
			for (; udp != NULL; udp = udp->udp_bind_hash) {
				if (zoneid == GLOBAL_ZONEID ||
				    zoneid == udp->udp_connp->conn_zoneid)
					udp_report_item(mp->b_cont, udp);
			}
		}
		mutex_exit(&udpf->uf_lock);
	}
	udp_last_ndd_get_info_time = ddi_get_lbolt();
	return (0);
}
/*
 * Hash list removal routine for udp_t structures.
 *
 * The bind hash is a singly linked list with a back-pointer scheme:
 * udp_ptpbhn points at whichever pointer (bucket head or predecessor's
 * udp_bind_hash) currently references this udp_t.  A NULL udp_ptpbhn
 * means the endpoint is not hashed at all.
 */
static void
udp_bind_hash_remove(udp_t *udp, boolean_t caller_holds_lock)
{
	udp_t	*udpnext;
	kmutex_t *lockp;

	/* Not on any bind-hash list; nothing to do. */
	if (udp->udp_ptpbhn == NULL)
		return;

	/*
	 * Extract the lock pointer in case there are concurrent
	 * hash_remove's for this instance.
	 */
	ASSERT(udp->udp_port != 0);
	if (!caller_holds_lock) {
		lockp = &udp_bind_fanout[UDP_BIND_HASH(udp->udp_port)].uf_lock;
		ASSERT(lockp != NULL);
		mutex_enter(lockp);
	}
	/* Re-check under the lock; a concurrent remove may have run. */
	if (udp->udp_ptpbhn != NULL) {
		udpnext = udp->udp_bind_hash;
		if (udpnext != NULL) {
			udpnext->udp_ptpbhn = udp->udp_ptpbhn;
			udp->udp_bind_hash = NULL;
		}
		*udp->udp_ptpbhn = udpnext;
		udp->udp_ptpbhn = NULL;
	}
	if (!caller_holds_lock) {
		mutex_exit(lockp);
	}
}
/*
 * Insert "udp" into the bind fanout bucket "uf", unlinking it first from
 * any list it is already on.  Specific-address bindings are kept ahead of
 * INADDR_ANY bindings so they get lookup preference.
 * Caller must hold uf->uf_lock.
 */
static void
udp_bind_hash_insert(udp_fanout_t *uf, udp_t *udp)
{
	udp_t	**udpp;
	udp_t	*udpnext;

	ASSERT(MUTEX_HELD(&uf->uf_lock));
	if (udp->udp_ptpbhn != NULL) {
		/* Already hashed somewhere; unlink before re-inserting. */
		udp_bind_hash_remove(udp, B_TRUE);
	}
	udpp = &uf->uf_udp;
	udpnext = udpp[0];
	if (udpnext != NULL) {
		/*
		 * If the new udp bound to the INADDR_ANY address
		 * and the first one in the list is not bound to
		 * INADDR_ANY we skip all entries until we find the
		 * first one bound to INADDR_ANY.
		 * This makes sure that applications binding to a
		 * specific address get preference over those binding to
		 * INADDR_ANY.
		 */
		if (V6_OR_V4_INADDR_ANY(udp->udp_bound_v6src) &&
		    !V6_OR_V4_INADDR_ANY(udpnext->udp_bound_v6src)) {
			while ((udpnext = udpp[0]) != NULL &&
			    !V6_OR_V4_INADDR_ANY(
			    udpnext->udp_bound_v6src)) {
				udpp = &(udpnext->udp_bind_hash);
			}
			if (udpnext != NULL)
				udpnext->udp_ptpbhn = &udp->udp_bind_hash;
		} else {
			udpnext->udp_ptpbhn = &udp->udp_bind_hash;
		}
	}
	/* Splice "udp" in at *udpp, ahead of udpnext. */
	udp->udp_bind_hash = udpnext;
	udp->udp_ptpbhn = udpp;
	udpp[0] = udp;
}
/*
 * This routine is called to handle each O_T_BIND_REQ/T_BIND_REQ message
 * passed to udp_wput.
 * It associates a port number and local address with the stream.
 * The O_T_BIND_REQ/T_BIND_REQ is passed downstream to ip with the UDP
 * protocol type (IPPROTO_UDP) placed in the message following the address.
 * A T_BIND_ACK message is passed upstream when ip acknowledges the request.
 * (Called as writer.)
 *
 * Note that UDP over IPv4 and IPv6 sockets can use the same port number
 * without setting SO_REUSEADDR. This is needed so that they
 * can be viewed as two independent transport protocols.
 * However, anonymous ports are allocated from the same range to avoid
 * duplicating the udp_g_next_port_to_try.
 */
static void
udp_bind(queue_t *q, mblk_t *mp)
{
	sin_t		*sin;
	sin6_t		*sin6;
	mblk_t		*mp1;
	in_port_t	port;		/* Host byte order */
	in_port_t	requested_port;	/* Host byte order */
	struct T_bind_req *tbr;
	int		count;
	in6_addr_t	v6src;
	boolean_t	bind_to_req_port_only;
	int		loopmax;
	udp_fanout_t	*udpf;
	in_port_t	lport;		/* Network byte order */
	zoneid_t	zoneid;
	conn_t		*connp;
	udp_t		*udp;
	boolean_t	is_inaddr_any;
	mlp_type_t	addrtype, mlptype;

	connp = Q_TO_CONN(q);
	udp = connp->conn_udp;

	/* Sanity check: the request must at least hold a T_bind_req. */
	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) {
		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
		    "udp_bind: bad req, len %u",
		    (uint_t)(mp->b_wptr - mp->b_rptr));
		udp_err_ack(q, mp, TPROTO, 0);
		return;
	}
	/* A bind is only legal from the unbound state. */
	if (udp->udp_state != TS_UNBND) {
		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
		    "udp_bind: bad state, %u", udp->udp_state);
		udp_err_ack(q, mp, TOUTSTATE, 0);
		return;
	}
	/*
	 * Reallocate the message to make sure we have enough room for an
	 * address and the protocol type.
	 */
	mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t) + 1, 1);
	if (!mp1) {
		udp_err_ack(q, mp, TSYSERR, ENOMEM);
		return;
	}
	mp = mp1;
	tbr = (struct T_bind_req *)mp->b_rptr;
	switch (tbr->ADDR_length) {
	case 0:			/* Request for a generic port */
		/* Synthesize a wildcard address of the stream's family. */
		tbr->ADDR_offset = sizeof (struct T_bind_req);
		if (udp->udp_family == AF_INET) {
			tbr->ADDR_length = sizeof (sin_t);
			sin = (sin_t *)&tbr[1];
			*sin = sin_null;
			sin->sin_family = AF_INET;
			mp->b_wptr = (uchar_t *)&sin[1];
		} else {
			ASSERT(udp->udp_family == AF_INET6);
			tbr->ADDR_length = sizeof (sin6_t);
			sin6 = (sin6_t *)&tbr[1];
			*sin6 = sin6_null;
			sin6->sin6_family = AF_INET6;
			mp->b_wptr = (uchar_t *)&sin6[1];
		}
		port = 0;
		break;
	case sizeof (sin_t):	/* Complete IPv4 address */
		sin = (sin_t *)mi_offset_param(mp, tbr->ADDR_offset,
		    sizeof (sin_t));
		if (sin == NULL || !OK_32PTR((char *)sin)) {
			udp_err_ack(q, mp, TSYSERR, EINVAL);
			return;
		}
		if (udp->udp_family != AF_INET ||
		    sin->sin_family != AF_INET) {
			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		port = ntohs(sin->sin_port);
		break;
	case sizeof (sin6_t):	/* complete IPv6 address */
		sin6 = (sin6_t *)mi_offset_param(mp, tbr->ADDR_offset,
		    sizeof (sin6_t));
		if (sin6 == NULL || !OK_32PTR((char *)sin6)) {
			udp_err_ack(q, mp, TSYSERR, EINVAL);
			return;
		}
		if (udp->udp_family != AF_INET6 ||
		    sin6->sin6_family != AF_INET6) {
			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		port = ntohs(sin6->sin6_port);
		break;
	default:		/* Invalid request */
		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
		    "udp_bind: bad ADDR_length length %u", tbr->ADDR_length);
		udp_err_ack(q, mp, TBADADDR, 0);
		return;
	}

	requested_port = port;

	/* O_T_BIND_REQ semantics allow falling back to another port. */
	if (requested_port == 0 || tbr->PRIM_type == O_T_BIND_REQ)
		bind_to_req_port_only = B_FALSE;
	else			/* T_BIND_REQ and requested_port != 0 */
		bind_to_req_port_only = B_TRUE;

	if (requested_port == 0) {
		/*
		 * If the application passed in zero for the port number, it
		 * doesn't care which port number we bind to. Get one in the
		 * valid range.
		 */
		if (udp->udp_anon_priv_bind) {
			port = udp_get_next_priv_port(udp);
		} else {
			port = udp_update_next_port(udp,
			    udp_g_next_port_to_try, B_TRUE);
		}
	} else {
		/*
		 * If the port is in the well-known privileged range,
		 * make sure the caller was privileged.
		 */
		int i;
		boolean_t priv = B_FALSE;

		if (port < udp_smallest_nonpriv_port) {
			priv = B_TRUE;
		} else {
			/* Extra privileged ports outside the low range. */
			for (i = 0; i < udp_g_num_epriv_ports; i++) {
				if (port == udp_g_epriv_ports[i]) {
					priv = B_TRUE;
					break;
				}
			}
		}

		if (priv) {
			cred_t *cr = DB_CREDDEF(mp, connp->conn_cred);

			if (secpolicy_net_privaddr(cr, port) != 0) {
				udp_err_ack(q, mp, TACCES, 0);
				return;
			}
		}
	}

	/* port is 0 here only if the anonymous allocation above failed. */
	if (port == 0) {
		udp_err_ack(q, mp, TNOADDR, 0);
		return;
	}

	/*
	 * Copy the source address into our udp structure. This address
	 * may still be zero; if so, IP will fill in the correct address
	 * each time an outbound packet is passed to it.
	 */
	if (udp->udp_family == AF_INET) {
		ASSERT(sin != NULL);
		ASSERT(udp->udp_ipversion == IPV4_VERSION);
		udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH + UDPH_SIZE +
		    udp->udp_ip_snd_options_len;
		IN6_IPADDR_TO_V4MAPPED(sin->sin_addr.s_addr, &v6src);
	} else {
		ASSERT(sin6 != NULL);
		v6src = sin6->sin6_addr;
		/* A v4-mapped bind makes this an IPv4 endpoint. */
		if (IN6_IS_ADDR_V4MAPPED(&v6src)) {
			udp->udp_ipversion = IPV4_VERSION;
			udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH +
			    UDPH_SIZE + udp->udp_ip_snd_options_len;
		} else {
			udp->udp_ipversion = IPV6_VERSION;
			udp->udp_max_hdr_len = udp->udp_sticky_hdrs_len;
		}
	}

	/*
	 * If udp_reuseaddr is not set, then we have to make sure that
	 * the IP address and port number the application requested
	 * (or we selected for the application) is not being used by
	 * another stream. If another stream is already using the
	 * requested IP address and port, the behavior depends on
	 * "bind_to_req_port_only". If set the bind fails; otherwise we
	 * search for any unused port to bind to the stream.
	 *
	 * As per the BSD semantics, as modified by the Deering multicast
	 * changes, if udp_reuseaddr is set, then we allow multiple binds
	 * to the same port independent of the local IP address.
	 *
	 * This is slightly different than in SunOS 4.X which did not
	 * support IP multicast. Note that the change implemented by the
	 * Deering multicast code effects all binds - not only binding
	 * to IP multicast addresses.
	 *
	 * Note that when binding to port zero we ignore SO_REUSEADDR in
	 * order to guarantee a unique port.
	 */
	count = 0;
	if (udp->udp_anon_priv_bind) {
		/* loopmax = (IPPORT_RESERVED-1) - udp_min_anonpriv_port + 1 */
		loopmax = IPPORT_RESERVED - udp_min_anonpriv_port;
	} else {
		loopmax = udp_largest_anon_port - udp_smallest_anon_port + 1;
	}

	is_inaddr_any = V6_OR_V4_INADDR_ANY(v6src);
	zoneid = connp->conn_zoneid;

	for (;;) {
		udp_t		*udp1;
		boolean_t	found_exclbind = B_FALSE;

		/*
		 * Walk through the list of udp streams bound to
		 * requested port with the same IP address.
		 */
		lport = htons(port);
		udpf = &udp_bind_fanout[UDP_BIND_HASH(lport)];
		mutex_enter(&udpf->uf_lock);
		for (udp1 = udpf->uf_udp; udp1 != NULL;
		    udp1 = udp1->udp_bind_hash) {
			if (lport != udp1->udp_port)
				continue;

			/*
			 * On a labeled system, we must treat bindings to ports
			 * on shared IP addresses by sockets with MAC exemption
			 * privilege as being in all zones, as there's
			 * otherwise no way to identify the right receiver.
			 */
			if (zoneid != udp1->udp_connp->conn_zoneid &&
			    !udp->udp_mac_exempt && !udp1->udp_mac_exempt)
				continue;

			/*
			 * If UDP_EXCLBIND is set for either the bound or
			 * binding endpoint, the semantics of bind
			 * is changed according to the following chart.
			 *
			 * spec = specified address (v4 or v6)
			 * unspec = unspecified address (v4 or v6)
			 * A = specified addresses are different for endpoints
			 *
			 * bound	bind to		allowed?
			 * -------------------------------------
			 * unspec	unspec		no
			 * unspec	spec		no
			 * spec		unspec		no
			 * spec		spec		yes if A
			 *
			 * For labeled systems, SO_MAC_EXEMPT behaves the same
			 * as UDP_EXCLBIND, except that zoneid is ignored.
			 */
			if (udp1->udp_exclbind || udp->udp_exclbind ||
			    udp1->udp_mac_exempt || udp->udp_mac_exempt) {
				if (V6_OR_V4_INADDR_ANY(
				    udp1->udp_bound_v6src) ||
				    is_inaddr_any ||
				    IN6_ARE_ADDR_EQUAL(&udp1->udp_bound_v6src,
				    &v6src)) {
					found_exclbind = B_TRUE;
					break;
				}
				continue;
			}

			/*
			 * Check ipversion to allow IPv4 and IPv6 sockets to
			 * have disjoint port number spaces.
			 */
			if (udp->udp_ipversion != udp1->udp_ipversion)
				continue;

			/*
			 * No difference depending on SO_REUSEADDR.
			 *
			 * If existing port is bound to a
			 * non-wildcard IP address and
			 * the requesting stream is bound to
			 * a distinct different IP addresses
			 * (non-wildcard, also), keep going.
			 */
			if (!is_inaddr_any &&
			    !V6_OR_V4_INADDR_ANY(udp1->udp_bound_v6src) &&
			    !IN6_ARE_ADDR_EQUAL(&udp1->udp_bound_v6src,
			    &v6src)) {
				continue;
			}
			break;
		}

		/*
		 * SO_REUSEADDR on an explicitly requested port permits
		 * sharing the port unless an exclusive binding was found.
		 * NOTE: uf_lock is intentionally still held when we break
		 * out of the outer loop; it is released after the hash
		 * insert below.
		 */
		if (!found_exclbind &&
		    (udp->udp_reuseaddr && requested_port != 0)) {
			break;
		}

		if (udp1 == NULL) {
			/*
			 * No other stream has this IP address
			 * and port number. We can use it.
			 */
			break;
		}
		mutex_exit(&udpf->uf_lock);
		if (bind_to_req_port_only) {
			/*
			 * We get here only when requested port
			 * is bound (and only first of the for()
			 * loop iteration).
			 *
			 * The semantics of this bind request
			 * require it to fail so we return from
			 * the routine (and exit the loop).
			 *
			 */
			udp_err_ack(q, mp, TADDRBUSY, 0);
			return;
		}

		if (udp->udp_anon_priv_bind) {
			port = udp_get_next_priv_port(udp);
		} else {
			if ((count == 0) && (requested_port != 0)) {
				/*
				 * If the application wants us to find
				 * a port, get one to start with. Set
				 * requested_port to 0, so that we will
				 * update udp_g_next_port_to_try below.
				 */
				port = udp_update_next_port(udp,
				    udp_g_next_port_to_try, B_TRUE);
				requested_port = 0;
			} else {
				port = udp_update_next_port(udp, port + 1,
				    B_FALSE);
			}
		}

		if (port == 0 || ++count >= loopmax) {
			/*
			 * We've tried every possible port number and
			 * there are none available, so send an error
			 * to the user.
			 */
			udp_err_ack(q, mp, TNOADDR, 0);
			return;
		}
	}

	/*
	 * Copy the source address into our udp structure. This address
	 * may still be zero; if so, ip will fill in the correct address
	 * each time an outbound packet is passed to it.
	 * If we are binding to a broadcast or multicast address udp_rput
	 * will clear the source address when it receives the T_BIND_ACK.
	 */
	udp->udp_v6src = udp->udp_bound_v6src = v6src;
	udp->udp_port = lport;
	/*
	 * Now reset the next anonymous port if the application requested
	 * an anonymous port, or we handed out the next anonymous port.
	 */
	if ((requested_port == 0) && (!udp->udp_anon_priv_bind)) {
		udp_g_next_port_to_try = port + 1;
	}

	/* Initialize the O_T_BIND_REQ/T_BIND_REQ for ip. */
	if (udp->udp_family == AF_INET) {
		sin->sin_port = udp->udp_port;
	} else {
		int error;

		sin6->sin6_port = udp->udp_port;
		/* Rebuild the header template */
		error = udp_build_hdrs(q, udp);
		if (error != 0) {
			/* uf_lock is still held from the loop above. */
			mutex_exit(&udpf->uf_lock);
			udp_err_ack(q, mp, TSYSERR, error);
			return;
		}
	}
	udp->udp_state = TS_IDLE;
	udp_bind_hash_insert(udpf, udp);
	mutex_exit(&udpf->uf_lock);

	if (cl_inet_bind) {
		/*
		 * Running in cluster mode - register bind information
		 */
		if (udp->udp_ipversion == IPV4_VERSION) {
			(*cl_inet_bind)(IPPROTO_UDP, AF_INET,
			    (uint8_t *)(&V4_PART_OF_V6(udp->udp_v6src)),
			    (in_port_t)udp->udp_port);
		} else {
			(*cl_inet_bind)(IPPROTO_UDP, AF_INET6,
			    (uint8_t *)&(udp->udp_v6src),
			    (in_port_t)udp->udp_port);
		}
	}

	/* Trusted Extensions: multilevel-port policy checks. */
	connp->conn_anon_port = (is_system_labeled() && requested_port == 0);
	if (is_system_labeled() && (!connp->conn_anon_port ||
	    connp->conn_anon_mlp)) {
		uint16_t mlpport;
		cred_t *cr = connp->conn_cred;
		zone_t *zone;

		connp->conn_mlp_type = udp->udp_recvucred ? mlptBoth :
		    mlptSingle;
		addrtype = tsol_mlp_addr_type(zoneid, IPV6_VERSION, &v6src);
		if (addrtype == mlptSingle) {
			udp_err_ack(q, mp, TNOADDR, 0);
			connp->conn_anon_port = B_FALSE;
			connp->conn_mlp_type = mlptSingle;
			return;
		}
		mlpport = connp->conn_anon_port ? PMAPPORT : port;
		zone = crgetzone(cr);
		mlptype = tsol_mlp_port_type(zone, IPPROTO_UDP, mlpport,
		    addrtype);
		if (mlptype != mlptSingle &&
		    (connp->conn_mlp_type == mlptSingle ||
		    secpolicy_net_bindmlp(cr) != 0)) {
			if (udp->udp_debug) {
				(void) strlog(UDP_MOD_ID, 0, 1,
				    SL_ERROR|SL_TRACE,
				    "udp_bind: no priv for multilevel port %d",
				    mlpport);
			}
			udp_err_ack(q, mp, TACCES, 0);
			connp->conn_anon_port = B_FALSE;
			connp->conn_mlp_type = mlptSingle;
			return;
		}

		/*
		 * If we're specifically binding a shared IP address and the
		 * port is MLP on shared addresses, then check to see if this
		 * zone actually owns the MLP. Reject if not.
		 */
		if (mlptype == mlptShared && addrtype == mlptShared) {
			zoneid_t mlpzone;

			mlpzone = tsol_mlp_findzone(IPPROTO_UDP,
			    htons(mlpport));
			if (connp->conn_zoneid != mlpzone) {
				if (udp->udp_debug) {
					(void) strlog(UDP_MOD_ID, 0, 1,
					    SL_ERROR|SL_TRACE,
					    "udp_bind: attempt to bind port "
					    "%d on shared addr in zone %d "
					    "(should be %d)",
					    mlpport, connp->conn_zoneid,
					    mlpzone);
				}
				udp_err_ack(q, mp, TACCES, 0);
				connp->conn_anon_port = B_FALSE;
				connp->conn_mlp_type = mlptSingle;
				return;
			}
		}
		if (connp->conn_anon_port) {
			int error;

			error = tsol_mlp_anon(zone, mlptype, connp->conn_ulp,
			    port, B_TRUE);
			if (error != 0) {
				if (udp->udp_debug) {
					(void) strlog(UDP_MOD_ID, 0, 1,
					    SL_ERROR|SL_TRACE,
					    "udp_bind: cannot establish anon "
					    "MLP for port %d", port);
				}
				udp_err_ack(q, mp, TACCES, 0);
				connp->conn_anon_port = B_FALSE;
				connp->conn_mlp_type = mlptSingle;
				return;
			}
		}
		connp->conn_mlp_type = mlptype;
	}

	/* Pass the protocol number in the message following the address. */
	*mp->b_wptr++ = IPPROTO_UDP;
	if (!V6_OR_V4_INADDR_ANY(udp->udp_v6src)) {
		/*
		 * Append a request for an IRE if udp_v6src not
		 * zero (IPv4 - INADDR_ANY, or IPv6 - all-zeroes address).
		 */
		mp->b_cont = allocb(sizeof (ire_t), BPRI_HI);
		if (!mp->b_cont) {
			udp_err_ack(q, mp, TSYSERR, ENOMEM);
			return;
		}
		mp->b_cont->b_wptr += sizeof (ire_t);
		mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE;
	}
	if (udp->udp_family == AF_INET6)
		mp = ip_bind_v6(q, mp, connp, NULL);
	else
		mp = ip_bind_v4(q, mp, connp);

	/* A NULL return means IP deferred the bind; keep a conn ref. */
	if (mp != NULL)
		udp_rput_other(_RD(q), mp);
	else
		CONN_INC_REF(connp);
}
/*
 * Re-enter the conn's squeue to resume a bind that IP deferred; the
 * actual completion work is done in udp_resume_bind_cb().
 */
void
udp_resume_bind(conn_t *connp, mblk_t *mp)
{
	udp_enter(connp, mp, udp_resume_bind_cb, SQTAG_BIND_RETRY);
}
/*
 * This is called from ip_wput_nondata to resume a deferred UDP bind.
 * Completes the bind-ack processing, then drops the pending-operation
 * state and the squeue reference taken by udp_enter().
 */
/* ARGSUSED */
static void
udp_resume_bind_cb(void *arg, mblk_t *mp, void *arg2)
{
	conn_t *connp = arg;

	ASSERT(connp != NULL && IPCL_IS_UDP(connp));

	udp_rput_other(connp->conn_rq, mp);

	CONN_OPER_PENDING_DONE(connp);
	udp_exit(connp);
}
/*
 * This routine handles each T_CONN_REQ message passed to udp. It
 * associates a default destination address with the stream.
 *
 * This routine sends down a T_BIND_REQ to IP with the following mblks:
 *	T_BIND_REQ	- specifying local and remote address/port
 *	IRE_DB_REQ_TYPE	- to get an IRE back containing ire_type and src
 *	T_OK_ACK	- for the T_CONN_REQ
 *	T_CONN_CON	- to keep the TPI user happy
 *
 * The connect completes in udp_rput.
 * When a T_BIND_ACK is received information is extracted from the IRE
 * and the two appended messages are sent to the TPI user.
 * Should udp_rput receive T_ERROR_ACK for the T_BIND_REQ it will convert
 * it to an error ack for the appropriate primitive.
 */
static void
udp_connect(queue_t *q, mblk_t *mp)
{
	sin6_t	*sin6;
	sin_t	*sin;
	struct T_conn_req	*tcr;
	in6_addr_t v6dst;
	ipaddr_t v4dst;
	uint16_t dstport;
	uint32_t flowinfo;
	mblk_t	*mp1, *mp2;
	udp_fanout_t	*udpf;
	udp_t	*udp, *udp1;

	udp = Q_TO_UDP(q);

	tcr = (struct T_conn_req *)mp->b_rptr;

	/* A bit of sanity checking */
	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_req)) {
		udp_err_ack(q, mp, TPROTO, 0);
		return;
	}

	/*
	 * This UDP must have bound to a port already before doing
	 * a connect.
	 */
	if (udp->udp_state == TS_UNBND) {
		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
		    "udp_connect: bad state, %u", udp->udp_state);
		udp_err_ack(q, mp, TOUTSTATE, 0);
		return;
	}
	ASSERT(udp->udp_port != 0 && udp->udp_ptpbhn != NULL);

	udpf = &udp_bind_fanout[UDP_BIND_HASH(udp->udp_port)];

	if (udp->udp_state == TS_DATA_XFER) {
		/* Already connected - clear out state */
		mutex_enter(&udpf->uf_lock);
		udp->udp_v6src = udp->udp_bound_v6src;
		udp->udp_state = TS_IDLE;
		mutex_exit(&udpf->uf_lock);
	}

	/* UDP connect carries no TPI options. */
	if (tcr->OPT_length != 0) {
		udp_err_ack(q, mp, TBADOPT, 0);
		return;
	}

	/*
	 * Determine packet type based on type of address passed in
	 * the request should contain an IPv4 or IPv6 address.
	 * Make sure that address family matches the type of
	 * family of the address passed down
	 */
	switch (tcr->DEST_length) {
	default:
		udp_err_ack(q, mp, TBADADDR, 0);
		return;

	case sizeof (sin_t):
		sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset,
		    sizeof (sin_t));
		if (sin == NULL || !OK_32PTR((char *)sin)) {
			udp_err_ack(q, mp, TSYSERR, EINVAL);
			return;
		}
		if (udp->udp_family != AF_INET ||
		    sin->sin_family != AF_INET) {
			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		v4dst = sin->sin_addr.s_addr;
		dstport = sin->sin_port;
		IN6_IPADDR_TO_V4MAPPED(v4dst, &v6dst);
		ASSERT(udp->udp_ipversion == IPV4_VERSION);
		udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH + UDPH_SIZE +
		    udp->udp_ip_snd_options_len;
		break;

	case sizeof (sin6_t):
		sin6 = (sin6_t *)mi_offset_param(mp, tcr->DEST_offset,
		    sizeof (sin6_t));
		if (sin6 == NULL || !OK_32PTR((char *)sin6)) {
			udp_err_ack(q, mp, TSYSERR, EINVAL);
			return;
		}
		if (udp->udp_family != AF_INET6 ||
		    sin6->sin6_family != AF_INET6) {
			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		v6dst = sin6->sin6_addr;
		/* A v4-mapped destination switches us to IPv4 processing. */
		if (IN6_IS_ADDR_V4MAPPED(&v6dst)) {
			IN6_V4MAPPED_TO_IPADDR(&v6dst, v4dst);
			udp->udp_ipversion = IPV4_VERSION;
			udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH +
			    UDPH_SIZE + udp->udp_ip_snd_options_len;
			flowinfo = 0;
		} else {
			udp->udp_ipversion = IPV6_VERSION;
			udp->udp_max_hdr_len = udp->udp_sticky_hdrs_len;
			flowinfo = sin6->sin6_flowinfo;
		}
		dstport = sin6->sin6_port;
		break;
	}
	if (dstport == 0) {
		udp_err_ack(q, mp, TBADADDR, 0);
		return;
	}

	/*
	 * Create a default IP header with no IP options.
	 */
	udp->udp_dstport = dstport;
	if (udp->udp_ipversion == IPV4_VERSION) {
		/*
		 * Interpret a zero destination to mean loopback.
		 * Update the T_CONN_REQ (sin/sin6) since it is used to
		 * generate the T_CONN_CON.
		 */
		if (v4dst == INADDR_ANY) {
			v4dst = htonl(INADDR_LOOPBACK);
			IN6_IPADDR_TO_V4MAPPED(v4dst, &v6dst);
			if (udp->udp_family == AF_INET) {
				sin->sin_addr.s_addr = v4dst;
			} else {
				sin6->sin6_addr = v6dst;
			}
		}
		udp->udp_v6dst = v6dst;
		udp->udp_flowinfo = 0;

		/*
		 * If the destination address is multicast and
		 * an outgoing multicast interface has been set,
		 * use the address of that interface as our
		 * source address if no source address has been set.
		 */
		if (V4_PART_OF_V6(udp->udp_v6src) == INADDR_ANY &&
		    CLASSD(v4dst) &&
		    udp->udp_multicast_if_addr != INADDR_ANY) {
			IN6_IPADDR_TO_V4MAPPED(udp->udp_multicast_if_addr,
			    &udp->udp_v6src);
		}
	} else {
		ASSERT(udp->udp_ipversion == IPV6_VERSION);
		/*
		 * Interpret a zero destination to mean loopback.
		 * Update the T_CONN_REQ (sin/sin6) since it is used to
		 * generate the T_CONN_CON.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&v6dst)) {
			v6dst = ipv6_loopback;
			sin6->sin6_addr = v6dst;
		}
		udp->udp_v6dst = v6dst;
		udp->udp_flowinfo = flowinfo;
		/*
		 * If the destination address is multicast and
		 * an outgoing multicast interface has been set,
		 * then the ip bind logic will pick the correct source
		 * address (i.e. matching the outgoing multicast interface).
		 */
	}

	/*
	 * Verify that the src/port/dst/port is unique for all
	 * connections in TS_DATA_XFER
	 */
	mutex_enter(&udpf->uf_lock);
	for (udp1 = udpf->uf_udp; udp1 != NULL; udp1 = udp1->udp_bind_hash) {
		if (udp1->udp_state != TS_DATA_XFER)
			continue;
		if (udp->udp_port != udp1->udp_port ||
		    udp->udp_ipversion != udp1->udp_ipversion ||
		    dstport != udp1->udp_dstport ||
		    !IN6_ARE_ADDR_EQUAL(&udp->udp_v6src, &udp1->udp_v6src) ||
		    !IN6_ARE_ADDR_EQUAL(&v6dst, &udp1->udp_v6dst))
			continue;
		mutex_exit(&udpf->uf_lock);
		udp_err_ack(q, mp, TBADADDR, 0);
		return;
	}
	udp->udp_state = TS_DATA_XFER;
	mutex_exit(&udpf->uf_lock);

	/*
	 * Send down bind to IP to verify that there is a route
	 * and to determine the source address.
	 * This will come back as T_BIND_ACK with an IRE_DB_TYPE in rput.
	 */
	if (udp->udp_family == AF_INET)
		mp1 = udp_ip_bind_mp(udp, O_T_BIND_REQ, sizeof (ipa_conn_t));
	else
		mp1 = udp_ip_bind_mp(udp, O_T_BIND_REQ, sizeof (ipa6_conn_t));
	if (mp1 == NULL) {
		udp_err_ack(q, mp, TSYSERR, ENOMEM);
bind_failed:
		/* Roll back the TS_DATA_XFER transition made above. */
		mutex_enter(&udpf->uf_lock);
		udp->udp_state = TS_IDLE;
		mutex_exit(&udpf->uf_lock);
		return;
	}

	/*
	 * We also have to send a connection confirmation to
	 * keep TLI happy. Prepare it for udp_rput.
	 */
	if (udp->udp_family == AF_INET)
		mp2 = mi_tpi_conn_con(NULL, (char *)sin,
		    sizeof (*sin), NULL, 0);
	else
		mp2 = mi_tpi_conn_con(NULL, (char *)sin6,
		    sizeof (*sin6), NULL, 0);
	if (mp2 == NULL) {
		freemsg(mp1);
		udp_err_ack(q, mp, TSYSERR, ENOMEM);
		goto bind_failed;
	}

	mp = mi_tpi_ok_ack_alloc(mp);
	if (mp == NULL) {
		/* Unable to reuse the T_CONN_REQ for the ack. */
		freemsg(mp2);
		udp_err_ack_prim(q, mp1, T_CONN_REQ, TSYSERR, ENOMEM);
		goto bind_failed;
	}

	/* Hang onto the T_OK_ACK and T_CONN_CON for later. */
	linkb(mp1, mp);
	linkb(mp1, mp2);

	mblk_setcred(mp1, udp->udp_connp->conn_cred);
	if (udp->udp_family == AF_INET)
		mp1 = ip_bind_v4(q, mp1, udp->udp_connp);
	else
		mp1 = ip_bind_v6(q, mp1, udp->udp_connp, NULL);

	/* A NULL return means IP deferred the bind; keep a conn ref. */
	if (mp1 != NULL)
		udp_rput_other(_RD(q), mp1);
	else
		CONN_INC_REF(udp->udp_connp);
}
/*
 * Stream close routine: quiesce the conn, drain any queued receive data,
 * free the UDP state, and convert the conn back into a bare IP endpoint.
 * Always succeeds (returns 0).
 */
static int
udp_close(queue_t *q)
{
	conn_t	*connp = Q_TO_CONN(UDP_WR(q));
	udp_t	*udp;
	queue_t	*ip_rq = RD(UDP_WR(q));

	ASSERT(connp != NULL && IPCL_IS_UDP(connp));
	udp = connp->conn_udp;

	ip_quiesce_conn(connp);
	/*
	 * Disable read-side synchronous stream
	 * interface and drain any queued data.
	 */
	udp_rcv_drain(q, udp, B_TRUE);
	ASSERT(!udp->udp_direct_sockfs);

	qprocsoff(q);

	/* restore IP module's high and low water marks to default values */
	ip_rq->q_hiwat = ip_rq->q_qinfo->qi_minfo->mi_hiwat;
	WR(ip_rq)->q_hiwat = WR(ip_rq)->q_qinfo->qi_minfo->mi_hiwat;
	WR(ip_rq)->q_lowat = WR(ip_rq)->q_qinfo->qi_minfo->mi_lowat;

	/* The drain above must have emptied the receive list. */
	ASSERT(udp->udp_rcv_cnt == 0);
	ASSERT(udp->udp_rcv_msgcnt == 0);
	ASSERT(udp->udp_rcv_list_head == NULL);
	ASSERT(udp->udp_rcv_list_tail == NULL);

	/* connp is now single threaded. */
	udp_close_free(connp);
	/*
	 * Restore connp as an IP endpoint. We don't need
	 * any locks since we are now single threaded
	 */
	connp->conn_flags &= ~IPCL_UDP;
	connp->conn_state_flags &=
	    ~(CONN_CLOSING | CONN_CONDEMNED | CONN_QUIESCED);
	connp->conn_ulp_labeled = B_FALSE;
	return (0);
}
/*
 * Called in the close path from IP (ip_quiesce_conn) to quiesce the conn:
 * deregister any cluster binding, remove the endpoint from the bind hash,
 * and wait until all in-flight users of the udp_t have drained.
 */
void
udp_quiesce_conn(conn_t *connp)
{
	udp_t	*udp = connp->conn_udp;

	if (cl_inet_unbind != NULL && udp->udp_state == TS_IDLE) {
		/*
		 * Running in cluster mode - register unbind information
		 */
		if (udp->udp_ipversion == IPV4_VERSION) {
			(*cl_inet_unbind)(IPPROTO_UDP, AF_INET,
			    (uint8_t *)(&(V4_PART_OF_V6(udp->udp_v6src))),
			    (in_port_t)udp->udp_port);
		} else {
			(*cl_inet_unbind)(IPPROTO_UDP, AF_INET6,
			    (uint8_t *)(&(udp->udp_v6src)),
			    (in_port_t)udp->udp_port);
		}
	}

	udp_bind_hash_remove(udp, B_FALSE);

	/*
	 * Block until the reader and squeue counts drop to zero and the
	 * endpoint has returned to UDP_MT_HOT mode; conn_cv is signalled
	 * by the code that decrements these counts.
	 */
	mutex_enter(&connp->conn_lock);
	while (udp->udp_reader_count != 0 || udp->udp_squeue_count != 0 ||
	    udp->udp_mode != UDP_MT_HOT) {
		cv_wait(&connp->conn_cv, &connp->conn_lock);
	}
	mutex_exit(&connp->conn_lock);
}
/*
 * Release all storage hanging off the udp_t, sever the udp <-> conn
 * cross links, and return the udp_t to its kmem cache.  The caller must
 * guarantee the conn is single threaded at this point.
 */
void
udp_close_free(conn_t *connp)
{
	udp_t	*closing_udp = connp->conn_udp;

	/* Drop the IPv4 send- and receive-side option buffers, if present. */
	if (closing_udp->udp_ip_snd_options != NULL) {
		mi_free((char *)closing_udp->udp_ip_snd_options);
		closing_udp->udp_ip_snd_options = NULL;
	}
	if (closing_udp->udp_ip_rcv_options != NULL) {
		mi_free((char *)closing_udp->udp_ip_rcv_options);
		closing_udp->udp_ip_rcv_options = NULL;
	}

	/* Drop the sticky header template, if one was ever built. */
	if (closing_udp->udp_sticky_hdrs_len != 0) {
		kmem_free(closing_udp->udp_sticky_hdrs,
		    closing_udp->udp_sticky_hdrs_len);
		closing_udp->udp_sticky_hdrs = NULL;
		closing_udp->udp_sticky_hdrs_len = 0;
	}
	ip6_pkt_free(&closing_udp->udp_sticky_ipp);

	/* Break the linkage in both directions, then free the udp_t. */
	closing_udp->udp_connp = NULL;
	connp->conn_udp = NULL;
	kmem_cache_free(udp_cache, closing_udp);
}
/*
 * This routine handles each T_DISCON_REQ message passed to udp
 * as an indication that UDP is no longer connected. This results
 * in sending a T_BIND_REQ to IP to restore the binding to just
 * the local address/port.
 *
 * This routine sends down a T_BIND_REQ to IP with the following mblks:
 *	T_BIND_REQ	- specifying just the local address/port
 *	T_OK_ACK	- for the T_DISCON_REQ
 *
 * The disconnect completes in udp_rput.
 * When a T_BIND_ACK is received the appended T_OK_ACK is sent to the TPI user.
 * Should udp_rput receive T_ERROR_ACK for the T_BIND_REQ it will convert
 * it to an error ack for the appropriate primitive.
 */
static void
udp_disconnect(queue_t *q, mblk_t *mp)
{
	udp_t	*udp = Q_TO_UDP(q);
	mblk_t	*mp1;
	udp_fanout_t *udpf;

	/* Disconnect is only meaningful on a connected endpoint. */
	if (udp->udp_state != TS_DATA_XFER) {
		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
		    "udp_disconnect: bad state, %u", udp->udp_state);
		udp_err_ack(q, mp, TOUTSTATE, 0);
		return;
	}
	udpf = &udp_bind_fanout[UDP_BIND_HASH(udp->udp_port)];
	mutex_enter(&udpf->uf_lock);
	/* Revert to the bound-only source address and idle state. */
	udp->udp_v6src = udp->udp_bound_v6src;
	udp->udp_state = TS_IDLE;
	mutex_exit(&udpf->uf_lock);

	/*
	 * Send down bind to IP to remove the full binding and revert
	 * to the local address binding.
	 */
	if (udp->udp_family == AF_INET)
		mp1 = udp_ip_bind_mp(udp, O_T_BIND_REQ, sizeof (sin_t));
	else
		mp1 = udp_ip_bind_mp(udp, O_T_BIND_REQ, sizeof (sin6_t));
	if (mp1 == NULL) {
		udp_err_ack(q, mp, TSYSERR, ENOMEM);
		return;
	}
	mp = mi_tpi_ok_ack_alloc(mp);
	if (mp == NULL) {
		/* Unable to reuse the T_DISCON_REQ for the ack. */
		udp_err_ack_prim(q, mp1, T_DISCON_REQ, TSYSERR, ENOMEM);
		return;
	}

	if (udp->udp_family == AF_INET6) {
		int error;

		/* Rebuild the header template */
		error = udp_build_hdrs(q, udp);
		if (error != 0) {
			udp_err_ack_prim(q, mp, T_DISCON_REQ, TSYSERR, error);
			freemsg(mp1);
			return;
		}
	}
	mutex_enter(&udpf->uf_lock);
	udp->udp_discon_pending = 1;
	mutex_exit(&udpf->uf_lock);

	/* Append the T_OK_ACK to the T_BIND_REQ for udp_rput */
	linkb(mp1, mp);

	if (udp->udp_family == AF_INET6)
		mp1 = ip_bind_v6(q, mp1, udp->udp_connp, NULL);
	else
		mp1 = ip_bind_v4(q, mp1, udp->udp_connp);

	/* A NULL return means IP deferred the bind; keep a conn ref. */
	if (mp1 != NULL)
		udp_rput_other(_RD(q), mp1);
	else
		CONN_INC_REF(udp->udp_connp);
}
/* This routine creates a T_ERROR_ACK message and passes it upstream. */
static void
udp_err_ack(queue_t *q, mblk_t *mp, t_scalar_t t_error, int sys_error)
{
	mblk_t	*ackmp;

	/* mi_tpi_err_ack_alloc consumes mp; NULL means allocation failed. */
	ackmp = mi_tpi_err_ack_alloc(mp, t_error, sys_error);
	if (ackmp == NULL)
		return;
	putnext(UDP_RD(q), ackmp);
}
/* Shorthand to generate and send TPI error acks to our client */
static void
udp_err_ack_prim(queue_t *q, mblk_t *mp, int primitive, t_scalar_t t_error,
    int sys_error)
{
	struct T_error_ack	*teackp;

	/* Reshape mp into a T_ERROR_ACK; bail if the allocation fails. */
	mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack), M_PCPROTO,
	    T_ERROR_ACK);
	if (mp == NULL)
		return;

	teackp = (struct T_error_ack *)mp->b_rptr;
	teackp->ERROR_prim = primitive;
	teackp->TLI_error = t_error;
	teackp->UNIX_error = sys_error;
	putnext(UDP_RD(q), mp);
}
/*
 * ndd "get" handler: print every configured extra-privileged UDP port
 * (non-zero slots of udp_g_epriv_ports) into mp.
 */
/*ARGSUSED*/
static int
udp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
{
	int	slot;

	for (slot = 0; slot < udp_g_num_epriv_ports; slot++) {
		if (udp_g_epriv_ports[slot] == 0)
			continue;
		(void) mi_mpprintf(mp, "%d ", udp_g_epriv_ports[slot]);
	}
	return (0);
}
/*
 * ndd "set" handler: add a port to the extra-privileged list.
 * Returns EINVAL for an out-of-range value, EEXIST if the port is
 * already listed, and EOVERFLOW when the table is full.
 */
/* ARGSUSED */
static int
udp_extra_priv_ports_add(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
    cred_t *cr)
{
	long	new_value;
	int	free_slot = -1;
	int	i;

	/*
	 * Fail the request if the new value does not lie within the
	 * port number limits.
	 */
	if (ddi_strtol(value, NULL, 10, &new_value) != 0 ||
	    new_value <= 0 || new_value >= 65536)
		return (EINVAL);

	/*
	 * One pass over the table: reject a duplicate and remember the
	 * first empty slot for the insertion.
	 */
	for (i = 0; i < udp_g_num_epriv_ports; i++) {
		if (udp_g_epriv_ports[i] == new_value)
			return (EEXIST);
		if (free_slot == -1 && udp_g_epriv_ports[i] == 0)
			free_slot = i;
	}
	if (free_slot == -1)
		return (EOVERFLOW);

	/* Set the new value */
	udp_g_epriv_ports[free_slot] = (in_port_t)new_value;
	return (0);
}
/*
 * ndd "set" handler: remove a port from the extra-privileged list.
 * Returns EINVAL for an out-of-range value and ESRCH if the port is
 * not currently listed.
 */
/* ARGSUSED */
static int
udp_extra_priv_ports_del(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
    cred_t *cr)
{
	long	del_value;
	int	i;

	/*
	 * Fail the request if the value does not lie within the
	 * port number limits.
	 */
	if (ddi_strtol(value, NULL, 10, &del_value) != 0 ||
	    del_value <= 0 || del_value >= 65536)
		return (EINVAL);

	/* Locate the entry and clear it in one pass. */
	for (i = 0; i < udp_g_num_epriv_ports; i++) {
		if (udp_g_epriv_ports[i] == del_value) {
			udp_g_epriv_ports[i] = 0;
			return (0);
		}
	}
	return (ESRCH);
}
/* At minimum we need 4 bytes of UDP header */
#define ICMP_MIN_UDP_HDR 4

/*
 * udp_icmp_error is called by udp_rput to process ICMP msgs. passed up by IP.
 * Generates the appropriate T_UDERROR_IND for permanent (non-transient) errors.
 * Assumes that IP has pulled up everything up to and including the ICMP header.
 * An M_CTL could potentially come here from some other module (i.e. if UDP
 * is pushed on some module other than IP). Thus, if we find that the M_CTL
 * does not have enough ICMP information , following STREAMS conventions,
 * we send it upstream assuming it is an M_CTL we don't understand.
 */
static void
udp_icmp_error(queue_t *q, mblk_t *mp)
{
	icmph_t *icmph;
	ipha_t	*ipha;
	int	iph_hdr_length;
	udpha_t	*udpha;
	sin_t	sin;
	sin6_t	sin6;
	/*
	 * Initialized to NULL so the "if (mp1)" at the bottom never reads
	 * an indeterminate pointer if udp_family is somehow neither
	 * AF_INET nor AF_INET6 (CERT C EXP33-C).
	 */
	mblk_t	*mp1 = NULL;
	int	error = 0;
	size_t	mp_size = MBLKL(mp);
	udp_t	*udp = Q_TO_UDP(q);

	/*
	 * Assume IP provides aligned packets - otherwise toss
	 */
	if (!OK_32PTR(mp->b_rptr)) {
		freemsg(mp);
		return;
	}

	/*
	 * Verify that we have a complete IP header and the application has
	 * asked for errors. If not, send it upstream.
	 */
	if (!udp->udp_dgram_errind || mp_size < sizeof (ipha_t)) {
noticmpv4:
		putnext(UDP_RD(q), mp);
		return;
	}

	ipha = (ipha_t *)mp->b_rptr;

	/*
	 * Verify IP version. Anything other than IPv4 or IPv6 packet is sent
	 * upstream. ICMPv6 is handled in udp_icmp_error_ipv6.
	 */
	switch (IPH_HDR_VERSION(ipha)) {
	case IPV6_VERSION:
		udp_icmp_error_ipv6(q, mp);
		return;
	case IPV4_VERSION:
		break;
	default:
		goto noticmpv4;
	}

	/* Skip past the outer IP and ICMP headers */
	iph_hdr_length = IPH_HDR_LENGTH(ipha);
	icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];

	/*
	 * If we don't have the correct outer IP header length or if the ULP
	 * is not IPPROTO_ICMP or if we don't have a complete inner IP header
	 * send the packet upstream.
	 */
	if (iph_hdr_length < sizeof (ipha_t) ||
	    ipha->ipha_protocol != IPPROTO_ICMP ||
	    (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) {
		goto noticmpv4;
	}
	ipha = (ipha_t *)&icmph[1];

	/* Skip past the inner IP and find the ULP header */
	iph_hdr_length = IPH_HDR_LENGTH(ipha);
	udpha = (udpha_t *)((char *)ipha + iph_hdr_length);

	/*
	 * If we don't have the correct inner IP header length or if the ULP
	 * is not IPPROTO_UDP or if we don't have at least ICMP_MIN_UDP_HDR
	 * bytes of UDP header, send it upstream.
	 */
	if (iph_hdr_length < sizeof (ipha_t) ||
	    ipha->ipha_protocol != IPPROTO_UDP ||
	    (uchar_t *)udpha + ICMP_MIN_UDP_HDR > mp->b_wptr) {
		goto noticmpv4;
	}

	/* Map the ICMP type/code to a permanent error, if any. */
	switch (icmph->icmph_type) {
	case ICMP_DEST_UNREACHABLE:
		switch (icmph->icmph_code) {
		case ICMP_FRAGMENTATION_NEEDED:
			/*
			 * IP has already adjusted the path MTU.
			 * XXX Somehow pass MTU indication to application?
			 */
			break;
		case ICMP_PORT_UNREACHABLE:
		case ICMP_PROTOCOL_UNREACHABLE:
			error = ECONNREFUSED;
			break;
		default:
			/* Transient errors */
			break;
		}
		break;
	default:
		/* Transient errors */
		break;
	}
	if (error == 0) {
		freemsg(mp);
		return;
	}

	/* Build a T_UDERROR_IND addressed to the destination of the error. */
	switch (udp->udp_family) {
	case AF_INET:
		sin = sin_null;
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = ipha->ipha_dst;
		sin.sin_port = udpha->uha_dst_port;
		mp1 = mi_tpi_uderror_ind((char *)&sin, sizeof (sin_t), NULL, 0,
		    error);
		break;
	case AF_INET6:
		sin6 = sin6_null;
		sin6.sin6_family = AF_INET6;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &sin6.sin6_addr);
		sin6.sin6_port = udpha->uha_dst_port;
		mp1 = mi_tpi_uderror_ind((char *)&sin6, sizeof (sin6_t),
		    NULL, 0, error);
		break;
	}
	/* mp1 stays NULL if the family was unexpected or allocation failed. */
	if (mp1)
		putnext(UDP_RD(q), mp1);
	freemsg(mp);
}
/*
* udp_icmp_error_ipv6 is called by udp_icmp_error to process ICMP for IPv6.
* Generates the appropriate T_UDERROR_IND for permanent (non-transient) errors.
* Assumes that IP has pulled up all the extension headers as well as the
* ICMPv6 header.
* An M_CTL could potentially come here from some other module (i.e. if UDP
* is pushed on some module other than IP). Thus, if we find that the M_CTL
* does not have enough ICMP information , following STREAMS conventions,
* we send it upstream assuming it is an M_CTL we don't understand. The reason
* it might get here is if the non-ICMP M_CTL accidently has 6 in the version
* field (when cast to ipha_t in udp_icmp_error).
*/
static void
udp_icmp_error_ipv6(queue_t *q, mblk_t *mp)
{
	icmp6_t *icmp6;
	ip6_t *ip6h, *outer_ip6h;
	uint16_t hdr_length;
	uint8_t *nexthdrp;
	udpha_t *udpha;
	sin6_t sin6;
	mblk_t *mp1;
	int error = 0;
	size_t mp_size = MBLKL(mp);
	udp_t *udp = Q_TO_UDP(q);

	/*
	 * Verify that we have a complete IP header. If not, send it upstream.
	 */
	if (mp_size < sizeof (ip6_t)) {
	/*
	 * The M_CTL is not understandable as ICMPv6; by STREAMS
	 * convention, pass it upstream unmodified.
	 */
	noticmpv6:
	putnext(UDP_RD(q), mp);
	return;
	}

	outer_ip6h = (ip6_t *)mp->b_rptr;
	/*
	 * Verify this is an ICMPV6 packet, else send it upstream
	 */
	if (outer_ip6h->ip6_nxt == IPPROTO_ICMPV6) {
	hdr_length = IPV6_HDR_LEN;
	} else if (!ip_hdr_length_nexthdr_v6(mp, outer_ip6h, &hdr_length,
	    &nexthdrp) ||
	    *nexthdrp != IPPROTO_ICMPV6) {
	/* Extension-header walk failed or last header isn't ICMPv6 */
	goto noticmpv6;
	}
	/*
	 * The ICMPv6 header follows the outer IPv6 header (plus any
	 * extension headers); the offending inner IPv6 packet follows
	 * the ICMPv6 header.
	 */
	icmp6 = (icmp6_t *)&mp->b_rptr[hdr_length];
	ip6h = (ip6_t *)&icmp6[1];
	/*
	 * Verify we have a complete ICMP and inner IP header.
	 */
	if ((uchar_t *)&ip6h[1] > mp->b_wptr)
	goto noticmpv6;

	/* hdr_length/nexthdrp now describe the INNER (offending) packet */
	if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &hdr_length, &nexthdrp))
	goto noticmpv6;
	udpha = (udpha_t *)((char *)ip6h + hdr_length);

	/*
	 * Validate inner header. If the ULP is not IPPROTO_UDP or if we don't
	 * have at least ICMP_MIN_UDP_HDR bytes of UDP header send the
	 * packet upstream.
	 */
	if ((*nexthdrp != IPPROTO_UDP) ||
	    ((uchar_t *)udpha + ICMP_MIN_UDP_HDR) > mp->b_wptr) {
	goto noticmpv6;
	}

	/* Map the ICMPv6 type/code onto an errno; 0 means transient. */
	switch (icmp6->icmp6_type) {
	case ICMP6_DST_UNREACH:
	switch (icmp6->icmp6_code) {
	case ICMP6_DST_UNREACH_NOPORT:
		error = ECONNREFUSED;
		break;
	case ICMP6_DST_UNREACH_ADMIN:
	case ICMP6_DST_UNREACH_NOROUTE:
	case ICMP6_DST_UNREACH_BEYONDSCOPE:
	case ICMP6_DST_UNREACH_ADDR:
		/* Transient errors */
		break;
	default:
		break;
	}
	break;
	case ICMP6_PACKET_TOO_BIG: {
	struct T_unitdata_ind *tudi;
	struct T_opthdr *toh;
	size_t udi_size;
	mblk_t *newmp;
	t_scalar_t opt_length = sizeof (struct T_opthdr) +
	    sizeof (struct ip6_mtuinfo);
	sin6_t *sin6;
	struct ip6_mtuinfo *mtuinfo;

	/*
	 * If the application has requested to receive path mtu
	 * information, send up an empty message containing an
	 * IPV6_PATHMTU ancillary data item.
	 */
	if (!udp->udp_ipv6_recvpathmtu)
		break;

	/*
	 * Layout of the message: T_unitdata_ind, then the source
	 * sin6_t, then a T_opthdr + ip6_mtuinfo ancillary option.
	 */
	udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin6_t) +
	    opt_length;
	if ((newmp = allocb(udi_size, BPRI_MED)) == NULL) {
		BUMP_MIB(&udp_mib, udpInErrors);
		break;
	}

	/*
	 * newmp->b_cont is left to NULL on purpose. This is an
	 * empty message containing only ancillary data.
	 */
	newmp->b_datap->db_type = M_PROTO;
	tudi = (struct T_unitdata_ind *)newmp->b_rptr;
	newmp->b_wptr = (uchar_t *)tudi + udi_size;
	tudi->PRIM_type = T_UNITDATA_IND;
	tudi->SRC_length = sizeof (sin6_t);
	tudi->SRC_offset = sizeof (struct T_unitdata_ind);
	tudi->OPT_offset = tudi->SRC_offset + sizeof (sin6_t);
	tudi->OPT_length = opt_length;

	sin6 = (sin6_t *)&tudi[1];
	bzero(sin6, sizeof (sin6_t));
	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = udp->udp_v6dst;

	toh = (struct T_opthdr *)&sin6[1];
	toh->level = IPPROTO_IPV6;
	toh->name = IPV6_PATHMTU;
	toh->len = opt_length;
	toh->status = 0;

	/* Destination and new MTU come from the offending packet/ICMP */
	mtuinfo = (struct ip6_mtuinfo *)&toh[1];
	bzero(mtuinfo, sizeof (struct ip6_mtuinfo));
	mtuinfo->ip6m_addr.sin6_family = AF_INET6;
	mtuinfo->ip6m_addr.sin6_addr = ip6h->ip6_dst;
	mtuinfo->ip6m_mtu = icmp6->icmp6_mtu;
	/*
	 * We've consumed everything we need from the original
	 * message. Free it, then send our empty message.
	 */
	freemsg(mp);
	putnext(UDP_RD(q), newmp);
	return;
	}
	case ICMP6_TIME_EXCEEDED:
	/* Transient errors */
	break;
	case ICMP6_PARAM_PROB:
	/* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
	if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
	    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
	    (uchar_t *)nexthdrp) {
		error = ECONNREFUSED;
		break;
	}
	break;
	}

	/* Transient error: drop the ICMP message silently. */
	if (error == 0) {
	freemsg(mp);
	return;
	}

	/*
	 * Permanent error: generate a T_UDERROR_IND identifying the
	 * destination (from the inner, offending packet) and errno.
	 */
	sin6 = sin6_null;
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = ip6h->ip6_dst;
	sin6.sin6_port = udpha->uha_dst_port;
	sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;

	mp1 = mi_tpi_uderror_ind((char *)&sin6, sizeof (sin6_t), NULL, 0,
	    error);
	if (mp1)
	putnext(UDP_RD(q), mp1);
	freemsg(mp);
}
/*
* This routine responds to T_ADDR_REQ messages. It is called by udp_wput.
* The local address is filled in if endpoint is bound. The remote address
 * is filled in if remote address has been specified ("connected endpoint")
* (The concept of connected CLTS sockets is alien to published TPI
* but we support it anyway).
*/
static void
udp_addr_req(queue_t *q, mblk_t *mp)
{
	sin_t *sin;
	sin6_t *sin6;
	mblk_t *ackmp;
	struct T_addr_ack *taa;
	udp_t *udp = Q_TO_UDP(q);

	/*
	 * Reuse the request mblk for the ack; make it large enough for the
	 * worst case (local + remote sockaddr_in6).
	 */
	ackmp = reallocb(mp, sizeof (struct T_addr_ack) +
	    2 * sizeof (sin6_t), 1);
	if (ackmp == NULL) {
		/* reallocb failed; mp is still intact for the error ack */
		udp_err_ack(q, mp, TSYSERR, ENOMEM);
		return;
	}
	taa = (struct T_addr_ack *)ackmp->b_rptr;

	bzero(taa, sizeof (struct T_addr_ack));
	ackmp->b_wptr = (uchar_t *)&taa[1];

	taa->PRIM_type = T_ADDR_ACK;
	ackmp->b_datap->db_type = M_PCPROTO;
	/*
	 * Note: Following code assumes 32 bit alignment of basic
	 * data structures like sin_t and struct T_addr_ack.
	 */
	if (udp->udp_state != TS_UNBND) {
		/*
		 * Fill in local address first
		 */
		taa->LOCADDR_offset = sizeof (*taa);
		if (udp->udp_family == AF_INET) {
			taa->LOCADDR_length = sizeof (sin_t);
			sin = (sin_t *)&taa[1];
			/* Fill zeroes and then initialize non-zero fields */
			*sin = sin_null;
			sin->sin_family = AF_INET;
			if (!IN6_IS_ADDR_V4MAPPED_ANY(&udp->udp_v6src) &&
			    !IN6_IS_ADDR_UNSPECIFIED(&udp->udp_v6src)) {
				IN6_V4MAPPED_TO_IPADDR(&udp->udp_v6src,
				    sin->sin_addr.s_addr);
			} else {
				/*
				 * INADDR_ANY
				 * udp_v6src is not set, we might be bound to
				 * broadcast/multicast. Use udp_bound_v6src as
				 * local address instead (that could
				 * also still be INADDR_ANY)
				 */
				IN6_V4MAPPED_TO_IPADDR(&udp->udp_bound_v6src,
				    sin->sin_addr.s_addr);
			}
			sin->sin_port = udp->udp_port;
			ackmp->b_wptr = (uchar_t *)&sin[1];
			if (udp->udp_state == TS_DATA_XFER) {
				/*
				 * connected, fill remote address too
				 */
				taa->REMADDR_length = sizeof (sin_t);
				/* assumed 32-bit alignment */
				taa->REMADDR_offset = taa->LOCADDR_offset +
				    taa->LOCADDR_length;
				sin = (sin_t *)(ackmp->b_rptr +
				    taa->REMADDR_offset);
				/* initialize */
				*sin = sin_null;
				sin->sin_family = AF_INET;
				sin->sin_addr.s_addr =
				    V4_PART_OF_V6(udp->udp_v6dst);
				sin->sin_port = udp->udp_dstport;
				ackmp->b_wptr = (uchar_t *)&sin[1];
			}
		} else {
			taa->LOCADDR_length = sizeof (sin6_t);
			sin6 = (sin6_t *)&taa[1];
			/* Fill zeroes and then initialize non-zero fields */
			*sin6 = sin6_null;
			sin6->sin6_family = AF_INET6;
			if (!IN6_IS_ADDR_UNSPECIFIED(&udp->udp_v6src)) {
				sin6->sin6_addr = udp->udp_v6src;
			} else {
				/*
				 * UNSPECIFIED
				 * udp_v6src is not set, we might be bound to
				 * broadcast/multicast. Use udp_bound_v6src as
				 * local address instead (that could
				 * also still be UNSPECIFIED)
				 */
				sin6->sin6_addr = udp->udp_bound_v6src;
			}
			sin6->sin6_port = udp->udp_port;
			ackmp->b_wptr = (uchar_t *)&sin6[1];
			if (udp->udp_state == TS_DATA_XFER) {
				/*
				 * connected, fill remote address too
				 */
				taa->REMADDR_length = sizeof (sin6_t);
				/* assumed 32-bit alignment */
				taa->REMADDR_offset = taa->LOCADDR_offset +
				    taa->LOCADDR_length;
				sin6 = (sin6_t *)(ackmp->b_rptr +
				    taa->REMADDR_offset);
				/* initialize */
				*sin6 = sin6_null;
				sin6->sin6_family = AF_INET6;
				sin6->sin6_addr = udp->udp_v6dst;
				sin6->sin6_port = udp->udp_dstport;
				ackmp->b_wptr = (uchar_t *)&sin6[1];
			}
			/*
			 * Note: the original code re-assigned b_wptr to
			 * &sin6[1] here a second time; both paths above
			 * already leave b_wptr correct, so the redundant
			 * store has been removed.
			 */
		}
	}
	ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
	putnext(UDP_RD(q), ackmp);
}
/*
 * Populate *tap from the per-family T_info_ack template, then overlay
 * the endpoint-specific fields (current TPI state and option size).
 */
static void
udp_copy_info(struct T_info_ack *tap, udp_t *udp)
{
	*tap = (udp->udp_family == AF_INET) ?
	    udp_g_t_info_ack_ipv4 : udp_g_t_info_ack_ipv6;
	tap->CURRENT_state = udp->udp_state;
	tap->OPT_size = udp_max_optsize;
}
/*
* This routine responds to T_CAPABILITY_REQ messages. It is called by
* udp_wput. Much of the T_CAPABILITY_ACK information is copied from
* udp_g_t_info_ack. The current state of the stream is copied from
* udp_state.
*/
static void
udp_capability_req(queue_t *q, mblk_t *mp)
{
	struct T_capability_req *tcr;
	struct T_capability_ack *tcap;
	t_uscalar_t cap_bits1;
	udp_t *udp = Q_TO_UDP(q);

	/* Capture the requested capability bits before mp is reused. */
	tcr = (struct T_capability_req *)mp->b_rptr;
	cap_bits1 = tcr->CAP_bits1;

	/* Convert the request mblk into a T_CAPABILITY_ACK in place. */
	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
	    mp->b_datap->db_type, T_CAPABILITY_ACK);
	if (mp == NULL)
		return;

	tcap = (struct T_capability_ack *)mp->b_rptr;
	tcap->CAP_bits1 = 0;

	/* Only TC1_INFO is supported; fill it in when requested. */
	if ((cap_bits1 & TC1_INFO) != 0) {
		udp_copy_info(&tcap->INFO_ack, udp);
		tcap->CAP_bits1 |= TC1_INFO;
	}

	putnext(UDP_RD(q), mp);
}
/*
* This routine responds to T_INFO_REQ messages. It is called by udp_wput.
* Most of the T_INFO_ACK information is copied from udp_g_t_info_ack.
* The current state of the stream is copied from udp_state.
*/
static void
udp_info_req(queue_t *q, mblk_t *mp)
{
	udp_t *udp = Q_TO_UDP(q);
	mblk_t *ackmp;

	/* Convert the request mblk into a T_INFO_ACK in place. */
	ackmp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
	    T_INFO_ACK);
	if (ackmp == NULL)
		return;

	udp_copy_info((struct T_info_ack *)ackmp->b_rptr, udp);
	putnext(UDP_RD(q), ackmp);
}
/*
* IP recognizes seven kinds of bind requests:
*
* - A zero-length address binds only to the protocol number.
*
* - A 4-byte address is treated as a request to
* validate that the address is a valid local IPv4
* address, appropriate for an application to bind to.
* IP does the verification, but does not make any note
* of the address at this time.
*
 * - A 16-byte address is treated as a request
* to validate a local IPv6 address, as the 4-byte
* address case above.
*
* - A 16-byte sockaddr_in to validate the local IPv4 address and also
* use it for the inbound fanout of packets.
*
* - A 24-byte sockaddr_in6 to validate the local IPv6 address and also
* use it for the inbound fanout of packets.
*
* - A 12-byte address (ipa_conn_t) containing complete IPv4 fanout
* information consisting of local and remote addresses
* and ports. In this case, the addresses are both
* validated as appropriate for this operation, and, if
* so, the information is retained for use in the
* inbound fanout.
*
 * - A 36-byte address (ipa6_conn_t) containing complete IPv6
* fanout information, like the 12-byte case above.
*
* IP will also fill in the IRE request mblk with information
* regarding our peer. In all cases, we notify IP of our protocol
* type by appending a single protocol byte to the bind request.
*/
static mblk_t *
udp_ip_bind_mp(udp_t *udp, t_scalar_t bind_prim, t_scalar_t addr_length)
{
char *cp;
mblk_t *mp;
struct T_bind_req *tbr;
ipa_conn_t *ac;
ipa6_conn_t *ac6;
sin_t *sin;
sin6_t *sin6;
ASSERT(bind_prim == O_T_BIND_REQ || bind_prim == T_BIND_REQ);
mp = allocb(sizeof (*tbr) + addr_length + 1, BPRI_HI);
if (!mp)
return (mp);
mp->b_datap->db_type = M_PROTO;
tbr = (struct T_bind_req *)mp->b_rptr;
tbr->PRIM_type = bind_prim;
tbr->ADDR_offset = sizeof (*tbr);
tbr->CONIND_number = 0;
tbr->ADDR_length = addr_length;
cp = (char *)&tbr[1];
switch (addr_length) {
case sizeof (ipa_conn_t):
ASSERT(udp->udp_family == AF_INET);
/* Append a request for an IRE */
mp->b_cont = allocb(sizeof (ire_t), BPRI_HI);
if (!mp->b_cont) {
freemsg(mp);
return (NULL);
}