Mirror of https://github.com/freebsd/freebsd-src.git (synced 2024-12-04 08:09:08 +00:00)
sys/net*: minor spelling fixes.
No functional change.
commit a4641f4eaa
parent 31ae3b070d
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=298995
@@ -701,7 +701,7 @@ cbq_modify_class(acp)
  * struct rm_class *parent, struct rm_class *borrow)
  *
  * This function create a new traffic class in the CBQ class hierarchy of
- * given paramters. The class that created is either the root, default,
+ * given parameters. The class that created is either the root, default,
  * or a new dynamic class. If CBQ is not initilaized, the the root class
  * will be created.
  */
@@ -158,7 +158,7 @@
  #define TH_MIN 5 /* min threshold */
  #define TH_MAX 15 /* max threshold */

- #define RED_LIMIT 60 /* default max queue lenght */
+ #define RED_LIMIT 60 /* default max queue length */
  #define RED_STATS /* collect statistics */

  /*
@@ -171,7 +171,7 @@
  #ifdef ALTQ3_COMPAT
  #ifdef ALTQ_FLOWVALVE
  /*
- * flow-valve is an extention to protect red from unresponsive flows
+ * flow-valve is an extension to protect red from unresponsive flows
  * and to promote end-to-end congestion control.
  * flow-valve observes the average drop rates of the flows that have
  * experienced packet drops in the recent past.
@@ -146,7 +146,7 @@
  #define TH_MIN 5 /* min threshold */
  #define TH_MAX 15 /* max threshold */

- #define RIO_LIMIT 60 /* default max queue lenght */
+ #define RIO_LIMIT 60 /* default max queue length */
  #define RIO_STATS /* collect statistics */

  #define TV_DELTA(a, b, delta) { \
@@ -188,7 +188,7 @@ struct rm_class {
  */
  struct rm_ifdat {
  int queued_; /* # pkts queued downstream */
- int efficient_; /* Link Efficency bit */
+ int efficient_; /* Link Efficiency bit */
  int wrr_; /* Enable Weighted Round-Robin */
  u_long ns_per_byte_; /* Link byte speed. */
  int maxqueued_; /* Max packets to queue */
@@ -579,7 +579,7 @@ struct bpf_zbuf_header {
  * input packets such as port scans, packets from old lost connections,
  * etc. to force the connection to stay up).
  *
- * The first byte of the PPP header (0xff03) is modified to accomodate
+ * The first byte of the PPP header (0xff03) is modified to accommodate
  * the direction - 0x00 = IN, 0x01 = OUT.
  */
  #define DLT_PPP_PPPD 166
@@ -789,7 +789,7 @@ bstp_assign_roles(struct bstp_state *bs)
  bs->bs_root_htime = bs->bs_bridge_htime;
  bs->bs_root_port = NULL;

- /* check if any recieved info supersedes us */
+ /* check if any received info supersedes us */
  LIST_FOREACH(bp, &bs->bs_bplist, bp_next) {
  if (bp->bp_infois != BSTP_INFO_RECEIVED)
  continue;
@@ -558,7 +558,7 @@ ifq_delete(struct ifaltq *ifq)
  }

  /*
- * Perform generic interface initalization tasks and attach the interface
+ * Perform generic interface initialization tasks and attach the interface
  * to the list of "active" interfaces. If vmove flag is set on entry
  * to if_attach_internal(), perform only a limited subset of initialization
  * tasks, given that we are moving from one vnet to another an ifnet which
@@ -345,7 +345,7 @@ arc_frag_next(struct ifnet *ifp)

  /*
  * Defragmenter. Returns mbuf if last packet found, else
- * NULL. frees imcoming mbuf as necessary.
+ * NULL. frees incoming mbuf as necessary.
  */

  static __inline struct mbuf *
@@ -96,7 +96,7 @@ struct ifatm_mib {

  /*
  * Traffic parameters for ATM connections. This contains all parameters
- * to accomodate UBR, UBR+MCR, CBR, VBR and ABR connections.
+ * to accommodate UBR, UBR+MCR, CBR, VBR and ABR connections.
  *
  * Keep in sync with ng_atm.h
  */
@@ -71,7 +71,7 @@
  * - Currently only supports Ethernet-like interfaces (Ethernet,
  * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
  * to bridge other types of interfaces (FDDI-FDDI, and maybe
- * consider heterogenous bridges).
+ * consider heterogeneous bridges).
  */

  #include <sys/cdefs.h>
@@ -484,7 +484,7 @@ if_clone_list(struct if_clonereq *ifcr)
  * below, but that's not a major problem. Not caping our
  * allocation to the number of cloners actually in the system
  * could be because that would let arbitrary users cause us to
- * allocate abritrary amounts of kernel memory.
+ * allocate arbitrary amounts of kernel memory.
  */
  buf_count = (V_if_cloners_count < ifcr->ifcr_count) ?
  V_if_cloners_count : ifcr->ifcr_count;
@@ -514,7 +514,7 @@ epair_transmit_locked(struct ifnet *ifp, struct mbuf *m)
  DPRINTF("packet %s -> %s\n", ifp->if_xname, oifp->if_xname);

  #ifdef ALTQ
- /* Support ALTQ via the clasic if_start() path. */
+ /* Support ALTQ via the classic if_start() path. */
  IF_LOCK(&ifp->if_snd);
  if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
  ALTQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
@@ -2031,7 +2031,7 @@ lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
  if (!LAGG_PORTACTIVE(sc->sc_primary)) {
  tmp_tp = lagg_link_active(sc, sc->sc_primary);
  /*
- * If tmp_tp is null, we've recieved a packet when all
+ * If tmp_tp is null, we've received a packet when all
  * our links are down. Weird, but process it anyways.
  */
  if ((tmp_tp == NULL || tmp_tp == lp)) {
@@ -409,7 +409,7 @@ lltable_update_ifaddr(struct lltable *llt)

  /*
  *
- * Performes generic cleanup routines and frees lle.
+ * Performs generic cleanup routines and frees lle.
  *
  * Called for non-linked entries, with callouts and
  * other AF-specific cleanups performed.
@@ -78,7 +78,7 @@ struct sauth {

  /*
  * Don't change the order of this. Ordering the phases this way allows
- * for a comparision of ``pp_phase >= PHASE_AUTHENTICATE'' in order to
+ * for a comparison of ``pp_phase >= PHASE_AUTHENTICATE'' in order to
  * know whether LCP is up.
  */
  enum ppp_phase {
@@ -2962,7 +2962,7 @@ sppp_ipcp_RCR(struct sppp *sp, struct lcp_header *h, int len)
  * since our algorithm always uses the
  * original option to NAK it with new values,
  * things would become more complicated. In
- * pratice, the only commonly implemented IP
+ * practice, the only commonly implemented IP
  * compression option is VJ anyway, so the
  * difference is negligible.
  */
@@ -4295,7 +4295,7 @@ sppp_chap_tlu(struct sppp *sp)
  if ((sp->hisauth.flags & AUTHFLAG_NORECHALLENGE) == 0)
  log(-1, "next re-challenge in %d seconds\n", i);
  else
- log(-1, "re-challenging supressed\n");
+ log(-1, "re-challenging suppressed\n");
  }

  SPPP_LOCK(sp);
@@ -700,7 +700,7 @@ vlan_devat(struct ifnet *ifp, uint16_t vid)
  * VLAN support can be loaded as a module. The only place in the
  * system that's intimately aware of this is ether_input. We hook
  * into this code through vlan_input_p which is defined there and
- * set here. Noone else in the system should be aware of this so
+ * set here. No one else in the system should be aware of this so
  * we use an explicit reference here.
  */
  extern void (*vlan_input_p)(struct ifnet *, struct mbuf *);
@@ -873,7 +873,7 @@ vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
  * o specify no parameters and get an unattached device that
  * must be configured separately.
  * The first technique is preferred; the latter two are
- * supported for backwards compatibilty.
+ * supported for backwards compatibility.
  *
  * XXXRW: Note historic use of the word "tag" here. New ioctls may be
  * called for.
@@ -168,7 +168,7 @@ static uint8_t rss_key[RSS_KEYSIZE] = {

  /*
  * RSS hash->CPU table, which maps hashed packet headers to particular CPUs.
- * Drivers may supplement this table with a seperate CPU<->queue table when
+ * Drivers may supplement this table with a separate CPU<->queue table when
  * programming devices.
  */
  struct rss_table_entry {
@@ -1199,7 +1199,7 @@ rtsock_msg_buffer(int type, struct rt_addrinfo *rtinfo, struct walkarg *w, int *

  /*
  * This routine is called to generate a message from the routing
- * socket indicating that a redirect has occured, a routing lookup
+ * socket indicating that a redirect has occurred, a routing lookup
  * has failed, or that a protocol has detected timeouts to a particular
  * destination.
  */
@@ -431,7 +431,7 @@ static const char *sff_8024_id[SFF_8024_ID_LAST + 1] = {"Unknown",
  "SMM8",
  "CDFP3"};

- /* Keep compability with old definitions */
+ /* Keep compatibility with old definitions */
  #define SFF_8472_ID_UNKNOWN SFF_8024_ID_UNKNOWN
  #define SFF_8472_ID_GBIC SFF_8024_ID_GBIC
  #define SFF_8472_ID_SFF SFF_8024_ID_SFF
@@ -1833,7 +1833,7 @@ ieee80211_rate2media(struct ieee80211com *ic, int rate, enum ieee80211_phymode m
  { 6 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM3 },
  { 9 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM4 },
  { 54 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM27 },
- /* NB: OFDM72 doesn't realy exist so we don't handle it */
+ /* NB: OFDM72 doesn't really exist so we don't handle it */
  };
  static const struct ratemedia htrates[] = {
  { 0, IFM_IEEE80211_MCS },
@@ -775,7 +775,7 @@ adhoc_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
  *
  * Since there's no (current) way to inform
  * the driver that a channel width change has
- * occured for a single node, just stub this
+ * occurred for a single node, just stub this
  * out.
  */
  #if 0
@@ -1452,7 +1452,7 @@ ieee80211_parse_rsn(struct ieee80211vap *vap, const uint8_t *frm,
  }

  /*
- * WPA/802.11i assocation request processing.
+ * WPA/802.11i association request processing.
  */
  static int
  wpa_assocreq(struct ieee80211_node *ni, struct ieee80211_rsnparms *rsnparms,
@@ -2298,7 +2298,7 @@ bar_timeout(void *arg)
  * to make sure we notify the driver that a BAR
  * TX did occur and fail. This gives the driver
  * a chance to undo any queue pause that may
- * have occured.
+ * have occurred.
  */
  ic->ic_bar_response(ni, tap, 1);
  ieee80211_ampdu_stop(ni, tap, IEEE80211_REASON_TIMEOUT);
@@ -3428,10 +3428,10 @@ ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  break;
  case SIOCSIFADDR:
  /*
- * XXX Handle this directly so we can supress if_init calls.
+ * XXX Handle this directly so we can suppress if_init calls.
  * XXX This should be done in ether_ioctl but for the moment
  * XXX there are too many other parts of the system that
- * XXX set IFF_UP and so supress if_init being called when
+ * XXX set IFF_UP and so suppress if_init being called when
  * XXX it should be.
  */
  ifa = (struct ifaddr *) data;
@@ -1155,7 +1155,7 @@ mesh_forward(struct ieee80211vap *vap, struct mbuf *m,
  IEEE80211_TX_UNLOCK_ASSERT(ic);

  /*
- * mesh ttl of 1 means we are the last one receving it,
+ * mesh ttl of 1 means we are the last one receiving it,
  * according to amendment we decrement and then check if
  * 0, if so we dont forward.
  */
@@ -2217,7 +2217,7 @@ ieee80211_node_timeout(void *arg)
  * Defer timeout processing if a channel switch is pending.
  * We typically need to be mute so not doing things that
  * might generate frames is good to handle in one place.
- * Supressing the station timeout processing may extend the
+ * Suppressing the station timeout processing may extend the
  * lifetime of inactive stations (by not decrementing their
  * idle counters) but this should be ok unless the CSA is
  * active for an unusually long time.
@@ -2894,7 +2894,7 @@ ieee80211_tx_mgt_cb(struct ieee80211_node *ni, void *arg, int status)

  /*
  * Frame transmit completed; arrange timer callback. If
- * transmit was successfuly we wait for response. Otherwise
+ * transmit was successfully we wait for response. Otherwise
  * we arrange an immediate callback instead of doing the
  * callback directly since we don't know what state the driver
  * is in (e.g. what locks it is holding). This work should
@@ -3231,10 +3231,10 @@ ieee80211_beacon_update(struct ieee80211_node *ni, struct mbuf *m, int mcast)
  struct ieee80211_wme_state *wme = &ic->ic_wme;

  /*
- * Check for agressive mode change. When there is
+ * Check for aggressive mode change. When there is
  * significant high priority traffic in the BSS
  * throttle back BE traffic by using conservative
- * parameters. Otherwise BE uses agressive params
+ * parameters. Otherwise BE uses aggressive params
  * to optimize performance of legacy/non-QoS traffic.
  */
  if (wme->wme_flags & WME_F_AGGRMODE) {
@@ -1082,7 +1082,7 @@ ieee80211_wme_initparams_locked(struct ieee80211vap *vap)
  /* NB: check ic_bss to avoid NULL deref on initial attach */
  if (vap->iv_bss != NULL) {
  /*
- * Calculate agressive mode switching threshold based
+ * Calculate aggressive mode switching threshold based
  * on beacon interval. This doesn't need locking since
  * we're only called before entering the RUN state at
  * which point we start sending beacon frames.
@@ -1164,11 +1164,11 @@ ieee80211_wme_updateparams_locked(struct ieee80211vap *vap)
  mode = IEEE80211_MODE_AUTO;

  /*
- * This implements agressive mode as found in certain
+ * This implements aggressive mode as found in certain
  * vendors' AP's. When there is significant high
  * priority (VI/VO) traffic in the BSS throttle back BE
  * traffic by using conservative parameters. Otherwise
- * BE uses agressive params to optimize performance of
+ * BE uses aggressive params to optimize performance of
  * legacy/non-QoS traffic.
  */

@@ -276,10 +276,10 @@ struct chanAccParams {

  struct ieee80211_wme_state {
  u_int wme_flags;
- #define WME_F_AGGRMODE 0x00000001 /* STATUS: WME agressive mode */
+ #define WME_F_AGGRMODE 0x00000001 /* STATUS: WME aggressive mode */
  u_int wme_hipri_traffic; /* VI/VO frames in beacon interval */
- u_int wme_hipri_switch_thresh;/* agressive mode switch thresh */
- u_int wme_hipri_switch_hysteresis;/* agressive mode switch hysteresis */
+ u_int wme_hipri_switch_thresh;/* aggressive mode switch thresh */
+ u_int wme_hipri_switch_hysteresis;/* aggressive mode switch hysteresis */

  struct wmeParams wme_params[4]; /* from assoc resp for each AC*/
  struct chanAccParams wme_wmeChanParams; /* WME params applied to self */
@@ -266,7 +266,7 @@ ieee80211_alloc_countryie(struct ieee80211com *ic)
  * Indoor/Outdoor portion of country string:
  * 'I' indoor only
  * 'O' outdoor only
- * ' ' all enviroments
+ * ' ' all environments
  */
  ie->cc[2] = (rd->location == 'I' ? 'I' :
  rd->location == 'O' ? 'O' : ' ');
@@ -771,7 +771,7 @@ scan_end(struct ieee80211_scan_state *ss, int scandone)
  /* XXX scan state can change! Re-validate scan state! */

  /*
- * Since a cancellation may have occured during one of the
+ * Since a cancellation may have occurred during one of the
  * driver calls (whilst unlocked), update scandone.
  */
  if (scandone == 0 && (ss_priv->ss_iflags & ISCAN_CANCEL) != 0) {
@@ -818,7 +818,7 @@ scan_end(struct ieee80211_scan_state *ss, int scandone)
  "[ticks %u, dwell min %lu scanend %lu]\n",
  __func__,
  ticks, ss->ss_mindwell, ss_priv->ss_scanend);
- ss->ss_next = 0; /* reset to begining */
+ ss->ss_next = 0; /* reset to beginning */
  if (ss->ss_flags & IEEE80211_SCAN_ACTIVE)
  vap->iv_stats.is_scan_active++;
  else
@@ -840,7 +840,7 @@ scan_end(struct ieee80211_scan_state *ss, int scandone)
  ticks, ss->ss_mindwell, ss_priv->ss_scanend);

  /*
- * Since a cancellation may have occured during one of the
+ * Since a cancellation may have occurred during one of the
  * driver calls (whilst unlocked), update scandone.
  */
  if (scandone == 0 && (ss_priv->ss_iflags & ISCAN_CANCEL) != 0) {
@@ -143,7 +143,7 @@ cubic_ack_received(struct cc_var *ccv, uint16_t type)
  * the I-D. Using min_rtt in the tf_cwnd calculation
  * causes w_tf to grow much faster than it should if the
  * RTT is dominated by network buffering rather than
- * propogation delay.
+ * propagation delay.
  */
  w_tf = tf_cwnd(ticks_since_cong,
  cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
@@ -202,7 +202,7 @@ dctcp_cb_init(struct cc_var *ccv)
  dctcp_data->bytes_ecn = 0;
  dctcp_data->bytes_total = 0;
  /*
- * When alpha is set to 0 in the beggining, DCTCP sender transfers as
+ * When alpha is set to 0 in the beginning, DCTCP sender transfers as
  * much data as possible until the value converges which may expand the
  * queueing delay at the switch. When alpha is set to 1, queueing delay
  * is kept small.
@@ -440,7 +440,7 @@ htcp_recalc_beta(struct cc_var *ccv)
  /*
  * TCPTV_SRTTBASE is the initialised value of each connection's SRTT, so
  * we only calc beta if the connection's SRTT has been changed from its
- * inital value. beta is bounded to ensure it is always between
+ * initial value. beta is bounded to ensure it is always between
  * HTCP_MINBETA and HTCP_MAXBETA.
  */
  if (V_htcp_adaptive_backoff && htcp_data->minrtt != TCPTV_SRTTBASE &&
@@ -1103,9 +1103,9 @@ out_locked:
  }

  /*
- * Process a recieved IGMPv3 group-specific or group-and-source-specific
+ * Process a received IGMPv3 group-specific or group-and-source-specific
  * query.
- * Return <0 if any error occured. Currently this is ignored.
+ * Return <0 if any error occurred. Currently this is ignored.
  */
  static int
  igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
@@ -619,7 +619,7 @@ inm_clear_recorded(struct in_multi *inm)
  *
  * Return 0 if the source didn't exist or was already marked as recorded.
  * Return 1 if the source was marked as recorded by this function.
- * Return <0 if any error occured (negated errno code).
+ * Return <0 if any error occurred (negated errno code).
  */
  int
  inm_record_source(struct in_multi *inm, const in_addr_t naddr)
@@ -121,7 +121,7 @@ struct in_conninfo {
  */
  #define INC_ISIPV6 0x01

- #define inc_isipv6 inc_flags /* temp compatability */
+ #define inc_isipv6 inc_flags /* temp compatibility */
  #define inc_fport inc_ie.ie_fport
  #define inc_lport inc_ie.ie_lport
  #define inc_faddr inc_ie.ie_faddr
@@ -147,7 +147,7 @@ struct ip {
  #define IPOPT_SECURITY 130 /* provide s,c,h,tcc */
  #define IPOPT_LSRR 131 /* loose source route */
  #define IPOPT_ESO 133 /* extended security */
- #define IPOPT_CIPSO 134 /* commerical security */
+ #define IPOPT_CIPSO 134 /* commercial security */
  #define IPOPT_SATID 136 /* satnet id */
  #define IPOPT_SSRR 137 /* strict source route */
  #define IPOPT_RA 148 /* router alert */
@@ -205,7 +205,7 @@ ip_tryforward(struct mbuf *m)
  *
  * XXX: Probably some of these checks could be direct drop
  * conditions. However it is not clear whether there are some
- * hacks or obscure behaviours which make it neccessary to
+ * hacks or obscure behaviours which make it necessary to
  * let ip_input handle it. We play safe here and let ip_input
  * deal with it until it is proven that we can directly drop it.
  */
@@ -861,9 +861,9 @@ typedef struct _ipfw_obj_tentry {
  #define IPFW_CTF_ATOMIC 0x01 /* Perform atomic operation */
  /* Operation results */
  #define IPFW_TR_IGNORED 0 /* Entry was ignored (rollback) */
- #define IPFW_TR_ADDED 1 /* Entry was succesfully added */
- #define IPFW_TR_UPDATED 2 /* Entry was succesfully updated*/
- #define IPFW_TR_DELETED 3 /* Entry was succesfully deleted*/
+ #define IPFW_TR_ADDED 1 /* Entry was successfully added */
+ #define IPFW_TR_UPDATED 2 /* Entry was successfully updated*/
+ #define IPFW_TR_DELETED 3 /* Entry was successfully deleted*/
  #define IPFW_TR_LIMIT 4 /* Entry was ignored (limit) */
  #define IPFW_TR_NOTFOUND 5 /* Entry was not found */
  #define IPFW_TR_EXISTS 6 /* Entry already exists */
@@ -223,7 +223,7 @@ icmp_error(struct mbuf *n, int type, int code, uint32_t dest, int mtu)
  /*
  * Calculate length to quote from original packet and
  * prevent the ICMP mbuf from overflowing.
- * Unfortunatly this is non-trivial since ip_forward()
+ * Unfortunately this is non-trivial since ip_forward()
  * sends us truncated packets.
  */
  nlen = m_length(n, NULL);
@@ -706,7 +706,7 @@ bad:
  * may change in future.
  * Router alert options SHOULD be passed if running in IPSTEALTH mode and
  * we are not the endpoint.
- * Length checks on individual options should already have been peformed
+ * Length checks on individual options should already have been performed
  * by ip_dooptions() therefore they are folded under INVARIANTS here.
  *
  * Return zero if not present or options are invalid, non-zero if present.
@@ -44,7 +44,7 @@ __FBSDID("$FreeBSD$");
  Version 2.1: May, 1997 (cjm)
  Very minor changes to conform with
  local/global/function naming conventions
- withing the packet alising module.
+ within the packet alising module.
  */

  /* Includes */
@@ -482,7 +482,7 @@ lPACKET_DONE:
  which will generate a type-error on all but 32-bit machines.

  [Note 2] This routine really ought to be replaced with one that
- creates a transparent proxy on the aliasing host, to allow arbitary
+ creates a transparent proxy on the aliasing host, to allow arbitrary
  changes in the TCP stream. This should not be too difficult given
  this base; I (ee) will try to do this some time later.
  */
@@ -357,7 +357,7 @@ void PunchFWHole(struct alias_link *_lnk);
  /* Housekeeping function */
  void HouseKeeping(struct libalias *);

- /* Tcp specfic routines */
+ /* Tcp specific routines */
  /* lint -save -library Suppress flexelint warnings */

  /* Transparent proxy routines */
@@ -518,7 +518,7 @@ AliasHandleRtspOut(struct libalias *la, struct ip *pip, struct alias_link *lnk,

  /*
  * When aliasing a server, check for the 200 reply
- * Accomodate varying number of blanks between 200 & OK
+ * Accommodate varying number of blanks between 200 & OK
  */

  if (dlen >= (int)strlen(str200)) {
@@ -1067,7 +1067,7 @@ In case the application provides a
  .Dv SIGHUP
  signal handler, add a call to
  .Fn LibAliasRefreshModules
- inside the handler, and everytime you want to refresh the loaded modules,
+ inside the handler, and every time you want to refresh the loaded modules,
  send it the
  .Dv SIGHUP
  signal:
@@ -365,7 +365,7 @@ cc_conn_init(struct tcpcb *tp)
  /*
  * There's some sort of gateway or interface
  * buffer limit on the path. Use this to set
- * the slow start threshhold, but set the
+ * the slow start threshold, but set the
  * threshold to no less than 2*mss.
  */
  tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
@@ -2533,7 +2533,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
  * change and FIN isn't set),
  * the ack is the biggest we've
  * seen and we've seen exactly our rexmt
- * threshhold of them, assume a packet
+ * threshold of them, assume a packet
  * has been dropped and retransmit it.
  * Kludge snd_nxt & the congestion
  * window so we send only this one
@@ -215,7 +215,7 @@ tcp_output(struct tcpcb *tp)
  */
  if ((tp->t_flags & TF_FASTOPEN) &&
  (tp->t_state == TCPS_SYN_RECEIVED) &&
- SEQ_GT(tp->snd_max, tp->snd_una) && /* inital SYN|ACK sent */
+ SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */
  (tp->snd_nxt != tp->snd_una)) /* not a retransmit */
  return (0);
  #endif
@@ -495,9 +495,9 @@ after_sack_rexmit:
  * and does at most one step per received ACK. This fast
  * scaling has the drawback of growing the send buffer beyond
  * what is strictly necessary to make full use of a given
- * delay*bandwith product. However testing has shown this not
+ * delay*bandwidth product. However testing has shown this not
  * to be much of an problem. At worst we are trading wasting
- * of available bandwith (the non-use of it) for wasting some
+ * of available bandwidth (the non-use of it) for wasting some
  * socket buffer memory.
  *
  * TODO: Shrink send buffer during idle periods together
@@ -1619,7 +1619,7 @@ tcp_setpersist(struct tcpcb *tp)
  if (tcp_timer_active(tp, TT_REXMT))
  panic("tcp_setpersist: retransmit pending");
  /*
- * Start/restart persistance timer.
+ * Start/restart persistence timer.
  */
  TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
  tcp_persmin, tcp_persmax);
@@ -401,8 +401,8 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, tcp_seq th_ack)

  /*
  * Sort the SACK blocks so we can update the scoreboard with just one
- * pass. The overhead of sorting upto 4+1 elements is less than
- * making upto 4+1 passes over the scoreboard.
+ * pass. The overhead of sorting up to 4+1 elements is less than
+ * making up to 4+1 passes over the scoreboard.
  */
  for (i = 0; i < num_sack_blks; i++) {
  for (j = i + 1; j < num_sack_blks; j++) {
@@ -185,7 +185,7 @@ tcp_do_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
  short ostate = 0;
  #endif
  /*
- * The following if statment will be true if
+ * The following if statement will be true if
  * we are doing the win_up_in_fp <and>
  * - We have more new data (SEQ_LT(tp->snd_wl1, th->th_seq)) <or>
  * - No more new data, but we have an ack for new data
@@ -1061,7 +1061,7 @@ tcp_do_slowpath(struct mbuf *m, struct tcphdr *th, struct socket *so,
  * change and FIN isn't set),
  * the ack is the biggest we've
  * seen and we've seen exactly our rexmt
- * threshhold of them, assume a packet
+ * threshold of them, assume a packet
  * has been dropped and retransmit it.
  * Kludge snd_nxt & the congestion
  * window so we send only this one
@@ -1376,7 +1376,7 @@ tcp_discardcb(struct tcpcb *tp)
  * Update the ssthresh always when the conditions below
  * are satisfied. This gives us better new start value
  * for the congestion avoidance for new connections.
- * ssthresh is only set if packet loss occured on a session.
+ * ssthresh is only set if packet loss occurred on a session.
  *
  * XXXRW: 'so' may be NULL here, and/or socket buffer may be
  * being torn down. Ideally this code would not use 'so'.
@@ -1419,7 +1419,7 @@ skip_alloc:
  * With the default maxsockbuf of 256K, a scale factor
  * of 3 will be chosen by this algorithm. Those who
  * choose a larger maxsockbuf should watch out
- * for the compatiblity problems mentioned above.
+ * for the compatibility problems mentioned above.
  *
  * RFC1323: The Window field in a SYN (i.e., a <SYN>
  * or <SYN,ACK>) segment itself is never scaled.
@@ -1746,7 +1746,7 @@ syncache_respond(struct syncache *sc, struct syncache_head *sch, int locked,
  * with the latter taking over when the former is exhausted. When matching
  * syncache entry is found the syncookie is ignored.
  *
- * The only reliable information persisting the 3WHS is our inital sequence
+ * The only reliable information persisting the 3WHS is our initial sequence
  * number ISS of 32 bits. Syncookies embed a cryptographically sufficient
  * strong hash (MAC) value and a few bits of TCP SYN options in the ISS
  * of our SYN|ACK. The MAC can be recomputed when the ACK to our SYN|ACK
@@ -522,7 +522,7 @@ tcp_timer_persist(void *xtp)
  KASSERT((tp->t_timers->tt_flags & TT_PERSIST) != 0,
  ("%s: tp %p persist callout should be running", __func__, tp));
  /*
- * Persistance timer into zero window.
+ * Persistence timer into zero window.
  * Force a byte to be output, if possible.
  */
  TCPSTAT_INC(tcps_persisttimeo);
@@ -135,7 +135,7 @@ frag6_init(void)
  * fragment's Fragment header.
  * -> should grab it from the first fragment only
  *
- * The following note also contradicts with fragment rule - noone is going to
+ * The following note also contradicts with fragment rule - no one is going to
  * send different fragment with different next header field.
  *
  * additional note (p22):
@@ -1955,7 +1955,7 @@ in6if_do_dad(struct ifnet *ifp)
  /*
  * Our DAD routine requires the interface up and running.
  * However, some interfaces can be up before the RUNNING
- * status. Additionaly, users may try to assign addresses
+ * status. Additionally, users may try to assign addresses
  * before the interface becomes up (or running).
  * This function returns EAGAIN in that case.
  * The caller should mark "tentative" on the address instead of
@@ -574,7 +574,7 @@ in6m_clear_recorded(struct in6_multi *inm)
  *
  * Return 0 if the source didn't exist or was already marked as recorded.
  * Return 1 if the source was marked as recorded by this function.
- * Return <0 if any error occured (negated errno code).
+ * Return <0 if any error occurred (negated errno code).
  */
  int
  in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr)
@@ -282,7 +282,7 @@ ip6_forward(struct mbuf *m, int srcrt)
  * ipsec6_proces_packet will send the packet using ip6_output
  */
  error = ipsec6_process_packet(m, sp->req);
- /* Release SP if an error occured */
+ /* Release SP if an error occurred */
  if (error != 0)
  KEY_FREESP(&sp);
  if (error == EJUSTRETURN) {
@@ -108,7 +108,7 @@ __FBSDID("$FreeBSD$");

  struct randomtab {
  const int ru_bits; /* resulting bits */
- const long ru_out; /* Time after wich will be reseeded */
+ const long ru_out; /* Time after which will be reseeded */
  const u_int32_t ru_max; /* Uniq cycle, avoid blackjack prediction */
  const u_int32_t ru_gen; /* Starting generator */
  const u_int32_t ru_n; /* ru_n: prime, ru_n - 1: product of pfacts[] */
@@ -128,7 +128,7 @@ struct randomtab {

  static struct randomtab randomtab_32 = {
  32, /* resulting bits */
- 180, /* Time after wich will be reseeded */
+ 180, /* Time after which will be reseeded */
  1000000000, /* Uniq cycle, avoid blackjack prediction */
  2, /* Starting generator */
  2147483629, /* RU_N-1 = 2^2*3^2*59652323 */
@@ -139,7 +139,7 @@ static struct randomtab randomtab_32 = {

  static struct randomtab randomtab_20 = {
  20, /* resulting bits */
- 180, /* Time after wich will be reseeded */
+ 180, /* Time after which will be reseeded */
  200000, /* Uniq cycle, avoid blackjack prediction */
  2, /* Starting generator */
  524269, /* RU_N-1 = 2^2*3^2*14563 */
@@ -92,7 +92,7 @@ struct pr_usrreqs;
  *
  * ip6c_finaldst usually points to ip6c_ip6->ip6_dst. if the original
  * (internal) packet carries a routing header, it may point the final
- * dstination address in the routing header.
+ * destination address in the routing header.
  *
  * ip6c_src: ip6c_ip6->ip6_src + scope info + flowlabel in ip6c_ip6
  * (beware of flowlabel, if you try to compare it against others)
@@ -971,9 +971,9 @@ out_locked:
  }

  /*
- * Process a recieved MLDv2 group-specific or group-and-source-specific
+ * Process a received MLDv2 group-specific or group-and-source-specific
  * query.
- * Return <0 if any error occured. Currently this is ignored.
+ * Return <0 if any error occurred. Currently this is ignored.
  */
  static int
  mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifsoftc *mli,
@@ -479,7 +479,7 @@ nd6_options(union nd_opts *ndopts)
  default:
  /*
  * Unknown options must be silently ignored,
- * to accomodate future extension to the protocol.
+ * to accommodate future extension to the protocol.
  */
  nd6log((LOG_DEBUG,
  "nd6_options: unsupported option %d - "
@@ -1604,7 +1604,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
  case SIOCSIFINFO_IN6:
  /*
  * used to change host variables from userland.
- * intented for a use on router to reflect RA configurations.
+ * intended for a use on router to reflect RA configurations.
  */
  /* 0 means 'unspecified' */
  if (ND.linkmtu != 0) {
@@ -482,7 +482,7 @@ nd6_rtmsg(int cmd, struct rtentry *rt)
  }

  /*
- * default router list proccessing sub routines
+ * default router list processing sub routines
  */

  static void
@@ -290,7 +290,7 @@ key_allocsp_default(const char* where, int tag)
  * 0 : bypass
  * EACCES : discard packet.
  * ENOENT : ipsec_acquire() in progress, maybe.
- * others : error occured.
+ * others : error occurred.
  * others: a pointer to SP
  *
  * NOTE: IPv6 mapped adddress concern is implemented here.
@@ -318,7 +318,7 @@ ipsec_getpolicy(struct tdb_ident *tdbi, u_int dir)
  * 0 : bypass
  * EACCES : discard packet.
  * ENOENT : ipsec_acquire() in progress, maybe.
- * others : error occured.
+ * others : error occurred.
  * others: a pointer to SP
  *
  * NOTE: IPv6 mapped adddress concern is implemented here.
@@ -425,7 +425,7 @@ ipsec_getpolicybysock(const struct mbuf *m, u_int dir, struct inpcb *inp,
  * 0 : bypass
  * EACCES : discard packet.
  * ENOENT : ipsec_acquire() in progress, maybe.
- * others : error occured.
+ * others : error occurred.
  */
  struct secpolicy *
  ipsec_getpolicybyaddr(const struct mbuf *m, u_int dir, int *error)
@@ -589,7 +589,7 @@ ipsec4_process_packet(struct mbuf *m, struct ipsecrequest *isr)
  * packet will be returned for transmission after crypto
  * processing, etc. are completed.
  *
- * NB: m & sav are ``passed to caller'' who's reponsible for
+ * NB: m & sav are ``passed to caller'' who's responsible for
  * for reclaiming their resources.
  */
  switch(dst->sa.sa_family) {
@@ -933,7 +933,7 @@ key_do_allocsa_policy(struct secashead *sah, u_int state)
  {
  struct secasvar *sav, *nextsav, *candidate, *d;

- /* initilize */
+ /* initialize */
  candidate = NULL;

  SAHTREE_LOCK();
@@ -1058,7 +1058,7 @@ key_do_allocsa_policy(struct secashead *sah, u_int state)
  * allocating a usable SA entry for a *INBOUND* packet.
  * Must call key_freesav() later.
  * OUT: positive: pointer to a usable sav (i.e. MATURE or DYING state).
- * NULL: not found, or error occured.
+ * NULL: not found, or error occurred.
  *
  * In the comparison, no source address is used--for RFC2401 conformance.
  * To quote, from section 4.1:
@@ -2335,7 +2335,7 @@ key_spdget(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
  * send
  * <base, policy(*)>
  * to KMD, and expect to receive
- * <base> with SADB_X_SPDACQUIRE if error occured,
+ * <base> with SADB_X_SPDACQUIRE if error occurred,
  * or
  * <base, policy>
  * with SADB_X_SPDUPDATE from KMD by PF_KEY.
@@ -6153,7 +6153,7 @@ key_getprop(const struct secasindex *saidx)
  * <base, SA, address(SD), (address(P)), x_policy,
  * (identity(SD),) (sensitivity,) proposal>
  * to KMD, and expect to receive
- * <base> with SADB_ACQUIRE if error occured,
+ * <base> with SADB_ACQUIRE if error occurred,
  * or
  * <base, src address, dst address, (SPI range)> with SADB_GETSPI
  * from KMD by PF_KEY.
@@ -6517,9 +6517,9 @@ key_acquire2(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)

  /*
  * Error message from KMd.
- * We assume that if error was occured in IKEd, the length of PFKEY
+ * We assume that if error was occurred in IKEd, the length of PFKEY
  * message is equal to the size of sadb_msg structure.
- * We do not raise error even if error occured in this function.
+ * We do not raise error even if error occurred in this function.
  */
  if (mhp->msg->sadb_msg_len == PFKEY_UNIT64(sizeof(struct sadb_msg))) {
  struct secacq *acq;
@@ -83,7 +83,7 @@ enum {
  * heap_insert() adds a key-pointer pair to the heap
  *
  * HEAP_TOP() returns a pointer to the top element of the heap,
- * but makes no checks on its existance (XXX should we change ?)
+ * but makes no checks on its existence (XXX should we change ?)
  *
  * heap_extract() removes the entry at the top, returing the pointer.
  * (the key should have been read before).
@@ -146,7 +146,7 @@ int heap_scan(struct dn_heap *, int (*)(void *, uintptr_t), uintptr_t);
  * of the dn_ht_find(), and of the callbacks:
  *
  * DNHT_KEY_IS_OBJ means the key is the object pointer.
- * It is usally of interest for the hash and match functions.
+ * It is usually of interest for the hash and match functions.
  *
  * DNHT_MATCH_PTR during a lookup, match pointers instead
  * of calling match(). Normally used when removing specific
@@ -86,7 +86,7 @@ USERLAND-KERNEL API (ip_dummynet.h)

  struct dn_link:
  contains data about the physical link such as
- bandwith, delay, burst size;
+ bandwidth, delay, burst size;

  struct dn_fs:
  describes a flowset, i.e. a template for queues.
@@ -444,7 +444,7 @@ of the object to remove

  Delete of pipe x
  ----------------
- A pipe can be deleted by the user throught the command 'ipfw pipe x delete'.
+ A pipe can be deleted by the user through the command 'ipfw pipe x delete'.
  To delete a pipe, the pipe is removed from the pipe list, and then deleted.
  Also the scheduler associated with this pipe should be deleted.
  For compatibility with old dummynet syntax, the associated FIFO scheduler and
@@ -452,7 +452,7 @@ FIFO flowset must be deleted.

  Delete of flowset x
  -------------------
- To remove a flowset, we must be sure that is no loger referenced by any object.
+ To remove a flowset, we must be sure that is no longer referenced by any object.
  If the flowset to remove is in the unlinked flowset list, there is not any
  issue, the flowset can be safely removed calling a free() (the flowset
  extension is not yet created if the flowset is in this list).
@@ -492,7 +492,7 @@ If the counter was not 0, we wait for it. Every time the dummynet_task()
  function extract a scheduler from the system_heap, the counter is decremented.
  If the scheduler has the delete flag enabled the dequeue() is not called and
  delete_scheduler_instance() is called to delete the instance.
- Obviously this scheduler instance is no loger inserted in the system_heap.
+ Obviously this scheduler instance is no longer inserted in the system_heap.
  If the counter reaches 0, the delete_scheduler_template() function is called
  all memory is released.
  NOTE: Flowsets that belong to this scheduler are not deleted, so if a new
@@ -559,7 +559,7 @@ There are four request for old dummynet:
  depending of its version. There are two function that build the
  corrected buffer, ip_dummynet_get7() and ip_dummynet_get8(). These
  functions reproduce the buffer exactly as 'ipfw' expect. The only difference
- is that the weight parameter for a queue is no loger sent by dummynet and so
+ is that the weight parameter for a queue is no longer sent by dummynet and so
  it is set to 0.
  Moreover, because of the internal structure has changed, the bucket size
  of a queue could not be correct, because now all flowset share the hash
@@ -581,7 +581,7 @@ I have to modify the ip_fw2.c file to manage these two case, and added a
  variable (is7) to store the ipfw version used, using an approach like the
  previous file:
  - when a new rule is added (option IP_FW_ADD) the is7 variable is set if the
- size of the rule received corrispond to FreeBSD 7.2 ipfw version. If so, the
+ size of the rule received correspond to FreeBSD 7.2 ipfw version. If so, the
  rule is converted to version 8 calling the function convert_rule_to_8().
  Moreover, after the insertion of the rule, the rule is now reconverted to
  version 7 because the ipfw binary will print it.
@@ -778,7 +778,7 @@ ip_dummynet_compat(struct sockopt *sopt)
  void *v = NULL;
  struct dn_id oid;

- /* Lenght of data, used to found ipfw version... */
+ /* Length of data, used to found ipfw version... */
  int len = sopt->sopt_valsize;

  /* len can be 0 if command was dummynet_flush */
@@ -1629,7 +1629,7 @@ dummynet_flush(void)
  * with an oid which is at least a dn_id.
  * - the first object is the command (config, delete, flush, ...)
  * - config_link must be issued after the corresponding config_sched
- * - parameters (DN_TXT) for an object must preceed the object
+ * - parameters (DN_TXT) for an object must precede the object
  * processed on a config_sched.
  */
  int
@@ -936,7 +936,7 @@ ipfw_chk(struct ip_fw_args *args)
  * offset == 0 means that (if this is an IPv4 packet)
  * this is the first or only fragment.
  * For IPv6 offset|ip6f_mf == 0 means there is no Fragment Header
- * or there is a single packet fragement (fragement header added
+ * or there is a single packet fragment (fragment header added
  * without needed). We will treat a single packet fragment as if
  * there was no fragment header (or log/block depending on the
  * V_fw_permit_single_frag6 sysctl setting).
@@ -1999,7 +1999,7 @@ do { \
  * certainly be inp_user_cookie?
  */

- /* For incomming packet, lookup up the
+ /* For incoming packet, lookup up the
  inpcb using the src/dest ip/port tuple */
  if (inp == NULL) {
  inp = in_pcblookup(pi,
@@ -989,7 +989,7 @@ ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
  }

  /*
- * This procedure is used to perform various maintance
+ * This procedure is used to perform various maintenance
  * on dynamic hash list. Currently it is called every second.
  */
  static void
@@ -1021,7 +1021,7 @@ ipfw_dyn_tick(void * vnetx)


  /*
- * Walk thru all dynamic states doing generic maintance:
+ * Walk through all dynamic states doing generic maintenance:
  * 1) free expired states
  * 2) free all states based on deleted rule / set
  * 3) send keepalives for states if needed
@@ -932,7 +932,7 @@ ipfw_nat_cfg(struct sockopt *sopt)

  /*
  * Allocate 2x buffer to store converted structures.
- * new redir_cfg has shrinked, so we're sure that
+ * new redir_cfg has shrunk, so we're sure that
  * new buffer size is enough.
  */
  buf = malloc(roundup2(len, 8) + len2, M_TEMP, M_WAITOK | M_ZERO);
@@ -2467,7 +2467,7 @@ ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
  if (error != 0)
  break;
  /*
- * Compability stuff for old clients:
+ * Compatibility stuff for old clients:
  * prepare to automaitcally create non-existing objects.
  */
  if (unresolved != 0) {
@@ -2579,7 +2579,7 @@ free:
  * Rules in reply are modified to store their actual ruleset number.
  *
  * (*1) TLVs inside IPFW_TLV_TBL_LIST needs to be sorted ascending
- * accoring to their idx field and there has to be no duplicates.
+ * according to their idx field and there has to be no duplicates.
  * (*2) Numbered rules inside IPFW_TLV_RULE_LIST needs to be sorted ascending.
  * (*3) Each ip_fw structure needs to be aligned to u64 boundary.
  *
@@ -3279,7 +3279,7 @@ ipfw_flush_sopt_data(struct sockopt_data *sd)
  }

  /*
- * Ensures that @sd buffer has contigious @neeeded number of
+ * Ensures that @sd buffer has contiguous @neeeded number of
  * bytes.
  *
  * Returns pointer to requested space or NULL.
@@ -3307,7 +3307,7 @@ ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed)
  }

  /*
- * Requests @needed contigious bytes from @sd buffer.
+ * Requests @needed contiguous bytes from @sd buffer.
  * Function is used to notify subsystem that we are
  * interesed in first @needed bytes (request header)
  * and the rest buffer can be safely zeroed.
@@ -3396,7 +3396,7 @@ ipfw_ctl3(struct sockopt *sopt)
  /*
  * Determine opcode type/buffer size:
  * allocate sliding-window buf for data export or
- * contigious buffer for special ops.
+ * contiguous buffer for special ops.
  */
  if ((h.dir & HDIR_SET) != 0) {
  /* Set request. Allocate contigous buffer. */
@@ -319,7 +319,7 @@ find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
  if (op == OP_DEL)
  return (ESRCH);

- /* Compability mode: create new table for old clients */
+ /* Compatibility mode: create new table for old clients */
  if ((tei->flags & TEI_FLAGS_COMPAT) == 0)
  return (ESRCH);

@@ -927,7 +927,7 @@ manage_table_ent_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
  tei.masklen = xent->masklen;
  ipfw_import_table_value_legacy(xent->value, &v);
  tei.pvalue = &v;
- /* Old requests compability */
+ /* Old requests compatibility */
  tei.flags = TEI_FLAGS_COMPAT;
  if (xent->type == IPFW_TABLE_ADDR) {
  if (xent->len - hdrlen == sizeof(in_addr_t))
@@ -1207,7 +1207,7 @@ flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
  uint8_t tflags;

  /*
- * Stage 1: save table algoritm.
+ * Stage 1: save table algorithm.
  * Reference found table to ensure it won't disappear.
  */
  IPFW_UH_WLOCK(ch);
@@ -2582,7 +2582,7 @@ ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint16_t kidx,
  */

  /*
- * Finds algoritm by index, table type or supplied name.
+ * Finds algorithm by index, table type or supplied name.
  *
  * Returns pointer to algo or NULL.
  */
@@ -3224,7 +3224,7 @@ ipfw_swap_tables_sets(struct ip_fw_chain *ch, uint32_t set,
  * Move all tables which are reference by rules in @rr to set @new_set.
  * Makes sure that all relevant tables are referenced ONLLY by given rules.
  *
- * Retuns 0 on success,
+ * Returns 0 on success,
  */
  int
  ipfw_move_tables_sets(struct ip_fw_chain *ch, ipfw_range_tlv *rt,
@@ -181,7 +181,7 @@ __FBSDID("$FreeBSD$");
  * OPTIONAL, locked (UH). (M_NOWAIT). Returns 0 on success.
  *
  * Finds entry specified by given key.
- * * Caller is requred to do the following:
+ * * Caller is required to do the following:
  * entry found: returns 0, export entry to @tent
  * entry not found: returns ENOENT
  *
@@ -263,7 +263,7 @@
  * Dumps entry @e to @tent.
  *
  *
- * -print_config: prints custom algoritm options into buffer.
+ * -print_config: prints custom algorithm options into buffer.
  * typedef void (ta_print_config)(void *ta_state, struct table_info *ti,
  * char *buf, size_t bufsize);
  * OPTIONAL. locked(UH). (M_NOWAIT).
@@ -500,7 +500,7 @@ ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts)
  count = ts->count;
  for (i = 0; i < count; i++) {
  ptei = &tei[i];
- ptei->value = 0; /* Ensure value is always 0 in the beginnig */
+ ptei->value = 0; /* Ensure value is always 0 in the beginning */
  mask_table_value(ptei->pvalue, &tval, ts->vmask);
  ptv = (struct table_val_link *)ipfw_objhash_lookup_name(vi, 0,
  (char *)&tval);
@@ -603,7 +603,7 @@ ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts)
  }

  /*
- * Compability function used to import data from old
+ * Compatibility function used to import data from old
  * IP_FW_TABLE_ADD / IP_FW_TABLE_XADD opcodes.
  */
  void
@@ -3957,7 +3957,7 @@ pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
  * (Selective ACK). We could optionally validate the SACK values
  * against the current ACK window, either forwards or backwards, but
  * I'm not confident that SACK has been implemented properly
- * everywhere. It wouldn't surprise me if several stacks accidently
+ * everywhere. It wouldn't surprise me if several stacks accidentally
  * SACK too far backwards of previously ACKed data. There really aren't
  * any security implications of bad SACKing unless the target stack
  * doesn't validate the option length correctly. Someone trying to
@@ -3539,7 +3539,7 @@ shutdown_pf(void)
  pf_clear_srcnodes(NULL);

  /* status does not use malloced mem so no need to cleanup */
- /* fingerprints and interfaces have thier own cleanup code */
+ /* fingerprints and interfaces have their own cleanup code */
  } while(0);

  return (error);
@@ -62,7 +62,7 @@ __FBSDID("$FreeBSD$");
  struct pf_frent {
  TAILQ_ENTRY(pf_frent) fr_next;
  struct mbuf *fe_m;
- uint16_t fe_hdrlen; /* ipv4 header lenght with ip options
+ uint16_t fe_hdrlen; /* ipv4 header length with ip options
  ipv6, extension, fragment header */
  uint16_t fe_extoff; /* last extension header offset or 0 */
  uint16_t fe_len; /* fragment length */
@@ -158,7 +158,7 @@ out:
  /*
  * Connect to the resource specified by smbioc_ossn structure.
  * It may either find an existing connection or try to establish a new one.
- * If no errors occured smb_vc returned locked and referenced.
+ * If no errors occurred smb_vc returned locked and referenced.
  */
  int
  smb_usr_opensession(struct smbioc_ossn *dp, struct smb_cred *scred,