-rw-r--r--  Documentation/feature-removal-schedule.txt | 9
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 43
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9485_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 89
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 56
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 44
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 135
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 87
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 41
-rw-r--r--  drivers/net/wireless/libertas/mesh.c | 11
-rw-r--r--  drivers/net/wireless/wl1251/ps.c | 41
-rw-r--r--  include/linux/netfilter/Kbuild | 1
-rw-r--r--  include/linux/netfilter/xt_addrtype.h | 44
-rw-r--r--  include/net/cfg80211.h | 8
-rw-r--r--  include/net/ip_vs.h | 198
-rw-r--r--  include/net/mac80211.h | 7
-rw-r--r--  include/net/net_namespace.h | 2
-rw-r--r--  include/net/netns/ip_vs.h | 143
-rw-r--r--  net/ipv4/netfilter/Kconfig | 10
-rw-r--r--  net/ipv4/netfilter/Makefile | 1
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 3
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 3
-rw-r--r--  net/ipv4/netfilter/ipt_addrtype.c | 134
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 3
-rw-r--r--  net/mac80211/cfg.c | 21
-rw-r--r--  net/mac80211/chan.c | 3
-rw-r--r--  net/mac80211/driver-ops.h | 26
-rw-r--r--  net/mac80211/driver-trace.h | 52
-rw-r--r--  net/mac80211/mlme.c | 37
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 41
-rw-r--r--  net/netfilter/Kconfig | 11
-rw-r--r--  net/netfilter/Makefile | 1
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 13
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 104
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 256
-rw-r--r--  net/netfilter/ipvs/ip_vs_est.c | 63
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c | 31
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 35
-rw-r--r--  net/netfilter/ipvs/ip_vs_pe_sip.c | 9
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 1
-rw-r--r--  net/netfilter/x_tables.c | 26
-rw-r--r--  net/netfilter/xt_addrtype.c | 229
-rw-r--r--  net/netfilter/xt_connlimit.c | 59
-rw-r--r--  net/wireless/ethtool.c | 33
58 files changed, 1441 insertions, 801 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index d6f5255ca547..a9c4245e2fd8 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -637,3 +637,12 @@ Why: The original implementation of memsw feature enabled by
Who: Michal Hocko <[email protected]>
----------------------------
+
+What: ipt_addrtype match include file
+When: 2012
+Why: superseded by xt_addrtype
+Who: Florian Westphal <[email protected]>
+Files: include/linux/netfilter_ipv4/ipt_addrtype.h
+>>>>>>> 2f5dc63123905a89d4260ab8ee08d19ec104db04
+
+----------------------------
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index e6ff62e60a79..4d7f21ee111c 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -943,6 +943,7 @@ ath5k_txq_setup(struct ath5k_softc *sc,
spin_lock_init(&txq->lock);
txq->setup = true;
txq->txq_len = 0;
+ txq->txq_max = ATH5K_TXQ_LEN_MAX;
txq->txq_poll_mark = false;
txq->txq_stuck = 0;
}
@@ -1534,7 +1535,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
goto drop_packet;
}
- if (txq->txq_len >= ATH5K_TXQ_LEN_MAX)
+ if (txq->txq_len >= txq->txq_max)
ieee80211_stop_queue(hw, txq->qnum);
spin_lock_irqsave(&sc->txbuflock, flags);
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 8d1df1fa2351..978f1f4ac2f3 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -86,6 +86,7 @@ struct ath5k_txq {
spinlock_t lock; /* lock on q and link */
bool setup;
int txq_len; /* number of queued buffers */
+ int txq_max; /* max allowed num of queued buffers */
bool txq_poll_mark;
unsigned int txq_stuck; /* informational counter */
};
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index c9b0b676adda..9be29b728b1c 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -740,6 +740,47 @@ ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
}
+static void ath5k_get_ringparam(struct ieee80211_hw *hw,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ *tx = sc->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
+
+ *tx_max = ATH5K_TXQ_LEN_MAX;
+ *rx = *rx_max = ATH_RXBUF;
+}
+
+
+static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
+{
+ struct ath5k_softc *sc = hw->priv;
+ u16 qnum;
+
+ /* only support setting tx ring size for now */
+ if (rx != ATH_RXBUF)
+ return -EINVAL;
+
+ /* restrict tx ring size min/max */
+ if (!tx || tx > ATH5K_TXQ_LEN_MAX)
+ return -EINVAL;
+
+ for (qnum = 0; qnum < ARRAY_SIZE(sc->txqs); qnum++) {
+ if (!sc->txqs[qnum].setup)
+ continue;
+ if (sc->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
+ sc->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
+ continue;
+
+ sc->txqs[qnum].txq_max = tx;
+ if (sc->txqs[qnum].txq_len >= sc->txqs[qnum].txq_max)
+ ieee80211_stop_queue(hw, sc->txqs[qnum].qnum);
+ }
+
+ return 0;
+}
+
+
const struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
.start = ath5k_start,
@@ -778,4 +819,6 @@ const struct ieee80211_ops ath5k_hw_ops = {
/* .napi_poll = not implemented */
.set_antenna = ath5k_set_antenna,
.get_antenna = ath5k_get_antenna,
+ .set_ringparam = ath5k_set_ringparam,
+ .get_ringparam = ath5k_get_ringparam,
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index eac4d8526fc1..71cc0a3a29fb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -667,6 +667,7 @@ static const u32 ar9485_1_0_pcie_phy_clkreq_enable_L1[][2] = {
static const u32 ar9485_1_0_soc_preamble[][2] = {
/* Addr allmodes */
+ {0x00004090, 0x00aa10aa},
{0x000040a4, 0x00a0c9c9},
{0x00007048, 0x00000004},
};
@@ -1708,6 +1709,7 @@ static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
static const u32 ar9485_1_1_soc_preamble[][2] = {
/* Addr allmodes */
{0x00004014, 0xba280400},
+ {0x00004090, 0x00aa10aa},
{0x000040a4, 0x00a0c9c9},
{0x00007010, 0x00000022},
{0x00007020, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index c718ab512a97..099bd4183ad0 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -189,7 +189,6 @@ struct ath_txq {
u32 axq_ampdu_depth;
bool stopped;
bool axq_tx_inprogress;
- bool txq_flush_inprogress;
struct list_head axq_acq;
struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
struct list_head txq_fifo_pending;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index a4bdfdb043ef..6d2a545fc35e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -373,6 +373,7 @@ void ath_beacon_tasklet(unsigned long data)
ath_dbg(common, ATH_DBG_BSTUCK,
"missed %u consecutive beacons\n",
sc->beacon.bmisscnt);
+ ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
ath9k_hw_bstuck_nfcal(ah);
} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
ath_dbg(common, ATH_DBG_BSTUCK,
@@ -450,16 +451,6 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.updateslot = OK;
}
if (bfaddr != 0) {
- /*
- * Stop any current dma and put the new frame(s) on the queue.
- * This should never fail since we check above that no frames
- * are still pending on the queue.
- */
- if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
- ath_err(common, "beacon queue %u did not stop?\n",
- sc->beacon.beaconq);
- }
-
/* NB: cabq traffic should already be queued and primed */
ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
ath9k_hw_txstart(ah, sc->beacon.beaconq);
@@ -780,7 +771,7 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
ah->imask &= ~ATH9K_INT_SWBA;
ath9k_hw_set_interrupts(ah, ah->imask);
tasklet_kill(&sc->bcon_tasklet);
- ath9k_hw_stoptxdma(ah, sc->beacon.beaconq);
+ ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
}
ath9k_ps_restore(sc);
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index b4a92a4313f6..8649581fa4dd 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -262,7 +262,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* since 250us often results in NF load timeout and causes deaf
* condition during stress testing 12/12/2009
*/
- for (j = 0; j < 1000; j++) {
+ for (j = 0; j < 10000; j++) {
if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
AR_PHY_AGC_CONTROL_NF) == 0)
break;
@@ -278,7 +278,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* here, the baseband nf cal will just be capped by our present
* noisefloor until the next calibration timer.
*/
- if (j == 1000) {
+ if (j == 10000) {
ath_dbg(common, ATH_DBG_ANY,
"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
REG_READ(ah, AR_PHY_AGC_CONTROL));
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 9a3438174f86..338b07502f1a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -701,7 +701,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
- udelay(100);
+ udelay(1000);
REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
@@ -713,7 +713,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c);
- udelay(110);
+ udelay(1000);
}
pll = ath9k_hw_compute_pll_control(ah, chan);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index ef79f4c876ca..6650fd48415c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -95,9 +95,9 @@
#define REG_READ_FIELD(_a, _r, _f) \
(((REG_READ(_a, _r) & _f) >> _f##_S))
#define REG_SET_BIT(_a, _r, _f) \
- REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
+ REG_WRITE(_a, _r, REG_READ(_a, _r) | (_f))
#define REG_CLR_BIT(_a, _r, _f) \
- REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
+ REG_WRITE(_a, _r, REG_READ(_a, _r) & ~(_f))
#define DO_DELAY(x) do { \
if ((++(x) % 64) == 0) \
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 5efc869d65ff..562257ac52cf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -143,84 +143,59 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
-bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
-#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */
-#define ATH9K_TIME_QUANTUM 100 /* usec */
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- struct ath9k_tx_queue_info *qi;
- u32 tsfLow, j, wait;
- u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
+ int i, q;
- if (q >= pCap->total_queues) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Stopping TX DMA, invalid queue: %u\n", q);
- return false;
- }
+ REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
- qi = &ah->txq[q];
- if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Stopping TX DMA, inactive queue: %u\n", q);
- return false;
- }
+ REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+ REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
- REG_WRITE(ah, AR_Q_TXD, 1 << q);
+ for (q = 0; q < AR_NUM_QCU; q++) {
+ for (i = 0; i < 1000; i++) {
+ if (i)
+ udelay(5);
- for (wait = wait_time; wait != 0; wait--) {
- if (ath9k_hw_numtxpending(ah, q) == 0)
- break;
- udelay(ATH9K_TIME_QUANTUM);
+ if (!ath9k_hw_numtxpending(ah, q))
+ break;
+ }
}
- if (ath9k_hw_numtxpending(ah, q)) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "%s: Num of pending TX Frames %d on Q %d\n",
- __func__, ath9k_hw_numtxpending(ah, q), q);
-
- for (j = 0; j < 2; j++) {
- tsfLow = REG_READ(ah, AR_TSF_L32);
- REG_WRITE(ah, AR_QUIET2,
- SM(10, AR_QUIET2_QUIET_DUR));
- REG_WRITE(ah, AR_QUIET_PERIOD, 100);
- REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
- REG_SET_BIT(ah, AR_TIMER_MODE,
- AR_QUIET_TIMER_EN);
-
- if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
- break;
+ REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+ REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
- ath_dbg(common, ATH_DBG_QUEUE,
- "TSF has moved while trying to set quiet time TSF: 0x%08x\n",
- tsfLow);
- }
+ REG_WRITE(ah, AR_Q_TXD, 0);
+}
+EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
- REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
+{
+#define ATH9K_TX_STOP_DMA_TIMEOUT 1000 /* usec */
+#define ATH9K_TIME_QUANTUM 100 /* usec */
+ int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
+ int wait;
- udelay(200);
- REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
+ REG_WRITE(ah, AR_Q_TXD, 1 << q);
- wait = wait_time;
- while (ath9k_hw_numtxpending(ah, q)) {
- if ((--wait) == 0) {
- ath_err(common,
- "Failed to stop TX DMA in 100 msec after killing last frame\n");
- break;
- }
+ for (wait = wait_time; wait != 0; wait--) {
+ if (wait != wait_time)
udelay(ATH9K_TIME_QUANTUM);
- }
- REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+ if (ath9k_hw_numtxpending(ah, q) == 0)
+ break;
}
REG_WRITE(ah, AR_Q_TXD, 0);
+
return wait != 0;
#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
-EXPORT_SYMBOL(ath9k_hw_stoptxdma);
+EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 04d58ae923bb..b2b2ff852c32 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -676,7 +676,8 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
-bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q);
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah);
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
const struct ath9k_tx_queue_info *qinfo);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 2e228aada1a9..115f162c617a 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2128,56 +2128,42 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
{
-#define ATH_FLUSH_TIMEOUT 60 /* ms */
struct ath_softc *sc = hw->priv;
- struct ath_txq *txq = NULL;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int i, j, npend = 0;
+ int timeout = 200; /* ms */
+ int i, j;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
cancel_delayed_work_sync(&sc->tx_complete_work);
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (!ATH_TXQ_SETUP(sc, i))
- continue;
- txq = &sc->tx.txq[i];
+ if (drop)
+ timeout = 1;
- if (!drop) {
- for (j = 0; j < ATH_FLUSH_TIMEOUT; j++) {
- if (!ath9k_has_pending_frames(sc, txq))
- break;
- usleep_range(1000, 2000);
- }
- }
+ for (j = 0; j < timeout; j++) {
+ int npend = 0;
+
+ if (j)
+ usleep_range(1000, 2000);
- if (drop || ath9k_has_pending_frames(sc, txq)) {
- ath_dbg(common, ATH_DBG_QUEUE, "Drop frames from hw queue:%d\n",
- txq->axq_qnum);
- spin_lock_bh(&txq->axq_lock);
- txq->txq_flush_inprogress = true;
- spin_unlock_bh(&txq->axq_lock);
-
- ath9k_ps_wakeup(sc);
- ath9k_hw_stoptxdma(ah, txq->axq_qnum);
- npend = ath9k_hw_numtxpending(ah, txq->axq_qnum);
- ath9k_ps_restore(sc);
- if (npend)
- break;
-
- ath_draintxq(sc, txq, false);
- txq->txq_flush_inprogress = false;
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
}
+
+ if (!npend)
+ goto out;
}
- if (npend) {
+ if (!ath_drain_all_txq(sc, false))
ath_reset(sc, false);
- txq->txq_flush_inprogress = false;
- }
+out:
ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
}
struct ieee80211_ops ath9k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index cb559e345b86..a9c3f4672aa0 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -413,9 +413,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
* mode interface or when in monitor mode. AP mode does not need this
* since it receives all in-BSS frames anyway.
*/
- if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
- (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
- (sc->sc_ah->is_monitoring))
+ if (sc->sc_ah->is_monitoring)
rfilt |= ATH9K_RX_FILTER_PROM;
if (sc->rx.rxfilter & FIF_CONTROL)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index e16136d61799..ef22096d40c9 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -166,7 +166,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
fi = get_frame_info(bf->bf_mpdu);
if (fi->retries) {
ath_tx_update_baw(sc, tid, fi->seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
} else {
ath_tx_send_normal(sc, txq, NULL, &bf_head);
}
@@ -1194,16 +1194,14 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
if (sc->sc_flags & SC_OP_INVALID)
return true;
- /* Stop beacon queue */
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+ ath9k_hw_abort_tx_dma(ah);
- /* Stop data queues */
+ /* Check if any queue remains active */
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i)) {
- txq = &sc->tx.txq[i];
- ath9k_hw_stoptxdma(ah, txq->axq_qnum);
- npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
- }
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
}
if (npend)
@@ -2014,8 +2012,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_lock_bh(&txq->axq_lock);
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
- if (sc->sc_flags & SC_OP_TXAGGR &&
- !txq->txq_flush_inprogress)
+ if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
break;
@@ -2096,7 +2093,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_lock_bh(&txq->axq_lock);
- if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
+ if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
}
@@ -2267,18 +2264,17 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
spin_lock_bh(&txq->axq_lock);
- if (!txq->txq_flush_inprogress) {
- if (!list_empty(&txq->txq_fifo_pending)) {
- INIT_LIST_HEAD(&bf_head);
- bf = list_first_entry(&txq->txq_fifo_pending,
- struct ath_buf, list);
- list_cut_position(&bf_head,
- &txq->txq_fifo_pending,
- &bf->bf_lastbf->list);
- ath_tx_txqaddbuf(sc, txq, &bf_head);
- } else if (sc->sc_flags & SC_OP_TXAGGR)
- ath_txq_schedule(sc, txq);
- }
+ if (!list_empty(&txq->txq_fifo_pending)) {
+ INIT_LIST_HEAD(&bf_head);
+ bf = list_first_entry(&txq->txq_fifo_pending,
+ struct ath_buf, list);
+ list_cut_position(&bf_head,
+ &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
+ ath_tx_txqaddbuf(sc, txq, &bf_head);
+ } else if (sc->sc_flags & SC_OP_TXAGGR)
+ ath_txq_schedule(sc, txq);
+
spin_unlock_bh(&txq->axq_lock);
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 25fccf9a3001..2003c1d4295f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -1115,6 +1115,18 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
return added;
}
+static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
+{
+ struct sk_buff *skb = priv->_agn.offchan_tx_skb;
+
+ if (skb->len < maxlen)
+ maxlen = skb->len;
+
+ memcpy(data, skb->data, maxlen);
+
+ return maxlen;
+}
+
int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
struct iwl_host_cmd cmd = {
@@ -1157,17 +1169,25 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
- if (iwl_is_any_associated(priv)) {
+ if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
+ iwl_is_any_associated(priv)) {
u16 interval = 0;
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- if (priv->is_internal_short_scan)
+ switch (priv->scan_type) {
+ case IWL_SCAN_OFFCH_TX:
+ WARN_ON(1);
+ break;
+ case IWL_SCAN_RADIO_RESET:
interval = 0;
- else
+ break;
+ case IWL_SCAN_NORMAL:
interval = vif->bss_conf.beacon_int;
+ break;
+ }
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -1180,29 +1200,41 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->suspend_time = cpu_to_le32(scan_suspend_time);
IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
scan_suspend_time, interval);
+ } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
+ scan->suspend_time = 0;
+ scan->max_out_time =
+ cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
}
- if (priv->is_internal_short_scan) {
+ switch (priv->scan_type) {
+ case IWL_SCAN_RADIO_RESET:
IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
- } else if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+ break;
+ case IWL_SCAN_NORMAL:
+ if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+ break;
+ case IWL_SCAN_OFFCH_TX:
+ IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
+ break;
+ }
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
scan->tx_cmd.sta_id = ctx->bcast_sta_id;
@@ -1300,38 +1332,77 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
scan->rx_chain = cpu_to_le16(rx_chain);
- if (!priv->is_internal_short_scan) {
+ switch (priv->scan_type) {
+ case IWL_SCAN_NORMAL:
cmd_len = iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
vif->addr,
priv->scan_request->ie,
priv->scan_request->ie_len,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
- } else {
+ break;
+ case IWL_SCAN_RADIO_RESET:
/* use bcast addr, will not be transmitted but must be valid */
cmd_len = iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
iwl_bcast_addr, NULL, 0,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
-
+ break;
+ case IWL_SCAN_OFFCH_TX:
+ cmd_len = iwl_fill_offch_tx(priv, scan->data,
+ IWL_MAX_SCAN_SIZE
+ - sizeof(*scan)
+ - sizeof(struct iwl_scan_channel));
+ scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
+ break;
+ default:
+ BUG();
}
scan->tx_cmd.len = cpu_to_le16(cmd_len);
scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
RXON_FILTER_BCON_AWARE_MSK);
- if (priv->is_internal_short_scan) {
+ switch (priv->scan_type) {
+ case IWL_SCAN_RADIO_RESET:
scan->channel_count =
iwl_get_single_channel_for_scan(priv, vif, band,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- } else {
+ (void *)&scan->data[cmd_len]);
+ break;
+ case IWL_SCAN_NORMAL:
scan->channel_count =
iwl_get_channels_for_scan(priv, vif, band,
is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
+ (void *)&scan->data[cmd_len]);
+ break;
+ case IWL_SCAN_OFFCH_TX: {
+ struct iwl_scan_channel *scan_ch;
+
+ scan->channel_count = 1;
+
+ scan_ch = (void *)&scan->data[cmd_len];
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+ scan_ch->channel =
+ cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
+ scan_ch->active_dwell =
+ cpu_to_le16(priv->_agn.offchan_tx_timeout);
+ scan_ch->passive_dwell = 0;
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ }
+ break;
}
+
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
return -EIO;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index e0cd3e27a0d9..581dc9f10273 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2937,6 +2937,91 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
+static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int wait)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ /* Not supported if we don't have PAN */
+ if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) {
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+
+ /* Not supported on pre-P2P firmware */
+ if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
+ BIT(NL80211_IFTYPE_P2P_CLIENT))) {
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+
+ mutex_lock(&priv->mutex);
+
+ if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) {
+ /*
+ * If the PAN context is free, use the normal
+ * way of doing remain-on-channel offload + TX.
+ */
+ ret = 1;
+ goto out;
+ }
+
+ /* TODO: queue up if scanning? */
+ if (test_bit(STATUS_SCANNING, &priv->status) ||
+ priv->_agn.offchan_tx_skb) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * max_scan_ie_len doesn't include the blank SSID or the header,
+ * so need to add that again here.
+ */
+ if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ priv->_agn.offchan_tx_skb = skb;
+ priv->_agn.offchan_tx_timeout = wait;
+ priv->_agn.offchan_tx_chan = chan;
+
+ ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
+ IWL_SCAN_OFFCH_TX, chan->band);
+ if (ret)
+ priv->_agn.offchan_tx_skb = NULL;
+ out:
+ mutex_unlock(&priv->mutex);
+ free:
+ if (ret < 0)
+ kfree_skb(skb);
+
+ return ret;
+}
+
+static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ mutex_lock(&priv->mutex);
+
+ if (!priv->_agn.offchan_tx_skb)
+ return -EINVAL;
+
+ priv->_agn.offchan_tx_skb = NULL;
+
+ ret = iwl_scan_cancel_timeout(priv, 200);
+ if (ret)
+ ret = -EIO;
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
/*****************************************************************************
*
* mac80211 entry point functions
@@ -3815,6 +3900,8 @@ struct ieee80211_ops iwlagn_hw_ops = {
.tx_last_beacon = iwl_mac_tx_last_beacon,
.remain_on_channel = iwl_mac_remain_on_channel,
.cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
+ .offchannel_tx = iwl_mac_offchannel_tx,
+ .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
};
static void iwl_hw_detect(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 03cfb74da2bc..ca42ffa63ed7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -2964,9 +2964,15 @@ struct iwl3945_scan_cmd {
u8 data[0];
} __packed;
+enum iwl_scan_flags {
+ /* BIT(0) currently unused */
+ IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1),
+ /* bits 2-7 reserved */
+};
+
struct iwl_scan_cmd {
__le16 len;
- u8 reserved0;
+ u8 scan_flags; /* scan flags: see enum iwl_scan_flags */
u8 channel_count; /* # channels in channel list */
__le16 quiet_time; /* dwell only this # millisecs on quiet channel
* (only for active scan) */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index af47750f8985..b316d833d9a2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,6 +63,8 @@
#ifndef __iwl_core_h__
#define __iwl_core_h__
+#include "iwl-dev.h"
+
/************************
* forward declarations *
************************/
@@ -551,6 +553,10 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
struct ieee80211_vif *vif);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
+int __must_check iwl_scan_initiate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum iwl_scan_type scan_type,
+ enum ieee80211_band band);
/* For faster active scanning, scan will move to the next channel if fewer than
* PLCP_QUIET_THRESH packets are heard on this channel within
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6a41deba6863..68b953f2bdc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1230,6 +1230,12 @@ struct iwl_rxon_context {
} ht;
};
+enum iwl_scan_type {
+ IWL_SCAN_NORMAL,
+ IWL_SCAN_RADIO_RESET,
+ IWL_SCAN_OFFCH_TX,
+};
+
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@@ -1290,7 +1296,7 @@ struct iwl_priv {
enum ieee80211_band scan_band;
struct cfg80211_scan_request *scan_request;
struct ieee80211_vif *scan_vif;
- bool is_internal_short_scan;
+ enum iwl_scan_type scan_type;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
@@ -1504,6 +1510,10 @@ struct iwl_priv {
struct delayed_work hw_roc_work;
enum nl80211_channel_type hw_roc_chantype;
int hw_roc_duration;
+
+ struct sk_buff *offchan_tx_skb;
+ int offchan_tx_timeout;
+ struct ieee80211_channel *offchan_tx_chan;
} _agn;
#endif
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index faa6d34cb658..3a4d9e6b0421 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -101,7 +101,7 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
ieee80211_scan_completed(priv->hw, aborted);
}
- priv->is_internal_short_scan = false;
+ priv->scan_type = IWL_SCAN_NORMAL;
priv->scan_vif = NULL;
priv->scan_request = NULL;
}
@@ -339,10 +339,10 @@ void iwl_init_scan_params(struct iwl_priv *priv)
priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
}
-static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool internal,
- enum ieee80211_band band)
+int __must_check iwl_scan_initiate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum iwl_scan_type scan_type,
+ enum ieee80211_band band)
{
int ret;
@@ -370,17 +370,19 @@ static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
}
IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
- internal ? "internal short " : "");
+ scan_type == IWL_SCAN_NORMAL ? "" :
+ scan_type == IWL_SCAN_OFFCH_TX ? "offchan TX " :
+ "internal short ");
set_bit(STATUS_SCANNING, &priv->status);
- priv->is_internal_short_scan = internal;
+ priv->scan_type = scan_type;
priv->scan_start = jiffies;
priv->scan_band = band;
ret = priv->cfg->ops->utils->request_scan(priv, vif);
if (ret) {
clear_bit(STATUS_SCANNING, &priv->status);
- priv->is_internal_short_scan = false;
+ priv->scan_type = IWL_SCAN_NORMAL;
return ret;
}
@@ -405,7 +407,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
if (test_bit(STATUS_SCANNING, &priv->status) &&
- !priv->is_internal_short_scan) {
+ priv->scan_type != IWL_SCAN_NORMAL) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
ret = -EAGAIN;
goto out_unlock;
@@ -419,11 +421,11 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
* If an internal scan is in progress, just set
* up the scan_request as per above.
*/
- if (priv->is_internal_short_scan) {
+ if (priv->scan_type != IWL_SCAN_NORMAL) {
IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
ret = 0;
} else
- ret = iwl_scan_initiate(priv, vif, false,
+ ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
req->channels[0]->band);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -452,7 +454,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
mutex_lock(&priv->mutex);
- if (priv->is_internal_short_scan == true) {
+ if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
goto unlock;
}
@@ -462,7 +464,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
goto unlock;
}
- if (iwl_scan_initiate(priv, NULL, true, priv->band))
+ if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
unlock:
mutex_unlock(&priv->mutex);
@@ -549,8 +551,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
container_of(work, struct iwl_priv, scan_completed);
bool aborted;
- IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
- priv->is_internal_short_scan ? "internal short " : "");
+ IWL_DEBUG_SCAN(priv, "Completed scan.\n");
cancel_delayed_work(&priv->scan_check);
@@ -565,7 +566,13 @@ static void iwl_bg_scan_completed(struct work_struct *work)
goto out_settings;
}
- if (priv->is_internal_short_scan && !aborted) {
+ if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->_agn.offchan_tx_skb) {
+ ieee80211_tx_status_irqsafe(priv->hw,
+ priv->_agn.offchan_tx_skb);
+ priv->_agn.offchan_tx_skb = NULL;
+ }
+
+ if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
int err;
/* Check if mac80211 requested scan during our internal scan */
@@ -573,7 +580,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
goto out_complete;
/* If so request a new scan */
- err = iwl_scan_initiate(priv, priv->scan_vif, false,
+ err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
priv->scan_request->channels[0]->band);
if (err) {
IWL_DEBUG_SCAN(priv,
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index acf3bf63ee33..9d097b9c8005 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -918,7 +918,6 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mrvl_mesh_defaults defs;
- int maxlen;
int ret;
ret = mesh_get_default_parameters(dev, &defs);
@@ -931,13 +930,11 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
}
- /* SSID not null terminated: reserve room for \0 + \n */
- maxlen = defs.meshie.val.mesh_id_len + 2;
- maxlen = (PAGE_SIZE > maxlen) ? maxlen : PAGE_SIZE;
+ memcpy(buf, defs.meshie.val.mesh_id, defs.meshie.val.mesh_id_len);
+ buf[defs.meshie.val.mesh_id_len] = '\n';
+ buf[defs.meshie.val.mesh_id_len + 1] = '\0';
- defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0';
-
- return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id);
+ return defs.meshie.val.mesh_id_len + 1;
}
/**
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/wl1251/ps.c
index 9ba23ede51bd..9cc514703d2a 100644
--- a/drivers/net/wireless/wl1251/ps.c
+++ b/drivers/net/wireless/wl1251/ps.c
@@ -58,7 +58,6 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
unsigned long delay;
if (wl->psm) {
- cancel_delayed_work(&wl->elp_work);
delay = msecs_to_jiffies(ELP_ENTRY_DELAY);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay);
}
@@ -69,6 +68,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
unsigned long timeout, start;
u32 elp_reg;
+ if (delayed_work_pending(&wl->elp_work))
+ cancel_delayed_work(&wl->elp_work);
+
if (!wl->elp)
return 0;
@@ -102,38 +104,6 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
return 0;
}
-static int wl1251_ps_set_elp(struct wl1251 *wl, bool enable)
-{
- int ret;
-
- if (enable) {
- wl1251_debug(DEBUG_PSM, "sleep auth psm/elp");
-
- ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
- if (ret < 0)
- return ret;
-
- wl1251_ps_elp_sleep(wl);
- } else {
- wl1251_debug(DEBUG_PSM, "sleep auth cam");
-
- /*
- * When the target is in ELP, we can only
- * access the ELP control register. Thus,
- * we have to wake the target up before
- * changing the power authorization.
- */
-
- wl1251_ps_elp_wakeup(wl);
-
- ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
{
int ret;
@@ -162,7 +132,7 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
if (ret < 0)
return ret;
- ret = wl1251_ps_set_elp(wl, true);
+ ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
if (ret < 0)
return ret;
@@ -171,7 +141,8 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
case STATION_ACTIVE_MODE:
default:
wl1251_debug(DEBUG_PSM, "leaving psm");
- ret = wl1251_ps_set_elp(wl, false);
+
+ ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
if (ret < 0)
return ret;
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 15e83bf3dd58..a1b410c76fc3 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -29,6 +29,7 @@ header-y += xt_TCPMSS.h
header-y += xt_TCPOPTSTRIP.h
header-y += xt_TEE.h
header-y += xt_TPROXY.h
+header-y += xt_addrtype.h
header-y += xt_cluster.h
header-y += xt_comment.h
header-y += xt_connbytes.h
diff --git a/include/linux/netfilter/xt_addrtype.h b/include/linux/netfilter/xt_addrtype.h
new file mode 100644
index 000000000000..b156baa9d55e
--- /dev/null
+++ b/include/linux/netfilter/xt_addrtype.h
@@ -0,0 +1,44 @@
+#ifndef _XT_ADDRTYPE_H
+#define _XT_ADDRTYPE_H
+
+#include <linux/types.h>
+
+enum {
+ XT_ADDRTYPE_INVERT_SOURCE = 0x0001,
+ XT_ADDRTYPE_INVERT_DEST = 0x0002,
+ XT_ADDRTYPE_LIMIT_IFACE_IN = 0x0004,
+ XT_ADDRTYPE_LIMIT_IFACE_OUT = 0x0008,
+};
+
+
+/* rtn_type enum values from rtnetlink.h, but shifted */
+enum {
+ XT_ADDRTYPE_UNSPEC = 1 << 0,
+ XT_ADDRTYPE_UNICAST = 1 << 1, /* 1 << RTN_UNICAST */
+ XT_ADDRTYPE_LOCAL = 1 << 2, /* 1 << RTN_LOCAL, etc */
+ XT_ADDRTYPE_BROADCAST = 1 << 3,
+ XT_ADDRTYPE_ANYCAST = 1 << 4,
+ XT_ADDRTYPE_MULTICAST = 1 << 5,
+ XT_ADDRTYPE_BLACKHOLE = 1 << 6,
+ XT_ADDRTYPE_UNREACHABLE = 1 << 7,
+ XT_ADDRTYPE_PROHIBIT = 1 << 8,
+ XT_ADDRTYPE_THROW = 1 << 9,
+ XT_ADDRTYPE_NAT = 1 << 10,
+ XT_ADDRTYPE_XRESOLVE = 1 << 11,
+};
+
+struct xt_addrtype_info_v1 {
+ __u16 source; /* source-type mask */
+ __u16 dest; /* dest-type mask */
+ __u32 flags;
+};
+
+/* revision 0 */
+struct xt_addrtype_info {
+ __u16 source; /* source-type mask */
+ __u16 dest; /* dest-type mask */
+ __u32 invert_source;
+ __u32 invert_dest;
+};
+
+#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1ac5786da14b..60f7876b6da8 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1197,6 +1197,10 @@ struct cfg80211_pmksa {
* (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
*
* @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
+ *
+ * @set_ringparam: Set tx and rx ring sizes.
+ *
+ * @get_ringparam: Get tx and rx ring current and maximum sizes.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy);
@@ -1364,6 +1368,10 @@ struct cfg80211_ops {
int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
+
+ int (*set_ringparam)(struct wiphy *wiphy, u32 tx, u32 rx);
+ void (*get_ringparam)(struct wiphy *wiphy,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
};
/*
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index e74da41ebd1b..272f59336b73 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -374,24 +374,9 @@ struct ip_vs_stats {
struct ip_vs_estimator est; /* estimator */
struct ip_vs_cpu_stats *cpustats; /* per cpu counters */
spinlock_t lock; /* spin lock */
+ struct ip_vs_stats_user ustats0; /* reset values */
};
-/*
- * Helper Macros for per cpu
- * ipvs->tot_stats->ustats.count
- */
-#define IPVS_STAT_INC(ipvs, count) \
- __this_cpu_inc((ipvs)->ustats->count)
-
-#define IPVS_STAT_ADD(ipvs, count, value) \
- do {\
- write_seqcount_begin(per_cpu_ptr((ipvs)->ustats_seq, \
- raw_smp_processor_id())); \
- __this_cpu_add((ipvs)->ustats->count, value); \
- write_seqcount_end(per_cpu_ptr((ipvs)->ustats_seq, \
- raw_smp_processor_id())); \
- } while (0)
-
struct dst_entry;
struct iphdr;
struct ip_vs_conn;
@@ -803,6 +788,171 @@ struct ip_vs_app {
void (*timeout_change)(struct ip_vs_app *app, int flags);
};
+/* IPVS in network namespace */
+struct netns_ipvs {
+ int gen; /* Generation */
+ /*
+ * Hash table: for real service lookups
+ */
+ #define IP_VS_RTAB_BITS 4
+ #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
+ #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
+
+ struct list_head rs_table[IP_VS_RTAB_SIZE];
+ /* ip_vs_app */
+ struct list_head app_list;
+ struct mutex app_mutex;
+ struct lock_class_key app_key; /* mutex debuging */
+
+ /* ip_vs_proto */
+ #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
+ struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
+ /* ip_vs_proto_tcp */
+#ifdef CONFIG_IP_VS_PROTO_TCP
+ #define TCP_APP_TAB_BITS 4
+ #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
+ #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
+ struct list_head tcp_apps[TCP_APP_TAB_SIZE];
+ spinlock_t tcp_app_lock;
+#endif
+ /* ip_vs_proto_udp */
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ #define UDP_APP_TAB_BITS 4
+ #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
+ #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
+ struct list_head udp_apps[UDP_APP_TAB_SIZE];
+ spinlock_t udp_app_lock;
+#endif
+ /* ip_vs_proto_sctp */
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ #define SCTP_APP_TAB_BITS 4
+ #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
+ #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
+ /* Hash table for SCTP application incarnations */
+ struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
+ spinlock_t sctp_app_lock;
+#endif
+ /* ip_vs_conn */
+ atomic_t conn_count; /* connection counter */
+
+ /* ip_vs_ctl */
+ struct ip_vs_stats tot_stats; /* Statistics & est. */
+
+ int num_services; /* no of virtual services */
+
+ rwlock_t rs_lock; /* real services table */
+ /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
+ struct lock_class_key ctl_key; /* ctl_mutex debuging */
+ /* Trash for destinations */
+ struct list_head dest_trash;
+ /* Service counters */
+ atomic_t ftpsvc_counter;
+ atomic_t nullsvc_counter;
+
+#ifdef CONFIG_SYSCTL
+ /* 1/rate drop and drop-entry variables */
+ struct delayed_work defense_work; /* Work handler */
+ int drop_rate;
+ int drop_counter;
+ atomic_t dropentry;
+ /* locks in ctl.c */
+ spinlock_t dropentry_lock; /* drop entry handling */
+ spinlock_t droppacket_lock; /* drop packet handling */
+ spinlock_t securetcp_lock; /* state and timeout tables */
+
+ /* sys-ctl struct */
+ struct ctl_table_header *sysctl_hdr;
+ struct ctl_table *sysctl_tbl;
+#endif
+
+ /* sysctl variables */
+ int sysctl_amemthresh;
+ int sysctl_am_droprate;
+ int sysctl_drop_entry;
+ int sysctl_drop_packet;
+ int sysctl_secure_tcp;
+#ifdef CONFIG_IP_VS_NFCT
+ int sysctl_conntrack;
+#endif
+ int sysctl_snat_reroute;
+ int sysctl_sync_ver;
+ int sysctl_cache_bypass;
+ int sysctl_expire_nodest_conn;
+ int sysctl_expire_quiescent_template;
+ int sysctl_sync_threshold[2];
+ int sysctl_nat_icmp_send;
+
+ /* ip_vs_lblc */
+ int sysctl_lblc_expiration;
+ struct ctl_table_header *lblc_ctl_header;
+ struct ctl_table *lblc_ctl_table;
+ /* ip_vs_lblcr */
+ int sysctl_lblcr_expiration;
+ struct ctl_table_header *lblcr_ctl_header;
+ struct ctl_table *lblcr_ctl_table;
+ /* ip_vs_est */
+ struct list_head est_list; /* estimator list */
+ spinlock_t est_lock;
+ struct timer_list est_timer; /* Estimation timer */
+ /* ip_vs_sync */
+ struct list_head sync_queue;
+ spinlock_t sync_lock;
+ struct ip_vs_sync_buff *sync_buff;
+ spinlock_t sync_buff_lock;
+ struct sockaddr_in sync_mcast_addr;
+ struct task_struct *master_thread;
+ struct task_struct *backup_thread;
+ int send_mesg_maxlen;
+ int recv_mesg_maxlen;
+ volatile int sync_state;
+ volatile int master_syncid;
+ volatile int backup_syncid;
+ /* multicast interface name */
+ char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ /* net name space ptr */
+ struct net *net; /* Needed by timer routines */
+};
+
+#define DEFAULT_SYNC_THRESHOLD 3
+#define DEFAULT_SYNC_PERIOD 50
+#define DEFAULT_SYNC_VER 1
+
+#ifdef CONFIG_SYSCTL
+
+static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_threshold[0];
+}
+
+static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_threshold[1];
+}
+
+static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_sync_ver;
+}
+
+#else
+
+static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_THRESHOLD;
+}
+
+static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_PERIOD;
+}
+
+static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
+{
+ return DEFAULT_SYNC_VER;
+}
+
+#endif
/*
* IPVS core functions
@@ -1071,9 +1221,11 @@ extern void ip_vs_sync_cleanup(void);
*/
extern int ip_vs_estimator_init(void);
extern void ip_vs_estimator_cleanup(void);
-extern void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats);
-extern void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats);
+extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
+extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
+ struct ip_vs_stats *stats);
/*
* Various IPVS packet transmitters (from ip_vs_xmit.c)
@@ -1106,6 +1258,7 @@ extern int ip_vs_icmp_xmit_v6
int offset);
#endif
+#ifdef CONFIG_SYSCTL
/*
* This is a simple mechanism to ignore packets when
* we are loaded. Just set ip_vs_drop_rate to 'n' and
@@ -1121,6 +1274,9 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
ipvs->drop_counter = ipvs->drop_rate;
return 1;
}
+#else
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
+#endif
/*
* ip_vs_fwd_tag returns the forwarding tag of the connection
@@ -1190,7 +1346,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
enum ip_conntrack_info ctinfo;
- struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (!ct || !nf_ct_is_untracked(ct)) {
nf_reset(skb);
@@ -1208,7 +1364,11 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
*/
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
+#ifdef CONFIG_SYSCTL
return ipvs->sysctl_conntrack;
+#else
+ return 0;
+#endif
}
extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 2b072fa99399..8650e7bf2ed0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1804,6 +1804,10 @@ enum ieee80211_ampdu_mlme_action {
* return value is 1, then the @remain_on_channel will be used with a
* regular transmission (if supported.)
* @offchannel_tx_cancel_wait: cancel wait associated with offchannel TX
+ *
+ * @set_ringparam: Set tx and rx ring sizes.
+ *
+ * @get_ringparam: Get tx and rx ring current and maximum sizes.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -1888,6 +1892,9 @@ struct ieee80211_ops {
enum nl80211_channel_type channel_type,
unsigned int wait);
int (*offchannel_tx_cancel_wait)(struct ieee80211_hw *hw);
+ int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx);
+ void (*get_ringparam)(struct ieee80211_hw *hw,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
};
/**
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index b3b4a34cb2cc..3ae491932bc8 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -20,7 +20,6 @@
#include <net/netns/conntrack.h>
#endif
#include <net/netns/xfrm.h>
-#include <net/netns/ip_vs.h>
struct proc_dir_entry;
struct net_device;
@@ -28,6 +27,7 @@ struct sock;
struct ctl_table_header;
struct net_generic;
struct sock;
+struct netns_ipvs;
#define NETDEV_HASHBITS 8
diff --git a/include/net/netns/ip_vs.h b/include/net/netns/ip_vs.h
deleted file mode 100644
index 259ebac904bf..000000000000
--- a/include/net/netns/ip_vs.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * IP Virtual Server
- * Data structure for network namspace
- *
- */
-
-#ifndef IP_VS_H_
-#define IP_VS_H_
-
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/list_nulls.h>
-#include <linux/ip_vs.h>
-#include <asm/atomic.h>
-#include <linux/in.h>
-
-struct ip_vs_stats;
-struct ip_vs_sync_buff;
-struct ctl_table_header;
-
-struct netns_ipvs {
- int gen; /* Generation */
- /*
- * Hash table: for real service lookups
- */
- #define IP_VS_RTAB_BITS 4
- #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
- #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
-
- struct list_head rs_table[IP_VS_RTAB_SIZE];
- /* ip_vs_app */
- struct list_head app_list;
- struct mutex app_mutex;
- struct lock_class_key app_key; /* mutex debuging */
-
- /* ip_vs_proto */
- #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
- struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
- /* ip_vs_proto_tcp */
-#ifdef CONFIG_IP_VS_PROTO_TCP
- #define TCP_APP_TAB_BITS 4
- #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
- #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
- struct list_head tcp_apps[TCP_APP_TAB_SIZE];
- spinlock_t tcp_app_lock;
-#endif
- /* ip_vs_proto_udp */
-#ifdef CONFIG_IP_VS_PROTO_UDP
- #define UDP_APP_TAB_BITS 4
- #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
- #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
- struct list_head udp_apps[UDP_APP_TAB_SIZE];
- spinlock_t udp_app_lock;
-#endif
- /* ip_vs_proto_sctp */
-#ifdef CONFIG_IP_VS_PROTO_SCTP
- #define SCTP_APP_TAB_BITS 4
- #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
- #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
- /* Hash table for SCTP application incarnations */
- struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
- spinlock_t sctp_app_lock;
-#endif
- /* ip_vs_conn */
- atomic_t conn_count; /* connection counter */
-
- /* ip_vs_ctl */
- struct ip_vs_stats *tot_stats; /* Statistics & est. */
- struct ip_vs_cpu_stats __percpu *cpustats; /* Stats per cpu */
- seqcount_t *ustats_seq; /* u64 read retry */
-
- int num_services; /* no of virtual services */
- /* 1/rate drop and drop-entry variables */
- struct delayed_work defense_work; /* Work handler */
- int drop_rate;
- int drop_counter;
- atomic_t dropentry;
- /* locks in ctl.c */
- spinlock_t dropentry_lock; /* drop entry handling */
- spinlock_t droppacket_lock; /* drop packet handling */
- spinlock_t securetcp_lock; /* state and timeout tables */
- rwlock_t rs_lock; /* real services table */
- /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
- struct lock_class_key ctl_key; /* ctl_mutex debuging */
- /* Trash for destinations */
- struct list_head dest_trash;
- /* Service counters */
- atomic_t ftpsvc_counter;
- atomic_t nullsvc_counter;
-
- /* sys-ctl struct */
- struct ctl_table_header *sysctl_hdr;
- struct ctl_table *sysctl_tbl;
- /* sysctl variables */
- int sysctl_amemthresh;
- int sysctl_am_droprate;
- int sysctl_drop_entry;
- int sysctl_drop_packet;
- int sysctl_secure_tcp;
-#ifdef CONFIG_IP_VS_NFCT
- int sysctl_conntrack;
-#endif
- int sysctl_snat_reroute;
- int sysctl_sync_ver;
- int sysctl_cache_bypass;
- int sysctl_expire_nodest_conn;
- int sysctl_expire_quiescent_template;
- int sysctl_sync_threshold[2];
- int sysctl_nat_icmp_send;
-
- /* ip_vs_lblc */
- int sysctl_lblc_expiration;
- struct ctl_table_header *lblc_ctl_header;
- struct ctl_table *lblc_ctl_table;
- /* ip_vs_lblcr */
- int sysctl_lblcr_expiration;
- struct ctl_table_header *lblcr_ctl_header;
- struct ctl_table *lblcr_ctl_table;
- /* ip_vs_est */
- struct list_head est_list; /* estimator list */
- spinlock_t est_lock;
- struct timer_list est_timer; /* Estimation timer */
- /* ip_vs_sync */
- struct list_head sync_queue;
- spinlock_t sync_lock;
- struct ip_vs_sync_buff *sync_buff;
- spinlock_t sync_buff_lock;
- struct sockaddr_in sync_mcast_addr;
- struct task_struct *master_thread;
- struct task_struct *backup_thread;
- int send_mesg_maxlen;
- int recv_mesg_maxlen;
- volatile int sync_state;
- volatile int master_syncid;
- volatile int backup_syncid;
- /* multicast interface name */
- char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
- char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
- /* net name space ptr */
- struct net *net; /* Needed by timer routines */
-};
-
-#endif /* IP_VS_H_ */
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index f926a310075d..1dfc18a03fd4 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -64,16 +64,6 @@ config IP_NF_IPTABLES
if IP_NF_IPTABLES
# The matches.
-config IP_NF_MATCH_ADDRTYPE
- tristate '"addrtype" address type match support'
- depends on NETFILTER_ADVANCED
- help
- This option allows you to match what routing thinks of an address,
- eg. UNICAST, LOCAL, BROADCAST, ...
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
-
config IP_NF_MATCH_AH
tristate '"ah" match support'
depends on NETFILTER_ADVANCED
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 19eb59d01037..dca2082ec683 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -48,7 +48,6 @@ obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
# matches
-obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e95054c690c6..4b5d457c2d76 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1066,6 +1066,7 @@ static int do_replace(struct net *net, const void __user *user,
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
@@ -1488,6 +1489,7 @@ static int compat_do_replace(struct net *net, void __user *user,
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
@@ -1740,6 +1742,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
ret = -EFAULT;
break;
}
+ rev.name[sizeof(rev.name)-1] = 0;
try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
rev.revision, 1, &ret),
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index ef7d7b9680ea..b09ed0d080f9 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1262,6 +1262,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
@@ -1807,6 +1808,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
@@ -2036,6 +2038,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ret = -EFAULT;
break;
}
+ rev.name[sizeof(rev.name)-1] = 0;
if (cmd == IPT_SO_GET_REVISION_TARGET)
target = 1;
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
deleted file mode 100644
index db8bff0fb86d..000000000000
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * iptables module to match inet_addr_type() of an ip.
- *
- * Copyright (c) 2004 Patrick McHardy <[email protected]>
- * (C) 2007 Laszlo Attila Toth <[email protected]>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/ip.h>
-#include <net/route.h>
-
-#include <linux/netfilter_ipv4/ipt_addrtype.h>
-#include <linux/netfilter/x_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <[email protected]>");
-MODULE_DESCRIPTION("Xtables: address type match for IPv4");
-
-static inline bool match_type(struct net *net, const struct net_device *dev,
- __be32 addr, u_int16_t mask)
-{
- return !!(mask & (1 << inet_dev_addr_type(net, dev, addr)));
-}
-
-static bool
-addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
-{
- struct net *net = dev_net(par->in ? par->in : par->out);
- const struct ipt_addrtype_info *info = par->matchinfo;
- const struct iphdr *iph = ip_hdr(skb);
- bool ret = true;
-
- if (info->source)
- ret &= match_type(net, NULL, iph->saddr, info->source) ^
- info->invert_source;
- if (info->dest)
- ret &= match_type(net, NULL, iph->daddr, info->dest) ^
- info->invert_dest;
-
- return ret;
-}
-
-static bool
-addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
-{
- struct net *net = dev_net(par->in ? par->in : par->out);
- const struct ipt_addrtype_info_v1 *info = par->matchinfo;
- const struct iphdr *iph = ip_hdr(skb);
- const struct net_device *dev = NULL;
- bool ret = true;
-
- if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN)
- dev = par->in;
- else if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT)
- dev = par->out;
-
- if (info->source)
- ret &= match_type(net, dev, iph->saddr, info->source) ^
- (info->flags & IPT_ADDRTYPE_INVERT_SOURCE);
- if (ret && info->dest)
- ret &= match_type(net, dev, iph->daddr, info->dest) ^
- !!(info->flags & IPT_ADDRTYPE_INVERT_DEST);
- return ret;
-}
-
-static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
-{
- struct ipt_addrtype_info_v1 *info = par->matchinfo;
-
- if (info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN &&
- info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) {
- pr_info("both incoming and outgoing "
- "interface limitation cannot be selected\n");
- return -EINVAL;
- }
-
- if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_LOCAL_IN)) &&
- info->flags & IPT_ADDRTYPE_LIMIT_IFACE_OUT) {
- pr_info("output interface limitation "
- "not valid in PREROUTING and INPUT\n");
- return -EINVAL;
- }
-
- if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
- (1 << NF_INET_LOCAL_OUT)) &&
- info->flags & IPT_ADDRTYPE_LIMIT_IFACE_IN) {
- pr_info("input interface limitation "
- "not valid in POSTROUTING and OUTPUT\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct xt_match addrtype_mt_reg[] __read_mostly = {
- {
- .name = "addrtype",
- .family = NFPROTO_IPV4,
- .match = addrtype_mt_v0,
- .matchsize = sizeof(struct ipt_addrtype_info),
- .me = THIS_MODULE
- },
- {
- .name = "addrtype",
- .family = NFPROTO_IPV4,
- .revision = 1,
- .match = addrtype_mt_v1,
- .checkentry = addrtype_mt_checkentry_v1,
- .matchsize = sizeof(struct ipt_addrtype_info_v1),
- .me = THIS_MODULE
- }
-};
-
-static int __init addrtype_mt_init(void)
-{
- return xt_register_matches(addrtype_mt_reg,
- ARRAY_SIZE(addrtype_mt_reg));
-}
-
-static void __exit addrtype_mt_exit(void)
-{
- xt_unregister_matches(addrtype_mt_reg, ARRAY_SIZE(addrtype_mt_reg));
-}
-
-module_init(addrtype_mt_init);
-module_exit(addrtype_mt_exit);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 47b7b8df7fac..c9598a9067d7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1275,6 +1275,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
@@ -1822,6 +1823,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
@@ -2051,6 +2053,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ret = -EFAULT;
break;
}
+ rev.name[sizeof(rev.name)-1] = 0;
if (cmd == IP6T_SO_GET_REVISION_TARGET)
target = 1;
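All three hunks above (arp_tables, ip_tables, ip6_tables) apply the same one-line hardening: the table or revision name arrives from userspace in a fixed-size buffer with no guarantee of NUL termination, so it is terminated explicitly before it is ever used as a C string. A minimal sketch of the pattern, with a hypothetical request structure standing in for the real xt uapi structs:

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	struct example_replace_req {		/* hypothetical stand-in for the uapi structs */
		char name[32];
		unsigned int size;
	};

	static int example_do_replace(const void __user *user, unsigned int len)
	{
		struct example_replace_req tmp;

		if (len < sizeof(tmp))
			return -EINVAL;
		if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
			return -EFAULT;

		tmp.name[sizeof(tmp.name) - 1] = 0;	/* same fix as in the hunks above */
		/* ... tmp.name is now safe for strcmp()/printk() ... */
		return 0;
	}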
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7b701dcddb50..334213571ad0 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -834,6 +834,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
rcu_read_unlock();
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
+ ieee80211_recalc_ps(local, -1);
+
return 0;
}
@@ -2008,6 +2012,21 @@ static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
return drv_get_antenna(local, tx_ant, rx_ant);
}
+static int ieee80211_set_ringparam(struct wiphy *wiphy, u32 tx, u32 rx)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ return drv_set_ringparam(local, tx, rx);
+}
+
+static void ieee80211_get_ringparam(struct wiphy *wiphy,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ drv_get_ringparam(local, tx, tx_max, rx, rx_max);
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -2065,4 +2084,6 @@ struct cfg80211_ops mac80211_config_ops = {
.mgmt_frame_register = ieee80211_mgmt_frame_register,
.set_antenna = ieee80211_set_antenna,
.get_antenna = ieee80211_get_antenna,
+ .set_ringparam = ieee80211_set_ringparam,
+ .get_ringparam = ieee80211_get_ringparam,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5b24740fc0b0..889c3e93e0f4 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -77,6 +77,9 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
switch (tmp->vif.bss_conf.channel_type) {
case NL80211_CHAN_NO_HT:
case NL80211_CHAN_HT20:
+ if (superchan > tmp->vif.bss_conf.channel_type)
+ break;
+
superchan = tmp->vif.bss_conf.channel_type;
break;
case NL80211_CHAN_HT40PLUS:
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 3729296f6f95..9c0d62bb0ea3 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -526,4 +526,30 @@ static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
return ret;
}
+static inline int drv_set_ringparam(struct ieee80211_local *local,
+ u32 tx, u32 rx)
+{
+ int ret = -ENOTSUPP;
+
+ might_sleep();
+
+ trace_drv_set_ringparam(local, tx, rx);
+ if (local->ops->set_ringparam)
+ ret = local->ops->set_ringparam(&local->hw, tx, rx);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
+static inline void drv_get_ringparam(struct ieee80211_local *local,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
+{
+ might_sleep();
+
+ trace_drv_get_ringparam(local, tx, tx_max, rx, rx_max);
+ if (local->ops->get_ringparam)
+ local->ops->get_ringparam(&local->hw, tx, tx_max, rx, rx_max);
+ trace_drv_return_void(local);
+}
+
#endif /* __MAC80211_DRIVER_OPS */
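mac80211 itself only forwards the ethtool ring parameters; the driver decides what its DMA rings can do and fills in the two new ieee80211_ops hooks, whose arguments match what drv_set_ringparam()/drv_get_ringparam() pass above. A hedged sketch of a driver hook-up (example_priv and the limits are invented, not taken from ath9k or this patch):

	#include <linux/errno.h>
	#include <net/mac80211.h>

	#define EXAMPLE_TX_RING_MAX	512	/* hypothetical hardware limit */
	#define EXAMPLE_RX_RING_SIZE	256	/* hypothetical fixed rx ring */

	struct example_priv {			/* hypothetical driver private data */
		u32 tx_ring_size;
	};

	static int example_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
	{
		struct example_priv *priv = hw->priv;

		if (!tx || tx > EXAMPLE_TX_RING_MAX || rx != EXAMPLE_RX_RING_SIZE)
			return -EINVAL;
		priv->tx_ring_size = tx;	/* applied on the next ring reset */
		return 0;
	}

	static void example_get_ringparam(struct ieee80211_hw *hw, u32 *tx, u32 *tx_max,
					  u32 *rx, u32 *rx_max)
	{
		struct example_priv *priv = hw->priv;

		*tx = priv->tx_ring_size;
		*tx_max = EXAMPLE_TX_RING_MAX;
		*rx = *rx_max = EXAMPLE_RX_RING_SIZE;
	}

	static const struct ieee80211_ops example_ops = {
		/* ...the usual tx/start/stop/config callbacks... */
		.set_ringparam = example_set_ringparam,
		.get_ringparam = example_get_ringparam,
	};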
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 520fe2444893..45aab80738e2 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -912,6 +912,58 @@ TRACE_EVENT(drv_offchannel_tx,
)
);
+TRACE_EVENT(drv_set_ringparam,
+ TP_PROTO(struct ieee80211_local *local, u32 tx, u32 rx),
+
+ TP_ARGS(local, tx, rx),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u32, tx)
+ __field(u32, rx)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->tx = tx;
+ __entry->rx = rx;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " tx:%d rx %d",
+ LOCAL_PR_ARG, __entry->tx, __entry->rx
+ )
+);
+
+TRACE_EVENT(drv_get_ringparam,
+ TP_PROTO(struct ieee80211_local *local, u32 *tx, u32 *tx_max,
+ u32 *rx, u32 *rx_max),
+
+ TP_ARGS(local, tx, tx_max, rx, rx_max),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u32, tx)
+ __field(u32, tx_max)
+ __field(u32, rx)
+ __field(u32, rx_max)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->tx = *tx;
+ __entry->tx_max = *tx_max;
+ __entry->rx = *rx;
+ __entry->rx_max = *rx_max;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " tx:%d tx_max %d rx %d rx_max %d",
+ LOCAL_PR_ARG,
+ __entry->tx, __entry->tx_max, __entry->rx, __entry->rx_max
+ )
+);
+
DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait,
TP_PROTO(struct ieee80211_local *local),
TP_ARGS(local)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cc984bd861cf..64d92d5a7f40 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -613,6 +613,37 @@ static void ieee80211_change_ps(struct ieee80211_local *local)
}
}
+static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_managed *mgd = &sdata->u.mgd;
+ struct sta_info *sta = NULL;
+ u32 sta_flags = 0;
+
+ if (!mgd->powersave)
+ return false;
+
+ if (!mgd->associated)
+ return false;
+
+ if (!mgd->associated->beacon_ies)
+ return false;
+
+ if (mgd->flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL))
+ return false;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgd->bssid);
+ if (sta)
+ sta_flags = get_sta_flags(sta);
+ rcu_read_unlock();
+
+ if (!(sta_flags & WLAN_STA_AUTHORIZED))
+ return false;
+
+ return true;
+}
+
/* need to hold RTNL or interface lock */
void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
{
@@ -647,11 +678,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
count++;
}
- if (count == 1 && found->u.mgd.powersave &&
- found->u.mgd.associated &&
- found->u.mgd.associated->beacon_ies &&
- !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
- IEEE80211_STA_CONNECTION_POLL))) {
+ if (count == 1 && ieee80211_powersave_allowed(found)) {
struct ieee80211_conf *conf = &local->hw.conf;
s32 beaconint_us;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index bce14fbfc3b6..8212a8bebf06 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -598,19 +598,46 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
sample = true;
minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
txrc, true, false);
- minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
- txrc, false, false);
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
txrc, false, false);
- minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
- txrc, false, true);
}
- minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, !sample);
- ar[3].count = 0;
- ar[3].idx = -1;
+ if (mp->hw->max_rates >= 3) {
+ /*
+ * At least 3 tx rates supported, use
+ * sample_rate -> max_tp_rate -> max_prob_rate for sampling and
+ * max_tp_rate -> max_tp_rate2 -> max_prob_rate by default.
+ */
+ if (sample_idx >= 0)
+ minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
+ txrc, false, false);
+ else
+ minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
+ txrc, false, true);
+
+ minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
+ txrc, false, !sample);
+
+ ar[3].count = 0;
+ ar[3].idx = -1;
+ } else if (mp->hw->max_rates == 2) {
+ /*
+ * Only 2 tx rates supported, use
+ * sample_rate -> max_prob_rate for sampling and
+ * max_tp_rate -> max_prob_rate by default.
+ */
+ minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
+ txrc, false, !sample);
+
+ ar[2].count = 0;
+ ar[2].idx = -1;
+ } else {
+ /* Not using MRR, only use the first rate */
+ ar[1].count = 0;
+ ar[1].idx = -1;
+ }
mi->total_packets++;
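Which branch runs is decided by how many rate/retry slots the hardware advertises; whatever the count, the table handed back to the driver is terminated by setting idx = -1 in the first unused slot. A driver declares its limit once at registration time, roughly like this (the numbers are illustrative only):

	#include <net/mac80211.h>

	static void example_setup_mrr(struct ieee80211_hw *hw)
	{
		/* Two hardware retry series: minstrel_ht takes the 2-rate branch
		 * above (max_tp_rate -> max_prob_rate) and terminates at ar[2].
		 */
		hw->max_rates = 2;
		hw->max_rate_tries = 10;	/* illustrative per-rate retry budget */
	}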
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 82a6e0d80f05..c3f988aa1152 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -649,6 +649,17 @@ config NETFILTER_XT_TARGET_TCPOPTSTRIP
comment "Xtables matches"
+config NETFILTER_XT_MATCH_ADDRTYPE
+ tristate '"addrtype" address type match support'
+ depends on NETFILTER_ADVANCED
+ depends on (IPV6 || IPV6=n)
+ ---help---
+ This option allows you to match what routing thinks of an address,
+ eg. UNICAST, LOCAL, BROADCAST, ...
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+
config NETFILTER_XT_MATCH_CLUSTER
tristate '"cluster" match support'
depends on NF_CONNTRACK
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index d57a890eaee5..1a02853df863 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
# matches
+obj-$(CONFIG_NETFILTER_XT_MATCH_ADDRTYPE) += xt_addrtype.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 8b1a54c1e400..618a615acc9d 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -612,7 +612,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
- struct ip_set *set, *clash;
+ struct ip_set *set, *clash = NULL;
ip_set_id_t index = IPSET_INVALID_ID;
struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
const char *name, *typename;
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 9c2a517b69c8..f289306cbf12 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -680,6 +680,16 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
atomic_dec(&dest->refcnt);
}
+static int expire_quiescent_template(struct netns_ipvs *ipvs,
+ struct ip_vs_dest *dest)
+{
+#ifdef CONFIG_SYSCTL
+ return ipvs->sysctl_expire_quiescent_template &&
+ (atomic_read(&dest->weight) == 0);
+#else
+ return 0;
+#endif
+}
/*
* Checking if the destination of a connection template is available.
@@ -696,8 +706,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
- (ipvs->sysctl_expire_quiescent_template &&
- (atomic_read(&dest->weight) == 0))) {
+ expire_quiescent_template(ipvs, dest)) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
"-> d:%s:%d\n",
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2d1f932add46..07accf6b2401 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -132,7 +132,7 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
s->ustats.inbytes += skb->len;
u64_stats_update_end(&s->syncp);
- s = this_cpu_ptr(ipvs->cpustats);
+ s = this_cpu_ptr(ipvs->tot_stats.cpustats);
s->ustats.inpkts++;
u64_stats_update_begin(&s->syncp);
s->ustats.inbytes += skb->len;
@@ -162,7 +162,7 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
s->ustats.outbytes += skb->len;
u64_stats_update_end(&s->syncp);
- s = this_cpu_ptr(ipvs->cpustats);
+ s = this_cpu_ptr(ipvs->tot_stats.cpustats);
s->ustats.outpkts++;
u64_stats_update_begin(&s->syncp);
s->ustats.outbytes += skb->len;
@@ -183,7 +183,7 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
s = this_cpu_ptr(svc->stats.cpustats);
s->ustats.conns++;
- s = this_cpu_ptr(ipvs->cpustats);
+ s = this_cpu_ptr(ipvs->tot_stats.cpustats);
s->ustats.conns++;
}
@@ -499,11 +499,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
struct ip_vs_proto_data *pd)
{
- struct net *net;
- struct netns_ipvs *ipvs;
__be16 _ports[2], *pptr;
struct ip_vs_iphdr iph;
+#ifdef CONFIG_SYSCTL
+ struct net *net;
+ struct netns_ipvs *ipvs;
int unicast;
+#endif
ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
@@ -512,6 +514,8 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_service_put(svc);
return NF_DROP;
}
+
+#ifdef CONFIG_SYSCTL
net = skb_net(skb);
#ifdef CONFIG_IP_VS_IPV6
@@ -563,6 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_conn_put(cp);
return ret;
}
+#endif
/*
* When the virtual ftp service is presented, packets destined
@@ -599,6 +604,33 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
return NF_DROP;
}
+#ifdef CONFIG_SYSCTL
+
+static int sysctl_snat_reroute(struct sk_buff *skb)
+{
+ struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+ return ipvs->sysctl_snat_reroute;
+}
+
+static int sysctl_nat_icmp_send(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ return ipvs->sysctl_nat_icmp_send;
+}
+
+static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_expire_nodest_conn;
+}
+
+#else
+
+static int sysctl_snat_reroute(struct sk_buff *skb) { return 0; }
+static int sysctl_nat_icmp_send(struct net *net) { return 0; }
+static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
+
+#endif
+
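These three accessors set the pattern used throughout this series for letting IPVS build with CONFIG_SYSCTL disabled: callers never read the netns_ipvs sysctl fields directly, they go through a tiny helper that degrades to a compile-time default when sysctls are compiled out. The shape of the pattern, with a made-up knob and namespace struct for illustration:

	struct example_ns {			/* stand-in for struct netns_ipvs */
		int sysctl_example_knob;
	};

	#ifdef CONFIG_SYSCTL
	static int sysctl_example_knob(struct example_ns *ns)
	{
		return ns->sysctl_example_knob;	/* tunable at run time */
	}
	#else
	static int sysctl_example_knob(struct example_ns *ns)
	{
		return 0;			/* fixed default without sysctl support */
	}
	#endif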
__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
{
return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
@@ -631,6 +663,22 @@ static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
}
#endif
+static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
+{
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6) {
+ if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
+ return 1;
+ } else
+#endif
+ if ((sysctl_snat_reroute(skb) ||
+ skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+ ip_route_me_harder(skb, RTN_LOCAL) != 0)
+ return 1;
+
+ return 0;
+}
+
/*
* Packet has been made sufficiently writable in caller
* - inout: 1=in->out, 0=out->in
@@ -737,7 +785,6 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
struct ip_vs_protocol *pp,
unsigned int offset, unsigned int ihl)
{
- struct netns_ipvs *ipvs;
unsigned int verdict = NF_DROP;
if (IP_VS_FWD_METHOD(cp) != 0) {
@@ -759,8 +806,6 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
if (!skb_make_writable(skb, offset))
goto out;
- ipvs = net_ipvs(skb_net(skb));
-
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
ip_vs_nat_icmp_v6(skb, pp, cp, 1);
@@ -768,16 +813,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
#endif
ip_vs_nat_icmp(skb, pp, cp, 1);
-#ifdef CONFIG_IP_VS_IPV6
- if (af == AF_INET6) {
- if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
- goto out;
- } else
-#endif
- if ((ipvs->sysctl_snat_reroute ||
- skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
- ip_route_me_harder(skb, RTN_LOCAL) != 0)
- goto out;
+ if (ip_vs_route_me_harder(af, skb))
+ goto out;
/* do the statistics and put it back */
ip_vs_out_stats(cp, skb);
@@ -985,7 +1022,6 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
struct ip_vs_conn *cp, int ihl)
{
struct ip_vs_protocol *pp = pd->pp;
- struct netns_ipvs *ipvs;
IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
@@ -1021,18 +1057,8 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
* if it came from this machine itself. So re-compute
* the routing information.
*/
- ipvs = net_ipvs(skb_net(skb));
-
-#ifdef CONFIG_IP_VS_IPV6
- if (af == AF_INET6) {
- if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
- goto drop;
- } else
-#endif
- if ((ipvs->sysctl_snat_reroute ||
- skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
- ip_route_me_harder(skb, RTN_LOCAL) != 0)
- goto drop;
+ if (ip_vs_route_me_harder(af, skb))
+ goto drop;
IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
@@ -1066,7 +1092,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
- struct netns_ipvs *ipvs;
EnterFunction(11);
@@ -1141,11 +1166,10 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
* Check if the packet belongs to an existing entry
*/
cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
- ipvs = net_ipvs(net);
if (likely(cp))
return handle_response(af, skb, pd, cp, iph.len);
- if (ipvs->sysctl_nat_icmp_send &&
+ if (sysctl_nat_icmp_send(net) &&
(pp->protocol == IPPROTO_TCP ||
pp->protocol == IPPROTO_UDP ||
pp->protocol == IPPROTO_SCTP)) {
@@ -1570,7 +1594,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
/* the destination server is not available */
- if (ipvs->sysctl_expire_nodest_conn) {
+ if (sysctl_expire_nodest_conn(ipvs)) {
/* try to expire the connection immediately */
ip_vs_conn_expire_now(cp);
}
@@ -1600,15 +1624,15 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
*/
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
- pkts = ipvs->sysctl_sync_threshold[0];
+ pkts = sysctl_sync_threshold(ipvs);
else
pkts = atomic_add_return(1, &cp->in_pkts);
if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
cp->protocol == IPPROTO_SCTP) {
if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
- (pkts % ipvs->sysctl_sync_threshold[1]
- == ipvs->sysctl_sync_threshold[0])) ||
+ (pkts % sysctl_sync_period(ipvs)
+ == sysctl_sync_threshold(ipvs))) ||
(cp->old_state != cp->state &&
((cp->state == IP_VS_SCTP_S_CLOSED) ||
(cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
@@ -1622,8 +1646,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
(((cp->protocol != IPPROTO_TCP ||
cp->state == IP_VS_TCP_S_ESTABLISHED) &&
- (pkts % ipvs->sysctl_sync_threshold[1]
- == ipvs->sysctl_sync_threshold[0])) ||
+ (pkts % sysctl_sync_period(ipvs)
+ == sysctl_sync_threshold(ipvs))) ||
((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
(cp->state == IP_VS_TCP_S_CLOSE) ||
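sysctl_sync_threshold() and sysctl_sync_period() replace the open-coded sysctl_sync_threshold[0]/[1] reads; judging by the other wrappers in this series they presumably live in include/net/ip_vs.h and look roughly like the sketch below (assumed shape, not quoted from the patch), so the sync condition reads as pkts % sysctl_sync_period(ipvs) == sysctl_sync_threshold(ipvs):

	#ifdef CONFIG_SYSCTL
	static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
	{
		return ipvs->sysctl_sync_threshold[0];	/* start syncing after this many packets */
	}

	static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
	{
		return ipvs->sysctl_sync_threshold[1];	/* then sync every this-many packets */
	}
	#else
	static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
	{
		return DEFAULT_SYNC_THRESHOLD;
	}

	static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
	{
		return DEFAULT_SYNC_PERIOD;
	}
	#endif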
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index a60b20fa142e..b799cea31f95 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -86,6 +86,8 @@ static int __ip_vs_addr_is_local_v6(struct net *net,
return 0;
}
#endif
+
+#ifdef CONFIG_SYSCTL
/*
* update_defense_level is called from keventd and from sysctl,
* so it needs to protect itself from softirqs
@@ -227,6 +229,7 @@ static void defense_work_handler(struct work_struct *work)
ip_vs_random_dropentry(ipvs->net);
schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
}
+#endif
int
ip_vs_use_count_inc(void)
@@ -409,9 +412,11 @@ ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
/*
* Check the table hashed by fwmark first
*/
- svc = __ip_vs_svc_fwm_find(net, af, fwmark);
- if (fwmark && svc)
- goto out;
+ if (fwmark) {
+ svc = __ip_vs_svc_fwm_find(net, af, fwmark);
+ if (svc)
+ goto out;
+ }
/*
* Check the table hashed by <protocol,addr,port>
@@ -707,13 +712,39 @@ static void ip_vs_trash_cleanup(struct net *net)
}
}
+static void
+ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
+{
+#define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->ustats.c - src->ustats0.c
+
+ spin_lock_bh(&src->lock);
+
+ IP_VS_SHOW_STATS_COUNTER(conns);
+ IP_VS_SHOW_STATS_COUNTER(inpkts);
+ IP_VS_SHOW_STATS_COUNTER(outpkts);
+ IP_VS_SHOW_STATS_COUNTER(inbytes);
+ IP_VS_SHOW_STATS_COUNTER(outbytes);
+
+ ip_vs_read_estimator(dst, src);
+
+ spin_unlock_bh(&src->lock);
+}
static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
spin_lock_bh(&stats->lock);
- memset(&stats->ustats, 0, sizeof(stats->ustats));
+ /* get current counters as zero point, rates are zeroed */
+
+#define IP_VS_ZERO_STATS_COUNTER(c) stats->ustats0.c = stats->ustats.c
+
+ IP_VS_ZERO_STATS_COUNTER(conns);
+ IP_VS_ZERO_STATS_COUNTER(inpkts);
+ IP_VS_ZERO_STATS_COUNTER(outpkts);
+ IP_VS_ZERO_STATS_COUNTER(inbytes);
+ IP_VS_ZERO_STATS_COUNTER(outbytes);
+
ip_vs_zero_estimator(stats);
spin_unlock_bh(&stats->lock);
@@ -772,7 +803,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
spin_unlock_bh(&dest->dst_lock);
if (add)
- ip_vs_new_estimator(svc->net, &dest->stats);
+ ip_vs_start_estimator(svc->net, &dest->stats);
write_lock_bh(&__ip_vs_svc_lock);
@@ -978,7 +1009,7 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
{
struct netns_ipvs *ipvs = net_ipvs(net);
- ip_vs_kill_estimator(net, &dest->stats);
+ ip_vs_stop_estimator(net, &dest->stats);
/*
* Remove it from the d-linked list with the real services.
@@ -1171,7 +1202,7 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
else if (svc->port == 0)
atomic_inc(&ipvs->nullsvc_counter);
- ip_vs_new_estimator(net, &svc->stats);
+ ip_vs_start_estimator(net, &svc->stats);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
@@ -1323,7 +1354,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
if (svc->af == AF_INET)
ipvs->num_services--;
- ip_vs_kill_estimator(svc->net, &svc->stats);
+ ip_vs_stop_estimator(svc->net, &svc->stats);
/* Unbind scheduler */
old_sched = svc->scheduler;
@@ -1477,11 +1508,11 @@ static int ip_vs_zero_all(struct net *net)
}
}
- ip_vs_zero_stats(net_ipvs(net)->tot_stats);
+ ip_vs_zero_stats(&net_ipvs(net)->tot_stats);
return 0;
}
-
+#ifdef CONFIG_SYSCTL
static int
proc_do_defense_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1503,7 +1534,6 @@ proc_do_defense_mode(ctl_table *table, int write,
return rc;
}
-
static int
proc_do_sync_threshold(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1737,6 +1767,7 @@ const struct ctl_path net_vs_ctl_path[] = {
{ }
};
EXPORT_SYMBOL_GPL(net_vs_ctl_path);
+#endif
#ifdef CONFIG_PROC_FS
@@ -1959,7 +1990,7 @@ static const struct file_operations ip_vs_info_fops = {
static int ip_vs_stats_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_single_net(seq);
- struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
+ struct ip_vs_stats_user show;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
@@ -1967,22 +1998,18 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
seq_printf(seq,
" Conns Packets Packets Bytes Bytes\n");
- spin_lock_bh(&tot_stats->lock);
- seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", tot_stats->ustats.conns,
- tot_stats->ustats.inpkts, tot_stats->ustats.outpkts,
- (unsigned long long) tot_stats->ustats.inbytes,
- (unsigned long long) tot_stats->ustats.outbytes);
+ ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats);
+ seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", show.conns,
+ show.inpkts, show.outpkts,
+ (unsigned long long) show.inbytes,
+ (unsigned long long) show.outbytes);
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
- seq_printf(seq,"%8X %8X %8X %16X %16X\n",
- tot_stats->ustats.cps,
- tot_stats->ustats.inpps,
- tot_stats->ustats.outpps,
- tot_stats->ustats.inbps,
- tot_stats->ustats.outbps);
- spin_unlock_bh(&tot_stats->lock);
+ seq_printf(seq, "%8X %8X %8X %16X %16X\n",
+ show.cps, show.inpps, show.outpps,
+ show.inbps, show.outbps);
return 0;
}
@@ -2003,7 +2030,9 @@ static const struct file_operations ip_vs_stats_fops = {
static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_single_net(seq);
- struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
+ struct ip_vs_stats *tot_stats = &net_ipvs(net)->tot_stats;
+ struct ip_vs_cpu_stats *cpustats = tot_stats->cpustats;
+ struct ip_vs_stats_user rates;
int i;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
@@ -2013,30 +2042,43 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
"CPU Conns Packets Packets Bytes Bytes\n");
for_each_possible_cpu(i) {
- struct ip_vs_cpu_stats *u = per_cpu_ptr(net->ipvs->cpustats, i);
+ struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i);
+ unsigned int start;
+ __u64 inbytes, outbytes;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&u->syncp);
+ inbytes = u->ustats.inbytes;
+ outbytes = u->ustats.outbytes;
+ } while (u64_stats_fetch_retry_bh(&u->syncp, start));
+
seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
- i, u->ustats.conns, u->ustats.inpkts,
- u->ustats.outpkts, (__u64)u->ustats.inbytes,
- (__u64)u->ustats.outbytes);
+ i, u->ustats.conns, u->ustats.inpkts,
+ u->ustats.outpkts, (__u64)inbytes,
+ (__u64)outbytes);
}
spin_lock_bh(&tot_stats->lock);
+
seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n",
tot_stats->ustats.conns, tot_stats->ustats.inpkts,
tot_stats->ustats.outpkts,
(unsigned long long) tot_stats->ustats.inbytes,
(unsigned long long) tot_stats->ustats.outbytes);
+ ip_vs_read_estimator(&rates, tot_stats);
+
+ spin_unlock_bh(&tot_stats->lock);
+
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq, " %8X %8X %8X %16X %16X\n",
- tot_stats->ustats.cps,
- tot_stats->ustats.inpps,
- tot_stats->ustats.outpps,
- tot_stats->ustats.inbps,
- tot_stats->ustats.outbps);
- spin_unlock_bh(&tot_stats->lock);
+ rates.cps,
+ rates.inpps,
+ rates.outpps,
+ rates.inbps,
+ rates.outbps);
return 0;
}
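The per-CPU byte counters read above are updated on the packet path under u64_stats_update_begin()/end(), so a reader has to spin on the seqcount until it gets a consistent 64-bit snapshot; the packet and connection counts stay plain 32-bit counters and are read directly. A minimal reader matching the loop in the hunk (assuming the ip_vs_cpu_stats layout implied there):

	#include <linux/u64_stats_sync.h>

	static void example_read_bytes(const struct ip_vs_cpu_stats *s,
				       u64 *inbytes, u64 *outbytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&s->syncp);
			*inbytes  = s->ustats.inbytes;
			*outbytes = s->ustats.outbytes;
		} while (u64_stats_fetch_retry_bh(&s->syncp, start));
	}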
@@ -2284,14 +2326,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
static void
-ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
-{
- spin_lock_bh(&src->lock);
- memcpy(dst, &src->ustats, sizeof(*dst));
- spin_unlock_bh(&src->lock);
-}
-
-static void
ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
{
dst->protocol = src->protocol;
@@ -2677,31 +2711,29 @@ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
struct ip_vs_stats *stats)
{
+ struct ip_vs_stats_user ustats;
struct nlattr *nl_stats = nla_nest_start(skb, container_type);
if (!nl_stats)
return -EMSGSIZE;
- spin_lock_bh(&stats->lock);
-
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->ustats.conns);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->ustats.inpkts);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->ustats.outpkts);
- NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->ustats.inbytes);
- NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->ustats.outbytes);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->ustats.cps);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->ustats.inpps);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->ustats.outpps);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->ustats.inbps);
- NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->ustats.outbps);
+ ip_vs_copy_stats(&ustats, stats);
- spin_unlock_bh(&stats->lock);
+ NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns);
+ NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts);
+ NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts);
+ NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes);
+ NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes);
+ NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps);
+ NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps);
+ NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps);
+ NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps);
+ NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps);
nla_nest_end(skb, nl_stats);
return 0;
nla_put_failure:
- spin_unlock_bh(&stats->lock);
nla_nest_cancel(skb, nl_stats);
return -EMSGSIZE;
}
@@ -3480,7 +3512,8 @@ static void ip_vs_genl_unregister(void)
/*
 * per netns init/exit func.
*/
-int __net_init __ip_vs_control_init(struct net *net)
+#ifdef CONFIG_SYSCTL
+int __net_init __ip_vs_control_init_sysctl(struct net *net)
{
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3490,38 +3523,11 @@ int __net_init __ip_vs_control_init(struct net *net)
spin_lock_init(&ipvs->dropentry_lock);
spin_lock_init(&ipvs->droppacket_lock);
spin_lock_init(&ipvs->securetcp_lock);
- ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
-
- /* Initialize rs_table */
- for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
- INIT_LIST_HEAD(&ipvs->rs_table[idx]);
-
- INIT_LIST_HEAD(&ipvs->dest_trash);
- atomic_set(&ipvs->ftpsvc_counter, 0);
- atomic_set(&ipvs->nullsvc_counter, 0);
-
- /* procfs stats */
- ipvs->tot_stats = kzalloc(sizeof(struct ip_vs_stats), GFP_KERNEL);
- if (ipvs->tot_stats == NULL) {
- pr_err("%s(): no memory.\n", __func__);
- return -ENOMEM;
- }
- ipvs->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
- if (!ipvs->cpustats) {
- pr_err("%s() alloc_percpu failed\n", __func__);
- goto err_alloc;
- }
- spin_lock_init(&ipvs->tot_stats->lock);
-
- proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
- proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
- proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
- &ip_vs_stats_percpu_fops);
if (!net_eq(net, &init_net)) {
tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
if (tbl == NULL)
- goto err_dup;
+ return -ENOMEM;
} else
tbl = vs_vars;
/* Initialize sysctl defaults */
@@ -3543,52 +3549,94 @@ int __net_init __ip_vs_control_init(struct net *net)
tbl[idx++].data = &ipvs->sysctl_cache_bypass;
tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
- ipvs->sysctl_sync_threshold[0] = 3;
- ipvs->sysctl_sync_threshold[1] = 50;
+ ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
+ ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
tbl[idx].data = &ipvs->sysctl_sync_threshold;
tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
-#ifdef CONFIG_SYSCTL
ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
tbl);
if (ipvs->sysctl_hdr == NULL) {
if (!net_eq(net, &init_net))
kfree(tbl);
- goto err_dup;
+ return -ENOMEM;
}
-#endif
- ip_vs_new_estimator(net, ipvs->tot_stats);
+ ip_vs_start_estimator(net, &ipvs->tot_stats);
ipvs->sysctl_tbl = tbl;
/* Schedule defense work */
INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
- return 0;
-err_dup:
- free_percpu(ipvs->cpustats);
-err_alloc:
- kfree(ipvs->tot_stats);
- return -ENOMEM;
+ return 0;
}
-static void __net_exit __ip_vs_control_cleanup(struct net *net)
+void __net_exit __ip_vs_control_cleanup_sysctl(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
- ip_vs_trash_cleanup(net);
- ip_vs_kill_estimator(net, ipvs->tot_stats);
cancel_delayed_work_sync(&ipvs->defense_work);
cancel_work_sync(&ipvs->defense_work.work);
-#ifdef CONFIG_SYSCTL
unregister_net_sysctl_table(ipvs->sysctl_hdr);
+}
+
+#else
+
+int __net_init __ip_vs_control_init_sysctl(struct net *net) { return 0; }
+void __net_exit __ip_vs_control_cleanup_sysctl(struct net *net) { }
+
#endif
+
+int __net_init __ip_vs_control_init(struct net *net)
+{
+ int idx;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+
+ /* Initialize rs_table */
+ for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
+ INIT_LIST_HEAD(&ipvs->rs_table[idx]);
+
+ INIT_LIST_HEAD(&ipvs->dest_trash);
+ atomic_set(&ipvs->ftpsvc_counter, 0);
+ atomic_set(&ipvs->nullsvc_counter, 0);
+
+ /* procfs stats */
+ ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!ipvs->tot_stats.cpustats) {
+ pr_err("%s(): alloc_percpu failed\n", __func__);
+ return -ENOMEM;
+ }
+ spin_lock_init(&ipvs->tot_stats.lock);
+
+ proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
+ proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
+ proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
+ &ip_vs_stats_percpu_fops);
+
+ if (__ip_vs_control_init_sysctl(net))
+ goto err;
+
+ return 0;
+
+err:
+ free_percpu(ipvs->tot_stats.cpustats);
+ return -ENOMEM;
+}
+
+static void __net_exit __ip_vs_control_cleanup(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_trash_cleanup(net);
+ ip_vs_stop_estimator(net, &ipvs->tot_stats);
+ __ip_vs_control_cleanup_sysctl(net);
proc_net_remove(net, "ip_vs_stats_percpu");
proc_net_remove(net, "ip_vs_stats");
proc_net_remove(net, "ip_vs");
- free_percpu(ipvs->cpustats);
- kfree(ipvs->tot_stats);
+ free_percpu(ipvs->tot_stats.cpustats);
}
static struct pernet_operations ipvs_control_ops = {
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index f560a05c965a..8c8766ca56ad 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -69,10 +69,10 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
sum->inpkts += s->ustats.inpkts;
sum->outpkts += s->ustats.outpkts;
do {
- start = u64_stats_fetch_begin_bh(&s->syncp);
+ start = u64_stats_fetch_begin(&s->syncp);
inbytes = s->ustats.inbytes;
outbytes = s->ustats.outbytes;
- } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+ } while (u64_stats_fetch_retry(&s->syncp, start));
sum->inbytes += inbytes;
sum->outbytes += outbytes;
} else {
@@ -80,10 +80,10 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
sum->inpkts = s->ustats.inpkts;
sum->outpkts = s->ustats.outpkts;
do {
- start = u64_stats_fetch_begin_bh(&s->syncp);
+ start = u64_stats_fetch_begin(&s->syncp);
sum->inbytes = s->ustats.inbytes;
sum->outbytes = s->ustats.outbytes;
- } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+ } while (u64_stats_fetch_retry(&s->syncp, start));
}
}
}
@@ -101,13 +101,12 @@ static void estimation_timer(unsigned long arg)
struct netns_ipvs *ipvs;
ipvs = net_ipvs(net);
- ip_vs_read_cpu_stats(&ipvs->tot_stats->ustats, ipvs->cpustats);
spin_lock(&ipvs->est_lock);
list_for_each_entry(e, &ipvs->est_list, list) {
s = container_of(e, struct ip_vs_stats, est);
- ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
spin_lock(&s->lock);
+ ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
n_conns = s->ustats.conns;
n_inpkts = s->ustats.inpkts;
n_outpkts = s->ustats.outpkts;
@@ -118,61 +117,41 @@ static void estimation_timer(unsigned long arg)
rate = (n_conns - e->last_conns) << 9;
e->last_conns = n_conns;
e->cps += ((long)rate - (long)e->cps) >> 2;
- s->ustats.cps = (e->cps + 0x1FF) >> 10;
rate = (n_inpkts - e->last_inpkts) << 9;
e->last_inpkts = n_inpkts;
e->inpps += ((long)rate - (long)e->inpps) >> 2;
- s->ustats.inpps = (e->inpps + 0x1FF) >> 10;
rate = (n_outpkts - e->last_outpkts) << 9;
e->last_outpkts = n_outpkts;
e->outpps += ((long)rate - (long)e->outpps) >> 2;
- s->ustats.outpps = (e->outpps + 0x1FF) >> 10;
rate = (n_inbytes - e->last_inbytes) << 4;
e->last_inbytes = n_inbytes;
e->inbps += ((long)rate - (long)e->inbps) >> 2;
- s->ustats.inbps = (e->inbps + 0xF) >> 5;
rate = (n_outbytes - e->last_outbytes) << 4;
e->last_outbytes = n_outbytes;
e->outbps += ((long)rate - (long)e->outbps) >> 2;
- s->ustats.outbps = (e->outbps + 0xF) >> 5;
spin_unlock(&s->lock);
}
spin_unlock(&ipvs->est_lock);
mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
}
-void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats)
+void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats)
{
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_estimator *est = &stats->est;
INIT_LIST_HEAD(&est->list);
- est->last_conns = stats->ustats.conns;
- est->cps = stats->ustats.cps<<10;
-
- est->last_inpkts = stats->ustats.inpkts;
- est->inpps = stats->ustats.inpps<<10;
-
- est->last_outpkts = stats->ustats.outpkts;
- est->outpps = stats->ustats.outpps<<10;
-
- est->last_inbytes = stats->ustats.inbytes;
- est->inbps = stats->ustats.inbps<<5;
-
- est->last_outbytes = stats->ustats.outbytes;
- est->outbps = stats->ustats.outbps<<5;
-
spin_lock_bh(&ipvs->est_lock);
list_add(&est->list, &ipvs->est_list);
spin_unlock_bh(&ipvs->est_lock);
}
-void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats)
+void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats)
{
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_estimator *est = &stats->est;
@@ -185,13 +164,14 @@ void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats)
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est = &stats->est;
-
- /* set counters zero, caller must hold the stats->lock lock */
- est->last_inbytes = 0;
- est->last_outbytes = 0;
- est->last_conns = 0;
- est->last_inpkts = 0;
- est->last_outpkts = 0;
+ struct ip_vs_stats_user *u = &stats->ustats;
+
+ /* reset counters, caller must hold the stats->lock lock */
+ est->last_inbytes = u->inbytes;
+ est->last_outbytes = u->outbytes;
+ est->last_conns = u->conns;
+ est->last_inpkts = u->inpkts;
+ est->last_outpkts = u->outpkts;
est->cps = 0;
est->inpps = 0;
est->outpps = 0;
@@ -199,6 +179,19 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
est->outbps = 0;
}
+/* Get decoded rates */
+void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
+ struct ip_vs_stats *stats)
+{
+ struct ip_vs_estimator *e = &stats->est;
+
+ dst->cps = (e->cps + 0x1FF) >> 10;
+ dst->inpps = (e->inpps + 0x1FF) >> 10;
+ dst->outpps = (e->outpps + 0x1FF) >> 10;
+ dst->inbps = (e->inbps + 0xF) >> 5;
+ dst->outbps = (e->outbps + 0xF) >> 5;
+}
+
static int __net_init __ip_vs_estimator_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
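The decoding that used to be done inside the timer now happens on demand in ip_vs_read_estimator(), but the arithmetic is unchanged: the timer runs every 2 seconds, so (delta << 9) is the per-second rate in <<10 fixed point (<< 4 giving <<5 fixed point for bytes), smoothed with an exponentially weighted moving average that gives the new sample a weight of 1/4. Condensed into one hypothetical helper for a single counter:

	static u32 example_update_cps(u64 conns_now, u64 *last_conns, u32 *cps_fp)
	{
		u32 rate = (conns_now - *last_conns) << 9;	/* conns/s in <<10 fixed point (2 s tick) */

		*last_conns = conns_now;
		*cps_fp += ((long)rate - (long)*cps_fp) >> 2;	/* EWMA, alpha = 1/4 */
		return (*cps_fp + 0x1FF) >> 10;			/* rounded conns/s, as ip_vs_read_estimator() does */
	}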
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 6bf7a807649c..f276df9896b3 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -63,6 +63,8 @@
#define CHECK_EXPIRE_INTERVAL (60*HZ)
#define ENTRY_TIMEOUT (6*60*HZ)
+#define DEFAULT_EXPIRATION (24*60*60*HZ)
+
/*
* It is for full expiration check.
* When there is no partial expiration check (garbage collection)
@@ -112,7 +114,7 @@ struct ip_vs_lblc_table {
/*
* IPVS LBLC sysctl table
*/
-
+#ifdef CONFIG_SYSCTL
static ctl_table vs_vars_table[] = {
{
.procname = "lblc_expiration",
@@ -123,6 +125,7 @@ static ctl_table vs_vars_table[] = {
},
{ }
};
+#endif
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
@@ -238,6 +241,15 @@ static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
}
}
+static int sysctl_lblc_expiration(struct ip_vs_service *svc)
+{
+#ifdef CONFIG_SYSCTL
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
+ return ipvs->sysctl_lblc_expiration;
+#else
+ return DEFAULT_EXPIRATION;
+#endif
+}
static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{
@@ -245,7 +257,6 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
struct ip_vs_lblc_entry *en, *nxt;
unsigned long now = jiffies;
int i, j;
- struct netns_ipvs *ipvs = net_ipvs(svc->net);
for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK;
@@ -254,7 +265,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now,
en->lastuse +
- ipvs->sysctl_lblc_expiration))
+ sysctl_lblc_expiration(svc)))
continue;
ip_vs_lblc_free(en);
@@ -538,6 +549,7 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
/*
* per netns init.
*/
+#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblc_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -550,10 +562,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
return -ENOMEM;
} else
ipvs->lblc_ctl_table = vs_vars_table;
- ipvs->sysctl_lblc_expiration = 24*60*60*HZ;
+ ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION;
ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
-#ifdef CONFIG_SYSCTL
ipvs->lblc_ctl_header =
register_net_sysctl_table(net, net_vs_ctl_path,
ipvs->lblc_ctl_table);
@@ -562,7 +573,6 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
kfree(ipvs->lblc_ctl_table);
return -ENOMEM;
}
-#endif
return 0;
}
@@ -571,14 +581,19 @@ static void __net_exit __ip_vs_lblc_exit(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
-#ifdef CONFIG_SYSCTL
unregister_net_sysctl_table(ipvs->lblc_ctl_header);
-#endif
if (!net_eq(net, &init_net))
kfree(ipvs->lblc_ctl_table);
}
+#else
+
+static int __net_init __ip_vs_lblc_init(struct net *net) { return 0; }
+static void __net_exit __ip_vs_lblc_exit(struct net *net) { }
+
+#endif
+
static struct pernet_operations ip_vs_lblc_ops = {
.init = __ip_vs_lblc_init,
.exit = __ip_vs_lblc_exit,
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 00631765b92a..cb1c9913d38b 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -63,6 +63,8 @@
#define CHECK_EXPIRE_INTERVAL (60*HZ)
#define ENTRY_TIMEOUT (6*60*HZ)
+#define DEFAULT_EXPIRATION (24*60*60*HZ)
+
/*
* It is for full expiration check.
* When there is no partial expiration check (garbage collection)
@@ -283,6 +285,7 @@ struct ip_vs_lblcr_table {
};
+#ifdef CONFIG_SYSCTL
/*
* IPVS LBLCR sysctl table
*/
@@ -297,6 +300,7 @@ static ctl_table vs_vars_table[] = {
},
{ }
};
+#endif
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
@@ -410,6 +414,15 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
}
}
+static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
+{
+#ifdef CONFIG_SYSCTL
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
+ return ipvs->sysctl_lblcr_expiration;
+#else
+ return DEFAULT_EXPIRATION;
+#endif
+}
static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
@@ -417,15 +430,14 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
unsigned long now = jiffies;
int i, j;
struct ip_vs_lblcr_entry *en, *nxt;
- struct netns_ipvs *ipvs = net_ipvs(svc->net);
for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
- if (time_after(en->lastuse
- + ipvs->sysctl_lblcr_expiration, now))
+ if (time_after(en->lastuse +
+ sysctl_lblcr_expiration(svc), now))
continue;
ip_vs_lblcr_free(en);
@@ -650,7 +662,6 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
read_lock(&svc->sched_lock);
en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
if (en) {
- struct netns_ipvs *ipvs = net_ipvs(svc->net);
/* We only hold a read lock, but this is atomic */
en->lastuse = jiffies;
@@ -662,7 +673,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* More than one destination + enough time passed by, cleanup */
if (atomic_read(&en->set.size) > 1 &&
time_after(jiffies, en->set.lastmod +
- ipvs->sysctl_lblcr_expiration)) {
+ sysctl_lblcr_expiration(svc))) {
struct ip_vs_dest *m;
write_lock(&en->set.lock);
@@ -734,6 +745,7 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
/*
* per netns init.
*/
+#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblcr_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -746,10 +758,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
return -ENOMEM;
} else
ipvs->lblcr_ctl_table = vs_vars_table;
- ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
+ ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
-#ifdef CONFIG_SYSCTL
ipvs->lblcr_ctl_header =
register_net_sysctl_table(net, net_vs_ctl_path,
ipvs->lblcr_ctl_table);
@@ -758,7 +769,6 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
kfree(ipvs->lblcr_ctl_table);
return -ENOMEM;
}
-#endif
return 0;
}
@@ -767,14 +777,19 @@ static void __net_exit __ip_vs_lblcr_exit(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
-#ifdef CONFIG_SYSCTL
unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
-#endif
if (!net_eq(net, &init_net))
kfree(ipvs->lblcr_ctl_table);
}
+#else
+
+static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
+static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }
+
+#endif
+
static struct pernet_operations ip_vs_lblcr_ops = {
.init = __ip_vs_lblcr_init,
.exit = __ip_vs_lblcr_exit,
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 0d83bc01fed4..13d607ae9c52 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -92,14 +92,13 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
return -EINVAL;
- p->pe_data = kmalloc(matchlen, GFP_ATOMIC);
- if (!p->pe_data)
- return -ENOMEM;
-
/* N.B: pe_data is only set on success,
* this allows fallback to the default persistence logic on failure
*/
- memcpy(p->pe_data, dptr + matchoff, matchlen);
+ p->pe_data = kmemdup(dptr + matchoff, matchlen, GFP_ATOMIC);
+ if (!p->pe_data)
+ return -ENOMEM;
+
p->pe_data_len = matchlen;
return 0;
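This hunk and the matching one in ip_vs_sync.c below replace a kmalloc()+memcpy() pair with kmemdup(); since the copy now happens before the pointer is stored, pe_data is still only set on success and the fallback-to-default-persistence behaviour is preserved. A generic sketch of the shape (names are hypothetical):

	#include <linux/errno.h>
	#include <linux/slab.h>

	static int example_store_token(char **dst, unsigned int *dst_len,
				       const char *buf, unsigned int off, unsigned int len)
	{
		char *copy = kmemdup(buf + off, len, GFP_ATOMIC);

		if (!copy)
			return -ENOMEM;		/* *dst left untouched, caller can fall back */
		*dst = copy;
		*dst_len = len;
		return 0;
	}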
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index fecf24de4af3..3e7961e85e9c 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -394,7 +394,7 @@ void ip_vs_sync_switch_mode(struct net *net, int mode)
if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
return;
- if (mode == ipvs->sysctl_sync_ver || !ipvs->sync_buff)
+ if (mode == sysctl_sync_ver(ipvs) || !ipvs->sync_buff)
return;
spin_lock_bh(&ipvs->sync_buff_lock);
@@ -521,7 +521,7 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
unsigned int len, pe_name_len, pad;
/* Handle old version of the protocol */
- if (ipvs->sysctl_sync_ver == 0) {
+ if (sysctl_sync_ver(ipvs) == 0) {
ip_vs_sync_conn_v0(net, cp);
return;
}
@@ -650,7 +650,7 @@ control:
if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
int pkts = atomic_add_return(1, &cp->in_pkts);
- if (pkts % ipvs->sysctl_sync_threshold[1] != 1)
+ if (pkts % sysctl_sync_period(ipvs) != 1)
return;
}
goto sloop;
@@ -697,13 +697,12 @@ ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
return 1;
}
- p->pe_data = kmalloc(pe_data_len, GFP_ATOMIC);
+ p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC);
if (!p->pe_data) {
if (p->pe->module)
module_put(p->pe->module);
return -ENOMEM;
}
- memcpy(p->pe_data, pe_data, pe_data_len);
p->pe_data_len = pe_data_len;
}
return 0;
@@ -795,7 +794,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
if (opt)
memcpy(&cp->in_seq, opt, sizeof(*opt));
- atomic_set(&cp->in_pkts, ipvs->sysctl_sync_threshold[0]);
+ atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
cp->state = state;
cp->old_state = cp->state;
/*
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 2f454efa1a8b..941286ca911d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1301,6 +1301,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
nf_conntrack_ecache_fini(net);
+ nf_conntrack_tstamp_fini(net);
nf_conntrack_acct_fini(net);
nf_conntrack_expect_fini(net);
kmem_cache_destroy(net->ct.nf_conntrack_cachep);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 0a77d2ff2154..a9adf4c6b299 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -183,14 +183,14 @@ EXPORT_SYMBOL(xt_unregister_matches);
/*
* These are weird, but module loading must not be done with mutex
* held (since they will register), and we have to have a single
- * function to use try_then_request_module().
+ * function to use.
*/
/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
struct xt_match *m;
- int err = 0;
+ int err = -ENOENT;
if (mutex_lock_interruptible(&xt[af].mutex) != 0)
return ERR_PTR(-EINTR);
@@ -221,9 +221,13 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
struct xt_match *match;
- match = try_then_request_module(xt_find_match(nfproto, name, revision),
- "%st_%s", xt_prefix[nfproto], name);
- return (match != NULL) ? match : ERR_PTR(-ENOENT);
+ match = xt_find_match(nfproto, name, revision);
+ if (IS_ERR(match)) {
+ request_module("%st_%s", xt_prefix[nfproto], name);
+ match = xt_find_match(nfproto, name, revision);
+ }
+
+ return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
@@ -231,7 +235,7 @@ EXPORT_SYMBOL_GPL(xt_request_find_match);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
struct xt_target *t;
- int err = 0;
+ int err = -ENOENT;
if (mutex_lock_interruptible(&xt[af].mutex) != 0)
return ERR_PTR(-EINTR);
@@ -261,9 +265,13 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
struct xt_target *target;
- target = try_then_request_module(xt_find_target(af, name, revision),
- "%st_%s", xt_prefix[af], name);
- return (target != NULL) ? target : ERR_PTR(-ENOENT);
+ target = xt_find_target(af, name, revision);
+ if (IS_ERR(target)) {
+ request_module("%st_%s", xt_prefix[af], name);
+ target = xt_find_target(af, name, revision);
+ }
+
+ return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
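With try_then_request_module() gone, xt_request_find_match() and xt_request_find_target() always hand back either a valid entry or an ERR_PTR() (-ENOENT when even request_module() could not provide it, -EINTR if the mutex wait was interrupted), so callers test with IS_ERR() rather than for NULL. A hedged caller sketch:

	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/netfilter/x_tables.h>

	static int example_lookup(u8 nfproto, const char *name, u8 revision)
	{
		struct xt_match *m = xt_request_find_match(nfproto, name, revision);

		if (IS_ERR(m))
			return PTR_ERR(m);

		/* ...use the match... */
		module_put(m->me);	/* drop the reference taken by xt_find_match() */
		return 0;
	}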
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
new file mode 100644
index 000000000000..2220b85e9519
--- /dev/null
+++ b/net/netfilter/xt_addrtype.c
@@ -0,0 +1,229 @@
+/*
+ * iptables module to match inet_addr_type() of an ip.
+ *
+ * Copyright (c) 2004 Patrick McHardy <[email protected]>
+ * (C) 2007 Laszlo Attila Toth <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <net/route.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip6_fib.h>
+#endif
+
+#include <linux/netfilter/xt_addrtype.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <[email protected]>");
+MODULE_DESCRIPTION("Xtables: address type match");
+MODULE_ALIAS("ipt_addrtype");
+MODULE_ALIAS("ip6t_addrtype");
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+{
+ u32 ret;
+
+ if (!rt)
+ return XT_ADDRTYPE_UNREACHABLE;
+
+ if (rt->rt6i_flags & RTF_REJECT)
+ ret = XT_ADDRTYPE_UNREACHABLE;
+ else
+ ret = 0;
+
+ if (rt->rt6i_flags & RTF_LOCAL)
+ ret |= XT_ADDRTYPE_LOCAL;
+ if (rt->rt6i_flags & RTF_ANYCAST)
+ ret |= XT_ADDRTYPE_ANYCAST;
+ return ret;
+}
+
+static bool match_type6(struct net *net, const struct net_device *dev,
+ const struct in6_addr *addr, u16 mask)
+{
+ int addr_type = ipv6_addr_type(addr);
+
+ if ((mask & XT_ADDRTYPE_MULTICAST) &&
+ !(addr_type & IPV6_ADDR_MULTICAST))
+ return false;
+ if ((mask & XT_ADDRTYPE_UNICAST) && !(addr_type & IPV6_ADDR_UNICAST))
+ return false;
+ if ((mask & XT_ADDRTYPE_UNSPEC) && addr_type != IPV6_ADDR_ANY)
+ return false;
+
+ if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
+ XT_ADDRTYPE_UNREACHABLE) & mask) {
+ struct rt6_info *rt;
+ u32 type;
+ int ifindex = dev ? dev->ifindex : 0;
+
+ rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
+
+ type = xt_addrtype_rt6_to_type(rt);
+
+ dst_release(&rt->dst);
+ return !!(mask & type);
+ }
+ return true;
+}
+
+static bool
+addrtype_mt6(struct net *net, const struct net_device *dev,
+ const struct sk_buff *skb, const struct xt_addrtype_info_v1 *info)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ bool ret = true;
+
+ if (info->source)
+ ret &= match_type6(net, dev, &iph->saddr, info->source) ^
+ (info->flags & XT_ADDRTYPE_INVERT_SOURCE);
+ if (ret && info->dest)
+ ret &= match_type6(net, dev, &iph->daddr, info->dest) ^
+ !!(info->flags & XT_ADDRTYPE_INVERT_DEST);
+ return ret;
+}
+#endif
+
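+/*
+ * For IPv4 each XT_ADDRTYPE_* flag is defined as 1 << RTN_*, so matching an
+ * address is a single bit test against the route type reported by
+ * inet_dev_addr_type().
+ */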
+static inline bool match_type(struct net *net, const struct net_device *dev,
+ __be32 addr, u_int16_t mask)
+{
+ return !!(mask & (1 << inet_dev_addr_type(net, dev, addr)));
+}
+
+static bool
+addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct net *net = dev_net(par->in ? par->in : par->out);
+ const struct xt_addrtype_info *info = par->matchinfo;
+ const struct iphdr *iph = ip_hdr(skb);
+ bool ret = true;
+
+ if (info->source)
+ ret &= match_type(net, NULL, iph->saddr, info->source) ^
+ info->invert_source;
+ if (info->dest)
+ ret &= match_type(net, NULL, iph->daddr, info->dest) ^
+ info->invert_dest;
+
+ return ret;
+}
+
+static bool
+addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct net *net = dev_net(par->in ? par->in : par->out);
+ const struct xt_addrtype_info_v1 *info = par->matchinfo;
+ const struct iphdr *iph;
+ const struct net_device *dev = NULL;
+ bool ret = true;
+
+ if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN)
+ dev = par->in;
+ else if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
+ dev = par->out;
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+ if (par->family == NFPROTO_IPV6)
+ return addrtype_mt6(net, dev, skb, info);
+#endif
+ iph = ip_hdr(skb);
+ if (info->source)
+ ret &= match_type(net, dev, iph->saddr, info->source) ^
+ (info->flags & XT_ADDRTYPE_INVERT_SOURCE);
+ if (ret && info->dest)
+ ret &= match_type(net, dev, iph->daddr, info->dest) ^
+ !!(info->flags & XT_ADDRTYPE_INVERT_DEST);
+ return ret;
+}
+
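+/*
+ * Reject configurations that cannot work: both interface limits at once, an
+ * interface limit in hooks where that interface is not available, and IPv6
+ * types that cannot be derived from a route lookup (BLACKHOLE, PROHIBIT and
+ * above, BROADCAST).
+ */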
+static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
+{
+ struct xt_addrtype_info_v1 *info = par->matchinfo;
+
+ if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN &&
+ info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
+ pr_info("both incoming and outgoing "
+ "interface limitation cannot be selected\n");
+ return -EINVAL;
+ }
+
+ if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN)) &&
+ info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
+ pr_info("output interface limitation "
+ "not valid in PREROUTING and INPUT\n");
+ return -EINVAL;
+ }
+
+ if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
+ (1 << NF_INET_LOCAL_OUT)) &&
+ info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) {
+ pr_info("input interface limitation "
+ "not valid in POSTROUTING and OUTPUT\n");
+ return -EINVAL;
+ }
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+ if (par->family == NFPROTO_IPV6) {
+ if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
+ pr_err("ipv6 BLACKHOLE matching not supported\n");
+ return -EINVAL;
+ }
+ if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
+			pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
+ return -EINVAL;
+ }
+ if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
+ pr_err("ipv6 does not support BROADCAST matching\n");
+ return -EINVAL;
+ }
+ }
+#endif
+ return 0;
+}
+
+static struct xt_match addrtype_mt_reg[] __read_mostly = {
+ {
+ .name = "addrtype",
+ .family = NFPROTO_IPV4,
+ .match = addrtype_mt_v0,
+ .matchsize = sizeof(struct xt_addrtype_info),
+ .me = THIS_MODULE
+ },
+ {
+ .name = "addrtype",
+ .family = NFPROTO_UNSPEC,
+ .revision = 1,
+ .match = addrtype_mt_v1,
+ .checkentry = addrtype_mt_checkentry_v1,
+ .matchsize = sizeof(struct xt_addrtype_info_v1),
+ .me = THIS_MODULE
+ }
+};
+
+static int __init addrtype_mt_init(void)
+{
+ return xt_register_matches(addrtype_mt_reg,
+ ARRAY_SIZE(addrtype_mt_reg));
+}
+
+static void __exit addrtype_mt_exit(void)
+{
+ xt_unregister_matches(addrtype_mt_reg, ARRAY_SIZE(addrtype_mt_reg));
+}
+
+module_init(addrtype_mt_init);
+module_exit(addrtype_mt_exit);
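The new match registers two revisions: revision 0 keeps the old IPv4-only ipt_addrtype binary layout, while the NFPROTO_UNSPEC revision 1 moves the invert options into a flags field and adds interface limiting and IPv6 support. The userspace snippet below only illustrates the revision 1 mask semantics for IPv4; it is not kernel code, and the hand-copied RTN_* values are an assumption of the sketch:

/* Userspace illustration of match_type(): every XT_ADDRTYPE_* bit is
 * 1 << RTN_*, so an address matches when the bit for its route type is set
 * in the configured mask. */
#include <stdbool.h>
#include <stdio.h>

/* Assumed to mirror <linux/rtnetlink.h>. */
enum { RTN_UNICAST = 1, RTN_LOCAL = 2, RTN_BROADCAST = 3, RTN_MULTICAST = 5 };

static bool addrtype_matches(unsigned int mask, unsigned int route_type)
{
	return mask & (1u << route_type);
}

int main(void)
{
	/* roughly what "--dst-type LOCAL,BROADCAST" would configure */
	unsigned int mask = (1u << RTN_LOCAL) | (1u << RTN_BROADCAST);

	printf("local:   %d\n", addrtype_matches(mask, RTN_LOCAL));	/* 1 */
	printf("unicast: %d\n", addrtype_matches(mask, RTN_UNICAST));	/* 0 */
	return 0;
}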
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index e029c4807404..c6d5a83450c9 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -33,17 +33,17 @@
/* we will save the tuples of all connections we care about */
struct xt_connlimit_conn {
- struct list_head list;
- struct nf_conntrack_tuple tuple;
+ struct hlist_node node;
+ struct nf_conntrack_tuple tuple;
+ union nf_inet_addr addr;
};
struct xt_connlimit_data {
- struct list_head iphash[256];
- spinlock_t lock;
+ struct hlist_head iphash[256];
+ spinlock_t lock;
};
static u_int32_t connlimit_rnd __read_mostly;
-static bool connlimit_rnd_inited __read_mostly;
static inline unsigned int connlimit_iphash(__be32 addr)
{
@@ -101,9 +101,9 @@ static int count_them(struct net *net,
{
const struct nf_conntrack_tuple_hash *found;
struct xt_connlimit_conn *conn;
- struct xt_connlimit_conn *tmp;
+ struct hlist_node *pos, *n;
struct nf_conn *found_ct;
- struct list_head *hash;
+ struct hlist_head *hash;
bool addit = true;
int matches = 0;
@@ -115,7 +115,7 @@ static int count_them(struct net *net,
rcu_read_lock();
/* check the saved connections */
- list_for_each_entry_safe(conn, tmp, hash, list) {
+ hlist_for_each_entry_safe(conn, pos, n, hash, node) {
found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
&conn->tuple);
found_ct = NULL;
@@ -135,7 +135,7 @@ static int count_them(struct net *net,
if (found == NULL) {
/* this one is gone */
- list_del(&conn->list);
+ hlist_del(&conn->node);
kfree(conn);
continue;
}
@@ -146,12 +146,12 @@ static int count_them(struct net *net,
* closed already -> ditch it
*/
nf_ct_put(found_ct);
- list_del(&conn->list);
+ hlist_del(&conn->node);
kfree(conn);
continue;
}
- if (same_source_net(addr, mask, &conn->tuple.src.u3, family))
+ if (same_source_net(addr, mask, &conn->addr, family))
/* same source network -> be counted! */
++matches;
nf_ct_put(found_ct);
@@ -161,11 +161,12 @@ static int count_them(struct net *net,
if (addit) {
/* save the new connection in our list */
- conn = kzalloc(sizeof(*conn), GFP_ATOMIC);
+ conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
if (conn == NULL)
return -ENOMEM;
conn->tuple = *tuple;
- list_add(&conn->list, hash);
+ conn->addr = *addr;
+ hlist_add_head(&conn->node, hash);
++matches;
}
@@ -185,15 +186,11 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
int connections;
ct = nf_ct_get(skb, &ctinfo);
- if (ct != NULL) {
- if (info->flags & XT_CONNLIMIT_DADDR)
- tuple_ptr = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- else
- tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
- } else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
- par->family, &tuple)) {
+ if (ct != NULL)
+ tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+ par->family, &tuple))
goto hotdrop;
- }
if (par->family == NFPROTO_IPV6) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
@@ -228,9 +225,13 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
unsigned int i;
int ret;
- if (unlikely(!connlimit_rnd_inited)) {
- get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
- connlimit_rnd_inited = true;
+ if (unlikely(!connlimit_rnd)) {
+ u_int32_t rand;
+
+ do {
+ get_random_bytes(&rand, sizeof(rand));
+ } while (!rand);
+ cmpxchg(&connlimit_rnd, 0, rand);
}
ret = nf_ct_l3proto_try_module_get(par->family);
if (ret < 0) {
@@ -248,7 +249,7 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
spin_lock_init(&info->data->lock);
for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i)
- INIT_LIST_HEAD(&info->data->iphash[i]);
+ INIT_HLIST_HEAD(&info->data->iphash[i]);
return 0;
}
@@ -257,15 +258,15 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_connlimit_info *info = par->matchinfo;
struct xt_connlimit_conn *conn;
- struct xt_connlimit_conn *tmp;
- struct list_head *hash = info->data->iphash;
+ struct hlist_node *pos, *n;
+ struct hlist_head *hash = info->data->iphash;
unsigned int i;
nf_ct_l3proto_module_put(par->family);
for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
- list_for_each_entry_safe(conn, tmp, &hash[i], list) {
- list_del(&conn->list);
+ hlist_for_each_entry_safe(conn, pos, n, &hash[i], node) {
+ hlist_del(&conn->node);
kfree(conn);
}
}
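Taken together, the connlimit changes above switch the per-rule connection cache from list_head to the smaller hlist, store the address each entry was counted under directly in the entry (so same_source_net() no longer digs it out of the conntrack tuple), always resolve the conntrack entry through the ORIGINAL tuple, and drop the connlimit_rnd_inited bool in favour of lock-free one-time initialisation: a non-zero random value is published with cmpxchg(), so concurrent checkentry calls cannot overwrite an already-chosen hash secret. A userspace analogue of that idiom, with C11 atomics standing in for cmpxchg(); the example_rnd name and the random() call are stand-ins, not kernel symbols:

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static _Atomic uint32_t example_rnd;	/* 0 means "not initialised yet" */

static uint32_t example_rnd_get(void)
{
	uint32_t cur = atomic_load(&example_rnd);

	if (!cur) {
		uint32_t expected = 0;
		uint32_t fresh;

		do {
			fresh = (uint32_t)random();	/* the kernel uses get_random_bytes() */
		} while (!fresh);

		/* Only one caller publishes a value; a loser of the race
		 * keeps the winner's value returned in 'expected'. */
		if (atomic_compare_exchange_strong(&example_rnd, &expected, fresh))
			cur = fresh;
		else
			cur = expected;
	}
	return cur;
}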
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index ca4c825be93d..9bde4d1d3e9b 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -1,5 +1,6 @@
#include <linux/utsname.h>
#include <net/cfg80211.h>
+#include "core.h"
#include "ethtool.h"
static void cfg80211_get_drvinfo(struct net_device *dev,
@@ -37,9 +38,41 @@ static void cfg80211_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->len = 0;
}
+static void cfg80211_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *rp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+ memset(rp, 0, sizeof(*rp));
+
+ if (rdev->ops->get_ringparam)
+ rdev->ops->get_ringparam(wdev->wiphy,
+ &rp->tx_pending, &rp->tx_max_pending,
+ &rp->rx_pending, &rp->rx_max_pending);
+}
+
+static int cfg80211_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *rp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+ if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
+ return -EINVAL;
+
+ if (rdev->ops->set_ringparam)
+ return rdev->ops->set_ringparam(wdev->wiphy,
+ rp->tx_pending, rp->rx_pending);
+
+ return -ENOTSUPP;
+}
+
const struct ethtool_ops cfg80211_ethtool_ops = {
.get_drvinfo = cfg80211_get_drvinfo,
.get_regs_len = cfg80211_get_regs_len,
.get_regs = cfg80211_get_regs,
.get_link = ethtool_op_get_link,
+ .get_ringparam = cfg80211_get_ringparam,
+ .set_ringparam = cfg80211_set_ringparam,
};
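The cfg80211 ethtool glue above forwards the ring-parameter requests to the new get_ringparam/set_ringparam driver ops when a wiphy provides them, memset()s the report so drivers without the op return zeros, and rejects the mini/jumbo RX fields that have no wireless meaning. A hedged sketch of what a driver-side implementation could look like; the my_wifi_* names and the 512-entry limits are invented for illustration:

#include <net/cfg80211.h>

static void my_wifi_get_ringparam(struct wiphy *wiphy, u32 *tx, u32 *tx_max,
				  u32 *rx, u32 *rx_max)
{
	*tx = 64;		/* currently configured TX descriptors */
	*tx_max = 512;		/* hardware limit */
	*rx = 128;
	*rx_max = 512;
}

static int my_wifi_set_ringparam(struct wiphy *wiphy, u32 tx, u32 rx)
{
	if (!tx || !rx || tx > 512 || rx > 512)
		return -EINVAL;

	/* reprogram the DMA rings here */
	return 0;
}

static const struct cfg80211_ops my_wifi_cfg_ops = {
	/* ... the driver's existing callbacks ... */
	.get_ringparam	= my_wifi_get_ringparam,
	.set_ringparam	= my_wifi_set_ringparam,
};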