author | Rafael J. Wysocki <rjw@sisk.pl> | 2011-12-21 09:51:23 +0100
---|---|---
committer | Rafael J. Wysocki <rjw@sisk.pl> | 2011-12-21 09:51:23 +0100
commit | dfa9d178cd28caf5d76bc4f37f4b3e7e8df8e697 (patch) |
tree | d97ca7790d8a622cba9fdd19832decdb0aa6418a /net/core |
parent | 3f19f08a7ec74cfc50fbad3c5e615760afbd23a0 (diff) |
parent | 7b4050381127ae11fcfc74a106d715a5fbbf888a (diff) |
Merge branch 'devfreq-for-next' of git://git.infradead.org/users/kmpark/linux-samsung into pm-devfreq
* 'devfreq-for-next' of git://git.infradead.org/users/kmpark/linux-samsung: (765 commits)
PM/Devfreq: Add Exynos4-bus device DVFS driver for Exynos4210/4212/4412.
pci: Fix hotplug of Express Module with pci bridges
i2c-eg20t: correct the driver init order of pch_i2c_probe()
I2C: OMAP: fix FIFO usage for OMAP4
i2c-s3c2410: Fix return code of s3c24xx_i2c_parse_dt_gpio
i2c: i2c-s3c2410: Add a cpu_relax() to busy wait for bus idle
Linux 3.2-rc6
Revert "drm/i915: fix infinite recursion on unbind due to ilk vt-d w/a"
btrfs: lower the dirty balance poll interval
drm/i915/dp: Dither down to 6bpc if it makes the mode fit
drm/i915: enable semaphores on per-device defaults
drm/i915: don't set unpin_work if vblank_get fails
drm/i915: By default, enable RC6 on IVB and SNB when reasonable
iommu: Export intel_iommu_enabled to signal when iommu is in use
drm/i915/sdvo: Include LVDS panels for the IS_DIGITAL check
drm/i915: prevent division by zero when asking for chipset power
drm/i915: add PCH info to i915_capabilities
drm/i915: set the right SDVO transcoder for CPT
drm/i915: no-lvds quirk for ASUS AT5NM10T-I
sched: Fix select_idle_sibling() regression in selecting an idle SMT sibling
...
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/dev.c | 9
-rw-r--r-- | net/core/dev_addr_lists.c | 3
-rw-r--r-- | net/core/neighbour.c | 5
-rw-r--r-- | net/core/request_sock.c | 7
-rw-r--r-- | net/core/secure_seq.c | 2
-rw-r--r-- | net/core/skbuff.c | 2
6 files changed, 20 insertions, 8 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ba50a1e404c..5a13edfc9f73 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1396,7 +1396,7 @@ rollback:
 	for_each_net(net) {
 		for_each_netdev(net, dev) {
 			if (dev == last)
-				break;
+				goto outroll;
 
 			if (dev->flags & IFF_UP) {
 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1407,7 @@ rollback:
 		}
 	}
 
+outroll:
 	raw_notifier_chain_unregister(&netdev_chain, nb);
 	goto unlock;
 }
@@ -4282,6 +4283,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
 			    sizeof(struct dev_iter_state));
 }
 
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+		     const struct seq_operations *ops)
+{
+	return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
 static const struct file_operations dev_seq_fops = {
 	.owner	 = THIS_MODULE,
 	.open    = dev_seq_open,
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 277faef9148d..febba516db62 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-	return seq_open_net(inode, file, &dev_mc_seq_ops,
-			    sizeof(struct seq_net_private));
+	return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
 }
 
 static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 039d51e6c284..5ac07d31fbc9 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2397,7 +2397,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
 	struct net *net = seq_file_net(seq);
 	struct neigh_table *tbl = state->tbl;
 
-	pn = pn->next;
+	do {
+		pn = pn->next;
+	} while (pn && !net_eq(pneigh_net(pn), net));
+
 	while (!pn) {
 		if (++state->bucket > PNEIGH_HASHMASK)
 			break;
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 182236b2510a..9b570a6a33c5 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -26,10 +26,11 @@
  * but then some measure against one socket starving all other sockets
  * would be needed.
  *
- * It was 128 by default. Experiments with real servers show, that
+ * The minimum value of it is 128. Experiments with real servers show that
  * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low memory machines,
+ * and it will increase in proportion to the memory of machine.
  * Note : Dont forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 025233de25f9..925991ae6f52 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
 }
 late_initcall(net_secret_init);
 
+#ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
 {
 	/*
@@ -33,6 +34,7 @@ static u32 seq_scale(u32 seq)
 	 */
 	return seq + (ktime_to_ns(ktime_get_real()) >> 6);
 }
+#endif
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 18a3cebb753d..3c30ee4a5710 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2230,7 +2230,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
  * @shiftlen: shift up to this many bytes
  *
  * Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns number bytes shifted.
  * It's up to caller to free skb if everything was shifted.
  *
  * If @tgt runs out of frags, the whole operation is aborted.
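The neighbour.c hunk above turns a plain `pn = pn->next` step into a loop that also skips entries belonging to a different network namespace. The following stand-alone user-space sketch models that iteration pattern only; the `struct entry` type and its `owner` field are invented stand-ins for the kernel's pneigh entries and namespaces, not kernel code.

```c
#include <stdio.h>

struct entry {
	int owner;              /* stands in for the entry's network namespace */
	struct entry *next;
};

/* Mirrors the new do/while in pneigh_get_next(): advance at least once,
 * then keep advancing while an entry exists but belongs to another owner. */
static struct entry *next_in_owner(struct entry *e, int owner)
{
	do {
		e = e->next;
	} while (e && e->owner != owner);
	return e;
}

int main(void)
{
	struct entry c = { .owner = 1, .next = NULL };
	struct entry b = { .owner = 2, .next = &c };   /* wrong owner, skipped */
	struct entry a = { .owner = 1, .next = &b };

	struct entry *e = next_in_owner(&a, 1);
	printf("landed on owner %d\n", e ? e->owner : -1);   /* prints 1 (entry c) */
	return 0;
}
```

Similarly, the dev.c hunk replaces a `break` with `goto outroll` inside nested loops, so that hitting the stop condition leaves both loops and falls straight through to the single unregister call. A compact sketch of that control-flow pattern, with made-up loop bounds rather than the real notifier rollback:

```c
#include <stdio.h>

int main(void)
{
	for (int outer = 0; outer < 3; outer++) {
		for (int inner = 0; inner < 3; inner++) {
			if (outer == 1 && inner == 2)
				goto out;   /* "break" would only end the inner loop */
			printf("visit %d.%d\n", outer, inner);
		}
	}
out:
	printf("cleanup runs exactly once\n");
	return 0;
}
```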